input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
currently in the dictionary.
# - In contrast, the *defaultdict* will create an item of the type of the argument, in this case an integer with the default value of 0.
# - See [defaultdict](https://docs.python.org/3.3/library/collections.html#defaultdict-objects).
# In[13]:
# UNQ_C1 GRADED FUNCTION: create_dictionaries
def create_dictionaries(training_corpus, vocab, verbose=True):
    """
    Count tag transitions, word emissions, and tag occurrences in a tagged corpus.

    Input:
        training_corpus: a corpus where each line has a word followed by its tag.
        vocab: a dictionary where keys are words in vocabulary and value is an index
        verbose: when True, print a progress message every 50,000 lines
    Output:
        emission_counts: a dictionary where the keys are (tag, word) and the values are the counts
        transition_counts: a dictionary where the keys are (prev_tag, tag) and the values are the counts
        tag_counts: a dictionary where the keys are the tags and the values are the counts
    """
    # defaultdict(int) returns 0 for unseen keys, so counts can be incremented directly.
    emission_counts = defaultdict(int)
    transition_counts = defaultdict(int)
    tag_counts = defaultdict(int)

    # The start-of-sentence pseudo-tag seeds the first transition.
    prev_tag = '--s--'

    # enumerate(..., start=1) replaces the original manual line counter.
    for line_num, word_tag in enumerate(training_corpus, start=1):
        # Progress report every 50,000 corpus lines.
        if verbose and line_num % 50000 == 0:
            print(f"word count = {line_num}")

        # Parse the line into (word, tag) via the helper from utils_pos.py;
        # it also normalizes words that are not in the vocabulary.
        word, tag = get_word_tag(word_tag, vocab)

        # Record the transition, the emission, and the tag occurrence.
        transition_counts[(prev_tag, tag)] += 1
        emission_counts[(tag, word)] += 1
        tag_counts[tag] += 1

        # The current tag becomes the predecessor for the next line.
        prev_tag = tag

    return emission_counts, transition_counts, tag_counts
# In[14]:
# Build the emission, transition, and tag count dictionaries from the training corpus.
emission_counts, transition_counts, tag_counts = create_dictionaries(training_corpus, vocab)
# In[15]:
# get all the POS states
# The sorted tag set defines the model's hidden-state inventory
# (includes the '--s--' start-of-sentence pseudo-tag).
states = sorted(tag_counts.keys())
print(f"Number of POS tags (number of 'states'): {len(states)}")
print("View these POS tags (states)")
print(states)
# ##### Expected Output
#
# ```CPP
# Number of POS tags (number of 'states'): 46
# View these POS tags (states)
# ['#', '$', "''", '(', ')', ',', '--s--', '.', ':', 'CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD', 'NN', 'NNP', 'NNPS', 'NNS', 'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'WDT', 'WP', 'WP$', 'WRB', '``']
# ```
# In[16]:
# Test your function
# Run the provided unit tests against create_dictionaries.
w2_unittest.test_create_dictionaries(create_dictionaries, training_corpus, vocab)
# The 'states' are the Parts-of-speech designations found in the training data. They will also be referred to as 'tags' or POS in this assignment.
#
# - "NN" is noun, singular,
# - 'NNS' is noun, plural.
# - In addition, there are helpful tags like '--s--' which indicate a start of a sentence.
# - You can get a more complete description at [Penn Treebank II tag set](https://www.clips.uantwerpen.be/clips.bak/pages/mbsp-tags).
# In[17]:
# Show a few transition counts: (prev_tag, tag) -> count.
print("transition examples: ")
for ex in list(transition_counts.items())[:3]:
    print(ex)
print()

# Show a few emission counts: (tag, word) -> count.
print("emission examples: ")
for ex in list(emission_counts.items())[200:203]:
    print (ex)
print()

# 'back' is ambiguous: it occurs in the corpus under several different POS tags.
print("ambiguous word example: ")
for tup,cnt in emission_counts.items():
    if tup[1] == 'back': print (tup, cnt)
# ##### Expected Output
#
# ```CPP
# transition examples:
# (('--s--', 'IN'), 5050)
# (('IN', 'DT'), 32364)
# (('DT', 'NNP'), 9044)
#
# emission examples:
# (('DT', 'any'), 721)
# (('NN', 'decrease'), 7)
# (('NN', 'insider-trading'), 5)
#
# ambiguous word example:
# ('RB', 'back') 304
# ('VB', 'back') 20
# ('RP', 'back') 84
# ('JJ', 'back') 25
# ('NN', 'back') 29
# ('VBP', 'back') 4
# ```
# <a name='1.2'></a>
# ### Part 1.2 - Testing
#
# Now you will test the accuracy of your parts-of-speech tagger using your `emission_counts` dictionary.
# - Given your preprocessed test corpus `prep`, you will assign a parts-of-speech tag to every word in that corpus.
# - Using the original tagged test corpus `y`, you will then compute what percent of the tags you got correct.
# <a name='ex-02'></a>
# ### Exercise 02
#
# **Instructions:** Implement `predict_pos` that computes the accuracy of your model.
#
# - This is a warm up exercise.
# - To assign a part of speech to a word, assign the most frequent POS for that word in the training set.
# - Then evaluate how well this approach works. Each time you predict based on the most frequent POS for the given word, check whether the actual POS of that word is the same. If so, the prediction was correct!
# - Calculate the accuracy as the number of correct predictions divided by the total number of words for which you predicted the POS tag.
# In[18]:
# UNQ_C2 GRADED FUNCTION: predict_pos
def predict_pos(prep, y, emission_counts, vocab, states):
    '''
    Predict each word's POS as its most frequent training tag and score accuracy.

    Input:
        prep: a preprocessed version of 'y'. A list with the 'word' component of the tuples.
        y: a corpus composed of a list of tuples where each tuple consists of (word, POS)
        emission_counts: a dictionary where the keys are (tag,word) tuples and the value is the count
        vocab: a dictionary where keys are words in vocabulary and value is an index
        states: a sorted list of all possible tags for this assignment
    Output:
        accuracy: fraction of lines in 'y' whose most-frequent training tag matches
                  the true tag. Malformed lines and out-of-vocabulary words are never
                  counted as correct but remain in the denominator.
    '''
    # Initialize the number of correct predictions to zero
    num_correct = 0

    # Every line of 'y' counts toward the denominator, even malformed or OOV
    # lines, so the reported accuracy matches the assignment's expected value.
    total = len(y)

    for word, y_tup in zip(prep, y):
        # Split the "word<TAB>tag" line into its components.
        y_tup_l = y_tup.split()

        if len(y_tup_l) == 2:
            # Set the true POS label for this word
            true_label = y_tup_l[1]
        else:
            # Malformed line: no tag available; skip (still counted in 'total').
            continue

        count_final = 0
        pos_final = ''

        # Only vocabulary words have usable emission counts.
        if word in vocab:
            # Pick the tag with the largest emission count for this word.
            for pos in states:
                key = (pos, word)
                if key in emission_counts:
                    count = emission_counts[key]
                    if count > count_final:
                        count_final = count
                        pos_final = pos

            # Correct when the most frequent tag equals the true tag.
            if pos_final == true_label:
                num_correct += 1

    accuracy = num_correct / total
    return accuracy
# In[19]:
# Baseline accuracy: predict the most frequent tag per word over the test corpus.
accuracy_predict_pos = predict_pos(prep, y, emission_counts, vocab, states)
print(f"Accuracy of prediction using predict_pos is {accuracy_predict_pos:.4f}")
# ##### Expected Output
# ```CPP
# Accuracy of prediction using predict_pos is 0.8889
# ```
#
# 88.9% is really good for this warm up exercise. With hidden markov models, you should be able to get **95% accuracy.**
# In[20]:
# Test your function
# Run the provided unit tests against predict_pos.
w2_unittest.test_predict_pos(predict_pos, prep, y, emission_counts, vocab, states)
# <a name='2'></a>
# # Part 2: Hidden Markov Models for POS
#
# Now you will build something more context specific. Concretely, you will be implementing a Hidden Markov Model (HMM) with a Viterbi decoder
# - The HMM is one of the most commonly used algorithms in Natural Language Processing, and is a foundation to many deep learning techniques you will see in this specialization.
# - In addition to parts-of-speech tagging, HMM is used in speech recognition, speech synthesis, etc.
# - By completing this part of the assignment you will get a 95% accuracy on the same dataset you used in Part 1.
#
# The Markov Model contains a number of states | |
<filename>src/ramstk/views/gtk3/requirement/panel.py
# -*- coding: utf-8 -*-
#
# ramstk.views.gtk3.requirement.panel.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""GTK3 Requirement Panels."""
# Standard Library Imports
from datetime import date
from typing import Any, Dict, List, Tuple
# Third Party Imports
import treelib
from pubsub import pub
# RAMSTK Package Imports
from ramstk.views.gtk3 import Gdk, Gtk, _
from ramstk.views.gtk3.widgets import (
RAMSTKButton,
RAMSTKCheckButton,
RAMSTKComboBox,
RAMSTKDateSelect,
RAMSTKEntry,
RAMSTKFixedPanel,
RAMSTKTextView,
RAMSTKTreePanel,
)
class RequirementTreePanel(RAMSTKTreePanel):
"""Panel to display hierarchy of requirements."""
# Define private dictionary class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_select_msg = "succeed_retrieve_all_requirement"
_tag = "requirement"
_title = _("Requirement Tree")
# Define public dictionary class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self) -> None:
"""Initialize an instance of the Requirement panel."""
super().__init__()
# Initialize private dictionary class attributes.
self.tvwTreeView.dic_row_loader = {
"requirement": self.__do_load_requirement,
}
# Initialize private list class attributes.
# Initialize private scalar class attributes.
# Initialize public dictionary class attributes.
self.dic_attribute_widget_map = {
"revision_id": [
0,
Gtk.CellRendererText(),
"edited",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": False,
"fg_color": "#000000",
"visible": False,
},
_("Revision ID"),
"gint",
],
"requirement_id": [
1,
Gtk.CellRendererText(),
"edited",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": False,
"fg_color": "#000000",
"visible": False,
},
_("Requirement ID"),
"gint",
],
"derived": [
2,
Gtk.CellRendererToggle(),
"toggled",
super().on_cell_toggled,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Derived?"),
"gint",
],
"description": [
3,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
"mvw_editing_requirement",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Description"),
"gchararray",
],
"figure_number": [
4,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
"mvw_editing_requirement",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Figure Number"),
"gchararray",
],
"owner": [
5,
Gtk.CellRendererCombo(),
"changed",
super().on_cell_change,
"mvw_editing_requirement",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Owner"),
"gchararray",
],
"page_number": [
6,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
"mvw_editing_requirement",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Page Number"),
"gchararray",
],
"parent_id": [
7,
Gtk.CellRendererText(),
"edited",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": False,
"fg_color": "#000000",
"visible": False,
},
_("Parent ID"),
"gint",
],
"priority": [
8,
Gtk.CellRendererCombo(),
"changed",
super().on_cell_change,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Priority"),
"gint",
],
"requirement_code": [
9,
Gtk.CellRendererText(),
"edited",
None,
"mvw_editing_requirement",
"",
{
"bg_color": "#FFFFFF",
"editable": False,
"fg_color": "#000000",
"visible": True,
},
_("Code"),
"gchararray",
],
"specification": [
10,
Gtk.CellRendererText(),
"edited",
super().on_cell_edit,
"mvw_editing_requirement",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Specification"),
"gchararray",
],
"requirement_type": [
11,
Gtk.CellRendererCombo(),
"changed",
super().on_cell_change,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Type"),
"gchararray",
],
"validated": [
12,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Validated?"),
"gint",
],
"validated_date": [
13,
Gtk.CellRendererText(),
"edited",
None,
"mvw_editing_requirement",
"",
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": True,
},
_("Validated Date"),
"gchararray",
],
"q_clarity_0": [
14,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Clarity Q1"),
"gint",
],
"q_clarity_1": [
15,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Clarity Q2"),
"gint",
],
"q_clarity_2": [
16,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Clarity Q3"),
"gint",
],
"q_clarity_3": [
17,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Clarity Q4"),
"gint",
],
"q_clarity_4": [
18,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Clarity Q5"),
"gint",
],
"q_clarity_5": [
19,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Clarity Q6"),
"gint",
],
"q_clarity_6": [
20,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Clarity Q7"),
"gint",
],
"q_clarity_7": [
21,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Clarity Q8"),
"gint",
],
"q_clarity_8": [
22,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Clarity Q9"),
"gint",
],
"q_complete_0": [
23,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q1"),
"gint",
],
"q_complete_1": [
24,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q2"),
"gint",
],
"q_complete_2": [
25,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q3"),
"gint",
],
"q_complete_3": [
26,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q4"),
"gint",
],
"q_complete_4": [
27,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q5"),
"gint",
],
"q_complete_5": [
28,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q6"),
"gint",
],
"q_complete_6": [
29,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q7"),
"gint",
],
"q_complete_7": [
30,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q8"),
"gint",
],
"q_complete_8": [
31,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q9"),
"gint",
],
"q_complete_9": [
32,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Complete Q10"),
"gint",
],
"q_consistent_0": [
33,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Consistent Q1"),
"gint",
],
"q_consistent_1": [
34,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Consistent Q2"),
"gint",
],
"q_consistent_2": [
35,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Consistent Q3"),
"gint",
],
"q_consistent_3": [
36,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Consistent Q4"),
"gint",
],
"q_consistent_4": [
37,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Consistent Q5"),
"gint",
],
"q_consistent_5": [
38,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Consistent Q6"),
"gint",
],
"q_consistent_6": [
39,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Consistent Q7"),
"gint",
],
"q_consistent_7": [
40,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Consistent Q8"),
"gint",
],
"q_consistent_8": [
41,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Consistent Q9"),
"gint",
],
"q_verifiable_0": [
42,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Verifiable Q1"),
"gint",
],
"q_verifiable_1": [
43,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Verifiable Q2"),
"gint",
],
"q_verifiable_2": [
44,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Verifiable Q3"),
"gint",
],
"q_verifiable_3": [
45,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Verifiable Q4"),
"gint",
],
"q_verifiable_4": [
46,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Verifiable Q5"),
"gint",
],
"q_verifiable_5": [
47,
Gtk.CellRendererToggle(),
"toggled",
None,
"mvw_editing_requirement",
0,
{
"bg_color": "#FFFFFF",
"editable": True,
"fg_color": "#000000",
"visible": False,
},
_("Verifiable Q6"),
"gint",
],
}
# Initialize public list class attributes.
self.lst_owner: List[str] = [""]
self.lst_type: List[str] = [""]
# Initialize public scalar class attributes.
super().do_set_properties()
super().do_make_panel()
super().do_set_callbacks()
self.tvwTreeView.set_tooltip_text(
_("Displays the hierarchical list of requirements.")
)
# Subscribe to PyPubSub messages.
pub.subscribe(
self._on_module_switch,
"mvwSwitchedPage",
)
pub.subscribe(
self._on_workview_edit,
f"wvw_editing_{self._tag}",
)
def _on_module_switch(self, module: str = "") -> None:
"""Respond to change in selected Module View module (tab).
:param module: the name of the | |
self._response(request, status=200)
@routes.post("/order/{id}", name="order")
async def order(self, request):
"""Handler that queries the given order.
`7.1.3. Order Objects <https://tools.ietf.org/html/rfc8555#section-7.1.3>`_
:raises: :class:`aiohttp.web.HTTPNotFound` If the order does not exist.
:return: The order object.
"""
async with self._session(request) as session:
jws, account = await self._verify_request(
request, session, post_as_get=True
)
order_id = request.match_info["id"]
order = await self._db.get_order(session, account.account_id, order_id)
if not order:
raise web.HTTPNotFound
await order.validate()
return self._response(request, order.serialize(request))
@routes.post("/orders/{id}", name="orders")
async def orders(self, request):
"""Handler that retrieves the account's chunked orders list.
`7.1.2.1. Orders List <https://tools.ietf.org/html/rfc8555#section-7.1.2.1>`_
:return: An object with key *orders* that holds a chunk of the account's orders list.
"""
async with self._session(request) as session:
jws, account = await self._verify_request(
request, session, post_as_get=True
)
try:
cursor = int(request.query.get("cursor", 0))
orders = await self._db.get_orders_list(
session, account.account_id, self.ORDERS_LIST_CHUNK_LEN, cursor
)
except ValueError:
raise web.HTTPBadRequest(text="Cursor must be an integer >= 0.")
if len(orders) == 0:
raise web.HTTPNotFound(
text="No orders found. Try a lower cursor value or create some orders first."
)
"""The next two lines ensure that the extra order we query to see if there more orders is not returned
to the client. If there are no more orders after the current cursor, then return all of them."""
more_orders = len(orders) == (self.ORDERS_LIST_CHUNK_LEN + 1)
orders = orders[:-1] if more_orders else orders
return self._response(
request,
{
"orders": [
order.url(request)
for order in orders
if order.status == models.OrderStatus.PENDING
]
},
links=[
f'<{acmetk.util.next_url(account.orders_url(request), cursor)}>; rel="next"'
]
if more_orders
else [],
)
    async def _validate_order(
        self, request, session
    ) -> (models.Order, x509.CertificateSigningRequest):
        """Validate an order-finalization request.

        Checks that the order exists and is in state READY, and that the
        submitted CSR uses a supported key type of sufficient size, carries a
        valid signature, and requests exactly the identifiers the order holds
        authorizations for.

        :raises:
            * :class:`aiohttp.web.HTTPNotFound` If the order does not exist.
            * :class:`acme.messages.Error` If the order or CSR fails any check.

        :return: The validated order and the parsed CSR.
        """
        jws, account = await self._verify_request(request, session)
        order_id = request.match_info["id"]

        order = await self._db.get_order(session, account.account_id, order_id)
        if not order:
            raise web.HTTPNotFound

        # NOTE(review): presumably re-evaluates the order's status (e.g. expiry)
        # before the state checks below -- confirm in models.
        await order.validate()

        if order.status == models.OrderStatus.INVALID:
            raise acme.messages.Error(
                typ="orderInvalid",
                detail="This order cannot be finalized because it is invalid.",
            )

        # RFC 8555: finalization is only allowed from the READY state.
        if order.status != models.OrderStatus.READY:
            raise acme.messages.Error.with_code("orderNotReady")

        csr = messages.CertificateRequest.json_loads(jws.payload).csr
        pub_key = csr.public_key()
        logger.debug(
            "Received CSR; Type: %s, Key Size: %s bits", type(pub_key), pub_key.key_size
        )

        # Only accept supported key types, and enforce the configured minimum size.
        if isinstance(pub_key, self.SUPPORTED_CSR_KEYS):
            try:
                self._match_keysize(pub_key, "csr")
            except ValueError as e:
                raise acme.messages.Error.with_code(
                    "badPublicKey",
                    detail=e.args[0],
                )
        else:
            raise acme.messages.Error.with_code(
                "badPublicKey",
                detail=f"At this moment, only the following keys are supported in CSRs: "
                f"{', '.join([key_type.__name__ for key_type in self.SUPPORTED_CSR_KEYS])}.",
            )

        if not csr.is_signature_valid:
            raise acme.messages.Error.with_code(
                "badCSR", detail="The CSR's signature is invalid."
            )
        elif not order.validate_csr(csr):
            raise acme.messages.Error.with_code(
                "badCSR",
                detail="The requested identifiers in the CSR differ from those "
                "that this order has authorizations for.",
            )

        return order, csr
@routes.post("/order/{id}/finalize", name="finalize-order")
async def finalize_order(self, request):
"""Handler that initiates finalization of the given order.
`7.4. Applying for Certificate Issuance <https://tools.ietf.org/html/rfc8555#section-7.4>`_
Specifically: https://tools.ietf.org/html/rfc8555#page-47
:raises:
* :class:`aiohttp.web.HTTPNotFound` If the order does not exist.
* :class:`acme.messages.Error` if any of the following are true:
* The order is not in state :class:`acmetk.models.OrderStatus.READY`
* The CSR's public key size is insufficient
* The CSR's signature is invalid
* The identifiers that the CSR requests differ from those that the \
order has authorizations for
:return: The updated order object.
"""
async with self._session(request) as session:
order, csr = await self._validate_order(request, session)
order.csr = csr
order.status = models.OrderStatus.PROCESSING
serialized = order.serialize(request)
order_id = str(order.order_id)
account_id = order.account.account_id
await session.commit()
asyncio.ensure_future(self.handle_order_finalize(request, account_id, order_id))
return self._response(
request,
serialized,
headers={"Location": acmetk.util.url_for(request, "order", id=order_id)},
)
@routes.post("/certificate/{id}", name="certificate")
@abc.abstractmethod
async def certificate(self, request):
"""Handler that queries the given certificate.
`7.4.2. Downloading the Certificate <https://tools.ietf.org/html/rfc8555#section-7.4.2>`_
:raises: :class:`aiohttp.web.HTTPNotFound` If the certificate does not exist.
:return: The certificate's full chain in PEM format.
"""
pass
    async def _handle_challenge_validate(self, request, account_id, challenge_id):
        """Validate the given challenge with the validator configured for its type.

        :param request: The client request that triggered the validation.
        :param account_id: The id of the account that owns the challenge.
        :param challenge_id: The id of the challenge to validate.
        """
        logger.debug("Validating challenge %s", challenge_id)

        async with self._session(request) as session:
            challenge = await self._db.get_challenge(session, account_id, challenge_id)

            """We want the reverse proxy application to always be able to issue certificates for itself inside the
            Docker container.

            Challenge validation would likely fail in that case. In the RequestIPDNS challenge for example,
            the domain name does not resolve to 127.0.0.1 which is the host IP the request originates.

            For that reason, we start a second instance of the relay that uses loose/no checks but is only
            available within the Docker container.
            """
            # Dispatch on the challenge's type; validators are configured at startup.
            validator = self._challenge_validators[challenge.type]
            await challenge.validate(session, request, validator)

            await session.commit()
@routes.post("/key-change", name="key-change")
async def key_change(self, request):
"""7.3.5. Account Key Rollover"""
async with self._session(request) as session:
jws, account = await self._verify_request(request, session)
payload = jws.payload.decode()
inner_jws = acme.jws.JWS.json_loads(payload)
"""The inner JWS MUST meet the normal requirements …"""
sig = inner_jws.signature.combined
if sig.alg not in self.SUPPORTED_JWS_ALGORITHMS:
raise acme.messages.Error.with_code(
"badSignatureAlgorithm",
detail=f"Supported algorithms: {', '.join([str(alg) for alg in self.SUPPORTED_JWS_ALGORITHMS])}",
)
""", with the following differences:"""
if inner_jws.signature.combined.url != jws.signature.combined.url:
""" The inner JWS MUST have the same "url" header parameter as the outer JWS. """
raise acme.messages.Error.with_code(
"malformed",
detail="The inner JWS of the keychange url mismatches the outer JWS url.",
)
if inner_jws.signature.combined.nonce:
"""The inner JWS MUST omit the "nonce" header parameter."""
raise acme.messages.Error.with_code(
"malformed",
detail="The inner JWS has a nonce.",
)
if inner_jws.signature.combined.jwk is None:
"""The inner JWS MUST have a "jwk" header parameter, containing the public key of the new key pair."""
raise acme.messages.Error.with_code(
"malformed",
detail="The inner JWS of the keychange lacks a jwk.",
)
if not inner_jws.verify(sig.jwk):
""" 4. Check that the inner JWS verifies using the key in its "jwk" field."""
raise acme.messages.Error.with_code("unauthorized")
key_change = messages.KeyChange.json_loads(inner_jws.payload)
if key_change.account != acmetk.util.url_for(
request, "accounts", account_id=str(account.account_id)
):
"""7. Check that the "account" field of the keyChange object contains
the URL for the account matching the old key (i.e., the "kid"
field in the outer JWS)."""
raise acme.messages.Error.with_code(
"malformed", detail="The KeyChange object account mismatches"
)
if key_change.oldKey != account.key:
"""8. Check that the "oldKey" field of the keyChange object is the same as the account key for the
account in question."""
raise acme.messages.Error.with_code(
"malformed", detail="The KeyChange object oldKey mismatches"
)
kid = account._jwk_kid(sig.jwk)
in_use = await self._db.get_account(session, kid=kid)
if in_use:
"""9. Check that no account exists whose account key is the same as the key in the "jwk" header
parameter of the inner JWS."""
raise acme.messages.Error.with_code(
"malformed", detail="The KeyChange object key already in use"
)
"""key size validation"""
self._validate_account_key(sig.jwk.key._wrapped)
account.kid = kid
account.key = inner_jws.signature.combined.jwk
await session.merge(account)
await session.commit()
serialized = account.serialize(request)
return self._response(
request,
serialized,
headers={
"Location": acmetk.util.url_for(
request, "accounts", account_id=str(account.account_id)
)
},
)
    @abc.abstractmethod
    async def handle_order_finalize(self, request, account_id: str, order_id: str):
        """Method that handles the actual finalization of an order.

        This method should be called after the order's status has been set
        to :class:`acmetk.models.OrderStatus.PROCESSING` in :meth:`finalize_order`.

        It should retrieve the order from the database and either generate
        the certificate from the stored CSR itself or submit it to another
        CA.
        Afterwards the certificate should be stored alongside the order.
        The *full_chain* attribute needs to be populated and returned
        to the client in :meth:`certificate` if the certificate was
        generated by another CA.

        :param request: The client request that triggered the finalization.
        :param account_id: The account's id
        :param order_id: The order's id
        """
        pass
@middleware
async def host_ip_middleware(self, request, handler):
"""Middleware that checks whether the requesting host's IP
is part of any of the subnets that are whitelisted.
:returns:
* HTTP status code *403* if the host's IP is not part of any of the whitelisted subnets.
* HTTP status code *400* if there is a *X-Forwarded-For* header spoofing attack going on.
"""
forwarded_for = request.headers.get("X-Forwarded-For")
"""If the X-Forwarded-For header is set, then we need to check whether the app is configured
to be behind a reverse proxy. Otherwise, there may be a spoofing attack going on."""
if forwarded_for and not self._use_forwarded_header:
return web.Response(
status=400,
text=f"{type(self).__name__}: The X-Forwarded-For header is being spoofed.",
)
"""Read the X-Forwarded-For header if the server is behind a reverse proxy.
Otherwise, use the host address directly."""
host_ip = ipaddress.ip_address(forwarded_for or request.remote)
"""Attach the actual host IP to the request for re-use in the handler."""
request["actual_ip"] = host_ip
if self._subnets and not any([host_ip in subnet for subnet in self._subnets]):
return web.Response(
status=403,
text=f"{type(self).__name__}: This service is only available from within certain networks."
" Please contact your system administrator.",
)
return await handler(request)
@middleware
async def aiohttp_jinja2_middleware(self, request, handler):
if isinstance(handler, functools.partial) and (
handler := handler.keywords["handler"]
):
# using subapps | |
content)
finally:
config.HOSTNAME_EXTERNAL = hostname_before
def test_s3_static_website_hosting(self):
    """Exercise S3 static website hosting: direct key access, index
    document resolution, and the configured error document for misses."""
    bucket_name = 'test-%s' % short_uid()
    self.s3_client.create_bucket(Bucket=bucket_name)
    self.s3_client.put_object(Bucket=bucket_name, Key='test/index.html', Body='index')
    self.s3_client.put_object(Bucket=bucket_name, Key='test/error.html', Body='error')
    self.s3_client.put_object(Bucket=bucket_name, Key='actual/key.html', Body='key')
    self.s3_client.put_bucket_website(
        Bucket=bucket_name,
        WebsiteConfiguration={'IndexDocument': {'Suffix': 'index.html'},
                              'ErrorDocument': {'Key': 'test/error.html'}}
    )
    headers = aws_stack.mock_aws_request_headers('s3')
    # Route the request to the website endpoint via the Host header.
    headers['Host'] = s3_utils.get_bucket_website_hostname(bucket_name)
    # actual key
    url = 'https://{}.{}:{}/actual/key.html'.format(bucket_name, constants.S3_STATIC_WEBSITE_HOSTNAME,
                                                    config.EDGE_PORT)
    response = requests.get(url, headers=headers, verify=False)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.text, 'key')
    # index document: '/test' resolves to 'test/index.html'
    # NOTE(review): a 302 that also carries the index body looks
    # implementation-specific — confirm against the emulator's behavior.
    url = 'https://{}.{}:{}/test'.format(bucket_name, constants.S3_STATIC_WEBSITE_HOSTNAME, config.EDGE_PORT)
    response = requests.get(url, headers=headers, verify=False)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response.text, 'index')
    # root path test: no index document at '/', so the error document is served
    url = 'https://{}.{}:{}/'.format(bucket_name, constants.S3_STATIC_WEBSITE_HOSTNAME, config.EDGE_PORT)
    response = requests.get(url, headers=headers, verify=False)
    self.assertEqual(response.status_code, 404)
    self.assertEqual(response.text, 'error')
    # error document for a missing key
    url = 'https://{}.{}:{}/something'.format(bucket_name, constants.S3_STATIC_WEBSITE_HOSTNAME, config.EDGE_PORT)
    response = requests.get(url, headers=headers, verify=False)
    self.assertEqual(response.status_code, 404)
    self.assertEqual(response.text, 'error')
def test_s3_event_notification_with_sqs(self):
    """A put_object on a versioned, notification-enabled bucket should emit
    exactly one SQS message whose record carries the correct object size."""
    key_by_path = 'aws/bucket=2020/test1.txt'
    bucket_name = 'notif-sqs-%s' % short_uid()
    queue_url, queue_attributes = self._create_test_queue()
    self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)
    self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Enabled'})
    body = 'Lorem ipsum dolor sit amet, ... ' * 30
    # put an object
    self.s3_client.put_object(Bucket=bucket_name, Key=key_by_path, Body=body)
    self.assertEqual(self._get_test_queue_message_count(queue_url), '1')
    rs = self.sqs_client.receive_message(QueueUrl=queue_url)
    record = [json.loads(to_str(m['Body'])) for m in rs['Messages']][0]['Records'][0]
    # The size reported in the event must match the bytes actually stored.
    download_file = new_tmp_file()
    self.s3_client.download_file(Bucket=bucket_name, Key=key_by_path, Filename=download_file)
    self.assertEqual(record['s3']['object']['size'], os.path.getsize(download_file))
    # clean up
    self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Disabled'})
    self.sqs_client.delete_queue(QueueUrl=queue_url)
    self._delete_bucket(bucket_name, [key_by_path])
def test_s3_delete_object_with_version_id(self):
    """Deleting a specific version of an object must remove exactly that
    version from the bucket's version listing."""
    test_1st_key = 'aws/s3/testkey1.txt'
    test_2nd_key = 'aws/s3/testkey2.txt'
    body = 'Lorem ipsum dolor sit amet, ... ' * 30
    self.s3_client.create_bucket(Bucket=TEST_BUCKET_WITH_VERSIONING)
    self.s3_client.put_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING,
                                         VersioningConfiguration={'Status': 'Enabled'})
    # put 2 objects
    rs = self.s3_client.put_object(Bucket=TEST_BUCKET_WITH_VERSIONING, Key=test_1st_key, Body=body)
    self.s3_client.put_object(Bucket=TEST_BUCKET_WITH_VERSIONING, Key=test_2nd_key, Body=body)
    version_id = rs['VersionId']
    # delete 1st object with version
    rs = self.s3_client.delete_objects(Bucket=TEST_BUCKET_WITH_VERSIONING,
                                       Delete={'Objects': [{'Key': test_1st_key, 'VersionId': version_id}]})
    deleted = rs['Deleted'][0]
    self.assertEqual(deleted['Key'], test_1st_key)
    self.assertEqual(deleted['VersionId'], version_id)
    rs = self.s3_client.list_object_versions(Bucket=TEST_BUCKET_WITH_VERSIONING)
    # 'version' instead of the previous loop variable 'object', which
    # shadowed the builtin of the same name.
    object_versions = [version['VersionId'] for version in rs['Versions']]
    self.assertNotIn(version_id, object_versions)
    # clean up
    self.s3_client.put_bucket_versioning(Bucket=TEST_BUCKET_WITH_VERSIONING,
                                         VersioningConfiguration={'Status': 'Disabled'})
    self._delete_bucket(TEST_BUCKET_WITH_VERSIONING, [test_1st_key, test_2nd_key])
def test_etag_on_get_object_call(self):
    """The ETag returned by put_object must be echoed by get_object for
    both full and ranged reads (range reads keep the whole-object ETag)."""
    self.s3_client.create_bucket(Bucket=TEST_BUCKET_NAME_2)
    body = 'Lorem ipsum dolor sit amet, ... ' * 30
    rs = self.s3_client.put_object(Bucket=TEST_BUCKET_NAME_2, Key=TEST_KEY_2, Body=body)
    etag = rs['ETag']
    # full read
    rs = self.s3_client.get_object(
        Bucket=TEST_BUCKET_NAME_2,
        Key=TEST_KEY_2
    )
    self.assertIn('ETag', rs)
    self.assertEqual(etag, rs['ETag'])
    self.assertEqual(rs['ContentLength'], len(body))
    # ranged read: ContentLength reflects the range, the ETag is unchanged
    rs = self.s3_client.get_object(
        Bucket=TEST_BUCKET_NAME_2,
        Key=TEST_KEY_2,
        Range='bytes=0-{}'.format(TEST_GET_OBJECT_RANGE - 1)
    )
    self.assertIn('ETag', rs)
    self.assertEqual(etag, rs['ETag'])
    self.assertEqual(rs['ContentLength'], TEST_GET_OBJECT_RANGE)
    # clean up
    self._delete_bucket(TEST_BUCKET_NAME_2, [TEST_KEY_2])
def test_get_object_versioning(self):
    """Listing object versions of a fresh, empty bucket succeeds and echoes
    the bucket name back in the response."""
    bucket_name = 'bucket-%s' % short_uid()
    self.s3_client.create_bucket(Bucket=bucket_name)
    response = self.s3_client.list_object_versions(Bucket=bucket_name, EncodingType='url')
    self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
    self.assertEqual(response['Name'], bucket_name)
    # clean up
    self._delete_bucket(bucket_name, [])
def test_bucket_versioning(self):
    """Enabling versioning on a bucket must be reflected by a subsequent
    get_bucket_versioning call."""
    bucket = TEST_BUCKET_WITH_VERSIONING
    self.s3_client.create_bucket(Bucket=bucket)
    self.s3_client.put_bucket_versioning(
        Bucket=bucket,
        VersioningConfiguration={'Status': 'Enabled'}
    )
    status = self.s3_client.get_bucket_versioning(Bucket=bucket)
    self.assertEqual(status['Status'], 'Enabled')
def test_get_bucket_versioning_order(self):
    """list_object_versions must flag the newest version of each key as
    IsLatest: index 0 is the latest of 'test' (two versions), index 2 is
    the only version of 'test2'."""
    bucket_name = 'version-order-%s' % short_uid()
    self.s3_client.create_bucket(Bucket=bucket_name)
    self.s3_client.put_bucket_versioning(Bucket=bucket_name,
                                         VersioningConfiguration={'Status': 'Enabled'})
    # two versions of 'test', one version of 'test2'
    self.s3_client.put_object(Bucket=bucket_name, Key='test', Body='body')
    self.s3_client.put_object(Bucket=bucket_name, Key='test', Body='body')
    self.s3_client.put_object(Bucket=bucket_name, Key='test2', Body='body')
    rs = self.s3_client.list_object_versions(
        Bucket=bucket_name,
    )
    self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
    self.assertEqual(rs['Name'], bucket_name)
    self.assertEqual(rs['Versions'][0]['IsLatest'], True)
    self.assertEqual(rs['Versions'][2]['IsLatest'], True)
def test_upload_big_file(self):
    """Upload two large (~10 MB) objects and verify head_object reports the
    full content length for each."""
    bucket_name = 'bucket-big-file-%s' % short_uid()
    key1 = 'test_key1'
    # BUG FIX: was 'test_key1' as well, which silently made both uploads
    # target the same key and left the second object untested.
    key2 = 'test_key2'
    self.s3_client.create_bucket(Bucket=bucket_name)
    body1 = '\x01' * 10000000
    rs = self.s3_client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
    self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
    body2 = 'a' * 10000000
    rs = self.s3_client.put_object(Bucket=bucket_name, Key=key2, Body=body2)
    self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200)
    # both objects must report their full size
    rs = self.s3_client.head_object(Bucket=bucket_name, Key=key1)
    self.assertEqual(rs['ContentLength'], len(body1))
    rs = self.s3_client.head_object(Bucket=bucket_name, Key=key2)
    self.assertEqual(rs['ContentLength'], len(body2))
    # clean up
    self._delete_bucket(bucket_name, [key1, key2])
def test_s3_put_more_than_1000_items(self):
    """Listing must page at 1000 keys and expose the remainder via
    NextMarker/Marker when more than 1000 objects exist."""
    self.s3_client.create_bucket(Bucket=TEST_BUCKET_NAME_2)
    # range(1010) instead of range(0, 1010, 1): identical values, idiomatic form
    for i in range(1010):
        body = 'test-' + str(i)
        key = 'test-key-' + str(i)
        self.s3_client.put_object(Bucket=TEST_BUCKET_NAME_2, Key=key, Body=body)
    # trying to get the last item of 1010 items added.
    resp = self.s3_client.get_object(Bucket=TEST_BUCKET_NAME_2, Key='test-key-1009')
    self.assertEqual(to_str(resp['Body'].read()), 'test-1009')
    # trying to get the first item of 1010 items added.
    resp = self.s3_client.get_object(Bucket=TEST_BUCKET_NAME_2, Key='test-key-0')
    self.assertEqual(to_str(resp['Body'].read()), 'test-0')
    # with an explicit MaxKeys above the count, everything comes back at once
    resp = self.s3_client.list_objects(Bucket=TEST_BUCKET_NAME_2, MaxKeys=1010)
    self.assertEqual(len(resp['Contents']), 1010)
    # default listing caps at 1000 keys
    resp = self.s3_client.list_objects(Bucket=TEST_BUCKET_NAME_2)
    self.assertEqual(len(resp['Contents']), 1000)
    next_marker = resp['NextMarker']
    # Second list picks up the remaining 10 keys
    resp = self.s3_client.list_objects(Bucket=TEST_BUCKET_NAME_2, Marker=next_marker)
    self.assertEqual(len(resp['Contents']), 10)
def test_s3_list_objects_empty_marker(self):
    """An explicitly empty Marker parameter must be echoed back unchanged."""
    bucket_name = 'test' + short_uid()
    self.s3_client.create_bucket(Bucket=bucket_name)
    listing = self.s3_client.list_objects(Bucket=bucket_name, Marker='')
    self.assertEqual(listing['Marker'], '')
def test_s3_multipart_upload_file(self):
    """Upload files of several sizes via upload_file (multipart for the
    larger ones) and verify the storage class is preserved on each object."""
    def upload(size_in_mb, bucket):
        # Create a sparse temp file of the requested size: seek to the end
        # and write a single NUL byte. Closing the file flushes it, so the
        # explicit flush()/open-handle upload of the original is unnecessary.
        file_name = '{}.tmp'.format(short_uid())
        with open(file_name, 'wb') as f:
            f.seek(int(size_in_mb * 1e6))
            f.write(b'\0')
        self.s3_client.upload_file(
            file_name,
            bucket,
            file_name,
            ExtraArgs={'StorageClass': 'DEEP_ARCHIVE'}
        )
        os.remove(file_name)
    bucket_name = 'bucket-%s' % short_uid()
    self.s3_client.create_bucket(Bucket=bucket_name)
    upload(1, bucket_name)
    upload(9, bucket_name)
    upload(15, bucket_name)
    s3_resource = aws_stack.connect_to_resource('s3')
    objects = s3_resource.Bucket(bucket_name).objects.all()
    keys = []
    for obj in objects:
        keys.append(obj.key)
        # every uploaded object must keep the requested storage class
        self.assertEqual(obj.storage_class, 'DEEP_ARCHIVE')
    self._delete_bucket(bucket_name, keys)
def test_cors_with_single_origin_error(self):
    """Presigned PUTs must succeed from allowed CORS origins, both with a
    single allowed origin and after adding a second one."""
    client = self._get_test_client()
    # single allowed origin
    BUCKET_CORS_CONFIG = {
        'CORSRules': [{
            'AllowedOrigins': ['https://localhost:4200'],
            'AllowedMethods': ['GET', 'PUT'],
            'MaxAgeSeconds': 3000,
            'AllowedHeaders': ['*'],
        }]
    }
    client.create_bucket(Bucket='my-s3-bucket')
    client.put_bucket_cors(Bucket='my-s3-bucket', CORSConfiguration=BUCKET_CORS_CONFIG)
    # create signed url
    url = client.generate_presigned_url(
        ClientMethod='put_object',
        Params={
            'Bucket': 'my-s3-bucket',
            'Key': '424f6bae-c48f-42d8-9e25-52046aecc64d/document.pdf',
            'ContentType': 'application/pdf',
            'ACL': 'bucket-owner-full-control'
        },
        ExpiresIn=3600
    )
    # PUT from the allowed origin must pass
    result = requests.put(url, data='something', verify=False,
                          headers={'Origin': 'https://localhost:4200', 'Content-Type': 'application/pdf'})
    self.assertEqual(result.status_code, 200)
    # now allow a second origin
    BUCKET_CORS_CONFIG = {
        'CORSRules': [{
            'AllowedOrigins': ['https://localhost:4200', 'https://localhost:4201'],
            'AllowedMethods': ['GET', 'PUT'],
            'MaxAgeSeconds': 3000,
            'AllowedHeaders': ['*'],
        }]
    }
    client.put_bucket_cors(Bucket='my-s3-bucket', CORSConfiguration=BUCKET_CORS_CONFIG)
    # create signed url
    url = client.generate_presigned_url(
        ClientMethod='put_object',
        Params={
            'Bucket': 'my-s3-bucket',
            'Key': '424f6bae-c48f-42d8-9e25-52046aecc64d/document.pdf',
            'ContentType': 'application/pdf',
            'ACL': 'bucket-owner-full-control'
        },
        ExpiresIn=3600
    )
    # PUTs from both allowed origins must pass
    result = requests.put(url, data='something', verify=False,
                          headers={'Origin': 'https://localhost:4200', 'Content-Type': 'application/pdf'})
    self.assertEqual(result.status_code, 200)
    result = requests.put(url, data='something', verify=False,
                          headers={'Origin': 'https://localhost:4201', 'Content-Type': 'application/pdf'})
    self.assertEqual(result.status_code, 200)
def test_s3_put_object_notification_with_lambda(self):
    """An ObjectCreated event must invoke the configured Lambda; the echo
    Lambda writes its event into DynamoDB, where bucket name and ETag are
    verified."""
    bucket_name = 'bucket-%s' % short_uid()
    function_name = 'func-%s' % short_uid()
    table_name = 'table-%s' % short_uid()
    self.s3_client.create_bucket(Bucket=bucket_name)
    testutil.create_lambda_function(
        handler_file=TEST_LAMBDA_PYTHON_ECHO,
        func_name=function_name,
        runtime=LAMBDA_RUNTIME_PYTHON36
    )
    aws_stack.create_dynamodb_table(
        table_name=table_name,
        partition_key='uuid'
    )
    # wire the bucket's ObjectCreated events to the Lambda
    self.s3_client.put_bucket_notification_configuration(
        Bucket=bucket_name,
        NotificationConfiguration={
            'LambdaFunctionConfigurations': [
                {
                    'LambdaFunctionArn': aws_stack.lambda_function_arn(function_name),
                    'Events': ['s3:ObjectCreated:*']
                }
            ]
        }
    )
    # put an object (the key doubles as the table name here)
    obj = self.s3_client.put_object(Bucket=bucket_name, Key=table_name, Body='something..')
    etag = obj['ETag']
    time.sleep(2)
    table = aws_stack.connect_to_resource('dynamodb').Table(table_name)
    def check_table():
        # the async Lambda invocation should have produced exactly one item
        rs = table.scan()
        self.assertEqual(len(rs['Items']), 1)
        return rs
    rs = retry(check_table, retries=4, sleep=3)
    record = rs['Items'][0]
    self.assertEqual(record['data']['s3']['bucket']['name'], bucket_name)
    self.assertEqual(record['data']['s3']['object']['eTag'], etag)
    # clean up
    self._delete_bucket(bucket_name, [table_name])
    lambda_client = aws_stack.connect_to_service('lambda')
    lambda_client.delete_function(FunctionName=function_name)
    dynamodb_client = aws_stack.connect_to_service('dynamodb')
    dynamodb_client.delete_table(TableName=table_name)
def test_s3_put_object_notification_with_sns_topic(self):
    """An ObjectCreated event must be published to the configured SNS
    topic and arrive, SNS-wrapped, in the subscribed SQS queue."""
    bucket_name = 'bucket-%s' % short_uid()
    topic_name = 'topic-%s' % short_uid()
    queue_name = 'queue-%s' % short_uid()
    key_name = 'bucket-key-%s' % short_uid()
    sns_client = aws_stack.connect_to_service('sns')
    self.s3_client.create_bucket(Bucket=bucket_name)
    queue_url = self.sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
    topic_arn = sns_client.create_topic(Name=topic_name)['TopicArn']
    # chain: S3 -> SNS topic -> SQS queue
    sns_client.subscribe(TopicArn=topic_arn, Protocol='sqs', Endpoint=aws_stack.sqs_queue_arn(queue_name))
    self.s3_client.put_bucket_notification_configuration(
        Bucket=bucket_name,
        NotificationConfiguration={
            'TopicConfigurations': [
                {
                    'TopicArn': topic_arn,
                    'Events': ['s3:ObjectCreated:*']
                }
            ]
        }
    )
    # Put an object
    # This will trigger an event to sns topic, sqs queue will get a message since it's a subscriber of topic
    self.s3_client.put_object(Bucket=bucket_name, Key=key_name, Body='body content...')
    time.sleep(2)
    def get_message(q_url):
        # receive and delete a single message; raises if none arrived yet,
        # which retry() below treats as "try again"
        resp = self.sqs_client.receive_message(QueueUrl=q_url)
        m = resp['Messages'][0]
        self.sqs_client.delete_message(
            QueueUrl=q_url,
            ReceiptHandle=m['ReceiptHandle']
        )
        return json.loads(m['Body'])
    message = retry(get_message, retries=3, sleep=2, q_url=queue_url)
    # We got a notification message in sqs queue (from s3 source)
    self.assertEqual(message['Type'], 'Notification')
    self.assertEqual(message['TopicArn'], topic_arn)
    self.assertEqual(message['Subject'], 'Amazon S3 Notification')
    # the inner SNS payload carries the original S3 event record
    r = json.loads(message['Message'])['Records'][0]
    self.assertEqual(r['eventSource'], 'aws:s3')
    self.assertEqual(r['s3']['bucket']['name'], bucket_name)
    self.assertEqual(r['s3']['object']['key'], key_name)
    # clean up
    self._delete_bucket(bucket_name, [key_name])
    self.sqs_client.delete_queue(QueueUrl=queue_url)
    sns_client.delete_topic(TopicArn=topic_arn)
def test_s3_get_deep_archive_object(self):
    """Fetching a DEEP_ARCHIVE object without restoring it first must fail
    with an InvalidObjectState error."""
    bucket_name = 'bucket-%s' % short_uid()
    object_key = 'key-%s' % short_uid()
    self.s3_client.create_bucket(Bucket=bucket_name)
    # store the object directly in the DEEP_ARCHIVE storage class
    self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Body='body data',
                              StorageClass='DEEP_ARCHIVE')
    with self.assertRaises(ClientError) as error_ctx:
        self.s3_client.get_object(Bucket=bucket_name, Key=object_key)
    self.assertIn('InvalidObjectState', str(error_ctx.exception))
    # clean up
    self._delete_bucket(bucket_name, [object_key])
def test_s3_get_deep_archive_object_restore(self):
    """A DEEP_ARCHIVE object is unreadable until restore_object is issued;
    afterwards get_object must succeed and return an ETag header."""
    bucket_name = 'bucket-%s' % short_uid()
    object_key = 'key-%s' % short_uid()
    self.s3_client.create_bucket(Bucket=bucket_name)
    # put DEEP_ARCHIVE object
    self.s3_client.put_object(
        Bucket=bucket_name,
        Key=object_key,
        Body='body data',
        StorageClass='DEEP_ARCHIVE'
    )
    # reading before restore must fail
    with self.assertRaises(ClientError) as ctx:
        self.s3_client.get_object(
            Bucket=bucket_name,
            Key=object_key
        )
    self.assertIn('InvalidObjectState', str(ctx.exception))
    # restore the archived object
    self.s3_client.restore_object(
        Bucket=bucket_name,
        Key=object_key,
        RestoreRequest={
            'Days': 30,
            'GlacierJobParameters': {
                'Tier': 'Bulk'
            },
            'Tier': 'Bulk',
        },
    )
    # after restore the object is readable again
    response = self.s3_client.get_object(
        Bucket=bucket_name,
        Key=object_key
    )
    self.assertIn('etag', response.get('ResponseMetadata').get('HTTPHeaders'))
    # clean up
    self._delete_bucket(bucket_name, [object_key])
def test_encoding_notification_messages(self):
    """Object keys with special characters must arrive URL-encoded in the
    notification event ('a@b' -> 'a%40b')."""
    key = 'a@b'
    bucket_name = 'notif-enc-%s' % short_uid()
    queue_url = self.sqs_client.create_queue(QueueName='testQueue')['QueueUrl']
    queue_attributes = self.sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['QueueArn'])
    self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)
    # put an object where the bucket_name is in the path
    self.s3_client.put_object(Bucket=bucket_name, Key=key, Body='something')
    response = self.sqs_client.receive_message(QueueUrl=queue_url)
    self.assertEqual(json.loads(response['Messages'][0]['Body'])['Records'][0]['s3']['object']['key'], 'a%40b')
    # clean up
    self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]})
def test_s3_batch_delete_objects_using_requests(self):
    """POSTing a raw multi-object delete request over HTTP removes every
    listed key from the bucket."""
    bucket_name = 'bucket-%s' % short_uid()
    first_key = 'key-%s' % short_uid()
    second_key = 'key-%s' % short_uid()
    self.s3_client.create_bucket(Bucket=bucket_name)
    self.s3_client.put_object(Bucket=bucket_name, Key=first_key, Body='This body document')
    self.s3_client.put_object(Bucket=bucket_name, Key=second_key, Body='This body document')
    # issue the batch delete through the raw HTTP API rather than boto3
    base_url = '{}://{}:{}'.format(get_service_protocol(), config.LOCALSTACK_HOSTNAME, config.PORT_S3)
    url = '{}/{}?delete='.format(base_url, bucket_name)
    response = requests.post(url=url, data=BATCH_DELETE_BODY % (first_key, second_key))
    self.assertEqual(response.status_code, 200)
    bucket = aws_stack.connect_to_resource('s3').Bucket(bucket_name)
    remaining = sum(1 for _ in bucket.objects.all())
    self.assertEqual(remaining, 0)
    # clean up
    self._delete_bucket(bucket_name, [])
def test_presigned_url_signature_authentication(self):
client = boto3.client('s3', endpoint_url=config.get_edge_url(),
config=Config(signature_version='s3'), aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY)
client_v4 = boto3.client('s3', endpoint_url=config.get_edge_url(),
config=Config(signature_version='s3v4'), aws_access_key_id=TEST_AWS_ACCESS_KEY_ID,
aws_secret_access_key=TEST_AWS_SECRET_ACCESS_KEY)
OBJECT_KEY = 'temp 1.txt'
OBJECT_DATA = 'this should be found in when you download {}.'.format(OBJECT_KEY)
BUCKET = 'test'
EXPIRES = 4
def make_v2_url_invalid(url):
parsed = urlparse.urlparse(url)
query_params = parse_qs(parsed.query)
url = '{}/{}/{}?AWSAccessKeyId={}&Signature={}&Expires={}'.format(
config.get_edge_url(), BUCKET, OBJECT_KEY,
'test', query_params['Signature'][0], query_params['Expires'][0]
)
return url
def make_v4_url_invalid(url):
parsed = urlparse.urlparse(url)
query_params = parse_qs(parsed.query)
url = ('{}/{}/{}?X-Amz-Algorithm=AWS4-HMAC-SHA256&' +
'X-Amz-Credential={}&X-Amz-Date={}&' +
'X-Amz-Expires={}&X-Amz-SignedHeaders=host&' +
'X-Amz-Signature={}').format(
config.get_edge_url(), BUCKET, OBJECT_KEY,
quote(query_params['X-Amz-Credential'][0]).replace('/', '%2F'),
query_params['X-Amz-Date'][0], query_params['X-Amz-Expires'][0], query_params['X-Amz-Signature'][0]
)
return url
client.create_bucket(Bucket=BUCKET)
client.put_object(Key=OBJECT_KEY, Bucket=BUCKET, Body='123')
# GET requests
presign_get_url = client.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
presign_get_url_v4 = client_v4.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY},
ExpiresIn=EXPIRES
)
# Valid request
response = requests.get(presign_get_url)
self.assertEqual(response.status_code, 200)
response = requests.get(presign_get_url_v4)
self.assertEqual(response.status_code, 200)
presign_get_url = client.generate_presigned_url(
'get_object',
Params={'Bucket': BUCKET, 'Key': OBJECT_KEY, 'ResponseContentType': 'text/plain',
'ResponseContentDisposition': 'attachment; filename=test.txt'},
ExpiresIn=EXPIRES
)
presign_get_url_v4 | |
<reponame>bobelly/torchsupport<gh_stars>10-100
import torch
import random
# import pandas as pd
from torchsupport.data.io import imread
from torch.utils.data import Dataset, DataLoader
if True:
from torch.utils.data.sampler import Sampler, SubsetRandomSampler
else:
from torch.utils.data import Sampler, SubsetRandomSampler
import os
class SupportData(Dataset):
    """Dataset wrapper that serves randomly drawn few-shot support sets.

    Arguments
    ---------
    dataset : an indexable dataset of (input, label) points.
    ways : number of distinct classes per support set (labels 0..ways-1).
    shots : number of examples drawn per class.
    key : callable extracting an integer label from a data point.
    """
    def __init__(self, dataset, ways=3, shots=5, key=lambda x: int(x[1])):
        self.shots = shots
        self.ways = ways
        # bucket dataset indices by their integer label
        self.index_lists = {}
        for idx, point in enumerate(dataset):
            label = key(point)
            if label not in self.index_lists:
                self.index_lists[label] = []
            self.index_lists[label].append(idx)
        # keep the labels 0..ways-1, in order; leftover debug print removed
        self.index_lists = [
            self.index_lists[label]
            for label in range(self.ways)
        ]
        self.dataset = dataset

    def __getitem__(self, idx):
        # `idx` is ignored: every access draws a fresh random support set.
        support_shots = []
        support_labels = []
        for label, indices in enumerate(self.index_lists):
            # sample `shots` distinct examples of this class
            # (leftover debug print removed)
            support_indices = random.sample(indices, self.shots)
            support_subset = torch.cat([
                self.dataset[index][0].unsqueeze(0)
                for index in support_indices
            ])
            # label tensor shaped (shots, 1, 1, 1); '_' for the unused loop var
            support_label = torch.LongTensor([[[[label]]] for _ in range(self.shots)])
            support_shots.append(support_subset)
            support_labels.append(support_label)
        support_shots = torch.cat(support_shots, dim=0)
        support_labels = torch.cat(support_labels, dim=0)
        return support_shots, support_labels

    def __len__(self):
        # effectively infinite: support sets are sampled, not enumerated
        return 1000000000000
# class SubDataset(Dataset):
# def __init__(self, dataset, indices):
# """The subset of a Dataset, defined by a set of indices.
# Arguments
# ---------
# dataset : a :class:`Dataset` from which a subset is chosen.
# indices : a :class:`list` of indices making up the chosen subset.
# """
# self.dataset = dataset
# self.indices = indices
# def __len__(self):
# return len(self.indices)
# def __getitem__(self, index):
# sample = {
# "image": self.getimage(index),
# "label": self.getlabel(index)
# }
# return sample
# def getimage(self, index):
# idx = self.indices[index]
# img_name = os.path.join(self.dataset.annotation.iloc[idx, 0])
# image = imread(img_name)
# if self.dataset.transform != None:
# image = self.dataset.transform(image)
# return image
# def getlabel(self, index):
# idx = self.indices[index]
# labelname = self.dataset.annotation.iloc[idx, 1]
# label = torch.LongTensor([[self.dataset.classmap[labelname]]])
# return label
# class LabelPartitionedDataset(Dataset):
# def __init__(self, dataset):
# """A :class:`Dataset` partitioned by its labels.
# Arguments
# ---------
# dataset : a :class:`Dataset` to be partitioned.
# """
# self.dataset = dataset
# self.labelindices = {}
# self.labels = []
# for idx in range(len(self.dataset)):
# labelindex = self.dataset.getlabel(idx)[0][0]
# label = labelindex.item()#self.dataset.classmap_inv[labelindex]
# if label not in self.labels:
# self.labelindices[label] = []
# self.labels.append(label)
# self.labelindices[label].append(idx)
# def __getitem__(self, label):
# return SubDataset(self.dataset, self.labelindices[label])
# class UnionSampler(Sampler):
# def __init__(self, samplers):
# """Samples randomly from a union of samplers.
# Arguments
# ---------
# samplers : a :class:`list` of :class:`Sampler`s to be unified.
# """
# self.samplers = samplers
# def __len__(self):
# try:
# result = 0
# for sampler in self.samplers:
# result += len(sampler)
# return result
# except:
# return None
# def __iter__(self):
# iters = [iter(sampler) for sampler in self.samplers]
# def iterator():
# while len(iters) > 0:
# next_val = None
# while next_val == None:
# it = random.choice(iters)
# try:
# next_val = next(it)
# except StopIteration:
# next_val = None
# iters.remove(it)
# yield next_val
# return iterator()
# class ReplacementRandomSampler(Sampler):
# def __init__(self, indices):
# """Samples randomly with replacement from a indices.
# Arguments:
# indices (list[int]): a list of indices to be randomly sampled from.
# """
# self.indices = indices
# def __len__(self):
# return None
# def __iter__(self):
# def iterator():
# while True:
# index = random.choice(range(len(self.indices)))
# yield self.indices[index]
# return iterator()
# class LabelPartitionedSampler(object):
# def __init__(self, dataset):
# """Partition of a :class:`Dataset` into multiple class:`Sampler`s, by label.
# Arguments
# ---------
# dataset : a :class:`Dataset` to be partitioned by label.
# """
# self.dataset = dataset
# self.labelindices = {}
# self.labels = []
# self.samplers = {}
# for idx in range(len(self.dataset)):
# labelindex = self.dataset.getlabel(idx)[0][0]
# label = labelindex.item()#self.dataset.classmap_inv[labelindex]
# if label not in self.labels:
# self.labelindices[label] = []
# self.labels.append(label)
# self.labelindices[label].append(idx)
# for label in self.labels:
# self.samplers[label] = ReplacementRandomSampler(
# self.labelindices[label]
# )
# def __getitem__(self, label):
# result = None
# if isinstance(label, list):
# samplers = []
# for elem in label:
# samplers.append(self.samplers[elem])
# result = UnionSampler(samplers)
# else:
# result = self.samplers[label]
# return result
# class EpisodicSampler(Sampler):
# def __init__(self, dataset,
# batch_size=128, label_size=2,
# shot_size=1, max_episodes=1000):
# """Samples episodes from a dataset.
# Arguments
# ---------
# dataset : a :class:`Dataset` to be packed into episodes.
# batch_size : the batch size for each episode.
# label_size : the maximum number of labels per episode.
# shot_size : the maximum size of the support set for each given class.
# max_episodes : the number of episodes per epoch.
# """
# self.dataset = dataset
# self.labelsampler = LabelPartitionedSampler(dataset)
# self.batch_size = batch_size
# self.label_size = label_size
# self.shot_size = shot_size
# self.max_episodes = max_episodes
# def __iter__(self):
# def iterator():
# for idx in range(self.max_episodes):
# num_episode_labels = self.label_size# FIXME! #random.randrange(2, self.label_size + 1)
# episode_labels = random.sample(self.labelsampler.labels, num_episode_labels)
# sampler = iter(self.labelsampler[episode_labels])
# batchindices = []
# supportindices = []
# for idx in range(self.batch_size):
# batchindices.append(next(sampler))
# for label in episode_labels:
# labelsampler = iter(self.labelsampler[label])
# for idx in range(self.shot_size):
# supportindices.append(next(labelsampler))
# yield batchindices + supportindices
# return iterator()
# def __len__(self):
# return self.max_episodes
# class EpisodicBinarySampler(Sampler):
# def __init__(self, dataset,
# batch_size=128,
# shot_size=1,
# max_episodes=1000):
# """Samples episodes from a dataset.
# Arguments
# ---------
# dataset : a :class:`Dataset` to be packed into episodes.
# batch_size : the batch size for each episode.
# label_size : the maximum number of labels per episode.
# shot_size : the maximum size of the support set for each given class.
# max_episodes : the number of episodes per epoch.
# """
# self.dataset = dataset
# self.labelsampler = LabelPartitionedSampler(dataset)
# self.batch_size = batch_size
# self.shot_size = shot_size
# self.max_episodes = max_episodes
# def __iter__(self):
# def iterator():
# for idx in range(self.max_episodes):
# num_episode_labels = 1#self.label_size# FIXME! #random.randrange(2, self.label_size + 1)
# episode_labels = random.sample(self.labelsampler.labels, num_episode_labels)
# anti_labels = [label for label in self.labelsampler.labels
# if label not in episode_labels]
# sampler = iter(self.labelsampler[episode_labels])
# antisampler = iter(self.labelsampler[anti_labels])
# samplers = [sampler, antisampler]
# batchindices = []
# supportindices = []
# for idx in range(self.batch_size):
# batchindices.append(next(random.choice(samplers)))
# for label in episode_labels:
# labelsampler = iter(self.labelsampler[label])
# for idx in range(self.shot_size):
# supportindices.append(next(labelsampler))
# yield batchindices + supportindices
# return iterator()
# def __len__(self):
# return self.max_episodes
# class _EpisodicOverlay(object):
# def __init__(self, loader, batch_size):
# """Wraps a DataLoader to separate its batches into batch and support."""
# self.loader = loader
# self.batch_size = batch_size
# def __len__(self):
# return len(self.loader)
# def __iter__(self):
# def iterator():
# for elem in iter(self.loader):
# data, labels = elem.values()
# batch = data[:self.batch_size, :, :, :]
# support = data[self.batch_size:, :, :, :]
# batchlabels = labels[:self.batch_size, :, :]
# supportlabels = labels[self.batch_size:, :, :]
# labelmap = []
# count = 0
# total = 0
# inverse_labelmap = {}
# for label_tensor in supportlabels[:, 0, 0]:
# label = label_tensor.item()
# if label not in labelmap:
# labelmap.append(label)
# inverse_labelmap[label] = count
# count += 1
# total += 1
# batchonehot = torch.zeros((self.batch_size, count))
# for idx in range(self.batch_size):
# batchlabels[idx, 0, 0] = inverse_labelmap[batchlabels[idx, 0, 0].item()]
# batchonehot[idx, batchlabels[idx, 0, 0].item()] = 1.0
# for idx in range(supportlabels.size()[0]):
# supportlabels[idx, 0, 0] = inverse_labelmap[supportlabels[idx, 0, 0].item()]
# yield {
# "batch": batch,
# "batchlabels": batchlabels,
# "batchonehot": batchonehot,
# "support": support,
# "supportlabels": supportlabels,
# "classes": count,
# "shots": total
# }
# return iterator()
# class _EpisodicBinaryOverlay(object):
# def __init__(self, loader, batch_size):
# """Wraps a DataLoader to separate its batches into batch and support."""
# self.loader = loader
# self.batch_size = batch_size
# def __len__(self):
# return len(self.loader)
# def __iter__(self):
# def iterator():
# for elem in iter(self.loader):
# data, labels = elem.values()
# batch = data[:self.batch_size, :, :, :]
# support = data[self.batch_size:, :, :, :]
# batchlabels = labels[:self.batch_size, :, :]
# supportlabels = labels[self.batch_size:, :, :]
# labelmap = []
# count = 0
# total = 0
# inverse_labelmap = {}
# for label_tensor in supportlabels[:, 0, 0]:
# label = label_tensor.item()
# if label not in labelmap:
# labelmap.append(label)
# inverse_labelmap[label] = count
# count += 1
# total += 1
# batchonehot = torch.zeros((self.batch_size, 2))
# for idx in range(self.batch_size):
# if batchlabels[idx, 0, 0].item() in inverse_labelmap:
# batchlabels[idx, 0, 0] = 0
# else:
# batchlabels[idx, 0, 0] = 1
# batchonehot[idx, batchlabels[idx, 0, 0].item()] = 1.0
# for idx in range(supportlabels.size()[0]):
# supportlabels[idx, 0, 0] = 0
# yield {
# "batch": batch,
# "batchlabels": batchlabels,
# "batchonehot": batchonehot,
# "support": support,
# "supportlabels": supportlabels,
# "classes": count,
# "shots": total
# }
# return iterator()
# def EpisodicLoader(dataset, batch_size=128, label_size=2,
# shot_size=1, max_episodes=1000, num_workers=None):
# """Creates a loader for episodic training.
# Arguments
# ---------
# dataset : a :class:`Dataset` to be packed into episodes.
# batch_size : the batch size for each episode.
# label_size : the maximum number of labels per episode.
# shot_size : the maximum size of the support set for each given class.
# max_episodes : the number of episodes per epoch.
# num_workers : the number of processes to use for data loading.
# """
# sampler = EpisodicSampler(
# dataset,
# batch_size=batch_size,
# label_size=label_size,
# shot_size=shot_size,
# max_episodes=max_episodes
# )
# loader = DataLoader(
# dataset, batch_sampler=sampler,
# num_workers=num_workers
# )
# return _EpisodicOverlay(loader, batch_size)
# def EpisodicBinaryLoader(dataset, batch_size=128,
# shot_size=1, max_episodes=1000,
# num_workers=None):
# """Creates a loader for episodic training.
# Arguments
# ---------
# dataset : a :class:`Dataset` to be packed into episodes.
# batch_size : the batch size for each episode.
# label_size : the maximum number of labels per episode.
# shot_size : the maximum size of the support set for each given class.
# max_episodes : the number of episodes per epoch.
# num_workers : the number of processes to use | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Blue Yonder Coding Task - Image downloader - 3. Simple and solid solution.
:author <NAME>:
:created 2018/04/05:
Usage: quickanddirty.py <path to input file>
"""
# standard library modules
import argparse
import logging
import os
import sys
import tempfile
import errno
from urlparse import urlparse
from pprint import pprint
# 3rd party modules
import requests
import magic
import yaml
def create_parser(parserclass=argparse.ArgumentParser):
    """
    Create the command line argument parser, handle command line parameters,
    and provide command line help.

    Avoid a global default configuration by setting default values here.
    Factory method: the parser class is injectable so tests can substitute
    one that does not call sys.exit() on error.
    @see: https://stackoverflow.com/questions/39028204/using-unittest-to-test-argparse-exit-errors

    :param parserclass: argument parser class, swappable for testing
    :return: configured argument parser instance
    """
    # BUG FIX: the descriptive text was previously passed positionally, which
    # argparse interprets as `prog` (the program name) — it is the description.
    parser = parserclass(description='Download images from urls provided to local storage')
    # No defaults, so every omitted value is returned as None
    parser.add_argument(
        '-i',
        '--input_file',
        help='Input file: Plain text list of urls; one per line',
        type=lambda s: s.strip()
    )
    parser.add_argument(
        '-o',
        '--output_dir',
        help='Output directory: downloaded files will be written here; write permissions needed',
        type=lambda s: s.strip()
    )
    # verbosity level: -v and -q are mutually exclusive
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument(
        '-v',
        '--verbosity',
        help='Set verbosity level for screen output: [none, debug, info, warn, error]',
        choices=['quiet', 'debug', 'info', 'warn', 'error'],
        type=lambda s: s.strip().lower()
    )
    verbosity.add_argument(
        '-q',
        '--quiet',
        help='No output to screen. Same as "--verbosity none"',
        action='store_const',
        const='quiet'
    )
    parser.add_argument(
        '-l',
        '--log_level',
        help='Set verbosity level for log output: [none, debug, info, warn, error]',
        choices=['quiet', 'debug', 'info', 'warn', 'error'],
        type=lambda s: s.strip().lower()
    )
    parser.add_argument(
        '-f',
        '--log_file',
        help='Set file to write logs to',
        type=lambda s: s.strip()
    )
    parser.add_argument(
        '-c',
        '--config_file',
        help='Set file to read config from. Has to be valid YAML format.',
        type=lambda s: s.strip()
    )
    return parser
def setup_logging(config):
    """
    Set up and configure logging environment.

    Factory method.

    :param config: dictionary with configuration values; must contain the
        keys 'verbosity' (console level), 'log_level' (file level) and
        'log_file' (path the FileHandler writes to)
    :return: logger object with one console and one file handler attached
    """
    logger = logging.getLogger(__name__)
    # Bug fix: the logger itself must be permissive enough to pass records
    # through to both handlers; without this it stays at the root's effective
    # level (WARNING) and the handler levels below never get a say.
    logger.setLevel(min(config['verbosity'], config['log_level']))
    console_handler = logging.StreamHandler()
    console_handler.setLevel(config['verbosity'])
    # use a very simple log format for console output
    # Bug fix: setFormatter() requires a logging.Formatter instance; passing a
    # bare string raised AttributeError the first time a record was emitted.
    # Todo: maybe make this configurable later
    console_handler.setFormatter(logging.Formatter('%(levelname)s:%(module)s - %(message)s'))
    file_handler = logging.FileHandler(config['log_file'])
    file_handler.setLevel(config['log_level'])
    # use a more detailed log format for file output leaning on the PHP error log format
    # [12-Jun-2011 12:58:55] PHP Notice: Undefined variable: test in C:\www\phpinfo.php on line 2\r\r\n
    # Todo: maybe make this configurable later
    file_handler.setFormatter(logging.Formatter(
        '[%(asctime)s] %(levelname)s: %(module)s: %(funcName)s: %(message)s in %(pathname)s on line %(lineno)d'
    ))
    # getLogger() returns references to the same object but handlers may be duplicated. So only add handlers if there
    # are none. This issue arises only in unittests since this function is only called once in the script
    # @see: https://stackoverflow.com/questions/6333916/python-logging-ensure-a-handler-is-added-only-once
    if not logger.handlers:
        logger.addHandler(console_handler)
        logger.addHandler(file_handler)
    return logger
def verify_file(filename, permission, mime_type):
    """
    Check if input/output file is a valid path and is suitable for reading/writing.

    :param filename: path to test (converted to an absolute path first)
    :param permission: os.R_OK to validate for reading, os.W_OK for writing
    :param mime_type: expected libmagic MIME type; only checked for reads
    :return: (exit_code, message) tuple; (0, 'File is fine') on success

    Taking typical linux exit codes from stack overflow
    @see: https://stackoverflow.com/questions/1101957/are-there-any-standard-exit-status-codes-in-linux
    > #define EX_OK 0 /* successful termination */
    > #define EX__BASE 64 /* base value for error messages */
    > #define EX_USAGE 64 /* command line usage error */
    > #define EX_DATAERR 65 /* data format error */
    > #define EX_NOINPUT 66 /* cannot open input */
    > #define EX_NOUSER 67 /* addressee unknown */
    > #define EX_NOHOST 68 /* host name unknown */
    > #define EX_UNAVAILABLE 69 /* service unavailable */
    > #define EX_SOFTWARE 70 /* internal software error */
    > #define EX_OSERR 71 /* system error (e.g., can't fork) */
    > #define EX_OSFILE 72 /* critical OS file missing */
    > #define EX_CANTCREAT 73 /* can't create (user) output file */
    > #define EX_IOERR 74 /* input/output error */
    > #define EX_TEMPFAIL 75 /* temp failure; user is invited to retry */
    > #define EX_PROTOCOL 76 /* remote error in protocol */
    > #define EX_NOPERM 77 /* permission denied */
    > #define EX_CONFIG 78 /* configuration error */
    """
    path = os.path.abspath(filename)
    result = (0, 'File is fine')
    # libmagic inspects file *content* to determine the MIME type, so a
    # renamed binary cannot masquerade as text/plain.
    with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as wizzard:
        # File is to be opened for reading and therefore has to exist
        if permission == os.R_OK and not os.path.exists(path):
            result = (66, 'Not a valid path: %s\n' % filename)
        # File is to be opened for reading and therefore has be a valid file
        elif permission == os.R_OK and not os.path.isfile(path):
            result = (66, 'Not a valid file: %s\n' % filename)
        # check file permission for reading
        elif permission == os.R_OK and not os.access(path, permission):
            result = (77, 'No permission to read file: %s\n' % filename)
        # check the file type for files opened for reading
        elif permission == os.R_OK and not wizzard.id_filename(path) == mime_type:
            result = (74, 'Wrong file type "%s": %s\n' % (wizzard.id_filename(path), filename))
        # when writing a file the target directory has to exist
        elif permission == os.W_OK and not os.path.exists(os.path.dirname(path)):
            result = (73, 'Not a valid path for output: %s\n' % (os.path.dirname(filename)))
        # when writing a file the path has to resolve to a directory
        elif permission == os.W_OK and not os.path.isdir(os.path.dirname(path)):
            result = (73, 'Not a directory: %s\n' % (os.path.dirname(filename)))
        # when writing a file the directory has to be writeable
        elif permission == os.W_OK and os.path.isdir(os.path.dirname(path)):
            # work around wonky behaviour of os.access(): actually try to
            # create (and immediately discard) a temp file in the directory
            try:
                testfile = tempfile.TemporaryFile(dir=os.path.dirname(path))
                testfile.close()
            except OSError as err:
                # permission problem -> EX_NOPERM
                if err.errno == errno.EACCES:
                    return 77, 'No permission to write to directory: %s\n' % (os.path.dirname(filename))
                # all other errors: annotate and reraise, this is unexpected
                err.filename = os.path.dirname(filename)
                raise
    return result
def verify_configuration(config, exitfunc=sys.exit):
    """
    Sanity check configuration in command line arguments or config file; log and exit on errors.

    This happens before logging is set up so use stderr to output error messages
    and terminate the script.

    :param config: dictionary of config values ('input_file', 'log_file',
        'output_dir', 'verbosity', 'log_level' are checked here)
    :param exitfunc: make the sys.exit call overwriteable for unittesting
    :return: None; on any validation failure `exitfunc` is invoked with a
        non-zero exit code (see verify_file for the code table)
    """
    # check for valid input file or abort
    status = verify_file(config['input_file'], os.R_OK, 'text/plain')
    if status[0] != 0:
        sys.stderr.write(status[1])
        exitfunc(status[0])
    # check for a valid log directory to write logfile to
    status = verify_file(config['log_file'], os.W_OK, '')
    if status[0] != 0:
        sys.stderr.write(status[1])
        exitfunc(status[0])
    # check for a valid, writeable output directory
    status = verify_file(config['output_dir'], os.W_OK, '')
    if status[0] != 0:
        sys.stderr.write(status[1])
        exitfunc(status[0])
    # skip checking config file; that is already handled elsewhere
    valid_levels = (logging.FATAL, logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR)
    # Bug fix: both branches below used to call exitfunc(status[0]); at this
    # point status[0] is 0 (all file checks passed), so an invalid level made
    # the script exit with a *success* code.  Use 78 (EX_CONFIG) instead.
    if config['verbosity'] not in valid_levels:
        sys.stderr.write('Error: Invalid verbosity level')
        exitfunc(78)
    if config['log_level'] not in valid_levels:
        sys.stderr.write('Error: Invalid log level')
        exitfunc(78)
def merge_configuration(args, config):
"""
Merge configuration by priority.
<command line arguments> override <configuration file> override <default configuration>
:param args: command line arguments object
:param config: configuration dictionary from file
:return: merged configuration dictionary
"""
# At the moment I seem to need to put the default config here for comparison to get the merge priorities right
default = {
'input_file': 'test/fixtures/sample_input.txt',
'output_dir': '.',
'verbosity': logging.WARN,
'log_level': logging.WARN,
'log_file': 'logs/general.simpleandsolid.log',
'config_file': 'config/configuration.simpleandsolid.yaml'
}
map_log_levels = {
'quiet': logging.FATAL,
'debug': logging.DEBUG,
'info': logging.INFO,
'warn': logging.WARN,
'error': logging.ERROR
}
if args.input_file is not None:
default['input_file'] = args.input_file
elif config['input_file'] is not None:
default['input_file'] = config['input_file']
if args.output_dir is not None:
default['output_dir'] = args.output_dir
elif config['output_dir'] is not None:
default['output_dir'] = config['output_dir']
if args.verbosity is not None:
default['verbosity'] = map_log_levels[args.verbosity]
elif config['verbosity'] is not None:
default['verbosity'] = map_log_levels[config['verbosity']]
# Quote:
# >> Many in the Python community recommend a strategy of "easier to ask for forgiveness than permission"
# >> (EAFP) rather than "look before you leap" (LBYL).
# @see: https://stackoverflow.com/questions/610883/how-to-know-if-an-object-has-an-attribute-in-python
# I'm not sure where I personally stand on that issue. Intuitively most of the time I stray to the EAFP side but in
# this case somehow LBYL feels better to me. Probably because it saves a few lines of code and keeps to the code
# structure of the other conditions
if hasattr(args, 'quiet') and args.quiet is not None:
default['verbosity'] = logging.FATAL
if args.log_level is not None:
default['log_level'] = map_log_levels[args.log_level]
elif config['log_level'] is not None:
default['log_level'] = map_log_levels[config['log_level']]
if args.log_file is not None:
default['log_file'] = args.log_file
elif config['log_file'] is not None:
default['log_file'] = config['log_file']
if args.config_file is | |
and the remote :class:`.Graph`, pull properties
and node labels into the local copies. This operation does not
create or delete any entities.
:param subgraph: a :class:`.Node`, :class:`.Relationship` or other
:class:`.Subgraph`
"""
try:
pull = subgraph.__db_pull__
except AttributeError:
raise TypeError("No method defined to pull object %r" % subgraph)
else:
return pull(self)
def push(self, subgraph):
    """ Update remote entities from their local counterparts.

    For any nodes and relationships that exist in both the local
    :class:`.Subgraph` and the remote :class:`.Graph`, push properties
    and node labels into the remote copies. This operation does not
    create or delete any entities.

    :param subgraph: a :class:`.Node`, :class:`.Relationship` or other
        :class:`.Subgraph`
    :raises TypeError: if `subgraph` does not implement ``__db_push__``
    """
    if not hasattr(subgraph, "__db_push__"):
        raise TypeError("No method defined to push object %r" % subgraph)
    return subgraph.__db_push__(self)
def separate(self, subgraph):
    """ Delete the remote relationships that correspond to those in a local
    subgraph. This leaves any nodes untouched.

    :param subgraph: a :class:`.Node`, :class:`.Relationship` or other
        :class:`.Subgraph`
    :raises TypeError: if `subgraph` does not implement ``__db_separate__``
    """
    if not hasattr(subgraph, "__db_separate__"):
        raise TypeError("No method defined to separate object %r" % subgraph)
    # Unlike pull/push, the delegate's return value is deliberately dropped.
    subgraph.__db_separate__(self)
class Cursor(object):
""" A `Cursor` is a navigator for a stream of records.
A cursor can be thought of as a window onto an underlying data
stream. All cursors in py2neo are "forward-only", meaning that
navigation starts before the first record and may proceed only in a
forward direction.
It is not generally necessary for application code to instantiate a
cursor directly as one will be returned by any Cypher execution method.
However, cursor creation requires only a :class:`.DataSource` object
which contains the logic for how to access the source data that the
cursor navigates.
Many simple cursor use cases require only the :meth:`.forward` method
and the :attr:`.current` attribute. To navigate through all available
records, a `while` loop can be used::
while cursor.forward():
print(cursor.current["name"])
If only the first record is of interest, a similar `if` structure will
do the job::
if cursor.forward():
print(cursor.current["name"])
To combine `forward` and `current` into a single step, use the built-in
py:func:`next` function::
print(next(cursor)["name"])
Cursors are also iterable, so can be used in a loop::
for record in cursor:
print(record["name"])
For queries that are expected to return only a single value within a
single record, use the :meth:`.evaluate` method. This will return the
first value from the next record or :py:const:`None` if neither the
field nor the record are present::
print(cursor.evaluate())
"""
def __init__(self, result, hydrant=None, entities=None):
    """ Create a cursor over `result`, the data source being navigated.

    :param result: object providing fetch/fields/buffer/summary access
        to the underlying record stream
    :param hydrant: optional helper used to convert raw values into
        richer types before they are exposed (see forward/preview)
    :param entities: passed through to the hydrant on each hydrate call
    """
    self._result = result
    self._hydrant = hydrant
    self._entities = entities
    # record most recently selected by forward(); None before first move
    self._current = None
    # set once close() has buffered and released the stream
    self._closed = False
def __del__(self):
    """ Best-effort cleanup: ensure the underlying result is consumed. """
    try:
        self.close()
    except OSError:
        # The connection may already be gone (e.g. interpreter shutdown);
        # nothing actionable can be done in a finalizer.
        pass
def __repr__(self):
    # Represent the cursor by a short preview of up to 3 upcoming records.
    # NOTE(review): this advances nothing (preview peeks), but it does
    # build a Table — confirm that is acceptable for debug printing.
    return repr(self.preview(3))
def __next__(self):
    """ Advance the cursor and return the new current record.

    :raises StopIteration: when no further records are available
    """
    if not self.forward():
        raise StopIteration()
    return self._current

# Exists only for Python 2 iteration compatibility
next = __next__
def __iter__(self):
    """ Yield each remaining record in turn (cursors are forward-only,
    so iteration consumes the stream).
    """
    while self.forward():
        yield self._current
def __getitem__(self, key):
    # Delegate subscript access to the current record.
    # NOTE(review): before the first successful forward(), _current is None
    # and this raises TypeError — confirm that is the intended contract.
    return self._current[key]
@property
def current(self):
    """ Returns the current record or :py:const:`None` if no record
    has yet been selected.
    """
    # Read-only view of the record last selected by forward().
    return self._current
def close(self):
""" Close this cursor and free up all associated resources.
"""
if not self._closed:
self._result.buffer() # force consumption of remaining data
self._closed = True
def keys(self):
    """ Return the field names for the records in the stream.
    """
    # Field names come straight from the underlying result source.
    return self._result.fields()
def summary(self):
    """ Return the result summary.

    Buffers the remaining stream first so the summary metadata is
    complete, then wraps it in a :class:`.CypherSummary`.
    """
    self._result.buffer()
    metadata = self._result.summary()
    return CypherSummary(**metadata)
def plan(self):
    """ Return the plan returned with this result, if any.

    An explain plan ("plan") takes precedence over a profile
    ("profile"); :py:const:`None` is returned when the summary
    metadata carries neither.
    """
    self._result.buffer()
    metadata = self._result.summary()
    for key in ("plan", "profile"):
        if key in metadata:
            return CypherPlan(**metadata[key])
    return None
def stats(self):
    """ Return the query statistics.

    This contains details of the activity undertaken by the database
    kernel for the query, such as the number of entities created or
    deleted. Specifically, this returns a :class:`.CypherStats` object.

    >>> from py2neo import Graph
    >>> g = Graph()
    >>> g.run("CREATE (a:Person) SET a.name = 'Alice'").stats()
    constraints_added: 0
    constraints_removed: 0
    contained_updates: True
    indexes_added: 0
    indexes_removed: 0
    labels_added: 1
    labels_removed: 0
    nodes_created: 1
    nodes_deleted: 0
    properties_set: 1
    relationships_created: 0
    relationships_deleted: 0
    """
    self._result.buffer()
    metadata = self._result.summary()
    # Missing "stats" metadata degrades gracefully to an all-default object.
    return CypherStats(**metadata.get("stats", {}))
def forward(self, amount=1):
    """ Attempt to move the cursor one position forward (or by
    another amount if explicitly specified). The cursor will move
    position by up to, but never more than, the amount specified.
    If not enough scope for movement remains, only that remainder
    will be consumed. The total amount moved is returned.

    :param amount: the amount to move the cursor
    :returns: the amount that the cursor was able to move
    :raises ValueError: if `amount` is negative
    """
    if amount == 0:
        return 0
    if amount < 0:
        raise ValueError("Cursor can only move forwards")
    amount = int(amount)
    moved = 0
    v = self._result.protocol_version
    # Perf fix (resolves the old per-record TODO): fetch the field names at
    # most once per call instead of once per record; lazily, so that a call
    # which fetches no records still never touches fields().
    keys = None
    while moved != amount:
        values = self._result.fetch()
        if values is None:
            break
        if keys is None:
            keys = self._result.fields()
        if self._hydrant:
            values = self._hydrant.hydrate(keys, values, entities=self._entities, version=v)
        self._current = Record(zip(keys, values))
        moved += 1
    return moved
def preview(self, limit=1):
    """ Construct a :class:`.Table` containing a preview of
    upcoming records, including no more than the given `limit`.

    Peeks without consuming: the cursor position is unchanged.

    :param limit: maximum number of records to include in the
        preview
    :returns: :class:`.Table` containing the previewed records
    :raises ValueError: if `limit` is negative
    """
    if limit < 0:
        raise ValueError("Illegal preview size")
    version = self._result.protocol_version
    field_names = self._result.fields()
    raw = self._result.peek_records(int(limit))
    if self._hydrant:
        rows = [self._hydrant.hydrate(field_names, values,
                                      entities=self._entities, version=version)
                for values in raw]
    else:
        rows = list(raw)
    return Table(rows, field_names)
def evaluate(self, field=0):
    """ Return the value of the first field from the next record
    (or the value of another field if explicitly specified).

    This method attempts to move the cursor one step forward and,
    if successful, selects and returns an individual value from
    the new current record. The `field` argument may be either a
    positional index or a textual key; it defaults to the first
    value in the record.

    If the cursor cannot be moved forward or if the record contains
    no values, :py:const:`None` will be returned instead.

    This method is particularly useful when it is known that a
    Cypher query returns only a single value.

    :param field: field to select value from (optional)
    :returns: value of the field or :py:const:`None`

    Example:
        >>> from py2neo import Graph
        >>> g = Graph()
        >>> g.run("MATCH (a) WHERE a.email=$x RETURN a.name", x="<EMAIL>").evaluate()
        '<NAME>'
    """
    if not self.forward():
        return None
    try:
        return self[field]
    except IndexError:
        return None
def data(self, *keys):
    """ Consume and extract the entire result as a list of
    dictionaries.

    ::

        >>> from py2neo import Graph
        >>> graph = Graph()
        >>> graph.run("MATCH (a:Person) RETURN a.name, a.born LIMIT 4").data()
        [{'a.born': 1964, 'a.name': '<NAME>'},
         {'a.born': 1967, 'a.name': '<NAME>'},
         {'a.born': 1961, 'a.name': '<NAME>'},
         {'a.born': 1960, 'a.name': '<NAME>'}]

    :param keys: indexes or keys of the items to include; if none
        are provided, all values will be included
    :returns: list of dictionary of values, keyed by field name
    :raises IndexError: if an out-of-bounds index is specified
    """
    # Iterating self consumes the stream; each record does its own projection.
    return [record.data(*keys) for record in self]
def to_table(self):
    """ Consume and extract the entire result as a :class:`.Table`
    object.

    :return: the full query result
    """
    # Table iterates the cursor, which consumes all remaining records.
    return Table(self)
def to_subgraph(self):
    """ Consume and extract the entire result as a :class:`.Subgraph`
    containing the union of all the graph structures within.

    :return: :class:`.Subgraph` object, or :py:const:`None` if no
        record contributed a subgraph
    """
    combined = None
    for record in self:
        piece = record.to_subgraph()
        if piece is None:
            continue
        if combined is None:
            combined = piece
        else:
            combined |= piece
    return combined
def to_ndarray(self, dtype=None, order='K'):
    """ Consume and extract the entire result as a
    `numpy.ndarray <https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`_.

    .. note::
       This method requires `numpy` to be installed.

    :param dtype: value type for the array (forwarded to numpy.array)
    :param order: memory layout (forwarded to numpy.array)
    :warns: If `numpy` is not installed
    :returns: `ndarray <https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html>`__ object.
    """
    try:
        from numpy import array
    except ImportError:
        # numpy is an optional dependency: warn, then let the error surface.
        warn("Numpy is not installed.")
        raise
    else:
        # Each record becomes one row; this consumes the cursor.
        return array(list(map(list, self)), dtype=dtype, order=order)
def to_series(self, field=0, index=None, dtype=None):
""" Consume and extract one field of the entire result as a
`pandas.Series <http://pandas.pydata.org/pandas-docs/stable/dsintro.html#series>`_.
.. note::
This method requires `pandas` to be installed.
:param field:
:param index:
:param dtype:
:warns: If `pandas` is not installed
:returns: | |
# Option 4: ffma(a, -a, (2*a)
# Option 5: a * (2 - a)
#
# There are a lot of other possible combinations.
(('~ffma@32', ('fadd', b, ('fneg', a)), a, a), ('flrp', a, b, a), '!options->lower_flrp32'),
(('~ffma@32', a, 2.0, ('fneg', ('fmul', a, a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
(('~ffma@32', a, 2.0, ('fmul', ('fneg', a), a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
(('~ffma@32', a, ('fneg', a), ('fmul', 2.0, a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
(('~fmul@32', a, ('fadd', 2.0, ('fneg', a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
# we do these late so that we don't get in the way of creating ffmas
(('fmin', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmin', a, b))),
(('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),
# Putting this in 'optimizations' interferes with the bcsel(a, op(b, c),
# op(b, d)) => op(b, bcsel(a, c, d)) transformations. I do not know why.
(('bcsel', ('feq', ('fsqrt', 'a(is_not_negative)'), 0.0), intBitsToFloat(0x7f7fffff), ('frsq', a)),
('fmin', ('frsq', a), intBitsToFloat(0x7f7fffff))),
# Things that look like DPH in the source shader may get expanded to
# something that looks like dot(v1.xyz, v2.xyz) + v1.w by the time it gets
# to NIR. After FFMA is generated, this can look like:
#
# fadd(ffma(v1.z, v2.z, ffma(v1.y, v2.y, fmul(v1.x, v2.x))), v1.w)
#
# Reassociate the last addition into the first multiplication.
#
# Some shaders do not use 'invariant' in vertex and (possibly) geometry
# shader stages on some outputs that are intended to be invariant. For
# various reasons, this optimization may not be fully applied in all
# shaders used for different rendering passes of the same geometry. This
# can result in Z-fighting artifacts (at best). For now, disable this
# optimization in these stages. See bugzilla #111490. In tessellation
# stages applications seem to use 'precise' when necessary, so allow the
# optimization in those stages.
(('~fadd', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)'))), 'g(is_not_const)'),
('ffma', a, b, ('ffma', c, d, ('ffma', e, 'f', 'g'))), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
(('~fadd', ('ffma(is_used_once)', a, b, ('fmul', 'c(is_not_const_and_not_fsign)', 'd(is_not_const_and_not_fsign)') ), 'e(is_not_const)'),
('ffma', a, b, ('ffma', c, d, e)), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
(('~fadd', ('fneg', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)')))), 'g(is_not_const)'),
('ffma', ('fneg', a), b, ('ffma', ('fneg', c), d, ('ffma', ('fneg', e), 'f', 'g'))), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
# Section 8.8 (Integer Functions) of the GLSL 4.60 spec says:
#
# If bits is zero, the result will be zero.
#
# These prevent the next two lowerings generating incorrect results when
# count is zero.
(('ubfe', a, b, 0), 0),
(('ibfe', a, b, 0), 0),
# On Intel GPUs, BFE is a 3-source instruction. Like all 3-source
# instructions on Intel GPUs, it cannot have an immediate values as
# sources. There are also limitations on source register strides. As a
# result, it is very easy for 3-source instruction combined with either
# loads of immediate values or copies from weird register strides to be
# more expensive than the primitive instructions it represents.
(('ubfe', a, '#b', '#c'), ('iand', ('ushr', 0xffffffff, ('ineg', c)), ('ushr', a, b)), 'options->lower_bfe_with_two_constants'),
# b is the lowest order bit to be extracted and c is the number of bits to
# extract. The inner shift removes the bits above b + c by shifting left
# 32 - (b + c). ishl only sees the low 5 bits of the shift count, which is
# -(b + c). The outer shift moves the bit that was at b to bit zero.
# After the first shift, that bit is now at b + (32 - (b + c)) or 32 - c.
# This means that it must be shifted right by 32 - c or -c bits.
(('ibfe', a, '#b', '#c'), ('ishr', ('ishl', a, ('ineg', ('iadd', b, c))), ('ineg', c)), 'options->lower_bfe_with_two_constants'),
# Clean up no-op shifts that may result from the bfe lowerings.
(('ishl', a, 0), a),
(('ishl', a, -32), a),
(('ishr', a, 0), a),
(('ishr', a, -32), a),
(('ushr', a, 0), a),
]
# A few more extract cases we'd rather leave late
for N in [16, 32]:
    aN = 'a@{0}'.format(N)
    # NOTE(review): `M` is not defined in this section and the two names
    # below are never used here. Presumably `M` is left bound by an earlier
    # loop in this file — confirm, otherwise this raises NameError when the
    # generator script runs.
    u2uM = 'u2u{0}'.format(M)
    i2iM = 'i2i{0}'.format(M)
    for x in ['u', 'i']:
        x2xN = '{0}2{0}{1}'.format(x, N)
        extract_x8 = 'extract_{0}8'.format(x)
        extract_x16 = 'extract_{0}16'.format(x)
        # Narrowing to (u)int8 and widening back is just a byte extract.
        late_optimizations.extend([
            ((x2xN, ('u2u8', aN)), (extract_x8, a, 0), '!options->lower_extract_byte'),
            ((x2xN, ('i2i8', aN)), (extract_x8, a, 0), '!options->lower_extract_byte'),
        ])
        # For sizes above 16 bits, the same applies to 16-bit word extracts.
        if N > 16:
            late_optimizations.extend([
                ((x2xN, ('u2u16', aN)), (extract_x16, a, 0), '!options->lower_extract_word'),
                ((x2xN, ('i2i16', aN)), (extract_x16, a, 0), '!options->lower_extract_word'),
            ])
# Integer sizes
for s in [8, 16, 32, 64]:
    late_optimizations.extend([
        # Fuse two zero-tests into one: umin(a, b) is non-zero exactly when
        # both operands are non-zero, and zero when either is zero.
        (('iand', ('ine(is_used_once)', 'a@{}'.format(s), 0), ('ine', 'b@{}'.format(s), 0)), ('ine', ('umin', a, b), 0)),
        (('ior', ('ieq(is_used_once)', 'a@{}'.format(s), 0), ('ieq', 'b@{}'.format(s), 0)), ('ieq', ('umin', a, b), 0)),
    ])
# Float sizes
for s in [16, 32, 64]:
    late_optimizations.extend([
        # Expanded form of a lerp-like expression, emitted only when flrp is
        # being lowered for this bit size (inexact: '~' allows reassociation).
        (('~fadd@{}'.format(s), 1.0, ('fmul(is_used_once)', c , ('fadd', b, -1.0 ))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp{}'.format(s)),
        # bcsel(a, 0, b2f(!b)) == b2f(!(a || b)) for booleans.
        (('bcsel', a, 0, ('b2f{}'.format(s), ('inot', 'b@bool'))), ('b2f{}'.format(s), ('inot', ('ior', a, b)))),
    ])
# Hoist a shared first operand out of both arms of a bcsel:
# bcsel(a, fadd(b, c), fadd(b, d)) -> fadd(b, bcsel(a, c, d)).
for op in ['fadd']:
    late_optimizations += [
        (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, d)), (op, b, ('bcsel', a, c, d))),
        (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
    ]
# Same hoisting for the 3-source ffma: when two of the three operands are
# shared between both arms, only the differing operand goes through bcsel.
for op in ['ffma']:
    late_optimizations += [
        (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, c, e)), (op, b, c, ('bcsel', a, d, e))),
        (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, c, e)), (op, b, c, ('bcsel', a, d, e))),
        (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, e, d)), (op, b, ('bcsel', a, c, e), d)),
        (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, e, d)), (op, b, ('bcsel', a, c, e), d)),
    ]
# mediump: If an opcode is surrounded by conversions, remove the conversions.
# The rationale is that type conversions + the low precision opcode are more
# expensive than the same arithmetic opcode at higher precision.
#
# This must be done in late optimizations, because we need normal optimizations to
# first eliminate temporary up-conversions such as in op1(f2fmp(f2f32(op2()))).
#
# Unary opcodes
for op in ['fabs', 'fceil', 'fcos', 'fddx', 'fddx_coarse', 'fddx_fine', 'fddy',
           'fddy_coarse', 'fddy_fine', 'fexp2', 'ffloor', 'ffract', 'flog2', 'fneg',
           'frcp', 'fround_even', 'frsq', 'fsat', 'fsign', 'fsin', 'fsqrt']:
    late_optimizations += [(('~f2f32', (op, ('f2fmp', a))), (op, a))]

# Binary opcodes
for op in ['fadd', 'fdiv', 'fmax', 'fmin', 'fmod', 'fmul', 'fpow', 'frem']:
    late_optimizations += [(('~f2f32', (op, ('f2fmp', a), ('f2fmp', b))), (op, a, b))]

# Ternary opcodes
for op in ['ffma', 'flrp']:
    late_optimizations += [(('~f2f32', (op, ('f2fmp', a), ('f2fmp', b), ('f2fmp', c))), (op, a, b, c))]

# Comparison opcodes: only the operands are down-converted, the boolean
# result has no precision qualifier to strip.
for op in ['feq', 'fge', 'flt', 'fneu']:
    late_optimizations += [(('~' + op, ('f2fmp', a), ('f2fmp', b)), (op, a, b))]
# Do this last, so that the f2fmp patterns above have effect.
late_optimizations += [
    # Convert *2*mp instructions to concrete *2*16 instructions. At this point
    # any conversions that could have been removed will have been removed in
    # nir_opt_algebraic so any remaining ones are required.
    (('f2fmp', a), ('f2f16', a)),
    (('f2imp', a), ('f2i16', a)),
    (('f2ump', a), ('f2u16', a)),
    (('i2imp', a), ('i2i16', a)),
    (('i2fmp', a), ('i2f16', a)),
    # Bug fix: this entry previously read ('i2imp', a) — a duplicate of the
    # i2i16 rule above — so 'u2ump' was never lowered to a concrete u2u16.
    (('u2ump', a), ('u2u16', a)),
    (('u2fmp', a), ('u2f16', a)),
]
distribute_src_mods = [
# Try to remove some spurious negations rather than pushing them down.
(('fmul', ('fneg', a), ('fneg', b)), ('fmul', a, b)),
(('ffma', ('fneg', a), ('fneg', b), c), ('ffma', a, b, c)),
(('fdot2_replicated', ('fneg', a), ('fneg', b)), ('fdot2_replicated', a, b)),
(('fdot3_replicated', ('fneg', a), ('fneg', b)), ('fdot3_replicated', a, b)),
(('fdot4_replicated', ('fneg', a), ('fneg', b)), ('fdot4_replicated', a, b)),
(('fneg', ('fneg', a)), a),
(('fneg', ('fmul(is_used_once)', a, b)), ('fmul', ('fneg', a), b)),
(('fabs', ('fmul(is_used_once)', a, b)), ('fmul', ('fabs', a), ('fabs', b))),
(('fneg', ('ffma(is_used_once)', a, b, c)), ('ffma', ('fneg', a), b, ('fneg', c))),
(('fneg', ('flrp(is_used_once)', a, b, c)), ('flrp', ('fneg', a), ('fneg', b), c)),
(('fneg', ('fadd(is_used_once)', a, b)), ('fadd', ('fneg', a), ('fneg', b))),
# Note that | |
import copy
import sys
import os
import eventlet
import socketio
import pathlib
from .player import Player
from .room import Room
from .table import Table
from .ai import AI
import time
import random
import string
import math
# Global, in-process server state: every active Room lives in this list for
# the lifetime of the process (no persistence).
roomList = []
# Socket.IO server and the WSGI wrapper that the web server hosts.
sio = socketio.Server()
app = socketio.WSGIApp(sio)
@sio.event
def connect(sid, environ):
    """Socket.IO connect handler: every new client starts in the lobby."""
    print(sid, "in lobby")
@sio.on('active_player_list')
def on_event(sid, room_id):
    """Return a sanitised snapshot of every player in the room.

    Cards (`hand`) are stripped so no client sees another player's hand;
    each entry is the player's ``__dict__`` so it can be serialised.
    Returns None (and logs) when the room id is unknown.
    """
    room = next((room for room in roomList if room.room_id == room_id), None)
    if room is None:
        print("room does not exist")
        return None
    pl_list = []
    for pl in room.get_player_list():
        if pl.AI:
            # AI objects are rebuilt as plain Players so that only the
            # public-ish state below is exposed.
            # NOTE(review): mixes underscore attributes with property reads
            # (e.g. _isFolded vs pl.isFolded) — confirm against Player/AI.
            temp_pl = Player(pl._client_number, False, pl.get_name(), True)
            temp_pl._isFolded = pl.isFolded
            temp_pl._investment = pl.investment
            temp_pl._bankrupt = pl.bankrupt
            temp_pl._balance = pl.balance
        else:
            temp_pl = copy.deepcopy(pl)
        # Never leak hole cards to other clients.
        temp_pl.hand = []
        pl_list.append(temp_pl.__dict__)
    return pl_list
@sio.on('in_room')
def in_room(sid, data):
    """Report whether a named player is currently in a room.

    :param sid: socket id of the requesting client (unused)
    :param data: two-element sequence of (player name, room id)
    :returns: True when the room exists and contains a player with that
        name; False otherwise (including when the room does not exist,
        which previously raised AttributeError on ``None``)
    """
    name = data[0]
    room_id = data[1]
    room = next((room for room in roomList if room.room_id == room_id), None)
    if room is None:
        return False
    player = next((player for player in room.get_player_list()
                   if player.get_name() == name), None)
    return player is not None
# gui should make sure that only vip players can do this action
# pass the room object not the room_id
@sio.on('add_bot')
def add_bot(sid, room_id):
    """Add an AI-controlled player to a room and notify its members.

    :param sid: socket id of the requesting client (unused; the GUI is
        expected to restrict this action to the VIP player)
    :param room_id: id of the room the bot should join
    """
    room = next((room for room in roomList if room.room_id == room_id), None)
    if room is None:
        # Guard: an unknown room id previously raised AttributeError below.
        return
    # 32-char random id stands in for a socket sid so the bot cannot collide
    # with a real client id.
    sample = string.ascii_lowercase + string.digits
    ai_code = ''.join(random.choice(sample) for _ in range(32))
    ai_player = AI(ai_code, False, "AI BOT", True)
    ai_player._connected = False
    room.add_player(ai_player)
    sio.emit('ai_joined', room=room.room_id)
@sio.event
def disconnect(sid):
    """Tear down a client's state when its socket drops.

    Mid-game: bankrupt the leaver, delete the room if nobody human is left
    connected, or end the hand when fewer than two participants remain.
    In the lobby: remove the player, re-assign VIP to another human if the
    leaver was VIP, and delete rooms containing only AI players.
    """
    room = find_room(sid)
    if room is not None:
        player_list = room.get_player_list()
        player = next((player for player in player_list if player.get_client_number() == sid), None)
        # NOTE(review): `player` is assumed to be found whenever find_room()
        # matched; if it can be None this block raises AttributeError.
        # Zero out the leaver so the ongoing hand treats them as out.
        player._balance = 0
        player._investment = 0
        player.declare_bankrupt()
        player._connected = False
        # If game in progress, make sure at least two active players remain.
        if room.game_in_progress:
            pass
            # room.game_in_progress = False
            connected_players = sum([1 for p in room.get_player_list() if p._connected])
            # 0 humans still connected: empty the room and delete it.
            if connected_players == 0:
                room.game_in_progress = False
                # iterate over a copy because remove_player mutates the list
                for p in room.get_player_list()[:]:
                    room.remove_player(p)
                roomList.remove(room)
                return
            ai_num = sum([1 for p in room.get_player_list() if p.AI == True])
            # Only one participant (human + AI) left: the game cannot go on.
            if connected_players + ai_num <= 1:
                sio.emit('game_ended', "The game has ended.", room=room.room_id)
                room.game_in_progress = False
        else:
            # We are in the lobby (no game running).
            is_vip_ = player.is_vip
            # Remove the disconnecting player.
            room.remove_player(player)
            # count ai players in room
            ai_players = sum([1 for p in room.get_player_list() if p.AI == True])
            # If the leaver was VIP, promote the first remaining human.
            if is_vip_:
                for p in room.get_player_list():
                    if not p.AI:
                        p.is_vip = True
                        sio.emit('vip', room=p.get_client_number())
                        break
            ai_players = sum([1 for p in room.get_player_list() if p.AI == True])
            # No players left, or only AI players left: delete the room.
            if len(room.get_player_list()) == 0 or ai_players == len(room.get_player_list()):
                roomList.remove(room)
    print('disconnect', sid)
@sio.on('remove_player')
def remove_player(sid, data):
    """Kick the player at position `data[1]` out of room `data[0]`.

    AI players are removed from the room directly; human players are
    removed by disconnecting their socket.  Returns True only when the
    requester targeted itself, False when another player was removed,
    and None when the room cannot be resolved.
    """
    room_id = data[0]
    index = data[1]
    room = next((r for r in roomList if r.room_id == room_id), None)
    if room is None:
        return None
    target = room.get_player_list()[index]
    if target is None:
        return None
    if target.AI is not False:
        room.remove_player(target)
        return False
    client_id = target.get_client_number()
    if client_id == sid:
        return True
    sio.disconnect(client_id)
    return False
@sio.on('my_name')
def on_event(sid, name, room_id):
    """Register a named player in a room; the first one becomes VIP (host).

    NOTE(review): this re-uses the module-level name ``on_event`` (also used
    by the 'active_player_list' handler).  Both handlers stay registered
    because each function object was captured at decoration time, but only
    this one remains reachable by name.
    """
    room = next((room for room in roomList if room.room_id == room_id), None)
    # The first player to announce a name becomes the room VIP.
    if len(room.get_player_list()) == 0:
        print("vip")
        room.add_player(Player(sid, True, name))
        sio.emit('vip', room=sid)
    else:
        room.add_player(Player(sid, False, name))
    sio.emit('user_connection', (name + " has joined the room!"), room=room_id, skip_sid=sid)
@sio.event
def goto_room(sid, room_id):
    """Place a client into a room, creating the room on first use."""
    target = next((r for r in roomList if r.room_id == room_id), None)
    if target is None:
        target = Room(room_id)
        roomList.append(target)
    # Rooms hold at most 6 players and cannot be joined mid-game.
    if len(target.get_player_list()) < 6 and target.game_in_progress is False:
        sio.enter_room(sid, room_id)
        print(sid, "joined room", room_id)
        sio.emit('joined_room', ("You have successfully joined the room " + room_id, room_id), room=sid)
    else:
        sio.emit('connection_error', "Unauthorized", room=sid)
@sio.event
def leave_room(sid, room):
    # Remove the client from the given socket.io room and log it.
    # NOTE(review): this handler is shadowed by the zero-argument
    # `leave_room` defined immediately below (same event name), so it is
    # never the registered handler — confirm which signature clients emit.
    sio.leave_room(sid, room)
    print(sid, "left room", room)
@sio.event
def leave_room(sid):
    # NOTE(review): this stub redefines the `leave_room` handler above with a
    # different arity and only logs the sid — the client is never actually
    # removed from any room. Confirm whether it should be deleted or merged
    # with the two-argument handler.
    print(sid)
@sio.on('start_game')
def start_game(sid, room_id):
    """Run a complete poker game for a room.

    Deals hand after hand — pre-flop, flop, turn, river, showdown — until
    every player but one is out of money, emitting board and hand state to
    the connected clients as the game progresses.
    """
    room = next((room for room in roomList if room.room_id == room_id), None)
    # Fix: bail out when the room does not exist OR a game is already
    # running. The previous guard (`room is not None and
    # room.game_in_progress`) let a missing room fall through and crash on
    # the attribute access below.
    if room is None or room.game_in_progress:
        return
    room.game_in_progress = True
    sio.emit('message', "Game Starting...", room=room.room_id)
    table = room.get_Table()
    while True:
        # If everyone but one player is bankrupt, the game is over.
        isBroke = 0
        for player in room.get_player_list():
            if player.balance == 0:
                isBroke += 1
        if len(room.get_player_list()) - isBroke == 1:
            break
        else:
            table.new_round()
            sio.emit('new_hand')
            table.distribute_cards()
            small_blind = str(table.small_blind.get_client_number())
            big_blind = str(table.big_blind.get_client_number())
            dealer = str(table.dealer.get_client_number())
            min_bet = str(table.minimum_bet)
            round_num = str(Table.theRound)
            # Send each non-bankrupt player their hole cards privately.
            for player in room.get_player_list():
                if not player.bankrupt:
                    card_string = str(player.hand[0]), str(player.hand[1])
                    sio.emit('emit_hand', card_string, room=player.get_client_number())
            sio.emit('board_init_info', [dealer, small_blind, big_blind, min_bet, round_num], room=room.room_id)
            # Pre-flop betting round; if everybody folded, start the next hand.
            if not game_loop(room):
                continue
            table._deck.pick_card()  # the burn card
            table.add_to_visible_cards(table._deck.pick_card())
            table.add_to_visible_cards(table._deck.pick_card())  # The FLOP - three cards
            table.add_to_visible_cards(table._deck.pick_card())
            visibleCards = str(table._visible_cards[0]) + " " + str(table._visible_cards[1]) + " " + str(
                table._visible_cards[2])
            sio.emit('flop', visibleCards, room=room.room_id)
            table.change_last_action()
            # Change the first player to the player left of the last-action player.
            while True:
                first_player = next(table._dealer_gen_obj)
                if not first_player.isFolded:
                    while True:
                        if table.current_player == first_player:
                            break
                        else:
                            table.next_player()
                    break
            # Point the dealer generator back to the dealer.
            while True:
                current_d = next(table._dealer_gen_obj)
                if current_d == table.dealer:
                    break
            # Post-flop betting (skipped when the table flags skip_to_show).
            if not table.skip_to_show:
                if not game_loop(room):
                    continue
            table._deck.pick_card()  # the burn card
            table.add_to_visible_cards(table._deck.pick_card())  # The TURN - one card
            visibleCards += " " + str(table._visible_cards[3])
            sio.emit('turn', visibleCards, room=room.room_id)
            if not table.skip_to_show:
                if not game_loop(room):
                    continue
            table._deck.pick_card()  # the burn card
            table.add_to_visible_cards(table._deck.pick_card())  # The RIVER - one card
            visibleCards += " " + str(table._visible_cards[4])
            sio.emit('river', visibleCards, room=room.room_id)
            if not table.skip_to_show:
                if not game_loop(room):
                    continue
            # Showdown is handled by show() (defined elsewhere in this file).
            show(room)
            # At the end of the round, declare players bankrupt if they are
            # out of money.
            for p in room.get_player_list():
                if p.balance <= 0:
                    p.declare_bankrupt()
    # The remaining player with a non-zero balance is the winner.
    winner = None
    for player in room.get_player_list():
        if player.balance != 0:
            winner = player
    sio.emit('message', str(winner) + " has won the game!",
             room=room.room_id)
    room.game_in_progress = False
    sio.emit('game_ended', "The game has ended.", room=room.room_id)
def find_room(sid):
    """Return the room containing the client `sid`, or None if absent."""
    return next((r for r in roomList if r.player_present_sid(sid)), None)
# return True if game can still be continued
# returns False if everybody folded
def game_loop(room, num_raises=0):
table = room.get_Table()
bankrupt_players = sum([1 for p in room.get_player_list() if p.bankrupt])
folded = sum([1 for p in room.get_player_list() if p.isFolded])
check = len(room.get_player_list()) - bankrupt_players - folded
last_action_was_fold = False
# number of opponents
num_of_opponents = check - 1
while True:
player = table.current_player
is_check = True if player.investment == table.minimum_bet else False
checkOrCall = "Check" if is_check else "Call"
info = str(player.balance), str(player.investment), str(table.minimum_bet), str(checkOrCall)
sio.emit('which_players_turn', [player.get_client_number(), str(table.minimum_bet)], room=room.room_id)
if player.AI:
option = player.make_choice(num_of_opponents, player.hand, table.visible_cards, table.pot, table.minimum_bet - player.investment, player.investment)
pass
else:
try:
option = sio.call(event='your_turn', data=info, sid=player.get_client_number(), timeout = 300)
except:
print("Client failed to respond")
if is_check:
sio.emit('message', str(player.name) + " has been forced to check", room = room.room_id)
option = 1
else:
sio.emit('message', str(player.name) + " has been forced to fold", room = room.room_id)
option = 2
sio.emit('you_timed_out')
sio.emit('player_action', (player.get_name(), option), room=room.room_id)
if int(option) == 1:
# Going all in because cannot match table bet
if table.minimum_bet >= player.balance + player.investment:
sio.emit('message', str(player.name) + " is going all in!", room = room.room_id)
table.add_to_pot(player.balance)
player.add_investment(player.balance)
player.change_balance(-player.balance)
else:
if is_check:
sio.emit('message', str(player.name) + " checked", room = room.room_id)
else:
sio.emit('message', str(player.name) + " called", room = room.room_id)
player.change_balance(-(table.minimum_bet - player.investment))
table.add_to_pot(table.minimum_bet - player.investment)
player.add_investment(table.minimum_bet - player.investment)
# if is_check:
check -= 1
if int(option) == 2:
if player == table.last_action:
# modify last action to player to the right.
prev = table.current_player
table.next_player()
current = table.current_player
while True:
if current == player:
break
else:
prev = current
table.next_player()
current = table.current_player
if not check <= 1:
table._last_action = prev
else:
last_action_was_fold = True
player.fold()
sio.emit('message', str(player.name) + " has folded", room = room.room_id)
folded += 1
check -= 1
if int(option) == 3:
error = 0
while error < 3:
ask = "By how much do you want to raise"
if player.AI:
_raise = math.floor(player.make_raise(table.minimum_bet, player.balance))
_raise = _raise - (_raise % 5)
pass
else:
_raise = sio.call(event='raise', data=ask, sid=player.get_client_number())
if int(_raise) > player.balance:
# sio.emit('message', "You ain't a millionaire, try a smaller raise", room=player.get_client_number())
error += 1
else:
sio.emit('message', str(player.name) + " has raised by $" + str(_raise), room | |
this link https://towardsdatascience.com/k-nearest-neighbor-classifier-from-scratch-in-python-698e3de97063
# and this link https://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/
# region
# calculate the Euclidean distance between two 1d-arrays
def distance(instance1, instance2):
    """Return the Euclidean distance between two 1-D arrays."""
    return scipy.spatial.distance.euclidean(instance1, instance2)
def get_neighbors(training_set,
                  labels,
                  test_instance,
                  k,
                  distance=distance):
    """
    Return the k nearest neighbors of 'test_instance'.

    The result is a list of 3-tuples
        (instance, dist, label)
    where
        instance is the training sample itself (a row of training_set) —
            the previous docstring incorrectly said the index was stored,
        dist is the distance between test_instance and that sample,
        label is the sample's class label.
    'distance' is a reference to the function used to calculate distances.
    """
    # Pair every training sample with its distance and label, then keep the
    # k closest (stable sort preserves the original tie order).
    distances = [(sample, distance(test_instance, sample), label)
                 for sample, label in zip(training_set, labels)]
    distances.sort(key=lambda entry: entry[1])
    return distances[:k]
# 'vote' returns the most common class among the neighbors (majority voting).
def vote(neighbors):
    """Return the majority class label of (instance, dist, label) tuples."""
    tally = Counter(neighbor[2] for neighbor in neighbors)
    return tally.most_common(1)[0][0]
# 'vote_prob' is like 'vote' but returns the probability for every class
# (analogous to clf.predict_proba()).
def vote_prob(neighbors):
    """Return a numpy array of per-class probabilities for the neighbor votes."""
    class_counter = Counter()
    for neighbor in neighbors:
        class_counter[neighbor[2]] += 1
    # Total votes == number of neighbors. The previous version unpacked
    # most_common() into an unused `labels` tuple just to recompute this sum,
    # and crashed on an empty neighbor list.
    total_votes = sum(class_counter.values())
    return votesToProbability(class_counter.most_common(), total_votes)
def votesToProbability(tuplesList, totalVotes, numOfClasses=5):
    """Convert (label, votes) pairs into a per-class probability array.

    :param tuplesList: list of (label, votes) tuples, e.g. the output of
        Counter.most_common(), where label is an int class index
    :param totalVotes: total number of votes cast
    :param numOfClasses: number of classes (labels run 0..numOfClasses-1);
        defaults to 5, preserving the previously hard-coded value
    :return: numpy array of length numOfClasses where entry i is the
        fraction of votes for label i (0 when label i received no votes)
    """
    labelVotesDict = dict(tuplesList)
    # Labels absent from the dict received no votes -> probability 0.
    return np.asarray([labelVotesDict[i] / totalVotes if i in labelVotesDict else 0
                       for i in range(numOfClasses)])
# Make a prediction with neighbors
def predict_classification(training_set, labels, test_instance, k, distance=distance):
    """Predict the class of test_instance by majority vote of its k neighbors."""
    return vote(get_neighbors(training_set, labels, test_instance, k, distance=distance))
# Make a prediction probability with neighbors
def predict_proba_classification(training_set, labels, test_instance, k, distance=distance):
    """Return per-class probabilities for test_instance from its k neighbors."""
    return vote_prob(get_neighbors(training_set, labels, test_instance, k, distance=distance))
# kNN Algorithm
def k_nearest_neighbors(trainX, trainY, testX, num_neighbors):
    """Return a list with one class prediction per row of testX."""
    return [predict_classification(trainX, trainY, row, num_neighbors, distance=distance)
            for row in testX]
# kNN Algorithm probability predictions
def k_nearest_neighbors_proba(trainX, trainY, testX, num_neighbors):
    """Return a list with one per-class probability array per row of testX."""
    return [predict_proba_classification(trainX, trainY, row, num_neighbors, distance=distance)
            for row in testX]
# Evaluate an algorithm using a cross validation split
# Specific evaluation for our knn algorithm
def evaluate_algorithm(trainX, trainY, n_folds, labelEncoder):
    """Evaluate the scratch kNN classifier with stratified cross-validation.

    For every fold: predict with k=100 neighbors, record accuracy and the
    weighted precision/recall/F1, and draw the fold's ROC plot on a
    two-column grid of subplots.

    Fix: `n_folds` is now actually used — the split (and the subplot grid)
    was previously hard-coded to 10 folds regardless of the argument.

    :param trainX: feature matrix (dense ndarray)
    :param trainY: encoded labels
    :param n_folds: number of stratified folds
    :param labelEncoder: fitted LabelEncoder, forwarded to makeRocPlot
    :return: dict mapping metric name -> list of per-fold scores
    """
    # One ROC subplot per fold, laid out two per row.
    n_rows = (n_folds + 1) // 2
    f, axs = plt.subplots(n_rows, 2)
    f.set_figheight(30)
    f.set_figwidth(30)
    scoresAccuracy = list()
    scoresPrecision = list()
    scoresRecall = list()
    scoresF1 = list()
    cv = StratifiedKFold(n_splits=n_folds)
    i = 0
    z = 0
    k = 1
    for train, test in cv.split(trainX, trainY):
        predictions = k_nearest_neighbors(trainX[train], trainY[train], trainX[test], 100)
        predY = np.asarray(predictions)
        scoresAccuracy.append(accuracy_score(trainY[test], predY))
        scoresPrecision.append(precision_score(trainY[test], predY, average='weighted'))
        scoresRecall.append(recall_score(trainY[test], predY, average='weighted'))
        scoresF1.append(f1_score(trainY[test], predY, average='weighted'))
        # ROC plot for this fold needs per-class probabilities.
        predictions_proba = k_nearest_neighbors_proba(trainX[train], trainY[train], trainX[test], 100)
        predY_proba = np.asarray(predictions_proba)
        makeRocPlot(trainY[test], predY_proba, labelEncoder, axs[i, z])
        axs[i, z].set_title('Roc Plot for fold - {0}'.format(k))
        k += 1
        # Advance to the next grid cell, filling two plots per row.
        if z == 1:
            i += 1
            z = 0
        else:
            z = 1
    plt.show()
    scores = {'Accuracy': scoresAccuracy, 'Precision': scoresPrecision, 'Recall': scoresRecall, 'F1': scoresF1}
    return scores
def KnnClassification(trainX, trainY, testX, testY, labelEncoder):
    """
    Classify the text using the KNN classifier we implemented.

    Runs a 10-fold cross-validation report, then predicts on the held-out
    test set, printing a classification report and ROC plot.
    Returns the test-set accuracy.
    """
    def _mean(values):
        # Average of the per-fold scores.
        return sum(values) / float(len(values))

    trainXarray = trainX.toarray()
    testXarray = testX.toarray()
    print('\n----10 Fold Cross Validation Evaluation----')
    # evaluate algorithm over 10 stratified folds
    scores = evaluate_algorithm(trainXarray, trainY, 10, labelEncoder)
    print ('Precision \t %0.2f' % _mean(scores['Precision']))
    print ('Recalls \t %0.2f' % _mean(scores['Recall']))
    print ('F-Measure \t %0.2f' % _mean(scores['F1']))
    print('Accuracy: \t %0.2f' % _mean(scores['Accuracy']))
    # Classification report on the held-out test set (k = 100 neighbors).
    predY = np.asarray(k_nearest_neighbors(trainXarray, trainY, testXarray, 100))
    print('\n----Report for predictions on test dataset----')
    print(classification_report(testY, predY, target_names=list(labelEncoder.classes_)))
    predY_proba = np.asarray(k_nearest_neighbors_proba(trainXarray, trainY, testXarray, 100))
    print('\n----ROC plot for predictions on test dataset----')
    makeRocPlot(testY, predY_proba, labelEncoder)
    plt.show()
    return accuracy_score(testY, predY)
# endregion
# - ### *Split DataSet into TrainData and TestData*
# region
# Hold out 20% of the data; stratifying on CATEGORY keeps the class
# proportions identical in the train and test splits.
trainDataSet, testDataSet = train_test_split(myDataSetDf, test_size=0.2, stratify=myDataSetDf['CATEGORY'])
# reset index
trainDataSet.reset_index(drop=True, inplace=True)
testDataSet.reset_index(drop=True, inplace=True)
# save to tsv files
trainDataSet.to_csv('train_set.tsv', sep = '\t')
# save test_set categories separately, then drop them from the test set so
# the saved test_set.tsv contains no labels
testDataSetCategories = testDataSet[['CATEGORY']].copy()
testDataSetCategories.to_csv('test_set_categories.tsv', sep = '\t')
testDataSet = testDataSet.drop('CATEGORY', axis=1)
testDataSet.to_csv('test_set.tsv', sep = '\t')
# endregion
# Prepare train and test data that we will need below
# region
# build label encoder for categories
le = preprocessing.LabelEncoder()
le.fit(trainDataSet["CATEGORY"])
# transform categories into numbers
trainY = le.transform(trainDataSet["CATEGORY"])
testY = le.transform(testDataSetCategories["CATEGORY"])
# accuracy per (vectorizer, classifier) combination, filled in below
accuracyDict = dict()
# endregion
# ## __Vectorization__
# Let's do classification using 2 different ways of vectorization
# region language="javascript"
# IPython.OutputArea.prototype._should_scroll = function(lines) {
#     return false;
# }
# endregion
# - #### Bag-of-words vectorization
# region
# Raw term counts over the 1000 most frequent terms.
bowVectorizer = CountVectorizer(max_features=1000)
trainX = bowVectorizer.fit_transform(trainDataSet['CONTENT'])
# transform() only: the vocabulary must be fitted on the training split.
testX = bowVectorizer.transform(testDataSet['CONTENT'])
print('-------------SVM Classification with BOW Vectorization-------------')
accuracyDict["BOW-SVM"] = SvmClassification(trainX, trainY, testX, testY, le)
print('-------------SVM Classification with BOW Vectorization and GridSearchCV for demonstration-------------')
SvmClassificationGridSearchCVDemo(trainX, trainY, testX, testY, le)
print('\n-------------Random Forests Classification with BOW Vectorization-------------')
accuracyDict["BOW-RandomForests"] = RandomForestClassification(trainX, trainY, testX, testY, le)
print('\n-------------Naive Bayes Classification with BOW Vectorization-------------')
accuracyDict["BOW-NB"] = NaiveBayesClassification(trainX, trainY, testX, testY, le)
print('\n-------------K Nearest Neighbor Classification with BOW Vectorization-------------')
accuracyDict["BOW-knn"] = KnnClassification(trainX, trainY, testX, testY, le)
# endregion
# - #### Tf-idf vectorization
# region
# Tf-idf weighted features, same 1000-term cap as the BOW setup above.
tfIdfVectorizer = TfidfVectorizer(max_features=1000)
trainX = tfIdfVectorizer.fit_transform(trainDataSet['CONTENT'])
testX = tfIdfVectorizer.transform(testDataSet['CONTENT'])
print('-------------SVM Classification with TfIdf Vectorization-------------')
accuracyDict["TfIdf-SVM"] = SvmClassification(trainX, trainY, testX, testY, le)
print('\n-------------Random Forests Classification with TfIdf Vectorization-------------')
accuracyDict["TfIdf-RandomForests"] = RandomForestClassification(trainX, trainY, testX, testY, le)
print('\n-------------Naive Bayes Classification with TfIdf Vectorization-------------')
accuracyDict["TfIdf-NB"] = NaiveBayesClassification(trainX, trainY, testX, testY, le)
print('\n-------------K Nearest Neighbor Classification with TfIdf Vectorization-------------')
accuracyDict["TfIdf-knn"] = KnnClassification(trainX, trainY, testX, testY, le)
# endregion
# #### Results Summary
# region
# Accuracy of every (vectorizer, classifier) pair collected above.
resultsData = {r'Vectorizer \ Classifier': ['BOW', 'Tfidf'],
               'SVM': [accuracyDict["BOW-SVM"], accuracyDict["TfIdf-SVM"]],
               'Random Forest': [accuracyDict["BOW-RandomForests"], accuracyDict["TfIdf-RandomForests"]],
               'Naive Bayes': [accuracyDict["BOW-NB"], accuracyDict["TfIdf-NB"]],
               'K Nearest Neighbor': [accuracyDict["BOW-knn"], accuracyDict["TfIdf-knn"]]}
resultsDataFrame = pd.DataFrame(data=resultsData)
resultsDataFrame
# endregion
# ## __Beat the Benchmark (bonus)__
# region
def preprocessText(initText):
    """Normalize raw text for classification.

    Lowercases, strips URLs, punctuation, digits and repeated spaces,
    tokenizes, removes English stopwords, and returns the cleaned string
    ('' when nothing survives).
    """
    # Make everything lower case
    processedText = initText.lower()
    # Remove urls
    processedText = re.sub(r'(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/)?[a-z0-9]+([\-\.]{1}[a-z0-9]+)'
                           r'*\.[a-z]{2,5}(:[0-9]{1,5})?(\/.*)?', ' ', processedText)
    # Replace any punctuation with spaces
    for c in punctuation:
        processedText = processedText.replace(c, ' ')
    # Remove digits
    processedText = re.sub(r'\d+', '', processedText)
    # Collapse consecutive spaces
    processedText = re.sub(r" {2,}", ' ', processedText)
    # Split to words
    tokens = word_tokenize(processedText)
    # Remove stopwords (union of sklearn's and nltk's English lists)
    stopWords = ENGLISH_STOP_WORDS.union(nltkStopwords.words('english'))
    filtered = [w for w in tokens if w not in stopWords]
    # ' '.join handles the empty list and replaces the previous quadratic
    # manual string-concatenation loop.
    return ' '.join(filtered)
def stemmingPreprocess(initText):
    """Stem every token of the text with the Porter stemmer.

    Returns the stemmed tokens re-joined with single spaces ('' for empty
    input).
    """
    # Split to words
    tokens = word_tokenize(initText)
    # Do the stemming
    stemmer = PorterStemmer()
    stems = [stemmer.stem(token) for token in tokens]
    # ' '.join handles the empty-token case and replaces the previous
    # quadratic manual string-concatenation loop.
    return ' '.join(stems)
# endregion
# Let's do some preprocessing for train and test data
# region
trainDataPreprocessed = trainDataSet.copy()
testDataPreprocessed = testDataSet.copy()
# preprocess train data
for index, row in trainDataPreprocessed.iterrows():
    # Fix: .at performs a proper label-based scalar write. The previous
    # chained assignment `df.iloc[index]["CONTENT"] = ...` wrote into an
    # intermediate Series and is not guaranteed to propagate back to the
    # DataFrame (pandas SettingWithCopy pitfall). Labels are 0..n-1 here
    # because the index was reset after the split.
    trainDataPreprocessed.at[index, "CONTENT"] = preprocessText(row["CONTENT"])
# preprocess test data
for index, row in testDataPreprocessed.iterrows():
    testDataPreprocessed.at[index, "CONTENT"] = preprocessText(row["CONTENT"])
# endregion
# Let's do stemming
# region
for index, row in trainDataPreprocessed.iterrows():
    # Fix: .at writes the scalar back reliably; the previous chained
    # `iloc[index]["CONTENT"] = ...` assignment targeted an intermediate
    # Series and may silently fail to update the DataFrame.
    trainDataPreprocessed.at[index, "CONTENT"] = stemmingPreprocess(row["CONTENT"])
for index, row in testDataPreprocessed.iterrows():
    testDataPreprocessed.at[index, "CONTENT"] = stemmingPreprocess(row["CONTENT"])
# endregion
# We will check only the SVM classifier with Tf-idf vectorization
# region
# Re-vectorize from the preprocessed/stemmed text and compare against the
# earlier run on raw text.
tfIdfVectorizer = TfidfVectorizer(max_features=1000)
trainX = tfIdfVectorizer.fit_transform(trainDataPreprocessed['CONTENT'])
testX = tfIdfVectorizer.transform(testDataPreprocessed['CONTENT'])
print('\n-------------SVM Classification with TfIdf Vectorization in processed text-------------')
accuracyDict["TfIdf-SVM-processed"] = SvmClassification(trainX, trainY, testX, testY, le)
# endregion
# Let's compare scores
# region
resultsDataCompare = {'SVM without preprocessing': [accuracyDict["TfIdf-SVM"]],
                      'SVM with preprocessing': [accuracyDict["TfIdf-SVM-processed"]]}
resultsCompareDataFrame = pd.DataFrame(data=resultsDataCompare)
resultsCompareDataFrame
# endregion
# As we see there is no big difference between scores for max_features=1000 in TfidfVectorizer.
# Let's check what happens for max_features=100
# The same (smaller) vectorizer instance is re-fitted for both runs below.
tfIdfVectorizer = TfidfVectorizer(max_features=100)
# region
trainX = tfIdfVectorizer.fit_transform(trainDataSet['CONTENT'])
testX = tfIdfVectorizer.transform(testDataSet['CONTENT'])
print('-------------SVM Classification with TfIdf Vectorization for max_features=100-------------')
accuracyDict["TfIdf-SVM-100"] = SvmClassification(trainX, trainY, testX, testY, le)
# endregion
# region
trainX = tfIdfVectorizer.fit_transform(trainDataPreprocessed['CONTENT'])
testX = tfIdfVectorizer.transform(testDataPreprocessed['CONTENT'])
print('\n-------------SVM Classification with TfIdf Vectorization in processed text for max_features=100-------------')
accuracyDict["TfIdf-SVM-processed-100"] = SvmClassification(trainX, trainY, testX, testY, le)
# endregion
# Let's compare scores one more time
# region
resultsDataCompare = {'SVM without preprocessing for max_features=100': [accuracyDict["TfIdf-SVM-100"]],
                      'SVM with preprocessing for max_features=100': [accuracyDict["TfIdf-SVM-processed-100"]]}
resultsCompareDataFrame = pd.DataFrame(data=resultsDataCompare)
resultsCompareDataFrame
# endregion
# Here we can see a significant difference.
# ## __Clustering__
def KmeansClustering(trainX, numberOfClusters, numberOfRepeats):
    """Cluster trainX with nltk's k-means using cosine distance.

    Returns the fitted clusterer and the per-sample cluster assignments.
    (Example adapted from https://www.nltk.org/_modules/nltk/cluster/kmeans.html#demo)
    """
    clusterer = KMeansClusterer(numberOfClusters, cosine_distance,
                                initial_means=None, repeats=numberOfRepeats)
    return clusterer, clusterer.cluster(trainX, assign_clusters=True)
# - #### Compression using PCA method
def principalComponentAnalysis(nComponents, trainX, labels, clusters):
# reduce the features to 2D
random_state = 0
pca = PCA(n_components=nComponents, random_state=random_state)
#reduced_features = pca.fit_transform(trainX.toarray())
reduced_features = pca.fit_transform(trainX)
# reduce the | |
# Copyright (c) 2015-2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from datetime import datetime
import logging
import re
from tests.st.utils.exceptions import CommandExecError
_log = logging.getLogger(__name__)

# Regex matching one Felix log line; the named groups are consumed by
# LogAnalyzer._process_log_line. Raw strings fix the invalid escape
# sequences ("\d", "\w") that the plain strings produced — the compiled
# pattern is unchanged.
FELIX_LOG_FORMAT = (
    r"(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).\d{0,3} "
    r"\[(?P<loglevel>\w+)\]"
    r"\[(?P<pid>\d+)(/\d+)?\] "
    r"(?P<logtext>.*)"
)
# strptime() format for the 'timestamp' group above.
TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S"

# The number of additional logs to trace out before the first error log, and
# the maximum number of errors to report.
NUM_CONTEXT_LOGS = 300
MAX_NUM_ERRORS = 100

# This is the list of logs we should ignore for all tests.
LOGS_IGNORE_ALL_TESTS = [
    "Failed to connect to syslog error=Unix syslog delivery error level=",
    "Exiting for config change",
    "Exiting. reason=\"config changed\"",
    "Exiting immediately reason=\"config changed\"",
]
class Log(object):
    """
    Class encapsulating information about a log extracted from a log file.
    """

    def __init__(self, timestamp, level, pid, msg):
        """
        :param timestamp: The log datetime.
        :param level: The log level.
        :param pid: The PID of the process that created the log.
        :param msg: The log text.
        """
        self.timestamp = timestamp
        self.level = level
        self.pid = pid
        self.msg = msg

    def append(self, logtext):
        """
        Append text to the current message, separated by a newline.

        :param logtext: The text to append to the log.
        """
        self.msg = self.msg + "\n" + logtext

    def detailed(self):
        """Return a banner-style, multi-line rendering of the log."""
        return "=== LOG %s %s [pid %s] ===\n%s" % (
            self.timestamp, self.level, self.pid, self.msg)

    def __str__(self):
        return "%s %s %s %s" % (self.timestamp, self.level, self.pid, self.msg)

    # repr is identical to str for this value-like class.
    __repr__ = __str__
class LogAnalyzer(object):
"""
LogAnalyzer class to check any new logs generated since the analyzer
was instantiated.
    This is a fairly simple parser - it doesn't check flipped files.
"""
    def __init__(self, host, filename, log_format, timestamp_format,
                 continuation_level=None):
        """
        :param host: the host running calico-node
        :param filename: The log filename on the server
        :param log_format: The format of the logs
        :param timestamp_format: The date/time format in the logs.
        :param continuation_level: An optional log level that indicates the
        log is a continuation of the previous log (i.e. the text can be
        extracted and appended to the previous log).
        The log format should be a regex string containing the following
        named matches:
          - timestamp (the extracted timestamp)
          - loglevel (the log level)
          - pid (the process ID)
          - logtext (the actual log message)
        The timestamp format is the format of the extracted timestamp in
        notation used by datetime.datetime.strptime().
        """
        self.host = host
        self.filename = filename
        # Compiled once; every log line is matched against this regex.
        self.log_regex = re.compile(log_format)
        self.timestamp_format = timestamp_format
        # Baseline snapshot of the log file, populated by reset() below.
        self.init_log_time = None
        self.init_log_lines = None
        self.continuation_level = continuation_level
        # Store the time of the last log in the file.
        self.reset()
    def reset(self):
        """
        Initialise the time of the first log in the log file and the number
        of lines in the log file.
        This information is used to work out where to start from when looking
        at new logs: a changed first-log time means the file was replaced,
        and the stored line count marks where appended logs begin.
        """
        _log.debug("Resetting log analyzer on %s", self.host.name)
        # Grab the time of the first log.
        self.init_log_time = self._get_first_log_time()
        _log.debug("First log has timestamp: %s", self.init_log_time)
        self.init_log_lines = self._get_logs_num_lines()
        _log.debug("Log file has %s lines", self.init_log_lines)
def _get_first_log_time(self):
"""
Extract the time of the first log in the file. This is used to
determine whether a file has flipped during a test.
"""
cmd = "head -100 %s"
for log in self._parse_logs(cmd, self.filename):
return log.timestamp
return None
def _get_logs_num_lines(self):
"""
Return the number of lines in the log file.
:return: The number of lines in the log file or None if the file does
not exist or cannot be read.
"""
cmd = "wc -l %s" % self.filename
lines = None
stdout = None
try:
stdout = self.host.execute(cmd)
except CommandExecError:
_log.debug("Error running command: %s", cmd)
_log.debug("Extract number of lines in file: %s",
self.filename)
try:
lines = int(stdout.split(" ")[0])
except ValueError:
_log.error("Unable to parse output: %s", stdout)
except AttributeError:
_log.error("None output?: %s", stdout)
return lines
def get_latest_logs(self, logfilter=None):
"""
Get the latest (filtered) logs from the server.
:param logfilter: An optional filter that determines whether a log
should be stored. This is a function that takes the log as the only
argument and returns True if the log should be filtered _out_ of the
list.
:return: A list of Log objects.
"""
return [log for log in self._parse_latest_logs() if not logfilter or not logfilter(log)]
    def _parse_latest_logs(self):
        """
        Parse the latest logs from the server, returning a generator that
        iterates through the logs.

        :return: A Log generator.
        """
        # Use the entire log file if the file has flipped (i.e. the first log
        # time is not the same as the one captured by reset()); otherwise
        # tail everything after the line count captured by reset().
        first_log_time = self._get_first_log_time()
        _log.debug("First log has timestamp: %s", first_log_time)
        if first_log_time != self.init_log_time or \
                not self.init_log_lines:
            _log.debug("Log file is new")
            cmd = "cat %s"
        else:
            _log.debug("Check appended logs")
            # tail -n +K prints from line K onwards (1-based), hence +1.
            cmd = "tail -n +%s %s" % (self.init_log_lines + 1, self.filename)
        return self._parse_logs(cmd, self.filename)
    def _parse_logs(self, cmd, filename):
        """
        Parse the logs from the output of the supplied command, returning a
        generator that iterates through the logs.

        :param cmd: The command to run to output the logs.
        :param filename: The log filename (passed through to
        host.execute_readline).
        :return: A Log generator.
        """
        last_log = None
        try:
            for line in self.host.execute_readline(cmd, filename):
                log = self._process_log_line(line, last_log)
                # Logs may be continued, in which case we only return the log
                # when the parsing indicates a new log.
                if last_log and last_log != log:
                    yield last_log
                last_log = log
        except Exception:
            # Best-effort: a host/communication failure should not abort the
            # caller, so log the problem and yield whatever was parsed so far.
            _log.exception(
                "Hit exception getting logs from %s - skip logs",
                self.host.name)
        # Yield the final log.
        if last_log:
            yield last_log
def _process_log_line(self, line, last_log):
"""
Build up a list of logs from the supplied log line.
If a line in the logs_text does not match the format of the log string
it is assumed it is a continuation of the previous log. Similarly,
a log with level "TRACE" is also treated as a continuation.
:param line: The log line to process. This may either add a new log
or may be a continuation of a previous log, or may be filtered out.
:param last_log: The previous log that was processed by this command.
This may be None for the first line in the log file.
:return: The log that was added or updated by this method. This may
return None if no log was parsed. If this line was appended to the
previous log, it will return last_log.
"""
# Put the full text of the log into logtext, but strip off ending whitespace because
# we'll add \n back to it when we append to it
logtext = line.rstrip()
# Strip superfluous whitespace
line = line.strip()
# Check the line for a log match.
log_match = self.log_regex.match(line)
# If the line does not match the regex it will be a continuation
# of the previous log. If there was no previous log then we must
# have starting parsing in the middle of a multi-line log.
if not log_match:
if last_log:
last_log.append(line)
return last_log
# Extract the parameters from the match object.
groupdict = log_match.groupdict()
loglevel = groupdict["loglevel"]
timestamp = datetime.strptime(groupdict["timestamp"],
self.timestamp_format)
pid = groupdict["pid"]
# Neutron logs use a log level of TRACE to continue a multi-line
# log. If there was no previous log then we must have starting parsing
# in the middle of a multi-line log.
if self.continuation_level == loglevel:
if last_log:
last_log.append(logtext)
return last_log
# Create and return the new log. We don't add it until we start the
# next log as we need to get the entire log before we can run it
# through the filter.
| |
= Constraint(expr= - 9*m.b125 - 11*m.b133 - 5*m.b141 - 7*m.b149 - 6*m.b157 - 4*m.b165 - 0.0025*m.x229
- 0.002*m.x253 - 0.00222222222222222*m.x277 - m.x416 + m.x417 >= 0)
# ---------------------------------------------------------------------------
# Machine-generated Pyomo model data — do not edit constraint values by hand.
# NOTE(review): the group descriptions below are inferred from the algebraic
# structure of the generated constraints; confirm against the model source.
# ---------------------------------------------------------------------------
# c138-c148: sequencing/linking constraints — each couples two consecutive
# continuous variables (e.g. -m.x417 + m.x418) with assignment binaries and
# small rate coefficients.
m.c138 = Constraint(expr= - 9*m.b126 - 11*m.b134 - 5*m.b142 - 7*m.b150 - 6*m.b158 - 4*m.b166 - 0.0025*m.x230
                          - 0.002*m.x254 - 0.00222222222222222*m.x278 - m.x417 + m.x418 >= 0)
m.c139 = Constraint(expr= - 9*m.b127 - 11*m.b135 - 5*m.b143 - 7*m.b151 - 6*m.b159 - 4*m.b167 - 0.0025*m.x231
                          - 0.002*m.x255 - 0.00222222222222222*m.x279 - m.x418 + m.x419 >= 0)
m.c140 = Constraint(expr= - 9*m.b128 - 11*m.b136 - 5*m.b144 - 7*m.b152 - 6*m.b160 - 4*m.b168 - 0.0025*m.x232
                          - 0.002*m.x256 - 0.00222222222222222*m.x280 - m.x419 + m.x423 >= 0)
m.c141 = Constraint(expr= - 11*m.b169 - m.b177 - 2*m.b185 - 5*m.b193 - 6*m.b201 - m.b209 - 0.00111111111111111*m.x233
                          - 0.00166666666666667*m.x257 - 0.000909090909090909*m.x281 - m.x412 + m.x413 >= 0)
m.c142 = Constraint(expr= - 11*m.b170 - m.b178 - 2*m.b186 - 5*m.b194 - 6*m.b202 - m.b210 - 0.00111111111111111*m.x234
                          - 0.00166666666666667*m.x258 - 0.000909090909090909*m.x282 - m.x413 + m.x414 >= 0)
m.c143 = Constraint(expr= - 11*m.b171 - m.b179 - 2*m.b187 - 5*m.b195 - 6*m.b203 - m.b211 - 0.00111111111111111*m.x235
                          - 0.00166666666666667*m.x259 - 0.000909090909090909*m.x283 - m.x414 + m.x415 >= 0)
m.c144 = Constraint(expr= - 11*m.b172 - m.b180 - 2*m.b188 - 5*m.b196 - 6*m.b204 - m.b212 - 0.00111111111111111*m.x236
                          - 0.00166666666666667*m.x260 - 0.000909090909090909*m.x284 - m.x415 + m.x416 >= 0)
m.c145 = Constraint(expr= - 11*m.b173 - m.b181 - 2*m.b189 - 5*m.b197 - 6*m.b205 - m.b213 - 0.00111111111111111*m.x237
                          - 0.00166666666666667*m.x261 - 0.000909090909090909*m.x285 - m.x416 + m.x417 >= 0)
m.c146 = Constraint(expr= - 11*m.b174 - m.b182 - 2*m.b190 - 5*m.b198 - 6*m.b206 - m.b214 - 0.00111111111111111*m.x238
                          - 0.00166666666666667*m.x262 - 0.000909090909090909*m.x286 - m.x417 + m.x418 >= 0)
m.c147 = Constraint(expr= - 11*m.b175 - m.b183 - 2*m.b191 - 5*m.b199 - 6*m.b207 - m.b215 - 0.00111111111111111*m.x239
                          - 0.00166666666666667*m.x263 - 0.000909090909090909*m.x287 - m.x418 + m.x419 >= 0)
m.c148 = Constraint(expr= - 11*m.b176 - m.b184 - 2*m.b192 - 5*m.b200 - 6*m.b208 - m.b216 - 0.00111111111111111*m.x240
                          - 0.00166666666666667*m.x264 - 0.000909090909090909*m.x288 - m.x419 + m.x423 >= 0)
# c149-c220: big-M activation bounds — each continuous variable m.xNNN is
# forced to zero unless its binary m.bNNN is on (the coefficient is the
# big-M / capacity value for that group of eight variables).
m.c149 = Constraint(expr= - 200000*m.b1 + m.x217 <= 0)
m.c150 = Constraint(expr= - 200000*m.b2 + m.x218 <= 0)
m.c151 = Constraint(expr= - 200000*m.b3 + m.x219 <= 0)
m.c152 = Constraint(expr= - 200000*m.b4 + m.x220 <= 0)
m.c153 = Constraint(expr= - 200000*m.b5 + m.x221 <= 0)
m.c154 = Constraint(expr= - 200000*m.b6 + m.x222 <= 0)
m.c155 = Constraint(expr= - 200000*m.b7 + m.x223 <= 0)
m.c156 = Constraint(expr= - 200000*m.b8 + m.x224 <= 0)
m.c157 = Constraint(expr= - 100000*m.b9 + m.x225 <= 0)
m.c158 = Constraint(expr= - 100000*m.b10 + m.x226 <= 0)
m.c159 = Constraint(expr= - 100000*m.b11 + m.x227 <= 0)
m.c160 = Constraint(expr= - 100000*m.b12 + m.x228 <= 0)
m.c161 = Constraint(expr= - 100000*m.b13 + m.x229 <= 0)
m.c162 = Constraint(expr= - 100000*m.b14 + m.x230 <= 0)
m.c163 = Constraint(expr= - 100000*m.b15 + m.x231 <= 0)
m.c164 = Constraint(expr= - 100000*m.b16 + m.x232 <= 0)
m.c165 = Constraint(expr= - 225000*m.b17 + m.x233 <= 0)
m.c166 = Constraint(expr= - 225000*m.b18 + m.x234 <= 0)
m.c167 = Constraint(expr= - 225000*m.b19 + m.x235 <= 0)
m.c168 = Constraint(expr= - 225000*m.b20 + m.x236 <= 0)
m.c169 = Constraint(expr= - 225000*m.b21 + m.x237 <= 0)
m.c170 = Constraint(expr= - 225000*m.b22 + m.x238 <= 0)
m.c171 = Constraint(expr= - 225000*m.b23 + m.x239 <= 0)
m.c172 = Constraint(expr= - 225000*m.b24 + m.x240 <= 0)
m.c173 = Constraint(expr= - 300000*m.b25 + m.x241 <= 0)
m.c174 = Constraint(expr= - 300000*m.b26 + m.x242 <= 0)
m.c175 = Constraint(expr= - 300000*m.b27 + m.x243 <= 0)
m.c176 = Constraint(expr= - 300000*m.b28 + m.x244 <= 0)
m.c177 = Constraint(expr= - 300000*m.b29 + m.x245 <= 0)
m.c178 = Constraint(expr= - 300000*m.b30 + m.x246 <= 0)
m.c179 = Constraint(expr= - 300000*m.b31 + m.x247 <= 0)
m.c180 = Constraint(expr= - 300000*m.b32 + m.x248 <= 0)
m.c181 = Constraint(expr= - 125000*m.b33 + m.x249 <= 0)
m.c182 = Constraint(expr= - 125000*m.b34 + m.x250 <= 0)
m.c183 = Constraint(expr= - 125000*m.b35 + m.x251 <= 0)
m.c184 = Constraint(expr= - 125000*m.b36 + m.x252 <= 0)
m.c185 = Constraint(expr= - 125000*m.b37 + m.x253 <= 0)
m.c186 = Constraint(expr= - 125000*m.b38 + m.x254 <= 0)
m.c187 = Constraint(expr= - 125000*m.b39 + m.x255 <= 0)
m.c188 = Constraint(expr= - 125000*m.b40 + m.x256 <= 0)
m.c189 = Constraint(expr= - 150000*m.b41 + m.x257 <= 0)
m.c190 = Constraint(expr= - 150000*m.b42 + m.x258 <= 0)
m.c191 = Constraint(expr= - 150000*m.b43 + m.x259 <= 0)
m.c192 = Constraint(expr= - 150000*m.b44 + m.x260 <= 0)
m.c193 = Constraint(expr= - 150000*m.b45 + m.x261 <= 0)
m.c194 = Constraint(expr= - 150000*m.b46 + m.x262 <= 0)
m.c195 = Constraint(expr= - 150000*m.b47 + m.x263 <= 0)
m.c196 = Constraint(expr= - 150000*m.b48 + m.x264 <= 0)
m.c197 = Constraint(expr= - 250000*m.b49 + m.x265 <= 0)
m.c198 = Constraint(expr= - 250000*m.b50 + m.x266 <= 0)
m.c199 = Constraint(expr= - 250000*m.b51 + m.x267 <= 0)
m.c200 = Constraint(expr= - 250000*m.b52 + m.x268 <= 0)
m.c201 = Constraint(expr= - 250000*m.b53 + m.x269 <= 0)
m.c202 = Constraint(expr= - 250000*m.b54 + m.x270 <= 0)
m.c203 = Constraint(expr= - 250000*m.b55 + m.x271 <= 0)
m.c204 = Constraint(expr= - 250000*m.b56 + m.x272 <= 0)
m.c205 = Constraint(expr= - 112500*m.b57 + m.x273 <= 0)
m.c206 = Constraint(expr= - 112500*m.b58 + m.x274 <= 0)
m.c207 = Constraint(expr= - 112500*m.b59 + m.x275 <= 0)
m.c208 = Constraint(expr= - 112500*m.b60 + m.x276 <= 0)
m.c209 = Constraint(expr= - 112500*m.b61 + m.x277 <= 0)
m.c210 = Constraint(expr= - 112500*m.b62 + m.x278 <= 0)
m.c211 = Constraint(expr= - 112500*m.b63 + m.x279 <= 0)
m.c212 = Constraint(expr= - 112500*m.b64 + m.x280 <= 0)
m.c213 = Constraint(expr= - 275000*m.b65 + m.x281 <= 0)
m.c214 = Constraint(expr= - 275000*m.b66 + m.x282 <= 0)
m.c215 = Constraint(expr= - 275000*m.b67 + m.x283 <= 0)
m.c216 = Constraint(expr= - 275000*m.b68 + m.x284 <= 0)
m.c217 = Constraint(expr= - 275000*m.b69 + m.x285 <= 0)
m.c218 = Constraint(expr= - 275000*m.b70 + m.x286 <= 0)
m.c219 = Constraint(expr= - 275000*m.b71 + m.x287 <= 0)
m.c220 = Constraint(expr= - 275000*m.b72 + m.x288 <= 0)
# c221-c223: lower bounds tying m.x409/m.x410/m.x411 to multiples of m.x423.
m.c221 = Constraint(expr= m.x409 - 50*m.x423 >= 0)
m.c222 = Constraint(expr= m.x410 - 100*m.x423 >= 0)
m.c223 = Constraint(expr= m.x411 - 250*m.x423 >= 0)
# c224-c247: m.x420, m.x421 and m.x422 act as upper bounds (max-style
# envelopes) over their respective groups of eight variables.
m.c224 = Constraint(expr= m.x361 - m.x420 <= 0)
m.c225 = Constraint(expr= m.x362 - m.x420 <= 0)
m.c226 = Constraint(expr= m.x363 - m.x420 <= 0)
m.c227 = Constraint(expr= m.x364 - m.x420 <= 0)
m.c228 = Constraint(expr= m.x365 - m.x420 <= 0)
m.c229 = Constraint(expr= m.x366 - m.x420 <= 0)
m.c230 = Constraint(expr= m.x367 - m.x420 <= 0)
m.c231 = Constraint(expr= m.x368 - m.x420 <= 0)
m.c232 = Constraint(expr= m.x377 - m.x421 <= 0)
m.c233 = Constraint(expr= m.x378 - m.x421 <= 0)
m.c234 = Constraint(expr= m.x379 - m.x421 <= 0)
m.c235 = Constraint(expr= m.x380 - m.x421 <= 0)
m.c236 = Constraint(expr= m.x381 - m.x421 <= 0)
m.c237 = Constraint(expr= m.x382 - m.x421 <= 0)
m.c238 = Constraint(expr= m.x383 - m.x421 <= 0)
m.c239 = Constraint(expr= m.x384 - m.x421 <= 0)
m.c240 = Constraint(expr= m.x393 - m.x422 <= 0)
m.c241 = Constraint(expr= m.x394 - m.x422 <= 0)
m.c242 = Constraint(expr= m.x395 - m.x422 <= 0)
m.c243 = Constraint(expr= m.x396 - m.x422 <= 0)
m.c244 = Constraint(expr= m.x397 - m.x422 <= 0)
m.c245 = Constraint(expr= m.x398 - m.x422 <= 0)
m.c246 = Constraint(expr= m.x399 - m.x422 <= 0)
m.c247 = Constraint(expr= m.x400 - m.x422 <= 0)
# c248: cardinality constraint — at most 3 of the binaries b73..b168 active.
m.c248 = Constraint(expr= m.b73 + m.b74 + m.b75 + m.b76 + m.b77 + m.b78 + m.b79 + m.b80 + m.b81 + m.b82 + m.b83
                          + m.b84 + m.b85 + m.b86 + m.b87 + m.b88 + m.b89 + m.b90 + m.b91 + m.b92 + m.b93 + m.b94
                          + m.b95 + m.b96 + m.b97 + m.b98 + m.b99 + m.b100 + m.b101 + m.b102 + m.b103 + m.b104 + m.b105
                          + m.b106 + m.b107 + m.b108 + m.b109 + m.b110 + m.b111 + m.b112 + m.b113 + m.b114 + m.b115
                          + m.b116 + m.b117 + m.b118 + m.b119 + m.b120 + m.b121 + m.b122 + m.b123 + m.b124 + m.b125
                          + m.b126 + m.b127 + m.b128 + m.b129 + m.b130 + m.b131 + m.b132 + m.b133 + m.b134 + m.b135
                          + m.b136 + m.b137 + m.b138 + m.b139 + m.b140 + m.b141 + m.b142 + m.b143 + m.b144 + m.b145
                          + m.b146 + m.b147 + m.b148 + m.b149 + m.b150 + m.b151 + m.b152 + m.b153 + m.b154 + m.b155
                          + m.b156 + m.b157 + m.b158 + m.b159 + m.b160 + m.b161 + m.b162 + m.b163 + m.b164 + m.b165
                          + m.b166 + m.b167 + m.b168 <= 3)
m.c249 = Constraint(expr= m.b169 + m.b170 + m.b171 + | |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import (Mapping, Optional, Tuple, Union, List, FrozenSet,
DefaultDict, TYPE_CHECKING)
import numbers
import numpy as np
from cirq import protocols, qis, value
from cirq._doc import document
from cirq.linalg import operator_spaces
from cirq.ops import identity, raw_types, pauli_gates, pauli_string
from cirq.ops.pauli_string import PauliString, _validate_qubit_mapping
from cirq.value.linear_dict import _format_terms
if TYPE_CHECKING:
import cirq
UnitPauliStringT = FrozenSet[Tuple[raw_types.Qid, pauli_gates.Pauli]]
PauliSumLike = Union[int, float, complex, PauliString, 'PauliSum', pauli_string.
SingleQubitPauliStringGateOperation]
document(
PauliSumLike, # type: ignore
"""Any value that can be easily translated into a sum of Pauli products.
""")
class LinearCombinationOfGates(value.LinearDict[raw_types.Gate]):
    """Represents linear operator defined by a linear combination of gates.

    Suppose G1, G2, ..., Gn are gates and b1, b2, ..., bn are complex
    numbers. Then

        LinearCombinationOfGates({G1: b1, G2: b2, ..., Gn: bn})

    represents the linear operator

        A = b1 G1 + b2 G2 + ... + bn Gn

    Note that A may not be unitary or even normal.

    Rather than creating LinearCombinationOfGates instance explicitly, one
    may use overloaded arithmetic operators. For example,

        cirq.LinearCombinationOfGates({cirq.X: 2, cirq.Z: -2})

    is equivalent to

        2 * cirq.X - 2 * cirq.Z
    """

    def __init__(self, terms: Mapping[raw_types.Gate, value.Scalar]) -> None:
        """Initializes linear combination from a collection of terms.

        Args:
            terms: Mapping of gates to coefficients in the linear
                combination being initialized.
        """
        super().__init__(terms, validator=self._is_compatible)

    def num_qubits(self) -> Optional[int]:
        """Returns number of qubits in the domain if known, None if unknown."""
        if not self:
            return None
        return next(iter(self)).num_qubits()

    def _is_compatible(self, gate: 'cirq.Gate') -> bool:
        # Every gate in the combination must act on the same number of
        # qubits; an empty combination accepts any gate.
        expected = self.num_qubits()
        return expected is None or expected == gate.num_qubits()

    @staticmethod
    def _coerce(other: Union[raw_types.Gate, 'LinearCombinationOfGates']
               ) -> 'LinearCombinationOfGates':
        """Wraps a bare gate into a one-term linear combination."""
        if isinstance(other, LinearCombinationOfGates):
            return other
        return other.wrap_in_linear_combination()

    def __add__(self,
                other: Union[raw_types.Gate, 'LinearCombinationOfGates']
               ) -> 'LinearCombinationOfGates':
        return super().__add__(self._coerce(other))

    def __iadd__(self,
                 other: Union[raw_types.Gate, 'LinearCombinationOfGates']
                ) -> 'LinearCombinationOfGates':
        return super().__iadd__(self._coerce(other))

    def __sub__(self,
                other: Union[raw_types.Gate, 'LinearCombinationOfGates']
               ) -> 'LinearCombinationOfGates':
        return super().__sub__(self._coerce(other))

    def __isub__(self,
                 other: Union[raw_types.Gate, 'LinearCombinationOfGates']
                ) -> 'LinearCombinationOfGates':
        return super().__isub__(self._coerce(other))

    def __pow__(self, exponent: int) -> 'LinearCombinationOfGates':
        # Powers are only supported for non-negative integer exponents of
        # single-qubit combinations over the Pauli basis {I, X, Y, Z}.
        if not isinstance(exponent, int) or exponent < 0:
            return NotImplemented
        if self.num_qubits() != 1:
            return NotImplemented
        basis = (identity.I, pauli_gates.X, pauli_gates.Y, pauli_gates.Z)
        if not set(self.keys()).issubset(set(basis)):
            return NotImplemented
        new_coefficients = operator_spaces.pow_pauli_combination(
            self[basis[0]], self[basis[1]], self[basis[2]], self[basis[3]],
            exponent)
        return LinearCombinationOfGates(dict(zip(basis, new_coefficients)))

    def matrix(self) -> np.ndarray:
        """Reconstructs matrix of self using unitaries of underlying gates.

        Raises:
            TypeError: if any of the gates in self does not provide a unitary.
        """
        n = self.num_qubits()
        if n is None:
            raise ValueError('Unknown number of qubits')
        dim = 2 ** n
        accumulator = np.zeros((dim, dim), dtype=np.complex128)
        for gate, coefficient in self.items():
            accumulator += coefficient * protocols.unitary(gate)
        return accumulator

    def _pauli_expansion_(self) -> value.LinearDict[str]:
        # Sum the Pauli expansions of the individual gates, each scaled by
        # its coefficient.
        expansion = value.LinearDict({})  # type: value.LinearDict[str]
        for gate, coefficient in self.items():
            expansion += protocols.pauli_expansion(gate) * coefficient
        return expansion
class LinearCombinationOfOperations(value.LinearDict[raw_types.Operation]):
    """Represents operator defined by linear combination of gate operations.

    If G1, ..., Gn are gate operations, {q1_1, ..., q1_k1}, {q2_1, ..., q2_k2},
    ..., {qn_1, ..., qn_kn} are (not necessarily disjoint) sets of qubits and
    b1, b2, ..., bn are complex numbers, then

        LinearCombinationOfOperations({
            G1(q1_1, ..., q1_k1): b1,
            G2(q2_1, ..., q2_k2): b2,
            ...,
            Gn(qn_1, ..., qn_kn): bn})

    represents the linear operator

        A = b1 G1(q1_1, ..., q1_k1) +
          + b2 G2(q2_1, ..., q2_k2) +
          + ... +
          + bn Gn(qn_1, ..., qn_kn)

    where in each term qubits not explicitly listed are assumed to be acted on
    by the identity operator. Note that A may not be unitary or even normal.
    """

    def __init__(self,
                 terms: Mapping[raw_types.Operation, value.Scalar]) -> None:
        """Initializes linear combination from a collection of terms.

        Args:
            terms: Mapping of gate operations to coefficients in the linear
                combination being initialized.
        """
        super().__init__(terms, validator=self._is_compatible)

    def _is_compatible(self, operation: 'cirq.Operation') -> bool:
        # The validator only rejects non-Operation keys; operations may act
        # on arbitrary (overlapping or disjoint) sets of qubits.
        return isinstance(operation, raw_types.Operation)

    @property
    def qubits(self) -> Tuple[raw_types.Qid, ...]:
        """Returns qubits acted on self."""
        if not self:
            return ()
        qubit_sets = [set(op.qubits) for op in self.keys()]
        all_qubits = set.union(*qubit_sets)
        return tuple(sorted(all_qubits))

    def __pow__(self, exponent: int) -> 'LinearCombinationOfOperations':
        # Powers are only supported for non-negative integer exponents of
        # single-qubit combinations over the Pauli basis {I, X, Y, Z}.
        if not isinstance(exponent, int):
            return NotImplemented
        if exponent < 0:
            return NotImplemented
        if len(self.qubits) != 1:
            return NotImplemented
        qubit = self.qubits[0]
        i = identity.I(qubit)
        x = pauli_gates.X(qubit)
        y = pauli_gates.Y(qubit)
        z = pauli_gates.Z(qubit)
        pauli_basis = {i, x, y, z}
        if not set(self.keys()).issubset(pauli_basis):
            return NotImplemented
        ai, ax, ay, az = self[i], self[x], self[y], self[z]
        bi, bx, by, bz = operator_spaces.pow_pauli_combination(
            ai, ax, ay, az, exponent)
        return LinearCombinationOfOperations({i: bi, x: bx, y: by, z: bz})

    def matrix(self) -> np.ndarray:
        """Reconstructs matrix of self using unitaries of underlying operations.

        Raises:
            TypeError: if any of the gates in self does not provide a unitary.
        """
        num_qubits = len(self.qubits)
        num_dim = 2**num_qubits
        qubit_to_axis = {q: i for i, q in enumerate(self.qubits)}
        result = np.zeros((2,) * (2 * num_qubits), dtype=np.complex128)
        for op, coefficient in self.items():
            # Fixed: this local was previously named `identity`, shadowing
            # the `cirq.ops.identity` module imported at the top of the file.
            eye_tensor = np.eye(num_dim,
                                dtype=np.complex128).reshape(result.shape)
            workspace = np.empty_like(eye_tensor)
            axes = tuple(qubit_to_axis[q] for q in op.qubits)
            u = protocols.apply_unitary(
                op, protocols.ApplyUnitaryArgs(eye_tensor, workspace, axes))
            result += coefficient * u
        return result.reshape((num_dim, num_dim))

    def _pauli_expansion_(self) -> value.LinearDict[str]:
        """Computes Pauli expansion of self from Pauli expansions of terms."""

        def extend_term(pauli_names: str, qubits: Tuple['cirq.Qid', ...],
                        all_qubits: Tuple['cirq.Qid', ...]) -> str:
            """Extends Pauli product on qubits to product on all_qubits."""
            assert len(pauli_names) == len(qubits)
            qubit_to_pauli_name = dict(zip(qubits, pauli_names))
            return ''.join(qubit_to_pauli_name.get(q, 'I') for q in all_qubits)

        def extend(expansion: value.LinearDict[str],
                   qubits: Tuple['cirq.Qid', ...],
                   all_qubits: Tuple['cirq.Qid', ...]) -> value.LinearDict[str]:
            """Extends Pauli expansion on qubits to expansion on all_qubits."""
            return value.LinearDict({
                extend_term(p, qubits, all_qubits): c
                for p, c in expansion.items()
            })

        result = value.LinearDict({})  # type: value.LinearDict[str]
        for op, coefficient in self.items():
            expansion = protocols.pauli_expansion(op)
            extended_expansion = extend(expansion, op.qubits, self.qubits)
            result += extended_expansion * coefficient
        return result
def _is_linear_dict_of_unit_pauli_string(
        linear_dict: value.LinearDict[UnitPauliStringT]) -> bool:
    """Checks that linear_dict is a LinearDict whose every key is a frozenset
    of (Qid, Pauli) pairs."""
    if not isinstance(linear_dict, value.LinearDict):
        return False
    # Short-circuits on the first key (or pair) of the wrong type.
    return all(
        isinstance(key, frozenset) and all(
            isinstance(qid, raw_types.Qid) and
            isinstance(pauli, pauli_gates.Pauli) for qid, pauli in key)
        for key in linear_dict.keys())
def _pauli_string_from_unit(unit: UnitPauliStringT,
                            coefficient: Union[int, float, complex] = 1):
    """Converts a unit Pauli string (frozenset of pairs) into a PauliString."""
    qubit_pauli_map = dict(unit)
    return PauliString(qubit_pauli_map=qubit_pauli_map,
                       coefficient=coefficient)
@value.value_equality(approximate=True)
class PauliSum:
"""Represents operator defined by linear combination of PauliStrings.
Since PauliStrings store their own coefficients, this class
does not implement the LinearDict interface. Instead, you can
add and subtract terms and then iterate over the resulting
(simplified) expression.
Under the hood, this class is backed by a LinearDict with coefficient-less
PauliStrings as keys. PauliStrings are reconstructed on-the-fly during
iteration.
"""
    def __init__(
            self,
            linear_dict: Optional[value.LinearDict[UnitPauliStringT]] = None):
        """Initializes a PauliSum from a LinearDict of unit Pauli strings.

        Args:
            linear_dict: A LinearDict keyed by frozensets of (qubit, Pauli)
                pairs, whose values are the term coefficients.  Defaults to
                an empty sum.

        Raises:
            ValueError: if linear_dict is not of the expected key type.
        """
        # Default to an empty sum when no terms are supplied.
        if linear_dict is None:
            linear_dict = value.LinearDict()
        # Coefficients live in the LinearDict values; keys must be
        # coefficient-less unit Pauli strings.
        if not _is_linear_dict_of_unit_pauli_string(linear_dict):
            raise ValueError(
                "PauliSum constructor takes a LinearDict[UnitPauliStringT]. "
                "Consider using PauliSum.from_pauli_strings() or adding and "
                "subtracting PauliStrings")
        self._linear_dict = linear_dict
    def _value_equality_values_(self):
        # Equality and hashing (provided by @value.value_equality) are
        # delegated entirely to the underlying LinearDict of terms.
        return self._linear_dict
@staticmethod
def wrap(val: PauliSumLike) -> 'PauliSum':
if isinstance(val, PauliSum):
return val
return PauliSum() + val
@classmethod
def from_pauli_strings(cls, terms: Union[PauliString, List[PauliString]]
) -> 'PauliSum':
if isinstance(terms, PauliString):
terms = [terms]
termdict: DefaultDict[UnitPauliStringT, value.Scalar] = defaultdict(
lambda: 0)
for pstring in terms:
key = frozenset(pstring._qubit_pauli_map.items())
termdict[key] += pstring.coefficient
return cls(linear_dict=value.LinearDict(termdict))
@property
def qubits(self) -> Tuple[raw_types.Qid, ...]:
qs = {q for k in self._linear_dict.keys() for q, _ in k}
return tuple(sorted(qs))
def copy(self) -> 'PauliSum':
factory = type(self)
return factory(self._linear_dict.copy())
def expectation_from_wavefunction(self,
state: np.ndarray,
qubit_map: Mapping[raw_types.Qid, int],
*,
atol: float = 1e-7,
check_preconditions: bool = True
) -> float:
"""Evaluate the expectation of this PauliSum given a wavefunction.
See `PauliString.expectation_from_wavefunction`.
Args:
state: An array representing a valid wavefunction.
qubit_map: A map from all qubits used in this PauliSum to the
indices of the qubits that `state` is defined over.
| |
<gh_stars>0
#mv.py
import itertools
import copy
import numbers
import operator
from compiler.ast import flatten
from operator import itemgetter, mul, add
from itertools import combinations
#from numpy.linalg import matrix_rank
from sympy import Symbol, Function, S, expand, Add, Mul, Pow, Basic, \
sin, cos, sinh, cosh, sqrt, trigsimp, \
simplify, diff, Rational, Expr, Abs, collect, combsimp
from sympy import N as Nsympy
import printer
import metric
import ga
ONE = S(1)
ZERO = S(0)
half = Rational(1, 2)
modules = \
"""
from sympy import symbols, sin, Function
from mv import Mv
from ga import Ga, half
from printer import Eprint, xdvi
from lt import Lt
"""
########################### Multivector Class ##########################
class Mv(object):
"""
Wrapper class for multivector objects (self.obj) so that it is easy
to overload operators (*,^,|,<,>) for the various multivector
products and for printing. Also provides an __init__ fuction to
easily instanciate multivector objects. Additionally, the functionality
of the multivector derivative have been added via the special vector
'grad' so that one can take the geometric derivative of a multivector
function 'A' by applying 'grad' from the left, 'grad*A', or the
right 'A*grad' for both the left and right derivatives. The operator
between the 'grad' and the 'A' can be any of the multivector product
operators.
If 'f' is a scalar function 'grad*f' is the usual gradient of a function.
If 'A' is a vector function 'grad|f' is the divergence of 'A' and
'-I*(grad^A)' is the curl of 'A' (I is the pseudo scalar for the geometric
algebra)
"""
################### Multivector initialization #####################
fmt = 1
latex_flg = False
restore = False
init_slots = {'f': (False, 'True if function of coordinates'),
'ga': (None, 'Geometric algebra to be used with multivectors'),
'coords': (None, 'Coordinates to be used with multivector function'),
'recp': (None, 'Normalization for reciprocal vector')}
    @staticmethod
    def setup(ga):
        """
        Set up constant multivectors required for the multivector class for
        a given geometric algebra, 'ga'.

        Returns the default pseudoscalar I, the list of basis vectors and a
        test vector x.
        """
        Mv.fmt = 1
        basis = [Mv(x, ga=ga) for x in ga.basis]
        I = Mv(ga.iobj, ga=ga)  # default pseudoscalar
        x = Mv('XxXx', 'vector', ga=ga)  # testing vectors
        # return default basis vectors and grad vector if coords defined
        return I, basis, x
@staticmethod
def get_Ga(name):
return(Mv.ga[name])
    @staticmethod
    def Format(mode=1):
        # Class-wide print switches: enable LaTeX output and set the print
        # format mode for all multivectors.
        Mv.latex_flg = True
        Mv.fmt = mode
        return
@staticmethod
def Mul(A, B, op):
if not isinstance(A, Mv):
A = B.Ga.mv(A)
if not isinstance(B, Mv):
B = A.Ga.mv(B)
if op == '*':
return A * B
elif op == '^':
return A ^ B
elif op == '|':
return A | B
elif op == '<':
return A < B
elif op == '>':
print 'A > B =', A, B
return A > B
else:
raise ValeError('Operation ' + op + 'not allowed in Mv.Mul!')
return
    def characterise_Mv(self):
        """
        Determine the grade structure of this multivector: populate
        self.grades (grades present), self.i_grade (the grade when the
        multivector is pure, else None) and self.is_blade_rep.  The result
        is cached via the self.char_Mv flag.
        """
        if self.char_Mv:
            return
        obj = self.obj
        # A plain Python number is a pure scalar (grade 0).
        # NOTE(review): the two early returns below do not set
        # self.char_Mv, so scalars are re-characterised on every call —
        # confirm this is intentional (it is cheap, but inconsistent).
        if isinstance(obj, numbers.Number):
            self.i_grade = 0
            self.is_blade_rep = True
            self.grades = [0]
            return
        # A commutative sympy expression contains no basis blades, so it is
        # also a pure scalar.
        if obj.is_commutative:
            self.i_grade = 0
            self.is_blade_rep = True
            self.grades = [0]
            return
        # Examine each additive term; a lone term is treated as a one-term
        # sum.
        if isinstance(obj, Add):
            args = obj.args
        else:
            args = [obj]
        grades = []
        self.is_blade_rep = True
        for term in args:
            if term.is_commutative:
                if 0 not in grades:
                    grades.append(0)
            else:
                # Split the term into commutative coefficients (c) and the
                # non-commutative basis factor (nc).
                c, nc = term.args_cnc(split_1=False)
                blade = nc[0]
                if blade in self.Ga.blades_lst:
                    grade = self.Ga.blades_to_grades_dict[blade]
                    if not grade in grades:
                        grades.append(grade)
                else:
                    # Not a recognised blade: the multivector is in the base
                    # representation and has no well-defined grade list.
                    self.char_Mv = True
                    self.is_blade_rep = False
                    self.i_grade = None
                    return
        # A single distinct grade means the multivector is pure.
        if len(grades) == 1:
            self.i_grade = grades[0]
        else:
            self.i_grade = None
        self.grades = grades
        self.char_Mv = True
        return
    def make_grade(self, *kargs, **kwargs):  # Make a pure grade multivector
        """
        Populate self.obj with a pure multivector of grade kargs[1].

        kargs[0] may be a name string (symbolic coefficients are generated
        for every blade of the grade) or a list/tuple of explicit
        coefficients.  The 'f' keyword selects plain symbols (False),
        functions of the algebra's coordinates (True) or functions of the
        supplied argument (anything else).
        """
        grade = kargs[1]
        self.i_grade = grade
        if isinstance(kargs[0],str):
            # Symbolic coefficients named "<root>__<superscript>".
            root = kargs[0] + '__'
            if isinstance(kwargs['f'], bool) and not kwargs['f']:  # Not a multivector function
                self.obj = sum([Symbol(root + super_script, real=True) * base
                    for (super_script, base) in zip(self.Ga.blade_super_scripts[grade], self.Ga.blades[grade])])
            else:
                if isinstance(kwargs['f'], bool):  # Is a multivector function
                    self.obj = sum([Function(root + super_script, real=True)(*self.Ga.coords) * base
                        for (super_script, base) in zip(self.Ga.blade_super_scripts[grade], self.Ga.blades[grade])])
                else:
                    # 'f' is itself the function argument.
                    self.obj = sum([Function(root + super_script, real=True)(kwargs['f']) * base
                        for (super_script, base) in zip(self.Ga.blade_super_scripts[grade], self.Ga.blades[grade])])
        else:
            if isinstance(kargs[0],(list,tuple)):
                # Explicit coefficients, zipped against the blades of the
                # grade (extra blades get no term).
                if len(kargs[0]) <= len(self.Ga.blades[grade]):
                    self.obj = sum([coef * base
                        for (coef, base) in zip(kargs[0], self.Ga.blades[grade][:len(kargs[0])])])
                else:
                    # NOTE(review): too many coefficients is silently
                    # ignored and self.obj is left unset — confirm whether
                    # this should raise instead.
                    pass
            else:
                # NOTE(review): unsupported kargs[0] type is also silently
                # ignored — confirm intentional.
                pass
        return
    def make_scalar(self, *kargs, **kwargs):
        # Build a grade-0 (scalar) multivector.  A string argument becomes a
        # symbolic coefficient (a Function of the coordinates when the 'f'
        # flag is truthy); anything else is used as the scalar value as-is.
        if isinstance(kargs[0],str):
            if 'f' in kwargs and kwargs['f']:
                self.obj = Function(kargs[0])(*self.Ga.coords)
            else:
                self.obj = Symbol(kargs[0], real=True)
        else:
            self.obj = kargs[0]
        return
    def make_vector(self, *kargs, **kwargs):
        # A vector is a pure grade-1 multivector.
        self.make_grade(*(kargs[0], 1), **kwargs)
        return
    def make_bivector(self, *kargs, **kwargs):
        # A bivector is a pure grade-2 multivector (also registered under
        # the 'grade2' key in init_dict).
        self.make_grade(*(kargs[0], 2), **kwargs)
        return
    def make_pseudo_scalar(self, *kargs, **kwargs):
        # The pseudoscalar is the pure top-grade (grade n) multivector of
        # the algebra.
        self.make_grade(*(kargs[0], self.Ga.n), **kwargs)
        return
    def make_multivector(self, *kargs, **kwargs):
        # General multivector: the scalar part plus every grade 1..n,
        # accumulated into a single sympy expression.
        self.make_scalar(kargs[0], **kwargs)
        tmp = self.obj
        for grade in self.Ga.n_range:
            self.make_grade(*(kargs[0], grade + 1), **kwargs)
            tmp += self.obj
        self.obj = tmp
        return
    def make_spinor(self, *kargs, **kwargs):
        # Even multivector (spinor): the scalar (grade-0) part plus the even
        # grades 2, 4, ... only.
        self.make_scalar(kargs[0], **kwargs)
        tmp = self.obj
        for grade in self.Ga.n_range:
            if (grade + 1) % 2 == 0:
                self.make_grade(*(kargs[0], grade + 1), **kwargs)
                tmp += self.obj
        self.obj = tmp
        return
def make_odd(self, *kargs, **kwargs):
self.make_scalar(kargs[0], **kwargs)
tmp = S(0)
for grade in self.Ga.n_range:
if (grade + 1) % 2 == 1:
self.make_grade(*(kargs[0], grade + 1), **kwargs)
tmp += self.obj
self.obj = tmp
return
init_dict = {'scalar': make_scalar,
'vector': make_vector,
'bivector': make_bivector,
'grade2': make_bivector,
'pseudo': make_pseudo_scalar,
'mv': make_multivector,
'spinor': make_spinor,
'even': make_spinor,
'odd': make_odd,
'grade': make_grade}
def __init__(self, *kargs, **kwargs):
if 'ga' not in kwargs:
raise ValueError("Geometric algebra key inplut 'ga' required")
kwargs = metric.test_init_slots(Mv.init_slots, **kwargs)
self.Ga = kwargs['ga']
self.recp = kwargs['recp'] # Normalization for reciprocal vectors
self.char_Mv = False
self.i_grade = None # if pure grade mv, grade value
self.grades = None # list of grades in mv
self.is_blade_rep = True # flag for blade representation
self.blade_flg = None # if is_blade is called flag is set
self.versor_flg = None # if is_versor is called flag is set
self.coords = self.Ga.coords
self.fmt = 1
self.title = None
if len(kargs) == 0: # default constructor 0
self.obj = S(0)
self.i_grade = 0
elif len(kargs) == 1 and not isinstance(kargs[0], str): # copy constructor
x = kargs[0]
if isinstance(x, Mv):
self.obj = x.obj
self.is_blade_rep = x.is_blade_rep
self.i_grade = x.i_grade
else:
if isinstance(x, Expr):
self.obj = x
else:
self.obj = S(x)
self.is_blade_rep = True
self.characterise_Mv()
else:
if kargs[1] not in Mv.init_dict:
raise ValueError('"' + kargs[1] + '" not an allowed multivector type.')
mode = kargs[1]
kargs = [kargs[0]] + list(kargs[2:])
Mv.init_dict[mode](self, *kargs, **kwargs)
if isinstance(kargs[0],str):
self.title = kargs[0]
self.characterise_Mv()
################# Multivector member functions #####################
    def reflect_in_blade(self, blade):  # Reflect mv in blade
        """
        Return the reflection of this multivector in 'blade': each pure
        grade part X_r contributes +/- blade * X_r * blade**-1, the sign
        being determined by the parity of r * (blade_grade + 1).
        """
        self.characterise_Mv()
        blade.characterise_Mv()
        # blade**-1 = reverse(blade) / |blade|**2
        blade_inv = blade.rev() / blade.norm2()
        # Decompose self into pure-grade parts and reflect each separately.
        grade_dict = self.Ga.grade_decomposition(self)
        blade_grade = blade.i_grade
        reflect = Mv(0,'scalar',ga=self.Ga)
        for grade in grade_dict.keys():
            if (grade * (blade_grade + 1)) % 2 == 0:
                reflect += blade * grade_dict[grade] * blade_inv
            else:
                reflect -= blade * grade_dict[grade] * blade_inv
        return reflect
def base_rep(self):
if self.is_blade_rep:
self.obj = self.Ga.blade_to_base_rep(self.obj)
self.is_blade_rep = False
return self
else:
return self
def blade_rep(self):
if self.is_blade_rep:
return self
else:
self.obj = self.Ga.base_to_blade_rep(self.obj)
self.is_blade_rep = True
return self
    def __eq__(self, A):
        """
        Structural equality: compare expanded coefficients blade-by-blade.
        Against a non-multivector, only a scalar Mv can compare equal.
        """
        if not isinstance(A, Mv):
            if not self.is_scalar():
                return False
            if expand(self.obj) == expand(A):
                return True
            else:
                return False
        # Bring both operands into the blade representation before
        # comparing.
        # NOTE(review): blade_rep() mutates in place as well as returning
        # self, so this also converts the original operands — confirm
        # intentional.
        if self.is_blade_rep != A.is_blade_rep:
            self = self.blade_rep()
            A = A.blade_rep()
        # Expand each side into (coefficient, basis-blade) lists and compare
        # the coefficient of every common blade.
        coefs, bases = metric.linear_expand(self.obj)
        Acoefs, Abases = metric.linear_expand(A.obj)
        if len(bases) != len(Abases):
            return False
        if set(bases) != set(Abases):
            return False
        for base in bases:
            index = bases.index(base)
            indexA = Abases.index(base)
            if expand(coefs[index]) != expand(Acoefs[index]):
                return False
        return True
    def __neg__(self):
        # Unary minus: negate the underlying sympy expression.
        return Mv(-self.obj, ga=self.Ga)
    def __add__(self, A):
        """
        Multivector addition.  Numbers/expressions are added directly to the
        underlying obj; Dop operands (differential operators, defined
        elsewhere in this module) are delegated to Dop.Add; mixed
        blade/base representations are unified before adding.
        """
        if (not isinstance(A, Mv)) and (not isinstance(A, Dop)):
            return Mv(self.obj + A, ga=self.Ga)
        # Operands must belong to the same geometric algebra.
        if self.Ga.name != A.Ga.name:
            raise ValueError('In + operation Mv arguments are not from same geometric algebra')
        if isinstance(A, Dop):
            return Dop.Add(A, self)
        if self.is_blade_rep == A.is_blade_rep:
            return Mv(self.obj + A.obj, ga=self.Ga)
        else:
            # Convert the operand in base representation to blade
            # representation so the two expressions are compatible.
            if self.is_blade_rep:
                A = A.blade_rep()
            else:
                self = self.blade_rep()
            return Mv(self.obj + A.obj, ga=self.Ga)
    def __radd__(self, A):
        # Addition commutes here, so delegate to __add__.
        return(self + A)
    def __add_ab__(self, A):  # self += A
        # In-place addition helper: mutates self.obj and invalidates the
        # cached grade characterisation before recomputing it.
        self.obj += A.obj
        self.char_Mv = False
        self.characterise_Mv()
        return(self)
def __sub__(self, A):
if (not isinstance(A, Mv)) and (not isinstance(A, Dop)):
return Mv(self.obj - A, ga=self.Ga)
if self.Ga.name != A.Ga.name:
raise ValueError('In - operation Mv arguments are not from same geometric algebra')
if isinstance(A, Dop):
return Dop.Add(self, -A)
if self.is_blade_rep == A.is_blade_rep:
return Mv(self.obj - A.obj, ga=self.Ga)
else:
if self.is_blade_rep:
A = | |
tapes contain WireCut operations."
)
return tape
def _cut_circuit_expand(
    tape: QuantumTape,
    use_opt_einsum: bool = False,
    device_wires: Optional[Wires] = None,
    max_depth: int = 1,
    auto_cutter: Union[bool, Callable] = False,
    **kwargs,
):
    """Main entry point for expanding operations until reaching a depth that
    includes :class:`~.WireCut` operations.

    Args:
        tape (QuantumTape): the tape to expand
        use_opt_einsum (bool): accepted so the signature mirrors
            ``cut_circuit``; unused during expansion
        device_wires (Wires): accepted for signature compatibility; unused
        max_depth (int): maximum expansion depth forwarded to the helper
        auto_cutter (bool or Callable): automatic cut-placement setting
            forwarded to the helper

    Returns:
        QuantumTape: the expanded tape produced by ``_qcut_expand_fn``
    """
    # pylint: disable=unused-argument
    return _qcut_expand_fn(tape, max_depth, auto_cutter)
def _cut_circuit_mc_expand(
    tape: QuantumTape,
    classical_processing_fn: Optional[callable] = None,
    max_depth: int = 1,
    shots: Optional[int] = None,
    device_wires: Optional[Wires] = None,
    auto_cutter: Union[bool, Callable] = False,
    **kwargs,
):
    """Expand-function hook for ``cut_circuit_mc`` (sample-based tapes): expands
    operations until a depth containing :class:`~.WireCut` operations is reached.

    The extra arguments mirror ``cut_circuit_mc``'s signature and are
    intentionally unused here.
    """
    # pylint: disable=unused-argument, too-many-arguments
    expanded_tape = _qcut_expand_fn(tape, max_depth, auto_cutter)
    return expanded_tape
# Register the expansion hooks on the cut transforms so tapes are expanded
# (until WireCut operations become visible) before the cutting logic runs.
cut_circuit.expand_fn = _cut_circuit_expand
cut_circuit_mc.expand_fn = _cut_circuit_mc_expand
def remap_tape_wires(tape: QuantumTape, wires: Sequence) -> QuantumTape:
    """Return a copy of ``tape`` whose operations and measurements act on the
    first ``len(tape.wires)`` labels of ``wires``.

    The input tape is left unmodified; its wires are mapped, in order, onto the
    supplied labels. Designed for use in the circuit-cutting workflow — see the
    :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform.

    Args:
        tape (QuantumTape): the quantum tape whose wires should be remapped
        wires (Sequence): the new set of wires to map to

    Returns:
        QuantumTape: A remapped copy of the input tape

    Raises:
        ValueError: if the number of wires in ``tape`` exceeds ``len(wires)``

    **Example**

    >>> with qml.tape.QuantumTape() as tape:
    ...     qml.RX(0.5, wires=2)
    ...     qml.RY(0.6, wires=3)
    ...     qml.CNOT(wires=[2, 3])
    ...     qml.expval(qml.PauliZ(2) @ qml.PauliZ(3))
    >>> new_tape = qml.transforms.qcut.remap_tape_wires(tape, [0, 1])
    """
    if len(tape.wires) > len(wires):
        raise ValueError(
            f"Attempting to run a {len(tape.wires)}-wire circuit on a "
            f"{len(wires)}-wire device. Consider increasing the number of wires in "
            f"your device."
        )

    wire_map = dict(zip(tape.wires, wires))
    remapped_ops = [copy.copy(op) for op in tape.operations]
    remapped_meas = [copy.copy(m) for m in tape.measurements]

    def _remap(obj):
        # Mutating the private ``_wires`` attribute is safe here because ``obj``
        # is a fresh copy owned by this function.
        obj._wires = Wires([wire_map[w] for w in obj.wires])

    with QuantumTape() as new_tape:
        for op in remapped_ops:
            _remap(op)
            apply(op)

        for meas in remapped_meas:
            obs = meas.obs
            if isinstance(obs, Tensor):
                # Remap each factor of the tensor-product observable.
                for factor in obs.obs:
                    _remap(factor)
            else:
                _remap(obs)
            apply(meas)

    return new_tape
@dataclass()
class CutStrategy:
"""
A circuit-cutting distribution policy for executing (large) circuits on available (comparably
smaller) devices.
.. note::
This class is part of a work-in-progress feature to support automatic cut placement in the
circuit cutting workflow. Currently only manual placement of cuts is supported,
check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.
Args:
devices (Union[qml.Device, Sequence[qml.Device]]): Single, or Sequence of, device(s).
Optional only when ``max_free_wires`` is provided.
max_free_wires (int): Number of wires for the largest available device. Optional only when
``devices`` is provided where it defaults to the maximum number of wires among
``devices``.
min_free_wires (int): Number of wires for the smallest available device, or, equivalently,
the smallest max fragment-wire-size that the partitioning is allowed to explore.
When provided, this parameter will be used to derive an upper-bound to the range of
explored number of fragments. Optional, defaults to 2 which corresponds to attempting
the most granular partitioning of max 2-wire fragments.
num_fragments_probed (Union[int, Sequence[int]]): Single, or 2-Sequence of, number(s)
specifying the potential (range of) number of fragments for the partitioner to attempt.
Optional, defaults to probing all valid strategies derivable from the circuit and
devices. When provided, has precedence over all other arguments affecting partitioning
exploration, such as ``max_free_wires``, ``min_free_wires``, or ``exhaustive``.
max_free_gates (int): Maximum allowed circuit depth for the deepest available device.
Optional, defaults to unlimited depth.
min_free_gates (int): Maximum allowed circuit depth for the shallowest available device.
Optional, defaults to ``max_free_gates``.
imbalance_tolerance (float): The global maximum allowed imbalance for all partition trials.
Optional, defaults to unlimited imbalance. Used only if there's a known hard balancing
constraint on the partitioning problem.
trials_per_probe (int): Number of repeated partitioning trials for a random automatic
cutting method to attempt per set of partitioning parameters. For a deterministic
cutting method, this can be set to 1. Defaults to 4.
**Example**
The following cut strategy specifies that a circuit should be cut into between
``2`` to ``5`` fragments, with each fragment having at most ``6`` wires and
at least ``4`` wires:
>>> cut_strategy = qml.transforms.CutStrategy(
... max_free_wires=6,
... min_free_wires=4,
... num_fragments_probed=(2, 5),
... )
"""
# pylint: disable=too-many-arguments, too-many-instance-attributes
#: Initialization argument only, used to derive ``max_free_wires`` and ``min_free_wires``.
devices: InitVar[Union[qml.Device, Sequence[qml.Device]]] = None
#: Number of wires for the largest available device.
max_free_wires: int = None
#: Number of wires for the smallest available device.
min_free_wires: int = None
#: The potential (range of) number of fragments for the partitioner to attempt.
num_fragments_probed: Union[int, Sequence[int]] = None
#: Maximum allowed circuit depth for the deepest available device.
max_free_gates: int = None
#: Maximum allowed circuit depth for the shallowest available device.
min_free_gates: int = None
#: The global maximum allowed imbalance for all partition trials.
imbalance_tolerance: float = None
#: Number of trials to repeat for per set of partition parameters probed.
trials_per_probe: int = 4
#: Class attribute, threshold for warning about too many fragments.
HIGH_NUM_FRAGMENTS: ClassVar[int] = 20
#: Class attribute, threshold for warning about too many partition attempts.
HIGH_PARTITION_ATTEMPTS: ClassVar[int] = 20
def __post_init__(
    self,
    devices,
):
    """Derive cutting constraints from the given devices and parameters.

    Args:
        devices: single device or sequence of devices (a dataclass ``InitVar``);
            used to derive ``max_free_wires``/``min_free_wires`` when those are
            not provided explicitly.

    Raises:
        ValueError: if neither ``devices`` nor ``max_free_wires`` is given, if
            ``devices`` is of the wrong type, if ``num_fragments_probed``
            contains a non-positive value, or if ``imbalance_tolerance`` is
            negative.
    """
    # Normalise a single int into a list, then derive the probed k-range.
    if isinstance(self.num_fragments_probed, int):
        self.num_fragments_probed = [self.num_fragments_probed]

    if isinstance(self.num_fragments_probed, (list, tuple)):
        self.num_fragments_probed = sorted(self.num_fragments_probed)
        self.k_lower = self.num_fragments_probed[0]
        self.k_upper = self.num_fragments_probed[-1]
        if self.k_lower <= 0:
            raise ValueError("`num_fragments_probed` must be positive int(s)")
    else:
        self.k_lower, self.k_upper = None, None

    if devices is None and self.max_free_wires is None:
        # Fixed message typo: the original was missing the opening backtick
        # around `max_free_wires`.
        raise ValueError("One of arguments `devices` and `max_free_wires` must be provided.")

    if isinstance(devices, qml.Device):
        devices = (devices,)

    if devices is not None:
        if not isinstance(devices, SequenceType) or any(
            (not isinstance(d, qml.Device) for d in devices)
        ):
            raise ValueError(
                "Argument `devices` must be a list or tuple containing elements of type "
                "`qml.Device`"
            )

        device_wire_sizes = [len(d.wires) for d in devices]

        # Fall back to the largest/smallest device sizes when not set explicitly.
        self.max_free_wires = self.max_free_wires or max(device_wire_sizes)
        self.min_free_wires = self.min_free_wires or min(device_wire_sizes)

    if (self.imbalance_tolerance is not None) and not (
        isinstance(self.imbalance_tolerance, (float, int)) and self.imbalance_tolerance >= 0
    ):
        raise ValueError(
            "The overall `imbalance_tolerance` is expected to be a non-negative number, "
            f"got {type(self.imbalance_tolerance)} with value {self.imbalance_tolerance}."
        )

    # Guarantee a sane lower bound even when no devices were supplied.
    # (The original also contained a no-op `self.max_free_wires = self.max_free_wires`,
    # removed here.)
    self.min_free_wires = self.min_free_wires or 1
def get_cut_kwargs(
self,
tape_dag: MultiDiGraph,
max_wires_by_fragment: Sequence[int] = None,
max_gates_by_fragment: Sequence[int] = None,
exhaustive: bool = True,
) -> List[Dict[str, Any]]:
"""Derive the complete set of arguments, based on a given circuit, for passing to a graph
partitioner.
Args:
tape_dag (nx.MultiDiGraph): Graph representing a tape, typically the output of
:func:`tape_to_graph`.
max_wires_by_fragment (Sequence[int]): User-predetermined list of wire limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
max_gates_by_fragment (Sequence[int]): User-predetermined list of gate limits by
fragment. If supplied, the number of fragments will be derived from it and
exploration of other choices will not be made.
exhaustive (bool): Toggle for an exhaustive search which will attempt all potentially
valid numbers of fragments into which the circuit is partitioned. If ``True``,
for a circuit with N gates, N - 1 attempts will be made with ``num_fragments``
ranging from [2, N], i.e. from bi-partitioning to complete partitioning where each
fragment has exactly a single gate. Defaults to ``True``.
Returns:
List[Dict[str, Any]]: A list of minimal kwargs being passed to a graph
partitioner method.
**Example**
Deriving kwargs for a given circuit and feeding them to a custom partitioner, along with
extra parameters specified using ``extra_kwargs``:
>>> cut_strategy = qcut.CutStrategy(devices=dev)
>>> cut_kwargs = cut_strategy.get_cut_kwargs(tape_dag)
>>> cut_trials = [
... my_partition_fn(tape_dag, **kwargs, **extra_kwargs) for kwargs in cut_kwargs
... ]
"""
tape_wires = set(w for _, _, w in tape_dag.edges.data("wire"))
num_tape_wires = | |
<reponame>Shadybloom/amber-in-the-dark
#----
# Заметки:
# Список фабрик и заводов Российской империи (1907-1909 годы):
# http://istmat.info/node/26498
#----
# Фабрики (обмолот)
metadict_detail['_Обмолот зерновых (килограмм)'] = {
# TODO: Сначала обмолот, затем веялка.
# обмолот растений на молотилке.
# Молотилка (4 кВт, 12 рабочих) -- 6000 килограмм/день (сезон 60 дней/год)
'_-Обмолот растений фабричный (килограмм)':1,
'_Молотилка (годовой оборот)':1 / 360000,
}
metadict_detail['_Обмолот масличных культур (килограмм)'] = {
'_-Обмолот растений фабричный (килограмм)':1,
'_Молотилка (годовой оборот)':1 / 360000,
}
metadict_detail['_Обмолот пряных растений (килограмм)'] = {
'_-Обмолот растений фабричный (килограмм)':1,
'_Молотилка (годовой оборот)':1 / 360000,
}
metadict_detail['_Чистка и сушка бобов (килограмм)'] = {
'_-Обмолот растений фабричный (килограмм)':1,
'_Молотилка (годовой оборот)':1 / 360000,
}
#----
# Factories (mills)
metadict_detail['_Помол зерна (килограмм)'] = {
    # Output: 5 tonnes/day, 1000 tonnes/year (supplies a canton)
    # https://ru.wikisource.org/wiki/ЭСБЕ/Мельницы
    # Example #2: 100 tonnes/day, 30 000 tonnes/year (supplies a district)
    # 13 workers per shift, 39 workers over 3 shifts/day (0.00312 man-hours/kg of flour)
    # Engine of 150-185 kilowatts.
    # https://istmat.info/node/27392
    '_-Помол зерна (килограмм)':1,
    '_Мельница (годовой оборот)':1 / 1000000,
    }
metadict_detail['_Помол крупы (килограмм)'] = {
    # Milling of groats.
    '_-Помол крупы (килограмм)':1,
    '_Мельница (годовой оборот)':1 / 1000000,
    }
metadict_detail['_Чистка и дробление зерна (килограмм)'] = {
    # Cleaning and crushing of grain.
    '_-Чистка и дробление зерна (килограмм)':1,
    '_Мельница (годовой оборот)':1 / 1000000,
    }
metadict_detail['_Чистка и шлифование зерна (килограмм)'] = {
    # Cleaning and polishing of grain.
    '_-Чистка и шлифование зерна (килограмм)':1,
    '_Мельница (годовой оборот)':1 / 1000000,
    }
metadict_detail['_Чистка и обжаривание зерна (килограмм)'] = {
    # Cleaning and roasting of grain.
    '_-Чистка и обжаривание зерна (килограмм)':1,
    '_Мельница (годовой оборот)':1 / 1000000,
    }
#----
# Factories (semi-finished goods)
metadict_detail['_Производство пшеничного хлеба (килограмм)'] = {
    # Cold fermentation, 48 hours; proofing 6 hours; baking 25 minutes.
    # https://arborio.ru/xleb-na-zakvaske-prostoj-kak-1-2-3/
    # https://ru.wikisource.org/wiki/ЭСБЕ/Хлебопечение
    # https://ru.wikisource.org/wiki/ЭСБЕ/Хлебопекарные_печи_полевые
    # (Author name redacted.) "Bread-baking technology from kneading to baking"
    # https://hlebopechka.ru/index.php?option=com_smf&topic=171525
    # 400 000 kg/year, three 2-cubic-metre hearths and a dough-kneading machine.
    '_-Очистка муки на сетчатом барабане (килограмм)':0.625 * 1.15 / 0.95,
    '_-Вымешивание теста в тестомесильной машине (килограмм)':1.15,
    '_-Брожение теста в кадке фабричной (килограмм)':1.15,
    '_-Обминка теста в тестомесильной машине (килограмм)':1.15,
    '_-Формовка теста на столах (килограмм)':1.15,
    '_-Расстойка теста в формах (килограмм)':1.15,
    '_-Выпечка хлеба в пекарне (килограмм)':1.15,
    '-Испарение воды в печи (килограмм)':0.15 * 1.15,
    '-Кипячение воды в печи (килограмм)':1 * 1.15,
    '_Хлебозавод (годовой оборот)':1 / 400000,
    }
metadict_detail['_Производство шоколада (килограмм)'] = {
    # TODO: split into individual processes
    # Production line for bar chocolate and cocoa powder:
    # 1) Rolling -- grinding and mixing of powdered sugar and cocoa powder
    # 2) Thinning of the chocolate mass -- liquefying the mix, adding cocoa butter
    # 3) Conching -- stirring the heated mix with access to air (72 hours)
    # 4) Forming the chocolate -- rapid cooling from +45°C to +33°C.
    # https://ru.wikisource.org/wiki/ЭСБЕ/Шоколад
    # https://znaytovar.ru/s/Texnologicheskaya_liniya_proizvod6.html
    # Output: 0.5 tonnes/day, 100 tonnes/year (supplies a district)
    '_-Производство шоколада (килограмм)':1,
    '_Кондитерская фабрика (годовой оборот)':1 / 100000,
    }
metadict_detail['_Производство картофельного крахмала (килограмм)'] = {
    # TODO: finish this
    # Split into individual processes.
    # Processing: 15 tonnes of potatoes in 10 hours:
    # 1) Washing machine -- 25 kg/minute (water: 2 litres/kg)
    # 2) Grating drum -- 40 kg/minute
    # 3) Refining sieve -- 25 kg/minute (water: 5 litres/kg, recirculated)
    # 4) Settling in vats
    # 5) Drying
    # https://ru.wikisource.org/wiki/ЭСБЕ/Крахмальное_производство
    # Output: 0.5 tonnes/day, 100 tonnes/year (supplies a district)
    # https://ru.wikisource.org/wiki/ЭСБЕ/Крахмальное_производство
    # https://znaytovar.ru/s/Texnologicheskaya_liniya_proizvod33.html
    # http://howtogetrid.ru/kak-sdelat-kraxmal-v-domashnix-usloviyax/
    # https://propozitsiya.com/druge-narodzhennya-krohmalnoyi-galuzi-keys-vid-pbp-vimal
    'Картофель (килограмм)':20,
    '_-Переработка картофеля в крахмал (килограмм)':20,
    '_Крахмальный завод (годовой оборот)':1 / 100000,
    }
metadict_detail['_Производство агар-агара (килограмм)'] = {
    # TODO:
    # Verify the seaweed consumption rate
    'Водоросли красные (килограмм)':20,
    '_-Переработка водорослей в агар-агар (килограмм)':20,
    '_Крахмальный завод (годовой оборот)':1 / 100000,
    }
metadict_detail['_Производство тростникового сахара (килограмм)'] = {
    # TODO: split refining into individual processes
    # Chemical refining.
    # Output: 5 tonnes/day, 1000 tonnes/year (supplies a district)
    # Production efficiency: 10 tonnes per worker per year
    # Very rough: from annual turnover and market prices of granulated sugar (1914)
    # "List of factories and plants of the Russian Empire"
    'Сахар-сырец тростниковый (килограмм)':1.1,
    '_-Рафинирование тростникового сахара-сырца (килограмм)':1.1,
    '_Рафинадный завод (годовой оборот)':1 / 1000000,
    }
metadict_detail['_Производство тростникового сахара-сырца (килограмм)'] = {
    # TODO: split boiling-down into individual processes.
    # Cutting, boiling down, drying.
    # Output: 5 tonnes/day, 1000 tonnes/year (supplies a district)
    # Primitive process: of the 10% sucrose in the stalks only 5% sugar is extracted.
    'Сахарный тростник (килограмм)':20,
    'Известь свежегашёная (килограмм)':0.014,
    '_-Переработка сахарного тростника в сахар-сырец (килограмм)':20,
    '_Сахарный завод (годовой оборот)':1 / 1000000,
    }
metadict_detail['_Производство чёрной патоки (килограмм)'] = {
    # TODO: by-product of sugar production.
    'Сахарный тростник (килограмм)':20,
    'Известь свежегашёная (килограмм)':0.014,
    '_-Переработка сахарного тростника в сахар-сырец (килограмм)':20,
    '_Сахарный завод (годовой оборот)':1 / 1000000,
    }
#----
# Фабрики (масло)
# Содержание масла (Российская империя 1900 года):
# | Растение | Жира (%) | Белков | Воды
# | ------------------ | -------------- | ------------- | ----------
# | Рапс озимый* | 43.96 | 19.25 | 5
# | Рапс яровой* | 37.33 | 24.50 | 5
# | Сурепица | 30.90 - 37.11 | 23.13 - 27.00 | 4.8 - 15.6
# | Лён | 32.50 - 34.00 | 23.00 - 24.70 | 6.6 - 7.0
# | Конопля | 30.36 - 34.00 | 23.12 - 24.30 | 5.6 - 10.0
# | Мак | 38.46 - 43.00 | 23.40 - 24.09 | 4.2 - 6.4
# | Лаллеманция | 22.12 - 28.24 | - | 6.63
# | Кунжут | 55.17 | - | 3.63
# * Рапсовое масло -- техническое. Оно токсично без современных методов рафинирования.
# Содержание масла (Европейская торговля 1900 года):
# | Растение | Жира (%)
# | ------------------ | ---------
# | Лён | 37.0
# | Рапс | 42.5
# | Конопля | 33.6
# | Мак | 41.0
# | Magia семя | 38.8
# | Рыжиковое семя | 30.0
# | Подсолнечник* | 23.6
# | Хлопчатник | 30.3
# | Кунжут | 37.0
# | Арахис | 41.2
# | Оливки* | 50 - 70
# * Оливки обезвоженные -- в свежих 15% жира
# https://fdc.nal.usda.gov/fdc-app.html#/food-details/1103679/nutrients
# Подсолнечник с учётом 40% лузги, в самих семенах содержится 33-34% масла.
# - Прессование. В льняных жмыхах остаётся -- 6.6-16.5% масла (11% масла)
# - Прессование. Остатки от прессования оливок -- 12-28% масла (20% масла)
# - Экстракция бензином (ап<NAME>). В экстракционных остатках -- 2% масла
# https://ru.wikisource.org/wiki/ЭСБЕ/Маслобойное_и_маслоэкстракционное_производства
# https://ru.wikisource.org/wiki/ЭСБЕ/Жмыхи
# http://vidkormov.narod.ru/card/n351.html
metadict_detail['_Производство подсолнечного масла (килограмм)'] = {
    # Oil available:
    # - Fat content -- 23.6%.
    # - Pressing yields -- 13%
    # - Extraction yields -- 21%
    # - Remainder -- 2.6%
    # https://ru.wikisource.org/wiki/ЭСБЕ/Подсолнечник
    # https://farmet.com.ua/?page_id=210
    # First pressing, then extraction.
    # Output of 500+ thousand kg/year makes extraction economically effective.
    # Husk is 40% of the sunflower seed.
    '_-Очистка семян на сетчатом барабане (килограмм)':1 / 0.21,
    '_-Лущение семян на вальцовой молотилке (килограмм)':1 / 0.21 * 0.95,
    '_-Помол семян на вальцовой мельнице (килограмм)':1 / 0.21 * 0.55,
    '_-Подогревание муки на паровой сковороде (килограмм)':1 / 0.21 * 0.55,
    '_-Отжим масла из муки гидравлическим прессом (килограмм)':1 / 0.21 * 0.55,
    '_-Экстракция масла из муки сернистым эфиром (килограмм)':1 / 0.21 * 0.42,
    '_-Отстаивание масла в чане (килограмм)':1.07,
    '_-Фильтрование масла паклей и опилками (килограмм)':1.04,
    '_-Рафинирование масла серной кислотой (килограмм)':1.02,
    'Семена подсолнечника (килограмм)':1 / 0.21,
    '+Отруби подсолнечные (доступно/килограмм)':1 / 0.21 * (0.95 - 0.55),
    '+Жмых подсолнечный (доступно/килограмм)':1 / 0.21 * (0.55 - 0.21),
    '_Маслоэкстракционный завод (годовой оборот)':1 / 500000,
    }
metadict_detail['_Производство хлопчатого масла (килограмм)'] = {
# TODO:
# Годится в пищу только рафинированное.
# 30% масла -- твёрдые жиры, подобно какао-маслу.
# Из хлопчатникового масла делают олифу и стеарин.
# Доступно масла:
# - Жира -- 30.3%.
# - Прессование даёт -- 18%
# - Экстракция даёт -- 28%
# - Остаток -- 2.3%
'_-Очистка семян на сетчатом барабане (килограмм)':1 / 0.28,
'_-Лущение семян на вальцовой молотилке (килограмм)':1 / 0.28 * 0.95,
'_-Помол семян на вальцовой мельнице (килограмм)':1 / 0.28 * 0.7,
'_-Подогревание муки на паровой сковороде (килограмм)':1 / 0.28 * 0.7,
'_-Отжим масла из муки гидравлическим прессом (килограмм)':1 / 0.28 * 0.7,
'_-Экстракция масла из муки сернистым эфиром (килограмм)':1 / 0.28 * 0.58,
'_-Отстаивание масла в чане (килограмм)':1.07,
'_-Фильтрование масла паклей и опилками (килограмм)':1.04,
'_-Рафинирование масла серной кислотой (килограмм)':1.02,
'-Семена хлопчатника (требуется/килограмм)':1 / 0.28,
'+Отруби хлопчатниковые (доступно/килограмм)':1 / 0.28 * (0.95 - 0.7),
'+Жмых хлопчатниковый (доступно/килограмм)':1 / 0.28 * (0.7 - 0.28),
'_Маслоэкстракционный завод | |
#!/usr/bin/env python
import numpy as np
import netCDF4 as nc
import pandas as pd
import multiprocessing
import textwrap
import matplotlib.pyplot as plt
import lhsmdu
import glob
import json
import os
import ast
import shutil
import subprocess
from contextlib import contextmanager
import param_util as pu
import output_utils as ou
@contextmanager
def log_wrapper(message, tag=''):
    '''
    Context manager that prints a tagged message on entry and a blank line
    on exit.

    Likely will be abandoned or repurposed; not super helpful as a log printer.
    '''
    print(f'[SA:{tag}] {message}')
    try:
        yield
    finally:
        print()
def generate_uniform(N, param_props):
    '''
    Generate sample matrix using uniform method.

    Sample matrix will have one row for each "sample" of the
    parameters. There will be one column for each parameter in
    the `param_props` list.

    Parameters
    ----------
    N : int
        number of samples (rows) to create
    param_props : list of dicts
        Each item in `param_props` list will be a dictionary
        with at least the following:

        >>> param_props = {
        ...   'name': 'rhq10',        # name in dvmdostem parameter file (cmt_*.txt)
        ...   'bounds': [5.2, 6.4],   # the min and max values the parameter can have
        ... }

    Returns
    -------
    df : pandas.DataFrame, shape (N, len(param_props))
        There will be one column for each parameter in the
        `param_props` list and N rows (samples).
    '''
    # (Removed a stray debug `print(param_props)` left in the original.)
    # Draw all samples in [0, 1) at once: one row per sample, one column per parameter.
    unit_samples = np.random.uniform(size=(N, len(param_props)))

    # Generate bounds, based on specification in params list.
    lows = np.array([p['bounds'][0] for p in param_props])
    highs = np.array([p['bounds'][1] for p in param_props])

    # Scale the unit samples into [low, high) per parameter (column-wise broadcast).
    spreads = highs - lows
    sm = unit_samples * spreads + lows

    return pd.DataFrame(sm, columns=[p['name'] for p in param_props])
def generate_lhc(N, param_props):
    '''
    Generate sample matrix using Latin Hyper Cube method.

    Sample matrix will have one row for each "sample" of the
    parameters. There will be one column for each parameter in
    the `param_props` list.

    Parameters
    ----------
    N : int
        number of samples (rows) to create
    param_props : list of dicts
        Each item in `param_props` list will be a dictionary
        with at least the following:

        >>> param_props = {
        ...   'name': 'cmax',               # name in dvmdostem parameter file (cmt_*.txt)
        ...   'bounds': [100.1, 105.1],     # the min and max values the parameter can have
        ... }

    Returns
    -------
    df : pandas.DataFrame, shape (N, len(param_props))
        There will be one column for each parameter in the
        `param_props` list and N rows (samples).
    '''
    # Generate bounds, based on specification in params list
    lo_bounds = np.array([p['bounds'][0] for p in param_props])
    hi_bounds = np.array([p['bounds'][1] for p in param_props])

    # Figure out the spread, or difference between bounds
    spreads = hi_bounds - lo_bounds

    # Draw a Latin Hypercube sample: one dimension per parameter, N samples.
    l = lhsmdu.sample(len(param_props), N)

    # Re-draw within the chosen strata, transposed to shape (N, num params).
    # NOTE(review): the matrix returned by the sample() call above is discarded;
    # presumably resample() reuses lhsmdu's internal state from that call —
    # TODO confirm this is intentional.
    l = lhsmdu.resample().T

    # Diagonal matrix of per-parameter spreads, used to scale each unit column...
    mat_diff = np.diag(spreads)

    # ...then shift by the lower bounds so samples land in [lo, hi] per parameter.
    sample_matrix = l * mat_diff + lo_bounds

    return pd.DataFrame(sample_matrix, columns=[p['name'] for p in param_props])
class SensitivityDriver(object):
'''
Sensitivity Analysis Driver class.
Driver class for conducting dvmdostem SensitivityAnalysis.
Methods for cleaning, setup, running model, collecting outputs.
Basic overview of use is like this:
1. Instantiate driver object.
2. Setup/design the experiment (parameters, to use,
number of samples, etc)
3. Use driver object to setup the run folders.
4. Use driver object to carry out model runs.
5. Use driver object to summarize/collect outputs.
6. Use driver object to make plots, do analysis.
Parameters
----------
See Also
--------
Examples
--------
Instantiate object, sets pixel, outputs, working directory,
site selection (input data path)
>>> driver = SensitivityDriver()
Show info about the driver object:
>>> driver.design_experiment(5, 4, params=['cmax','rhq10','nfall(1)'], pftnums=[2,None,2])
>>> driver.sample_matrix
cmax rhq10 nfall(1)
0 63.536594 1.919504 0.000162
1 62.528847 2.161819 0.000159
2 67.606747 1.834203 0.000145
3 59.671967 2.042034 0.000171
4 57.711999 1.968631 0.000155
'''
def __init__(self, clean=False):
    '''
    Constructor.

    Hard codes paths, pixel coordinates, and outputs for now. Creates
    `self.work_dir` on disk if it does not exist; when `clean` is True the
    working directory is wiped and re-created (see `clean()`).
    '''
    # Made this one private because I don't want it to get confused with
    # the later params directories that will be created in each run folder.
    self.__initial_params = '/work/parameters'

    self.work_dir = '/data/workflows/sensitivity_analysis'
    self.site = '/data/input-catalog/cru-ts40_ar5_rcp85_ncar-ccsm4_CALM_Toolik_LTER_10x10/'
    # Pixel coordinates (x, y) within the input dataset to run/analyze.
    self.PXx = 0
    self.PXy = 0
    # Output variables to extract; 'type' distinguishes fluxes from pools.
    self.outputs = [
      { 'name': 'GPP', 'type': 'flux',},
      { 'name': 'VEGC','type': 'pool',},
    ]

    # Side effect: ensure the working directory exists on disk.
    if not os.path.isdir(self.work_dir):
      os.mkdir(self.work_dir)

    if clean:
      self.clean()
def get_initial_params_dir(self):
    '''Read-only accessor to the private path of the initial dvmdostem parameter files.'''
    return self.__initial_params
def design_experiment(self, Nsamples, cmtnum, params, pftnums,
    percent_diffs=None, sampling_method='lhc'):
    '''
    Builds bounds based on initial values found in dvmdostem parameter
    files (cmt_*.txt files) and the `percent_diffs` array.

    The `percent_diffs` array gets used to figure out how far
    the bounds should be from the initial value. Defaults to initial
    value +/-10%.

    Sets instance values for `self.params` and `self.sample_matrix`.

    Parameters
    ----------
    Nsamples : int
        How many samples to draw. One sample equates to one run to be done with
        the parameter values in the sample.
    cmtnum : int
        Which community type number to use for initial parameter values, for
        doing runs and analyzing outputs.
    params : list of strings
        List of parameter names to use in the experiment. Each name must be
        in one of the dvmdostem parameter files (cmt_*.txt).
    pftnums : list of ints
        List of PFT numbers, one number for each parameter in `params`. Use
        `None` in the list for any non-pft parameter (i.e. a soil parameter).
    percent_diffs : list of floats
        List of values, one for each parameter in `params`. The value is used
        to set the bounds with respect to the initial parameter value. I.e.
        passing a value of .3 means bounds should be +/-30% of the initial
        value of the parameter.
    sampling_method : str
        Either 'lhc' (Latin Hypercube) or 'uniform'.

    Raises
    ------
    ValueError
        If `sampling_method` is not one of 'lhc' or 'uniform'.

    Returns
    -------
    None
    '''
    if not percent_diffs:
      percent_diffs = np.ones(len(params)) * 0.1  # use 10% for default perturbation

    assert len(params) == len(pftnums), "params list and pftnums list must be same length!"
    assert len(params) == len(percent_diffs), "params list and percent_diffs list must be same length"

    self.params = []
    plu = pu.build_param_lookup(self.__initial_params)

    for pname, pftnum, perturbation in zip(params, pftnums, percent_diffs):
      original_pdata_file = pu.which_file(self.__initial_params, pname, lookup_struct=plu)

      p_db = pu.get_CMT_datablock(original_pdata_file, cmtnum)
      p_dd = pu.cmtdatablock2dict(p_db)

      # Soil (non-PFT) parameters live at the top level of the datablock dict;
      # PFT parameters are nested under a 'pftN' key.
      if pname in p_dd.keys():
        p_initial = p_dd[pname]
      else:
        p_initial = p_dd['pft{}'.format(pftnum)][pname]

      p_bounds = [p_initial - (p_initial*perturbation), p_initial + (p_initial*perturbation)]

      self.params.append(dict(name=pname, bounds=p_bounds, initial=p_initial,
                              cmtnum=cmtnum, pftnum=pftnum))

    if sampling_method == 'lhc':
      self.sample_matrix = generate_lhc(Nsamples, self.params)
    elif sampling_method == 'uniform':
      # BUG FIX: generate_uniform is a module-level function, not a method —
      # the original `self.generate_uniform(...)` raised AttributeError.
      self.sample_matrix = generate_uniform(Nsamples, self.params)
    else:
      # Previously an unknown method silently left sample_matrix unset.
      raise ValueError("Unknown sampling_method: {}".format(sampling_method))
def save_experiment(self, name=''):
    '''Write the parameter properties and sensitivity matrix to CSV files.

    With the default empty `name`, files go into `self.work_dir`; otherwise
    `name` is used as a filename prefix in the current directory.
    '''
    if name == '':
      sm_fname = os.path.join(self.work_dir, 'sample_matrix.csv')
      pp_fname = os.path.join(self.work_dir, 'param_props.csv')
    else:
      sm_fname = f"{name}_sample_matrix.csv"
      pp_fname = f"{name}_param_props.csv"

    self.sample_matrix.to_csv(sm_fname, index=False)
    pd.DataFrame(self.params).to_csv(pp_fname, index=False)
def load_experiment(self, param_props_path, sample_matrix_path):
    '''Load parameter properties and sample matrix from files.

    Reverses `save_experiment()`: reads the two CSV files back and restores
    `self.sample_matrix` (DataFrame) and `self.params` (list of dicts).
    '''
    self.sample_matrix = pd.read_csv(sample_matrix_path)
    # 'name' is read as fixed-width bytes ('S10') and decoded back to str below;
    # 'bounds' is stored as a string like "[1.0, 2.0]" and parsed safely with
    # ast.literal_eval.
    self.params = pd.read_csv(param_props_path,
        dtype={'name':'S10','cmtnum':np.int32,},
        converters={'bounds': ast.literal_eval}
    )
    self.params = self.params.to_dict(orient='records')

    # nan to None so that self.pftnum() function works later
    for x in self.params:
      if 'name' in x.keys():
        # bytes -> str (the column was read with dtype 'S10' above)
        x['name'] = x['name'].decode('utf-8')
      if 'pftnum' in x.keys():
        if pd.isna(x['pftnum']): # could try np.isnan
          x['pftnum'] = None
        else:
          x['pftnum'] = int(x['pftnum'])
def clean(self):
    '''
    Remove the entire tree at `self.work_dir` and re-create it empty.

    This function is not careful, so be careful using it!
    '''
    shutil.rmtree(self.work_dir, ignore_errors=True)
    os.makedirs(self.work_dir)
def get_sensitivity_csvs(self):
    '''
    Look for all the sensitivity.csv files present in the run directories.

    The sensitivity.csv files are created using the
    extract_data_for_sensitivity_analysis(..) function.

    Returns
    -------
    file_list : list of strings
        sorted list of paths to sensitivity.csv files, one per run folder
    '''
    pattern = os.path.join(self.work_dir, '*', 'sensitivity.csv')
    return sorted(glob.glob(pattern, recursive=True))
def info(self):
'''
Print some summary info about the SensitivityDriver object.
Not sure how to best handle the summary of outputs yet. Maybe
a separate method. The problem is that existing outputs may
be leftover from prior runs and thus may not match the existing
params and sample_matrix data. But I don't want to be too
aggressive in cleaning up old outputs incase they are expensive
to re-create.
Returns
-------
None
'''
try:
pft_verbose_name = pu.get_pft_verbose_name(
cmtnum=self.cmtnum(), pftnum=self.pftnum(),
lookup_path=self.get_initial_params_dir()
)
except (AttributeError, ValueError) as e:
pft_verbose_name = ''
# Not all class attributes might be initialized, so if an
# attribute is not set, then print empty string.
try:
# DataFrame prints nicely
df = pd.DataFrame(self.params)
# prevents printing nan
# Might want to make | |
<reponame>IBM/spectrum-connect-csi<filename>controller/tests/array_action/svc/array_mediator_svc_test.py
import unittest
from mock import patch, Mock, call, PropertyMock
from munch import Munch
from pysvc import errors as svc_errors
from pysvc.unified.response import CLIFailureError, SVCResponse
import controller.array_action.config as config
import controller.array_action.errors as array_errors
from controller.array_action.array_mediator_svc import SVCArrayMediator, build_kwargs_from_parameters, \
FCMAP_STATUS_DONE, YES
from controller.common.node_info import Initiators
EMPTY_BYTES = b''
class TestArrayMediatorSVC(unittest.TestCase):
def setUp(self):
    """Build an SVCArrayMediator wired to a fully mocked pysvc client."""
    self.endpoint = ["IP_1"]
    # Patch _connect so construction does not attempt a real connection.
    with patch("controller.array_action.array_mediator_svc.SVCArrayMediator._connect"):
        self.svc = SVCArrayMediator("user", "password", self.endpoint)
    self.svc.client = Mock()
    # Minimal lssystem / lsnode / lsportip / lsip responses the mediator reads.
    self.svc.client.svcinfo.lssystem.return_value = [Munch({'location': 'local',
                                                            'id_alias': 'fake_identifier'})]
    node = Munch({'id': '1', 'name': 'node1', 'iscsi_name': 'iqn.1986-03.com.ibm:2145.v7k1.node1',
                  'status': 'online'})
    self.svc.client.svcinfo.lsnode.return_value = [node]
    lsportip_port = Munch({'node_id': '1', 'IP_address': '1.1.1.1', 'IP_address_6': None})
    lsip_port = Munch({'node_id': '1', 'IP_address': '1.1.1.1', 'portset_id': 'demo_id'})
    self.svc.client.svcinfo.lsportip.return_value = [lsportip_port]
    self.svc.client.svcinfo.lsip.return_value = [lsip_port]
    # Canned FlashCopy mappings reused across tests.
    self.fcmaps = [self._create_dummy_fcmap('source_name', 'test_fc_id')]
    self.fcmaps_as_target = [self._create_dummy_fcmap('source_name', 'test_fc_as_target_id')]
    self.fcmaps_as_source = [self._create_dummy_fcmap('test_snapshot', 'test_fc_id')]
    self.svc.client.svcinfo.lsfcmap.return_value = Mock(as_list=self.fcmaps)
    # Delete the auto-created Mock attribute so the mediator sees an array
    # without 'addsnapshot' support.
    del self.svc.client.svctask.addsnapshot
def _create_dummy_fcmap(self, source_name, id_value):
    """Build a Munch mimicking one lsfcmap row for the given source name and id."""
    fcmap_fields = {
        'source_vdisk_name': source_name,
        'target_vdisk_name': 'target_name',
        'id': id_value,
        'status': FCMAP_STATUS_DONE,
        'copy_rate': 'non_zero_value',
        'rc_controlled': 'no',
    }
    return Munch(fcmap_fields)
@patch("controller.array_action.array_mediator_svc.connect")
def test_init_unsupported_system_version(self, connect_mock):
    """Mediator construction must fail when the array code level is too old."""
    code_level_below_min_supported = '7.7.77.77 (build 777.77.7777777777777)'
    svc_mock = Mock()
    # lssystem reports a code level below the minimum the mediator supports.
    svc_mock.svcinfo.lssystem.return_value = [Munch({'location': 'local',
                                                     'code_level': code_level_below_min_supported})]
    connect_mock.return_value = svc_mock
    with self.assertRaises(array_errors.UnsupportedStorageVersionError):
        SVCArrayMediator("user", "password", self.endpoint)
    def test_raise_management_ips_not_support_error_in_init(self):
        """The mediator accepts exactly one management IP: more than one or none must raise."""
        # More than one endpoint is rejected.
        self.endpoint = ["IP_1", "IP_2"]
        with self.assertRaises(
                array_errors.StorageManagementIPsNotSupportError):
            SVCArrayMediator("user", "password", self.endpoint)
        # An empty endpoint list is rejected as well.
        self.endpoint = []
        with self.assertRaises(
                array_errors.StorageManagementIPsNotSupportError):
            SVCArrayMediator("user", "password", self.endpoint)
    @patch("controller.array_action.array_mediator_svc.connect")
    def test_connect_errors(self, connect_mock):
        """IncorrectCredentials from the client must surface as CredentialsError."""
        connect_mock.side_effect = [
            svc_errors.IncorrectCredentials('Failed_a')]
        with self.assertRaises(array_errors.CredentialsError):
            self.svc._connect()
def test_close(self):
self.svc.disconnect()
self.svc.client.close.assert_called_with()
def test_default_object_prefix_length_not_larger_than_max(self):
prefix_length = len(self.svc.default_object_prefix)
self.assertGreaterEqual(self.svc.max_object_prefix_length, prefix_length)
self.assertGreaterEqual(self.svc.max_object_prefix_length, prefix_length)
    @patch("controller.array_action.array_mediator_svc.is_warning_message")
    def _test_mediator_method_client_error(self, mediator_method, args, client_method, client_error, expected_error,
                                           mock_warning):
        """Helper: make one client call raise `client_error` and assert the mediator method
        translates it into `expected_error`."""
        # Force the error to be treated as fatal, not as a warning.
        mock_warning.return_value = False
        client_method.side_effect = [client_error]
        with self.assertRaises(expected_error):
            mediator_method(*args)
    def _test_mediator_method_client_cli_failure_error(self, mediator_method, args, client_method, error_message_id,
                                                       expected_error):
        """Helper: assert a CLIFailureError with the given CMMVC message id maps to `expected_error`."""
        self._test_mediator_method_client_error(mediator_method, args, client_method, CLIFailureError(error_message_id),
                                                expected_error)
    def _test_get_volume_lsvdisk_cli_failure_error(self, volume_name, error_message_id, expected_error):
        """Helper: assert get_volume maps an lsvdisk CLI failure to `expected_error`."""
        self._test_mediator_method_client_cli_failure_error(self.svc.get_volume, (volume_name, "pool", False),
                                                            self.svc.client.svcinfo.lsvdisk, error_message_id,
                                                            expected_error)
    def test_get_volume_lsvdisk_cli_failure_errors(self):
        """Each known lsvdisk CMMVC error id maps to its dedicated array error; unknown ids pass through."""
        self._test_get_volume_lsvdisk_cli_failure_error("volume_name", 'CMMVC5753E', array_errors.ObjectNotFoundError)
        self._test_get_volume_lsvdisk_cli_failure_error("\xff", 'CMMVC6017E', array_errors.IllegalObjectName)
        self._test_get_volume_lsvdisk_cli_failure_error("12345", 'CMMVC5703E', array_errors.IllegalObjectName)
        self._test_get_volume_lsvdisk_cli_failure_error("", 'other error', CLIFailureError)
def test_get_volume_return_correct_value(self):
cli_volume_mock = Mock(as_single_element=self._get_cli_volume())
self.svc.client.svcinfo.lsvdisk.return_value = cli_volume_mock
volume = self.svc.get_volume("test_volume", pool="pool1", flashcopy_2=False)
self.assertEqual(1024, volume.capacity_bytes)
self.assertEqual('pool_name', volume.pool)
self.assertEqual('SVC', volume.array_type)
    def test_get_volume_hyperswap_has_no_source(self):
        """A hyperswap (rc-controlled) fcmap target must not be reported as having a source volume."""
        target_cli_volume = self._get_mapped_target_cli_volume()
        self.svc.client.svcinfo.lsvdisk.return_value = self._mock_cli_object(target_cli_volume)
        self._prepare_fcmaps_for_hyperswap()
        volume = self.svc.get_volume("volume_name", pool="pool1", flashcopy_2=False)
        self.assertIsNone(volume.source_id)
def _prepare_stretched_volume_mock(self):
cli_volume = self._get_cli_volume(pool_name=['many', 'pool1', 'pool2'])
self.svc.client.svcinfo.lsvdisk.return_value = Mock(as_single_element=cli_volume)
    def test_get_volume_stretched_return_correct_pools(self):
        """A stretched volume's pools are joined with ':' (the 'many' marker is dropped)."""
        self._prepare_stretched_volume_mock()
        volume = self.svc.get_volume("volume_name", pool="pool1", flashcopy_2=False)
        self.assertEqual('pool1:pool2', volume.pool)
    def test_get_volume_raise_exception(self):
        """A generic exception from lsvdisk propagates out of get_volume unchanged."""
        self._test_mediator_method_client_error(self.svc.get_volume, ("volume",),
                                                self.svc.client.svcinfo.lsvdisk, Exception, Exception)
def test_get_volume_returns_nothing(self):
vol_ret = Mock(as_single_element=Munch({}))
self.svc.client.svcinfo.lsvdisk.return_value = vol_ret
with self.assertRaises(array_errors.ObjectNotFoundError):
self.svc.get_volume("volume", pool="pool1", flashcopy_2=False)
    def _test_create_volume_mkvolume_cli_failure_error(self, error_message_id, expected_error, volume_name="volume"):
        """Helper: assert create_volume maps an mkvolume CLI failure to `expected_error`."""
        self._test_mediator_method_client_cli_failure_error(self.svc.create_volume,
                                                            (volume_name, 10, "thin", "pool", None, None, False),
                                                            self.svc.client.svctask.mkvolume, error_message_id,
                                                            expected_error)
    def test_create_volume_raise_exceptions(self):
        """Each known mkvolume CMMVC error id maps to its dedicated array error."""
        self._test_mediator_method_client_error(self.svc.create_volume,
                                                ("volume", 10, "thin", "pool", None, None, False),
                                                self.svc.client.svctask.mkvolume, Exception, Exception)
        self._test_create_volume_mkvolume_cli_failure_error("Failed", CLIFailureError)
        self._test_create_volume_mkvolume_cli_failure_error("CMMVC8710E", array_errors.NotEnoughSpaceInPool)
        self._test_create_volume_mkvolume_cli_failure_error("CMMVC6017E", array_errors.IllegalObjectName, "\xff")
        self._test_create_volume_mkvolume_cli_failure_error("CMMVC6527E", array_errors.IllegalObjectName, "1_volume")
        self._test_create_volume_mkvolume_cli_failure_error("CMMVC5738E", array_errors.IllegalObjectName, "a" * 64)
        self._test_create_volume_mkvolume_cli_failure_error("CMMVC6035E", array_errors.VolumeAlreadyExists)
        self._test_create_volume_mkvolume_cli_failure_error("CMMVC5754E", array_errors.InvalidArgumentError)
        self._test_create_volume_mkvolume_cli_failure_error("CMMVC9292E", array_errors.PoolDoesNotMatchSpaceEfficiency)
        self._test_create_volume_mkvolume_cli_failure_error("CMMVC9301E", array_errors.PoolDoesNotMatchSpaceEfficiency)
    def _test_create_volume_success(self, space_efficiency):
        """Helper: run create_volume with the given space efficiency and check the returned Volume."""
        self.svc.client.svctask.mkvolume.return_value = Mock()
        vol_ret = Mock(as_single_element=self._get_cli_volume())
        self.svc.client.svcinfo.lsvdisk.return_value = vol_ret
        volume = self.svc.create_volume("test_volume", 1024, space_efficiency, "pool_name", io_group=None,
                                        volume_group=None, flashcopy_2=False)
        self.assertEqual(1024, volume.capacity_bytes)
        self.assertEqual('SVC', volume.array_type)
        self.assertEqual('vol_id', volume.id)
        self.assertEqual('test_id', volume.internal_id)
    def test_create_volume_with_thin_space_efficiency_success(self):
        """'thin' space efficiency translates to mkvolume's thin=True flag."""
        self._test_create_volume_success(config.SPACE_EFFICIENCY_THIN)
        self.svc.client.svctask.mkvolume.assert_called_with(name="test_volume", unit="b", size=1024, pool="pool_name",
                                                            thin=True)
    def test_create_volume_with_compressed_space_efficiency_success(self):
        """'compressed' space efficiency translates to mkvolume's compressed=True flag."""
        self._test_create_volume_success(config.SPACE_EFFICIENCY_COMPRESSED)
        self.svc.client.svctask.mkvolume.assert_called_with(name="test_volume", unit="b", size=1024, pool="pool_name",
                                                            compressed=True)
    def test_create_volume_with_deduplicated_thin_space_efficiency_success(self):
        """'dedup_thin' space efficiency sets both thin=True and deduplicated=True."""
        self._test_create_volume_success(config.SPACE_EFFICIENCY_DEDUPLICATED_THIN)
        self.svc.client.svctask.mkvolume.assert_called_with(name="test_volume", unit="b", size=1024, pool="pool_name",
                                                            thin=True, deduplicated=True)
    def test_create_volume_with_deduplicated_compressed_space_efficiency_success(self):
        """'dedup_compressed' space efficiency sets both compressed=True and deduplicated=True."""
        self._test_create_volume_success(config.SPACE_EFFICIENCY_DEDUPLICATED_COMPRESSED)
        self.svc.client.svctask.mkvolume.assert_called_with(name="test_volume", unit="b", size=1024, pool="pool_name",
                                                            compressed=True, deduplicated=True)
    def test_create_volume_with_deduplicated_backward_compatibility_space_efficiency_success(self):
        """Legacy 'deduplicated' value behaves like deduplicated-compressed for compatibility."""
        self._test_create_volume_success(config.SPACE_EFFICIENCY_DEDUPLICATED)
        self.svc.client.svctask.mkvolume.assert_called_with(name="test_volume", unit="b", size=1024, pool="pool_name",
                                                            compressed=True, deduplicated=True)
    def _test_create_volume_with_default_space_efficiency_success(self, space_efficiency):
        """Helper: assert the given space efficiency results in a plain (thick) mkvolume call."""
        self._test_create_volume_success(space_efficiency)
        self.svc.client.svctask.mkvolume.assert_called_with(name="test_volume", unit="b", size=1024, pool="pool_name")
    def test_create_volume_with_empty_string_space_efficiency_success(self):
        """An empty space-efficiency string falls back to the default (thick) provisioning."""
        self._test_create_volume_with_default_space_efficiency_success("")
    def test_create_volume_with_thick_space_efficiency_success(self):
        """Explicit 'thick' space efficiency uses the default mkvolume call with no extra flags."""
        self._test_create_volume_with_default_space_efficiency_success(config.SPACE_EFFICIENCY_THICK)
    def _test_delete_volume_rmvolume_cli_failure_error(self, error_message_id, expected_error, volume_name="volume"):
        """Helper: assert delete_volume maps an rmvolume CLI failure to `expected_error`."""
        self._test_mediator_method_client_cli_failure_error(self.svc.delete_volume, (volume_name,),
                                                            self.svc.client.svctask.rmvolume, error_message_id,
                                                            expected_error)
    def test_delete_volume_return_volume_delete_errors(self):
        """Not-found rmvolume errors map to ObjectNotFoundError; unknown ids pass through."""
        self._test_delete_volume_rmvolume_cli_failure_error("CMMVC5753E", array_errors.ObjectNotFoundError)
        self._test_delete_volume_rmvolume_cli_failure_error("CMMVC8957E", array_errors.ObjectNotFoundError)
        self._test_delete_volume_rmvolume_cli_failure_error("Failed", CLIFailureError)
def _prepare_mocks_for_object_still_in_use(self):
cli_volume = self._get_cli_volume()
cli_volume.FC_id = 'many'
self.svc.client.svcinfo.lsvdisk.return_value = Mock(as_single_element=cli_volume)
    def test_delete_volume_has_snapshot_fcmaps_not_removed(self):
        """Deleting a volume that is the source of a zero-copy-rate fcmap (a snapshot) must fail."""
        self._prepare_mocks_for_object_still_in_use()
        fcmaps_as_target = Mock(as_list=[])
        fcmaps = self.fcmaps
        # copy_rate "0" marks the fcmap as a snapshot relationship.
        fcmaps[0].copy_rate = "0"
        fcmaps_as_source = Mock(as_list=fcmaps)
        # delete_volume queries lsfcmap twice: first as target, then as source.
        self.svc.client.svcinfo.lsfcmap.side_effect = [fcmaps_as_target, fcmaps_as_source]
        with self.assertRaises(array_errors.ObjectIsStillInUseError):
            self.svc.delete_volume("volume")
    def test_delete_volume_still_copy_fcmaps_not_removed(self):
        """Deleting a volume with an fcmap still copying (status not done) must fail."""
        self._prepare_mocks_for_object_still_in_use()
        fcmaps_as_target = Mock(as_list=[])
        fcmaps = self.fcmaps
        # Any status other than the completed one means the copy is still in flight.
        fcmaps[0].status = "not good"
        fcmaps_as_source = Mock(as_list=fcmaps)
        self.svc.client.svcinfo.lsfcmap.side_effect = [fcmaps_as_target, fcmaps_as_source]
        with self.assertRaises(array_errors.ObjectIsStillInUseError):
            self.svc.delete_volume("volume")
    def _prepare_fcmaps_for_hyperswap(self):
        """Mark the shared fcmaps as rc-controlled (hyperswap) and queue them on lsfcmap.

        The side_effect order matters: the mediator first lists fcmaps where the volume
        is the target, then fcmaps where it is the source.
        """
        self.fcmaps_as_target[0].rc_controlled = "yes"
        fcmaps_as_target = Mock(as_list=self.fcmaps_as_target)
        self.fcmaps[0].rc_controlled = "yes"
        fcmaps_as_source = Mock(as_list=self.fcmaps)
        self.svc.client.svcinfo.lsfcmap.side_effect = [fcmaps_as_target, fcmaps_as_source]
def test_delete_volume_does_not_remove_hyperswap_fcmap(self):
self._prepare_fcmaps_for_hyperswap()
self.svc.delete_volume("volume")
self.svc.client.svctask.rmfcmap.assert_not_called()
    def test_delete_volume_has_clone_fcmaps_removed(self):
        """A clone fcmap where the volume is the source is removed during delete."""
        fcmaps_as_target = Mock(as_list=[])
        fcmaps_as_source = Mock(as_list=self.fcmaps_as_source)
        self.svc.client.svcinfo.lsfcmap.side_effect = [fcmaps_as_target, fcmaps_as_source]
        self.svc.delete_volume("volume")
        self.svc.client.svctask.rmfcmap.assert_called_once()
    @patch("controller.array_action.array_mediator_svc.is_warning_message")
    def test_delete_volume_has_clone_rmfcmap_raise_error(self, mock_warning):
        """A non-warning rmfcmap failure during delete propagates as CLIFailureError."""
        mock_warning.return_value = False
        fcmaps_as_target = Mock(as_list=[])
        fcmaps_as_source = Mock(as_list=self.fcmaps_as_source)
        self.svc.client.svcinfo.lsfcmap.side_effect = [fcmaps_as_target, fcmaps_as_source]
        self.svc.client.svctask.rmfcmap.side_effect = [CLIFailureError('error')]
        with self.assertRaises(CLIFailureError):
            self.svc.delete_volume("volume")
def test_delete_volume_success(self):
self.svc.client.svctask.rmvolume = Mock()
self.svc.delete_volume("volume")
def test_copy_to_existing_volume_from_source_success(self):
self.svc.copy_to_existing_volume("a", "b", 1, 1)
self.svc.client.svctask.mkfcmap.assert_called_once()
self.svc.client.svctask.startfcmap.assert_called_once()
    @patch("controller.array_action.array_mediator_svc.is_warning_message")
    def _test_copy_to_existing_volume_raise_errors(self, mock_warning, client_return_value, expected_error):
        """Helper: feed the same lsvdisk result/error twice (source and target lookup)
        and assert copy_to_existing_volume raises `expected_error`."""
        mock_warning.return_value = False
        self.svc.client.svcinfo.lsvdisk.side_effect = [client_return_value, client_return_value]
        with self.assertRaises(expected_error):
            self.svc.copy_to_existing_volume("a", "b", 1, 1)
    def test_copy_to_existing_volume_raise_not_found(self):
        """An empty lsvdisk element during copy maps to ObjectNotFoundError."""
        self._test_copy_to_existing_volume_raise_errors(client_return_value=Mock(as_single_element=None),
                                                        expected_error=array_errors.ObjectNotFoundError)
    def test_copy_to_existing_volume_raise_illegal_object_id(self):
        """Illegal-id CLI failures (CMMVC6017E/CMMVC5741E) map to IllegalObjectID."""
        self._test_copy_to_existing_volume_raise_errors(client_return_value=CLIFailureError('CMMVC6017E'),
                                                        expected_error=array_errors.IllegalObjectID)
        self._test_copy_to_existing_volume_raise_errors(client_return_value=CLIFailureError('CMMVC5741E'),
                                                        expected_error=array_errors.IllegalObjectID)
@staticmethod
def _mock_cli_object(cli_object):
return Mock(as_single_element=cli_object)
    @classmethod
    def _mock_cli_objects(cls, cli_objects):
        """Lazily wrap each CLI row as a single-element response (returns a map iterator,
        suitable for assignment to a mock's side_effect)."""
        return map(cls._mock_cli_object, cli_objects)
@staticmethod
def _get_cli_volume(with_deduplicated_copy=True, name='source_volume', pool_name='pool_name'):
se_copy = YES
deduplicated_copy = 'no'
compressed_copy = 'no'
if with_deduplicated_copy:
se_copy = 'no'
deduplicated_copy = YES
compressed_copy = YES
return Munch({'vdisk_UID': 'vol_id',
'id': 'test_id',
'name': name,
'capacity': '1024',
'mdisk_grp_name': pool_name,
'IO_group_name': 'iogrp0',
'FC_id': '',
'se_copy': se_copy,
'deduplicated_copy': deduplicated_copy,
'compressed_copy': compressed_copy
})
@staticmethod
def _get_cli_snapshot():
return Munch({'snapshot_id': 'snapshot_id',
'snapshot_name': 'snapshot_name',
'volume_id': 'volume_id',
'volume_name': 'volume_name',
})
@classmethod
def _get_mapless_target_cli_volume(cls):
target_cli_volume = cls._get_cli_volume()
target_cli_volume.vdisk_UID = 'snap_id'
target_cli_volume.name = 'test_snapshot'
return target_cli_volume
@classmethod
def _get_mapped_target_cli_volume(cls):
target_cli_volume = cls._get_mapless_target_cli_volume()
target_cli_volume.FC_id = 'test_fc_id'
return target_cli_volume
    def _prepare_lsvdisk_to_raise_not_found_error(self, mock_warning):
        """Make the next lsvdisk call raise the object-not-found CLI error (CMMVC5753E)."""
        mock_warning.return_value = False
        self.svc.client.svcinfo.lsvdisk.side_effect = [
            CLIFailureError("CMMVC5753E")]
def _prepare_lsvdisk_to_return_mapless_target_volume(self):
mapless_target_cli_volume = self._get_mapless_target_cli_volume()
mapless_target_cli_volume_mock = self._mock_cli_object(mapless_target_cli_volume)
self.svc.client.svcinfo.lsvdisk.return_value = mapless_target_cli_volume_mock
def _prepare_lsvdisk_to_return_none(self):
self.svc.client.svcinfo.lsvdisk.return_value = self._mock_cli_object(None)
def _prepare_mocks_for_delete_snapshot(self):
target_cli_volume = self._get_mapped_target_cli_volume()
target_cli_volume.FC_id = 'many'
self.svc.client.svcinfo.lsvdisk.return_value = self._mock_cli_object(target_cli_volume)
    def _prepare_mocks_for_get_snapshot(self):
        """Delete-snapshot mocks plus a zero copy rate, marking the fcmap as a snapshot."""
        self._prepare_mocks_for_delete_snapshot()
        self.fcmaps[0].copy_rate = "0"
    @patch("controller.array_action.array_mediator_svc.is_warning_message")
    def test_get_snapshot_not_exist_return_none(self, mock_warning):
        """get_snapshot returns None (does not raise) when the snapshot does not exist."""
        self._prepare_lsvdisk_to_raise_not_found_error(mock_warning)
        snapshot = self.svc.get_snapshot("volume_id", "test_snapshot", pool="pool1", flashcopy_2=False)
        self.assertIsNone(snapshot)
    def _test_get_snapshot_cli_failure_error(self, snapshot_name, client_method, error_message_id, expected_error,
                                             flashcopy_2=False):
        """Helper: assert get_snapshot maps a CLI failure from `client_method` to `expected_error`."""
        volume_id = "volume_id"
        self._test_mediator_method_client_cli_failure_error(self.svc.get_snapshot,
                                                            (volume_id, snapshot_name, "pool", flashcopy_2),
                                                            client_method, error_message_id, expected_error)
    def _test_get_snapshot_illegal_name_cli_failure_errors(self, client_method, flashcopy_2=False):
        """Helper: both illegal-name CMMVC ids must map to IllegalObjectName."""
        self._test_get_snapshot_cli_failure_error("\xff", client_method, 'CMMVC6017E', array_errors.IllegalObjectName,
                                                  flashcopy_2)
        self._test_get_snapshot_cli_failure_error("12345", client_method, 'CMMVC5703E', array_errors.IllegalObjectName,
                                                  flashcopy_2)
    def test_get_snapshot_lsvdisk_cli_failure_errors(self):
        """Without flashcopy-2, get_snapshot goes through lsvdisk and maps its errors."""
        client_method = self.svc.client.svcinfo.lsvdisk
        self._test_get_snapshot_illegal_name_cli_failure_errors(client_method)
        self.svc.client.svcinfo.lsvdisk.assert_called()
    def test_get_snapshot_has_no_fc_id_raise_error(self):
        """A target volume without an FC mapping is a plain volume, not a snapshot."""
        self._prepare_lsvdisk_to_return_mapless_target_volume()
        with self.assertRaises(array_errors.ExpectedSnapshotButFoundVolumeError):
            self.svc.get_snapshot("volume_id", "test_snapshot", pool="pool1", flashcopy_2=False)
    @patch("controller.array_action.array_mediator_svc.is_warning_message")
    def test_get_snapshot_get_fcmap_not_exist_raise_error(self, mock_warning):
        """A CLI failure while fetching the fcmap of a mapped target volume propagates."""
        target_cli_volume = self._get_mapped_target_cli_volume()
        self.svc.client.svcinfo.lsvdisk.return_value = self._mock_cli_object(target_cli_volume)
        mock_warning.return_value = False
        self.svc.client.svcinfo.lsfcmap.side_effect = [
            CLIFailureError("CMMVC5753E")]
        with self.assertRaises(CLIFailureError):
            self.svc.get_snapshot("volume_id", "test_snapshot", pool="pool1", flashcopy_2=False)
    def test_get_snapshot_non_zero_copy_rate(self):
        """A non-zero copy rate marks the fcmap as a clone, so get_snapshot must raise."""
        self._prepare_mocks_for_get_snapshot()
        self.fcmaps[0].copy_rate = "non_zero_value"
        with self.assertRaises(array_errors.ExpectedSnapshotButFoundVolumeError):
            self.svc.get_snapshot("volume_id", "test_snapshot", pool="pool1", flashcopy_2=False)
    def test_get_snapshot_no_fcmap_as_target(self):
        """If no fcmap lists the object as target, it is not a snapshot: expect an error."""
        self._prepare_mocks_for_get_snapshot()
        self.svc.client.svcinfo.lsfcmap.return_value = Mock(as_list=[])
        with self.assertRaises(array_errors.ExpectedSnapshotButFoundVolumeError):
            self.svc.get_snapshot("volume_id", "test_snapshot", pool="pool1", flashcopy_2=False)
    def test_get_snapshot_lsvdisk_success(self):
        """Happy path: get_snapshot via lsvdisk returns the snapshot with its name."""
        self._prepare_mocks_for_get_snapshot()
        snapshot = self.svc.get_snapshot("volume_id", "test_snapshot", pool="pool1", flashcopy_2=False)
        self.assertEqual("test_snapshot", snapshot.name)
    def test_get_snapshot_lsvolumesnapshot_cli_failure_errors(self):
        """With flashcopy-2 support (addsnapshot present), get_snapshot uses lsvolumesnapshot."""
        self.svc.client.svctask.addsnapshot = Mock()
        client_method = self.svc.client.svcinfo.lsvolumesnapshot
        self._test_get_snapshot_illegal_name_cli_failure_errors(client_method, True)
        self.svc.client.svcinfo.lsvolumesnapshot.assert_called()
    def _prepare_mocks_for_get_snapshot_lsvolumesnapshot(self):
        """Enable flashcopy-2 (addsnapshot) and can the lsvolumesnapshot/lsvdisk replies."""
        self.svc.client.svctask.addsnapshot = Mock()
        self.svc.client.svcinfo.lsvolumesnapshot.return_value = self._mock_cli_object(self._get_cli_snapshot())
        self.svc.client.svcinfo.lsvdisk.return_value = self._mock_cli_object(self._get_cli_volume())
    def test_get_snapshot_lsvolumesnapshot_success(self):
        """Happy path for the flashcopy-2 flow, including the exact CLI filter arguments."""
        self._prepare_mocks_for_get_snapshot_lsvolumesnapshot()
        snapshot = self.svc.get_snapshot("volume_id", "snapshot_name", pool="pool1", flashcopy_2=True)
        self.assertEqual("snapshot_name", snapshot.name)
        self.svc.client.svcinfo.lsvolumesnapshot.assert_called_once_with(filtervalue='snapshot_name=snapshot_name')
        self.svc.client.svcinfo.lsvdisk.assert_called_once_with(bytes=True, filtervalue='vdisk_UID=volume_id')
    def test_get_snapshot_lsvolumesnapshot_not_supported_error(self):
        """Requesting flashcopy-2 when the client lacks addsnapshot (deleted in setUp) must raise."""
        with self.assertRaises(array_errors.Flashcopy2NotSupportedMessage):
            self.svc.get_snapshot("volume_id", "snapshot_name", pool="pool1", flashcopy_2=True)
    def test_get_object_by_id_snapshot_has_no_fcmap_id_raise_error(self):
        """Looking up a snapshot by id must fail if the object has no FC mapping."""
        self._prepare_lsvdisk_to_return_mapless_target_volume()
        with self.assertRaises(array_errors.ExpectedSnapshotButFoundVolumeError):
            self.svc.get_object_by_id("snap_id", "snapshot")
def test_get_object_by_id_return_none(self):
self.svc.client.svcinfo.lsvdisk.return_value = Mock(as_single_element=None)
returned_value = self.svc.get_object_by_id("snap_id", "snapshot")
self.assertEqual(None, returned_value)
    def test_get_object_by_id_snapshot_success(self):
        """Snapshot lookup queries lsvdisk twice: by UID for the target, then by id for the source."""
        self._prepare_mocks_for_get_snapshot()
        snapshot = self.svc.get_object_by_id("test_snapshot", "snapshot")
        self.assertEqual("test_snapshot", snapshot.name)
        calls = [call(bytes=True, filtervalue='vdisk_UID=test_snapshot'),
                 call(bytes=True, object_id='source_name')]
        self.svc.client.svcinfo.lsvdisk.assert_has_calls(calls)
    def test_get_object_by_id_volume_success(self):
        """Volume lookup by id returns the volume with its CLI name."""
        target_cli_volume = self._get_mapped_target_cli_volume()
        target_cli_volume.name = "volume_id"
        self.svc.client.svcinfo.lsvdisk.return_value = self._mock_cli_object(target_cli_volume)
        volume = self.svc.get_object_by_id("volume_id", "volume")
        self.assertEqual("volume_id", volume.name)
def _get_custom_cli_volume(self, support_deduplicated_copy, with_deduplicated_copy, name='source_volume',
pool_name='pool_name'):
volume = self._get_cli_volume(with_deduplicated_copy, name=name, pool_name=pool_name)
if not support_deduplicated_copy:
del volume.deduplicated_copy
return volume
def _prepare_mocks_for_create_snapshot_mkvolume(self, support_deduplicated_copy=True,
source_has_deduplicated_copy=False, different_pool_site=False,
is_source_stretched=False):
self.svc.client.svctask.mkvolume.return_value = Mock()
self.svc.client.svctask.mkfcmap.return_value = Mock()
pool = ['many', 'pool1', 'pool2'] if is_source_stretched else 'pool_name'
source_volume_to_copy_from = self._get_custom_cli_volume(support_deduplicated_copy,
source_has_deduplicated_copy,
pool_name=pool)
volumes_to_return = [source_volume_to_copy_from, source_volume_to_copy_from]
if different_pool_site:
if is_source_stretched:
pools_to_return = [Munch({'site_name': 'pool_site'}),
Munch({'site_name': 'source_volume_site'}),
Munch({'site_name': 'pool_site'})]
self.svc.client.svcinfo.lsmdiskgrp.side_effect = self._mock_cli_objects(pools_to_return)
else:
pools_to_return = [Munch({'site_name': 'pool_site'}),
Munch({'site_name': 'source_volume_site'}),
Munch({'site_name': 'other_volume_site'}),
Munch({'site_name': 'pool_site'})]
self.svc.client.svcinfo.lsmdiskgrp.side_effect = self._mock_cli_objects(pools_to_return)
auxiliary_volumes = [self._get_cli_volume(name='other_volume', pool_name='other_volume_pool'),
self._get_custom_cli_volume(support_deduplicated_copy,
source_has_deduplicated_copy,
name='relevant_volume',
pool_name='relevant_volume_pool')]
volumes_to_return.extend(auxiliary_volumes)
rcrelationships_to_return = [Munch({'aux_vdisk_name': 'other_volume'}),
Munch({'aux_vdisk_name': 'relevant_volume'})]
self.svc.client.svcinfo.lsrcrelationship.return_value = Mock(as_list=rcrelationships_to_return)
target_volume_after_creation = self._get_mapless_target_cli_volume()
target_volume_after_mapping = self._get_mapped_target_cli_volume()
target_volume_for_rollback = self._get_mapped_target_cli_volume()
volumes_to_return.extend([target_volume_after_creation, target_volume_after_mapping,
target_volume_for_rollback])
self.svc.client.svcinfo.lsvdisk.side_effect = self._mock_cli_objects(volumes_to_return)
self.svc.client.svctask.startfcmap.return_value = Mock()
    @patch("controller.array_action.array_mediator_svc.is_warning_message")
    def test_create_snapshot_create_volume_error(self, mock_warning):
        """A non-warning mkvolume failure during create_snapshot propagates."""
        source_cli_volume = self._get_cli_volume()
        self.svc.client.svcinfo.lsvdisk.return_value = self._mock_cli_object(source_cli_volume)
        mock_warning.return_value = False
        self.svc.client.svctask.mkvolume.side_effect = [
            CLIFailureError("Failed")]
        with self.assertRaises(CLIFailureError):
            self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1",
                                     flashcopy_2=False)
    def _test_create_snapshot_lsvdisk_cli_failure_error(self, volume_id, snapshot_name, error_message_id,
                                                        expected_error, space_efficiency=None, pool=None):
        """Helper: assert create_snapshot maps an lsvdisk CLI failure to `expected_error`."""
        self._test_mediator_method_client_cli_failure_error(self.svc.create_snapshot,
                                                            (volume_id, snapshot_name, space_efficiency, pool, False),
                                                            self.svc.client.svcinfo.lsvdisk, error_message_id,
                                                            expected_error)
    def test_create_snapshot_lsvdisk_cli_failure_errors(self):
        """Illegal source-volume ids map to IllegalObjectID during create_snapshot."""
        self._test_create_snapshot_lsvdisk_cli_failure_error("\xff", "snapshot_name", 'CMMVC6017E',
                                                             array_errors.IllegalObjectID)
        self._test_create_snapshot_lsvdisk_cli_failure_error("!@#", "snapshot_name", 'CMMVC5741E',
                                                             array_errors.IllegalObjectID)
    def test_create_snapshot_source_not_found_error(self):
        """A missing source volume (empty lsvdisk results) maps to ObjectNotFoundError."""
        self.svc.client.svcinfo.lsvdisk.side_effect = [Mock(as_single_element=None), Mock(as_single_element=None)]
        with self.assertRaises(array_errors.ObjectNotFoundError):
            self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1",
                                     flashcopy_2=False)
    @patch("controller.array_action.array_mediator_svc.is_warning_message")
    def test_create_snapshot_create_fcmap_error(self, mock_warning):
        """A non-warning mkfcmap failure during create_snapshot propagates."""
        self._prepare_mocks_for_create_snapshot_mkvolume()
        mock_warning.return_value = False
        self.svc.client.svctask.mkfcmap.side_effect = [
            CLIFailureError("Failed")]
        with self.assertRaises(CLIFailureError):
            self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1",
                                     flashcopy_2=False)
    @patch("controller.array_action.array_mediator_svc.is_warning_message")
    def test_create_snapshot_start_fcmap_error(self, mock_warning):
        """A non-warning startfcmap failure during create_snapshot propagates."""
        self._prepare_mocks_for_create_snapshot_mkvolume()
        mock_warning.return_value = False
        self.svc.client.svctask.startfcmap.side_effect = [
            CLIFailureError("Failed")]
        with self.assertRaises(CLIFailureError):
            self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1",
                                     flashcopy_2=False)
    def test_create_snapshot_mkvolume_success(self):
        """Happy path: create_snapshot returns the new snapshot's capacity, type and id."""
        self._prepare_mocks_for_create_snapshot_mkvolume()
        snapshot = self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="pool1",
                                            flashcopy_2=False)
        self.assertEqual(1024, snapshot.capacity_bytes)
        self.assertEqual('SVC', snapshot.array_type)
        self.assertEqual('snap_id', snapshot.id)
    def test_create_snapshot_with_different_pool_success(self):
        """An explicit target pool overrides the source volume's pool in the mkvolume call."""
        self._prepare_mocks_for_create_snapshot_mkvolume()
        self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="different_pool",
                                 flashcopy_2=False)
        self.svc.client.svctask.mkvolume.assert_called_once_with(name='test_snapshot', unit='b', size=1024,
                                                                 pool='different_pool', iogrp='iogrp0',
                                                                 thin=True)
    def test_create_snapshot_for_hyperswap_volume_with_different_site_success(self):
        """For a hyperswap source on another site, the fcmap source is the auxiliary
        volume whose pool matches the target pool's site."""
        self._prepare_mocks_for_create_snapshot_mkvolume(different_pool_site=True)
        self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="different_pool",
                                 flashcopy_2=False)
        self.svc.client.svctask.mkfcmap.assert_called_once_with(source="relevant_volume", target="test_snapshot",
                                                                copyrate=0)
    def test_create_snapshot_for_stretched_volume_with_different_site_success(self):
        """A stretched source volume stays the fcmap source even across pool sites."""
        self._prepare_mocks_for_create_snapshot_mkvolume(different_pool_site=True, is_source_stretched=True)
        self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool="different_pool",
                                 flashcopy_2=False)
        self.svc.client.svctask.mkfcmap.assert_called_once_with(source="source_volume", target="test_snapshot",
                                                                copyrate=0)
def test_create_snapshot_for_stretched_volume_implicit_pool_success(self):
self._prepare_mocks_for_create_snapshot_mkvolume(is_source_stretched=True)
self.svc.create_snapshot("source_volume_id", "test_snapshot", space_efficiency=None, pool=None,
flashcopy_2=False)
self.svc.client.svctask.mkvolume.assert_called_once_with(name='test_snapshot', unit='b', size=1024,
pool='pool1', | |
DelaySpectrum(uv=[test_uv_1, test_uv_2])
dspec_object.calculate_delay_spectrum()
dspec_object.add_trcvr(144 * units.K)
assert (units.mK**2 * units.Mpc**3).is_equivalent(dspec_object.noise_power.unit)
def test_delay_spectrum_thermal_power_units():
    """Test the units on the output thermal power are correct."""
    test_file = os.path.join(DATA_PATH, "paper_test_file.uvh5")
    test_uv_1 = UVData()
    test_uv_1.read(test_file)
    test_uv_2 = copy.deepcopy(test_uv_1)
    beam_file = os.path.join(DATA_PATH, "test_paper_pI.beamfits")
    uvb = UVBeam()
    uvb.read_beamfits(beam_file)
    # Restrict both copies to the same frequency window before pairing them.
    test_uv_1.select(freq_chans=np.arange(95, 116))
    test_uv_2.select(freq_chans=np.arange(95, 116))
    dspec_object = DelaySpectrum(uv=[test_uv_1, test_uv_2])
    dspec_object.calculate_delay_spectrum()
    dspec_object.add_trcvr(144 * units.K)
    # Thermal power should be expressible in cosmological power-spectrum units.
    assert (units.mK**2 * units.Mpc**3).is_equivalent(
        dspec_object.thermal_power.unit
    )
def test_delay_spectrum_thermal_power_shape():
    """Test the shape of the output thermal power is correct."""
    test_file = os.path.join(DATA_PATH, "paper_test_file.uvh5")
    test_uv_1 = UVData()
    test_uv_1.read(test_file)
    test_uv_2 = copy.deepcopy(test_uv_1)
    beam_file = os.path.join(DATA_PATH, "test_paper_pI.beamfits")
    uvb = UVBeam()
    uvb.read_beamfits(beam_file)
    test_uv_1.select(freq_chans=np.arange(95, 116))
    test_uv_2.select(freq_chans=np.arange(95, 116))
    dspec_object = DelaySpectrum(uv=[test_uv_1, test_uv_2])
    dspec_object.calculate_delay_spectrum()
    dspec_object.add_trcvr(144 * units.K)
    # The parameter descriptor knows the expected shape for this object's dimensions.
    assert (
        dspec_object._thermal_power.expected_shape(dspec_object)
        == dspec_object.thermal_power.shape
    )
def test_multiple_polarization_file():
    """Test the delay spectrum pipeline on a file with multiple polarizations."""
    testfile = os.path.join(DATA_PATH, "test_two_pol_array.uvfits")
    test_uvb_file = os.path.join(DATA_PATH, "test_multiple_pol.beamfits")
    uvd = UVData()
    uvd.read(testfile)
    dspec_object = DelaySpectrum(uv=[uvd])
    dspec_object.select_spectral_windows([(1, 3), (4, 6)])
    uvb = UVBeam()
    uvb.read_beamfits(test_uvb_file)
    dspec_object.add_uvbeam(uvb=uvb)
    # The object must be self-consistent both before and after the calculation.
    assert dspec_object.check()
    dspec_object.calculate_delay_spectrum()
    assert dspec_object.check()
def test_remove_cosmology():
    """Test removing cosmology does not alter data from before cosmology is applied."""
    testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    uvd = UVData()
    uvd.read(testfile)
    dspec_object = DelaySpectrum(uv=[uvd])
    dspec_object.select_spectral_windows([(1, 3), (4, 6)])
    uvb = UVBeam()
    uvb.read_beamfits(test_uvb_file)
    dspec_object.add_uvbeam(uvb=uvb)
    # Keep a pristine copy to build the reference (no-cosmology) power array.
    dspec_object2 = copy.deepcopy(dspec_object)
    dspec_object.calculate_delay_spectrum(littleh_units=True)
    dspec_object.remove_cosmology()
    # After removal the power is back in instrumental units (Jy^2 Hz^2).
    assert dspec_object.power_array.unit.is_equivalent(units.Jy**2 * units.Hz**2)
    dspec_object2.delay_transform()
    dspec_object2.power_array = utils.cross_multiply_array(
        array_1=dspec_object2.data_array[:, 0], axis=2
    )
    assert units.allclose(dspec_object2.power_array, dspec_object.power_array)
def test_remove_cosmology_no_cosmo():
    """Test remove_cosmology leaves power arrays untouched when no cosmology was applied."""
    testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    uvd = UVData()
    uvd.read(testfile)
    dspec_object = DelaySpectrum(uv=[uvd])
    dspec_object.select_spectral_windows([(1, 3), (4, 6)])
    uvb = UVBeam()
    uvb.read_beamfits(test_uvb_file)
    dspec_object.add_uvbeam(uvb=uvb)
    # Build the power arrays by hand, without calculate_delay_spectrum, so no
    # cosmological normalization is ever applied.
    dspec_object.delay_transform()
    dspec_object.power_array = utils.cross_multiply_array(
        array_1=dspec_object.data_array[:, 0], axis=2
    )
    dspec_object.noise_power = utils.cross_multiply_array(
        array_1=dspec_object.noise_array[:, 0], axis=2
    )
    dspec_object2 = copy.deepcopy(dspec_object)
    dspec_object.remove_cosmology()
    assert dspec_object.power_array.unit.is_equivalent(units.Jy**2 * units.Hz**2)
    # Removing a never-applied cosmology must be a no-op on the data.
    assert units.allclose(dspec_object2.power_array, dspec_object.power_array)
def test_remove_cosmology_cosmo_none():
    """Test remove_cosmology raises a ValueError when the cosmology attribute is None."""
    testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    uvd = UVData()
    uvd.read(testfile)
    dspec_object = DelaySpectrum(uv=[uvd])
    dspec_object.cosmology = None
    with pytest.raises(ValueError) as cm:
        dspec_object.remove_cosmology()
    assert str(cm.value).startswith("Cannot remove cosmology of type")
def test_update_cosmology_units_and_shapes():
    """Test the check function on DelaySpectrum after changing cosmologies."""
    testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    test_cosmo = Planck15
    uvd = UVData()
    uvd.read(testfile)
    dspec_object = DelaySpectrum(uv=[uvd])
    dspec_object.select_spectral_windows([(1, 3), (4, 6)])
    uvb = UVBeam()
    uvb.read_beamfits(test_uvb_file)
    dspec_object.add_uvbeam(uvb=uvb)
    dspec_object.calculate_delay_spectrum()
    assert dspec_object.check()
    # Swapping to Planck15 must keep all units and array shapes internally consistent.
    dspec_object.update_cosmology(cosmology=test_cosmo)
    assert dspec_object.check()
def test_update_cosmology_error_if_not_cosmology_object():
    """Test update cosmology function errors if new cosmology is not a Cosmology object."""
    testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    # DummyClass is an arbitrary non-Cosmology object.
    bad_input = DummyClass()
    uvd = UVData()
    uvd.read(testfile)
    dspec_object = DelaySpectrum(uv=[uvd])
    dspec_object.select_spectral_windows([(1, 3), (4, 6)])
    uvb = UVBeam()
    uvb.read_beamfits(test_uvb_file)
    dspec_object.add_uvbeam(uvb=uvb)
    dspec_object.calculate_delay_spectrum()
    assert dspec_object.check()
    pytest.raises(ValueError, dspec_object.update_cosmology, cosmology=bad_input)
def test_update_cosmology_unit_and_shape_kelvin_sr():
    """Cosmology update keeps the object valid for Kelvin * sr input data."""
    data_file = os.path.join(DATA_PATH, "paper_test_file_k_units.uvh5")
    uv_one = UVData()
    uv_one.read(data_file)
    uv_two = copy.deepcopy(uv_one)
    # The beam file is read here as in the original setup even though it is
    # not attached to the DelaySpectrum object.
    beam = UVBeam()
    beam.read_beamfits(os.path.join(DATA_PATH, "test_paper_pI.beamfits"))
    uv_one.select(freq_chans=np.arange(95, 116))
    uv_two.select(freq_chans=np.arange(95, 116))
    dspec = DelaySpectrum(uv=[uv_one, uv_two])
    dspec.calculate_delay_spectrum()
    dspec.add_trcvr(144 * units.K)
    assert dspec.check()
    dspec.update_cosmology(cosmology=Planck15)
    assert dspec.check()
def test_update_cosmology_unit_and_shape_uncalib():
    """Cosmology update keeps the object valid for uncalibrated input data."""
    data_file = os.path.join(DATA_PATH, "paper_test_file_uncalib_units.uvh5")
    uv_one = UVData()
    uv_one.read(data_file)
    uv_two = copy.deepcopy(uv_one)
    beam = UVBeam()
    beam.read_beamfits(os.path.join(DATA_PATH, "test_paper_pI.beamfits"))
    uv_one.select(freq_chans=np.arange(95, 116))
    uv_two.select(freq_chans=np.arange(95, 116))
    # One warning is emitted per input UVData object.
    uncalib_warning = (
        "Data is uncalibrated. Unable to covert noise array to "
        "unicalibrated units."
    )
    with uvtest.check_warnings(UserWarning, [uncalib_warning] * 2):
        dspec = DelaySpectrum([uv_one, uv_two])
    dspec.add_trcvr(144 * units.K)
    ft_warning = (
        "Fourier Transforming uncalibrated data. "
        "Units will not have physical meaning. "
        "Data will be arbitrarily scaled."
    )
    with uvtest.check_warnings(UserWarning, [ft_warning]):
        dspec.calculate_delay_spectrum()
    assert dspec.check()
    dspec.update_cosmology(cosmology=Planck15)
    assert dspec.check()
def test_update_cosmology_littleh_units():
    """Test the units can convert to 'littleh' units in python 3.

    Regression note: the final check previously read
    ``assert dspec_object.power_array.unit, test_unit``, which only asserts
    the truthiness of the unit (always true) and uses ``test_unit`` as the
    assertion *message*, so the unit comparison was never performed.  It now
    uses an explicit equality comparison, matching the sibling tests.
    """
    testfile = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    test_uvb_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    test_cosmo = Planck15
    uvd = UVData()
    uvd.read(testfile)
    dspec_object = DelaySpectrum(uv=[uvd])
    dspec_object.select_spectral_windows([(1, 3), (4, 6)])
    uvb = UVBeam()
    uvb.read_beamfits(test_uvb_file)
    dspec_object.add_uvbeam(uvb=uvb)
    dspec_object.calculate_delay_spectrum()
    assert dspec_object.check()
    dspec_object.update_cosmology(cosmology=test_cosmo, littleh_units=True)
    assert dspec_object.check()
    test_unit = (units.mK**2) / (littleh / units.Mpc) ** 3
    # Compare, do not use the comma form of assert.
    assert dspec_object.power_array.unit == test_unit
def test_update_cosmology_littleh_units_from_calc_delay_spectr():
    """'littleh' units must propagate through calculate_delay_spectrum."""
    uvfits_file = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    beamfits_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    uv_data = UVData()
    uv_data.read(uvfits_file)
    dspec = DelaySpectrum(uv=[uv_data])
    dspec.select_spectral_windows([(1, 3), (4, 6)])
    beam = UVBeam()
    beam.read_beamfits(beamfits_file)
    dspec.add_uvbeam(uvb=beam)
    dspec.calculate_delay_spectrum(cosmology=Planck15, littleh_units=True)
    assert dspec.check()
    expected_unit = (units.mK**2) / (littleh / units.Mpc) ** 3
    assert dspec.power_array.unit == expected_unit
    assert dspec.cosmology.name == "Planck15"
def test_call_update_cosmology_twice():
    """update_cosmology can be applied twice in a row with littleh_units."""
    uvfits_file = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    beamfits_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    uv_data = UVData()
    uv_data.read(uvfits_file)
    dspec = DelaySpectrum(uv=[uv_data])
    dspec.select_spectral_windows([(1, 3), (4, 6)])
    beam = UVBeam()
    beam.read_beamfits(beamfits_file)
    dspec.add_uvbeam(uvb=beam)
    dspec.calculate_delay_spectrum(cosmology=WMAP9, littleh_units=True)
    assert dspec.check()
    assert dspec.cosmology.name == "WMAP9"
    dspec.update_cosmology(Planck15, littleh_units=True)
    expected_unit = (units.mK**2) / (littleh / units.Mpc) ** 3
    assert dspec.power_array.unit == expected_unit
    assert dspec.cosmology.name == "Planck15"
    assert dspec.check()
def test_call_update_cosmology_twice_no_littleh():
    """update_cosmology can be applied twice in a row without littleh_units."""
    uvfits_file = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    beamfits_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    uv_data = UVData()
    uv_data.read(uvfits_file)
    dspec = DelaySpectrum(uv=[uv_data])
    dspec.select_spectral_windows([(1, 3), (4, 6)])
    beam = UVBeam()
    beam.read_beamfits(beamfits_file)
    dspec.add_uvbeam(uvb=beam)
    dspec.calculate_delay_spectrum(cosmology=WMAP9, littleh_units=False)
    assert dspec.check()
    assert dspec.cosmology.name == "WMAP9"
    dspec.update_cosmology(Planck15, littleh_units=False)
    expected_unit = units.mK**2 * units.Mpc**3
    assert dspec.power_array.unit == expected_unit
    assert dspec.cosmology.name == "Planck15"
    assert dspec.check()
def test_call_delay_spectrum_twice_no_littleh():
    """calculate_delay_spectrum can run twice in a row without littleh_units."""
    uvfits_file = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    beamfits_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    uv_data = UVData()
    uv_data.read(uvfits_file)
    dspec = DelaySpectrum(uv=[uv_data])
    dspec.select_spectral_windows([(1, 3), (4, 6)])
    beam = UVBeam()
    beam.read_beamfits(beamfits_file)
    dspec.add_uvbeam(uvb=beam)
    dspec.calculate_delay_spectrum(cosmology=WMAP9, littleh_units=False)
    assert dspec.check()
    assert dspec.cosmology.name == "WMAP9"
    dspec.calculate_delay_spectrum(cosmology=Planck15, littleh_units=False)
    expected_unit = units.mK**2 * units.Mpc**3
    assert dspec.power_array.unit == expected_unit
    assert dspec.cosmology.name == "Planck15"
    assert dspec.check()
def test_call_delay_spectrum_twice():
    """calculate_delay_spectrum can run twice in a row with littleh_units."""
    uvfits_file = os.path.join(UVDATA_PATH, "test_redundant_array.uvfits")
    beamfits_file = os.path.join(DATA_PATH, "test_redundant_array.beamfits")
    uv_data = UVData()
    uv_data.read(uvfits_file)
    dspec = DelaySpectrum(uv=[uv_data])
    dspec.select_spectral_windows([(1, 3), (4, 6)])
    beam = UVBeam()
    beam.read_beamfits(beamfits_file)
    dspec.add_uvbeam(uvb=beam)
    dspec.calculate_delay_spectrum(cosmology=WMAP9, littleh_units=True)
    assert dspec.check()
    assert dspec.cosmology.name == "WMAP9"
    dspec.calculate_delay_spectrum(cosmology=Planck15, littleh_units=True)
    expected_unit = units.mK**2 * units.Mpc**3 / littleh**3
    assert dspec.power_array.unit == expected_unit
    assert dspec.cosmology.name == "Planck15"
    assert dspec.check()
# Each case gives: the keyword arguments passed to select(), the expected
# exception type, and the prefix of the expected exception message.
@pytest.mark.parametrize(
    "input,err_type,err_message",
    [
        ({"antenna_nums": [-1]}, ValueError, "Antenna -1 is not present in either"),
        ({"bls": []}, ValueError, "bls must be a list of tuples of antenna numbers"),
        (
            {"bls": [(0, 44), "test"]},
            ValueError,
            "bls must be a list of tuples of antenna numbers",
        ),
        (
            {"bls": [(1, "2")]},
            ValueError,
            "bls must be a list of tuples of antenna numbers",
        ),
        (
            {"bls": [("1", 2)]},
            ValueError,
            "bls must be a list of tuples of antenna numbers",
        ),
        (
            {"bls": [(1, 2, "xx")], "polarizations": "yy"},
            ValueError,
            "Cannot provide length-3 tuples and also",
        ),
        (
            {"bls": [(1, 2, 3)]},
            ValueError,
            "The third element in each bl must be a polarization",
        ),
        ({"bls": [(2, 3)]}, ValueError, "Baseline (2, 3) has no data associate"),
        ({"spws": ["pi"]}, ValueError, "Input spws must be an array_like of integers"),
        ({"spws": [5]}, ValueError, "Input spectral window values must be less"),
        (
            {"frequencies": [12] * units.Hz},
            ValueError,
            "Frequency 12.0 Hz not present in the frequency array.",
        ),
        (
            {"frequencies": [146798030.15625, 147290641.0, 151724138.59375] * units.Hz},
            ValueError,
            "Frequencies provided for selection will result in a non-rectangular",
        ),
        (
            {"delays": [12] * units.ns},
            ValueError,
            "The input delay 12.0 ns is not present in the delay_array.",
        ),
        (
            {"lsts": [7] * units.rad},
            ValueError,
            "The input lst 7.0 rad is not present in the lst_array.",
        ),
        (
            {"lst_range": [0, 2, 3] * units.rad},
            ValueError,
            "Parameter lst_range must be an Astropy Quantity object with size 2 ",
        ),
        (
            {"polarizations": ["pU"]},
            ValueError,
            "Polarization 3 not present in polarization_array.",
        ),
        (
            {"delay_chans": np.arange(11).tolist(), "delays": -96.66666543 * units.ns},
            ValueError,
            "The intersection of the input delays and delay_chans ",
        ),
        (
            {"uv_index": np.arange(5).tolist()},
            ValueError,
            "The number of UVData objects in this DelaySpectrum object",
        ),
    ],
)
def test_select_preprocess_errors(ds_from_uvfits, input, err_type, err_message):
    """Test Errors raised by _select_preprocess.

    Args:
        ds_from_uvfits: fixture providing a DelaySpectrum built from a uvfits
            file.
        input (dict): keyword arguments forwarded to select().
        err_type: expected exception class.
        err_message (str): expected prefix of the exception message.
    """
    ds = ds_from_uvfits
    ds.delay_transform()
    with pytest.raises(err_type) as cm:
        ds.select(**input)
    # Only the message prefix is checked; suffixes may contain values.
    assert str(cm.value).startswith(err_message)
@pytest.mark.parametrize(
"input",
[
{"antenna_nums": [0, 44]},
{"bls": (0, 26)}, # if statement looking for just one input that is a tuple
{"bls": (26, 0)}, # reverse the baseline to see if it is taken
{"bls": [(0, 26), (1, 4)]},
{"bls": [(0, 26), | |
or value > 2 ** 16 - 1:
raise ValueError('<{}> 超过 <{}> 寄存器的取值范围'.format(value, key))
elif key in ('A', 'X', 'Y', 'S'):
if value < 0 or value > 2 ** 8 - 1:
raise ValueError('<{}> 超过 <{}> 寄存器的取值范围'.format(value, key))
elif key == 'status':
if not isinstance(value, _Status):
raise ValueError('<{}> 不是正确的 Status 寄存器'.format(value))
super().__setattr__(key, value)
def dump_registers(self):
d = dict(
PC=self.pc,
P=self.status.value,
A=self.a,
X=self.x,
Y=self.y,
S=self.sp,
)
return d
def next_mem_value(self):
pc = self.pc
v = self.memory[pc]
self.pc = pc + 1
return v
    def address_from_mode(self, mode: str):
        """Decode the operand for the given addressing mode.

        Consumes operand bytes at PC as needed and returns the effective
        address (None for IMP; the immediate byte itself for IMM).
        Absolute addresses wrap to 16 bits, zero-page addresses to 8 bits.
        """
        if mode == 'IMP':
            # Implied: no operand.
            return None
        elif mode == 'IMM':
            # Immediate: the operand byte itself is the "address".
            a = self.next_mem_value()
            return a
        elif mode == 'ABS':
            # Absolute: 16-bit little-endian address.
            al = self.next_mem_value()
            ah = self.next_mem_value()
            a = utils.number_from_bytes([al, ah])
            return a
        elif mode == 'ZPG':
            # Zero page: single-byte address.
            a = self.next_mem_value()
            return a
        elif mode == 'ABX':
            # Absolute,X: 16-bit base plus X, wrapped to 16 bits.
            al = self.next_mem_value()
            ah = self.next_mem_value()
            a = utils.number_from_bytes([al, ah])
            i = self.x
            return (a + i) % 0x10000
        elif mode == 'ABY':
            # Absolute,Y: 16-bit base plus Y, wrapped to 16 bits.
            al = self.next_mem_value()
            ah = self.next_mem_value()
            a = utils.number_from_bytes([al, ah])
            i = self.y
            return (a + i) % 0x10000
        elif mode == 'ZPX':
            # Zero page,X: index wraps within the zero page.
            a = self.next_mem_value()
            i = self.x
            return (a + i) % 0x100
        elif mode == 'ZPY':
            # Zero page,Y: index wraps within the zero page.
            a = self.next_mem_value()
            i = self.y
            return (a + i) % 0x100
        elif mode == 'IND':
            # Indirect: the operand is a pointer to the real address.
            tal = self.next_mem_value()
            tah = self.next_mem_value()
            ta = utils.number_from_bytes([tal, tah])
            # Emulate the 6502 hardware bug: fetching the pointer's high
            # byte does not carry into the next page, so ($xxFF) wraps
            # within the same page.
            ta2 = (ta & 0xFF00) | ((ta + 1) & 0x00FF)
            al = self.memory[ta]
            ah = self.memory[ta2]
            a = utils.number_from_bytes([al, ah])
            return a
        elif mode == 'INX':
            # (Indirect,X): zero-page pointer at (operand + X) % 0x100.
            t = self.next_mem_value()
            i = self.x
            ta = (t + i) % 0x100
            ta2 = (ta + 1) % 0x100
            al = self.memory[ta]
            ah = self.memory[ta2]
            a = utils.number_from_bytes([al, ah])
            return a
        elif mode == 'INY':
            # (Indirect),Y: read a zero-page pointer, then add Y.
            ta = self.next_mem_value()
            ta2 = (ta + 1) % 0x100
            al = self.memory[ta]
            ah = self.memory[ta2]
            a = utils.number_from_bytes([al, ah])
            i = self.y
            return (a + i) % 0x10000
        elif mode == 'REL':
            # Relative: signed displacement from the updated PC.
            diff = self.next_mem_value()
            diff = utils.number_from_bytes([diff], signed=True)
            pc = self.pc
            return (pc + diff) % 0x10000
        else:
            raise ValueError('错误的寻址模式:<{}>'.format(mode))
def execute(self):
# for debug
info = {}
if config.DEBUG:
info.update(self.dump_registers())
op, addr, mode = self._prepare()
if config.DEBUG:
info['op'] = op
info['address'] = addr if addr is not None else -1
utils.log(ld.LogDiffer.log_line_from_info(info))
self._execute(op, addr, mode)
def _prepare(self):
c = self.next_mem_value()
op, mode = self.opcodes[c]
addr = self.address_from_mode(mode)
return op, addr, mode
def _execute(self, op: str, addr: tp.Optional[int], mode: str):
self.instructions[op](addr, mode)
def push(self, value: int):
sp = self.sp
addr = sp + 0x0100
self.memory[addr] = value
self.sp = sp - 1
def pop(self):
sp = self.sp
sp += 1
addr = sp + 0x0100
self.sp = sp
v = self.memory[addr]
return v
    def interrupt(self, name: str):
        """Service an interrupt: push state (NMI only) and load its vector.

        Args:
            name: 'NMI' or 'RESET'.

        Raises:
            ValueError: for an unknown interrupt name.
        """
        if name == 'NMI':
            # Push PC, high byte first, then the status register.
            pc = self.pc
            v = pc
            self.push((v & 0xff00) >> 8)
            self.push(v & 0x00ff)
            # Only the pushed copy of the status register has its B flag set.
            # NOTE(review): on a real 6502 the B flag is pushed as 0 for
            # NMI/IRQ and as 1 only for BRK/PHP — confirm this is intended.
            s = copy.copy(self.status)
            s.break_command = 1
            self.push(s.value)
            al_pos = 0xfffa
        elif name == 'RESET':
            # RESET pushes nothing; it only loads the vector.
            al_pos = 0xfffc
        else:
            raise ValueError('错误的 interrupt: <{}>'.format(name))
        # Jump to the little-endian handler address stored at the vector.
        al = self.memory[al_pos]
        ah = self.memory[al_pos + 1]
        addr = utils.number_from_bytes([al, ah])
        self.pc = addr
class _InstructionSet(object):
    def __init__(self, cpu: NesCPU):
        """Bind this instruction set to the CPU whose state it mutates."""
        self.cpu = cpu
def __getitem__(self, item):
return self.__getattribute__(item)
def value_from_address(self, address: tp.Optional[int], mode: str):
cpu = self.cpu
if mode in ('IMM', 'IMP'):
# 立即寻址 和 隐含寻址 情况
return address
else:
return cpu.memory[address]
def JMP(self, address: int, mode: str):
cpu = self.cpu
cpu.pc = address
def LDX(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
cpu.x = v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
def STX(self, address: int, mode: str):
cpu = self.cpu
v = cpu.x
cpu.memory[address] = v
def JSR(self, address: int, mode: str):
cpu = self.cpu
pc = cpu.pc
v = pc - 1
cpu.push((v & 0xff00) >> 8)
cpu.push(v & 0x00ff)
cpu.pc = address
def NOP(self, address: int, mode: str):
# do nothing
pass
def SEC(self, address: int, mode: str):
cpu = self.cpu
cpu.status.carry = 1
def BCS(self, address: int, mode: str):
cpu = self.cpu
if cpu.status.carry == 1:
cpu.pc = address
def CLC(self, address: int, mode: str):
cpu = self.cpu
cpu.status.carry = 0
def BCC(self, address: int, mode: str):
cpu = self.cpu
if cpu.status.carry == 0:
cpu.pc = address
def LDA(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
cpu.a = v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
def BEQ(self, address: int, mode: str):
cpu = self.cpu
if cpu.status.zero == 1:
cpu.pc = address
def BNE(self, address: int, mode: str):
cpu = self.cpu
if cpu.status.zero == 0:
cpu.pc = address
def STA(self, address: int, mode: str):
cpu = self.cpu
v = cpu.a
cpu.memory[address] = v
def BIT(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
a = cpu.a
cpu.status.overflow = (v >> 6) & 1
cpu.status.set_negative(v)
cpu.status.set_zero(v & a)
def BVS(self, address: int, mode: str):
cpu = self.cpu
if cpu.status.overflow == 1:
cpu.pc = address
def BVC(self, address: int, mode: str):
cpu = self.cpu
if cpu.status.overflow == 0:
cpu.pc = address
def BPL(self, address: int, mode: str):
cpu = self.cpu
if cpu.status.negative == 0:
cpu.pc = address
def RTS(self, address: int, mode: str):
cpu = self.cpu
vl = cpu.pop()
vh = cpu.pop()
v = utils.number_from_bytes([vl, vh])
pc = v + 1
cpu.pc = pc
def SEI(self, address: int, mode: str):
cpu = self.cpu
cpu.status.interrupt = 1
def SED(self, address: int, mode: str):
cpu = self.cpu
cpu.status.decimal = 1
def PHP(self, address: int, mode: str):
cpu = self.cpu
# 只有「被压入栈」的 status 的 B flag 被置为 1
s = copy.copy(cpu.status)
s.break_command = 1
cpu.push(s.value)
def PLA(self, address: int, mode: str):
cpu = self.cpu
v = cpu.pop()
cpu.a = v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
def AND(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
r = cpu.a
v = r & v
cpu.a = v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
def CMP(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
r = cpu.a
v = r - v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
cpu.status.carry = int(v >= 0)
def CLD(self, address: int, mode: str):
cpu = self.cpu
cpu.status.decimal = 0
def PHA(self, address: int, mode: str):
cpu = self.cpu
v = cpu.a
cpu.push(v)
def PLP(self, address: int, mode: str):
cpu = self.cpu
v = cpu.pop()
s = _Status(v)
# 从栈里弹出的值,外面不会作用到 P 的 B flag 上
s.break_command = cpu.status.break_command
cpu.status = s
def BMI(self, address: int, mode: str):
cpu = self.cpu
if cpu.status.negative == 1:
cpu.pc = address
def ORA(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
r = cpu.a
v = r | v
cpu.a = v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
def CLV(self, address: int, mode: str):
cpu = self.cpu
cpu.status.overflow = 0
def EOR(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
r = cpu.a
v = r ^ v
cpu.a = v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
def ADC(self, address: int, mode: str):
cpu = self.cpu
mvalue = self.value_from_address(address, mode)
v = mvalue
r = cpu.a
c = cpu.status.carry
v = r + v + c
# C flag: set if overflow
if v > 255:
v -= 256
cpu.status.carry = 1
else:
cpu.status.carry = 0
cpu.a = v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
# 处理 v flag
v = utils.number_from_bytes([mvalue], signed=True)
r = utils.number_from_bytes([r], signed=True)
v = r + v + c
cpu.status.overflow = int(v > 128 or v < -127)
def LDY(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
cpu.y = v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
def CPY(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
r = cpu.y
v = r - v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
cpu.status.carry = int(v >= 0)
def CPX(self, address: int, mode: str):
cpu = self.cpu
v = self.value_from_address(address, mode)
r = cpu.x
v = r - v
cpu.status.set_negative(v)
cpu.status.set_zero(v)
cpu.status.carry = int(v >= 0)
def SBC(self, address: int, mode: str):
cpu = self.cpu
mvalue = self.value_from_address(address, mode)
v = | |
string != values['string']:
logging.warning((
'String mismatch for LCID: 0x{0:08x}, '
'file version: {1:s}, string identifier: {2:s}.\n'
'Found: {3:s}\nStored: {4:s}\n').format(
language_identifier, message_resource_file.file_version,
string_identifier, string, values['string']))
elif number_of_values != 0:
logging.warning((
'More than one string found for LCID: 0x{0:08x}, '
'file version: {1:s}, string identifier: {2:s}.').format(
language_identifier, message_resource_file.file_version,
string_identifier))
# TODO: warn if new string has been found.
insert_values = False
if insert_values:
values = [string_identifier, string]
self._database_file.InsertValues(table_name, column_names, values)
  def _WriteStringTable(
      self, message_resource_file, string_table, language_identifier):
    """Writes a string table for a specific language identifier.

    Creates the per-language message table if needed, then writes every
    string in the table.

    Args:
      message_resource_file (MessageResourceFile): message resource file.
      string_table (pywrc.strings): string table.
      language_identifier (int): language identifier (LCID).
    """
    number_of_strings = string_table.get_number_of_strings(
        language_identifier)
    if number_of_strings > 0:
      message_file_key = self._GetMessageFileKey(message_resource_file)
      if message_file_key is None:
        logging.warning('Missing message file key for: {0:s}'.format(
            message_resource_file.windows_path))
      # NOTE(review): execution continues even when message_file_key is
      # None, so a NULL key may be written below — confirm this is intended.
      self._WriteStringTableLanguage(message_file_key, language_identifier)
      # Table name includes the file version (dots mapped to underscores)
      # so multiple versions of the same file can coexist.
      table_name = 'message_table_0x{0:08x}'.format(language_identifier)
      if message_resource_file.file_version:
        table_name = '{0:s}_{1:s}'.format(
            table_name, message_resource_file.file_version)
        table_name = re.sub(r'\.', r'_', table_name)
      has_table = self._database_file.HasTable(table_name)
      if not has_table:
        column_definitions = ['string_identifier TEXT', 'string TEXT']
        self._database_file.CreateTable(table_name, column_definitions)
      for string_index in range(0, number_of_strings):
        self._WriteString(
            message_resource_file, string_table, language_identifier,
            string_index, table_name, has_table)
def _WriteStringTableLanguage(self, message_file_key, language_identifier):
"""Writes a string table language.
Args:
message_file_key (int): message file key.
language_identifier (int): language identifier (LCID).
"""
table_name = 'string_table_languages'
column_names = ['lcid', 'message_file_key', 'identifier']
has_table = self._database_file.HasTable(table_name)
if not has_table:
column_definitions = [
'lcid TEXT', 'message_file_key INT', 'identifier TEXT']
self._database_file.CreateTable(table_name, column_definitions)
if not has_table:
insert_values = True
else:
condition = 'lcid = "0x{0:08x}" AND message_file_key = "{1:d}"'.format(
language_identifier, message_file_key)
values_list = list(self._database_file.GetValues(
[table_name], column_names, condition))
number_of_values = len(values_list)
insert_values = number_of_values == 0
if insert_values:
values = [
'0x{0:08x}'.format(language_identifier), message_file_key,
definitions.LANGUAGES.get(language_identifier, ['', ''])[0]]
self._database_file.InsertValues(table_name, column_names, values)
def _WriteStringTables(self):
"""Writes the string tables."""
string_table = self._message_resource_file.GetStringResource()
if not string_table:
return
try:
number_of_languages = string_table.get_number_of_languages()
except IOError as exception:
number_of_languages = 0
logging.warning((
'Unable to retrieve number of languages from: {0:s} '
'with error: {1:s}.').format(self._message_resource_file, exception))
if number_of_languages > 0:
for language_identifier in string_table.language_identifiers:
self._WriteStringTable(
self._message_resource_file, string_table, language_identifier)
  def WriteResources(self):
    """Writes the resources.

    Writes the message file entry and its message tables to the database.
    """
    self._WriteMessageFile(self._message_resource_file)
    self._WriteMessageTables()
    # TODO: only write the string resources of Event Log parameter files.
    # self._WriteStringTables()
class ResourcesSqlite3DatabaseReader(Sqlite3DatabaseReader):
  """Class to represent an Event Log resources sqlite3 database reader."""

  def _GetEventLogProviderKey(self, log_source):
    """Retrieves the Event Log provider key.

    Args:
      log_source (str): Event Log source.

    Returns:
      int: an Event Log provider key or None if not available.

    Raises:
      IOError: if more than one value is found in the database.
      OSError: if more than one value is found in the database.
    """
    table_names = ['event_log_providers']
    column_names = ['event_log_provider_key']
    condition = 'log_source == "{0:s}"'.format(log_source)
    values_list = list(self._database_file.GetValues(
        table_names, column_names, condition))

    number_of_values = len(values_list)
    if number_of_values == 0:
      return None

    if number_of_values == 1:
      values = values_list[0]
      return values['event_log_provider_key']

    raise IOError('More than one value found in database.')

  def _GetMessage(self, message_file_key, lcid, message_identifier):
    """Retrieves a specific message from a specific message table.

    Args:
      message_file_key (int): message file key.
      lcid (int): language code identifier (LCID).
      message_identifier (int): message identifier.

    Returns:
      str: the message string or None if not available.

    Raises:
      IOError: if more than one value is found in the database.
      OSError: if more than one value is found in the database.
    """
    table_name = 'message_table_{0:d}_0x{1:08x}'.format(message_file_key, lcid)

    has_table = self._database_file.HasTable(table_name)
    if not has_table:
      return None

    column_names = ['message_string']
    condition = 'message_identifier == "0x{0:08x}"'.format(message_identifier)
    values = list(self._database_file.GetValues(
        [table_name], column_names, condition))

    number_of_values = len(values)
    if number_of_values == 0:
      return None

    if number_of_values == 1:
      return values[0]['message_string']

    raise IOError('More than one value found in database.')

  def _GetMessageFileKeys(self, event_log_provider_key):
    """Retrieves the message file keys.

    Args:
      event_log_provider_key (int): the Event Log provider key.

    Yields:
      int: a message file key.
    """
    table_names = ['message_file_per_event_log_provider']
    column_names = ['message_file_key']
    condition = 'event_log_provider_key == {0:d}'.format(
        event_log_provider_key)

    generator = self._database_file.GetValues(
        table_names, column_names, condition)

    # pylint: disable=not-an-iterable
    for values in generator:
      yield values['message_file_key']

  def _GetMessageFilenames(self, log_source, message_file_type):
    """Retrieves the message filenames of a specific Event Log provider.

    Args:
      log_source (str): Event Log source.
      message_file_type (str): message file type.

    Returns:
      list[str]: message filenames.
    """
    table_names = [
        'event_log_providers', 'message_file_per_event_log_provider',
        'message_files']
    column_names = ['message_files.path']
    condition = (
        '{0:s}.log_source == "{3:s}" AND '
        '{1:s}.message_file_type == "{4:s}" AND '
        '{0:s}.event_log_provider_key == {1:s}.event_log_provider_key AND '
        '{1:s}.message_file_key == {2:s}.message_file_key').format(
            'event_log_providers', 'message_file_per_event_log_provider',
            'message_files', log_source, message_file_type)

    message_filenames = []
    for values in self._database_file.GetValues(
        table_names, column_names, condition):
      message_filename = values['message_files.path']
      message_filenames.append(message_filename)

    return message_filenames

  def _GetMessages(self, message_file_key, lcid):
    """Retrieves the messages of a specific message table.

    Args:
      message_file_key (int): message file key.
      lcid (int): language code identifier (LCID).

    Yields:
      tuple[int, str]: message identifier and message string.
    """
    table_name = 'message_table_{0:d}_0x{1:08x}'.format(message_file_key, lcid)

    has_table = self._database_file.HasTable(table_name)
    if has_table:
      column_names = ['message_identifier', 'message_string']
      condition = ''

      for values in self._database_file.GetValues(
          [table_name], column_names, condition):
        yield values['message_identifier'], values['message_string']

  def GetEventLogProviders(self):
    """Retrieves the Event Log providers.

    Yields:
      EventLogProvider: an Event Log provider.
    """
    table_names = ['event_log_providers']
    column_names = ['log_source', 'provider_guid']
    condition = ''

    # Materialize the providers first so the subsequent filename queries do
    # not interleave with the provider query on the same database file.
    event_log_providers = []
    for values in self._database_file.GetValues(
        table_names, column_names, condition):
      event_log_provider = resources.EventLogProvider(
          None, values['log_source'], values['provider_guid'])
      event_log_providers.append(event_log_provider)

    for event_log_provider in event_log_providers:
      message_filenames = self._GetMessageFilenames(
          event_log_provider.log_source,
          definitions.MESSAGE_FILE_TYPE_CATEGORY)
      event_log_provider.SetCategoryMessageFilenames(message_filenames)

      message_filenames = self._GetMessageFilenames(
          event_log_provider.log_source, definitions.MESSAGE_FILE_TYPE_EVENT)
      event_log_provider.SetEventMessageFilenames(message_filenames)

      message_filenames = self._GetMessageFilenames(
          event_log_provider.log_source,
          definitions.MESSAGE_FILE_TYPE_PARAMETER)
      event_log_provider.SetParameterMessageFilenames(message_filenames)

      yield event_log_provider

  def GetMessage(self, log_source, lcid, message_identifier):
    """Retrieves a specific message for a specific Event Log source.

    Args:
      log_source (str): Event Log source.
      lcid (int): language code identifier (LCID).
      message_identifier (int): message identifier.

    Returns:
      str: the message string or None if not available.
    """
    event_log_provider_key = self._GetEventLogProviderKey(log_source)
    if not event_log_provider_key:
      return None

    # Note: the previous version checked "if not generator:" on the
    # generator object itself; a generator is always truthy so that check
    # was dead code and has been removed.  Stop at the first message found.
    message_string = None
    for message_file_key in self._GetMessageFileKeys(event_log_provider_key):
      message_string = self._GetMessage(
          message_file_key, lcid, message_identifier)

      if message_string:
        break

    return message_string

  def GetMessages(self, log_source, lcid):
    """Retrieves the messages of a specific Event Log source.

    Args:
      log_source (str): Event Log source.
      lcid (int): language code identifier (LCID).

    Yields:
      tuple[int, str]: message identifier and message string.
    """
    event_log_provider_key = self._GetEventLogProviderKey(log_source)
    if event_log_provider_key:
      for message_file_key in self._GetMessageFileKeys(event_log_provider_key):
        for message_identifier, message_string in self._GetMessages(
            message_file_key, lcid):
          yield message_identifier, message_string

  def GetMetadataAttribute(self, attribute_name):
    """Retrieves the metadata attribute.

    Args:
      attribute_name (str): name of the metadata attribute.

    Returns:
      str: value of the metadata attribute or None.

    Raises:
      IOError: if more than one value is found in the database.
      OSError: if more than one value is found in the database.
    """
    table_name = 'metadata'

    has_table = self._database_file.HasTable(table_name)
    if not has_table:
      return None

    column_names = ['value']
    condition = 'name == "{0:s}"'.format(attribute_name)
    values = list(self._database_file.GetValues(
        [table_name], column_names, condition))

    number_of_values = len(values)
    if number_of_values == 0:
      return None

    if number_of_values == 1:
      return values[0]['value']

    raise IOError('More than one value found in database.')
class ResourcesSqlite3DatabaseWriter(Sqlite3DatabaseWriter):
"""Class to represent a sqlite3 Event Log resources database writer."""
# Message string specifiers that are considered white space.
_WHITE_SPACE_SPECIFIER_RE = re.compile(r'(%[0b]|[\r\n])')
# Message string specifiers that expand to text.
_TEXT_SPECIFIER_RE = re.compile(r'%([ .!%nrt])')
# Curly brackets in a message string.
_CURLY_BRACKETS = re.compile(r'([\{\}])')
# Message string specifiers that expand to a variable place holder.
_PLACE_HOLDER_SPECIFIER_RE = re.compile(r'%([1-9][0-9]?)[!]?[s]?[!]?')
  def __init__(self, string_format='wrc'):
    """Initializes the database writer.

    Args:
      string_format (Optional[str]): string format. The default is the Windows
          Resource (wrc) format.
    """
    super(ResourcesSqlite3DatabaseWriter, self).__init__()
    # String format used when writing message strings.
    self._string_format = string_format
def _GetEventLogProviderKey(self, event_log_provider):
"""Retrieves the key of an Event Log provider.
Args:
event_log_provider (EventLogProvider): event log provider.
Returns:
int: the Event Log provider key or None if no such value.
Raises:
IOError: if more than one value is found in the database.
OSError: if more than one value is found in the database.
"""
table_names = ['event_log_providers']
column_names = ['event_log_provider_key']
condition = 'log_source = "{0:s}"'.format(
event_log_provider.log_source)
values_list = list(self._database_file.GetValues(
table_names, column_names, condition))
number_of_values = len(values_list)
if number_of_values == 0:
return None
if number_of_values == 1:
values = values_list[0]
return values['event_log_provider_key']
raise IOError('More than one value found in database.')
def _GetMessageFileKey(self, message_file):
"""Retrieves the key of a message file.
Args:
message_file (MessageFile): message file.
Returns:
int: the message file key or None if no such value.
Raises:
IOError: if more than one value is found in the database.
OSError: if more than one value is found in the database.
"""
table_names = ['message_files']
column_names = ['message_file_key']
condition = 'LOWER(path) = LOWER("{0:s}")'.format(
message_file.windows_path)
values_list = list(self._database_file.GetValues(
table_names, column_names, condition))
number_of_values = len(values_list)
if | |
## @metapackage.py
# Object to simplify unpacking/building/cleaning of ASKAPsoft systems
# metapackages
#
# @copyright (c) 2007-2013 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# <EMAIL>
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
# @author <NAME> <<EMAIL>>
#
import os
import datetime
import sys
import socket
from builder import Builder
import askapdev.rbuild.utils as utils
## Implementation of Builder for a metapackage. A metapackage is a package
# that does not contain any software, but instead contains configuration files
# and dependencies. This metapackage is used to create a unique software release
# for a collection of several packages, for example TOS, CP.
# It copies directory tree specified in the constructor into install directory.
# If Macro Substitutions and Include (MSI) files and directories are added it requires the msi tool (part of EPICS
# extensions) to be present in the dependencies. MSI is used to create final files from templates and/or
# substitutions files.
# It also creates config jar files specified in the add_config_jar_files() method.
# It also creates a RELEASE information file that includes: meta-package name, version, username, hostname and
# all the dependencies list.
# It overwrites _install() method.
class Bunch(object):
    """A minimal record type: every keyword argument becomes an attribute.

    Example: Bunch(outdir='build', fmode=None).outdir == 'build'
    """
    def __init__(self, **kwds):
        # Install each keyword as an instance attribute.
        for attr_name, attr_value in kwds.items():
            setattr(self, attr_name, attr_value)
class MetaPackage(Builder):
def __init__(self, metapkgname=None, datadir=None, msibasedir=None, jarbasedir=None):
## The constructor sets up a package build "environment"
# @param self The current object.
# @param metapkgname The name of the meta-package directory. By default the
# current directory name is used (basename of current directory).
# @param datadir The base directory name to be copied recursively in install dir, i.e. every file and
# subdirectory will be copied recursively into install directory. Defaults to "files/data".
# @param msibasedir The base directory name of the msi template/substitutions files and subdirectories.
# It is pre-pended to every input directory entered in add_msi_template() and add_msi_template_dir() calls.
# Defaults to "files/msi".
# @param jarbasedir The base directory name of the jar configuration directories. It is pre-pended to
# input directories entered in the add_config_jar_dir() method.
Builder.__init__(self, pkgname='.')
self._quiet = False
if '-q' in self._comopts or '--quiet' in self._comopts:
self._quiet = True
self._versionstr = str(utils.get_svn_branch_info())
self._metapkgname = metapkgname or os.path.basename(os.path.abspath(os.curdir))
self._datadir = datadir or os.path.join('files', 'data')
# List of directories that needs to be copied to installdir
self._datatrees = []
if os.path.exists(self._datadir):
self._datatrees.append(self._datadir)
# List of jar command and outputdir that needs to be executed
self._jar_cmdlist = []
self._jarcmd = 'jar cvf'
self._jarbasedir = jarbasedir or os.path.join('files', 'jar')
# List of msi commands and outputdir that needs to be executed
self._msibasedir = msibasedir or os.path.join('files', 'msi')
self._msi_cmdlist = []
# Check that msi tool is in the dependencies
try:
msipath = self.dep.get_install_path('msi')
self._msi_in_dep = True
except:
msipath = ''
self._msi_in_dep = False
ea = self.dep.get_env().get('EPICS_HOST_ARCH', None)
if ea is None:
print "Couldn't determine EPICS architecture"
sys.exit(1)
os.environ['EPICS_HOST_ARCH'] = ea
self.epics_host_arch = ea
self._msicmd = ''
if self._msi_in_dep:
self._msicmd = os.path.join(msipath, 'bin', ea, 'msi')
    def _add_msi_cmd(self, template=None, subsfile=None, outfile=None, includes=None, subs=None, fmode=None):
        # Forms the argument string for the msi command and queues it (the
        # queue is consumed later, at install time).
        # Options are prepended as they are added, so the final string reads:
        #   [-I<inc> ...] [-M <macro> ...] [-S <subsfile>] -o <outfile> [<template>]
        # fmode, if given, is the file mode to apply to the generated output file.
        if outfile is None or outfile == '':
            print "Warning! adding a msi command without output file (template=%s, subsfile=%s)" % (template, subsfile)
            return
        # Extract the output file path (kept on the queued entry so the
        # directory can be created before msi runs)
        outpath = os.path.split(outfile)[0]
        msiargs = ''
        if template is not None:
            msiargs = "%s" % template
        # The -o option goes immediately before the template argument
        msiargs = "-o %s %s" % (outfile, msiargs)
        if subsfile is not None and subsfile != '':
            msiargs = "-S %s %s" % (subsfile, msiargs)
        # Macro definitions (-M), prepended ahead of -S/-o
        msisubs = ''
        if subs is not None:
            for m in subs:
                msisubs = "-M %s %s" % (m, msisubs)
            msiargs = "%s %s" % (msisubs, msiargs)
        # Include search paths (-I) end up first on the command line
        msiinc = ''
        if includes is not None:
            for dir in includes:
                msiinc = "-I%s %s" % (dir,msiinc)
            msiargs = "%s %s" % (msiinc, msiargs)
        self._msi_cmdlist.append(Bunch(outdir=outpath, msiargs=msiargs, outfile=outfile, fmode=fmode))
## Add msi template file (invokes msi tool with a template input file)
# @param self The current object
# @param inputfile The input file name (including extension) and path
# relative to the base directory (self._msibasedir or basedir argument).
# @param basedir The base directory name (defaults to self._msibasedir) to
# be pre-pended to the inputfile and include paths to form the full input path template to the msi
# tool.
# @param outdir The output directory relative to the install directory.
# Defaults to head of inputfile (path until last /).
# @param templinc Include path list for searching included templates relative (-I) to msi base
# directory. By default it adds msi base directory into the include paths.
# @param subs Macro values list (-M argument to msi tool).
def add_msi_template(self, inputfile, basedir=None, outdir=None, templinc=None, subs=None):
bdir = basedir or self._msibasedir
outputdir = outdir or os.path.split(inputfile)[0]
outfile = os.path.splitext(os.path.basename(inputfile))[0]
p = os.path.split(inputfile)[0]
if p == '':
newinc = [bdir]
else:
newinc = [os.path.join(bdir, p)]
if templinc is not None:
for inc in templinc:
newinc.append(os.path.join(bdir, inc))
fmode = os.stat(os.path.join(bdir, inputfile)).st_mode
self._add_msi_cmd(os.path.join(bdir,inputfile), '', os.path.join(self._installdir, outputdir, outfile), includes=newinc, subs=subs, fmode=fmode)
## Add msi template directory. Searches for ".template" files and add them to the msi list
# @param self The current object
# @param inputdir The input directory name relative to base directory (see next param).
# @param basedir The base directory name (defaults to self._msibasedir) to
# be pre-pended to the input directory and include paths to form the full input path to the msi tool.
# @param outdir The output directory relative to the install directory.
# Defaults to input directory.
# @param templinc Include path list for searching included templates relative (-I) to msi base
# directory. By default it adds msi base directory into the include paths.
# @param subs Macro values list (-M argument to msi tool) to be applied to the entire directory.
def add_msi_template_dir(self, inputdir, basedir=None, outdir=None, templinc=None, subs=None):
bdir = basedir or self._msibasedir
outputdir = outdir or inputdir
inputpath = os.path.join(bdir, inputdir)
for fname in os.listdir(inputpath):
if fname.endswith(".template"):
self.add_msi_template(inputfile=os.path.join(inputdir,fname), basedir=bdir, outdir=outputdir, templinc=templinc, subs=subs)
## Add msi substitutions directory. Searches for ".substitutions" files and add them to the msi command
# list using the dbTemplate format (no input template file required)
# @param self The current object
# @param inputdir The input directory name relative to base directory (see next param).
# @param basedir The base directory name (defaults to self._msibasedir) to
# be pre-pended to the input directory and include paths to form the full input path to the msi tool.
# @param outdir The output directory relative to the install directory.
# Defaults to input directory.
# @param templinc Include path list for searching included templates relative (-I) to msi base
# directory. By default it adds msi base directory into the include paths.
# @param subs Macro values list (-M argument to msi tool) to be applied to the entire directory.
def add_msi_subs_dir(self, inputdir, basedir=None, outdir=None, templinc=None, subs=None):
bdir = basedir or self._msibasedir
outputdir = outdir or inputdir
inputpath = os.path.join(bdir, inputdir)
newtinc = [inputpath]
if templinc is not None:
for t in templinc:
newtinc.append(os.path.join(bdir,t))
outpath = os.path.join(self._installdir, outputdir)
for fname in os.listdir(inputpath):
if fname.endswith(".substitutions"):
self._add_msi_cmd(subsfile=os.path.join(inputpath, fname),
outfile=os.path.join(outpath, os.path.splitext(fname)[0]),
includes=newtinc, subs=subs)
## Add extra directory tree into the directory trees to be copied recursively into | |
<reponame>sparkslabs/kamaelia_orig<filename>Sketches/MH/DocGen/DocExtractor.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==================================
Documentation extractor and writer
==================================
A command line tool for generating Axon and Kamaelia documentation
Features:
* outputs HTML (with some simple additional directives for the wiki engine
behind the Kamaelia website)
* python DocStrings are parsed as
`ReStructuredText<http://docutils.sourceforge.net/rst.html>`_ - permitting
rich formatting.
* can document modules, classes, functions, components and prefabs
* some customisation control over the output format
* generates hierarchical indices of modules
* fully qualified module, component, prefab, class and function names are
automagically converted to hyperlinks
* can incorporate test suite output into documentation
* can dump symbols (with mappings to URLs) to a file and/or read them in. This
makes it possible to cross-link, for example, from the Kamaelia documentation
back to the Axon documentation.
*This is not an Axon/Kamaelia system* - it is not built from components. However
it is probably sufficiently useful to be classified as a 'tool'!
Usage
-----
For help on command line options, use the ``--help`` option::
$> ./DocExtractor.py --help
The command lines currently being used to generate Kamaelia and Axon
documentation are as follows:
For Axon docs for the website::
$> ./DocExtractor.py --urlprefix /Docs/Axon/ \
--promotetitles \
--notjustcomponents \
--indexdepth 0 \
--root Axon \
--footerinclude Docs/Axon-footer.html \
--outdir <outputDirName> \
--dumpSymbolsTo <symbolFile> \
<repositoryDir>
For Kamaelia component docs for the website::
$> ./DocExtractor.py --urlprefix /Components/pydoc/ \
--root Kamaelia \
--footerinclude Components/pydoc-footer.html \
--outdir <outputDirName> \
--linkToSymbols <symbolFile> \
<repositoryDir>
Why differences?
* The ``--notjustcomponents`` flag which ensures that the classes and functions
making up Axon are documented.
* the ``--dumpSymbolsTo`` option creates a dump of all symbols documented.
``--linkToSymbols`` reads them in for generating crosslinks.
* The remaining differences change the formatting and style:
* ``promotetitles`` pushes module level doc string titles to the top of the
HTML pages generated - making them more prominent.
* ``indexDepth`` of 0 suppresses the generation of indexes of modules in a
given subdir. Axon's ``__init__.py`` contains a comprehensive table of
contents of its own, so the index is not needed.
Not quite plain HTML
--------------------
Although the output is primarily HTML, it does include Kamaelia website specific
directives (of the form ``[[foo][attribute=value] blah blah ]``
Since these directives use square brackets, any square brackets in the body text
are replaced with escaped codes.
Implementation Details
----------------------
Kamaelia.Support.Data.Repository is used to scan the specified code base and
extract info and documentation about modules, classes, functions, components and
prefabs.
All python doc strings are fed through the
`docutils <http://docutils.sourceforge.net/>`_ ReStructuredText parser to
generate formatted HTML output.
Internally individual documentation pages are built up entirely using docutils
node structures (a convenient intermediate tree representation of the document)
A separate renderer object is used to perform the final conversion to HTML, as
well as resolve the automatic linking of fully qualified names. It also
determines the appropriate filenames and URLs to use for individual pages and
hyperlinks between them.
A few bespoke extensions are added to the repertoire of docutils nodes to
represent specific directives that need to appear in the final output. These
are converted by the renderer object to final ``[[foo][bar=bibble] ...]``
format.
This is done this way to keep an unambiguous separation between directives and
documentation text. If directives were dropped in as plain text earlier in the
process then they might be confused with occurrences of square brackets in the
actual documentation text.
Code overview
* The *DocGenConfig* class encapsulates configuration choices and also carries
the extracted repository info.
* *__main__* invokes *generateDocumentationFiles()* and *generateIndices()* to
kick off the construction of all documentation files and index page files.
* The actual formatting and generation of pages is performed by the *docFormatter*
class.
* *formatXXXPage()* methods return document node trees representing the final
pages to be converted to HTML and written to disk.
* Other *formatXXX()* methods construct fragments of the document tree.
"""
import textwrap
import inspect
import pprint
import time
import os
import StringIO
import ConfigParser
from docutils import core
from docutils import nodes
#from Kamaelia.Support.Data import Repository
import Repository
ClassScope = Repository.ClassScope
FunctionScope = Repository.FunctionScope
ModuleScope = Repository.ModuleScope
ImportScope = Repository.ImportScope
from renderHTML import RenderHTML
from Nodes import boxright
class DocGenConfig(object):
    """Holds all configuration for a documentation generation run.

    Also carries the extracted repository information once scanning is done.
    The defaults set here are placeholders: __main__ overrides them from the
    command line options, so modify them there rather than here.
    """
    def __init__(self):
        super(DocGenConfig, self).__init__()
        # Extracted code repository data (filled in by __main__)
        self.repository = None
        self.filterPattern = ""
        # Output location and root of the documented package hierarchy
        self.docdir = "pydoc"
        self.docroot = "Kamaelia"
        # Index / table-of-contents depth limits
        self.treeDepth = 99
        self.tocDepth = 99
        # Content selection flags
        self.includeMethods = False
        self.includeModuleDocString = False
        self.includeNonKamaeliaStuff = False
        self.showComponentsOnIndices = False
        # Presentation flags
        self.promoteModuleTitles = False
        self.deemphasiseTrails = False
        self.pageFooter = ""
        # Test-suite output incorporation
        self.testOutputDir = None
        self.testExtensions = []
        # Symbol cross-linking between documentation sets
        self.dumpSymbolsTo = None
        self.loadSymbolsFrom = []
class docFormatter(object):
"""\
docFormatter(renderer,config) -> new docFormatter object
Object that formats documentation - methods of this class return document
trees (docutils node format) documenting whatever was requested.
Requires the renderer object so it can determine URIs for hyperlinks.
"""
    def __init__(self, renderer, config):
        # renderer -- RenderHTML instance, used to resolve URIs for hyperlinks
        # config   -- DocGenConfig carrying formatting options and repository data
        super(docFormatter,self).__init__()
        self.renderer = renderer
        self.config = config
        # Count of docstring parse problems encountered (see docString())
        self.errorCount=0

    # Class-level counter backing genUniqueRef(). Note that `self.uid += 1`
    # below creates an instance attribute on first use, so each formatter
    # instance counts independently, starting from this class value.
    uid = 0

    def genUniqueRef(self):
        """Return a unique-per-instance string id (monotonically increasing)."""
        uid = str(self.uid)
        self.uid+=1
        return uid
    def boxes(self,componentName, label, boxes):
        """\
        Format documentation for inboxes/outboxes. Returns a docutils document
        tree fragment.

        Keyword arguments:

        - componentName  -- name of the component the boxes belong to
        - label          -- typically "Inboxes" or "Outboxes"
        - boxes          -- dict containing (boxname, boxDescription) pairs
        """
        items = []
        for box in boxes:
            try:
                description = boxes[box]
            except KeyError:
                description = ""
            except TypeError:
                # Old-style components declare boxes as a plain list rather
                # than a dict; indexing it by box name raises TypeError.
                description = "Code uses old style inbox/outbox description - no metadata available"
            items.append((str(box), str(description)))

        # Build a titled section containing one bullet per box, rendered as
        # "**boxname** : description". The section's id/name make the boxes
        # linkable as "symbol-<componentName>.<label>".
        docTree= nodes.section('',
            ids = ["symbol-"+componentName+"."+label],
            names = ["symbol-"+componentName+"."+label],
            *[ nodes.title('', label),
               nodes.bullet_list('',
                  *[ nodes.list_item('', nodes.paragraph('', '',
                                nodes.strong('', boxname),
                                nodes.Text(" : "+boxdesc))
                     )
                     for (boxname,boxdesc) in items
                   ]
                  ),
             ]
        )
        return docTree
    def docString(self,docstring, main=False):
        """
        Parses a doc string in ReStructuredText format and returns a docutils
        document tree fragment.

        Removes any innate indentation from the raw doc strings before parsing.

        Also captures any warnings generated by parsing and writes them to
        stdout, incrementing the self.errorCount flag.
        """
        # NOTE(review): the 'main' parameter is unused here — confirm whether
        # callers rely on it before removing.
        if docstring is None:
            docstring = " "
        # Dedent the first line separately: in a typical docstring it follows
        # the opening quotes and so carries no indentation, which would make
        # textwrap.dedent() of the whole string a no-op.
        lines = docstring.split("\n")
        if len(lines)>1:
            line1 = textwrap.dedent(lines[0])
            rest = textwrap.dedent("\n".join(lines[1:]))
            docstring = line1+"\n"+rest
        else:
            docstring=textwrap.dedent(docstring)

        # Strip leading and trailing blank lines
        while len(docstring)>0 and docstring[0] == "\n":
            docstring = docstring[1:]
        while len(docstring)>0 and docstring[-1] == "\n":
            docstring = docstring[:-1]

        # halt_level=99 stops docutils from aborting on parse errors; problems
        # are captured from the warning stream and reported instead.
        warningStream=StringIO.StringIO()
        overrides={"warning_stream":warningStream,"halt_level":99}
        docTree=core.publish_doctree(docstring,settings_overrides=overrides)
        warnings=warningStream.getvalue()
        if warnings:
            print "!!! Warnings detected:"
            print warnings
            self.errorCount+=1
        warningStream.close()

        # Re-root the parsed children in a fresh section node
        return nodes.section('', *docTree.children)
def formatArgSpec(self, argspec):
return pprint.pformat(argspec[0]).replace("[","(").replace("]",")").replace("'","")
    def formatMethodDocStrings(self,className,X):
        """Return a docutils fragment documenting every method of scope X,
        each in its own linkable section ("symbol-<className>.<method>")."""
        docTree = nodes.section('')

        methods = X.listAllFunctions()
        methods.sort()   # alphabetical order by method name

        for (methodname,method) in methods:
            # Section heading of the form "name(arg1, arg2)"
            methodHead = methodname + "(" + method.argString + ")"

            docTree.append( nodes.section('',
                ids = ["symbol-"+className+"."+methodname],
                names = ["symbol-"+className+"."+methodname],
                * [ nodes.title('', methodHead) ]
                  + self.docString(method.doc)
                )
            )
        return docTree
    def formatInheritedMethods(self,className,CLASS):
        """\
        Return a docutils fragment listing methods CLASS inherits from its
        bases, grouped by base class, with each method hyperlinked to where
        it is documented. Methods already defined (or inherited from a more
        derived base, walking the MRO) are listed only once.
        """
        docTree = nodes.section('')
        overrides = [name for (name,method) in CLASS.listAllFunctions()] # copy of list of existing method names
        for base in CLASS.allBasesInMethodResolutionOrder:
            if isinstance(base,ClassScope):
                moduleName=base.module
                # Strip the docroot prefix so the repository lookup receives a
                # name relative to the documented tree
                findName=moduleName[len(self.config.docroot+"."):]
                module=self.config.repository.find(findName)
                try:
                    # NOTE(review): this rebinds the 'className' parameter to
                    # the base class's own name; the URI and title below use
                    # the rebound value — looks intended, but confirm.
                    className=module.locate(base)
                except ValueError:
                    continue
                # work out which methods haven't been already overriden
                methodList = []
                for (name,method) in base.listAllFunctions():
                    if name not in overrides:
                        # Record it so deeper bases in the MRO don't list it again
                        overrides.append(name)
                        uri = self.renderer.makeURI(moduleName,"symbol-"+className+"."+name)
                        methodList.append(nodes.list_item('',
                             nodes.paragraph('','',
                                nodes.reference('', nodes.Text(name), refuri=uri),
                                nodes.Text("(" + method.argString + ")"),
                                ),
                            )
                        )
                # Only emit a section for this base if it contributed methods
                if len(methodList)>0:
                    docTree.append( nodes.section('',
                        nodes.title('', "Methods inherited from "+moduleName+"."+className+" :"),
                        nodes.bullet_list('', *methodList),
                        )
                    )
        return docTree
def formatClassStatement(self, name, bases):
baseNames=[]
for baseName,base in bases:
baseNames.append(baseName)
return "class "+ name+"("+", ".join(baseNames)+")"
def formatPrefabStatement(self, name):
return "prefab: "+name
def formatComponent(self, moduleName, name, X):
# no class bases available from repository scanner
CLASSNAME = self.formatClassStatement(name, X.bases)
CLASSDOC = self.docString(X.doc)
INBOXES = self.boxes(name,"Inboxes", X.inboxes)
OUTBOXES = self.boxes(name,"Outboxes", X.outboxes)
if self.config.includeMethods and len(X.listAllFunctions()):
METHODS = [ nodes.section('',
nodes.title('', 'Methods defined here'),
boxright('',
nodes.paragraph('', '',
nodes.strong('', nodes.Text("Warning!"))
),
nodes.paragraph('', '',
nodes.Text("You should be using the inbox/outbox interface, not these methods (except construction). This documentation is designed as a roadmap as to their | |
anything!”
“I’d tackle the old man first,” said Hallen; “I think, on general
principles, he’s the one to make inquiries of before you go to the
ladies. Let’s go to him now.”
“No;” proposed Burdon, “let’s send for him to come here. This is away
from the house, and we can talk more freely.”
“I’ll go for him,” offered Allen, seeing they were determined to carry
out their plan.
“Not much!” said Burdon. “You’re just aching to put a flea in his ear!
You go for him, Hallen.”
The detective went to the house, and returned with <NAME> at his
side.
The suspected man stood straight and held himself fearlessly. Not an old
man, he was grayed with care and trouble, but this morning he seemed
strong and alert as any of them.
“Put your questions,” he said, briefly, as he seated himself on one of
the many seats beneath the old sycamore.
“First of all, who do you think killed <NAME>?”
This question was shot at him by Burdon, and all waited in silence for
the answer.
“I killed him myself,” was the straightforward reply.
“That settles it,” said Hallen, “it was one of the women.”
“What do you mean by that?” cried Wheeler, turning quickly toward the
speaker.
“I mean, that either your wife or daughter did the deed, and you are
taking the crime on yourself to save her.”
“No;” reasserted Wheeler, “you’re wrong. I killed Appleby for good
and sufficient reason. I’m not sorry, and I accept my fate.”
“Wait a minute,” said Hallen, as Keefe was about to protest; “where was
your daughter, <NAME>, when you killed your man?”
“I—I don’t know. I think she had gone to the fire—which had just broken
out.”
“You’re not sure——”
“I am not.”
“She had been with you, in the den?”
“I don’t know.”
“Well, I know. She had. She had been sitting in her favorite window-seat,
in the large bay, and was there while you and Mr. Appleby were talking
together. Also, she did not leave the room to go to the fire, for no one
saw her anywhere near the burning garage.”
“As to that, I can’t say,” went on Wheeler, slowly, “but she was not in
the den, to my knowledge, at the time of the shooting.”
“Very well, let that pass. Now, then, Mr. Wheeler, if you shot Mr.
Appleby, what did you afterward do with your revolver?”
“I—I don’t know.” The man’s face was convincing. His frank eyes testified
to the truth of his words. “I assure you, I don’t know. I was so—so
bewildered—that I must have dropped it—somewhere. I never thought of it
again.”
“But if you had merely dropped it, it must have been found. And it hasn’t
been.”
“Somebody else found it and secreted it,” suggested Hallen. “Probably Mr.
Wheeler’s wife or daughter.”
“Perhaps so,” assented Wheeler, calmly. “They might have thought to help
me by secreting it. Have you asked them?”
“Yes, and they deny all knowledge of it.”
“So do I. But surely it will be found.”
“It must be found. And, therefore, it is imperative that the rooms of the
ladies as well as your own rooms, sir, be thoroughly searched.”
“All right—go ahead and search!” Wheeler spoke sharply. “I’ve confessed
the crime, now waste no time in useless chattering. Get the evidence, get
the proofs, and let the law take its course.”
“You will not leave the premises,” put in Hallen, and his tone was that
of command rather than inquiry.
“I most certainly shall not,” declared Wheeler. “But I do ask you,
gentlemen, to trouble and annoy my wife and daughter as little as
possible. Their grief is sufficient reason for their being let alone.”
“H’m,” grunted Burdon. “Well, sir, I can promise not to trouble the
ladies more than is necessary—but I can’t help feeling necessity will
demand a great deal.”
<NAME> was next interviewed, and the confab took place in her own
sitting-room.
None of her family was allowed to be present, and the four men filed into
the room with various expressions of face. The two detectives were
stolid-looking, but eagerly determined to do their work, while Allen and
Keefe were alertly interested in finding out some way to be of help to
<NAME>.
She received the men quietly, even graciously, sensing what they had come
for.
“To start with, Mrs. Wheeler,” said Burdon, frankly but not unkindly,
“who do you think killed Mr. Appleby?”
“Oh—I don’t know—I don’t know,” she wailed, losing her calm and becoming
greatly agitated.
“Where were you when the shot was fired?” asked Hallen.
“I don’t know—I didn’t hear it——”
“Then you were up in your own room?”
“I suppose so—I don’t know.”
“You were up there when the fire broke out?”
“Yes—I think I was——”
“But you must know, <NAME>—that is, you must know where you were
when you first heard of the fire——”
“Yes, yes; I was up in my bedroom.”
“And who told you of the fire?”
“My maid—Rachel.”
“And then what did you do?”
“I—I—I don’t remember.”
“You ran downstairs, didn’t you?”
“I don’t remember——”
“Yes, you did!” Burdon took up the reins. “You ran downstairs, and just
as you got down to the den you saw—you saw your husband shoot Mr.
Appleby!”
His harsh manner, as he intended, frightened the nervous woman, and
reduced her to the verge of collapse.
But after a gasping moment, she recovered herself, and cried out: “I did
not! I shot <NAME> myself. That’s why I’m so agitated.”
“I knew it!” exclaimed Burdon. “<NAME>’s confession was merely to
save his wife. Now, <NAME>, I believe your story, and I want all
the particulars. First, why did you kill him?”
“Be—because he was my husband’s enemy—and I had stood it as long as I
could.”
“H’m. And what did you do with the weapon you used?”
“I threw it out of the window.”
“And it dropped on the lawn?”
“Not dropped; I threw it far out—as far as I could.”
“Oh, I see. Out of which window?”
“Why—why, the one in the den—the bay window.”
“But your daughter—<NAME>—was sitting in the bay window.”
“No, she was not,” <NAME> spoke emphatically now. “She was not in
the room at all. She had gone to the fire.”
“Oh, is that so? And then—what happened next?”
“Why—nothing. I—I ran upstairs again.”
“Appalled at what you had done?”
“Not appalled—so much as—as——”
“Unnerved?”
“Yes; unnerved. I fell on my bed, and Rachel looked after me.”
“Ah, yes; we will interview Rachel, and so save you further harrowing
details. Come on, men, let’s strike while these irons are hot.”
The four filed from the room, and Burdon spoke in a low tone, but
excitedly:
“Come quickly! There goes <NAME> across the lawn. We will take her
next. The maid, Rachel, can wait.”
Inwardly rebelling, but urged on by the others, <NAME> went along,
and as Burdon stopped Maida, on her quick walk across the lawn, Jeff put
his arm through that of the girl, and said: “Do as they tell you, dear.
It’s best to have this matter settled at once.”
Again the party grouped themselves under the old sycamore, and this time
Maida was the target for their queries.
“Tell me all you know of the case,” she said, peremptorily; “then I’ll
tell you what I know.”
“We know that the murder was committed by one of you three Wheelers,”
said Burdon, brutally. “Now, both your parents have confessed to being
the criminal——”
“What?” Maida cried, her face white and her eyes big and frightened.
“Yes, ma’am, just that! Now, what have you to say? Are you going to
confess also?”
“Of course I am! For I am the real criminal! Can’t you see that my father
and mother are both trying to shield me? I did it, because of that awful
man’s hold on my father! Take my confession, and do with me what you
will!”
“Here’s a state of things!” cried Burdon, truly surprised at this new
development.
“The girl is telling the truth,” exclaimed <NAME>, not because he
really thought so but his quick mind told him that it would be easier to
get a young girl acquitted than an older person, and he saw the
plausibility of the detectives’ theory that it must have been one of the
three Wheelers.
“All right,” Burdon went on, “then, <NAME>, enlighten us as to
details. Where’s the weapon?”
“I don’t have to tell you anything except that I did it. Do I, Jeffrey?
Do I, Mr. Keefe?” She looked at these two for help.
“No, <NAME>,” Keefe assured her, | |
<gh_stars>100-1000
"""
Some parts are adapted from https://github.com/cocodataset/cocoapi :
Copyright (c) 2014, <NAME> and <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
"""
For the remaining parts:
Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import numpy as np
from loguru import logger
from typing import Sequence, List, Dict, Union, Tuple
from nndet.evaluator import DetectionMetric
class COCOMetric(DetectionMetric):
def __init__(self,
classes: Sequence[str],
iou_list: Sequence[float] = (0.1, 0.5, 0.75),
iou_range: Sequence[float] = (0.1, 0.5, 0.05),
max_detection: Sequence[int] = (1, 5, 100),
per_class: bool = True,
verbose: bool = True):
"""
Class to compute COCO metrics
Metrics computed:
mAP over the IoU range specified by :param:`iou_range` at last value of :param:`max_detection`
AP values at IoU thresholds specified by :param:`iou_list` at last value of :param:`max_detection`
AR over max detections thresholds defined by :param:`max_detection` (over iou range)
Args:
classes (Sequence[str]): name of each class (index needs to correspond to predicted class indices!)
iou_list (Sequence[float]): specific thresholds where ap is evaluated and saved
iou_range (Sequence[float]): (start, stop, step) for mAP iou thresholds
max_detection (Sequence[int]): maximum number of detections per image
verbose (bool): log time needed for evaluation
"""
self.verbose = verbose
self.classes = classes
self.per_class = per_class
iou_list = np.array(iou_list)
_iou_range = np.linspace(iou_range[0], iou_range[1],
int(np.round((iou_range[1] - iou_range[0]) / iou_range[2])) + 1, endpoint=True)
self.iou_thresholds = np.union1d(iou_list, _iou_range)
self.iou_range = iou_range
# get indices of iou values of ious range and ious list for later evaluation
self.iou_list_idx = np.nonzero(iou_list[:, np.newaxis] == self.iou_thresholds[np.newaxis])[1]
self.iou_range_idx = np.nonzero(_iou_range[:, np.newaxis] == self.iou_thresholds[np.newaxis])[1]
assert (self.iou_thresholds[self.iou_list_idx] == iou_list).all()
assert (self.iou_thresholds[self.iou_range_idx] == _iou_range).all()
self.recall_thresholds = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.max_detections = max_detection
def get_iou_thresholds(self) -> Sequence[float]:
"""
Return IoU thresholds needed for this metric in an numpy array
Returns:
Sequence[float]: IoU thresholds [M], M is the number of thresholds
"""
return self.iou_thresholds
def compute(self,
results_list: List[Dict[int, Dict[str, np.ndarray]]],
) -> Tuple[Dict[str, float], Dict[str, np.ndarray]]:
"""
Compute COCO metrics
Args:
results_list (List[Dict[int, Dict[str, np.ndarray]]]): list with result s per image (in list)
per category (dict). Inner Dict contains multiple results obtained by :func:`box_matching_batch`.
`dtMatches`: matched detections [T, D], where T = number of thresholds, D = number of detections
`gtMatches`: matched ground truth boxes [T, G], where T = number of thresholds, G = number of
ground truth
`dtScores`: prediction scores [D] detection scores
`gtIgnore`: ground truth boxes which should be ignored [G] indicate whether ground truth
should be ignored
`dtIgnore`: detections which should be ignored [T, D], indicate which detections should be ignored
Returns:
Dict[str, float]: dictionary with coco metrics
Dict[str, np.ndarray]: None
"""
if self.verbose:
logger.info('Start COCO metric computation...')
tic = time.time()
dataset_statistics = self.compute_statistics(results_list=results_list)
if self.verbose:
toc = time.time()
logger.info(f'Statistics for COCO metrics finished (t={(toc - tic):0.2f}s).')
results = {}
results.update(self.compute_ap(dataset_statistics))
results.update(self.compute_ar(dataset_statistics))
if self.verbose:
toc = time.time()
logger.info(f'COCO metrics computed in t={(toc - tic):0.2f}s.')
return results, None
    def compute_ap(self, dataset_statistics: dict) -> dict:
        """
        Compute AP metrics (mAP over the IoU range plus AP at each listed IoU,
        optionally per class).

        Args:
            dataset_statistics (dict): statistics computed over the whole
                dataset (see :meth:`select_ap` for the expected keys; the
                `precision` entry is indexed as
                [num_iou_th, num_recall_th, num_classes, num_max_detections]).

        Returns:
            dict: mapping from metric-name strings (encoding IoU settings and
            max-detections threshold, optionally prefixed by the class name)
            to scalar AP values.
        """
        results = {}
        if self.iou_range: # mAP
            key = (f"mAP_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
                   f"MaxDet_{self.max_detections[-1]}")
            # mAP averages over the whole IoU grid at the largest max-detections setting
            results[key] = self.select_ap(dataset_statistics, iou_idx=self.iou_range_idx, max_det_idx=-1)
            if self.per_class:
                for cls_idx, cls_str in enumerate(self.classes): # per class results
                    key = (f"{cls_str}_"
                           f"mAP_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
                           f"MaxDet_{self.max_detections[-1]}")
                    results[key] = self.select_ap(dataset_statistics, iou_idx=self.iou_range_idx,
                                                  cls_idx=cls_idx, max_det_idx=-1)
        for idx in self.iou_list_idx: # AP@IoU
            key = f"AP_IoU_{self.iou_thresholds[idx]:.2f}_MaxDet_{self.max_detections[-1]}"
            results[key] = self.select_ap(dataset_statistics, iou_idx=[idx], max_det_idx=-1)
            if self.per_class:
                for cls_idx, cls_str in enumerate(self.classes): # per class results
                    key = (f"{cls_str}_"
                           f"AP_IoU_{self.iou_thresholds[idx]:.2f}_"
                           f"MaxDet_{self.max_detections[-1]}")
                    results[key] = self.select_ap(dataset_statistics,
                                                  iou_idx=[idx], cls_idx=cls_idx, max_det_idx=-1)
        return results
    def compute_ar(self, dataset_statistics: dict) -> dict:
        """
        Compute AR metrics (mAR over the IoU range at each max-detections
        threshold plus AR at each listed IoU, optionally per class).

        Args:
            dataset_statistics (dict): statistics computed over the whole
                dataset (see :meth:`select_ar` for the expected keys; the
                `recall` entry is indexed as
                [num_iou_th, num_classes, num_max_detections]).

        Returns:
            dict: mapping from metric-name strings (encoding IoU settings and
            max-detections threshold, optionally prefixed by the class name)
            to scalar AR values.
        """
        results = {}
        for max_det_idx, max_det in enumerate(self.max_detections): # mAR
            key = f"mAR_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_MaxDet_{max_det}"
            results[key] = self.select_ar(dataset_statistics, max_det_idx=max_det_idx)
            if self.per_class:
                for cls_idx, cls_str in enumerate(self.classes): # per class results
                    key = (f"{cls_str}_"
                           f"mAR_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
                           f"MaxDet_{max_det}")
                    results[key] = self.select_ar(dataset_statistics,
                                                  cls_idx=cls_idx, max_det_idx=max_det_idx)
        for idx in self.iou_list_idx: # AR@IoU
            # AR at a single IoU is reported at the largest max-detections setting
            key = f"AR_IoU_{self.iou_thresholds[idx]:.2f}_MaxDet_{self.max_detections[-1]}"
            results[key] = self.select_ar(dataset_statistics, iou_idx=idx, max_det_idx=-1)
            if self.per_class:
                for cls_idx, cls_str in enumerate(self.classes): # per class results
                    key = (f"{cls_str}_"
                           f"AR_IoU_{self.iou_thresholds[idx]:.2f}_"
                           f"MaxDet_{self.max_detections[-1]}")
                    results[key] = self.select_ar(dataset_statistics, iou_idx=idx,
                                                  cls_idx=cls_idx, max_det_idx=-1)
        return results
@staticmethod
def select_ap(dataset_statistics: dict, iou_idx: Union[int, List[int]] = None,
cls_idx: Union[int, Sequence[int]] = None, max_det_idx: int = -1) -> np.ndarray:
"""
Compute average precision
Args:
dataset_statistics (dict): computed statistics over dataset
`counts`: Number of thresholds, Number recall thresholds, Number of classes, Number of max
detection thresholds
`recall`: Computed recall values [num_iou_th, num_classes, num_max_detections]
`precision`: Precision values at specified recall thresholds
[num_iou_th, num_recall_th, num_classes, num_max_detections]
`scores`: Scores corresponding to specified recall thresholds
[num_iou_th, num_recall_th, num_classes, num_max_detections]
iou_idx: index of IoU values to select for evaluation(if None, all values are used)
cls_idx: class indices to select, if None all classes will be selected
max_det_idx (int): index to select max detection threshold from data
Returns:
np.ndarray: AP value
"""
prec = dataset_statistics["precision"]
if iou_idx is not None:
prec = prec[iou_idx]
if cls_idx is not None:
prec = prec[..., cls_idx, :]
prec = prec[..., max_det_idx]
return np.mean(prec)
@staticmethod
def select_ar(dataset_statistics: dict, iou_idx: Union[int, Sequence[int]] = None,
cls_idx: Union[int, Sequence[int]] = None,
max_det_idx: int = -1) -> np.ndarray:
"""
Compute average recall
Args:
dataset_statistics (dict): computed statistics over dataset
`counts`: Number of thresholds, Number recall thresholds, Number of classes, Number of max
detection thresholds
`recall`: | |
provided).
The model for the autoregressive process takes this convention:
s[n] = a1*s[n-1] + a2*s[n-2] + ... aP*s[n-P] + v[n]
where v[n] is a zero-mean white noise process with variance=sigma_v
Parameters
----------
s : ndarray
The sampled autoregressive random process
order : int
The order P of the AR system
sxx : ndarray (optional)
An optional, possibly unbiased estimate of the autocovariance of s
Returns
-------
a, ecov: The system coefficients and the estimated covariance
"""
if sxx is not None and type(sxx) == np.ndarray:
sxx_m = sxx[:order+1]
else:
sxx_m = ut.autocov(s)[:order+1]
phi = np.zeros((order+1, order+1), 'd')
sig = np.zeros(order+1)
# initial points for the recursion
phi[1,1] = sxx_m[1]/sxx_m[0]
sig[1] = sxx_m[0] - phi[1,1]*sxx_m[1]
for k in xrange(2,order+1):
phi[k,k] = (sxx_m[k]-np.dot(phi[1:k,k-1], sxx_m[1:k][::-1]))/sig[k-1]
for j in xrange(1,k):
phi[j,k] = phi[j,k-1] - phi[k,k]*phi[k-j,k-1]
sig[k] = sig[k-1]*(1 - phi[k,k]**2)
sigma_v = sig[-1]; ak = phi[1:,-1]
return ak, sigma_v
def MAR_est_LWR(s, order, sxx=None):
    """
    MAR estimation, using the LWR algorithm, as in Morf et al.

    Parameters
    ----------
    s : ndarray
        The sampled autoregressive random process

    order : int
        The order P of the AR system

    sxx : ndarray (optional)
        An optional, possibly unbiased estimate of the autocovariance of s.
        NOTE(review): this argument is currently ignored by the
        implementation; it is accepted only for signature symmetry with the
        scalar estimator.

    Returns
    -------
    a, ecov: The system coefficients and the estimated covariance
    """
    # Autocovariance sequence up to `order` lags, reordered lag-first for the
    # Levinson-Wiggins-Robinson recursion.
    lag_cov = ut.autocov_vector(s, nlags=order)
    coefs, err_cov = ut.lwr(lag_cov.transpose(2, 0, 1))
    return coefs, err_cov
def AR_psd(ak, sigma_v, Nfreqs=1024, sides='onesided'):
    """
    Compute the PSD of an autoregressive process from its coefficients and
    driving-noise variance.

    Parameters
    ----------
    ak : ndarray
        The AR coefficients a1 ... aP of the model
        s[n] = a1*s[n-1] + a2*s[n-2] + ... + aP*s[n-P] + v[n]

    sigma_v : float
        Variance of the zero-mean white-noise process v[n]

    Nfreqs : int
        The number of spacings on the frequency grid from [-PI,PI).
        If sides=='onesided', Nfreqs/2+1 frequencies are computed from [0,PI]

    sides : str (optional)
        Indicates whether to return a one-sided or two-sided PSD

    Returns
    -------
    (w, ar_psd)
    w : Array of normalized frequences from [-.5, .5) or [0,.5]
    ar_psd : A PSD estimate computed by sigma_v / |1-a(f)|**2 , where
             a(f) = DTFT(ak)
    """
    # compute the psd as |h(f)|**2, where h(f) is the transfer function..
    # for this model s[n] = a1*s[n-1] + a2*s[n-2] + ... aP*s[n-P] + v[n]
    # Taken as a FIR system from s[n] to v[n],
    # v[n] = w0*s[n] + w1*s[n-1] + w2*s[n-2] + ... + wP*s[n-P],
    # where w0 = 1, and wk = -ak for k>0
    # the transfer function here is H(f) = DTFT(w)
    # leading to Sxx(f) = Vxx(f) / |H(f)|**2 = sigma_v / |H(f)|**2
    w, hw = freq_response(sigma_v**0.5, a=np.concatenate(([1], -ak)),
                          Nfreqs=Nfreqs, sides=sides)
    ar_psd = (hw*hw.conj()).real
    # Double the one-sided spectrum so total power matches the two-sided form.
    return (w,2*ar_psd) if sides=='onesided' else (w,ar_psd)
def boxcar_filter(time_series,lb=0,ub=0.5,n_iterations=2):
    """
    Filters data into a frequency range.

    For each of the two bounds, a low-passed version is created by convolving
    with a box-car and then the low-passed version for the upper bound is added
    to the low-passed version for the lower bound subtracted from the signal,
    resulting in a band-passed version

    Parameters
    ----------

    time_series: float array
       the signal. NOTE: for 2-d input the rows are filtered in place
       (the input array is modified); 1-d input is copied first.

    lb : float, optional
      The cut-off frequency for the high-pass filtering as a proportion of the
      sampling rate. Default to 0

    ub : float, optional
      The cut-off frequency for the low-pass filtering as a proportion of the
      sampling rate. Default to 0.5 (Nyquist)

    n_iterations: int, optional
      how many rounds of smoothing to do. Default to 2.

    Returns
    -------
    float array:
      The signal, filtered

    NOTE(review): this implementation uses ``xrange`` and float-valued slice
    bounds (e.g. ``conv_s.shape[-1]/2 - np.floor(n/2.)``), so it relies on
    Python 2 / old-NumPy semantics — confirm before running under Python 3.
    """
    n = time_series.shape[-1]
    # Kernel length ~ 1/(2*ub) samples, normalized to sum to 1 (moving average).
    len_boxcar_ub = np.ceil(1 / (2.0*ub) )
    boxcar_ub = np.empty(len_boxcar_ub)
    boxcar_ub.fill(1.0/len_boxcar_ub)
    boxcar_ones_ub = np.ones_like(boxcar_ub)
    if lb==0:
        lb=None
    else:
        len_boxcar_lb = np.ceil(1 / (2.0*lb) )
        boxcar_lb = np.empty(len_boxcar_lb)
        boxcar_lb.fill(1.0/len_boxcar_lb)
        boxcar_ones_lb = np.ones_like(boxcar_lb)
    #If the time_series is a 1-d, we add a dimension, so that we can iterate
    #over 2-d inputs:
    if len(time_series.shape)==1:
        time_series = np.array([time_series])
    for i in xrange(time_series.shape[0]):
        if ub:
            #Start by applying a low-pass to the signal. Pad the signal on
            #each side with the initial and terminal signal value:
            pad_s = np.hstack((boxcar_ones_ub*time_series[i,0],time_series[i]))
            pad_s = np.hstack((pad_s, boxcar_ones_ub*time_series[i,-1]))
            #Filter operation is a convolution with the box-car(iterate,
            #n_iterations times over this operation):
            for iteration in xrange(n_iterations):
                conv_s = np.convolve(pad_s,boxcar_ub)
                #Extract the low pass signal by excising the central
                #len(time_series) points:
                time_series[i] = conv_s[conv_s.shape[-1]/2-np.floor(n/2.):
                                        conv_s.shape[-1]/2+np.ceil(n/2.)]
        #Now, if there is a high-pass, do the same, but in the end subtract out
        #the low-passed signal:
        if lb:
            pad_s = np.hstack((boxcar_ones_lb*time_series[i,0],time_series[i]))
            pad_s = np.hstack((pad_s, boxcar_ones_lb * time_series[i,-1]))
            #Filter operation is a convolution with the box-car(iterate,
            #n_iterations times over this operation):
            for iteration in xrange(n_iterations):
                conv_s = np.convolve(pad_s,boxcar_lb)
                #Extract the low pass signal by excising the central
                #len(time_series) points:
                s_lp = conv_s[conv_s.shape[-1]/2-np.floor(n/2.):
                              conv_s.shape[-1]/2+np.ceil(n/2.)]
            #Extract the high pass signal simply by subtracting the high pass
            #signal from the original signal:
            time_series[i] = time_series[i] - s_lp + np.mean(s_lp) #add mean
            #to make sure that there are no negative values. This also seems to
            #make sure that the mean of the signal (in % signal change) is close
            #to 0
    return time_series.squeeze()
#-------------------------------------------------------------------------------
#Coherency calculated using cached spectra
#-------------------------------------------------------------------------------
"""The idea behind this set of functions is to keep a cache of the windowed fft
calculations of each time-series in a massive collection of time-series, so
that this calculation doesn't have to be repeated each time a cross-spectrum is
calculated. The first function creates the cache and then, another function
takes the cached spectra and calculates PSDs and CSDs, which are then passed to
coherency_calculate and organized in a data structure similar to the one
created by coherence"""
def cache_fft(time_series,ij,lb=0,ub=None,
method=None,prefer_speed_over_memory=False,
scale_by_freq=True):
"""compute and cache the windowed FFTs of the time_series, in such a way
that computing the psd and csd of any combination of them can be done
quickly.
Parameters
----------
time_series : float array
An ndarray with time-series, where time is the last dimension
ij: list of tuples
Each tuple in this variable should contain a pair of
indices of the form (i,j). The resulting cache will contain the fft of
time-series in the rows indexed by the unique elements of the union of i
and j
lb,ub: float
Define a frequency band of interest, for which the fft will be cached
method: dict, optional
See :func:`get_spectra` for details on how this is used. For this set
of functions, 'this_method' has to be 'welch'
Returns
-------
freqs, cache
where: cache =
{'FFT_slices':FFT_slices,'FFT_conj_slices':FFT_conj_slices,
'norm_val':norm_val}
Notes
-----
- For these functions, only the Welch windowed periodogram ('welch') is
available.
- Detrending the input is not an option here, in order to save
time on an empty function call.
"""
if method is None:
method = {'this_method':'welch'} #The default
this_method = method.get('this_method','welch')
if this_method == 'welch':
NFFT = method.get('NFFT',64)
Fs = method.get('Fs',2*np.pi)
window = method.get('window',mlab.window_hanning)
n_overlap = method.get('n_overlap',int(np.ceil(NFFT/2.0)))
else:
raise ValueError("For cache_fft, spectral estimation method must be welch")
time_series = ut.zero_pad(time_series,NFFT)
#The shape of the zero-padded version:
n_channels, n_time_points = time_series.shape
# get all the unique channels in time_series that we are interested in by
# checking the ij tuples
all_channels = set()
for i,j in ij:
all_channels.add(i); all_channels.add(j)
n_channels = len(all_channels)
# for real time_series, ignore the negative frequencies
if np.iscomplexobj(time_series): n_freqs = NFFT
else: n_freqs = NFFT//2+1
#Which frequencies
freqs = ut.get_freqs(Fs,NFFT)
#If there are bounds, limit the calculation to within that band,
#potentially include the DC component:
lb_idx,ub_idx = ut.get_bounds(freqs,lb,ub)
n_freqs=ub_idx-lb_idx
#Make the window:
if mlab.cbook.iterable(window):
assert(len(window) == NFFT)
window_vals = window
else:
window_vals = window(np.ones(NFFT, time_series.dtype))
#Each fft needs to be normalized by the square of the norm of the window
#and, for consistency with newer versions of mlab.csd (which, in turn, are
#consistent with Matlab), normalize also by the sampling rate:
if scale_by_freq:
#This is the normalization factor for one-sided estimation, taking into
#account the sampling rate. This makes the PSD a density function, with
#units of dB/Hz, so that integrating over frequencies gives you the RMS
#(XXX this should be in the tests!).
norm_val = (np.abs(window_vals)**2).sum()*(Fs/2)
else:
norm_val = (np.abs(window_vals)**2).sum()/2
# cache the FFT of every windowed, detrended NFFT length segement
# of every channel. If prefer_speed_over_memory, cache | |
<filename>Pilot1/Uno/uno_trainUQ_keras2.py
#! /usr/bin/env python
from __future__ import division, print_function
import logging
import os
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import ReduceLROnPlateau, LearningRateScheduler, TensorBoard
import uno as benchmark
import candle
import uno_data
from uno_data import CombinedDataLoader, CombinedDataGenerator, DataFeeder, read_IDs_file
from uno_baseline_keras2 import build_model, evaluate_prediction
# Module-level logger; handlers/level are configured later via candle.set_up_logger.
logger = logging.getLogger(__name__)
# Silence TensorFlow's C++ logging (3 = print errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# NOTE(review): graph mode is forced here, presumably because the data
# generators / training loop below predate TF2 eager execution — confirm.
tf.compat.v1.disable_eager_execution()
# Extra CLI/config options added on top of the base Uno benchmark for UQ runs.
# A default of argparse.SUPPRESS means the key is absent from the parameter
# dict unless explicitly provided (the code below checks for presence).
additional_definitions = [
    {'name': 'uq_exclude_drugs_file',
     'default': argparse.SUPPRESS,
     'action': 'store',
     'help': 'File with drug ids to exclude from training'},
    {'name': 'uq_exclude_cells_file',
     'default': argparse.SUPPRESS,
     'action': 'store',
     'help': 'File with cell ids to exclude from training'},
    {'name': 'uq_exclude_indices_file',
     'default': argparse.SUPPRESS,
     'action': 'store',
     'help': 'File with indices to exclude from training'},
    {'name': 'exclude_drugs', 'nargs': '+',
     'default': [],
     'help':'drug ids to exclude'},
    {'name': 'exclude_cells', 'nargs': '+',
     'default': [],
     'help':'cell ids to exclude'},
    {'name': 'exclude_indices', 'nargs': '+',
     'default': [],
     'help':'indices to exclude'},
    {'name': 'reg_l2',
     'type': float,
     'default': 0.,
     'help': 'weight of regularization for l2 norm of nn weights'}
]
# Keys that must be present in the finalized parameter set.
required = ['exclude_drugs', 'exclude_cells', 'exclude_indices']
def extension_from_parameters(args):
    """Construct string for saving model with annotation of parameters.

    Args:
        args: argument namespace carrying the Uno hyperparameters
            (activation, batch_size, epochs, optimizer, loss, learning_rate,
            cell_features, drug_features, feature_subsample, dropout,
            warmup_lr, reduce_lr, residual, use_landmark_genes, no_gen,
            dense, dense_feature_layers).

    Returns:
        str: extension such as '.A=relu.B=32...' appended to save-path
        prefixes and log-file names.
    """
    ext = ''
    ext += '.A={}'.format(args.activation)
    ext += '.B={}'.format(args.batch_size)
    ext += '.E={}'.format(args.epochs)
    ext += '.O={}'.format(args.optimizer)
    ext += '.LOSS={}'.format(args.loss)
    ext += '.LR={}'.format(args.learning_rate)
    # Abbreviate feature lists by the first letter of each (sorted) entry.
    ext += '.CF={}'.format(''.join([x[0] for x in sorted(args.cell_features)]))
    ext += '.DF={}'.format(''.join([x[0] for x in sorted(args.drug_features)]))
    if args.feature_subsample > 0:
        ext += '.FS={}'.format(args.feature_subsample)
    if args.dropout > 0:
        ext += '.DR={}'.format(args.dropout)
    if args.warmup_lr:
        ext += '.wu_lr'
    if args.reduce_lr:
        ext += '.re_lr'
    if args.residual:
        ext += '.res'
    if args.use_landmark_genes:
        ext += '.L1000'
    if args.no_gen:
        ext += '.ng'
    for i, n in enumerate(args.dense):
        if n > 0:
            ext += '.D{}={}'.format(i + 1, n)
    if args.dense_feature_layers != args.dense:
        # BUG FIX: previously enumerated args.dense here, so when the feature
        # layers differed from the dense layers the annotation repeated the
        # dense sizes instead of recording the actual feature-layer sizes.
        for i, n in enumerate(args.dense_feature_layers):
            if n > 0:
                ext += '.FD{}={}'.format(i + 1, n)
    return ext
def log_evaluation(metric_outputs, logger, description='Comparing y_true and y_pred:'):
    """Log a header line followed by one indented line per metric (4 decimals)."""
    logger.info(description)
    for name in metric_outputs:
        logger.info(' {}: {:.4f}'.format(name, metric_outputs[name]))
def initialize_parameters(default_model='uno_defaultUQ_model.txt'):
    """Build the Uno UQ benchmark object and return its finalized parameter dict.

    Args:
        default_model (str): name of the default model/config file.

    Returns:
        dict: parameters merged from defaults, config file and command line
        by candle.finalize_parameters.
    """
    # Describe the benchmark ...
    uno_benchmark = benchmark.BenchmarkUno(
        benchmark.file_path, default_model, 'keras',
        prog='uno_trainUQ',
        desc='Build and train neural network based models to predict tumor response to single and paired drugs with UQ.')
    # ... register the UQ-specific required keys and extra CLI definitions ...
    uno_benchmark.required.update(required)
    uno_benchmark.additional_definitions += additional_definitions
    # ... and let CANDLE resolve the final parameter set.
    return candle.finalize_parameters(uno_benchmark)
def run(params):
args = candle.ArgumentStruct(**params)
candle.set_seed(args.rng_seed)
ext = extension_from_parameters(args)
candle.verify_path(args.save_path)
prefix = args.save_path + 'uno' + ext
logfile = args.logfile if args.logfile else prefix + '.log'
candle.set_up_logger(logfile, logger, args.verbose)
logger.info('Params: {}'.format(params))
# Exclude drugs / cells for UQ
if 'uq_exclude_drugs_file' in params.keys():
args.exclude_drugs = read_IDs_file(args.uq_exclude_drugs_file)
logger.info('Drugs to exclude: {}'.format(args.exclude_drugs))
else:
args.exclude_drugs = []
if 'uq_exclude_cells_file' in params.keys():
args.exclude_cells = read_IDs_file(args.uq_exclude_cells_file)
logger.info('Cells to exclude: {}'.format(args.exclude_cells))
else:
args.exclude_cells = []
if 'uq_exclude_indices_file' in params.keys():
exclude_indices_ = read_IDs_file(args.uq_exclude_indices_file)
args.exclude_indices = [int(x) for x in exclude_indices_]
logger.info('Indices to exclude: {}'.format(args.exclude_indices))
else:
args.exclude_indices = []
if (len(args.gpus) > 0):
import tensorflow as tf
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = ",".join(map(str, args.gpus))
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))
loader = CombinedDataLoader(seed=args.rng_seed)
loader.load(cache=args.cache,
ncols=args.feature_subsample,
agg_dose=args.agg_dose,
cell_features=args.cell_features,
drug_features=args.drug_features,
drug_median_response_min=args.drug_median_response_min,
drug_median_response_max=args.drug_median_response_max,
use_landmark_genes=args.use_landmark_genes,
use_filtered_genes=args.use_filtered_genes,
cell_feature_subset_path=args.cell_feature_subset_path or args.feature_subset_path,
drug_feature_subset_path=args.drug_feature_subset_path or args.feature_subset_path,
preprocess_rnaseq=args.preprocess_rnaseq,
single=args.single,
train_sources=args.train_sources,
test_sources=args.test_sources,
embed_feature_source=not args.no_feature_source,
encode_response_source=not args.no_response_source,
use_exported_data=args.use_exported_data,
)
target = args.agg_dose or 'Growth'
nout = 1
val_split = args.val_split
train_split = 1 - val_split
if args.export_csv:
fname = args.export_csv
loader.partition_data(cv_folds=args.cv,
train_split=train_split,
val_split=val_split,
cell_types=args.cell_types,
by_cell=args.by_cell,
by_drug=args.by_drug,
cell_subset_path=args.cell_subset_path,
drug_subset_path=args.drug_subset_path,
exclude_cells=args.exclude_cells,
exclude_drugs=args.exclude_drugs,
exclude_indices=args.exclude_indices)
train_gen = CombinedDataGenerator(loader, batch_size=args.batch_size, shuffle=args.shuffle)
val_gen = CombinedDataGenerator(loader, partition='val', batch_size=args.batch_size, shuffle=args.shuffle)
x_train_list, y_train = train_gen.get_slice(size=train_gen.size, dataframe=True, single=args.single)
x_val_list, y_val = val_gen.get_slice(size=val_gen.size, dataframe=True, single=args.single)
df_train = pd.concat([y_train] + x_train_list, axis=1)
df_val = pd.concat([y_val] + x_val_list, axis=1)
df = pd.concat([df_train, df_val]).reset_index(drop=True)
if args.growth_bins > 1:
df = uno_data.discretize(df, 'Growth', bins=args.growth_bins)
df.to_csv(fname, sep='\t', index=False, float_format="%.3g")
return
if args.export_data:
fname = args.export_data
loader.partition_data(cv_folds=args.cv,
train_split=train_split,
val_split=val_split,
cell_types=args.cell_types,
by_cell=args.by_cell,
by_drug=args.by_drug,
cell_subset_path=args.cell_subset_path,
drug_subset_path=args.drug_subset_path,
exclude_cells=args.exclude_cells,
exclude_drugs=args.exclude_drugs,
exclude_indices=args.exclude_indices)
train_gen = CombinedDataGenerator(loader, batch_size=args.batch_size, shuffle=args.shuffle)
val_gen = CombinedDataGenerator(loader, partition='val', batch_size=args.batch_size, shuffle=args.shuffle)
store = pd.HDFStore(fname, complevel=9, complib='blosc:snappy')
config_min_itemsize = {'Sample': 30, 'Drug1': 10}
if not args.single:
config_min_itemsize['Drug2'] = 10
for partition in ['train', 'val']:
gen = train_gen if partition == 'train' else val_gen
for i in range(gen.steps):
x_list, y = gen.get_slice(size=args.batch_size, dataframe=True, single=args.single)
for j, input_feature in enumerate(x_list):
input_feature.columns = [''] * len(input_feature.columns)
store.append('x_{}_{}'.format(partition, j), input_feature.astype('float32'), format='table', data_column=True)
store.append('y_{}'.format(partition), y.astype({target: 'float32'}), format='table', data_column=True,
min_itemsize=config_min_itemsize)
logger.info('Generating {} dataset. {} / {}'.format(partition, i, gen.steps))
# save input_features and feature_shapes from loader
store.put('model', pd.DataFrame())
store.get_storer('model').attrs.input_features = loader.input_features
store.get_storer('model').attrs.feature_shapes = loader.feature_shapes
store.close()
logger.info('Completed generating {}'.format(fname))
return
if args.use_exported_data is None:
loader.partition_data(partition_by=args.partition_by, cv_folds=args.cv,
train_split=train_split,
val_split=val_split,
cell_types=args.cell_types,
by_cell=args.by_cell,
by_drug=args.by_drug,
cell_subset_path=args.cell_subset_path,
drug_subset_path=args.drug_subset_path,
exclude_cells=args.exclude_cells,
exclude_drugs=args.exclude_drugs,
exclude_indices=args.exclude_indices)
model = build_model(loader, args)
logger.info('Combined model:')
model.summary(print_fn=logger.info)
# plot_model(model, to_file=prefix+'.model.png', show_shapes=True)
if args.loss == 'het' or args.loss == 'qtl':
model = candle.add_model_output(model, mode=args.loss)
logger.info('After adjusting for UQ loss function')
model.summary(print_fn=logger.info)
if args.cp:
model_json = model.to_json()
with open(prefix + '.model.json', 'w') as f:
print(model_json, file=f)
def warmup_scheduler(epoch):
lr = args.learning_rate or base_lr * args.batch_size / 100
if epoch <= 5:
K.set_value(model.optimizer.lr, (base_lr * (5 - epoch) + lr * epoch) / 5)
logger.debug('Epoch {}: lr={:.5g}'.format(epoch, K.get_value(model.optimizer.lr)))
return K.get_value(model.optimizer.lr)
df_pred_list = []
cv_ext = ''
cv = args.cv if args.cv > 1 else 1
for fold in range(cv):
if args.cv > 1:
logger.info('Cross validation fold {}/{}:'.format(fold + 1, cv))
cv_ext = '.cv{}'.format(fold + 1)
template_model = build_model(loader, args, silent=True)
if args.loss == 'het' or args.loss == 'qtl':
template_model = candle.add_model_output(template_model, mode=args.loss)
if args.initial_weights:
logger.info("Loading initial weights from {}".format(args.initial_weights))
template_model.load_weights(args.initial_weights)
if len(args.gpus) > 1:
from tensorflow.keras.utils import multi_gpu_model
gpu_count = len(args.gpus)
logger.info("Multi GPU with {} gpus".format(gpu_count))
model = multi_gpu_model(template_model, cpu_merge=False, gpus=gpu_count)
else:
model = template_model
optimizer = optimizers.deserialize({'class_name': args.optimizer, 'config': {}})
base_lr = args.base_lr or K.get_value(optimizer.lr)
if args.learning_rate:
K.set_value(optimizer.lr, args.learning_rate)
if args.loss == 'het':
logger.info('Training heteroscedastic model:')
mae_heteroscedastic = candle.mae_heteroscedastic_metric(nout)
r2_heteroscedastic = candle.r2_heteroscedastic_metric(nout)
meanS_heteroscedastic = candle.meanS_heteroscedastic_metric(nout)
model.compile(loss=candle.heteroscedastic_loss(nout), optimizer=optimizer, metrics=[mae_heteroscedastic, r2_heteroscedastic, meanS_heteroscedastic])
elif args.loss == 'qtl':
logger.info('Training quantile model:')
quantile50 = candle.quantile_metric(nout, 0, 0.5)
quantile10 = candle.quantile_metric(nout, 1, 0.1)
quantile90 = candle.quantile_metric(nout, 2, 0.9)
model.compile(loss=candle.triple_quantile_loss(nout, 0.1, 0.9), optimizer=optimizer, metrics=[quantile50, quantile10, quantile90])
else:
logger.info('Training homoscedastic model:')
model.compile(loss=args.loss, optimizer=optimizer, metrics=[candle.mae, candle.r2])
# calculate trainable and non-trainable params
params.update(candle.compute_trainable_params(model))
candle_monitor = candle.CandleRemoteMonitor(params=params)
timeout_monitor = candle.TerminateOnTimeOut(params['timeout'])
es_monitor = keras.callbacks.EarlyStopping(patience=10, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001)
warmup_lr = LearningRateScheduler(warmup_scheduler)
checkpointer = candle.MultiGPUCheckpoint(prefix + cv_ext + '.model.h5', save_best_only=True)
tensorboard = TensorBoard(log_dir="tb/{}{}{}".format(args.tb_prefix, ext, cv_ext))
history_logger = candle.LoggingCallback(logger.debug)
callbacks = [candle_monitor, timeout_monitor, history_logger]
if args.es:
callbacks.append(es_monitor)
if args.reduce_lr:
callbacks.append(reduce_lr)
if args.warmup_lr:
callbacks.append(warmup_lr)
if args.cp:
callbacks.append(checkpointer)
if args.tb:
callbacks.append(tensorboard)
if args.save_weights:
logger.info("Will save weights to: " + args.save_weights)
callbacks.append(candle.MultiGPUCheckpoint(args.save_weights))
if args.use_exported_data is not None:
train_gen = DataFeeder(filename=args.use_exported_data, batch_size=args.batch_size, shuffle=args.shuffle, single=args.single, agg_dose=args.agg_dose)
val_gen = DataFeeder(partition='val', filename=args.use_exported_data, batch_size=args.batch_size, shuffle=args.shuffle, single=args.single, agg_dose=args.agg_dose)
test_gen = DataFeeder(partition='test', filename=args.use_exported_data, batch_size=args.batch_size, shuffle=args.shuffle, single=args.single, agg_dose=args.agg_dose)
else:
train_gen = CombinedDataGenerator(loader, fold=fold, batch_size=args.batch_size, shuffle=args.shuffle, single=args.single)
val_gen = CombinedDataGenerator(loader, partition='val', fold=fold, batch_size=args.batch_size, shuffle=args.shuffle, single=args.single)
test_gen = CombinedDataGenerator(loader, partition='test', fold=fold, batch_size=args.batch_size, shuffle=args.shuffle, single=args.single)
df_val = val_gen.get_response(copy=True)
y_val = df_val[target].values
y_shuf = np.random.permutation(y_val)
log_evaluation(evaluate_prediction(y_val, y_shuf), logger, description='Between random pairs in y_val:')
if args.no_gen:
x_train_list, y_train = train_gen.get_slice(size=train_gen.size, single=args.single)
x_val_list, y_val = val_gen.get_slice(size=val_gen.size, single=args.single)
history = model.fit(x_train_list, y_train,
batch_size=args.batch_size,
epochs=args.epochs,
callbacks=callbacks,
validation_data=(x_val_list, y_val))
else:
logger.info('Data points per epoch: train = %d, val = %d, test = %d', train_gen.size, val_gen.size, test_gen.size)
logger.info('Steps per epoch: train = %d, val = %d, test = %d', train_gen.steps, val_gen.steps, test_gen.steps)
history = model.fit(train_gen,
epochs=args.epochs,
callbacks=callbacks,
validation_data=val_gen)
# prediction on holdout(test) when exists or use validation set
if test_gen.size > 0:
df_val = test_gen.get_response(copy=True)
y_val = df_val[target].values
y_val_pred = model.predict(test_gen, steps=test_gen.steps + 1)
y_val_pred = y_val_pred[:test_gen.size]
if args.loss == 'het':
y_val_pred_ = y_val_pred[:, 0]
y_val_pred = y_val_pred_.flatten()
elif args.loss == 'qtl':
y_val_pred_50q = y_val_pred[:, 0]
y_val_pred = y_val_pred_50q.flatten() # 50th quantile prediction
else:
if args.no_gen:
y_val_pred = model.predict(x_val_list, batch_size=args.batch_size)
else:
val_gen.reset()
y_val_pred = model.predict(val_gen, steps=val_gen.steps + 1)
y_val_pred = y_val_pred[:val_gen.size]
if args.loss == 'het':
y_val_pred_ = y_val_pred[:, 0]
s_val_pred = y_val_pred[:, 1]
y_val_pred = y_val_pred_.flatten()
df_val['Predicted_' + target] = y_val_pred
df_val[target + '_Error'] = y_val_pred - y_val
df_val['Pred_S_' + target] = s_val_pred
elif args.loss == 'qtl':
y_val_pred_50q = y_val_pred[:, 0]
y_val_pred_10q = y_val_pred[:, 1]
y_val_pred_90q = y_val_pred[:, 2]
y_val_pred = y_val_pred_50q.flatten() # 50th quantile prediction
df_val['Predicted_50q_' + target] = y_val_pred
df_val[target + '_Error_50q'] = y_val_pred - y_val
df_val['Predicted_10q_' + target] = y_val_pred_10q.flatten()
df_val['Predicted_90q_' + target] = y_val_pred_90q.flatten()
else:
y_val_pred = y_val_pred.flatten()
# df_val = df_val.assign(PredictedGrowth=y_val_pred, GrowthError=y_val_pred | |
len(edge.Vertexes) > 1:
v1 = self.wp.getLocalCoords(edge.Vertexes[0].Point)
v2 = self.wp.getLocalCoords(edge.Vertexes[-1].Point)
return Part.LineSegment(v1, v2).toShape()
return edge
def doCut(self, cutplane, hidden, clip, clipDepth, shapes):
    """Cut the given shapes with cutplane and classify the resulting faces.

    Parameters:
        cutplane: planar face used as the cutting plane.
        hidden: when True, also collect the edges of the geometry behind
            the cut plane into self.hiddenEdges.
        clip: forwarded to ArchCommands.getCutVolume to limit the cut
            volume to the plane's boundaries.
        clipDepth: when > 0, drop projected faces whose center of mass is
            farther than this distance from the working plane.
        shapes: list of [shape, ...metadata] entries; index 0 is the Part
            shape, indices 1 and 2 feed FaceData (color / pattern info).

    Returns:
        CutResult(objectShapes, sections, faces, cutvolume, cutface),
        where `sections` are faces coplanar with the cut plane and
        `faces` are the remaining projected faces in front of it.
    """
    objectShapes = []
    sections = []
    faces = []
    shps = []
    # self.reorient()
    # self.filterWrongOrientedFaces()
    for sh in shapes:
        shps.append(sh[0])
    cutface, cutvolume, invcutvolume = ArchCommands.getCutVolume(
        cutplane, shps, clip=clip)
    planeNormal = self.wp.getNormal()
    planeNormal.normalize()
    if not cutvolume:
        # getCutVolume produced nothing: fall back to extruding the cut
        # plane itself in both directions to get the two half-spaces.
        cutface = cutplane
        cutnormal = cutplane.normalAt(0.5, 0.5)
        cutvolume = cutplane.extrude(cutnormal)
        cutnormal = cutnormal.negative()
        invcutvolume = cutplane.extrude(cutnormal)
    if DEBUG:
        print('cutface: %s, cutvolume: %s, invcutvolume: %s' %
              (cutface, cutvolume, invcutvolume))
    if cutface and cutvolume:
        for sh in shapes:
            for sol in sh[0].Solids:
                # Keep only the part of the solid in front of the plane.
                c = sol.cut(cutvolume)
                objectShapes.append([c]+sh[1:])
                for f in c.Faces:
                    faceData = FaceData(f, sh[1], sh[2])
                    faceData = self.projectFace(faceData)
                    # TODO: Create temporary face list and filter duplicate faces
                    # Do isCoplanar check later on, when duplicate faces are removed
                    if faceData.correctlyOriented(planeNormal):
                        # Faces lying on the cut plane are section faces,
                        # everything else is a secondary (projected) face.
                        if DraftGeomUtils.isCoplanar([f, cutface]):
                            sections.append(faceData)
                        else:
                            faces.append(faceData)
                if hidden:
                    # The inverse volume yields the geometry behind the plane.
                    c = sol.cut(invcutvolume)
                    # self.projectEdge(e)
                    self.hiddenEdges.extend(c.Edges)
    if clipDepth > 0:
        faces = [f for f in faces if self.isInRange(
            f.originalFace, clipDepth)]
    return CutResult(objectShapes, sections, faces, cutvolume, cutface)
def doCutSectionCuts(self, cutvolume, cutface, sectionCutShapes):
    """Cut the section-cut marker faces with the cut volume.

    Returns a list of (projected_edge, face_normal, label) tuples, one
    per resulting edge that lies on the cut face. An empty list is
    returned when no cut volume is available.
    """
    result = []
    if not cutvolume:
        return result
    for sectionCut in sectionCutShapes:
        markerFace = sectionCut.face
        faceNormal = markerFace.normalAt(0.5, 0.5)
        cutShape = markerFace.cut(cutvolume)
        for edge in cutShape.Edges:
            if not isEdgeOnPlane(edge, cutface):
                continue
            result.append((self.projectEdge(edge), faceNormal, sectionCut.text))
    return result
def cut(self, cutplane, hidden=False, clip=False, clipDepth=0):
    """Cut through objectShapes, windowShapes and sectionCutShapes with
    the given cut plane and build section faces, window faces and
    section-cut edges. Idempotent: repeated calls are no-ops.
    """
    if DEBUG:
        print("\n\n======> Starting cut\n\n")
    # Cutting is expensive; never do it twice on the same instance.
    if self.iscut:
        return
    objectCutVolume = None
    objectCutFace = None
    if not self.objectShapes:
        if DEBUG:
            print("No objects to make sections")
    else:
        # We always use a clipping cut here. The section plane needs to be big enough
        # But we need it clipping for the sectionCutShapes later on
        result = self.doCut(
            cutplane, hidden, True, clipDepth, self.objectShapes)
        self.objectShapes = result.objectShapes
        self.sections = result.sections
        self.secondaryFaces = result.faces
        # Keep the cut geometry: doCutSectionCuts() below reuses it.
        objectCutVolume = result.cutvolume
        objectCutFace = result.cutface
        if DEBUG:
            print("Built ", len(self.sections), " sections")
    if not self.windowShapes:
        if DEBUG:
            print("No objects to make windows")
    else:
        # Windows honor the caller's clip setting, unlike the object cut above.
        result = self.doCut(
            cutplane, hidden, clip, clipDepth, self.windowShapes)
        self.windowShapes = result.objectShapes
        self.windows = result.sections
        if DEBUG:
            print("Built ", len(self.windows), " windows")
    if not self.sectionCutShapes:
        if DEBUG:
            print("No objects to make sectionCuts")
    else:
        # NOTE(review): when objectShapes was empty, objectCutVolume is
        # still None here and doCutSectionCuts() returns no edges.
        self.sectionCuts = self.doCutSectionCuts(
            objectCutVolume, objectCutFace, self.sectionCutShapes)
        if DEBUG:
            print("Built ", len(self.sectionCuts), " sectionCuts")
    self.sort()
    # Update instance state so getSvgParts() re-runs deduplication.
    self.iscut = True
    self.sorted = True
    self.duplicatesRemoved = False
    if DEBUG:
        print("\n\n======> Finished cut\n\n")
def getFill(self, fill):
    """Convert an RGB tuple of 0..1 floats to a "#rrggbb" SVG fill value."""
    # Each channel becomes a two-digit lowercase hex byte.
    channels = [format(int(channel * 255), '02x') for channel in fill[:3]]
    return "#" + "".join(channels)
def getPatternTemplate(self, fill, opacity, pattern_type):
    """Resolve the pattern template for pattern_type and build its id.

    Falls back to the "DEFAULT" template when pattern_type is None
    (silently) or unknown (with a warning). Returns a
    (template_string, pattern_id) tuple; the id embeds type, fill
    color and opacity so each combination gets its own definition.
    """
    if pattern_type is None:
        pattern_type = "DEFAULT"
    elif pattern_type not in PATTERN_TEMPLATES:
        print("Unknown PatternType " + pattern_type)
        pattern_type = "DEFAULT"
    pattern_id = "%s-%s-%s" % (
        pattern_type.lower(), fill.replace("#", ""), str(opacity))
    return (PATTERN_TEMPLATES[pattern_type], pattern_id)
def getPattern(self, color, pattern_type, opacity=1):
    """Return the id of a registered SVG pattern, creating it on demand.

    color is an RGB tuple, pattern_type selects the template and opacity
    is baked into both the pattern id and the pattern definition that is
    cached in self.patterns.
    """
    fillColor = self.getFill(color)
    template, patternId = self.getPatternTemplate(fillColor, opacity, pattern_type)
    if patternId not in self.patterns:
        # First use of this id: instantiate the template and cache it.
        definition = template.replace("PATTERN_ID", patternId)
        definition = definition.replace("PATTERN_COLOR", fillColor)
        definition = definition.replace("PATTERN_OPACITY", str(opacity))
        self.patterns[patternId] = definition
    return patternId
def getPathData(self, w):
    """Returns a SVG path data string from a 2D wire
    The Y Axis in the SVG Coordinate system is reversed from the FreeCAD Coordinate System.
    So we change the y coordinates accordingly
    """
    def toCommand(command, x, y):
        # Emit "<cmd> x -y": the sign flip converts FreeCAD y-up
        # coordinates to the SVG y-down coordinate system.
        return '%s %s %s ' % (command, toNumberString(x), toNumberString(-y))
    edges = Part.__sortEdges__(w.Edges)
    # Move to the start vertex of the first (sorted) edge.
    v = edges[0].Vertexes[0].Point
    svg = toCommand('M', v.x, v.y)
    for e in edges:
        if (DraftGeomUtils.geomType(e) == "Line") or (DraftGeomUtils.geomType(e) == "BSplineCurve"):
            # BSplines are approximated by a straight line to their end point.
            v = e.Vertexes[-1].Point
            svg += toCommand('L', v.x, v.y)
        elif DraftGeomUtils.geomType(e) == "Circle":
            # Circular arc. NOTE(review): the large-arc and sweep flags are
            # hard-coded to "0 0 1", which assumes small counter-clockwise
            # arcs -- confirm for full circles / clockwise arcs.
            r = e.Curve.Radius
            v = e.Vertexes[-1].Point
            svg += 'A %s %s 0 0 1 %s %s ' % (toNumberString(r),
                                             toNumberString(r), toNumberString(v.x), toNumberString(-v.y))
    if len(edges) > 1:
        # Close the path only for real (multi-edge) wires.
        svg += 'Z '
    return svg
def getPatternSVG(self):
    """Concatenate all registered pattern definitions, one per line."""
    if not hasattr(self, "patterns"):
        # Nothing rendered yet, so no patterns were registered.
        return ''
    return ''.join(definition + '\n' for definition in self.patterns.values())
def getSectionSVG(self, linewidth):
    """Build SVG paths for the section faces (faces on the cut plane).

    Each face is rendered as a filled path using its registered pattern,
    a black stroke and the given line width.
    """
    parts = []
    for faceData in self.sections:
        if not faceData:
            continue
        fill = 'url(#' + self.getPattern(faceData.color, faceData.pattern_type) + ')'
        pathdata = ''.join(
            self.getPathData(wire) for wire in faceData.reorientedFace.Wires)
        svg = PATH_TEMPLATE.replace("PATH_FILL", fill)
        svg = svg.replace("FILL_OPACITY", "1")
        svg = svg.replace("DASH_ARRAY", "none")
        svg = svg.replace("STROKE_COLOR", "#000000")
        svg = svg.replace("STROKE_WIDTH", str(linewidth))
        svg = svg.replace("PATH_DATA", pathdata)
        parts.append(svg + "\n")
    return ''.join(parts)
def getMarkerSVG(self, linewidth):
    """Build SVG for the marker shapes.

    Each marker is drawn as a faint, dashed outline of its projected
    face plus a small label centered on the face.
    """
    parts = []
    for marker in self.markerShapes:
        strokeColor = self.getFill(marker.color)
        projected = self.projectFace(FaceData(marker.face, None, None)).reorientedFace
        labelPos = projected.CenterOfMass
        pathdata = ''.join(self.getPathData(wire) for wire in projected.Wires)
        path = PATH_TEMPLATE.replace("PATH_FILL", strokeColor)
        path = path.replace("FILL_OPACITY", "0.04")
        path = path.replace("DASH_ARRAY", "100,50")
        path = path.replace("STROKE_COLOR", strokeColor)
        path = path.replace("STROKE_WIDTH", str(linewidth))
        path = path.replace("PATH_DATA", pathdata)
        label = TEXT_TEMPLATE.replace("TEXT_CONTENT", marker.text)
        label = label.replace("TEXT_FONT_SIZE", "SMALL_TEXT_FONT_SIZE")
        label = label.replace("TEXT_ANCHOR", "middle")
        label = label.replace("TEXT_POSITION_X", toNumberString(labelPos.x))
        # SVG's y axis points down while FreeCAD's points up, so flip the sign.
        label = label.replace("TEXT_POSITION_Y", toNumberString(-labelPos.y))
        label = label.replace("TEXT_ROTATION", "0")
        parts.append("%s %s\n" % (path, label))
    return ''.join(parts)
def getSectionCutSvg(self, linewidth):
    """Build the SVG for the section-cut indicators.

    Each entry of self.sectionCuts is an (edge, normal, label) tuple.
    For every entry an edge path, two direction arrows and two rotated
    labels (one at each end of the edge) are emitted from
    SECTION_CUT_TEMPLATE.

    Bug fix: the anchor placeholder was previously replaced on the label
    string instead of the text template, so the literal "TEXT_ANCHOR"
    placeholder survived into the output; it is now substituted on the
    template.
    """
    svg = ''
    arrowSize = 100
    referenceAxis = FreeCAD.Vector(0, -1, 0)
    def rotation(x, y, angle):
        # SVG rotate() parameter list: "angle cx cy".
        angleString = toNumberString(angle)
        xString = toNumberString(x)
        yString = toNumberString(y)
        return '%s %s %s' % (angleString, xString, yString)
    def arrowPath(basePoint):
        # Triangle centered on basePoint, pointing down in SVG coordinates.
        baseX = basePoint.x
        baseY = -basePoint.y  # FreeCAD y-up -> SVG y-down
        baseXString = toNumberString(baseX)
        baseYString = toNumberString(baseY)
        return "M %s %s L %s %s L %s %s Z" % (toNumberString(baseX - arrowSize), baseYString, toNumberString(baseX + arrowSize), baseYString, baseXString, toNumberString(baseY + arrowSize))
    def makeText(basePoint, normal, angle, label):
        # Shift the label against the view direction so it does not sit
        # directly on the cut line.
        offset = FreeCAD.Vector(normal.x, normal.y, normal.z).multiply(100)
        actualBase = FreeCAD.Vector(basePoint.x, basePoint.y, basePoint.z).sub(offset)
        baseX = actualBase.x
        baseY = -actualBase.y
        baseXString = toNumberString(baseX)
        baseYString = toNumberString(baseY)
        textSvg = TEXT_TEMPLATE.replace("TEXT_POSITION_X", baseXString)
        # BUGFIX: substitute the anchor on the template (was applied to
        # the label string before, leaving the placeholder in the output).
        textSvg = textSvg.replace("TEXT_ANCHOR", "middle")
        textSvg = textSvg.replace("TEXT_POSITION_Y", baseYString)
        textSvg = textSvg.replace("TEXT_CONTENT", label)
        textSvg = textSvg.replace("TEXT_ROTATION", rotation(baseX, baseY, angle))
        return textSvg
    for s in self.sectionCuts:
        edge = s[0]
        normal = s[1]
        # Arrows point against the stored face normal.
        normal = normal.negative()
        label = s[2]
        pathdata = self.getPathData(edge)
        rotationAngle = math.degrees(normal.getAngle(referenceAxis))
        start = edge.Vertexes[0].Point
        end = edge.Vertexes[1].Point
        arrowStart = arrowPath(start)
        arrowEnd = arrowPath(end)
        textStart = makeText(start, normal, rotationAngle, label)
        textEnd = makeText(end, normal, rotationAngle, label)
        current = SECTION_CUT_TEMPLATE.replace("PATH_DATA", pathdata)
        current = current.replace("STROKE_WIDTH", str(linewidth))
        # Replace the *_ROTATION placeholders before ARROW_START/ARROW_END:
        # the shorter names are prefixes of the longer ones.
        current = current.replace(
            "ARROW_START_ROTATION", rotation(start.x, -start.y, rotationAngle))
        current = current.replace(
            "ARROW_END_ROTATION", rotation(end.x, -end.y, rotationAngle))
        current = current.replace("ARROW_START", arrowStart)
        current = current.replace("ARROW_END", arrowEnd)
        current = current.replace("TEXT_START", textStart)
        current = current.replace("TEXT_END", textEnd)
        svg += current + "\n"
    return svg
def getWindowSVG(self, linewidth):
    """Build SVG paths for the window faces, mirroring getSectionSVG."""
    parts = []
    for windowFace in self.windows:
        if not windowFace:
            continue
        fill = 'url(#' + self.getPattern(windowFace.color, windowFace.pattern_type) + ')'
        pathdata = ''
        for wire in windowFace.reorientedFace.Wires:
            pathdata += self.getPathData(wire)
        svg = PATH_TEMPLATE.replace("PATH_FILL", fill)
        svg = svg.replace("FILL_OPACITY", "1")
        svg = svg.replace("DASH_ARRAY", "none")
        svg = svg.replace("STROKE_COLOR", "#000000")
        svg = svg.replace("STROKE_WIDTH", str(linewidth))
        svg = svg.replace("PATH_DATA", pathdata)
        parts.append(svg + "\n")
    return ''.join(parts)
def isInRange(self, face, maxDistance):
    """Return True when the face's center of mass lies within maxDistance
    of the working plane (perpendicular distance). A non-positive
    maxDistance always yields False.
    """
    if maxDistance <= 0:
        return False
    distance = abs(face.CenterOfMass.distanceToPlane(
        self.wp.getPlacement().Base, self.wp.getNormal()))
    return distance <= maxDistance
def getSecondaryFacesSVG(self, linewidth, faceHighlightDistance, highlightLineWith):
    """Build SVG paths for the secondary (projected, non-section) faces.

    Faces within faceHighlightDistance of the working plane are drawn
    highlighted: full pattern opacity and the highlightLineWith stroke
    width; all other faces use the regular linewidth and a faint pattern.

    Bug fix: the original overwrote the `linewidth` parameter when a face
    was highlighted, so every face after the first highlighted one was
    also drawn with the highlight stroke width. The width is now chosen
    per face.
    """
    secondaryFacesSvg = ''
    for f in self.secondaryFaces:
        if f:
            # Per-face style so a highlighted face does not leak its
            # stroke width into the faces that follow.
            strokeWidth = linewidth
            patternOpacity = 0.1
            shouldHightlight = self.isInRange(
                f.originalFace, faceHighlightDistance)
            if shouldHightlight:
                strokeWidth = highlightLineWith
                patternOpacity = 1
            fill = 'url(#' + self.getPattern(f.color,
                                             f.pattern_type, patternOpacity) + ')'
            pathdata = ''
            for w in f.reorientedFace.Wires:
                pathdata += self.getPathData(w)
            current = PATH_TEMPLATE.replace("PATH_FILL", fill)
            current = current.replace("FILL_OPACITY", "1")
            current = current.replace("DASH_ARRAY", "none")
            current = current.replace("STROKE_COLOR", "#000000")
            current = current.replace("STROKE_WIDTH", str(strokeWidth))
            current = current.replace("PATH_DATA", pathdata)
            secondaryFacesSvg += current + "\n"
    return secondaryFacesSvg
def getSvgParts(self, faceHighlightDistance=0):
    """Return a dict with all SVG fragments produced from the cut:
    patterns, sections, secondaryFaces, windows, boundBox, sectionCuts
    and markers.
    """
    # Deduplicate faces once per cut (cut() resets this flag).
    if not self.duplicatesRemoved:
        self.removeDuplicates()
        self.duplicatesRemoved = True
    # Reset the pattern registry so only patterns referenced by this
    # render end up in the output.
    self.patterns = {}
    # NOTE(review): the stroke-width arguments are placeholder strings
    # (e.g. "SECTION_STROKE_WIDTH"), presumably substituted with real
    # values by the caller -- TODO confirm.
    sectionSvg = self.getSectionSVG("SECTION_STROKE_WIDTH")
    windowSvg = self.getWindowSVG("WINDOW_STROKE_WIDTH")
    secondaryFacesSvg = self.getSecondaryFacesSVG(
        "SECONDARY_STROKE_WIDTH", faceHighlightDistance, "SECTION_STROKE_WIDTH")
    # getPatternSVG() must run after the face renderers above, which
    # populate self.patterns as a side effect.
    patternSvg = self.getPatternSVG()
    sectionCutSvg = self.getSectionCutSvg("SECTION_CUT_STROKE_WIDTH")
    markerSvg = self.getMarkerSVG("MARKER_STROKE_WIDTH")
    boundBox = self.buildBoundBox()
    return {
        "patterns": patternSvg,
        "sections": sectionSvg,
        "secondaryFaces": secondaryFacesSvg,
        "windows": windowSvg,
        "boundBox": boundBox,
        "sectionCuts": sectionCutSvg,
        "markers": markerSvg
    }
def buildBoundBox(self):
    """Build a BoundBox on the working plane that encloses all reoriented
    faces (secondary, section and window) and the section-cut edges.
    """
    boundBox = BoundBox(self.wp)
    if self.secondaryFaces:
        boundBox.adaptFromShapes(
            [f.reorientedFace for f in self.secondaryFaces if f])
    if self.sections:
        boundBox.adaptFromShapes(
            [f.reorientedFace for f in self.sections if f])
    if self.windows:
        boundBox.adaptFromShapes(
            [f.reorientedFace for f in self.windows if f])
    if self.hiddenEdges:
        # NOTE(review): doCut() fills hiddenEdges with raw Part edges,
        # which have no reorientedFace attribute -- this branch looks
        # like it would fail for a non-empty list; confirm how
        # hiddenEdges is populated elsewhere.
        boundBox.adaptFromShapes(
            [f.reorientedFace for f in self.hiddenEdges if f])
    if self.sectionCuts:
        # sectionCuts entries are (edge, normal, label) tuples.
        boundBox.adaptFromShapes([s[0] for s in self.sectionCuts])
    return boundBox
# | |
<reponame>AlkalineCandy79/PGE_Potential_PSPS_Scraper
#-------------------------------------------------------------------------------
# Name: PGE Power Outage Status Scraper
# Purpose: This script, while not the most elegant, scrapes the PG&E back end
# for data about specific addresses. As there are sometimes Apt#'s etc
# within their data, once a match is found, it only compares City and Zip.
# Some data cleanup would be better, but this is BETA and works semi-decently.
#
# Author: <NAME>
#
# Created: 10/27/2019
#
#-------------------------------------------------------------------------------
# 888888888888888888888888888888888888888888888888888888888888888888888888888888
# ------------------------------- Configuration --------------------------------
# Pretty simple setup. Just change your settings/configuration below. Do not
# go below the "DO NOT UPDATE...." line.
#
# 888888888888888888888888888888888888888888888888888888888888888888888888888888
# Define the variables
PGE_premise_lookup = 'https://hiqlvv36ij.cloud.pge.com/Prod/v1/search/premise?address=' #Do not adjust
PGE_status_lookup = 'https://hiqlvv36ij.cloud.pge.com/Prod/v1/search/message?premise_id=' #Do not adjust
db_connection = r'Database Connections\\Connection to CartaEdit GISSQL16SDE.sde' #This is your database connection.
msag_source = 'DBO.MSAG_Listing' #main address table.
data_destination = 'DBO.PGE_Status' #where all your statuses will get built. This script will auto create the table if needed. Do not modify the schema.
account_destination = 'DBO.PGE_Cached_Accounts' #cache of PG&E premise ids found during a rebuild; re-checked when rebuild = 0.
city_focus = '' #Place city name if you want to focus script on only 1 city. Leave '' if you want all.
# Careful with this one...this controls how many workers you have.
workers = 15 # Maximum number of workers.
# Rebuild Search Table
rebuild = 1 # 1 = rebuild the status table from the MSAG address list; 0 = re-check only the cached premise ids.
import arcpy
import time
import re
import concurrent.futures
import requests, json, collections, string
def prep_data():
    """Prepare the PGE_Status results table.

    Truncates the table when it exists, creates it otherwise, then fills
    it from the MSAG address table, assembling a PG&E-friendly
    "full_address_to_PGE" string for each row.
    """
    # Build Results Table
    item_check = data_destination
    arcpy.env.workspace = db_connection
    if arcpy.Exists(item_check):
        try:
            clear_results_SQL = ('''
            truncate table {0}
            '''.format(data_destination))
            arcpy.ArcSDESQLExecute(db_connection).execute(clear_results_SQL)
        except Exception as error_check_for_existance:
            print ("Status: Failure!")
            print(error_check_for_existance.args[0])
    else:
        # First run: create the destination table with the fixed schema.
        create_results_SQL = ('''
        Create Table {0} (
        [OBJECTID] [int]
        , [prefix_typ] [varchar](4)
        , [prefix_dir] [varchar](4)
        , [street_nam] [varchar](50)
        , [street_typ] [varchar](6)
        , [suffix_dir] [varchar](4)
        , [unit_numbe] [varchar](10)
        , [city] [varchar](50)
        , [state] [varchar](2)
        , [zip_code] [varchar](20)
        , [street_num] [varchar](10)
        , [full_addre] [varchar](254)
        , [full_address_to_PGE] [varchar](254)
        , [latitude] [numeric](38,8)
        , [longitude] [numeric](38,8)
        , [PGE_status] [varchar](1000)
        , [SysChangeDate] [datetime2](7)
        )
        '''.format(data_destination))
        try:
            arcpy.ArcSDESQLExecute(db_connection).execute(create_results_SQL)
        except Exception as error_check:
            print(error_check.args[0])
    # Build Address List
    # The CASE expression builds the address string PG&E expects,
    # handling empty strings and NULLs separately for each combination.
    # NOTE(review): not every prefix/suffix combination is covered (e.g.
    # prefix_direction together with suffix_direction falls through to
    # NULL), and the "OjectID" alias is a typo kept for compatibility.
    pull_msag_SQL = ('''
    insert into {1}
    select
    ROW_NUMBER() OVER(ORDER BY full_address ASC) as OjectID
    ,prefix_type
    ,prefix_direction
    ,street_name
    ,street_type
    ,suffix_direction
    ,unit_number
    ,city
    ,state
    ,zip_code
    ,street_number
    ,full_address
    , case
    when prefix_type = '' and prefix_direction = '' and street_type = '' and suffix_direction = '' then street_number + ' ' + street_name
    when prefix_type is NULL and prefix_direction is NULL and street_type is NULL and suffix_direction is NULL then street_number + ' ' + street_name
    when prefix_type = '' and prefix_direction = '' and street_type <> '' and suffix_direction ='' then street_number + ' ' + street_name + ' ' + street_type
    when prefix_type is NULL and prefix_direction is NULL and street_type is not NULL and suffix_direction is NULL then street_number + ' ' + street_name + ' ' + street_type
    when prefix_type = '' and prefix_direction = '' and street_type = '' and suffix_direction <>'' then street_number + ' ' + street_name + ' ' + suffix_direction
    when prefix_type is NULL and prefix_direction is NULL and street_type is NULL and suffix_direction is not NULL then street_number + ' ' + street_name + ' ' + suffix_direction
    when prefix_type = '' and prefix_direction = '' and street_type <> '' and suffix_direction <>'' then street_number + ' ' + street_name + ' ' + street_type + ' ' + suffix_direction
    when prefix_type is NULL and prefix_direction is NULL and street_type is not NULL and suffix_direction is not NULL then street_number + ' ' + street_name + ' ' + street_type + ' ' + suffix_direction
    when prefix_type <> '' and prefix_direction = '' and street_type = '' and suffix_direction = '' then street_number + ' ' + street_name + ' ' + prefix_type
    when prefix_type is not NULL and prefix_direction is NULL and street_type is NULL and suffix_direction is NULL then street_number + ' ' + street_name + ' ' + prefix_type
    when prefix_type = '' and prefix_direction <> '' and street_type = '' and suffix_direction = '' then street_number + ' ' + prefix_direction + ' ' + street_name
    when prefix_type is NULL and prefix_direction is not NULL and street_type is NULL and suffix_direction is NULL then street_number + ' ' + prefix_direction + ' ' + street_name
    when prefix_type = '' and prefix_direction <> '' and street_type <> '' and suffix_direction = '' then street_number + ' ' + prefix_direction + ' ' + street_name + ' ' + street_type
    when prefix_type is NULL and prefix_direction is not NULL and street_type is not NULL and suffix_direction is NULL then street_number + ' ' + prefix_direction + ' ' + street_name + ' ' + street_type
    end as full_address_to_PGE
    , latitude
    , longitude
    , ''
    , getdate()
    FROM {0}''').format(msag_source, data_destination)
    try:
        msag_results = arcpy.ArcSDESQLExecute(db_connection).execute(pull_msag_SQL)
    except Exception as error_check:
        print(error_check.args[0])
def prep_4accounts():
    """Prepare the cached-accounts table (account_destination).

    Creates the table on first use, or empties it when it already
    exists, so process_city() can repopulate it.
    """
    arcpy.env.workspace = db_connection
    if not arcpy.Exists(account_destination):
        # First run: create the cache table with the expected schema.
        create_sql = ('''
        CREATE TABLE {0}(
        [OBJECTID] [INT] IDENTITY(1,1)
        , [address] [varchar](254)
        , [streetnum] [varchar](20)
        , [city] [varchar](50)
        , [zip] [varchar](20)
        , [pId] [varchar](20)
        , [serviceType] [varchar](10)
        , [PGE_status] [varchar](1000)
        , [SysChangeDate] [datetime2](7)
        )
        '''.format(account_destination))
        try:
            arcpy.ArcSDESQLExecute(db_connection).execute(create_sql)
        except Exception as create_error:
            print(create_error.args[0])
    else:
        # Table already exists: just empty it.
        try:
            truncate_sql = ('''
            truncate table {0}
            '''.format(account_destination))
            arcpy.ArcSDESQLExecute(db_connection).execute(truncate_sql)
        except Exception as truncate_error:
            print ("Status: Failure!")
            print(truncate_error.args[0])
def city_list():
    """Populate the global city_listing with every distinct, non-empty
    city found in the freshly built status table."""
    global city_listing
    city_list_SQL = '''
    select
    distinct(city)
    , count(*) as points
    from {0} where city <> ''
    group by city
    order by city asc
    '''.format(data_destination)
    rows = arcpy.ArcSDESQLExecute(db_connection).execute(city_list_SQL)
    # Keep only the city name; the point count is ignored.
    city_listing = [row[0] for row in rows]
def process_city(city):
# Begin Status Update
if rebuild == 1:
pull_from_PGE_SQL = '''
select * from {0}
where city = '{1}'
'''.format(data_destination, city)
pge_status_search_return = arcpy.ArcSDESQLExecute(db_connection).execute(pull_from_PGE_SQL)
else:
pull_from_PGE_SQL = '''
select * from {0}
where city = '{1}'
'''.format(account_destination, city)
pge_status_search_return = arcpy.ArcSDESQLExecute(db_connection).execute(pull_from_PGE_SQL)
hitcount = 0
if rebuild == 1:
for row in pge_status_search_return:
address = row[12]
zipcode = row[9]
city = row[7]
hitcount += 1
print ("\n\n***Records reviewed: {0}\n\n".format(hitcount))
PGE_premise_search = PGE_premise_lookup + '{0}'.format(address)
print ("Looking up {0}, {1} {2}".format(address, city, zipcode))
# Get addresses like the seed values.
while True:
try:
response = requests.get (PGE_premise_search)
data = response.json()
payload = data['body']['Items']
retry = 0
except Exception as payload_error:
retry = 1
time.sleep(10)
if retry == 0:
break
for item in payload:
location = item
city_PGE = location['city']
zipcode_PGE = location['zip']
pId_PGE = location['pId']
streetNumber_PGE = location['streetNumber']
# Cleanup on aisle 6 required as some special charcters sneak in from time to time.
address_PGE = re.sub('[^a-zA-Z0-9 \n\.]', '', location['address'])
servicetype_PGE = location ['serviceType']
print ("\tFound {0}, {1} {2}".format (address_PGE, city_PGE, zipcode_PGE))
print ("\tPGE pID: {0}".format(pId_PGE))
PGE_pId_status = PGE_status_lookup + '{0}'.format(pId_PGE)
print ("\tLooking up {0}, {1} {2}".format(address_PGE, city_PGE, zipcode_PGE))
# Get the status of the account for the address in question.
if city_PGE.upper() == city.upper():
halt = 0
while True:
try:
status_response = requests.get (PGE_pId_status)
status_data = status_response.json()
print ('\tPG&E payload response: {0}'.format(status_data))
halt += 1
retry = 0
if halt == 10:
time.sleep(60)
break
except Exception as account_pull_error:
retry = 1
time.sleep(60)
if retry == 0:
break
print ("\tChecked.\n")
if status_data['Items'] == []:
status_message = '\tNo Update Available'
else:
status_payload = status_data['Items']
for item in status_payload:
status_message = item['message']
status_message = status_message.replace(r'\u00a0', ' ')
printable = set(string.printable)
status_message = filter(lambda x: x in printable, status_message)
push_update_SQL = '''
insert into {0} (
[address]
, [streetnum]
, [city]
, [zip]
, [pId]
, [serviceType]
, [PGE_status]
, [SysChangeDate])
values ('{1}'
, '{2}'
, '{3}'
, '{4}'
, '{5}'
, '{6}'
, '{7}'
, getdate())'''.format(account_destination, address_PGE, streetNumber_PGE, city_PGE, zipcode_PGE, pId_PGE, servicetype_PGE, status_message)
try:
arcpy.ArcSDESQLExecute(db_connection).execute(push_update_SQL)
except Exception as error_check:
print(error_check.args[0])
else:
print ('\n\n*****{0} is outside of search scope*****\n\n'.format(city_PGE))
else:
for row in pge_status_search_return:
pId_PGE = row[5]
hitcount += 1
PGE_pId_status = PGE_status_lookup + '{0}'.format(pId_PGE)
halt = 0
while True:
try:
status_response = requests.get (PGE_pId_status)
status_data = status_response.json()
print ('Attempted Account Number: {0}'.format(pId_PGE))
print ('\tPG&E payload response: {0}'.format(status_data))
halt | |
from buildingArrays import BuildingFactory
from random import randint, randrange
import utilityFunctions
import time
#inputs are taken from the user. Here I've just showing labels, as well as letting the user define
# what the main creation material for the structures is
inputs = (
    ("COMP4303 Final Project", "label"),
    ("caMel", "label"),
)
# Block ids treated as vegetation (trees, plants, ...) that getHeight /
# clearEnvironmentArea may remove when preparing plots.
foliage = [6, 17, 18, 31, 32, 37, 38, 39, 40, 81, 83, 86, 99, 100, 103, 106, 111, 127, 161, 162, 175]
# 2D step offsets used by the path generator.
directions = [[1, 0], [0, 1], [-1, 0], [0, -1]] # right, up, left, down
lastDirectionInt = 5 # lets use this so it can never back track...
buildingLocations = [] # lets define a list of all locations that will need buildings
# Maps a biome id to the wood data value used for that biome's planks/logs.
woodTypePerBiome = { # biome: tree data
    4: 2, # forest: birch
    5: 1, # taiga: spruce
    12: 1, # snowy tundra: spruce
    13: 1, # snowy mountains: spruce
    19: 1, # taiga hills: spruce
    21: 3, # jungle: jungle
    22: 3, # jungle hills: jungle
    23: 3, # jungle edge: jungle
    27: 2, # birch forest: birch
    28: 2, # birch forest hills: birch
    30: 1, # snowy taiga: spruce
    31: 1, # snowy taiga hills: spruce
    32: 1, # mega taiga: spruce
    133: 1, # taiga mountains: spruce
    149: 3, # modified jungle: jungle
    151: 3, # modified jungle edge: jungle
    155: 2, # tall birch forest: birch
    156: 2, # tall birch hills: birch
    158: 1, # snowy taiga mountains: spruce
    168: 3, # bamboo jungle: jungle
    169: 3, # bamboo jungle hills: jungle
}
# Maps a biome id to the stair block id matching that biome's wood type.
stairBlockTypePerBiome = {
    4: 135, # forest: birch
    5: 134, # taiga: spruce
    12: 134, # snowy tundra: spruce
    13: 134, # snowy mountains: spruce
    19: 134, # taiga hills: spruce
    21: 136, # jungle: jungle
    22: 136, # jungle hills: jungle
    23: 136, # jungle edge: jungle
    27: 135, # birch forest: birch
    28: 135, # birch forest hills: birch
    30: 134, # snowy taiga: spruce
    31: 134, # snowy taiga hills: spruce
    32: 134, # mega taiga: spruce
    133: 134, # taiga mountains: spruce
    149: 136, # modified jungle: jungle
    151: 136, # modified jungle edge: jungle
    155: 135, # tall birch forest: birch
    156: 135, # tall birch hills: birch
    158: 134, # snowy taiga mountains: spruce
    168: 136, # bamboo jungle: jungle
    169: 136, # bamboo jungle hills: jungle
}
# Selection-box bounds; filled in by perform() and read by the helpers.
maxx = None; maxy = None; maxz = None; minx = None; miny = None; minz = None
# The required method for MCEdit. This function will be what runs the filter.
def perform(level, box, options):
    """MCEdit filter entry point: generate a village inside the selection.

    Pipeline: plan a 2D layout grid, level the house plots, smooth the
    surrounding terrain, write the grid into the world, then build the
    houses. Prints the elapsed time when done.
    """
    global maxx, maxy, maxz, minx, miny, minz
    start = time.time()
    filterOptions = options  # currently unused
    # Cache the selection bounds in module globals for the helpers.
    maxx = box.maxx; maxy = box.maxy; maxz = box.maxz
    minx = box.minx; miny = box.miny; minz = box.minz
    # Build a 2D planning grid
    # Each cell holds [layoutType, height]; type 1 = house plot, 2 = path.
    levelGrid = [[[0,0] for j in range(0, maxz - minz)] for i in range(minx, maxx)]
    # for step in xrange(0,4):
    print("Generating town layout...")
    generateLayout(level, levelGrid)
    # normalize the layout
    print("Smoothing out house plots...")
    normalizeBuildingLayout(level, box, levelGrid)
    # smooth out the terrain around plots
    print("smoothing out the terrain...")
    levelTerrain(level, levelGrid)
    # Place the grid on the terrain
    print("Generating terrain...")
    overlayGrid(levelGrid, level)
    # build houses
    print("Generating buildings...")
    bulidBuildings(level, minx, minz)  # (sic) helper defined elsewhere in the file
    print("Done!")
    end = time.time()
    print(end - start)
# Apply the levelGrid to the terrain
def overlayGrid(levelGrid, level):
    """Write the planning grid into the world.

    Type-1 cells (house plots) get a floor block, type-2 cells (paths)
    get a path block; cells over water get biome-matched wood instead.
    """
    global minx, minz
    xLoc = minx
    for x in levelGrid:
        zLoc = minz
        for y in x:
            layoutType = y[0]
            height = y[1]
            # Base wood (id, data) used when bridging water; adapted to the
            # local biome below. NOTE(review): passed as strings -- confirm
            # modifyWoodToSuitBiome / setBlock accept that.
            wood = ["5","0"]
            if layoutType == 1:
                # house plots
                # Ids 8/9 are flowing/still water: bridge water with wood,
                # otherwise reuse the surface block already there.
                if ((level.blockAt(xLoc, height, zLoc) == 9) or (level.blockAt(xLoc, height, zLoc) == 8)):
                    block_data = modifyWoodToSuitBiome(wood, level.biomeAt(xLoc, zLoc))
                else:
                    block_data = (level.blockAt(xLoc, getHeight(level, xLoc, zLoc), zLoc), 0)
                utilityFunctions.setBlock(level, block_data, xLoc, height, zLoc)
            elif layoutType == 2:
                # paths
                # Re-sample the height; getHeight(..., False) also clears
                # the block above the surface.
                height = getHeight(level, xLoc, zLoc, False)
                if ((level.blockAt(xLoc, height, zLoc) == 9) or (level.blockAt(xLoc, height, zLoc) == 8)):
                    block_data = modifyWoodToSuitBiome(wood, level.biomeAt(xLoc, zLoc))
                else:
                    block_data = (208, 0)  # 208: grass-path block -- TODO confirm for this MC version
                utilityFunctions.setBlock(level, tuple(block_data), xLoc, height, zLoc)
            zLoc += 1
        xLoc += 1
def partitionList(array, startIndex, endIndex):
    """Lomuto partition step used by sortBuildingLocations.

    Partitions array[startIndex..endIndex] in place around the pivot
    array[endIndex], comparing entries by their fourth field (index 3,
    the plot's median height). Returns the pivot's final index.
    """
    pivotHeight = array[endIndex][3]
    boundary = startIndex
    for scan in range(startIndex, endIndex):
        if array[scan][3] < pivotHeight:
            array[boundary], array[scan] = array[scan], array[boundary]
            boundary += 1
    # Drop the pivot into its sorted slot between the two partitions.
    array[boundary], array[endIndex] = array[endIndex], array[boundary]
    return boundary
def sortBuildingLocations(array, startIndex, endIndex):
    """Quicksort array[startIndex..endIndex] in place by median height.

    Iterative variant: an explicit stack of (low, high) ranges replaces
    the recursion, while the same partitionList step is reused, so the
    resulting ordering is identical.
    """
    pending = [(startIndex, endIndex)]
    while pending:
        low, high = pending.pop()
        if low < high:
            pivotIndex = partitionList(array, low, high)
            pending.append((low, pivotIndex - 1))
            pending.append((pivotIndex + 1, high))
# This will level out all building plots
def normalizeBuildingLayout(level, box, levelGrid):
    """Assign each building plot its median terrain height, then order plots.

    Each buildingLocations entry is [x, z, width, height]. The heights of
    all grid cells under the plot are collected, their median is stored
    in the entry's fourth slot, and the list is then sorted by that value
    and reversed so the highest plots come first.

    Bug fix: the original indexed the *unsorted* height list, so the value
    stored was an arbitrary sample rather than the median; the list is now
    sorted first. Integer division also uses // so the code behaves the
    same on Python 2 and 3.
    """
    global minx, minz
    for location in buildingLocations:
        plotX = location[0]
        plotZ = location[1]
        halfWidth = location[2] // 2
        heights = []
        # Gather the stored height of every cell under the plot.
        for x in range(plotX - halfWidth, plotX + halfWidth):
            for z in range(plotZ - halfWidth, plotZ + halfWidth):
                heights.append(levelGrid[x][z][1])
        heights.sort()  # required for a true median
        location[3] = heights[len(heights) // 2]
    # sort buildingLocations by median height, highest first
    sortBuildingLocations(buildingLocations, 0, len(buildingLocations) - 1)
    buildingLocations.reverse()
# This will create the layout on a 2D grid. The layout consists of house plots and roads/paths between each plot
def generateLayout(level, levelGrid):
    """Lay out the village on the 2D planning grid.

    Places a first house plot in the middle of the grid, then repeatedly
    walks a random path and tries to attach another plot at its end,
    skipping plots that would leave the grid or overlap an existing plot.

    Bug fix: the building count was passed as a second argument to
    print(), printing a tuple instead of formatting the message.
    """
    xSize = len(levelGrid)
    zSize = len(levelGrid[0])
    buildingCount = randrange(35, 60)
    print("generating %d buildings" % buildingCount)
    # create the first house. For now, always place it in the middle of the plot
    xEnd = xSize // 2
    zEnd = zSize // 2
    generateHousePlot(level, levelGrid, xEnd, zEnd, 3)
    # begin branching and creating the rest of the village
    for houses in range(0, buildingCount - 1):
        # Generate a path of a random length in a random direction
        xEnd, zEnd = generatePath(level, levelGrid, xEnd, zEnd, randint(8, 40), randint(0, 3))
        # Create the width of the next house plot and check that it will fit in the boundingBox
        plotWidth = randint(6, 12) // 2
        # TODO: Ideally, this would be replaced by a function call that would check
        if (xEnd + plotWidth > xSize) or (xEnd - plotWidth < 0):
            continue
        elif (zEnd + plotWidth > zSize) or (zEnd - plotWidth < 0):
            continue
        elif checkHousePlot(levelGrid, xEnd, zEnd, plotWidth):
            generateHousePlot(level, levelGrid, xEnd, zEnd, plotWidth)
# This will generate the house plot
def generateHousePlot(level, levelGrid, xDest, zDest, width):
    """Mark a (2*width)-wide square around (xDest, zDest) as a house plot.

    Every covered grid cell is set to [1, terrain_height] and the plot is
    recorded in buildingLocations as [x, z, full_width, 0].
    """
    global minx, minz
    buildingLocations.append([xDest, zDest, width * 2, 0])
    for gridX in range(xDest - width, xDest + width):
        for gridZ in range(zDest - width, zDest + width):
            levelGrid[gridX][gridZ] = [1, getHeight(level, minx + gridX, minz + gridZ)]
# check that the house's plot will not override another plot
def checkHousePlot(levelGrid, xDest, zDest, width):
    """Return True when the (2*width)-wide plot centered on (xDest, zDest)
    does not overlap any grid cell already marked as a house plot (type 1).

    Uses range() instead of the Python-2-only xrange() so the filter also
    runs under Python 3; the iteration behaviour is identical.
    """
    return not any(
        levelGrid[x][z][0] == 1
        for x in range(xDest - width, xDest + width)
        for z in range(zDest - width, zDest + width)
    )
# Get the hight of the terrain for a given X and Z
def getHeight(level, x, z, house_plot=True):
    """Scan column (x, z) top-down for the first solid block.

    "Solid" means neither air (0) nor one of the foliage block ids.
    Returns that block's y coordinate. Side effects depend on house_plot:
      * house_plot=False: the block above the surface is cleared to air
        (making room for a path block).
      * house_plot=True: if foliage is found on the way down, the whole
        tree/plant is flood-cleared via clearEnvironmentArea and 0 is
        returned instead of a height.
    NOTE(review): returns None implicitly when the column is all air.
    """
    global maxy, miny, foliage
    for y in xrange(maxy, miny, -1):
        blockID = level.blockAt(x, y, z)
        if blockID not in ([0] + foliage):
            if not house_plot:
                # Clear the block above the surface for the path.
                utilityFunctions.setBlock(level, (0, 0), x, y+1, z)
            return y
        elif blockID in foliage and house_plot:
            # Vegetation in the way of a plot: remove it and signal height 0.
            clearEnvironmentArea(level, [[x,y,z]])
            # utilityFunctions.setBlock(level, (0, 0), x, y, z)
            return 0
def clearEnvironmentArea(level, neighbours):
global maxx, minx, maxz, minz, foliage
startx = neighbours[0][0]
starty = neighbours[0][1]
startz = neighbours[0][2]
while len(neighbours) > 0:
neighbour = neighbours.pop()
if maxx > neighbour[0] > minx and maxz > neighbour[2] > minz\
and abs(neighbour[0] - startx) < 6\
and abs(neighbour[1] - starty) < 20\
and abs(neighbour[2] - startz) < 6 :
utilityFunctions.setBlock(level, (0, 0), neighbour[0], neighbour[1], neighbour[2])
if level.blockAt(neighbour[0] + 1, neighbour[1], neighbour[2]) in foliage:
neighbours.append([neighbour[0] + 1, neighbour[1], neighbour[2]])
if level.blockAt(neighbour[0] - 1, neighbour[1], neighbour[2]) in foliage:
neighbours.append([neighbour[0] - 1, neighbour[1], neighbour[2]])
if level.blockAt(neighbour[0], neighbour[1] + 1, neighbour[2]) in foliage:
neighbours.append([neighbour[0], neighbour[1] + 1, neighbour[2]])
if level.blockAt(neighbour[0], neighbour[1] - 1, neighbour[2]) in foliage:
neighbours.append([neighbour[0], neighbour[1] - 1, neighbour[2]])
if level.blockAt(neighbour[0], neighbour[1], neighbour[2] + 1) in foliage:
neighbours.append([neighbour[0], neighbour[1], neighbour[2] + 1])
if level.blockAt(neighbour[0], neighbour[1], neighbour[2] - 1) in foliage:
neighbours.append([neighbour[0], neighbour[1], neighbour[2] - 1])
# This will generate the paths, starting from the center of each house plot
def generatePath(level, levelGrid, xStart, zStart, pathLength, directionInt):
global directions
global lastDirectionInt
# determine the direction for each path
direction = directions[directionInt]
xDir = direction[0]
zDir = direction[1]
i = 0
#TODO: place this | |
scan_info_strptime = datetime.datetime.strptime(scan_summary['scanDate'], '%Y-%m-%dT%H:%M:%S.%fZ')
scan_info_strptime_tuple = (
scan_info_strptime.year, scan_info_strptime.month, scan_info_strptime.day)
if scan_summary_strptime_tuple == scan_info_strptime_tuple:
print("[WARNING] Analysing an older application ({scan_date})".format(
scan_date=scan_summary['scanDate']))
self.scan_info['scanDate'] = scan_summary['scanDate']
analyse = True
if analyse:
self.scan_info['appKey'] = scan_summary['scannedVersions'][0]['appKey']
if self.export_summary:
if self.export_summary in os.listdir('.'):
raise ValueError("[X] Export result file already exists")
export_summary_file = open(self.export_summary, 'w')
export_summary_file.write(json.dumps(json.loads(user_scan_summary.body)))
export_summary_file.close()
return True
internal_retries += 1
time.sleep(60)
return False
def _all_evidences_muted(self, element):
"""
:param element: A vulnerability or a behavior
:type element: JSON
:return: It returns true if all the evidences are muted
"""
if not isinstance(element, dict):
raise ValueError("[X] {} not allowed. Waiting for a vulnerability or behavior JSON".format(element))
if not 'result' in element.keys():
raise ValueError("[X] The mASAPP response has not the expected structure")
for evidence in element['result']:
if not 'muted' in evidence.keys():
return False
if str(evidence['muted']).lower() == 'false':
return False
return True
def store_scan_result(self):
    """Retrieve the scan result and store it in ``self.scan_result``.

    Stores the riskScore plus the non-muted vulnerabilities and behaviors
    grouped by risk level (critical/high/medium/low). Optionally exports
    the raw result JSON to ``self.export_result``.

    :raises ValueError: if the configured language is unsupported, the
        export file already exists, or the API returned an error.
    """
    if self.scan_info['lang'].lower() not in self.LANGUAGES:
        raise ValueError(
            "Language {language} Only supported languages: en , es".format(language=self.scan_info['lang']))
    # The workgroup parameter is only added to the request when one is set.
    if self.scan_info["wg"] is None:
        scan_result = self.auth_user.get_scan_result(scan_id=self.scan_info['scanId'],
                                                     scan_date=self.scan_info['scanDate'],
                                                     app_key=self.scan_info['appKey'], lang=self.scan_info['lang'])
    else:
        scan_result = self.auth_user.get_scan_result(scan_id=self.scan_info['scanId'],
                                                     scan_date=self.scan_info['scanDate'],
                                                     app_key=self.scan_info['appKey'], lang=self.scan_info['lang'],
                                                     workgroup=self.scan_info['wg'])
    self._check_not_api_error(scan_result)
    if self.export_result:
        if self.export_result in os.listdir('.'):
            raise ValueError("[X] Export result file already exists")
        # 'with' guarantees the handle is closed even if the write fails
        # (the original leaked the handle on exceptions).
        with open(self.export_result, 'w') as export_result_file:
            export_result_file.write(json.dumps(json.loads(scan_result.body)))
    self.scan_result['riskScore'] = scan_result.data['data']['riskScore']
    # Vulnerabilities and behaviors share the exact same filtering and
    # bucketing logic, so it lives in one private helper.
    self._store_elements_by_risk(scan_result.data['data']['vulnerabilities'], 'vulnerabilities')
    self._store_elements_by_risk(scan_result.data['data']['behaviorals'], 'behaviorals')

def _store_elements_by_risk(self, elements, category):
    """Append each non-muted element to scan_result[category][riskLevel].

    An element is stored when it has no 'muted' flag at all, or when the
    flag is 'false' and not every one of its evidences is muted.
    """
    for element in elements:
        store_risk = False
        if 'muted' not in element.keys():
            store_risk = True
        elif str(element['muted']).lower() == 'false' and not self._all_evidences_muted(element):
            store_risk = True
        if store_risk:
            risk = element['riskLevel'].lower()
            # Unknown risk levels are silently skipped, as before.
            if risk in self.scan_result[category].keys():
                self.scan_result[category][risk].append(element)
def upload_and_analyse_app(self, app_path, package_name_origin=None, workgroup=None, lang=None):
    """Upload an app to mASAPP, wait for its analysis and store the result.

    :param app_path: Absolute path of the application to upload.
    :type app_path: String
    :param package_name_origin: packageNameOrigin mASAPP gives the app.
    :type package_name_origin: String
    :param workgroup: Name of the workgroup to use for the scan.
    :param lang: Result language ("en" or "es"); defaults to "en".
    :raises ValueError: when mASAPP has not processed the app after the
        retry budget is exhausted.
    """
    if workgroup is not None:
        self.store_workgroup(workgroup)
    retries = 0
    scan_found = False
    # mASAPP may take a while to process the upload, so retry up to 5 times
    # with a short pause between attempts.
    while retries < 5 and not scan_found:
        retries += 1
        print("[!] Uploading and analysing the app to mASAPP - retry:{}".format(retries))
        self.upload_app(app_path)
        time.sleep(10)
        if package_name_origin is not None:
            self.store_scan_info_from_package_name_origin(package_name_origin)
            # NOTE(review): this branch never sets scan_found, so the loop
            # always exhausts its retries and raises below — confirm whether
            # the lang/summary steps of the else-branch were meant to run
            # here as well.
        else:
            app_hash_path = self.scan_info['hashPath']
            if self.store_scan_info_from_app_hashPath(app_hash_path):
                self.scan_info['lang'] = lang or 'en'
                if self.store_scan_summary_from_scan_id(self.scan_info['scanId']):
                    scan_found = True
    if not scan_found:
        raise ValueError(
            "[X] There is an error in mASAPP and your application hasn't been successfully processed yet")
    self.store_scan_result()
def _print_link_to_app(self):
    """Best-effort print of the mASAPP web link for the current scan."""
    try:
        scan_link = self.MASAPP_LINK + "scans/" + self.scan_info['packageNameOrigin']
        print("[!] Link of your scan in mASAPP: {} \n".format(scan_link))
    except Exception:
        # Purely informational: a missing key/attribute must never break
        # the scan flow. Narrowed from a bare 'except', which would also
        # have swallowed SystemExit and KeyboardInterrupt.
        pass
def riskscoring_execution(self, maximum_riskscoring, app_path, package_name_origin=None, workgroup=None, lang=None,
                          detail=None, export_summary=None, export_result=None):
    """Upload an app and fail if its risk score exceeds the given maximum.

    :param maximum_riskscoring: Maximum risk score allowed without raising.
    :type maximum_riskscoring: Float
    :param app_path: Absolute path of the application to upload.
    :type app_path: String
    :param package_name_origin: packageNameOrigin mASAPP gave the app; omit
        on the first upload and it is looked up from the user's scans.
    :type package_name_origin: String
    :param workgroup: Name of the workgroup to use for the scan.
    :param lang: Result language, "en" or "es".
    :param detail: Whether to print a detailed report (vulnerabilities and
        behaviors with their evidences).
    :type detail: Boolean
    :param export_summary: Optional file name to export the scan summary.
    :param export_result: Optional file name to export the scan result.
    :return: Prints RISKSCORING SUCCESS when the obtained score is below
        the maximum.
    :raises ValueError: RISKSCORING ERROR with the exceeded limits when the
        obtained risk score is equal to or above maximum_riskscoring.
    """
    self.export_summary = export_summary
    self.export_result = export_result
    if workgroup is None:
        self.upload_and_analyse_app(app_path=app_path, package_name_origin=package_name_origin, lang=lang)
    else:
        self.upload_and_analyse_app(app_path=app_path, package_name_origin=package_name_origin, workgroup=workgroup,
                                    lang=lang)
    correct_execution = True
    self._print_link_to_app()
    if self.scan_result['riskScore'] < maximum_riskscoring:
        print("---- RISKSCORING SUCCESS ----\n")
    else:
        # Record the exceeded limit so _print_excess can report it.
        self.exceeded_limit["expected"] = maximum_riskscoring
        self.exceeded_limit["obtained"] = self.scan_result['riskScore']
        correct_execution = False
    if detail:
        self._print_details('riskscoring')
    if not correct_execution:
        raise ValueError("---- RISKSCORING ERROR ----\n {excess}".format(excess=self._print_excess()))
def standard_execution(self, scan_maximum_values, app_path, package_name_origin=None, workgroup=None, lang=None,
detail=None, export_summary=None, export_result=None):
"""
:param scan_maximum_values: Maximum results allowed without throwing an error.
**Example**:
.. code-block:: json
{
"vulnerabilities":
{
"critical":"maximum of critical vulnerabilities",
"high":"maximum of high vulnerabilities",
"medium":"maximum of medium vulnerabilities",
"low":"maximum of low vulnerabilities"
},
"behaviorals":
{
"critical":"maximum of critical behaviors",
"high":"maximum of high behaviors",
"medium":"maximum of medium behaviors",
"low":"maximum of low behaviors"
}
}
:type scan_maximum_values: Dictionary
:param app_path: The absolute path to the application which the user wants to upload.
:type app_path: String
:param package_name_origin: The packageNameOrigin that mASAPP gave to the app. If is the first uploading of the\
app, don't add this parameter.
:type package_name_origin: String
:param workgroup: The name of the workgroup that the user wants to use in the scan.
:type workgroup: Integer
:param lang: The language in which the user wants to get the analysis result.
:type lang: "en", "es"
:param detail: If the user wants a detailed execution or not.
:type detail: Boolean
:return:
* If package_name_origin is sent, it returns an static analysis from the app\
code showing the following information:
* STANDARD SUCCESS or STANDARD ERROR depending on whether or not there are\
elements that exceed the limits contained in the scan_maximum_values json.
* The standard error includes a table with all the defined limits surpassed
* If detail is equal to True, it will add below two lists:
* A list of vulnerabilities, adding the tittle, risk, number of\
occurrences and the different occurrences with their evidences
* A list of behaviors, adding the tittle, number of occurrences, impact\
and the different occurrences with their evidences
* If the package_name_origin is not sent, the script will search in the users\
scans for the correct package_name_origin using the package name.
* If it is correctly found, the app would be analysed
* If not, it would throw an error asking the user for the packageNameOrigin.\
In order to facilitate the user to find the packageNameOrigin it would throw\
a list of all the user scans.
The **packageNameOrigin error text** is exactly: *Sometimes mASAPP\
can not generate all the necessary fields for unequivocally automatic \
| |
have gradients.
Outputs:
returns a map from the blob name in the input network to a blob
containing gradient or a GradientSlice in case of sparse gradient
Currently, this is hard-coded for float operators if there are branches
(i.e. a blob is used as input to multiple operators). This is because
the gradient accumulation (Sum) is float only right now.
"""
grad_ops, input_to_grad = GradientRegistry.GetBackwardPass(
self._net.op[skip:], ys)
# Check if in immediate mode: the grad_ops are actually being produced
# by C++ and bypasses the CreateOperator() call, so in immediate mode
# we will have to explicitly run them.
if workspace.IsImmediate():
for op in grad_ops:
workspace.RunOperatorImmediate(op)
self._ExtendOps(grad_ops)
return input_to_grad
def AddExternalInput(self, *inputs):
    """Declare blobs as external inputs of the net and return their refs."""
    assert len(inputs) > 0
    names = [str(blob) for blob in inputs]
    # First pass: fail fast on duplicates before mutating any state.
    for name in names:
        assert name not in self._external_input_map, (
            'Net already contains an input named %s' % name)
    # Second pass: register each name and build its blob reference.
    refs = []
    for name in names:
        self._net.external_input.extend([name])
        self._external_input_map.update([name])
        refs.append(_get_blob_ref(name))
    return refs[0] if len(refs) == 1 else refs
def AddExternalOutput(self, *outputs):
    """Declare already-defined blobs as external outputs of the net."""
    # Validate everything up front so a bad output leaves the proto untouched.
    for blob in outputs:
        assert isinstance(blob, BlobReference)
        assert self.BlobIsDefined(blob)
    for blob in outputs:
        self.Proto().external_output.extend([str(blob)])
def AddScopedExternalInputs(self, *inputs):
    """Like AddExternalInput, but applies the current name scope.

    Always returns a list, even for a single input.
    """
    scoped = [ScopedBlobReference(b) for b in inputs]
    res = self.AddExternalInput(*scoped)
    return res if isinstance(res, list) else [res]
def AddScopedExternalOutputs(self, *outputs):
    """Like AddExternalOutput, but applies the current name scope."""
    scoped = [ScopedBlobReference(b) for b in outputs]
    return self.AddExternalOutput(*scoped)
@property
def external_inputs(self):
    """Blob references for every external input in the underlying proto."""
    return list(map(_get_blob_ref, self._net.external_input))
@property
def external_outputs(self):
    """Blob references for every external output in the underlying proto."""
    return list(map(_get_blob_ref, self._net.external_output))
def set_input_record(self, input_record):
    """Set (or create) the net's input schema and return it.

    If input_record carries no blobs yet, fresh blobs are created for it
    under this net's name scope; otherwise its blobs are registered as
    external inputs of the net.
    """
    from caffe2.python import schema
    # Resetting is only allowed when the new record maps onto exactly the
    # same set of blobs as the already-installed one.
    assert self._input_record is None or (input_record.has_blobs() and
                                          set(input_record.field_blobs()) ==
                                          set(self._input_record.field_blobs())), (
        'Input schema cannot be reset')
    if not input_record.has_blobs():
        with NameScope(self.Name()):
            self._input_record = schema.NewRecord(self, input_record)
    else:
        self._input_record = input_record
        # Make sure every schema blob is visible as an external input.
        for blob in input_record.field_blobs():
            if blob not in self.external_inputs:
                self.AddExternalInput(blob)
    return self._input_record
def recover_input_record_by_prefix(self, prefix):
    """Rebuild the input schema from prefixed external inputs.

    Takes the subset of external_inputs whose names start with the given
    prefix and interprets them as schema column names.
    """
    recovered = _recover_record_by_prefix(self._net.external_input, prefix)
    if recovered:
        self.set_input_record(recovered)
def set_output_record(self, record):
    """Set the net's output schema; every record blob must be defined."""
    # Resetting is only allowed for a record over the same blob set.
    assert self._output_record is None or (record.has_blobs() and
                                           set(record.field_blobs()) ==
                                           set(self._output_record.field_blobs())), (
        'Output schema cannot be reset')
    blobs = record.field_blobs()
    for blob in blobs:
        assert self.BlobIsDefined(blob), "{} is not defined".format(blob)
    for blob in blobs:
        self.AddExternalOutput(blob)
    self._output_record = record
def recover_output_record_by_prefix(self, prefix):
    """Rebuild the output schema from prefixed external outputs.

    Takes the subset of external_outputs whose names start with the given
    prefix and interprets them as schema column names.
    """
    recovered = _recover_record_by_prefix(self._net.external_output, prefix)
    if recovered:
        self.set_output_record(recovered)
def AppendOutputRecordField(self, field_name, record):
    """Append (field_name, record) to the existing output schema."""
    from caffe2.python import schema
    assert self._output_record is not None, (
        'Tried to append to missing output record'
    )
    blobs = record.field_blobs()
    # All blobs must already exist in this net before being exported.
    for blob in blobs:
        assert self.BlobIsDefined(blob)
    for blob in blobs:
        self.AddExternalOutput(blob)
    self._output_record = self._output_record + schema.Struct(
        (field_name, record)
    )
def input_record(self):
    """Return the net's input schema record, or None if none was set."""
    return self._input_record
def output_record(self):
    """Return the net's output schema record, or None if none was set."""
    return self._output_record
def AddExternalInputs(self, *inputs):
    """Plural alias of AddExternalInput."""
    return self.AddExternalInput(*inputs)
def AddExternalOutputs(self, *outputs):
    """Plural alias of AddExternalOutput (returns None, as it does)."""
    self.AddExternalOutput(*outputs)
def DeduplicateGradientSlices(self, g, aggregator='sum'):
    """Merge duplicate indices of a GradientSlice by 'sum' or 'mean'."""
    assert isinstance(g, GradientSlice)
    unique, remapping = self.Unique([g.indices], 2, engine='SparseHash')
    agg = aggregator.lower()
    if agg == 'sum':
        merged = self.UnsortedSegmentSum([g.values, remapping], 1)
    elif agg == 'mean':
        merged = self.UnsortedSegmentMean([g.values, remapping], 1)
    else:
        raise ValueError('{} is not supported'.format(aggregator))
    return GradientSlice(indices=unique, values=merged)
def RunAllOnGPU(self, gpu_id=0, use_cudnn=False):
    """Route the whole net to one GPU (optionally using the CUDNN engine)."""
    gpu_option = caffe2_pb2.DeviceOption()
    gpu_option.device_type = caffe2_pb2.CUDA
    gpu_option.cuda_gpu_id = gpu_id
    self._net.device_option.CopyFrom(gpu_option)
    if not use_cudnn:
        return
    for op in self._net.op:
        op.engine = "CUDNN"
def RunAllOnMKL(self):
    """A convenient function to run everything using MKLDNN.

    (The previous docstring said "on the GPU", a copy-paste from
    RunAllOnGPU; this method sets the MKLDNN device type.)
    """
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.MKLDNN
    self._net.device_option.CopyFrom(device_option)
def _CreateAndAddToSelf(self, op_type, inputs, outputs=None, **kwargs):
    """A helper function to create an operator and add it to self.

    inputs: blob(s) for the op; any not yet defined in this net are
        registered as external inputs.
    outputs: None (one auto-named output), an int (that many auto-named
        outputs), or explicit blob(s).
    Returns None, a single BlobReference, or a tuple of BlobReferences,
    matching the created op's number of outputs.
    """
    inputs = _RectifyInputOutput(inputs)
    for input in inputs:
        if not self.BlobIsDefined(input):
            # An undefined blob from this very net would be a bug; from
            # another net it is treated as an external input.
            assert input.Net() != self
            self.AddExternalInput(input)
    if outputs is None:
        # If we do not specify an output, we will assume that this op
        # produces one output in this case.
        outputs = self.NextName(prefix=op_type)
    elif type(outputs) is int:
        # In this case, we will auto-fill the given number of outputs
        # with auto-generated names.
        outputs = [
            self.NextName(prefix=op_type, output_id=i)
            for i in range(outputs)]
    outputs = _RectifyInputOutput(outputs, net=self)
    op = CreateOperator(op_type, inputs, outputs, **kwargs)
    self._ExtendOps([op])
    # Remember where this op was created so runtime errors can point back
    # to the Python call site.
    workspace.operator_tracebacks[self.Name()][
        len(self._net.op) - 1] = _extract_stacktrace()
    if len(op.output) == 0:
        return
    elif len(op.output) == 1:
        return BlobReference(op.output[0], self)
    else:
        return tuple(BlobReference(o, self) for o in op.output)
def __getattr__(self, op_type):
    """Resolve unknown attributes as registered operator names.

    Returns a callable that creates the operator and adds it to this net.
    """
    # Never intercept dunder lookups (pickling, copy, etc.).
    if op_type.startswith('__'):
        raise AttributeError('Attribute {} not found.'.format(op_type))
    known = IsOperator(op_type) or IsOperatorWithEngine(op_type, "CUDNN")
    if not known:
        raise AttributeError(
            'Method ' + op_type + ' is not a registered operator.' +
            ' Did you mean: [' +
            ",".join(workspace.C.nearby_opnames(op_type)) + ']'
        )
    return lambda *args, **kwargs: self._CreateAndAddToSelf(
        op_type, *args, **kwargs)
def __dir__(self):
    """Expose registered operator names alongside regular attributes."""
    op_names = [op for op in _REGISTERED_OPERATORS if '_ENGINE_' not in op]
    everything = chain(dir(type(self)), viewkeys(self.__dict__), op_names)
    return sorted(set(everything))
def Python(
    self,
    f,
    grad_f=None,
    python_func_type=None,
    pass_workspace=False,
    grad_output_indices=None,
    grad_input_indices=None
):
    """
    Registers and returns a python operator.
    `f` and `grad_f` can be one of the following:
        - a function with signature (inputs, outputs), where inputs and
          outputs are a list of CPUTensor objects. This function will be
          called from C++ every time the operator is executed.
        - a tuple (func, args, kwargs), here `func` is a callable, args is
          an argument list, and kwargs is a dict list. The call:
              f = func(*args, kwargs)
          will be performed locally at node initialization time, on all of
          the nodes of the job, returning `f`, a callable that will be used
          as the python operator function to be called during Net execution.
          This is to be used when using python operator in a distributed
          context, and allows to create and keep local python state across
          calls to the operator.
    `python_func_type` is a type of an object that constructed as
    python_func_type(f) and provides an implementation to forward and
    backward functions. It's useful in such a case where users needs
    a stateful PythonOp (ex: use autograd for computing grad_f).
    If `pass_workspace` is True, the signature is changed to
    (inputs, outputs, workspace) where `workspace` is the workspace the op
    is going to run on. This is potentially dangerous (as the op can
    manipulate the workspace directly), use on your own risk.
    If a gradient function is specified (`grad_f`), by default its inputs
    will be: (1) all inputs to `f`, (2) followed by all outputs of `f`, (3)
    and then all gradient outputs of `f`. The outputs of `grad_f` will be
    (by default) all gradient inputs to `f`. If a subset of the gradient
    outputs or gradient inputs is desired instead, then the subsets can be
    specified by providing `grad_output_indices` and/or `grad_input_indices`
    which identify the indices of `f`'s inputs and outputs which have
    gradients.
    """
    assert(IsOperator('Python'))

    def make_builder(t):
        # Serialize a (func, args, kwargs) builder tuple for remote
        # construction; plain callables need no builder and map to ''.
        if not isinstance(t, tuple):
            return ''
        assert len(t) == 3, 'Expected builder tuple (func, args, kwargs)'
        func, args, kwargs = t
        normalized = (func, tuple(args), dict(kwargs))
        return pickle.dumps(normalized)
    f_builder = make_builder(f)
    grad_f_builder = make_builder(grad_f)

    # Forward and backward must be given in the same form: either both as
    # builder tuples or both as plain callables.
    assert (not grad_f) or ((not f_builder) == (not grad_f_builder)), (
        'A tuple has to be passed to both f and grad_f or neither.')

    core_kwargs = {}
    if f_builder:
        # Builder form: ship the pickled builders to every node.
        core_kwargs['pickled_builder'] = f_builder
        core_kwargs['pickled_grad_builder'] = grad_f_builder
        core_kwargs['pass_workspace'] = pass_workspace
    else:
        # Plain-callable form: register locally and reference by token.
        core_kwargs['token'] = _RegisterPythonImpl(
            f, grad_f, python_func_type, pass_workspace=pass_workspace)

    grad_output_indices = grad_output_indices or []
    grad_input_indices = grad_input_indices or []
    return lambda *args, **kwargs: self._CreateAndAddToSelf(
        'Python',
        grad_output_indices=grad_output_indices,
        grad_input_indices=grad_input_indices,
        *args,
        **dict(chain(viewitems(kwargs), viewitems(core_kwargs)))
    )
def is_external_input(self, blob):
    """Return True if blob (by name) is an external input of this net."""
    return str(blob) in self._external_input_map
def extend_ops(self, new_ops):
    """Public wrapper around _ExtendOps."""
    return self._ExtendOps(new_ops)
def copy_func_between_devices(src, dst):
    """Return a function that copies blobs from src to dst device.

    Returns None when no copy is needed (same device); raises ValueError
    for unsupported device-type combinations.
    """
    cpu = caffe2_pb2.CPU
    cuda = caffe2_pb2.CUDA

    def _gpu_to_gpu(net, *args, **kw):
        with DeviceScope(dst):
            return net.CopyGPUToGPU(*args, **kw)

    def _gpu_to_cpu(net, *args, **kw):
        with DeviceScope(src):
            return net.CopyGPUToCPU(*args, **kw)

    def _cpu_to_gpu(net, *args, **kw):
        with DeviceScope(dst):
            return net.CopyCPUToGPU(*args, **kw)

    if src.device_type == cpu and dst.device_type == cpu:
        return None
    if src.device_type == cuda and dst.device_type == cuda:
        # Same GPU needs no copy; otherwise copy across GPUs.
        return None if src.cuda_gpu_id == dst.cuda_gpu_id else _gpu_to_gpu
    if src.device_type == cuda and dst.device_type == cpu:
        return _gpu_to_cpu
    if src.device_type == cpu and dst.device_type == cuda:
        return _cpu_to_gpu
    raise ValueError('Non-supported devices: %s and %s' % (src, dst))
class RemapEntry:
    """Hashable (blob, device) pair used to track injected cross-device copies."""

    def __init__(self, blob, device):
        self.blob = blob
        self.device = device

    def __eq__(self, other):
        # Comparing against a foreign type must not raise AttributeError;
        # returning NotImplemented lets Python fall back to False.
        if not isinstance(other, RemapEntry):
            return NotImplemented
        return self.blob == other.blob and self.device == other.device

    def __hash__(self):
        # Tuple hashing avoids the collisions that plain string
        # concatenation could produce, while staying consistent with __eq__.
        return hash((self.blob, str(self.device)))

    def __repr__(self):
        return 'RemapEntry(blob={!r}, device={!r})'.format(self.blob, self.device)
def InjectCrossDeviceCopies(net, blob_to_device=None):
'''
Injecting Copy functions | |
def validate_config(config):
    """Validate some important keys in config dict.

    Exits the process with status 1 when config['server'] is not one of
    the supported template hosts (apache22, apache24, nginx).
    """
    # Validate server type. (BUGFIX: the 'def' keyword was missing from
    # the function header, which is a syntax error.)
    valid_hosts = list(TEMPLATES.keys())
    if config['server'] not in valid_hosts:
        print('[ERR] httpd.server must be \'apache22\', \'apache24\' or \'nginx\'', file=sys.stderr)
        print('[ERR] Your configuration is:', config['server'], file=sys.stderr)
        sys.exit(1)
    # # Validate if log dir can be created
    # log_dir = config['vhost']['log']['dir']['path']
    # if config['vhost']['log']['dir']['create']:
    #     if not os.path.isdir(log_dir):
    #         if not os.access(os.path.dirname(log_dir), os.W_OK):
    #             print('[ERR] log directory does not exist and cannot be created:', log_dir,
    #                   file=sys.stderr)
    #             sys.exit(1)
############################################################
# Get vHost Skeleton placeholders
############################################################
def vhost_get_port(config, ssl):
    """Return the listen port value ('ssl' flag appended for nginx)."""
    if not ssl:
        return to_str(config['vhost']['port'])
    port = to_str(config['vhost']['ssl_port'])
    # nginx declares SSL on the listen directive itself.
    if config['server'] == 'nginx':
        return port + ' ssl'
    return port
def vhost_get_default_server(config, default):
    """Return the directive that marks a vhost as the catch-all default.

    Nginx uses a ' default_server' listen suffix, Apache a '_default_'
    host. Non-default Apache vhosts listen on '*'; nginx ones need nothing.
    """
    apache = config['server'] in ('apache22', 'apache24')
    if not default:
        return '*' if apache else ''
    if config['server'] == 'nginx':
        # The leading space keeps it separated from the port directive.
        return ' default_server'
    if apache:
        return '_default_'
def vhost_get_server_name(config, server_name, default):
    """Return the vhost server name ('_' for the nginx default vhost)."""
    # Nginx uses "server_name _;" for the catch-all default host.
    if default and config['server'] == 'nginx':
        return '_'
    # Apache needs no special casing: the first vhost takes precedence,
    # so the default vhost simply keeps its regular name.
    prefix = to_str(config['vhost']['name']['prefix'])
    suffix = to_str(config['vhost']['name']['suffix'])
    return prefix + server_name + suffix
def vhost_get_access_log(config, server_name):
    """Return the access log target (file path, or the stdout symlink)."""
    log_cfg = config['vhost']['log']['access']
    if log_cfg['stdout']:
        return STDOUT_ACCESS
    file_name = to_str(log_cfg['prefix']) + server_name + '-access.log'
    return os.path.join(config['vhost']['log']['dir']['path'], file_name)
def vhost_get_error_log(config, server_name):
    """Return the error log target (file path, or the stderr symlink)."""
    log_cfg = config['vhost']['log']['error']
    if log_cfg['stderr']:
        return STDERR_ERROR
    file_name = to_str(log_cfg['prefix']) + server_name + '-error.log'
    return os.path.join(config['vhost']['log']['dir']['path'], file_name)
############################################################
# Get vHost Type (normal or reverse proxy
############################################################
def vhost_get_vhost_docroot(config, template, docroot, proxy):
    """Return the docroot snippet; empty for reverse-proxy vhosts."""
    if proxy is not None:
        return ''
    replacements = {
        '__DOCUMENT_ROOT__': vhost_get_docroot_path(config, docroot)
    }
    return str_replace(template['vhost_type']['docroot'], replacements)
def vhost_get_vhost_rproxy(template, proxy, location):
    """Get reverse proxy definition.

    Returns '' when no proxy target is given; otherwise fills the rproxy
    template, splitting the proxy URL into protocol, address and port.
    """
    if proxy is not None:
        return str_replace(template['vhost_type']['rproxy'], {
            '__LOCATION__': location,
            '__PROXY_PROTO__': re.sub('://.*$', '', proxy),
            # assumes proxy looks like 'proto://host:port' — a malformed
            # value makes re.search return None and raises AttributeError
            # here; TODO confirm upstream validation.
            '__PROXY_ADDR__': re.search('^.*://(.+):[0-9]+', proxy).group(1),
            '__PROXY_PORT__': re.sub('^.*:', '', proxy)
        })
    return ''
############################################################
# Get vHost Features
############################################################
def vhost_get_vhost_ssl(config, template, server_name):
    """Return the rendered SSL snippet for a vhost."""
    ssl_cfg = config['vhost']['ssl']
    return str_replace(template['features']['ssl'], {
        '__SSL_PATH_CRT__': to_str(vhost_get_ssl_crt_path(config, server_name)),
        '__SSL_PATH_KEY__': to_str(vhost_get_ssl_key_path(config, server_name)),
        '__SSL_PROTOCOLS__': to_str(ssl_cfg['protocols']),
        '__SSL_HONOR_CIPHER_ORDER__': to_str(ssl_cfg['honor_cipher_order']),
        '__SSL_CIPHERS__': to_str(ssl_cfg['ciphers'])
    })
def vhost_get_vhost_redir(config, template, server_name):
    """Return the snippet redirecting plain HTTP to the SSL vhost."""
    replacements = {
        '__VHOST_NAME__': server_name,
        '__SSL_PORT__': to_str(config['vhost']['ssl_port'])
    }
    return str_replace(template['features']['redirect'], replacements)
def vhost_get_ssl_crt_path(config, server_name):
    """Return the full path of the vhost's SSL certificate file."""
    name_cfg = config['vhost']['name']
    file_name = to_str(name_cfg['prefix']) + server_name + to_str(name_cfg['suffix']) + '.crt'
    return os.path.join(to_str(config['vhost']['ssl']['dir_crt']), file_name)
def vhost_get_ssl_key_path(config, server_name):
    """Return the full path of the vhost's SSL private key file."""
    name_cfg = config['vhost']['name']
    file_name = to_str(name_cfg['prefix']) + server_name + to_str(name_cfg['suffix']) + '.key'
    # The key lives next to the certificate: dir_crt is used for both.
    return os.path.join(to_str(config['vhost']['ssl']['dir_crt']), file_name)
def vhost_get_docroot_path(config, docroot):
    """Return the document root joined with the configured suffix."""
    return os.path.join(docroot, to_str(config['vhost']['docroot']['suffix']))
def vhost_get_index(config):
    """Return the space-separated list of directory index files."""
    # Fall back to the built-in default when the key is absent or empty.
    elem = config['vhost'].get('index') or DEFAULT_CONFIG['vhost']['index']
    return ' '.join(elem)
def vhost_get_php_fpm(config, template, docroot, proxy):
    """Return the PHP-FPM snippet; disabled for reverse-proxy vhosts."""
    if proxy is not None:
        return ''
    fpm_cfg = config['vhost']['php_fpm']
    if not fpm_cfg['enable']:
        return ''
    return str_replace(template['features']['php_fpm'], {
        '__PHP_ADDR__': to_str(fpm_cfg['address']),
        '__PHP_PORT__': to_str(fpm_cfg['port']),
        '__DOCUMENT_ROOT__': vhost_get_docroot_path(config, docroot)
    })
def vhost_get_aliases(config, template):
    """Render every configured alias block, joined by OS newlines."""
    rendered = []
    for alias in config['vhost']['alias']:
        # Optional cross-domain request header block, per alias.
        xdomain = ''
        if 'xdomain_request' in alias and alias['xdomain_request']['enable']:
            xdomain = str_replace(template['features']['xdomain_request'], {
                '__REGEX__': to_str(alias['xdomain_request']['origin'])
            })
        rendered.append(str_replace(template['features']['alias'], {
            '__ALIAS__': to_str(alias['alias']),
            '__PATH__': to_str(alias['path']),
            '__XDOMAIN_REQ__': str_indent(xdomain, 4).rstrip()
        }))
    return os.linesep.join(rendered)
def vhost_get_denies(config, template):
    """Render every configured deny block, joined by OS newlines."""
    rendered = [
        str_replace(template['features']['deny'], {'__REGEX__': to_str(item['alias'])})
        for item in config['vhost']['deny']
    ]
    return os.linesep.join(rendered)
def vhost_get_server_status(config, template):
    """Render the server-status block; empty string when disabled."""
    if config['vhost']['server_status']['enable']:
        snippet = template['features']['server_status']
    else:
        snippet = ''
    return str_replace(snippet, {
        '__REGEX__': to_str(config['vhost']['server_status']['alias'])
    })
def vhost_get_custom_section(config):
    """Return the raw user-supplied custom directives as a string."""
    return to_str(config['custom'])
############################################################
# vHost create
############################################################
def get_vhost_plain(config, tpl, docroot, proxy, location, server_name, default):
    """Render a plain (non-SSL) vhost from the template.

    __REDIRECT__ and __SSL__ are blanked: they only apply to the
    SSL/redirect vhost variants.
    """
    return str_replace(tpl['vhost'], {
        '__PORT__': vhost_get_port(config, False),
        '__DEFAULT_VHOST__': vhost_get_default_server(config, default),
        '__DOCUMENT_ROOT__': vhost_get_docroot_path(config, docroot),
        '__VHOST_NAME__': vhost_get_server_name(config, server_name, default),
        '__VHOST_DOCROOT__': str_indent(vhost_get_vhost_docroot(config, tpl, docroot, proxy), 4),
        '__VHOST_RPROXY__': str_indent(vhost_get_vhost_rproxy(tpl, proxy, location), 4),
        '__REDIRECT__': '',
        '__SSL__': '',
        '__INDEX__': vhost_get_index(config),
        '__ACCESS_LOG__': vhost_get_access_log(config, server_name),
        '__ERROR_LOG__': vhost_get_error_log(config, server_name),
        '__PHP_FPM__': str_indent(vhost_get_php_fpm(config, tpl, docroot, proxy), 4),
        '__ALIASES__': str_indent(vhost_get_aliases(config, tpl), 4),
        '__DENIES__': str_indent(vhost_get_denies(config, tpl), 4),
        '__SERVER_STATUS__': str_indent(vhost_get_server_status(config, tpl), 4),
        '__CUSTOM__': str_indent(vhost_get_custom_section(config), 4)
    })
def get_vhost_ssl(config, tpl, docroot, proxy, location, server_name, default):
    """Render an SSL vhost from the template.

    Identical to the plain vhost except for the SSL port, the filled
    __SSL__ snippet, and log names suffixed with '_ssl'.
    """
    return str_replace(tpl['vhost'], {
        '__PORT__': vhost_get_port(config, True),
        '__DEFAULT_VHOST__': vhost_get_default_server(config, default),
        '__DOCUMENT_ROOT__': vhost_get_docroot_path(config, docroot),
        '__VHOST_NAME__': vhost_get_server_name(config, server_name, default),
        '__VHOST_DOCROOT__': str_indent(vhost_get_vhost_docroot(config, tpl, docroot, proxy), 4),
        '__VHOST_RPROXY__': str_indent(vhost_get_vhost_rproxy(tpl, proxy, location), 4),
        '__REDIRECT__': '',
        '__SSL__': str_indent(vhost_get_vhost_ssl(config, tpl, server_name), 4),
        '__INDEX__': vhost_get_index(config),
        '__ACCESS_LOG__': vhost_get_access_log(config, server_name + '_ssl'),
        '__ERROR_LOG__': vhost_get_error_log(config, server_name + '_ssl'),
        '__PHP_FPM__': str_indent(vhost_get_php_fpm(config, tpl, docroot, proxy), 4),
        '__ALIASES__': str_indent(vhost_get_aliases(config, tpl), 4),
        '__DENIES__': str_indent(vhost_get_denies(config, tpl), 4),
        '__SERVER_STATUS__': str_indent(vhost_get_server_status(config, tpl), 4),
        '__CUSTOM__': str_indent(vhost_get_custom_section(config), 4)
    })
def get_vhost_redir(config, tpl, server_name, default):
    """Render a plain-HTTP vhost that only redirects to the SSL vhost.

    All content-related placeholders are blanked; only the name, port,
    logs and the redirect snippet are filled in.
    """
    return str_replace(tpl['vhost'], {
        '__PORT__': vhost_get_port(config, False),
        '__DEFAULT_VHOST__': vhost_get_default_server(config, default),
        # BUGFIX: this previously called vhost_get_docroot_path(config,
        # docroot) with an undefined name 'docroot' (NameError at call
        # time). A redirect-only vhost has no document root, so it is
        # blanked like the other content placeholders.
        '__DOCUMENT_ROOT__': '',
        '__VHOST_NAME__': vhost_get_server_name(config, server_name, default),
        '__VHOST_DOCROOT__': '',
        '__VHOST_RPROXY__': '',
        '__REDIRECT__': str_indent(vhost_get_vhost_redir(config, tpl, server_name), 4),
        '__SSL__': '',
        '__INDEX__': '',
        '__ACCESS_LOG__': vhost_get_access_log(config, server_name),
        '__ERROR_LOG__': vhost_get_error_log(config, server_name),
        '__PHP_FPM__': '',
        '__ALIASES__': '',
        '__DENIES__': '',
        '__SERVER_STATUS__': '',
        '__CUSTOM__': ''
    })
def get_vhost(config, tpl, docroot, proxy, mode, location, server_name, default):
    """Create the vhost text for the requested mode (plain/ssl/both/redir)."""
    ssl_vhost = None
    if mode in ('ssl', 'both', 'redir'):
        ssl_vhost = get_vhost_ssl(config, tpl, docroot, proxy, location,
                                  server_name, default)
    if mode == 'ssl':
        return ssl_vhost
    if mode == 'both':
        # SSL vhost first, then the plain one, concatenated.
        return ssl_vhost + get_vhost_plain(config, tpl, docroot, proxy,
                                           location, server_name, default)
    if mode == 'redir':
        # SSL vhost plus a plain vhost that only redirects to it.
        return ssl_vhost + get_vhost_redir(config, tpl, server_name, default)
    # Any other mode falls back to a plain vhost.
    return get_vhost_plain(config, tpl, docroot, proxy, location,
                           server_name, default)
############################################################
# Load configs and templates
############################################################
def load_config(config_path):
    """Load the config file and merge it with the program defaults.

    Missing file only triggers a warning (defaults are used); a file that
    exists but fails to parse is a hard error.

    Returns a (success, config, error-message) tuple.
    """
    config = dict()
    if not os.path.isfile(config_path):
        print('[WARN] config file not found', config_path, file=sys.stderr)
    else:
        succ, config, err = load_yaml(config_path)
        if not succ:
            return (False, dict(), err)
    # Config settings take precedence over program defaults.
    return (True, merge_yaml(DEFAULT_CONFIG, config), '')
def load_template(template_dir, o_template_dir, server):
    """Load the global template and merge in an optional override template.

    Returns a (success, template, error-message) tuple.  Failure to load
    the *global* template is fatal; the optional one is best-effort.
    """
    tpl_path = os.path.join(template_dir, TEMPLATES[server])
    succ, template, err = load_yaml(tpl_path)
    if not succ:
        return (False, dict(), '[ERR] Error loading template' + err)
    if o_template_dir is not None:
        # NOTE(review): success/err of the optional load are deliberately
        # ignored here (same as the original) -- presumably load_yaml yields
        # an empty mapping on failure so the merge is a no-op; confirm.
        _succ2, override, _err2 = load_yaml(os.path.join(o_template_dir, TEMPLATES[server]))
        template = merge_yaml(template, override)
    return (True, template, '')
############################################################
# Post actions
############################################################
def apply_log_settings(config):
    """Apply log-related settings from the config.

    Creates the log directory if requested and symlinks the access/error
    log files to stdout/stderr when configured.  Returns (success, error).
    """
    log_cfg = config['vhost']['log']

    # Symlink log files to the container's stdout/stderr when enabled.
    link_specs = (
        (log_cfg['access']['stdout'], '/dev/stdout', STDOUT_ACCESS),
        (log_cfg['error']['stderr'], '/dev/stderr', STDERR_ERROR),
    )
    for enabled, target, link_name in link_specs:
        if enabled:
            succ, err = symlink(target, link_name, force=True)
            if not succ:
                return (False, err)

    # Create the log directory if requested and not already present.
    dir_cfg = log_cfg['dir']
    if dir_cfg['create'] and not os.path.isdir(dir_cfg['path']):
        try:
            os.makedirs(dir_cfg['path'])
        except OSError as err:
            return (False, '[ERR] Cannot create directory: '+str(err))

    return (True, None)
############################################################
# Main Function
############################################################
def main(argv):
"""Main entrypoint."""
# Get command line arguments
(config_path, tpl_dir, o_tpl_dir, docroot,
proxy, mode, location, name, default, save, verbose) = parse_args(argv)
# Validate command line arguments This will abort the program on error
# This will abort the program on error
validate_args_req(name, docroot, proxy, mode, location)
validate_args_opt(config_path, tpl_dir)
# Load config
succ, config, err = load_config(config_path)
if not succ:
print('[ERR] Error loading config', err, file=sys.stderr)
sys.exit(1)
# Load template
succ, template, err = load_template(tpl_dir, o_tpl_dir, config['server'])
if not succ:
print('[ERR] Error loading template', err, file=sys.stderr)
sys.exit(1)
# Validate configuration file
# This will abort the program on error
validate_config(config)
# Retrieve fully build vhost
vhost = get_vhost(config, template, docroot, proxy, mode, location, name, default)
if verbose:
print('vhostgen: [%s] Adding: %s' %
(time.strftime("%Y-%m-%d %H:%M:%S"),
to_str(config['vhost']['name']['prefix']) + name +
to_str(config['vhost']['name']['suffix'])))
if save:
if not os.path.isdir(config['conf_dir']):
print('[ERR] output conf_dir does not exist:', config['conf_dir'],
file=sys.stderr)
sys.exit(1)
if not os.access(config['conf_dir'], os.W_OK):
print('[ERR] directory does not have write permissions', config['conf_dir'],
file=sys.stderr)
sys.exit(1)
vhost_path | |
ProtocolData(self.edesign, ds, self.cache[dsname])
for dsname, ds in self.dataset.items()}
ds_to_get_circuits_from = self.dataset[list(self.dataset.keys())[0]]
else:
self._passdatas = {None: self}
ds_to_get_circuits_from = dataset
if self.edesign is None:
self.edesign = ExperimentDesign(list(ds_to_get_circuits_from.keys()))
super().__init__(self.edesign._dirs, {}, self.edesign._childcategory) # children created on-demand
def __getstate__(self):
# don't pickle ourself recursively if self._passdatas contains just ourself
to_pickle = self.__dict__.copy()
if list(to_pickle['_passdatas'].keys()) == [None]:
to_pickle['_passdatas'] = None
return to_pickle
    def __setstate__(self, stateDict):
        """Restore pickled state, re-creating the self-referencing
        `_passdatas` entry that `__getstate__` removed."""
        self.__dict__.update(stateDict)
        if self._passdatas is None:
            self._passdatas = {None: self}
def _create_childval(self, key): # (this is how children are created on-demand)
""" Create the value for `key` on demand. """
return self.edesign.create_subdata(key, self.dataset)
    @property
    def passes(self):
        """A dictionary of the per-pass sub-results.

        Keys are dataset/pass names (or None in the single-pass case);
        values are :class:`ProtocolData` objects."""
        return self._passdatas
    def is_multipass(self):
        """
        Whether this protocol data contains multiple passes — more
        precisely, whether the `.dataset` of this object is a
        :class:`MultiDataSet`.

        Returns
        -------
        bool
        """
        return isinstance(self.dataset, _objs.MultiDataSet)
#def get_tree_paths(self):
# return self.edesign.get_tree_paths()
def filter_paths(self, paths, paths_are_sorted=False):
"""
Returns a new :class:`ProtocolData` object with a subset of the
data-tree paths contained under this one.
Parameters
----------
paths : list
A list of the paths to keep. Each path is a tuple of keys,
delineating a path in the data-tree.
paths_are_sorted : bool, optional
Whether `paths` has already been sorted lexographically.
Returns
-------
ProtocolData
"""
def build_data(des, src_data):
""" Uses a template (filtered) edesign to selectively
copy the non-edesign parts of a 'src_data' ProtocolData """
ret = ProtocolData(des, src_data.dataset, src_data.cache)
for subname, subedesign in des.items():
if subname in src_data._vals: # if we've actually created this sub-data...
ret._vals[subname] = build_data(subedesign, src_data._vals[subname])
return ret
filtered_edesign = self.edesign.filter_paths(paths, paths_are_sorted)
return build_data(filtered_edesign, self)
    def write(self, dirname=None, parent=None):
        """
        Write this protocol data to a directory.

        Parameters
        ----------
        dirname : str
            The *root* directory to write into.  This directory will have
            'edesign' and 'data' subdirectories, which will be created if
            needed and overwritten if present.  If None, then the path this
            object was loaded from is used (if this object wasn't loaded
            from disk, an error is raised).
        parent : ProtocolData, optional
            The parent protocol data, when a parent is writing this
            data as a sub-protocol-data object.  Otherwise leave as None.

        Returns
        -------
        None
        """
        if dirname is None:
            dirname = self.edesign._loaded_from
            if dirname is None: raise ValueError("`dirname` must be given because there's no default directory")
        dirname = _pathlib.Path(dirname)
        data_dir = dirname / 'data'
        data_dir.mkdir(parents=True, exist_ok=True)
        _io.obj_to_meta_json(self, data_dir)
        if parent is None:
            self.edesign.write(dirname)  # when writing as a sub-data, the parent is assumed to have written the edesign already
        if self.dataset is not None:  # when None there is simply no dataset to write
            if parent and (self.dataset is parent.dataset):  # data stored by the parent -- don't duplicate it here
                assert(len(list(data_dir.glob('*.txt'))) == 0), "There shouldn't be *.txt files in %s!" % str(data_dir)
            else:
                data_dir.mkdir(exist_ok=True)
                if isinstance(self.dataset, _objs.MultiDataSet):
                    # one .txt file per member dataset of the multi-data set
                    for dsname, ds in self.dataset.items():
                        _io.write_dataset(data_dir / (dsname + '.txt'), ds)
                else:
                    _io.write_dataset(data_dir / 'dataset.txt', self.dataset)
        if self.cache:
            _io.write_dict_to_json_or_pkl_files(self.cache, data_dir / 'cache')
        self.write_children(dirname, write_subdir_json=False)  # writes sub-datas
class ProtocolResults(object):
    """
    Pairs a :class:`ProtocolData` object with the output produced by running
    a QCVV protocol (a :class:`Protocol`) on that data.
    """

    @classmethod
    def from_dir(cls, dirname, name, preloaded_data=None):
        """
        Initialize a new ProtocolResults object from `dirname` / results / `name`.

        Parameters
        ----------
        dirname : str
            The *root* directory name (under which there are 'edesign',
            'data', and 'results' subdirectories).
        name : str
            The sub-directory name of the particular results object to load
            (there can be multiple under a given root `dirname`).  This is
            the name of a subdirectory of `dirname` / results.
        preloaded_data : ProtocolData, optional
            The already-loaded :class:`ProtocolData` object for `dirname`,
            if available.  Leave as None and it will be loaded from disk.

        Returns
        -------
        ProtocolResults
        """
        root = _pathlib.Path(dirname)
        ret = cls.__new__(cls)
        if preloaded_data is not None:
            ret.data = preloaded_data
        else:
            ret.data = _io.load_data_from_dir(root)
        # Restore this object's members (including `name`) from the meta files.
        ret.__dict__.update(_io.load_meta_based_dir(root / 'results' / name, 'auxfile_types'))
        assert(ret.name == name), "ProtocolResults name inconsistency!"
        return ret

    def __init__(self, data, protocol_instance):
        """
        Create a new ProtocolResults object.

        Parameters
        ----------
        data : ProtocolData
            The input data from which these results are derived.
        protocol_instance : Protocol
            The protocol that created these results.
        """
        self.name = protocol_instance.name  # just for convenience in JSON dir
        self.protocol = protocol_instance
        self.data = data
        # Tells the I/O routines how each member is (de)serialized.
        self.auxfile_types = {'data': 'none', 'protocol': 'protocolobj'}

    def write(self, dirname=None, data_already_written=False):
        """
        Write these protocol results to a directory.

        Parameters
        ----------
        dirname : str
            The *root* directory to write into.  This directory will have
            'edesign', 'data', and 'results/<myname>' subdirectories, which
            will be created if needed and overwritten if present.  If None,
            the path this object was loaded from is used (an error is raised
            if this object wasn't loaded from disk).
        data_already_written : bool, optional
            Set this to True if you're sure the `.data` :class:`ProtocolData`
            object within this results object has already been written to
            `dirname`.  Leaving this as the default is a safe option.

        Returns
        -------
        None
        """
        if dirname is None:
            dirname = self.data.edesign._loaded_from
            if dirname is None:
                raise ValueError("`dirname` must be given because there's no default directory")

        results_dir = _pathlib.Path(dirname) / 'results' / self.name
        results_dir.mkdir(parents=True, exist_ok=True)

        # Write the edesign/data tree first (unless the caller already did).
        if not data_already_written:
            self.data.write(dirname)

        # Serialize this object's members into the results directory.
        _io.write_obj_to_meta_based_dir(self, results_dir, 'auxfile_types')

    def as_nameddict(self):
        """
        Convert these results into nested :class:`NamedDict` objects.

        Returns
        -------
        NamedDict
        """
        # Decent default implementation; derived classes may override it.
        skip = ('name', 'protocol', 'data', 'auxfile_types')
        nd = _NamedDict('Qty', 'category')
        for attr, val in self.__dict__.items():
            if attr.startswith('_') or attr in skip:
                continue
            if isinstance(val, ProtocolResults):
                nd[attr] = val.as_nameddict()
            elif isinstance(val, _NamedDict):
                nd[attr] = val
            elif not isinstance(val, dict):
                # plain dicts are skipped -- we don't know how to turn them
                # into a (nested) NamedDict; everything else stores as-is
                nd[attr] = val
        return nd

    def as_dataframe(self):
        """
        Convert these results into a Pandas dataframe.

        Returns
        -------
        DataFrame
        """
        return self.as_nameddict().as_dataframe()

    def __str__(self):
        from pprint import PrettyPrinter
        return PrettyPrinter().pformat(self.as_nameddict())
class MultiPassResults(ProtocolResults):
    """
    Results of a single protocol run on multiple "passes" (sets of data,
    typically taken at different times).  Each pass's results are kept as a
    separate :class:`ProtocolResults` object within the `.passes` attribute.
    """

    def __init__(self, data, protocol_instance):
        """
        Initialize an empty MultiPassResults object, holding a dictionary of
        sub-results, one per "pass".  Usually these sub-results are obtained
        by running `protocol_instance` on each data set within `data`.

        Parameters
        ----------
        data : ProtocolData
            The input data from which these results are derived.
        protocol_instance : Protocol
            The protocol that created these results.
        """
        super().__init__(data, protocol_instance)
        # Per-pass sub-results, filled in by the protocol that builds this object.
        self.passes = {}
        self.auxfile_types['passes'] = 'dict-of-resultsobjs'
class ProtocolResultsDir(_TreeNode):
"""
A :class:`ProtocolResultsDir` holds a dictionary of :class:`ProtocolResults`
    objects. It contains a :class:`ProtocolData` object and is rooted at the
    corresponding node of the data-tree. It contains links to child-:class:`ProtocolResultsDir`
objects representing sub-directories.
"""
@classmethod
def from_dir(cls, dirname, parent=None, name=None):
"""
Initialize a new ProtocolResultsDir object from `dirname`.
Parameters
----------
dirname : str
The *root* directory name (under which there are 'edesign'
and 'data', and possibly 'results', subdirectories).
parent : ProtocolResultsDir, optional
The parent results-directory object that is loading the
returned object as a sub-results. This is used internally
when loading a :class:`ProtocolResultsDir` that represents
a node of the data-tree with children.
name : str, optional
The name of this result within `parent`. This is only
used when `parent` is not None.
Returns
-------
ProtcolResultsDir
"""
dirname = _pathlib.Path(dirname)
data = parent.data[name] if (parent and name) else \
_io.load_data_from_dir(dirname)
#Load results in results_dir
results = {}
results_dir = dirname / 'results'
if results_dir.is_dir(): # if results_dir doesn't exist that's ok (just no results to load)
for pth in results_dir.iterdir():
if pth.is_dir() and (pth / 'meta.json').is_file():
results[pth.name] = _io.cls_from_meta_json(pth).from_dir(
dirname, pth.name, preloaded_data=data)
ret = cls(data, results, {}) # don't initialize children now
ret._init_children(dirname, meta_subdir='results')
return ret
def __init__(self, data, protocol_results=None, children=None):
"""
Create a new ProtocolResultsDir object.
This container object holds two things:
1. A `.for_protocol` dictionary of :class:`ProtocolResults` corresponding
to different protocols (keys are protocol names).
2. | |
_DEF_IMAS_PLASMA_SIG.keys():
if plot_sig is None:
plot_sig = _DEF_IMAS_PLASMA_SIG[lids[0]]['plot_sig']
if plot_X is None:
plot_X = _DEF_IMAS_PLASMA_SIG[lids[0]]['plot_X']
if dsig is None:
lsig = (list(plot_sig) + list(plot_X)
+ _DEF_IMAS_PLASMA_SIG[lids[0]]['other'])
dsig = {lids[0]: lsig}
if plot_sig is None or plot_X is None:
msg = "Trying to plot a plasma profile\n"
msg += "Impossible if plot_sig and plot_X not provided!\n"
msg += " (resp. quantity and quant_X if calling from tofuplot)"
raise Exception(msg)
dq = imas2tofu.MultiIDSLoader._dshort[lids[0]]
lk = sorted(dq.keys())
for qq in plot_sig:
if qq not in lk:
_get_exception(qq, lids[0], qtype='quantity')
for qq in plot_X:
if qq not in lk:
_get_exception(qq, lids[0], qtype='X')
# -------------------
# load
lidsdiag = [kk for kk in lids
if kk in imas2tofu.MultiIDSLoader._lidsdiag]
if input_file is None:
for ss in shot:
multi = imas2tofu.MultiIDSLoader(shot=ss, run=run, user=user,
tokamak=tokamak, version=version,
ids=lids, synthdiag=False,
get=False)
lids_synth = multi.get_inputs_for_synthsignal(lidsdiag,
returnas=list,
verb=False)
if 'equilibrium' in lids_synth:
multi.add_ids('equilibrium', tokamak=tokamak_eq,
user=user_eq, shot=shot_eq, run=run_eq,
get=False)
lids_synth.remove('equilibrium')
if len(lids_synth) > 0:
multi.add_ids(lids_synth, tokamak=tokamak_prof,
user=user_prof, shot=shot_prof, run=run_prof,
get=False)
multi.open_get_close()
# export to instances
for ii in range(0, nids):
if out[ii] == "Data":
multi.calc_signal(ids=lids[ii],
tlim=tlim, dsig=dsig,
config=config, t=t,
res=res, indch=indch,
Brightness=Brightness,
interp_t=interp_t,
indch_auto=indch_auto,
t0=t0, dextra=dextra,
coefs=coefs, plot=True, bck=bck,
plot_compare=plot_compare)
else:
multi = imas2tofu.MultiIDSLoader(shot=shot[0], run=run, user=user,
tokamak=tokamak, version=version,
ids=lids, synthdiag=False, get=False)
if 'bremsstrahlung_visible' in lids:
multi.add_ids('equilibrium', get=True)
plasma = multi.to_Plasma2D()
lf = ['t', 'rhotn', 'brem']
lamb = multi.get_data(
dsig={'bremsstrahlung_visible': 'lamb'},
return_all=False
)['bremsstrahlung_visible']['lamb']
dout = imas2tofu.get_data_from_matids(input_file,
return_fields=lf,
lamb=lamb[0])
plasma.add_ref(key='core_profiles.t', data=dout['t'], group='time',
origin='input_file')
nrad = dout['rhotn'].shape[1]
plasma.add_ref(key='core_profiles.radius', data=np.arange(0, nrad),
group='radius', origin='input_file')
plasma.add_quantity(key='core_profiles.1drhotn',
data=dout['rhotn'],
depend=('core_profiles.t',
'core_profiles.radius'),
origin='input_file',
quant='rhotn', dim='rho', units='adim.')
plasma.add_quantity(key='core_profiles.1dbrem', data=dout['brem'],
depend=('core_profiles.t',
'core_profiles.radius'),
origin='input_file')
cam = multi.to_Cam(plot=False)
sig = cam.calc_signal_from_Plasma2D(plasma,
quant='core_profiles.1dbrem',
ref1d='core_profiles.1drhotn',
ref2d='equilibrium.2drhotn',
coefs=coefs, bck=bck,
Brightness=True, plot=plot)[0]
if output_file is not None:
try:
# Format output dictionnary to be saved
dout = {'shot': shot[0],
't': sig.t,
'data': sig.data,
'units_t': 's',
'units_data': 'ph / (s.m2.sr.m)',
'channels': sig.dchans('names'),
'tofu_version': __version__}
# Save to specified path + filename + extension
if output_file[-4:] != '.mat':
assert len(output_file.split('.')) == 1
output_file += '.mat'
scpio.savemat(output_file, dout)
msg = ("Successfully saved in:\n"
+ "\t{}".format(output_file))
print(msg)
except Exception as err:
msg = str(err)
msg += "\nCould not save computed synthetic signal to:\n"
msg += "scpio.savemat({0}, dout)".format(output_file)
warnings.warn(msg)
#############################################
# Generic tofu object
#############################################
def _check_notNone(dd, lk):
for k, v in dd.items():
if k in lk:
assert v is not None, "{0} should not be None !".format(k)
def _check_InputsGeneric(ld, tab=0):
# Prepare
bstr0 = "\n"+" "*tab + "Error on arg %s:"
bstr1 = "\n"+" "*(tab+1) + "Expected: "
bstr2 = "\n"+" "*(tab+1) + "Provided: "
ltypes_f2i = [int,float,np.integer,np.floating]
ltypes_i2f = [int,float,np.integer,np.floating]
# Check
err, msg = False, ''
for k in ld.keys():
errk, msgk = False, bstr0%k
if 'cls' in ld[k].keys():
if not isinstance(ld[k]['var'],ld[k]['cls']):
errk = True
msgk += bstr1 + "class {0}".format(ld[k]['cls'].__name__)
msgk += bstr2 + "class %s"%ld[k]['var'].__class__.__name__
if 'NoneOrCls' in ld[k].keys():
c = ld[k]['var'] is None or isinstance(ld[k]['var'],ld[k]['cls'])
if not c:
errk = True
msgk += bstr1 + "None or class {0}".format(ld[k]['cls'].__name__)
msgk += bstr2 + "class %s"%ld[k]['var'].__class__.__name__
if 'in' in ld[k].keys():
if not ld[k]['var'] in ld[k]['in']:
errk = True
msgk += bstr1 + "in {0}".format(ld[k]['in'])
msgk += bstr2 + "{0}".format(ld[k]['var'])
if 'lisfof' in ld[k].keys():
c0 = isinstance(ld[k]['var'], list)
c1 = c0 and all([isinstance(s,ld[k]['listof']) for s in ld[k]])
if not c1:
errk = True
msgk += bstr1 + "list of {0}".format(ld[k]['listof'].__name__)
msgk += bstr2 + "{0}".format(ld[k]['var'])
if 'iter2array' in ld[k].keys():
c0 = ld[k]['var'] is not None and hasattr(ld[k]['var'],'__iter__')
if not c0:
errk = True
msgk += bstr1 + "iterable of %s"%ld[k]['iter2array'].__name__
msgk += bstr2 + "{0}".format(ld[k]['var'])
ld[k]['var'] = np.asarray(ld[k]['var'], dtype=ld[k]['iter2array'])
if 'ndim' in ld[k].keys():
c0 = isinstance(ld[k]['var'], np.ndarray)
c1 = c0 and ld[k]['var'].ndim == ld[k]['ndim']
if not c1:
errk = True
msgk += bstr1 + "array of {0} dimensions".format(ld[k]['ndim'])
msgk += bstr2 + "shape {0}".format(ld[k]['ndim'].shape)
if 'inshape' in ld[k].keys():
c0 = isinstance(ld[k]['var'], np.ndarray)
c1 = c0 and ld[k]['inshape'] in ld[k]['var'].shape
if not c1:
errk = True
msgk += bstr1 + "shape including {0}".format(ld[k]['inshape'])
msgk += bstr2 + "shape {0}".format(ld[k]['var'].shape)
if 'float2int' in ld[k].keys():
lc = [(issubclass(ld[k]['var'].__class__, cc)
and int(ld[k]['var'])==ld[k]['var'])
for cc in ltypes_f2i]
if not any(lc):
errk = True
msgk += bstr1 + "convertible to int from %s"%str(ltypes_f2i)
msgk += bstr2+"{0} ({1})".format(ld[k]['var'],
ld[k]['var'].__class__.__name__)
ld[k]['var'] = int(ld[k]['var'])
if 'int2float' in ld[k].keys():
lc = [issubclass(ld[k]['var'].__class__, cc)
for cc in ltypes_i2f]
if not any(lc):
errk = True
msgk += bstr1 + "convertible to float from %s"%str(ltypes_i2f)
msgk += bstr2 + "class %s"%ld[k]['var'].__class__.__name__
ld[k]['var'] = float(ld[k]['var'])
if 'NoneOrIntPos' in ld[k].keys():
c0 = ld[k]['var'] is None
lc = [(issubclass(ld[k]['var'].__class__, cc)
and int(ld[k]['var'])==ld[k]['var']
and ld[k]['var']>0)
for cc in ltypes_f2i]
if not (c0 or any(lc)):
errk = True
msgk += bstr1 + "convertible to >0 int from %s"%str(ltypes_f2i)
msgk += bstr2 + "{0}".format(ld[k]['var'])
ld[k]['var'] = None if c0 else int(ld[k]['var'])
if '>' in ld[k].keys():
if not np.all(np.greater(ld[k]['var'], ld[k]['>'])):
errk = True
msgk += bstr1 + "> {0}".format(ld[k]['>'])
msgk += bstr2 + "{0}".format(ld[k]['var'])
if 'vectnd' in ld[k].keys():
c0 = any([isinstance(ld[k]['var'],tt)
for tt in [list,tuple,np.ndarray]])
if ld[k]['vectnd'] is not None:
c0 &= np.asarray(ld[k]['var']).size==ld[k]['vectnd']
if not c0:
errk = True
msgk += bstr1 + "array of size {0}".format(ld[k]['vectnd'])
msgk += bstr2 + "{0}".format(ld[k]['var'])
ld[k]['var'] = np.asarray(ld[k]['var'],dtype=float).ravel()
if 'unitvectnd' in ld[k].keys():
c0 = any([isinstance(ld[k]['var'],tt)
for tt in [list,tuple,np.ndarray]])
c1 = c0 and np.asarray(ld[k]['var']).size==ld[k]['unitvectnd']
if not c1:
errk = True
msgk += bstr1 + "array of size {0}".format(ld[k]['unitvectnd'])
msgk += bstr2 + "{0}".format(ld[k]['var'])
temp = np.asarray(ld[k]['var'],dtype=float).ravel()
ld[k]['var'] = temp/np.linalg.norm(temp)
if errk:
err = True
msg += msgk
return ld, err, msg
def _get_attrdictfromobj(obj, dd):
for k in dd.keys():
if dd[k] is None:
dd[k] = getattr(obj,k)
return dd
class ToFuObjectBase(object):
__metaclass__ = ABCMeta
_dstrip = {'strip':None, 'allowed':None}
# Does not exist before Python 3.6 !!!
    def __init_subclass__(cls, *args, **kwdargs):
        """Give every subclass its own copy of the `_dstrip` settings and
        let it customize them via `_strip_init()`."""
        # run the normal subclass-initialization machinery first
        super(ToFuObjectBase,cls).__init_subclass__(*args, **kwdargs)
        cls._dstrip = ToFuObjectBase._dstrip.copy()
        cls._strip_init()
def __init__(self, fromdict=None,
**kwdargs):
self._Done = False
self._dstrip = self.__class__._dstrip.copy()
if fromdict is not None:
self.from_dict(fromdict, sep=kwdargs.get('sep', None))
else:
self._reset()
self._set_Id(**kwdargs)
self._init(**kwdargs)
self._Done = True
    @abstractmethod
    def _reset(self):
        """ To be overloaded: reset the instance's internal state. """
        pass
    def _set_Id(self, *args, **kwdargs):
        """ To be overloaded: set the object's Id/identity attributes. """
        pass
    @abstractmethod
    def _init(self, **kwdargs):
        """ To be overloaded: initialize instance content from kwdargs. """
        pass
    @classmethod
    def _strip_init(cls):
        """ To be overloaded: set up the class's `_dstrip` strip settings
        (called from __init_subclass__). """
        pass
@staticmethod
def _get_largs_Id():
largs = ['Id','Name','Type','Deg','Exp','Diag','shot',
'SaveName','SavePath','usr','dUSR','lObj','include']
return largs
@staticmethod
def _extract_kwdargs(din, largs):
dout = {}
for k in largs:
if k in din.keys():
dout[k] = din[k]
return dout
def _set_arrayorder(self, arrayorder='C', sep=None, verb=True):
d, account = _set_arrayorder(self, arrayorder=arrayorder, sep=sep)
if len(account['Failed'])>0:
msg = "All np.ndarrays were not set to {0} :\n".format(arrayorder)
msg += "Success : [{0}]".format(', '.join(account['Success']))
msg += "Failed : [{0}]".format(', '.join(account['Failed']))
raise Exception(msg)
else:
self.from_dict(d, sep=sep)
self._dextra['arrayorder'] = arrayorder
@staticmethod
def _strip_dict(dd, lkeep=[]):
for k in dd.keys():
if not k in lkeep:
dd[k] = None
@staticmethod
def _test_Rebuild(dd, lkeep=[]):
reset = False
for k in dd.keys():
if dd[k] is None and k not in lkeep:
reset = True
break
return reset
@staticmethod
def _check_Fields4Rebuild(dd, lkeep=[], dname=''):
for kk in lkeep:
if kk not in dd.keys() or dd[kk] is None:
msg = "Rebuilding {0}:\n".format(dname)
msg += "Field '{0}' is missing !".format(kk)
raise Exception(msg)
    @staticmethod
    def _check_InputsGeneric(ld, tab=0):
        """Delegate to the module-level `_check_InputsGeneric` validator."""
        return _check_InputsGeneric(ld, tab=tab)
#############################
# charray and summary
#############################
@staticmethod
def _getcharray(ar, col=None, sep=' ', line='-', just='l', msg=True):
c0 = ar is None or len(ar) == 0
if c0:
return ''
ar = np.array(ar, dtype='U')
if ar.ndim == 1:
ar = ar.reshape((1,ar.size))
# Get just len
nn = np.char.str_len(ar).max(axis=0)
if col is not None:
if len(col) not in ar.shape:
msg = ("len(col) should be in np.array(ar, dtype='U').shape:\n"
+ "\t- len(col) = {}\n".format(len(col))
+ "\t- ar.shape = {}".format(ar.shape))
raise Exception(msg)
if len(col) != ar.shape[1]:
ar = ar.T
nn = np.char.str_len(ar).max(axis=0)
nn = np.fmax(nn, [len(cc) for cc in col])
# Apply to array
fjust = np.char.ljust if just == 'l' else np.char.rjust
out = np.array([sep.join(v) for v in fjust(ar,nn)])
# Apply to col
if col is not None:
arcol = np.array([col, [line*n for n in nn]], dtype='U')
arcol = np.array([sep.join(v) for v in fjust(arcol,nn)])
out = np.append(arcol,out)
if msg:
out = '\n'.join(out)
return out
@classmethod
| |
<filename>pytorch/libs/nnet/pooling.py
# -*- coding:utf-8 -*-
# Copyright xmuspeech (Author: Snowdar 2019-05-29 2020-06-10)
import numpy as np
import torch
import torch.nn.functional as F
from libs.support.utils import to_device
from .components import *
## Pooling ✿
class StatisticsPooling(torch.nn.Module):
    """Mean [+ stddev] pooling over the frame axis of a (batch, dim, frames) tensor."""
    def __init__(self, input_dim, stddev=True, unbiased=False, eps=1.0e-10):
        super(StatisticsPooling, self).__init__()
        self.stddev = stddev
        self.input_dim = input_dim
        self.output_dim = 2 * input_dim if stddev else input_dim
        self.eps = eps
        # Bessel's correction toggle for the variance estimate
        self.unbiased = unbiased

    def forward(self, inputs):
        """
        @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
        """
        assert len(inputs.shape) == 3
        assert inputs.shape[1] == self.input_dim

        num_frames = inputs.shape[2]
        mean = inputs.sum(dim=2, keepdim=True) / num_frames

        if not self.stddev:
            return mean

        denom = num_frames - 1 if (self.unbiased and num_frames > 1) else num_frames
        # Clamp the variance before the sqrt: a raw sqrt of a ~0 variance
        # produced NaNs (see the deprecated direct-sqrt formulation).
        var = torch.sum((inputs - mean)**2, dim=2, keepdim=True) / denom
        std = torch.sqrt(var.clamp(min=self.eps))
        return torch.cat((mean, std), dim=1)

    def get_output_dim(self):
        return self.output_dim

    def extra_repr(self):
        return '{input_dim}, {output_dim}, stddev={stddev}, unbiased={unbiased}, eps={eps}'.format(**self.__dict__)

    @classmethod
    def thop_count(self, m, x, y):
        # FLOPs-counting hook: intentionally not implemented yet.
        pass
class LDEPooling(torch.nn.Module):
    """A learnable dictionary encoding (LDE) pooling layer.
    Reference: "A NOVEL LEARNABLE DICTIONARY ENCODING LAYER FOR END-TO-END
               LANGUAGE IDENTIFICATION", ICASSP, 2018.
    """
    def __init__(self, input_dim, c_num=64, eps=1.0e-10):
        super(LDEPooling, self).__init__()
        self.input_dim = input_dim
        self.output_dim = input_dim * c_num
        self.eps = eps
        # Dictionary components (mu) and per-component scaling (s)
        self.mu = torch.nn.Parameter(torch.randn(input_dim, c_num))
        self.s = torch.nn.Parameter(torch.ones(c_num))
        self.softmax_for_w = torch.nn.Softmax(dim=3)

    def forward(self, inputs):
        """
        @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
        """
        assert len(inputs.shape) == 3
        assert inputs.shape[1] == self.input_dim

        residual = inputs.transpose(1, 2).unsqueeze(3) - self.mu
        # beta = s**2 + eps is guaranteed > 0
        beta = self.s**2 + self.eps
        weights = self.softmax_for_w(-beta * torch.sum(residual**2, dim=2, keepdim=True))
        encoded = torch.mean(weights * residual, dim=1)
        return encoded.reshape(-1, self.output_dim, 1)

    def get_output_dim(self):
        return self.output_dim
# Attention-based
class AttentionAlphaComponent(torch.nn.Module):
    """Compute the attention weights (alpha) used by the pooling layers.

        alpha = softmax(v'.f(w.x + b) + k)   or   softmax(v'.x + k)

    where f is relu here and the bias could be dropped.
    Support:
        1. Single or multi-head attention
        2. One affine or two affines
        3. Shared weight (last affine = vector) or un-shared weight (last affine = matrix)
        4. Self-attention or time-context attention (via the `context` parameter of TdnnAffine)
        5. Different temperatures for different heads.
    """
    def __init__(self, input_dim, num_head=1, split_input=True, share=True, affine_layers=2,
                 hidden_size=64, context=[0], bias=True, temperature=False, fixed=True):
        super(AttentionAlphaComponent, self).__init__()
        assert num_head >= 1
        # Multi-head case.
        if num_head > 1:
            if split_input:
                # Features/planes with input_dim dims must split evenly into num_head parts.
                assert input_dim % num_head == 0
            if temperature:
                if fixed:
                    t_list = []
                    for i in range(num_head):
                        t_list.append([[max(1, (i // 2) * 5)]])
                    # shape [1, num_head, 1, 1]
                    self.register_buffer('t', torch.tensor([t_list]))
                else:
                    # Different heads have different temperature.
                    # Use 1 + self.t**2 in forward to make sure temperature >= 1.
                    self.t = torch.nn.Parameter(torch.zeros(1, num_head, 1, 1))
        self.input_dim = input_dim
        self.num_head = num_head
        self.split_input = split_input
        self.share = share
        self.temperature = temperature
        self.fixed = fixed
        if share:
            # weight: [input_dim, 1] or [input_dim, hidden_size] -> [hidden_size, 1]
            final_dim = 1
        else:
            # weight: [input_dim, input_dim] or [input_dim, hidden_size] -> [hidden_size, input_dim]
            final_dim = input_dim
        first_groups = 1
        last_groups = 1
        if affine_layers == 1:
            last_affine_input_dim = input_dim
            # (x, 1) for global case and (x, h) for split case.
            if num_head > 1 and split_input:
                last_groups = num_head
            self.relu_affine = False
        elif affine_layers == 2:
            last_affine_input_dim = hidden_size * num_head
            if num_head > 1:
                # (1, h) for global case and (h, h) for split case.
                last_groups = num_head
                if split_input:
                    first_groups = num_head
            # Add a relu-affine with affine_layers=2.
            self.relu_affine = True
            self.first_affine = TdnnAffine(input_dim, last_affine_input_dim, context=context, bias=bias, groups=first_groups)
            self.relu = torch.nn.ReLU(inplace=True)
        else:
            # BUGFIX: was `"...{}.",format(affine_layers)` (comma instead of
            # dot), so the message was never formatted and ValueError carried
            # two arguments instead of one interpolated string.
            raise ValueError("Expected 1 or 2 affine layers, but got {}.".format(affine_layers))
        self.last_affine = TdnnAffine(last_affine_input_dim, final_dim * num_head, context=context, bias=bias, groups=last_groups)
        # Dim=2 means to apply softmax in different frames-index (batch is a 3-dim tensor in this case).
        self.softmax = torch.nn.Softmax(dim=2)

    def forward(self, inputs):
        """
        @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
        """
        assert len(inputs.shape) == 3
        assert inputs.shape[1] == self.input_dim
        if self.temperature:
            batch_size = inputs.shape[0]
            chunk_size = inputs.shape[2]
        x = inputs
        if self.relu_affine:
            x = self.relu(self.first_affine(x))
        if self.num_head > 1 and self.temperature:
            if self.fixed:
                t = self.t
            else:
                # Learnable temperature, kept >= 1 by construction.
                t = 1 + self.t**2
            x = self.last_affine(x).reshape(batch_size, self.num_head, -1, chunk_size) / t
            return self.softmax(x.reshape(batch_size, -1, chunk_size))
        else:
            return self.softmax(self.last_affine(x))
class AttentiveStatisticsPooling(torch.nn.Module):
    """Attentive statistics pooling: attention-weighted mean [+ stddev].
    Reference: Okabe, Koji, et al. 2018. "Attentive Statistics Pooling
    for Deep Speaker Embedding." ArXiv Preprint ArXiv:1803.10963.
    """
    def __init__(self, input_dim, affine_layers=2, hidden_size=64, context=[0], stddev=True, stddev_attention=True, eps=1.0e-10):
        super(AttentiveStatisticsPooling, self).__init__()
        self.stddev = stddev
        self.input_dim = input_dim
        self.output_dim = 2 * input_dim if stddev else input_dim
        self.eps = eps
        self.stddev_attention = stddev_attention
        # Single-head, shared-weight attention producing one alpha per frame.
        self.attention = AttentionAlphaComponent(input_dim, num_head=1, share=True, affine_layers=affine_layers,
                                                 hidden_size=hidden_size, context=context)

    def forward(self, inputs):
        """
        @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
        """
        assert len(inputs.shape) == 3
        assert inputs.shape[1] == self.input_dim

        alpha = self.attention(inputs)
        # Attention-weighted mean over the frame axis.
        mean = torch.sum(alpha * inputs, dim=2, keepdim=True)
        if not self.stddev:
            return mean
        if self.stddev_attention:
            # Weighted second moment minus squared weighted mean.
            var = torch.sum(alpha * inputs**2, dim=2, keepdim=True) - mean**2
        else:
            var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
        std = torch.sqrt(var.clamp(min=self.eps))
        return torch.cat((mean, std), dim=1)

    def get_output_dim(self):
        return self.output_dim
class MultiHeadAttentionPooling(torch.nn.Module):
    """Multi-head attention pooling built on AttentionAlphaComponent.

    Reference: Safari, Pooyan, and <NAME>. 2019. "Self Multi-Head Attention for Speaker
    Recognition." ArXiv Preprint ArXiv:1906.09890.
    Note, in this paper, affine_layers is default to 1, and final_dim is 1 which means the weights are shared.
    """
    def __init__(self, input_dim, stddev=True, num_head=4, share=True, affine_layers=1, **options):
        super(MultiHeadAttentionPooling, self).__init__()
        self.input_dim = input_dim
        self.stddev = stddev
        self.num_head = num_head
        # Output doubles when the pooled standard deviation is appended to the mean.
        self.output_dim = 2 * input_dim if stddev else input_dim

        if "split_input" in options.keys():
            if not options["split_input"]:
                raise ValueError("split_input==False is not valid for this MultiHeadAttentionPooling.")
            options.pop("split_input")

        # The special point of this pooling: input features are split across
        # heads, so split_input is forced to True.
        self.attention = AttentionAlphaComponent(
            input_dim, num_head=num_head, split_input=True, share=share,
            affine_layers=affine_layers, bias=False, **options)

    def forward(self, inputs):
        """
        @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
        """
        assert len(inputs.shape) == 3
        assert inputs.shape[1] == self.input_dim

        batch_size, _, chunk_size = inputs.shape  # chunk_size a.k.a. total frames

        # alpha: [batch, weight, frames]. The conv1d implementation lays the
        # heads out as [h11, h12, ..., h21, h22, ...], so reshape to split them.
        alpha = self.attention(inputs)
        # Shared-weight case gives alpha shape [batch, head, 1, frames];
        # otherwise [batch, head, splited-features, frames].
        weighted = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
                   inputs.reshape(batch_size, self.num_head, -1, chunk_size)
        # Reshape back and sum over frames to obtain the weighted mean.
        mean = torch.sum(weighted.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True)
        if not self.stddev:
            return mean
        var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
        std = torch.sqrt(var.clamp(min=1.0e-10))
        return torch.cat((mean, std), dim=1)

    def get_output_dim(self):
        return self.output_dim
class GlobalMultiHeadAttentionPooling(torch.nn.Module):
"""Implement global multi-head attention pooling based on AttentionAlphaComponent.
Reference: <NAME>, <NAME>, <NAME>, <NAME>. "MULTI-RESOLUTION MULTI-HEAD
ATTENTION IN DEEP SPEAKER EMBEDDING." ICASSP, 2020.
It is not equivalent to multi-head attention pooling even when
input_dim of global multi-head = 1/num_head * input_dim of multi-head.
"""
def __init__(self, input_dim, | |
# theano/sandbox/cuda/opt.py
import logging
_logger = logging.getLogger('theano.sandbox.cuda.opt')
import sys
import theano
import numpy
from theano.scan_module import scan_utils, scan_op
from theano import scalar as scal
from theano import tensor, compile, gof
from theano.gof import (local_optimizer, EquilibriumDB, SequenceDB, ProxyDB,
Optimizer, toolbox, DestroyHandler,
EquilibriumOptimizer)
from theano.sandbox.cuda.basic_ops import *
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda.blas import (gpu_dot22, gpu_dot22scalar,
gpu_gemm_inplace, gpu_gemm_no_inplace, gpu_outer, GpuConv)
from theano.sandbox.cuda.blas import gpu_gemv_inplace
from theano.sandbox.cuda.blas import gpu_gemv_no_inplace
from theano.sandbox.cuda.blas import gpu_ger_inplace
from theano.sandbox.cuda.blas import gpu_ger_no_inplace
from theano.sandbox.cuda.blas import (GpuDownsampleFactorMax,
GpuDownsampleFactorMaxGrad)
from theano.sandbox.cuda.nnet import (
GpuCrossentropySoftmaxArgmax1HotWithBias,
GpuCrossentropySoftmax1HotWithBiasDx,
GpuSoftmax, GpuSoftmaxWithBias)
from theano.sandbox.cuda.elemwise import SupportCodeError
from theano.compile import optdb
from theano.tensor.blas import _is_real_vector, _is_real_matrix
#optdb.print_summary() # shows what is currently registered
# Databases holding the GPU local optimizations and the transfer-cutting
# passes; they run as a two-stage sequence registered with optdb below.
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
gpu_seqopt = SequenceDB()
gpu_seqopt.register('gpu_local_optimizations', gpu_optimizer, 1,
                    'fast_run', 'inplace')
gpu_seqopt.register('gpu_cut_transfers', gpu_cut_copies, 2,
                    'fast_run', 'gpu')
# DO NOT PUT fast_run in gpu_opt! This will ALWAYS enable the GPU!
optdb.register('gpu_opt',
               gpu_seqopt,
               optdb.__position__.get('add_destroy_handler', 49.5) - 1,
               'gpu')
# DO NOT PUT fast_run in gpu_after_fusion! This will ALWAYS enable the GPU!
# This second pass is needed as the fusion can put all the non float32 code
# inside the elemwise. When there is no float64 op, this is working.
optdb.register('gpu_after_fusion',
               ProxyDB(gpu_seqopt),
               optdb.__position__.get('elemwise_fusion', 71) + .1,
               'gpu')
def register_opt(*tags, **kwargs):
    """Decorator factory that registers a local optimizer in gpu_optimizer.

    The optional keyword argument 'name' overrides the registration name;
    otherwise the decorated function's __name__ is used.
    """
    def f(local_opt):
        # Use pop with a default so that calling register_opt with other
        # keyword arguments (or none) cannot raise KeyError; the previous
        # `(kwargs and kwargs.pop('name'))` form did.
        name = kwargs.pop('name', None) or local_opt.__name__
        gpu_optimizer.register(name, local_opt, 'fast_run', 'inplace', *tags)
        return local_opt
    return f
# Register local_track_shape_i at this level too, so that multi-level
# lifting of shape computations also works through GPU transfers.
register_opt()(theano.tensor.opt.local_track_shape_i)
class InputToGpuOptimizer(Optimizer):
    """Transfer the inputs of a graph to the GPU when possible.

    Doing this in one pass over the env's inputs means the local optimizers
    only need to deal with graphs whose inputs already live on the GPU.
    """
    def __init__(self):
        Optimizer.__init__(self)

    def add_requirements(self, env):
        env.extend(toolbox.ReplaceValidate())
        env.extend(DestroyHandler())

    def apply(self, env):
        for input in env.inputs:
            if isinstance(input.type, CudaNdarrayType):
                continue
            try:
                new_input = host_from_gpu(gpu_from_host(input))
                if new_input.type == input.type:
                    env.replace_validate(input, new_input,
                                         "InputToGpuOptimizer")
            # Fixed: `except TypeError, e` is Python-2-only syntax (a
            # SyntaxError on Python 3) and `e` was unused.
            except TypeError:
                # As only float32 is supported on the GPU, building the
                # transfer can fail; skip such inputs instead of aborting.
                pass
# Register it before all other GPU optimizers to be sure the inputs are on
# the GPU before they run.
gpu_seqopt.register('InputToGpuOptimizer', InputToGpuOptimizer(),
                    0, 'fast_run', 'fast_compile', 'merge')  # TODO: how to make it mandatory for gpu_seqopt?
@local_optimizer([])
def local_cut_gpu_host_gpu(node):
    """Remove a useless transfer round trip.

    gpu_from_host(host_from_gpu(x)) -> x
    host_from_gpu(gpu_from_host(x)) -> x
    """
    for outer, inner in ((gpu_from_host, host_from_gpu),
                         (host_from_gpu, gpu_from_host)):
        if tensor.opt.opt.check_chain(node, outer, inner):
            # Both transfers cancel out: reuse the original variable.
            return [node.inputs[0].owner.inputs[0]]
    return False
gpu_cut_copies.register('cut_gpu_host_transfers', local_cut_gpu_host_gpu,
                        'fast_run', 'inplace', 'gpu')
gpu_cut_copies.register('cut_gpu_constant_transfers',
                        tensor.opt.constant_folding,
                        'fast_run', 'gpu')
# Register it into canonicalize as well, to allow other optimizations to
# work without bothering with this useless transfer pattern.
optdb['canonicalize'].register('local_cut_gpu_host_gpu',
                               local_cut_gpu_host_gpu, 'fast_run', 'gpu')
# 'float64', 'complex128' and 'complex64' are not supported in elemwise
# on the gpu; these are the dtypes a GPU elemwise may touch.
elemwise_cuda_dtype_supported = ['float32', 'uint8', 'int8', 'uint16', 'int16',
                                 'uint32', 'int32', 'uint64', 'int64']
def dtype_in_elemwise_supported(op):
    """
    Return True if the Elemwise op is supported on the gpu.
    Return False otherwise.

    :note: We need to check inside the Composite op, as it may contain
        scalar ops working on dtypes that its own inputs/outputs do not
        reveal.
    """
    def get_all_basic_scalar(composite_op):
        # Recursively flatten nested Composite ops into their basic scalar ops.
        l = []
        for i in composite_op.env.toposort():
            if isinstance(i, theano.scalar.Composite):
                l += get_all_basic_scalar(i)
            else:
                l.append(i)
        return l
    if isinstance(op, (GpuElemwise, tensor.Elemwise)):
        if isinstance(op.scalar_op, theano.scalar.Composite):
            scals = get_all_basic_scalar(op.scalar_op)
            for s in scals:
                if any([i.type.dtype not in elemwise_cuda_dtype_supported
                        for i in s.inputs + s.outputs]):
                    return False
        return True
    # Fixed: the function previously fell off the end and returned None for
    # non-Elemwise ops, while the docstring promises False. Both are falsy,
    # so this is backward compatible.
    return False
@register_opt()
@local_optimizer([])
def local_gpu_elemwise_0(node):
    """elemwise(..., host_from_gpu, ...)
       -> host_from_gpu(elemwise(gpu_from_host, ..., gpu_from_host)

    Moves an Elemwise to the GPU when at least one of its inputs already
    lives there and every output is float32.
    """
    if isinstance(node.op, tensor.Elemwise) and dtype_in_elemwise_supported(node.op):
        if numpy.any([i.owner and isinstance(i.owner.op, HostFromGpu) for i in node.inputs]):
            if numpy.all([o.type.dtype == 'float32' for o in node.outputs]):
                # Don't set any inplace pattern.
                # gpu_inplace_elemwise_optimizer will do it later
                try:
                    new_op = GpuElemwise(node.op.scalar_op)
                except SupportCodeError:
                    # This happens when scalar_op requires support code
                    return False
                # first establish that float32 can store all inputs
                upcastable = set(['float32', 'int8', 'int16', 'uint8', 'uint16'])
                # case 1 - all inputs are already float32
                if numpy.all([i.type.dtype == 'float32' for i in node.inputs]):
                    # TODO: change this when fusion makes Elemwise with multiple outputs
                    gpu_elemwise = new_op(*(gpu_from_host(i) for i in node.inputs))
                # case 2 - it is still ok if some inputs were upcast to float32
                elif numpy.all([i.type.dtype in upcastable for i in node.inputs]):
                    # second - establish that a new node with upcasted inputs
                    # has the same outputs types as the original node
                    upcasted = node.op.make_node(*[tensor.cast(i, 'float32') for i in node.inputs])
                    if [o.type for o in upcasted.outputs] == [o.type for o in node.outputs]:
                        new_inputs = [gpu_from_host(tensor.cast(i, 'float32')) for i in node.inputs]
                        gpu_elemwise = new_op(*new_inputs)
                    else:
                        return False
                else:
                    return False
                # Huge add/mul nodes may not fit in one GPU kernel; split
                # them, and bail out if that is not possible.
                gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner)
                if not gpu_elemwise:
                    return False
                if max_inputs_to_GpuElemwise(node) < len(gpu_elemwise.inputs):
                    return False
                return [host_from_gpu(gpu_elemwise.outputs[0])]
@register_opt()
@local_optimizer([])
def local_gpu_elemwise_1(node):
    """
    gpu_from_host(Elemwise)) -> GpuElemwise(gpu_from_host(...))

    Moves a float32 Elemwise to the GPU when its result is being
    transferred there anyway and it has no other client.
    """
    if node.op == gpu_from_host:
        host_i, = node.inputs
        # Bug fix: the dtype check must inspect the Elemwise being moved
        # (host_i.owner.op). The previous code passed node.op, which is
        # gpu_from_host and never an Elemwise, so the check could not pass
        # and this optimization never fired.
        if (host_i.owner and
                isinstance(host_i.owner.op, tensor.Elemwise) and
                len(host_i.clients) == 1 and
                dtype_in_elemwise_supported(host_i.owner.op)):
            elemwise_node = host_i.owner
            # Don't set any inplace pattern.
            # gpu_inplace_elemwise_optimizer will do it later
            try:
                new_op = GpuElemwise(elemwise_node.op.scalar_op)
            except SupportCodeError:
                # This happens when scalar_op requires support code
                return False
            if all([i.dtype == 'float32' for i in elemwise_node.inputs]):
                gpu_elemwise = new_op(*[gpu_from_host(i) for i in elemwise_node.inputs])
                # Split huge add/mul nodes that may not fit in one kernel.
                gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner)
                if not gpu_elemwise:
                    return False
                return [gpu_elemwise.outputs[0]]
    return False
@register_opt()
@local_optimizer([])
def local_gpu_dimshuffle_0(node):
    """
    dimshuffle(host_from_gpu()) -> host_from_gpu(gpu_dimshuffle)
    gpu_from_host(dimshuffle) -> gpu_dimshuffle(gpu_from_host)
    """
    if isinstance(node.op, tensor.DimShuffle):
        inp, = node.inputs
        if inp.owner and isinstance(inp.owner.op, HostFromGpu):
            # Do the dimshuffle on the GPU, then transfer the result back.
            gpu_shuffle = GpuDimShuffle(node.op.input_broadcastable,
                                        node.op.new_order)
            return [host_from_gpu(gpu_shuffle(gpu_from_host(inp)))]
    if node.op == gpu_from_host:
        producer = node.inputs[0].owner
        if producer and isinstance(producer.op, tensor.DimShuffle):
            # Transfer first, then dimshuffle directly on the GPU.
            gpu_shuffle = GpuDimShuffle(producer.op.input_broadcastable,
                                        producer.op.new_order)
            return [gpu_shuffle(gpu_from_host(producer.inputs[0]))]
    return False
@register_opt()
@local_optimizer([])
def local_gpu_dot_to_dot22(node):
    """
    gpu_from_host(dot) -> gpudot(gpu_from_host)
    dot(host_from_gpu) -> host_from_gpu(gpudot)

    This optimization solves the vector-matrix multiplication issue by
    transforming the vector into a matrix, applying gpu_dot22 and reshaping
    the output.

    A more suitable solution would be to use the right cublas call.
    """
    # In case the dot performed an input upcast, we must check that the
    # result is float32 so that it can run on the gpu.
    if node.op == gpu_from_host:
        if node.outputs[0].type.dtype != 'float32':
            return False
        host_input = node.inputs[0]
        if host_input.owner and host_input.owner.op == tensor.basic.dot:
            x, y = host_input.owner.inputs
            # case one: vector X matrix
            if _is_real_vector(x) and _is_real_matrix(y):
                # Promote the vector to a 1-row matrix.
                new_op = GpuDimShuffle((False,), ['x', 0])
                shape_out = y.shape[1].dimshuffle(['x'])
                gpu_x = new_op(gpu_from_host(x))
                gpu_y = gpu_from_host(y)
            # case two: matrix X vector
            elif _is_real_matrix(x) and _is_real_vector(y):
                # Promote the vector to a 1-column matrix.
                new_op = GpuDimShuffle((False,), [0, 'x'])
                shape_out = x.shape[0].dimshuffle(['x'])
                gpu_x = gpu_from_host(x)
                gpu_y = new_op(gpu_from_host(y))
            else:
                return False
            # Reshape the matrix product back to a vector of the right length.
            return [GpuReshape(1)(gpu_dot22(gpu_x, gpu_y), shape_out)]
    if node.op == tensor.basic.dot:
        if node.outputs[0].type.dtype != 'float32':
            return False
        if numpy.any([(i.owner and i.owner.op == host_from_gpu) for i in node.inputs]):
            x, y = node.inputs
            # Same vector/matrix promotion as above, but the result stays a
            # host variable, so transfer it back after the GPU product.
            if _is_real_vector(x) and _is_real_matrix(y):
                new_op = GpuDimShuffle((False,), ['x', 0])
                shape_out = y.shape[1].dimshuffle(['x'])
                gpu_x = new_op(gpu_from_host(x))
                gpu_y = gpu_from_host(y)
            elif _is_real_matrix(x) and _is_real_vector(y):
                new_op = GpuDimShuffle((False,), [0, 'x'])
                shape_out = x.shape[0].dimshuffle(['x'])
                gpu_x = gpu_from_host(x)
                gpu_y = new_op(gpu_from_host(y))
            else:
                return False
            return [host_from_gpu(GpuReshape(1)(gpu_dot22(gpu_x, gpu_y),
                                                shape_out))]
    return False
@register_opt()
@local_optimizer([])
def local_gpu_lazy_ifelse(node):
    """
    gpu_from_host(ifelse) -> gpu_ifelse(gpu_from_host)
    ifelse(host_from_gpu) -> host_from_gpu(ifelse)
    """
    # lazycond is an optional theano component; skip entirely if absent.
    if hasattr(theano, "lazycond"):
        gpu_ifelse = theano.lazycond.IfElse(gpu=True)
        if node.op == gpu_from_host:
            host_input = node.inputs[0]
            if (host_input.owner
                    and host_input.owner.op == theano.lazycond.ifelse):
                c, t, f = host_input.owner.inputs
                # Move both branch values to the GPU; the condition stays on
                # the host.
                if not isinstance(f.type, CudaNdarrayType):
                    f = gpu_from_host(f)
                if not isinstance(t.type, CudaNdarrayType):
                    t = gpu_from_host(t)
                if isinstance(c.type, CudaNdarrayType):
                    c = host_from_gpu(c)
                return [gpu_ifelse(c, t, f)]
        if node.op == theano.lazycond.ifelse:
            if numpy.any([(i.owner and i.owner.op == host_from_gpu) for i in node.inputs]):
                c, t, f = node.inputs
                if not isinstance(f.type, CudaNdarrayType):
                    f = gpu_from_host(f)
                if not isinstance(t.type, CudaNdarrayType):
                    t = gpu_from_host(t)
                if isinstance(c.type, CudaNdarrayType):
                    c = host_from_gpu(c)
                # Result is consumed on the host, so transfer it back.
                return [host_from_gpu(gpu_ifelse(c, t, f))]
    return False
@register_opt()
@local_optimizer([])
def local_gpu_dot22(node):
    """
    gpu_from_host(dot22) -> gpudot(gpu_from_host)
    dot(host_from_gpu) -> host_from_gpu(gpudot22)
    """
    if node.op == gpu_from_host:
        producer = node.inputs[0].owner
        # The value being transferred is itself a CPU dot22: compute the
        # product on the GPU instead.
        if producer is not None and producer.op == tensor.blas._dot22:
            lhs, rhs = producer.inputs
            return [gpu_dot22(gpu_from_host(lhs), gpu_from_host(rhs))]
    if node.op == tensor.blas._dot22:
        # An operand already lives on the GPU: compute there, move back.
        on_gpu = any(i.owner and i.owner.op == host_from_gpu
                     for i in node.inputs)
        if on_gpu:
            lhs, rhs = node.inputs
            product = gpu_dot22(gpu_from_host(lhs), gpu_from_host(rhs))
            return [host_from_gpu(product)]
    return False
@register_opt()
@local_optimizer([])
def local_gpu_dot22scalar(node):
    """
    gpu_from_host(dot22scalar) -> gpudot(gpu_from_host)
    dot(host_from_gpu) -> host_from_gpu(gpudot22scalar)
    """
    if node.op == gpu_from_host:
        producer = node.inputs[0].owner
        # The value being transferred is a CPU dot22scalar: do it on the GPU.
        if producer is not None and producer.op == tensor.blas._dot22scalar:
            lhs, rhs, scalar = producer.inputs
            return [gpu_dot22scalar(gpu_from_host(lhs), gpu_from_host(rhs),
                                    tensor.blas._as_scalar(scalar))]
    if node.op == tensor.blas._dot22scalar:
        # An operand already lives on the GPU: compute there, move back.
        if any(i.owner and i.owner.op == host_from_gpu for i in node.inputs):
            lhs, rhs, scalar = node.inputs
            product = gpu_dot22scalar(gpu_from_host(lhs), gpu_from_host(rhs),
                                      tensor.blas._as_scalar(scalar))
            return [host_from_gpu(product)]
    return False
@register_opt()
@local_optimizer([])
def local_gpu_gemv(node):
    """
    gpu_from_host(gemv) -> gpu_gemv(gpu_from_host)
    gemv(host_from_gpu) -> host_from_gpu(gpu_gemv)
    """
    # NOTE(review): every CPU gemv variant, including the inplace ones, is
    # mapped to gpu_gemv_no_inplace -- presumably the inplace decision is
    # re-made later by a GPU inplace optimizer; confirm before changing.
    gemvs = {
        tensor.blas.gemv_inplace: gpu_gemv_no_inplace,
        tensor.blas.gemv_no_inplace: gpu_gemv_no_inplace,
        tensor.blas_c.CGemv(inplace=True): gpu_gemv_no_inplace,
        tensor.blas_c.CGemv(inplace=False): gpu_gemv_no_inplace,
    }
    if node.op == gpu_from_host:
        host_input = node.inputs[0]
        if host_input.owner and host_input.owner.op in gemvs:
            op = host_input.owner.op
            # Only the tensor operands z, x, y are transferred; the scalar
            # coefficients a and b are passed through unchanged.
            z, a, x, y, b = host_input.owner.inputs
            return [gemvs[op](
                gpu_from_host(z)
                , a
                , gpu_from_host(x)
                , gpu_from_host(y)
                , b)]
    if node.op in gemvs:
        z, a, x, y, b = node.inputs
        x_on_gpu = (x.owner and x.owner.op == host_from_gpu)
        y_on_gpu = (y.owner and y.owner.op == host_from_gpu)
        z_on_gpu = (z.owner and z.owner.op == host_from_gpu)
        # Any tensor operand already on the GPU justifies computing there.
        if x_on_gpu or y_on_gpu or z_on_gpu:
            return [host_from_gpu(
                gemvs[node.op](
                    gpu_from_host(z)
                    , a
                    , gpu_from_host(x)
                    , gpu_from_host(y)
                    , b))]
    return False
@register_opt()
@local_optimizer([])
def | |
row/column indices in the member's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.iNode.ID*6 + a
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = member.jNode.ID*6 + (a-6)
for b in range(12):
# Determine if index 'b' is related to the i-node or j-node
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.iNode.ID*6 + b
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = member.jNode.ID*6 + (b-6)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + member_K.item((a, b)))
# Add stiffness terms for each quadrilateral in the model
print('...Adding quadrilateral stiffness terms to global stiffness matrix')
for quad in self.Quads:
# Get the quadrilateral's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
quad_K = quad.K()
# Step through each term in the quadrilateral's stiffness matrix
# 'a' & 'b' below are row/column indices in the quadrilateral's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(24):
# Determine which node the index 'a' is related to
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.mNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.nNode.ID*6 + (a-6)
elif a < 18:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.iNode.ID*6 + (a-12)
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = quad.jNode.ID*6 + (a-18)
for b in range(24):
# Determine which node the index 'b' is related to
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.mNode.ID*6 + b
elif b < 12:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.nNode.ID*6 + (b-6)
elif b < 18:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.iNode.ID*6 + (b-12)
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = quad.jNode.ID*6 + (b-18)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K[m, n] += quad_K[a, b]
# Add stiffness terms for each plate in the model
print('...Adding plate stiffness terms to global stiffness matrix')
for plate in self.Plates:
# Get the plate's global stiffness matrix
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
plate_K = plate.K()
# Step through each term in the plate's stiffness matrix
# 'a' & 'b' below are row/column indices in the plate's stiffness matrix
# 'm' & 'n' are corresponding row/column indices in the global stiffness matrix
for a in range(24):
# Determine which node the index 'a' is related to
if a < 6:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.iNode.ID*6 + a
elif a < 12:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.nNode.ID*6 + (a-6)
elif a < 18:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.mNode.ID*6 + (a-12)
else:
# Find the corresponding index 'm' in the global stiffness matrix
m = plate.jNode.ID*6 + (a-18)
for b in range(24):
# Determine which node the index 'b' is related to
if b < 6:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.iNode.ID*6 + b
elif b < 12:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.nNode.ID*6 + (b-6)
elif b < 18:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.mNode.ID*6 + (b-12)
else:
# Find the corresponding index 'n' in the global stiffness matrix
n = plate.jNode.ID*6 + (b-18)
# Now that 'm' and 'n' are known, place the term in the global stiffness matrix
K.itemset((m, n), K.item((m, n)) + plate_K.item((a, b)))
# Return the global stiffness matrix
return K
#%%
def Kg(self, combo_name='Combo 1'):
    '''
    Assembles and returns the global geometric stiffness matrix.

    The model must have a static solution prior to obtaining the geometric
    stiffness matrix. Stiffness of plates is not included.

    Parameters
    ----------
    combo_name : string
        The name of the load combination to derive the matrix for (not the
        load combination itself).
    '''
    # Initialize a zero matrix to hold all the stiffness terms
    Kg = zeros((len(self.Nodes)*6, len(self.Nodes)*6))
    # Add stiffness terms for each member in the model
    print('...Adding member geometric stiffness terms to global geometric stiffness matrix')
    for member in self.Members:
        # Only members active under this load combination contribute
        if member.active[combo_name] == True:
            # Calculate the axial force in the member from its end displacements
            E = member.E
            A = member.A
            L = member.L()
            d = member.d(combo_name)
            P = E*A/L*(d[6, 0] - d[0, 0])
            # Get the member's global geometric stiffness matrix once;
            # rebuilding it for every term would be expensive
            member_Kg = member.Kg(P)
            # 'a' & 'b' are row/column indices in the member's stiffness matrix
            # 'm' & 'n' are the corresponding global row/column indices
            for a in range(12):
                # The first 6 rows belong to the i-node, the last 6 to the j-node
                if a < 6:
                    m = member.iNode.ID*6 + a
                else:
                    m = member.jNode.ID*6 + (a-6)
                for b in range(12):
                    if b < 6:
                        n = member.iNode.ID*6 + b
                    else:
                        n = member.jNode.ID*6 + (b-6)
                    # Accumulate with direct indexing instead of the
                    # deprecated ndarray.itemset/item (removed in NumPy 2.0);
                    # this also matches the indexing style the quadrilateral
                    # assembly loop in this class already uses.
                    Kg[m, n] += member_Kg[a, b]
    # Return the global geometric stiffness matrix
    return Kg
#%%
def FER(self, combo_name='Combo 1'):
'''
Assembles and returns the global fixed end reaction vector.
Parameters
----------
combo_name : string
The name of the load combination to get the fixed end reaction vector for (not the load combination itself).
'''
# Initialize a zero vector to hold all the terms
FER = zeros((len(self.Nodes) * 6, 1))
# Add terms for each member in the model
for member in self.Members:
# Get the member's global fixed end reaction vector
# Storing it as a local variable eliminates the need to rebuild it every time a term is needed
member_FER = member.FER(combo_name)
# Step through each term in the member's fixed end reaction vector
# 'a' below is the row index in the member's fixed end reaction vector
# 'm' below is the corresponding row index in the global fixed end reaction vector
for a in range(12):
# Determine if index 'a' is related to the i-node or j-node
if a < 6:
# Find the corresponding index 'm' in the global fixed end reaction vector
m = member.iNode.ID * 6 + a
else:
# Find the corresponding index 'm' in | |
self.m_textCtrl_fuse4e0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse4f0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse4f0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse500 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse500, 0, wx.ALL, 5 )
self.m_textCtrl_fuse510 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse510, 0, wx.ALL, 5 )
self.m_textCtrl_fuse520 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse520, 0, wx.ALL, 5 )
self.m_textCtrl_fuse530 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse530, 0, wx.ALL, 5 )
self.m_textCtrl_fuse540 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse540, 0, wx.ALL, 5 )
self.m_textCtrl_fuse550 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse550, 0, wx.ALL, 5 )
self.m_textCtrl_fuse560 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse560, 0, wx.ALL, 5 )
self.m_textCtrl_fuse570 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse570, 0, wx.ALL, 5 )
self.m_textCtrl_fuse580 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse580, 0, wx.ALL, 5 )
self.m_textCtrl_fuse590 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse590, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5a0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse5a0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5b0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse5b0, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupCtrl1, 1, wx.EXPAND, 5 )
bSizer_fuseGroupTxt2 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText_fuse5c0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK4", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5c0.Wrap( -1 )
self.m_staticText_fuse5c0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5c0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse5c0, 0, wx.ALL, 5 )
self.m_staticText_fuse5d0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK5", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5d0.Wrap( -1 )
self.m_staticText_fuse5d0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5d0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse5d0, 0, wx.ALL, 5 )
self.m_staticText_fuse5e0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK6", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5e0.Wrap( -1 )
self.m_staticText_fuse5e0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5e0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse5e0, 0, wx.ALL, 5 )
self.m_staticText_fuse5f0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK7", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5f0.Wrap( -1 )
self.m_staticText_fuse5f0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5f0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse5f0, 0, wx.ALL, 5 )
self.m_staticText_fuse600 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x600:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse600.Wrap( -1 )
self.m_staticText_fuse600.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse600, 0, wx.ALL, 5 )
self.m_staticText_fuse610 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x610:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse610.Wrap( -1 )
self.m_staticText_fuse610.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse610, 0, wx.ALL, 5 )
self.m_staticText_fuse620 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x620:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse620.Wrap( -1 )
self.m_staticText_fuse620.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse620, 0, wx.ALL, 5 )
self.m_staticText_fuse630 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x630:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse630.Wrap( -1 )
self.m_staticText_fuse630.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse630, 0, wx.ALL, 5 )
self.m_staticText_fuse640 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x640:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse640.Wrap( -1 )
self.m_staticText_fuse640.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse640, 0, wx.ALL, 5 )
self.m_staticText_fuse650 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x650:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse650.Wrap( -1 )
self.m_staticText_fuse650.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse650, 0, wx.ALL, 5 )
self.m_staticText_fuse660 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x660:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse660.Wrap( -1 )
self.m_staticText_fuse660.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse660, 0, wx.ALL, 5 )
self.m_staticText_fuse670 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x670:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse670.Wrap( -1 )
self.m_staticText_fuse670.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse670, 0, wx.ALL, 5 )
self.m_staticText_fuse680 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x680:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse680.Wrap( -1 )
self.m_staticText_fuse680.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse680, 0, wx.ALL, 5 )
self.m_staticText_fuse690 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SwGp2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse690.Wrap( -1 )
self.m_staticText_fuse690.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse690.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse690, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupTxt2, 1, wx.EXPAND, 5 )
bSizer_fuseGroupCtrl2 = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_fuse5c0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse5c0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5d0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse5d0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5e0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse5e0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5f0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse5f0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse600 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse600, 0, wx.ALL, 5 )
self.m_textCtrl_fuse610 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse610, 0, wx.ALL, 5 )
self.m_textCtrl_fuse620 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse620, 0, wx.ALL, 5 )
self.m_textCtrl_fuse630 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse630, 0, wx.ALL, 5 )
self.m_textCtrl_fuse640 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse640, 0, wx.ALL, 5 )
self.m_textCtrl_fuse650 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse650, 0, wx.ALL, 5 )
self.m_textCtrl_fuse660 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse660, 0, wx.ALL, 5 )
self.m_textCtrl_fuse670 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse670, 0, wx.ALL, 5 )
self.m_textCtrl_fuse680 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse680, 0, wx.ALL, 5 )
self.m_textCtrl_fuse690 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse690, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupCtrl2, 1, wx.EXPAND, 5 )
bSizer_fuseGroupTxt3 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText_fuse6a0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SwGp2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse6a0.Wrap( -1 )
self.m_staticText_fuse6a0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse6a0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse6a0, 0, wx.ALL, 5 )
self.m_staticText_fuse6b0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SwGp2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse6b0.Wrap( -1 )
self.m_staticText_fuse6b0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse6b0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse6b0, 0, wx.ALL, 5 )
self.m_staticText_fuse6c0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SwGp2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse6c0.Wrap( -1 )
self.m_staticText_fuse6c0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse6c0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse6c0, 0, wx.ALL, 5 )
self.m_button_fuse6d0 = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Conf0", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_button_fuse6d0.SetFont( wx.Font( 7, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial" ) )
bSizer_fuseGroupTxt3.Add( self.m_button_fuse6d0, 0, wx.ALL, 5 )
self.m_button_fuse6e0 = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Conf1", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_button_fuse6e0.SetFont( wx.Font( 7, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial" ) )
bSizer_fuseGroupTxt3.Add( self.m_button_fuse6e0, 0, wx.ALL, 5 )
self.m_staticText_fuse6f0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x6f0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse6f0.Wrap( -1 )
self.m_staticText_fuse6f0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse6f0, 0, wx.ALL, 5 )
self.m_staticText_fuse700 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x700:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse700.Wrap( -1 )
self.m_staticText_fuse700.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse700, 0, wx.ALL, 5 )
self.m_staticText_fuse710 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x710:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse710.Wrap( -1 )
self.m_staticText_fuse710.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse710, 0, wx.ALL, 5 )
self.m_staticText_fuse720 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x720:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse720.Wrap( -1 )
self.m_staticText_fuse720.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse720, 0, wx.ALL, 5 )
self.m_staticText_fuse730 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x730:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse730.Wrap( -1 )
self.m_staticText_fuse730.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse730, 0, wx.ALL, 5 )
self.m_staticText_fuse740 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x740:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse740.Wrap( -1 )
self.m_staticText_fuse740.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse740, 0, wx.ALL, 5 )
self.m_staticText_fuse750 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x750:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse750.Wrap( -1 )
self.m_staticText_fuse750.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse750, 0, wx.ALL, 5 )
self.m_staticText_fuse760 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x760:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse760.Wrap( -1 )
self.m_staticText_fuse760.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse760, 0, wx.ALL, 5 )
self.m_staticText_fuse770 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x770:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse770.Wrap( -1 )
self.m_staticText_fuse770.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse770, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupTxt3, 1, wx.EXPAND, 5 )
bSizer_fuseGroupCtrl3 = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_fuse6a0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse6a0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse6b0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( | |
#!/tools/net/bin/python
# Copyright (c) 1994, 1996, <NAME> All rights reserved.
# Copyright (c) 2004, <NAME> All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of py-iso8211 nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""commandline - a simple command line interface
This module provides a class CommandLine which may be subclassed
to provide command line interpretation facilities.
This code written by:
<NAME>
Department of Geography & Topographic Science,
University of Glasgow,
GLASGOW G12 8QQ
Scotland
Tel: (+44)141-330-6649
Fax: (+44)141-330-4894
Email: <EMAIL>
or: <EMAIL>
and dated 10 January 1996
NOTE - this is not specific to ISO 8211, at all, but is needed by the
iso8211 command, so is provided in the iso8211 package for convenience.
"""
import sys
import os
import string
# Pre-bool boolean constants (this module predates Python's bool type).
FALSE, TRUE = 0, 1
# Exceptions
class UnknownCommand(Exception):
    """Raised when a command word has no entry in the command table.

    Carries (cmd, args) of the offending command as its arguments.
    """
# The basic help string
Basic_help_string = """Command line processor.
Commands are:
interactive enter a simple interactive environment.
int synonym for "interactive"
debug switch debugging on (or off)
exit exit the interactive environment
help print this message
"""
#---------------------------------------------------------------------------------
class CommandLine:
"""Command line context.
This class provides the basic functionality for command line processing,
including the ability to manage a simple sort of interactive session.
Basic usage involves:
- create a sub-class of CommandLine
- add appropriate methods for the commands required
- add the command name / method correspondences to "self.commands"
- do
SubClass.obey_command_line(sys.argv[1:])
Initialisation arguments:
None
A CommandLine object contains:
self.prompt The prompt to use for interactive use
(defaults to "Command:")
self.separator The separator for multiple commands on one line
(defaults to "+")
self.commands A dictionary of the form:
command name :: command method
(defaults to entries for "int", "interactive", "help",
"debug" and "exit")
self.help_string A string giving help on the basic commands already
defined
self.exceptions_to_ignore A tuple of exceptions to be ignored
(defaults to an empty tuple)
self.exceptions_to_handle A tuple of exceptions to be pass to self.handler
(defaults to an empty tuple)
self.handler A routine to call (with arguments exception and tuple)
to handle any exceptions that are not to be ignored,
but do need special handling. If it raises any exceptions
of its own, they will not be trapped.
(defaults to None)
"""
def __init__(self):
# Set up a default prompt, used when doing an interactive session
self.prompt = "Command: "
# Remember if we are currently doing an interactive session
self.in_interactive = FALSE
# Decide on the string we use to separate multiple commands
# on the same line - a plus makes a reasonable default, since
# it doesn't get clobbered by the Unix shell
self.separator = '+'
# Define our basic commands
self.commands = { "interactive" : self.interactive,
"int" : self.interactive,
"help" : self.help,
"debug" : self.debug,
"exit" : self.exit }
# Define the basic help string
self.help_string = Basic_help_string
# We have a tuple of exceptions which should be ignored
# when obeying a command line - these just print out
# the basic exception type and value, which allows an
# interactive session to continue neatly over expected
# exceptions. Note that EOFError and UnknownCommand are
# handled separately
self.exceptions_to_ignore = () # no such exceptions, by default
# We don't have a special exception handler
self.exceptions_to_handle = () # no such exceptions, by default
self.handler = None
# And we're not (by default) debugging
self.debugging = FALSE
def __repr__(self):
return "Command line interface"
def exit(self,cmd,args):
"""Request to exit interactive mode - raise an EOF error."""
raise EOFError
def help(self,cmd,args):
"""The basic help command - prints the basic help string.
Called with ARGS = None if there were no arguments on the
command line."""
print self.help_string
def debug(self,cmd,args):
"""Command to switch debugging on or off."""
if args == None or len(args) == 0:
self.debugging = TRUE
elif args[0] == "true" or args[0] == "t" or args[0] == 1:
self.debugging = TRUE
elif args[0] == "false" or args[0] == "f" or args[0] == 0:
self.debugging = FALSE
else:
print "Command is `debug [t|f|true|false|1|0]'"
if self.debugging:
print "Debugging on"
else:
print "Debugging off"
def obey_command_line(self,command_line):
"""Obey a command line.
The COMMAND_LINE is a list of `words', so that this method
may be called with "sys.argv[1:]" as a sensible argument.
Note that a command line may be composed of multiple commands,
separated by the separator string (which defaults to "+").
Separators must be delimited by spaces (i.e., must count as
`words') - this is so that (for instance) a filename may safely
contain the separator string.
"""
if len(command_line) == 0:
self.help(None)
return
# Split the command line apart into individual commands
list_of_commands = self.extract_commands(command_line,self.separator)
if self.debugging:
print "--- command line =",list_of_commands
# And obey them
for command in list_of_commands:
try:
self.obey_command(command)
except UnknownCommand,(cmd,args):
# Unknown command - this should already have
# been grumbled about, so just abort this
# command line
break
except KeyboardInterrupt:
# Raised by typing "Control-C"
# Just ignore this command line
print # The system doesn't print a newline for us
break
except EOFError:
# Raised by typing "exit"
# If we're interactive, just raise it again
if self.in_interactive:
raise EOFError
else:
break
except self.exceptions_to_ignore:
# Any exception that we should just ignore
# - just ignore this command line after reporting
print "%s: %s"%(sys.exc_type,sys.exc_value)
break
except self.exceptions_to_handle:
# Any exception that we should handle specially
# - just ignore this command line afterwards
if self.handler == None:
print "%s: %s"%(sys.exc_type,sys.exc_value)
else:
self.handler(sys.exc_type,sys.exc_value)
break
def extract_commands(self,command_line,separator):
"""Extract individual commands from a command line.
Given a COMMAND_LINE (list of `words'), return a list of
individual commands, as delimited by the SEPARATOR string.
Each individual command is a list of command words.
For instance, given the COMMAND_LINE [ "help" "+" "fred" "2" ],
and the SEPARATOR "+", this routine would return:
[ [ "help" ], [ "fred", "2" ] ]
"""
if self.debugging:
print "--- extract:",command_line
# Individual command sequences are separated by the separator string...
commands = []
this_command = []
for word in command_line:
if word == separator:
commands.append(this_command)
this_command = []
else:
this_command.append(word)
else:
if len(this_command) > 0:
commands.append(this_command)
if self.debugging:
print "--- which produces:",commands
return commands
def obey_command(self,command):
"""Obey a command.
The COMMAND is a list of `words', where the first is the
command name, and the rest are any arguments.
"""
if self.debugging:
print "--- command =",command
cmd = command[0] # the command word
args = command[1:] # any arguments
# If we recognise this command, obey the relevent function,
# otherwise, call the "unknown command" function to take
# appropriate action
if self.commands.has_key(cmd):
self.commands[cmd](cmd,args)
else:
self.unknown_command(cmd,args)
def unknown_command(self,cmd,args):
"""Called to grumble about an unknown command.
This is given as a separate method so that it can be
overridden by child classes, if required.
"""
print "Unknown command: %s"%(cmd)
print "Commands are:",
names = self.commands.keys()
names.sort()
for name in names:
print name,
print
# And raise an exception to show something went wrong
raise UnknownCommand(cmd,args)
def interactive(self,cmd,args):
"""Start an interactive session."""
if self.in_interactive:
print "Already in an interactive session - ignored"
return
else:
self.in_interactive = TRUE
# Announce ourselves
self.interactive_intro(args)
# And loop, reading command lines and obeying them
while 1:
try:
command_line = raw_input(self.prompt)
except KeyboardInterrupt: # i.e., "Control-C"
print # need a newline
continue # ignore it
except EOFError:
print "exit"
break
# Split it into individual words, separated by spaces
word_list = string.split(command_line)
# Ignore blank lines
if len(word_list) == 0:
continue
# Obey the resultant command line
try:
self.obey_command_line(word_list)
except EOFError:
# Raised by typing "exit", so do so
self.in_interactive = FALSE
break
def interactive_intro(self,args):
"""Output appropriate messages at the start of an interactive session."""
print "Interactive command environment"
print "(use exit or `EOF' to exit the environment)"
print
if len(args) > 0:
print "The rest of the command line after an `interactive' command"\
" is ignored"
print "In this case, the following commands were ignored:",
for word in args:
print word,
print
print
#---------------------------------------------------------------------------------
class Example(CommandLine):
"""An example of subclassing the command line class."""
def __init__(self):
# Do the command line initialisation
CommandLine.__init__(self)
# Define our own data
self.had_fred = FALSE
# Add some commands
self.commands["fred"] = self.fred
self.commands["args"] = self.print_args
# And extend the help string to know about them
self.help_string = self.help_string + """
fred set the Fred flag
args <args> print the arguments
"""
def __repr__(self):
if self.had_fred:
return "Had Fred already"
else:
return "Fred"
def fred(self,args):
"""Implement the "fred" command."""
if self.had_fred:
print 'Had "fred" already'
else:
print 'Received "fred"'
self.had_fred = TRUE
def print_args(self,args):
"""Print the arguments to the | |
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import torch
import torch.nn as nn
import math
import numpy as np
from torch.nn import functional as F
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample.

    Randomly zeroes whole samples (the full residual-branch output for a
    sample) with probability ``drop_prob`` during training, rescaling the
    survivors by ``1/keep_prob`` so the expected value is unchanged.
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        # Per-sample drop probability; None is treated as 0 (no drop)
        self.drop_prob = drop_prob

    def forward(self, x, drop_prob: float = None, training: bool = None):
        """Apply stochastic depth to `x`.

        BUG FIX: the original defaults (`drop_prob=0., training=False`)
        ignored `self.drop_prob` and `self.training` entirely, so the module
        was a permanent no-op unless callers re-passed the rate on every
        call. `None` now means "use the module's configured rate / current
        training mode"; explicit arguments behave exactly as before.
        """
        if drop_prob is None:
            drop_prob = self.drop_prob if self.drop_prob is not None else 0.
        if training is None:
            training = self.training

        if drop_prob == 0. or not training:
            return x

        keep_prob = 1 - drop_prob
        # Broadcast shape (B, 1, 1, ...): one keep/drop decision per sample,
        # regardless of the tensor's rank
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
        random_tensor.floor_()  # binarize: 1 with prob keep_prob, else 0
        # Rescale survivors so the expected activation is unchanged
        output = x.div(keep_prob) * random_tensor
        return output
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """Fill `tensor` in-place with values from a truncated normal distribution.

    Values are drawn from N(mean, std) restricted to [a, b] via inverse-CDF
    sampling. Returns `tensor` (modified in place).

    Cut & paste from PyTorch official master until it's in a few official
    releases - RW. Method based on
    https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    """
    # BUG FIX: `warnings` was never imported at module level, so the warning
    # branch below raised NameError instead of warning. Function-scope import
    # keeps the fix self-contained.
    import warnings

    def norm_cdf(x):
        # Standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor
class Mlp(nn.Module):
    """Two-layer perceptron: fc1 -> activation -> dropout -> fc2 -> dropout.

    `hidden_features` and `out_features` default to `in_features` when not
    supplied (falsy values fall back the same way as the original).
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_dim = hidden_features or in_features
        out_dim = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_dim)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_dim, out_dim)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Project to hidden width, activate, project back; dropout after each projection."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C) tensor; H and W must be multiples of ``window_size``.
        window_size: side length of each window.
    Returns:
        (num_windows * B, window_size, window_size, C) tensor.
    """
    B, H, W, C = x.shape
    grid_h, grid_w = H // window_size, W // window_size
    tiled = x.view(B, grid_h, window_size, grid_w, window_size, C)
    tiled = tiled.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiled.view(-1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """Merge windows back into a full feature map (inverse of window_partition).

    Args:
        windows: (num_windows * B, window_size, window_size, C) tensor.
        window_size: side length of each square window.
        H, W: height and width of the (padded) feature map.
    Returns:
        (B, H, W, C) tensor.
    """
    # Recover the batch size from the total window count.
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(B, H // window_size, W // window_size,
                        window_size, window_size, -1)
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(B, H, W, -1)
class WindowAttention(nn.Module):
    """ Window based multi-head self attention (W-MSA) module with relative position bias.

    Attends within each window of Wh*Ww tokens and adds a learned bias
    indexed by the relative position of every token pair.
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        # dim: input channels; window_size: (Wh, Ww); num_heads: attention heads.
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scaling is 1/sqrt(head_dim) unless qk_scale overrides it.
        self.scale = qk_scale or head_dim ** -0.5
        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH
        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        # Row-major flattening of the 2-D relative offset into a single index.
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """Compute windowed attention.

        Args:
            x: (num_windows*B, N, C) features, where N = Wh*Ww.
            mask: optional (num_windows, N, N) additive attention mask
                (used for shifted windows) — TODO confirm value convention
                (presumably 0 / -inf) against the caller.
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))
        # Gather the per-pair bias from the table via the precomputed index.
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            nW = mask.shape[0]
            # Broadcast the per-window mask over batch and heads.
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class SwinTransformerBlock(nn.Module):
    """ Swin Transformer Block.

    norm -> (shifted) window attention -> residual, then norm -> MLP -> residual.
    ``shift_size > 0`` selects the shifted-window (SW-MSA) variant.
    """

    def __init__(self, dim, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=(self.window_size, self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        # Spatial resolution; must be assigned by the caller before forward().
        self.H = None
        self.W = None

    def forward(self, x, mask_matrix):
        """Run the block.

        Args:
            x: (B, H*W, C) input features; H, W taken from self.H/self.W.
            mask_matrix: attention mask applied only when shift_size > 0.
        """
        B, L, C = x.shape
        H, W = self.H, self.W
        assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # pad feature maps to multiples of window size
        pad_l = pad_t = 0
        pad_r = (self.window_size - W % self.window_size) % self.window_size
        pad_b = (self.window_size - H % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = x.shape
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            attn_mask = mask_matrix
        else:
            shifted_x = x
            attn_mask = None
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=attn_mask)  # nW*B, window_size*window_size, C
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        # Drop any padding added above before flattening back to (B, L, C).
        if pad_r > 0 or pad_b > 0:
            x = x[:, :H, :W, :].contiguous()
        x = x.view(B, H * W, C)
        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchMerging(nn.Module):
    """ Patch Merging Layer.

    Concatenates each 2x2 neighborhood of patches along the channel axis
    (C -> 4C), normalizes, then linearly reduces to 2C — halving H and W.
    """

    def __init__(self, dim, norm_layer=nn.LayerNorm):
        # dim: number of input channels per patch.
        super().__init__()
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x, H, W):
        """Merge patches.

        Args:
            x: (B, H*W, C) input features.
            H, W: spatial resolution of the input.
        Returns:
            (B, ceil(H/2)*ceil(W/2), 2*C) merged features.
        """
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        x = x.view(B, H, W, C)
        # Pad to even H/W so the 2x2 grouping below is well defined.
        pad_input = (H % 2 == 1) or (W % 2 == 1)
        if pad_input:
            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
        # Bug fix: these four strided slices were corrupted in the source
        # (mangled into IPv6-like tokens); restored to the canonical
        # even/odd-row x even/odd-column selection.
        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C (top-left of each 2x2 block)
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C (bottom-left)
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C (top-right)
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C (bottom-right)
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C
        x = self.norm(x)
        x = self.reduction(x)
        return x
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage. """
def __init__(self,
dim,
depth,
num_heads,
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
norm_layer=nn.LayerNorm,
downsample=None,
):
super().__init__()
self.window_size = window_size
self.shift_size = window_size // 2
self.depth = depth
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(
dim=dim,
num_heads=num_heads,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, | |
# File: grr/lib/communicator_test.py
#!/usr/bin/env python
"""Test for client."""
import array
import logging
import pdb
import time
import requests
from grr import config
from grr.client import comms
from grr.client.client_actions import admin
from grr.client.client_actions import standard
from grr.lib import communicator
from grr.lib import flags
from grr.lib import queues
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import flows as rdf_flows
from grr.server import aff4
from grr.server import front_end
from grr.server import maintenance_utils
from grr.server.aff4_objects import aff4_grr
from grr.server.flows.general import ca_enroller
from grr.test_lib import test_lib
from grr.test_lib import worker_mocks
# pylint: mode=test
def MakeHTTPException(code=500, msg="Error"):
    """Build a requests.ConnectionError carrying a response with *code*."""
    resp = requests.Response()
    resp.status_code = code
    return requests.ConnectionError(msg, response=resp)
def MakeResponse(code=500, data=""):
    """Build a requests.Response with the given status code and raw body."""
    resp = requests.Response()
    resp.status_code = code
    # Set the private content attribute directly so resp.content returns data.
    resp._content = data
    return resp
class ClientCommsTest(test_lib.GRRBaseTest):
    """Test the communicator."""

    def setUp(self):
        """Set up communicator tests."""
        super(ClientCommsTest, self).setUp()
        # These tests change the config so we preserve state.
        self.config_stubber = test_lib.PreserveConfig()
        self.config_stubber.Start()
        self.client_private_key = config.CONFIG["Client.private_key"]
        self.server_serial_number = 0
        self.server_certificate = config.CONFIG["Frontend.certificate"]
        self.server_private_key = config.CONFIG["PrivateKeys.server_key"]
        # Client side communicator, primed with the server/CA certificates.
        self.client_communicator = comms.ClientCommunicator(
            private_key=self.client_private_key)
        self.client_communicator.LoadServerCertificate(
            server_certificate=self.server_certificate,
            ca_certificate=config.CONFIG["CA.certificate"])
        # Server side communicator used to decrypt what the client sends.
        self.server_communicator = front_end.ServerCommunicator(
            certificate=self.server_certificate,
            private_key=self.server_private_key,
            token=self.token)
        self.last_urlmock_error = None

    def tearDown(self):
        super(ClientCommsTest, self).tearDown()
        # Restore the config state preserved in setUp.
        self.config_stubber.Stop()

    def ClientServerCommunicate(self, timestamp=None):
        """Tests the end to end encrypted communicators.

        Encodes 10 messages on the client, decrypts them on the server and
        checks source, timestamp and session ids. Returns the decoded
        messages; also stores the ciphertext on self.cipher_text for replay
        tests.
        """
        message_list = rdf_flows.MessageList()
        for i in range(1, 11):
            message_list.job.Append(
                session_id=rdfvalue.SessionID(
                    base="aff4:/flows", queue=queues.FLOWS, flow_name=i),
                name="OMG it's a string")
        result = rdf_flows.ClientCommunication()
        timestamp = self.client_communicator.EncodeMessages(
            message_list, result, timestamp=timestamp)
        self.cipher_text = result.SerializeToString()
        (decoded_messages, source, client_timestamp) = (
            self.server_communicator.DecryptMessage(self.cipher_text))
        self.assertEqual(source, self.client_communicator.common_name)
        self.assertEqual(client_timestamp, timestamp)
        self.assertEqual(len(decoded_messages), 10)
        for i in range(1, 11):
            self.assertEqual(decoded_messages[i - 1].session_id,
                             rdfvalue.SessionID(
                                 base="aff4:/flows", queue=queues.FLOWS, flow_name=i))
        return decoded_messages

    def testCommunications(self):
        """Test that messages from unknown clients are tagged unauthenticated."""
        decoded_messages = self.ClientServerCommunicate()
        for i in range(len(decoded_messages)):
            self.assertEqual(decoded_messages[i].auth_state,
                             rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED)

    def MakeClientAFF4Record(self):
        """Make a client in the data store."""
        client_cert = self.ClientCertFromPrivateKey(self.client_private_key)
        new_client = aff4.FACTORY.Create(
            client_cert.GetCN(), aff4_grr.VFSGRRClient, token=self.token)
        new_client.Set(new_client.Schema.CERT, client_cert)
        new_client.Close()
        return new_client

    def testKnownClient(self):
        """Test that messages from known clients are authenticated."""
        self.MakeClientAFF4Record()
        # Now the server should know about it
        decoded_messages = self.ClientServerCommunicate()
        for i in range(len(decoded_messages)):
            self.assertEqual(decoded_messages[i].auth_state,
                             rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)

    def testClientPingAndClockIsUpdated(self):
        """Check PING and CLOCK are updated, simulate bad client clock."""
        new_client = self.MakeClientAFF4Record()
        now = rdfvalue.RDFDatetime.Now()
        # Simulate a client whose clock runs 20 seconds behind the server.
        client_now = now - 20
        with test_lib.FakeTime(now):
            self.ClientServerCommunicate(timestamp=client_now)
            client_obj = aff4.FACTORY.Open(new_client.urn, token=self.token)
            self.assertEqual(now.AsSecondsFromEpoch(),
                             client_obj.Get(
                                 client_obj.Schema.PING).AsSecondsFromEpoch())
            self.assertEqual(client_now.AsSecondsFromEpoch(),
                             client_obj.Get(
                                 client_obj.Schema.CLOCK).AsSecondsFromEpoch())
        # Advance both clocks (by different amounts) and communicate again.
        now += 60
        client_now += 40
        with test_lib.FakeTime(now):
            self.ClientServerCommunicate(timestamp=client_now)
            client_obj = aff4.FACTORY.Open(new_client.urn, token=self.token)
            self.assertEqual(now.AsSecondsFromEpoch(),
                             client_obj.Get(
                                 client_obj.Schema.PING).AsSecondsFromEpoch())
            self.assertEqual(client_now.AsSecondsFromEpoch(),
                             client_obj.Get(
                                 client_obj.Schema.CLOCK).AsSecondsFromEpoch())

    def testClientPingStatsUpdated(self):
        """Check client ping stats are updated."""
        new_client = self.MakeClientAFF4Record()
        self.assertEqual(
            stats.STATS.GetMetricValue(
                "client_pings_by_label", fields=["testlabel"]), 0)
        with aff4.FACTORY.Open(
                new_client.urn, mode="rw", token=self.token) as client_object:
            client_object.AddLabel("testlabel")
        now = rdfvalue.RDFDatetime.Now()
        with test_lib.FakeTime(now):
            self.ClientServerCommunicate(timestamp=now)
        # One ping should now be counted against the label.
        self.assertEqual(
            stats.STATS.GetMetricValue(
                "client_pings_by_label", fields=["testlabel"]), 1)

    def testServerReplayAttack(self):
        """Test that replaying encrypted messages to the server invalidates them."""
        self.MakeClientAFF4Record()
        # First send some messages to the server
        decoded_messages = self.ClientServerCommunicate(timestamp=1000000)
        encrypted_messages = self.cipher_text
        self.assertEqual(decoded_messages[0].auth_state,
                         rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
        # Immediate replay is accepted by the server since some proxies do this.
        (decoded_messages, _,
         _) = self.server_communicator.DecryptMessage(encrypted_messages)
        self.assertEqual(decoded_messages[0].auth_state,
                         rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
        # Move the client time more than 1h forward.
        self.ClientServerCommunicate(timestamp=1000000 + 3700 * 1000000)
        # And replay the old messages again.
        (decoded_messages, _,
         _) = self.server_communicator.DecryptMessage(encrypted_messages)
        # Messages should now be tagged as desynced
        self.assertEqual(decoded_messages[0].auth_state,
                         rdf_flows.GrrMessage.AuthorizationState.DESYNCHRONIZED)

    def testX509Verify(self):
        """X509 Verify can have several failure paths."""
        # This is a successful verify.
        with utils.Stubber(
                rdf_crypto.RDFX509Cert, "Verify", lambda self, public_key=None: True):
            self.client_communicator.LoadServerCertificate(
                self.server_certificate, config.CONFIG["CA.certificate"])

        def Verify(_, public_key=False):
            _ = public_key
            raise rdf_crypto.VerificationError("Testing verification failure.")

        # Mock the verify function to simulate certificate failures.
        with utils.Stubber(rdf_crypto.RDFX509Cert, "Verify", Verify):
            self.assertRaises(IOError, self.client_communicator.LoadServerCertificate,
                              self.server_certificate,
                              config.CONFIG["CA.certificate"])

    def testErrorDetection(self):
        """Tests the end to end encrypted communicators."""
        # Install the client - now we can verify its signed messages
        self.MakeClientAFF4Record()
        # Make something to send
        message_list = rdf_flows.MessageList()
        for i in range(0, 10):
            message_list.job.Append(session_id=str(i))
        result = rdf_flows.ClientCommunication()
        self.client_communicator.EncodeMessages(message_list, result)
        cipher_text = result.SerializeToString()
        # Depending on this modification several things may happen:
        # 1) The padding may not match which will cause a decryption exception.
        # 2) The protobuf may fail to decode causing a decoding exception.
        # 3) The modification may affect the signature resulting in
        #    UNAUTHENTICATED messages.
        # 4) The modification may have no effect on the data at all.
        for x in range(0, len(cipher_text), 50):
            # Futz with the cipher text (Make sure it's really changed)
            mod_cipher_text = (
                cipher_text[:x] + chr((ord(cipher_text[x]) % 250) + 1) +
                cipher_text[x + 1:])
            try:
                decoded, client_id, _ = self.server_communicator.DecryptMessage(
                    mod_cipher_text)
                for i, message in enumerate(decoded):
                    # If the message is actually authenticated it must not be changed!
                    if message.auth_state == message.AuthorizationState.AUTHENTICATED:
                        self.assertEqual(message.source, client_id)
                        # These fields are set by the decoder and are not present in
                        # the original message - so we clear them before comparison.
                        message.auth_state = None
                        message.source = None
                        self.assertRDFValuesEqual(message, message_list.job[i])
                    else:
                        logging.debug("Message %s: Authstate: %s", i, message.auth_state)
            except communicator.DecodingError as e:
                logging.debug("Detected alteration at %s: %s", x, e)

    def testEnrollingCommunicator(self):
        """Test that the ClientCommunicator generates good keys."""
        self.client_communicator = comms.ClientCommunicator()
        self.client_communicator.LoadServerCertificate(
            self.server_certificate, config.CONFIG["CA.certificate"])
        # Verify that the CN is of the correct form
        csr = self.client_communicator.GetCSR()
        cn = rdf_client.ClientURN.FromPublicKey(csr.GetPublicKey())
        self.assertEqual(cn, csr.GetCN())

    def testServerKeyRotation(self):
        """Clients must reload the server cert after a server key rotation."""
        self.MakeClientAFF4Record()
        # Now the server should know about the client.
        decoded_messages = self.ClientServerCommunicate()
        for i in range(len(decoded_messages)):
            self.assertEqual(decoded_messages[i].auth_state,
                             rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
        # Suppress the output.
        with utils.Stubber(maintenance_utils, "EPrint", lambda msg: None):
            maintenance_utils.RotateServerKey()
        server_certificate = config.CONFIG["Frontend.certificate"]
        server_private_key = config.CONFIG["PrivateKeys.server_key"]
        self.assertNotEqual(server_certificate, self.server_certificate)
        self.assertNotEqual(server_private_key, self.server_private_key)
        self.server_communicator = front_end.ServerCommunicator(
            certificate=server_certificate,
            private_key=server_private_key,
            token=self.token)
        # Clients can't connect at this point since they use the outdated
        # session key.
        with self.assertRaises(communicator.DecryptionError):
            self.ClientServerCommunicate()
        # After the client reloads the server cert, this should start
        # working again.
        self.client_communicator.LoadServerCertificate(
            server_certificate=server_certificate,
            ca_certificate=config.CONFIG["CA.certificate"])
        self.assertEqual(len(list(self.ClientServerCommunicate())), 10)
class HTTPClientTests(test_lib.GRRBaseTest):
"""Test the http communicator."""
def setUp(self):
    """Set up communicator tests."""
    super(HTTPClientTests, self).setUp()
    # These tests change the config so we preserve state.
    self.config_stubber = test_lib.PreserveConfig()
    self.config_stubber.Start()
    certificate = self.ClientCertFromPrivateKey(
        config.CONFIG["Client.private_key"])
    self.server_serial_number = 0
    self.server_private_key = config.CONFIG["PrivateKeys.server_key"]
    self.server_certificate = config.CONFIG["Frontend.certificate"]
    self.client_cn = certificate.GetCN()
    # Make a new client
    self.CreateNewClientObject()
    # The housekeeper threads of the time based caches also call time.time and
    # interfere with some tests so we disable them here.
    utils.InterruptableThread.exit = True
    # The same also applies to the StatsCollector thread.
    stats.StatsCollector.exit = True
    # Make a client mock
    self.client = aff4.FACTORY.Create(
        self.client_cn, aff4_grr.VFSGRRClient, mode="rw", token=self.token)
    self.client.Set(self.client.Schema.CERT(certificate.AsPEM()))
    self.client.Flush()
    # Stop the client from actually processing anything
    self.out_queue_overrider = test_lib.ConfigOverrider({
        "Client.max_out_queue": 0
    })
    self.out_queue_overrider.Start()
    # And cache it in the server
    self.CreateNewServerCommunicator()
    # Route all HTTP traffic through UrlMock and make sleeps instantaneous.
    self.requests_stubber = utils.Stubber(requests, "request", self.UrlMock)
    self.requests_stubber.Start()
    self.sleep_stubber = utils.Stubber(time, "sleep", lambda x: None)
    self.sleep_stubber.Start()
    self.messages = []
    ca_enroller.enrolment_cache.Flush()
    # Response to send back to clients.
    self.server_response = dict(
        session_id="aff4:/W:session", name="Echo", response_id=2)
def CreateNewServerCommunicator(self):
    """Create the server-side communicator and prime its client cache."""
    self.server_communicator = front_end.ServerCommunicator(
        certificate=self.server_certificate,
        private_key=self.server_private_key,
        token=self.token)
    self.server_communicator.client_cache.Put(self.client_cn, self.client)
def tearDown(self):
    # Undo the stubs and config overrides installed in setUp.
    self.requests_stubber.Stop()
    self.out_queue_overrider.Stop()
    self.config_stubber.Stop()
    self.sleep_stubber.Stop()
    super(HTTPClientTests, self).tearDown()
def CreateClientCommunicator(self):
    """Build the HTTP client under test with a disabled nanny worker."""
    self.client_communicator = comms.GRRHTTPClient(
        ca_cert=config.CONFIG["CA.certificate"],
        worker_cls=worker_mocks.DisabledNannyClientWorker)
def CreateNewClientObject(self):
    """Create a fresh client communicator with preloaded server certs."""
    self.CreateClientCommunicator()
    # Disable stats collection for tests.
    self.client_communicator.client_worker.last_stats_sent_time = (
        time.time() + 3600)
    # Build a client context with preloaded server certificates
    self.client_communicator.communicator.LoadServerCertificate(
        self.server_certificate, config.CONFIG["CA.certificate"])
    self.client_communicator.http_manager.retry_error_limit = 5
def UrlMock(self, num_messages=10, url=None, data=None, **kwargs):
    """A mock for url handler processing from the server's POV."""
    # Certificate fetches are served directly without decryption.
    if "server.pem" in url:
        return MakeResponse(200,
                            utils.SmartStr(config.CONFIG["Frontend.certificate"]))
    _ = kwargs
    try:
        comms_cls = rdf_flows.ClientCommunication
        self.client_communication = comms_cls.FromSerializedString(data)
        # Decrypt incoming messages
        self.messages, source, ts = self.server_communicator.DecodeMessages(
            self.client_communication)
        # Make sure the messages are correct
        self.assertEqual(source, self.client_cn)
        for i, message in enumerate(self.messages):
            # Do not check any status messages.
            if message.request_id:
                self.assertEqual(message.response_id, i)
                self.assertEqual(message.request_id, 1)
                self.assertEqual(message.session_id, "aff4:/W:session")
        # Now prepare a response
        response_comms = rdf_flows.ClientCommunication()
        message_list = rdf_flows.MessageList()
        for i in range(0, num_messages):
            message_list.job.Append(request_id=i, **self.server_response)
        # Preserve the timestamp as a nonce
        self.server_communicator.EncodeMessages(
            message_list,
            response_comms,
            destination=source,
            timestamp=ts,
            api_version=self.client_communication.api_version)
        return MakeResponse(200, response_comms.SerializeToString())
    except communicator.UnknownClientCert:
        # Unknown client certs are reported as HTTP 406 (enrollment needed).
        raise MakeHTTPException(406)
    except Exception as e:
        logging.info("Exception in mock urllib2.Open: %s.", e)
        self.last_urlmock_error = e
        if flags.FLAGS.debug:
            pdb.post_mortem()
        raise MakeHTTPException(500)
def CheckClientQueue(self):
    """Checks that the client context received all server messages."""
    # Check the incoming messages
    self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 10)
    for i, message in enumerate(
            self.client_communicator.client_worker._in_queue):
        # This is the common name embedded in the certificate.
        self.assertEqual(message.source, "aff4:/GRR Test Server")
        self.assertEqual(message.response_id, 2)
        self.assertEqual(message.request_id, i)
        self.assertEqual(message.session_id, "aff4:/W:session")
        self.assertEqual(message.auth_state,
                         rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
    # Clear the queue
    self.client_communicator.client_worker._in_queue = []
def SendToServer(self):
"""Schedule some packets from client to server."""
# Generate some client traffic
for i in range(0, | |
# File: codes/dataops/batchaug.py
import random
import numpy as np
import torch
from torch.nn import functional as F
class BatchAugment:
    """Mixture-of-augmentations helper driven by a training-options dict.

    Reads the augmentation pool and its per-option probability/alpha values
    from ``train_opt`` (with sensible defaults) and delegates the actual
    work to ``BatchAug`` on call.
    """

    def __init__(self, train_opt):
        # Each entry: (option key, default used when absent from train_opt).
        option_defaults = (
            ("mixopts", ["blend", "rgb", "mixup", "cutmix",
                         "cutmixup", "cutout"]),  # , "cutblur"]
            ("mixprob", [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),  # , 1.0]
            ("mixalpha", [0.6, 1.0, 1.2, 0.7, 0.7, 0.001]),  # , 0.7]
            ("aux_mixprob", 1.0),
            ("aux_mixalpha", 1.2),
            ("mix_p", None),
        )
        for key, default in option_defaults:
            setattr(self, key, train_opt.get(key, default))
        # Filled in by __call__ with the last applied augmentation/mask.
        self.aug = None
        self.mask = None

    def __call__(self, img1, img2):
        """Apply one randomly chosen augmentation to the pair.

        Args:
            img1: the target image.
            img2: the input image.
        """
        out1, out2, mask, chosen = BatchAug(
            img1, img2, self.mixopts, self.mixprob, self.mixalpha,
            self.aux_mixprob, self.aux_mixalpha, self.mix_p)
        self.mask, self.aug = mask, chosen
        return out1, out2

    def apply_mask(self, img1, img2):
        """Discard cutout-ed pixels from the loss by masking both images.

        Args:
            img1: the generated image.
            img2: the target image.
        """
        if self.aug == "cutout":
            img1, img2 = img1 * self.mask, img2 * self.mask
        return img1, img2
def BatchAug(img1, img2, options, probs, alphas,
             aux_prob=None, aux_alpha=None, mix_p=None):
    """ Mixture of Batch Augmentations (MoA)
    Randomly selects a single augmentation from the augmentation pool
    and applies it to the batch.

    Args:
        img1: target images batch.
        img2: input images batch.
        options: list of augmentation names to sample from.
        probs: per-option application probability.
        alphas: per-option alpha (strength) parameter.
        aux_prob, aux_alpha: mixup parameters, used only by "cutmixup".
        mix_p: optional sampling distribution over options.
    Returns:
        (img1_aug, img2_aug, mask, aug): the augmented pair, the cutout
        mask (None unless aug == "cutout") and the chosen augmentation name.

    Note: most of these augmentations require batch size > 1
    References:
        https://github.com/facebookresearch/mixup-cifar10/blob/master/train.py
        https://github.com/kakaobrain/fast-autoaugment/blob/master/FastAutoAugment/aug_mixup.py
        https://github.com/clovaai/CutMix-PyTorch/blob/master/train.py
        https://github.com/hysts/pytorch_cutmix/blob/master/cutmix.py
        https://github.com/clovaai/cutblur/blob/master/augments.py
    """
    idx = np.random.choice(len(options), p=mix_p)
    aug = options[idx]
    prob = float(probs[idx])
    alpha = float(alphas[idx])
    mask = None
    # Inputs are cloned so the caller's tensors are never mutated in place.
    if aug == "none":
        img1_aug, img2_aug = img1.clone(), img2.clone()
    elif aug == "blend":
        img1_aug, img2_aug = blend(
            img1.clone(), img2.clone(), prob=prob, alpha=alpha)
    elif aug == "rgb":
        img1_aug, img2_aug = rgb(
            img1.clone(), img2.clone(), prob=prob)
    elif aug == "mixup":
        img1_aug, img2_aug = mixup(
            img1.clone(), img2.clone(), prob=prob, alpha=alpha)
    elif aug == "cutout":
        img1_aug, img2_aug, mask, _ = cutout(
            img1.clone(), img2.clone(), prob=prob, alpha=alpha)
    elif aug == "cutmix":
        img1_aug, img2_aug = cutmix(
            img1.clone(), img2.clone(), prob=prob, alpha=alpha)
    elif aug == "cutmixup":
        img1_aug, img2_aug = cutmixup(
            img1.clone(), img2.clone(),
            mixup_prob=aux_prob, mixup_alpha=aux_alpha,
            cutmix_prob=prob, cutmix_alpha=alpha)
    elif aug == "cutblur":
        img1_aug, img2_aug = cutblur(
            img1.clone(), img2.clone(), prob=prob, alpha=alpha)
    else:
        # Bug fix: message previously read "{} is not invalid." (inverted).
        raise ValueError("{} is not a valid augmentation.".format(aug))
    return img1_aug, img2_aug, mask, aug
def blend(img1, img2, prob=1.0, alpha=0.6):
    """Blend each image with a per-sample random solid color.

    The color v = (v1, v2, v3) is drawn per batch element and the mixing
    ratio is sampled from Unif(alpha, 1). Returns the inputs unchanged when
    alpha <= 0 or the probability check fails.
    """
    if alpha <= 0 or random.random() >= prob:
        return img1, img2
    h1, w1 = img1.shape[2:]
    h2, w2 = img2.shape[2:]
    # One random RGB color per batch element, tiled to each image size.
    color = torch.empty((img2.size(0), 3, 1, 1),
                        device=img2.device).uniform_(0, 1.0)
    tint1 = color.repeat((1, 1, h1, w1))
    tint2 = color.repeat((1, 1, h2, w2))
    ratio = np.random.uniform(alpha, 1)
    blended1 = ratio * img1 + (1 - ratio) * tint1
    blended2 = ratio * img2 + (1 - ratio) * tint2
    return blended1, blended2
def rgb(img1, img2, prob=1.0):
    """Apply one random channel permutation to both images.

    The same permutation is used for img1 and img2 so the pair stays
    consistent. Returns the inputs unchanged when the probability check
    fails.
    """
    if random.random() >= prob:
        return img1, img2
    order = np.random.permutation(img2.shape[1])
    return img1[:, order], img2[:, order]
def mixup(img1, img2, prob=1.0, alpha=1.2):
    """ Blend two randomly selected images.

    I0 = lam * Ii + (1 - lam) * Ij, where lam ~ Beta(alpha, alpha).
    From: "mixup: Beyond empirical risk minimization.
    arXiv preprint arXiv:1710.09412, 2017"

    Args:
        img1: targets (labels, images, etc)
        img2: input images tensor (in batch > 1)
        prob: probability of applying the augmentation.
        alpha: used to calculate the random lambda (lam) combination
            ratio from the beta distribution; <= 0 disables the augmentation.
    Returns mixed inputs and mixed targets:
        img1: the random mixed image target
        img2: the result of mixing a random image in the batch with
            the other images, selected with the random index "r_index"
    """
    if alpha <= 0 or random.random() >= prob:
        return img1, img2
    # (Dead commented-out sampling code removed; behavior unchanged.)
    lam = np.random.beta(alpha, alpha)
    # Random pairing of batch elements; same pairing for inputs and targets.
    r_index = torch.randperm(img1.size(0)).to(img2.device)
    img1 = lam * img1 + (1 - lam) * img1[r_index, :]
    img2 = lam * img2 + (1 - lam) * img2[r_index, :]
    return img1, img2
#TODO: no longer used in cutmix, but can be repurposed elsewhere
def rand_bbox(size, lam):
    """Sample a random box covering roughly (1 - lam) of the image area.

    Args:
        size: tensor size tuple; size[2] and size[3] are used as W and H.
        lam: mixing ratio in [0, 1]; larger lam -> smaller box.
    Returns:
        (bbx1, bby1, bbx2, bby2): box corners, clipped to the image bounds
        (so the actual area may be smaller near the borders).
    """
    W = size[2]
    H = size[3]
    # image_h, image_w = data.shape[2:]
    cut_rat = np.sqrt(1. - lam)
    # Bug fix: np.int was removed in NumPy 1.24; the builtin int() is the
    # exact equivalent (np.int was an alias for it).
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # uniform
    cx = np.random.randint(W)  # cx = np.random.uniform(0, image_w)
    cy = np.random.randint(H)  # cy = np.random.uniform(0, image_h)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
def _cutmix(img2, prob=1.0, alpha=1.0):
if alpha <= 0 or random.random() >= prob:
return None
cut_ratio = np.random.randn() * 0.01 + alpha
h, w = img2.shape[2:]
ch, cw = np.int(h * cut_ratio), np.int(w * cut_ratio)
fcy = np.random.randint(0, h-ch+1)
fcx = np.random.randint(0, w-cw+1)
tcy, tcx = fcy, fcx
r_index = torch.randperm(img2.size(0)).to(img2.device)
return {
"r_index": r_index, "ch": ch, "cw": cw,
"tcy": tcy, "tcx": tcx, "fcy": fcy, "fcx": fcx,
}
def cutmix(img1, img2, prob=1.0, alpha=1.0):
    """ Replace a randomly selected square-shape region with the
    corresponding sub-patch from another image in the batch:
    rx = Unif(0,W), rw = lam*W, where lam ~ N(alpha, 0.01)
    (same for ry and rh).
    From: "Cutmix: Regularization strategy to train strong classifiers
    with localizable features. arXiv preprint arXiv:1905.04899, 2019"

    Args:
        img1: targets (labels, images, etc); may be ``scale`` times larger
            than img2 spatially (e.g. HR vs LR pairs).
        img2: input images tensor (in batch > 1).
        prob, alpha: see _cutmix.
    Returns:
        The pair with the sampled patch swapped in from a shuffled batch
        ordering (both tensors are modified in place).
    """
    geom = _cutmix(img2, prob, alpha)
    if geom is None:
        return img1, img2
    # img1 coordinates are scaled up when img1 is higher resolution.
    scale = img1.size(2) // img2.size(2)
    r_index = geom["r_index"]
    ch, cw = geom["ch"], geom["cw"]
    tcy, tcx = geom["tcy"], geom["tcx"]
    fcy, fcx = geom["fcy"], geom["fcx"]
    hch, hcw = ch * scale, cw * scale
    hfcy, hfcx = fcy * scale, fcx * scale
    htcy, htcx = tcy * scale, tcx * scale
    img1[..., htcy:htcy + hch, htcx:htcx + hcw] = \
        img1[r_index, :, hfcy:hfcy + hch, hfcx:hfcx + hcw]
    img2[..., tcy:tcy + ch, tcx:tcx + cw] = \
        img2[r_index, :, fcy:fcy + ch, fcx:fcx + cw]
    return img1, img2
def cutmixup(img1, img2, mixup_prob=1.0, mixup_alpha=1.0,
             cutmix_prob=1.0, cutmix_alpha=1.0):  # (α1 / α2) -> 0.7 / 1.2
    """ CutMix with the Mixup-ed image.
    CutMix and Mixup procedure use hyper-parameter α1 and α2 respectively.

    Args:
        img1: target images batch; may be a scale factor larger than img2.
        img2: input images batch.
        mixup_prob, mixup_alpha: probability and Beta parameter for mixup.
        cutmix_prob, cutmix_alpha: probability and Beta parameter for cutmix.
    Returns:
        The augmented (img1, img2) pair.
    """
    c = _cutmix(img2, cutmix_prob, cutmix_alpha)
    if c is None:
        return img1, img2
    # img1 coordinates are scaled up when img1 is higher resolution than img2.
    scale = img1.size(2) // img2.size(2)
    r_index, ch, cw = c["r_index"], c["ch"], c["cw"]
    tcy, tcx, fcy, fcx = c["tcy"], c["tcx"], c["fcy"], c["fcx"]
    hch, hcw = ch * scale, cw * scale
    hfcy, hfcx, htcy, htcx = (
        fcy * scale, fcx * scale, tcy * scale, tcx * scale)
    # NOTE(review): the Beta sample is always drawn even when the mixup
    # branch below is skipped — presumably to keep RNG consumption fixed;
    # confirm before reordering.
    v = np.random.beta(mixup_alpha, mixup_alpha)
    if mixup_alpha <= 0 or random.random() >= mixup_prob:
        # Mixup disabled: the "aug" pair is just the shuffled batch.
        img1_aug = img1[r_index, :]
        img2_aug = img2[r_index, :]
    else:
        img1_aug = v * img1 + (1-v) * img1[r_index, :]
        img2_aug = v * img2 + (1-v) * img2[r_index, :]
    # apply mixup to inside or outside
    if np.random.random() > 0.5:
        # Paste the mixed patch into the original images (in place).
        img1[..., htcy:htcy+hch, htcx:htcx+hcw] = img1_aug[..., hfcy:hfcy+hch, hfcx:hfcx+hcw]
        img2[..., tcy:tcy+ch, tcx:tcx+cw] = img2_aug[..., fcy:fcy+ch, fcx:fcx+cw]
    else:
        # Paste the original patch into the mixed images and return those.
        img1_aug[..., htcy:htcy+hch, htcx:htcx+hcw] = img1[..., hfcy:hfcy+hch, hfcx:hfcx+hcw]
        img2_aug[..., tcy:tcy+ch, tcx:tcx+cw] = img2[..., fcy:fcy+ch, fcx:fcx+cw]
        img2, img1 = img2_aug, img1_aug
    return img1, img2
def cutblur(img1, img2, prob=1.0, alpha=1.0):
""" Perform CutMix with same image but different resolution,
producing xHR→LR (HR patch pasted into LR) and xLR→HR (LR patch
pasted into HR). Randomly choose x from [xHR→LR, xLR→HR],
to one as input of the network.
Returns the modified LR image (img2) and unchanged target HR.
From: "<NAME> and <NAME> and <NAME>. Rethinking
Data Augmentation for Image Super-resolution: A Comprehensive
Analysis and a New Strategy. arXiv:2004.00448"
"""
if img1.size() != img2.size():
raise ValueError("img1 and img2 have to be the same resolution.")
if alpha <= 0 or random.random() >= prob:
return img1, img2
cut_ratio = np.random.randn() * 0.01 + alpha
h, w = img2.size(2), img2.size(3)
ch, cw = np.int(h*cut_ratio), np.int(w*cut_ratio)
cy | |
* rank
I_minus_s2XTAcorrXL_LAMBDA_LT = np.identity(n_C) \
- np.dot(s2XTAcorrXL_LAMBDA, L.T)
# dimension: n_grid * condition * condition
# The step above may be calculated by einsum. Not sure
# which is faster.
weight_grad = result_exp / result_sum
weight_grad_over_denominator = weight_grad / denominator
# dimension: n_grid * space
weighted_sXTAcorrY = sXTAcorrY \
* weight_grad_over_denominator[:, None, :]
# dimension: n_grid * condition * space
# sYTAcorrXL_LAMBDA = np.einsum('ijk,ijl->ikl', sXTAcorrY, L_LAMBDA)
# dimension: n_grid * space * rank
grad_L = np.zeros([n_C, rank])
for grid in range(n_grid):
grad_L += np.dot(
np.dot(I_minus_s2XTAcorrXL_LAMBDA_LT[grid, :, :],
sXTAcorrY[grid, :, :]),
np.dot(weighted_sXTAcorrY[grid, :, :].T,
L_LAMBDA[grid, :, :])) * (n_T - n_X0 - 2)
grad_L -= np.sum(s2XTAcorrXL_LAMBDA
* np.sum(weight_grad, axis=1)[:, None, None],
axis=0)
# dimension: condition * rank
return -LL_total, -grad_L[l_idx]
def _check_data_GBRSA(self, X, for_fit=True):
# Check input data
if type(X) is np.ndarray:
X = [X]
assert type(X) is list, 'Input data X must be either a list '\
'with each entry for one participant, or a numpy arrary '\
'for single participant.'
if for_fit:
for i, x in enumerate(X):
assert_all_finite(x)
assert x.ndim == 2, 'Each participants'' data should be ' \
'2 dimension ndarray'
assert np.all(np.std(x, axis=0) > 0),\
'The time courses of some voxels in participant {} '\
'do not change at all. Please make sure all voxels '\
'are within the brain'.format(i)
else:
for i, x in enumerate(X):
if x is not None:
assert x.ndim == 2, 'Each participants'' data should be ' \
'2 dimension ndarray'
assert x.shape[1] == self.n_V_[i], 'Number of voxels '\
'does not match that in the data used for fitting: '\
'subject {}'.format(i)
# This program allows to fit a single subject. But to have a consistent
# data structure, we make sure X and design are both lists.
return X
    def _check_design_GBRSA(self, design, X):
        """Validate design matrices and normalize them to a per-subject list.

        A single ndarray is replicated for all subjects (with a warning when
        there are several). Each subject's matrix must be finite, 2-D, full
        column rank, match the data's time dimension, have at least
        ``self.rank`` columns, and share the same number of columns across
        subjects. Subjects with None data are skipped. Returns the design
        list.
        """
        # check design matrix
        if type(design) is np.ndarray:
            # One matrix shared by all subjects.
            design = [design] * len(X)
            if len(X) > 1:
                logger.warning('There are multiple subjects while '
                               'there is only one design matrix. '
                               'I assume that the design matrix '
                               'is shared across all subjects.')
        assert type(design) is list, 'design matrix must be either a list '\
            'with each entry for one participant, or an numpy arrary '\
            'for single participant.'
        for i, d in enumerate(design):
            if X[i] is not None:
                assert_all_finite(d)
                assert d.ndim == 2,\
                    'The design matrix should be 2 dimension ndarray'
                assert np.linalg.matrix_rank(d) == d.shape[1], \
                    'Your design matrix of subject {} has rank ' \
                    'smaller than the number of columns. Some columns '\
                    'can be explained by linear combination of other columns.'\
                    'Please check your design matrix.'.format(i)
                assert np.size(d, axis=0) == np.size(X[i], axis=0),\
                    'Design matrix and data of subject {} do not '\
                    'have the same number of time points.'.format(i)
                assert self.rank is None or self.rank <= d.shape[1],\
                    'Your design matrix of subject {} '\
                    'has fewer columns than the rank you set'.format(i)
                # All subjects must share one set of conditions; the first
                # checked subject defines the expected column count n_C.
                if i == 0:
                    n_C = np.shape(d)[1]
                else:
                    assert n_C == np.shape(d)[1], \
                        'In Group Bayesian RSA, all subjects should have'\
                        ' the same set of experiment conditions, t'\
                        'hus the same number of columns in design matrix'
                if X[i].shape[1] <= d.shape[1]:
                    # Not fatal, but the fit may be ill-conditioned.
                    logger.warning('Your data have fewer voxels than the '
                                   'number of task conditions. This might '
                                   'cause problem in fitting. Please consider '
                                   'increasing the size of your ROI, or set '
                                   'the rank parameter to a lower number to '
                                   'estimate a low-rank representational '
                                   'structure.')
        return design
def _check_nuisance_GBRSA(sef, nuisance, X):
# Check the nuisance regressors.
if nuisance is not None:
if type(nuisance) is np.ndarray:
nuisance = [nuisance] * len(X)
if len(X) > 1:
logger.warning('ATTENTION! There are multiple subjects '
'while there is only one nuisance matrix. '
'I assume that the nuisance matrix '
'is shared across all subjects. '
'Please double check.')
assert type(nuisance) is list, \
'nuisance matrix must be either a list '\
'with each entry for one participant, or an numpy arrary '\
'for single participant.'
for i, n in enumerate(nuisance):
assert_all_finite(n)
if n is not None:
assert n.ndim == 2,\
'The nuisance regressor should be '\
'2 dimension ndarray or None'
assert np.linalg.matrix_rank(n) == n.shape[1], \
'The nuisance regressor of subject {} has rank '\
'smaller than the number of columns.'\
'Some columns can be explained by linear '\
'combination of other columns. Please check your' \
' nuisance regressors.'.format(i)
assert np.size(n, axis=0) == np.size(X[i], axis=0), \
'Nuisance regressor and data do not have the same '\
'number of time points.'
else:
nuisance = [None] * len(X)
logger.info('None was provided for nuisance matrix. Replicating '
'it for all subjects.')
return nuisance
def _check_scan_onsets_GBRSA(self, scan_onsets, X):
# check scan_onsets validity
if scan_onsets is None or type(scan_onsets) is np.ndarray:
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
scan_onsets = [scan_onsets] * len(X)
if len(X) > 1:
logger.warning('There are multiple subjects while '
'there is only one set of scan_onsets. '
'I assume that it is the same for all'
' subjects. Please double check')
for i in np.arange(len(scan_onsets)):
if X[i] is not None:
if scan_onsets[i] is None:
scan_onsets[i] = np.array([0], dtype=int)
logger.warning('No scan onsets were provided for subject'
' {}. Treating all data of this subject as'
' coming from the same run.')
else:
scan_onsets[i] = np.int32(scan_onsets[i])
assert (np.max(scan_onsets[i]) <= X[i].shape[0]
and np.min(scan_onsets[i]) >= 0
and 0 in scan_onsets[i]
and scan_onsets[i].ndim == 1), \
'Scan onsets of subject {} has formatting ' \
'issues: {}'.format(i, scan_onsets[i])
return scan_onsets
def _bin_exp(self, n_bin, scale=1.0):
""" Calculate the bin locations to approximate exponential distribution.
It breaks the cumulative probability of exponential distribution
into n_bin equal bins, each covering 1 / n_bin probability. Then it
calculates the center of mass in each bins and returns the
centers of mass. So, it approximates the exponential distribution
with n_bin of Delta function weighted by 1 / n_bin, at the
locations of these centers of mass.
Parameters:
-----------
n_bin: int
The number of bins to approximate the exponential distribution
scale: float, default: 1.0
The scale parameter of the exponential distribution, defined in
the same way as scipy.stats. It does not influence the ratios
between the bins, but just controls the spacing between the bins.
So generally users should not change its default.
Returns:
--------
bins: numpy array of size [n_bin,]
The centers of mass for each segment of the
exponential distribution.
"""
boundaries = np.flip(scipy.stats.expon.isf(
np.linspace(0, 1, n_bin + 1),
scale=scale), axis=0)
bins = np.empty(n_bin)
for i in np.arange(n_bin):
bins[i] = utils.center_mass_exp(
(boundaries[i], boundaries[i + 1]), scale=scale)
return bins
def _set_SNR_grids(self):
""" Set the grids and weights for SNR used in numerical integration
of SNR parameters.
"""
if self.SNR_prior == 'unif':
SNR_grids = np.linspace(0, 1, self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / (self.SNR_bins - 1)
SNR_weights[0] = SNR_weights[0] / 2.0
SNR_weights[-1] = SNR_weights[-1] / 2.0
elif self.SNR_prior == 'lognorm':
dist = scipy.stats.lognorm
alphas = np.arange(np.mod(self.SNR_bins, 2),
self.SNR_bins + 2, 2) / self.SNR_bins
# The goal here is to divide the area under the pdf curve
# to segments representing equal probabilities.
bounds = dist.interval(alphas, (self.logS_range,))
bounds = np.unique(bounds)
# bounds contain the boundaries which equally separate
# the probability mass of the distribution
SNR_grids = np.zeros(self.SNR_bins)
for i in np.arange(self.SNR_bins):
SNR_grids[i] = dist.expect(
lambda x: x, args=(self.logS_range,),
lb=bounds[i], ub=bounds[i + 1]) * self.SNR_bins
# Center of mass of each segment between consecutive
# bounds are set as the grids for SNR.
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
else: # SNR_prior == 'exp'
SNR_grids = self._bin_exp(self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
SNR_weights = SNR_weights / np.sum(SNR_weights)
return SNR_grids, SNR_weights
def _set_rho_grids(self):
""" Set the grids and weights for rho used in numerical integration
of AR(1) parameters.
"""
rho_grids = np.arange(self.rho_bins) * 2 / self.rho_bins - 1 \
+ 1 / self.rho_bins
rho_weights = np.ones(self.rho_bins) / self.rho_bins
return rho_grids, rho_weights
def _matrix_flattened_grid(self, X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX,
YTAcorrY_diag, XTAcorrY, X0TAY, XTAX0,
n_C, n_V, n_X0, n_grid):
""" We need to | |
<gh_stars>0
import os
import sys
import subprocess
import multiprocessing
from joblib import Parallel, delayed
import shutil
import pathlib
import pandas as pd
import math
import EUKulele
from EUKulele.tax_placement import place_taxonomy
from EUKulele.visualize_results import visualize_all_results
from scripts.mag_stats import magStats
# Probe the available system memory (GB) by parsing the output of `free -m`.
# NOTE(review): this retries until a non-zero reading appears, which can spin
# forever on systems without `free`; kept as-is to preserve behavior.
MEM_AVAIL_GB = 0
while MEM_AVAIL_GB == 0:
    try:
        os.system("free -m > free.csv")
        # Raw string for the regex separator (avoids the invalid "\s" escape).
        MEM_AVAIL_GB = pd.read_csv("free.csv", sep = r"\s+").free[0] / 10**3
    except Exception:
        # A bare `except:` also swallowed KeyboardInterrupt/SystemExit,
        # making the retry loop uninterruptible; catch ordinary errors only.
        pass
# 25 GB memory per GB file size
# add a parameter that changes between the requirement per file size (reducing to 10 GB for now)
# also add a parameter to EUKulele that decides whether you use 100% of available memory and scales
# MEM_AVAIL_GB by that amount (default to 75%)
def calc_max_jobs(num_files, size_in_bytes = 2147483648, max_mem_per_proc = 10, perc_mem = 0.75):
    """Estimate how many parallel jobs fit within the detected memory.

    Each process is assumed to require ``max_mem_per_proc`` GB per GB of
    input file, and only the fraction ``perc_mem`` of the module-level
    MEM_AVAIL_GB may be used in total.

    Parameters
    ----------
    num_files : int
        Number of files processed concurrently.
    size_in_bytes : int, default 2 GiB
        Size of a representative input file (zero is treated as 0.01 GB).
    max_mem_per_proc : float, default 10
        GB of memory required per GB of file size per process.
    perc_mem : float, default 0.75
        Fraction of available memory allowed to be used.

    Returns
    -------
    int
        The job bound, never less than 1.
    """
    gb = size_in_bytes / (1024 * 1024 * 1024)
    if gb == 0:
        gb = 0.01  # guard against zero-size files
    budget = MEM_AVAIL_GB * perc_mem
    per_job = max_mem_per_proc * gb * num_files
    return max(1, math.floor(budget / per_job))
# Default parallelism, computed at import time for a single ~2 GiB file and
# clamped so at least one job can always run.
MAX_JOBS = max(1, calc_max_jobs(1))
# For DIAMOND: The program can be expected to use roughly six times this number of memory (in GB).
# So for the default value of -b2.0, the memory usage will be about 12 GB.
# So we want alignment to have -b6.0
def manageEukulele(piece, mets_or_mags = "", samples = [], database_dir = "",
                   output_dir = "", ref_fasta = "", alignment_choice = "diamond",
                   rerun_rules = False, cutoff_file = "", sample_dir = "", nt_ext = "", pep_ext = "",
                   consensus_cutoff = 0.75, tax_tab = "", prot_tab = "", use_salmon_counts = False,
                   names_to_reads = "", alignment_res = "", filter_metric = "evalue",
                   run_transdecoder = False, transdecoder_orf_size = 100, perc_mem = 0.75):
    """
    This function diverts management tasks to the below helper functions.

    ``piece`` names the pipeline step to run. Steps that produce results
    ("get_samples", "align_to_db", "core_align_to_db") return them; any
    unrecognized step name terminates the program with exit code 1.
    """
    if piece == "setup_eukulele":
        setupEukulele(output_dir)
    elif piece == "setup_databases":
        createAlignmentDatabase(ref_fasta, rerun_rules, output_dir, alignment_choice, database_dir)
    elif piece == "get_samples":
        return getSamples(mets_or_mags, sample_dir, nt_ext, pep_ext)
    elif piece == "transdecode":
        if mets_or_mags == "mets":
            # BUGFIX: forward the caller's transdecoder_orf_size; it was
            # previously hard-coded to 100, silently ignoring the argument.
            manageTrandecode(samples, output_dir, rerun_rules, sample_dir,
                             mets_or_mags = "mets",
                             transdecoder_orf_size = transdecoder_orf_size,
                             nt_ext = "." + nt_ext.strip('.'), pep_ext = "." + pep_ext.strip('.'),
                             run_transdecoder = run_transdecoder, perc_mem = perc_mem)
    elif piece == "align_to_db":
        return manageAlignment(alignment_choice, samples, filter_metric, output_dir, ref_fasta,
                               mets_or_mags, database_dir, sample_dir, rerun_rules, nt_ext, pep_ext, core = "full",
                               perc_mem = perc_mem)
    elif piece == "estimate_taxonomy":
        manageTaxEstimation(output_dir, mets_or_mags, tax_tab, cutoff_file, consensus_cutoff,
                            prot_tab, use_salmon_counts, names_to_reads, alignment_res,
                            rerun_rules, samples, sample_dir, pep_ext, nt_ext, perc_mem)
    elif piece == "visualize_taxonomy":
        manageTaxVisualization(output_dir, mets_or_mags, sample_dir, pep_ext, nt_ext,
                               use_salmon_counts, rerun_rules)
    elif piece == "assign_taxonomy":
        manageTaxAssignment(samples, mets_or_mags, output_dir, sample_dir, pep_ext, core = False)
    elif piece == "core_align_to_db":
        alignment_res = manageAlignment(alignment_choice, samples, filter_metric, output_dir, ref_fasta,
                                        mets_or_mags, database_dir, sample_dir, rerun_rules, nt_ext, pep_ext, core = "core",
                                        perc_mem = perc_mem)
        # Drop samples whose alignment produced no result.
        alignment_res = [curr for curr in alignment_res if curr != ""]
        return alignment_res
    elif piece == "core_estimate_taxonomy":
        manageCoreTaxEstimation(output_dir, mets_or_mags, tax_tab, cutoff_file, consensus_cutoff,
                                prot_tab, use_salmon_counts, names_to_reads, alignment_res,
                                rerun_rules, samples, sample_dir, pep_ext, nt_ext, perc_mem)
    elif piece == "core_visualize_taxonomy":
        manageCoreTaxVisualization(output_dir, mets_or_mags, sample_dir, pep_ext, nt_ext,
                                   use_salmon_counts, rerun_rules, core = True)
    elif piece == "core_assign_taxonomy":
        manageTaxAssignment(samples, mets_or_mags, output_dir, sample_dir, pep_ext, core = True)
    else:
        print("Not a supported management function.")
        sys.exit(1)
def getSamples(mets_or_mags, sample_dir, nt_ext, pep_ext):
    """
    Return the sample names (file stems) found in the sample folder.

    For metatranscriptomes ("mets") both nucleotide- and peptide-extension
    files are accepted (deduplicated); otherwise only peptide files count.
    Exits the program with an error message when nothing matches.
    """
    def stems_with(ext):
        # File name minus its final extension, for files ending in `ext`.
        return [".".join(fname.split(".")[:-1])
                for fname in os.listdir(sample_dir)
                if fname.split(".")[-1] == ext]

    if mets_or_mags == "mets":
        samples = list(set(stems_with(nt_ext) + stems_with(pep_ext)))
        print(samples)
        if not samples:
            print("No samples found in sample directory with specified nucleotide or peptide extension.")
            sys.exit(1)
    else:
        samples = stems_with(pep_ext)
        if not samples:
            print("No samples found in sample directory with specified peptide extension.")
            sys.exit(1)
    return samples
def transdecodeToPeptide(sample_name, output_dir, rerun_rules, sample_dir,
                         mets_or_mags = "mets", transdecoder_orf_size = 100,
                         nt_ext = ".fasta", pep_ext = ".faa", run_transdecoder = False):
    """
    Use TransDecoder to convert input nucleotide metatranscriptomic sequences to peptide sequences.

    Runs TransDecoder.LongOrfs then TransDecoder.Predict on
    <sample_dir>/<sample_name><nt_ext> and moves the resulting .pep/.cds/
    .gff3/.bed files under <output_dir>/<mets_or_mags>[/transdecoder].
    Work is skipped when run_transdecoder is False, when output already
    exists, or when a peptide file was supplied directly (unless
    rerun_rules forces a re-run). Returns 0 on success or skip; exits the
    program if the input file is missing or TransDecoder fails.
    """
    if (not run_transdecoder):
        return 0
    print("Running TransDecoder for sample " + str(sample_name) + "...", flush = True)
    os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder"))
    # Output already present from a previous run: nothing to do.
    if (os.path.isfile(os.path.join(output_dir, mets_or_mags,
                                    sample_name + pep_ext))) & (not rerun_rules):
        print("TransDecoder file already detected for sample " +
              str(sample_name) + "; will not re-run step.", flush = True)
        return 0
    # A peptide file was provided directly: copy it instead of predicting.
    elif (os.path.isfile(os.path.join(sample_dir, sample_name + pep_ext))) & (not rerun_rules):
        print("Protein files detected for sample in sample directory; " +
              "will not TransDecode.", flush = True)
        os.system("cp " + os.path.join(sample_dir, sample_name + pep_ext) + " " +
                  os.path.join(output_dir, mets_or_mags, sample_name + pep_ext))
        return 0
    TD_log = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".log"), "w+")
    TD_err = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".err"), "w+")
    if (not os.path.isfile(os.path.join(sample_dir, sample_name + nt_ext))):
        print("File: " + os.path.join(sample_dir, sample_name + nt_ext) + " was called by TransDecoder and "
              "does not exist. Check for typos.")
        sys.exit(1)
    # Step 1: extract long open reading frames from the nucleotide input.
    rc1 = subprocess.Popen(["TransDecoder.LongOrfs", "-t", os.path.join(sample_dir, sample_name + nt_ext),
                            "-m", str(transdecoder_orf_size)], stdout = TD_log, stderr = TD_err).wait()
    TD_log.close()
    TD_err.close()
    TD_log = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".log"), "w+")
    TD_err = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".err"), "w+")
    # Step 2: predict likely coding regions from the extracted ORFs.
    rc2 = subprocess.Popen(["TransDecoder.Predict", "-t", os.path.join(sample_dir, sample_name + nt_ext),
                            "--no_refine_starts"], stdout = TD_log, stderr = TD_err).wait()
    #rc2 = p2.returncode
    TD_log.close()
    TD_err.close()
    if (rc1 + rc2) != 0:
        print("TransDecoder did not complete successfully for sample " +
              str(sample_name) + ". Check <output_dir>/log/ folder for details.")
        sys.exit(1)
    merged_name = sample_name + nt_ext
    os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags))
    os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder"))
    # TransDecoder writes to the current directory; move its outputs into
    # the output tree.
    os.replace(merged_name + ".transdecoder.pep", os.path.join(output_dir, mets_or_mags,
                                                               sample_name + pep_ext))
    os.replace(merged_name + ".transdecoder.cds", os.path.join(output_dir, mets_or_mags,
                                                               "transdecoder", sample_name +
                                                               ".fasta.transdecoder.cds"))
    os.replace(merged_name + ".transdecoder.gff3", os.path.join(output_dir, mets_or_mags,
                                                                "transdecoder", sample_name +
                                                                ".fasta.transdecoder.gff3"))
    os.replace(merged_name + ".transdecoder.bed", os.path.join(output_dir, mets_or_mags,
                                                               "transdecoder", sample_name +
                                                               ".fasta.transdecoder.bed"))
    #shutil.rmtree
    os.system("rm -rf " + merged_name + "*.transdecoder_dir*")
    return rc1 + rc2
def manageTrandecode(met_samples, output_dir, rerun_rules, sample_dir,
                     mets_or_mags = "mets", transdecoder_orf_size = 100,
                     nt_ext = "fasta", pep_ext = ".faa", run_transdecoder = False, perc_mem = 0.75):
    """
    Now for some TransDecoding - a manager for TransDecoder steps.

    Runs transdecodeToPeptide over all MET samples in parallel. The job
    count is bounded by CPU count, sample count, and a per-sample memory
    estimate from calc_max_jobs. Exits the program if any TransDecoder
    invocation returned a non-zero code.
    """
    if (not run_transdecoder):
        return 0
    print("Running TransDecoder for MET samples...", flush = True)
    MAX_JOBS = 1
    # Memory-based job bound per existing sample file (48 GB per GB of input).
    MAX_JOBS_SAMPS = [calc_max_jobs(len(met_samples),
                                    pathlib.Path(os.path.join(sample_dir, sample + nt_ext)).stat().st_size,
                                    max_mem_per_proc = 48, perc_mem = perc_mem)
                      for sample in met_samples
                      if os.path.isfile(os.path.join(sample_dir, sample + nt_ext))]
    if len(MAX_JOBS_SAMPS) > 0:
        # FIX: reuse the list computed above; it was previously recomputed
        # verbatim inside min(), doubling the stat() calls for no benefit.
        MAX_JOBS = min(MAX_JOBS_SAMPS)
    n_jobs_align = min(multiprocessing.cpu_count(), len(met_samples), max(1, MAX_JOBS))
    # BUGFIX: forward transdecoder_orf_size to the worker; it was previously
    # hard-coded to 100, silently ignoring this function's own argument.
    transdecoder_res = Parallel(n_jobs=n_jobs_align)(
        delayed(transdecodeToPeptide)(sample_name, output_dir,
                                      rerun_rules, sample_dir,
                                      mets_or_mags = "mets",
                                      transdecoder_orf_size = transdecoder_orf_size,
                                      nt_ext = nt_ext, pep_ext = pep_ext,
                                      run_transdecoder = run_transdecoder)
        for sample_name in met_samples)
    all_codes = sum(transdecoder_res)
    os.system("rm -f pipeliner*")
    if all_codes > 0:
        print("TransDecoder did not complete successfully; check log folder for details.")
        sys.exit(1)
    #rcodes = [os.remove(curr) for curr in glob.glob("pipeliner*")]
def setupEukulele(output_dir):
    """
    Create the output/log directories and install external dependencies.

    Returns 0 on success and 1 when the dependency installer script failed
    (its stdout/stderr are captured under <output_dir>/log/).
    """
    print("Setting things up...")
    # FIX: os.makedirs is robust to spaces/special characters in output_dir,
    # unlike shelling out to `mkdir -p` with unquoted string concatenation.
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(os.path.join(output_dir, "log"), exist_ok=True)
    ## Download software dependencies
    rc1 = os.system("install_dependencies.sh references_bins/ " +
                    "1> " + os.path.join(output_dir, "log", "dependency_log.txt") + " 2> " +
                    os.path.join(output_dir, "log", "dependency_err.txt"))
    sys.path.append("references_bins/")
    os.system("echo $PATH > path_test.txt")
    if rc1 != 0:
        print("Could not successfully install all external dependent software.\n" +
              "Check DIAMOND, BLAST, BUSCO, and TransDecoder installation.")
        return 1
    return 0
def manageAlignment(alignment_choice, samples, filter_metric, output_dir, ref_fasta,
mets_or_mags, database_dir, sample_dir, rerun_rules, nt_ext, pep_ext, core = "full",
perc_mem = 0.75):
"""
Manage the multithreaded management of aligning to either BLAST or DIAMOND database.
"""
print("Aligning to reference database...")
if mets_or_mags == "mets":
fastas = []
for sample in samples:
if os.path.isfile(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext)):
fp = open(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext))
for i, line in enumerate(fp):
if i == 2:
chars = set(list(line))
if len(chars) <= 5:
print("Peptide extension used, but this file, " +
str(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext)) +
", does not appear to be a peptide file.")
break
elif i > 2:
fastas.append(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext))
break
fp.close()
elif os.path.isfile(os.path.join(sample_dir, sample + "." + pep_ext)):
fp = open(os.path.join(sample_dir, | |
= h2
size = total_area / npieces
cuts = []
needed = size
return yupper
def func_b0c6a9790ac9463c94555e61e46286c5(npieces, upper, lower):
    """Sweep the polygon bounded by the `upper` and `lower` polylines and
    return the target area of one piece (total area / npieces).

    Both polylines are vertex lists of (x, y) pairs covering the same
    x-range; the sweep advances to the nearer next vertex of either chain
    and accumulates trapezoid areas.
    """
    lo_i = up_i = 0
    x_pos = 0
    left_h = upper[0][1] - lower[0][1]
    right_edge = lower[-1][0]
    area_sum = 0
    while x_pos < right_edge:
        nxt_lo = lower[lo_i + 1]
        nxt_up = upper[up_i + 1]
        if nxt_lo[0] == nxt_up[0]:
            # Both chains reach a vertex at the same x.
            x_new = nxt_lo[0]
            right_h = nxt_up[1] - nxt_lo[1]
            lo_i += 1
            up_i += 1
        elif nxt_lo[0] < nxt_up[0]:
            # Lower chain bends first; interpolate the upper edge.
            x_new = nxt_lo[0]
            t = 1.0 * (x_new - upper[up_i][0]) / (nxt_up[0] - upper[up_i][0])
            y_up = upper[up_i][1] + t * (nxt_up[1] - upper[up_i][1])
            right_h = y_up - nxt_lo[1]
            lo_i += 1
        else:
            # Upper chain bends first; interpolate the lower edge.
            x_new = nxt_up[0]
            t = 1.0 * (x_new - lower[lo_i][0]) / (nxt_lo[0] - lower[lo_i][0])
            y_lo = lower[lo_i][1] + t * (nxt_lo[1] - lower[lo_i][1])
            right_h = nxt_up[1] - y_lo
            up_i += 1
        # Trapezoid area of the vertical strip [x_pos, x_new].
        area_sum += (x_new - x_pos) * (left_h + right_h) / 2.0
        x_pos = x_new
        left_h = right_h
    return area_sum / npieces
def func_3cb29863d41147569feb372bde4bd8c4(npieces, upper, lower):
    """Compute x-positions cutting the polygon bounded by polylines
    `upper` and `lower` into `npieces` equal-area vertical slices.

    Returns the list of npieces - 1 cut positions as soon as they are all
    found; otherwise falls through to the trailing `return xnext` (an
    internal intermediate, kept from the generated variant).
    """
    # BUGFIX: li/ui (indices into the lower/upper vertex chains) were read
    # before assignment, raising NameError on the first loop iteration.
    li = ui = 0
    areas = []
    total_area = 0
    x = 0
    h1 = upper[0][1] - lower[0][1]
    W = lower[-1][0]
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # Both chains have a vertex at the same x.
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < upper[ui + 1][0]:
            # Lower chain bends first; interpolate the upper edge.
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # Upper chain bends first; interpolate the lower edge.
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # Trapezoid area of the strip [x, xnext].
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces
    cuts = []
    needed = size
    for x1, x2, h1, h2, segment_area, total_area in areas:
        segment_remaining = segment_area
        area_from_left = 0
        while segment_remaining >= needed:
            area_from_left += needed
            width = x2 - x1
            if h1 == h2:
                # Rectangular strip: cut offset is linear in the area.
                x = area_from_left / h1
            else:
                # Trapezoid strip: solve the quadratic for the cut offset.
                qa = (h2 - h1) / (2 * width)
                qb = h1
                qc = -area_from_left
                x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
            cuts.append(x1 + x)
            if len(cuts) == npieces - 1:
                return cuts
            segment_remaining -= needed
            needed = size
        needed -= segment_remaining
    return xnext
def func_67fb2f81eb2e4ef986b1ed690769b745(npieces, upper, lower):
    """Compute x-positions cutting the polygon bounded by polylines
    `upper` and `lower` into `npieces` equal-area vertical slices.

    Returns the list of npieces - 1 cut positions as soon as they are all
    found; otherwise falls through to the trailing `return h2` (an internal
    intermediate, kept from the generated variant).
    """
    # BUGFIX: li/ui (indices into the lower/upper vertex chains) were read
    # before assignment, raising NameError on the first loop iteration.
    li = ui = 0
    areas = []
    total_area = 0
    x = 0
    h1 = upper[0][1] - lower[0][1]
    W = lower[-1][0]
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # Both chains have a vertex at the same x.
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < upper[ui + 1][0]:
            # Lower chain bends first; interpolate the upper edge.
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # Upper chain bends first; interpolate the lower edge.
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # Trapezoid area of the strip [x, xnext].
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces
    cuts = []
    needed = size
    for x1, x2, h1, h2, segment_area, total_area in areas:
        segment_remaining = segment_area
        area_from_left = 0
        while segment_remaining >= needed:
            area_from_left += needed
            width = x2 - x1
            if h1 == h2:
                # Rectangular strip: cut offset is linear in the area.
                x = area_from_left / h1
            else:
                # Trapezoid strip: solve the quadratic for the cut offset.
                qa = (h2 - h1) / (2 * width)
                qb = h1
                qc = -area_from_left
                x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
            cuts.append(x1 + x)
            if len(cuts) == npieces - 1:
                return cuts
            segment_remaining -= needed
            needed = size
        needed -= segment_remaining
    return h2
def func_8fb44746ac2c4948b5cdfebdfc742528(npieces, upper, lower):
    """Compute x-positions cutting the polygon bounded by polylines
    `upper` and `lower` into `npieces` equal-area vertical slices.

    Returns the list of npieces - 1 cut positions as soon as they are all
    found; otherwise falls through to the trailing `return areas` (the
    per-strip bookkeeping list, kept from the generated variant).
    """
    # BUGFIX: li/ui (indices into the lower/upper vertex chains) were read
    # before assignment, raising NameError on the first loop iteration.
    li = ui = 0
    areas = []
    total_area = 0
    x = 0
    h1 = upper[0][1] - lower[0][1]
    W = lower[-1][0]
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # Both chains have a vertex at the same x.
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < upper[ui + 1][0]:
            # Lower chain bends first; interpolate the upper edge.
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # Upper chain bends first; interpolate the lower edge.
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # Trapezoid area of the strip [x, xnext].
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces
    cuts = []
    needed = size
    for x1, x2, h1, h2, segment_area, total_area in areas:
        segment_remaining = segment_area
        area_from_left = 0
        while segment_remaining >= needed:
            area_from_left += needed
            width = x2 - x1
            if h1 == h2:
                # Rectangular strip: cut offset is linear in the area.
                x = area_from_left / h1
            else:
                # Trapezoid strip: solve the quadratic for the cut offset.
                qa = (h2 - h1) / (2 * width)
                qb = h1
                qc = -area_from_left
                x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
            cuts.append(x1 + x)
            if len(cuts) == npieces - 1:
                return cuts
            segment_remaining -= needed
            needed = size
        needed -= segment_remaining
    return areas
def func_8e9c0deff516484483b8146a3a1bb0d1(npieces, upper, lower):
    """Compute x-positions cutting the polygon bounded by polylines
    `upper` and `lower` into `npieces` equal-area vertical slices.

    Returns the list of npieces - 1 cut positions as soon as they are all
    found; otherwise falls through to the trailing `return frac` (an
    internal intermediate, kept from the generated variant).
    """
    # BUGFIX: li/ui (indices into the lower/upper vertex chains) were read
    # before assignment, raising NameError on the first loop iteration.
    li = ui = 0
    areas = []
    total_area = 0
    x = 0
    h1 = upper[0][1] - lower[0][1]
    W = lower[-1][0]
    while x < W:
        lnext = lower[li + 1]
        unext = upper[ui + 1]
        if lnext[0] == unext[0]:
            # Both chains have a vertex at the same x.
            xnext = lnext[0]
            h2 = unext[1] - lnext[1]
            li += 1
            ui += 1
        elif lnext[0] < upper[ui + 1][0]:
            # Lower chain bends first; interpolate the upper edge.
            xnext = lnext[0]
            frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
            yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
            h2 = yupper - lnext[1]
            li += 1
        else:
            # Upper chain bends first; interpolate the lower edge.
            xnext = unext[0]
            frac = 1.0 * (xnext - lower[li][0]) / (lnext[0] - lower[li][0])
            ylower = lower[li][1] + frac * (lnext[1] - lower[li][1])
            h2 = unext[1] - ylower
            ui += 1
        # Trapezoid area of the strip [x, xnext].
        da = (xnext - x) * (h1 + h2) / 2.0
        total_area += da
        areas.append((x, xnext, h1, h2, da, total_area))
        x = xnext
        h1 = h2
    size = total_area / npieces
    cuts = []
    needed = size
    for x1, x2, h1, h2, segment_area, total_area in areas:
        segment_remaining = segment_area
        area_from_left = 0
        while segment_remaining >= needed:
            area_from_left += needed
            width = x2 - x1
            if h1 == h2:
                # Rectangular strip: cut offset is linear in the area.
                x = area_from_left / h1
            else:
                # Trapezoid strip: solve the quadratic for the cut offset.
                qa = (h2 - h1) / (2 * width)
                qb = h1
                qc = -area_from_left
                x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
            cuts.append(x1 + x)
            if len(cuts) == npieces - 1:
                return cuts
            segment_remaining -= needed
            needed = size
        needed -= segment_remaining
    return frac
def func_282df07674e04e94a4e7ddb21ff3dd15(npieces, upper, lower):
areas = []
total_area = 0
x = 0
h1 = upper[0][1] - lower[0][1]
W = lower[-1][0]
while x < W:
lnext = lower[li + 1]
unext = upper[ui + 1]
if lnext[0] == unext[0]:
xnext = lnext[0]
h2 = unext[1] - lnext[1]
li += 1
ui += 1
elif lnext[0] < upper[ui + 1][0]:
xnext = lnext[0]
frac = 1.0 * (xnext - upper[ui][0]) / (unext[0] - upper[ui][0])
yupper = upper[ui][1] + frac * (unext[1] - upper[ui][1])
h2 = yupper - lnext[1]
li += 1
else:
xnext = unext[0]
frac = 1.0 | |
    def test_type_list(self, command, version):
        """type-list hits /types and, for servers older than API 2.46 when
        is_default is requested, an extra /types/default call; the requested
        columns are forwarded to the printer.
        """
        self.run_command(command, version=version)
        columns_requested = ['ID', 'Name', 'visibility',
                            'is_default', 'required_extra_specs',
                            'optional_extra_specs', 'Description']
        if 'columns' in command:
            # An explicit --columns list overrides the default column set.
            columns_requested = command.split('--columns ')[1].split(',')
        is_default_in_api = (api_versions.APIVersion(version) >=
                             api_versions.APIVersion('2.46'))
        if not is_default_in_api and 'is_default' in columns_requested:
            # Older APIs need a separate request to learn the default type.
            self.assert_called('GET', '/types/default')
            self.assert_called_anytime('GET', '/types')
        else:
            self.assert_called('GET', '/types')
        cliutils.print_list.assert_called_with(
            mock.ANY, columns_requested, mock.ANY)
    @mock.patch.object(cliutils, 'print_list', mock.Mock())
    def test_type_list_select_column(self):
        """--columns restricts the printed fields to exactly the given names."""
        self.run_command('type-list --columns id,name')
        self.assert_called('GET', '/types')
        cliutils.print_list.assert_called_once_with(
            mock.ANY,
            ['id', 'name'],
            mock.ANY)
    def test_type_list_all(self):
        """--all requests public and private share types (is_public=all)."""
        self.run_command('type-list --all')
        self.assert_called_anytime('GET', '/types?is_public=all')
    @ddt.data(True, False)
    def test_type_create_with_access(self, public):
        """type-create forwards --is-public into the POST /types body."""
        expected = {
            'share_type': {
                'name': 'test-type-3',
                'extra_specs': {
                    'driver_handles_share_servers': False,
                },
                'share_type_access:is_public': public
            }
        }
        self.run_command(
            'type-create test-type-3 false --is-public %s' %
            six.text_type(public))
        self.assert_called('POST', '/types', body=expected)
    def test_type_access_list(self):
        """type-access-list performs a GET on the share_type_access endpoint."""
        self.run_command('type-access-list 3')
        self.assert_called('GET', '/types/3/share_type_access')
    def test_type_access_add_project(self):
        """type-access-add posts an addProjectAccess action for the project."""
        expected = {'addProjectAccess': {'project': '101'}}
        self.run_command('type-access-add 3 101')
        self.assert_called('POST', '/types/3/action', body=expected)
    def test_type_access_remove_project(self):
        """type-access-remove posts a removeProjectAccess action."""
        expected = {'removeProjectAccess': {'project': '101'}}
        self.run_command('type-access-remove 3 101')
        self.assert_called('POST', '/types/3/action', body=expected)
def test_list_filter_by_project_id(self):
aliases = ['--project-id', '--project_id']
for alias in aliases:
for separator in self.separators:
self.run_command('list ' + alias + separator + 'fake_id')
self.assert_called('GET', '/shares/detail?project_id=fake_id')
    @mock.patch.object(cliutils, 'print_list', mock.Mock())
    def test_list_with_public_shares(self):
        """--public adds is_public=True to the listing query and keeps the
        standard column set."""
        listed_fields = [
            'ID',
            'Name',
            'Size',
            'Share Proto',
            'Status',
            'Is Public',
            'Share Type Name',
            'Host',
            'Availability Zone',
            'Project ID'
        ]
        self.run_command('list --public')
        self.assert_called('GET', '/shares/detail?is_public=True')
        cliutils.print_list.assert_called_with(mock.ANY, listed_fields,
                                               sortby_index=None)
    def test_show(self):
        """show fetches the share's detail resource."""
        self.run_command('show 1234')
        self.assert_called_anytime('GET', '/shares/1234')
    def test_share_export_location_list(self):
        """share-export-location-list queries the export_locations endpoint."""
        self.run_command('share-export-location-list 1234')
        self.assert_called_anytime(
            'GET', '/shares/1234/export_locations')
    @mock.patch.object(cliutils, 'print_list', mock.Mock())
    def test_share_export_location_list_with_columns(self):
        """--columns values are title-cased before printing (uuid -> Uuid)."""
        self.run_command('share-export-location-list 1234 --columns uuid,path')
        self.assert_called_anytime(
            'GET', '/shares/1234/export_locations')
        cliutils.print_list.assert_called_once_with(mock.ANY, ['Uuid', 'Path'])
    def test_share_export_location_show(self):
        """share-export-location-show fetches one export location by uuid."""
        self.run_command('share-export-location-show 1234 fake_el_uuid')
        self.assert_called_anytime(
            'GET', '/shares/1234/export_locations/fake_el_uuid')
    @ddt.data({'cmd_args': '--driver_options opt1=opt1 opt2=opt2'
                           ' --share_type fake_share_type',
               'valid_params': {
                   'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
                   'share_type': 'fake_share_type',
                   'share_server_id': None,
               }},
              {'cmd_args': '--share_type fake_share_type',
               'valid_params': {
                   'driver_options': {},
                   'share_type': 'fake_share_type',
                   'share_server_id': None,
               }},
              {'cmd_args': '',
               'valid_params': {
                   'driver_options': {},
                   'share_type': None,
                   'share_server_id': None,
               }},
              {'cmd_args': '--public',
               'valid_params': {
                   'driver_options': {},
                   'share_type': None,
                   'share_server_id': None,
               },
               'is_public': True,
               'version': '--os-share-api-version 2.8',
               },
              {'cmd_args': '',
               'valid_params': {
                   'driver_options': {},
                   'share_type': None,
                   'share_server_id': None,
               },
               'is_public': False,
               'version': '--os-share-api-version 2.8',
               },
              {'cmd_args': '--driver_options opt1=opt1 opt2=opt2'
                           ' --share_type fake_share_type',
               'valid_params': {
                   'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
                   'share_type': 'fake_share_type',
                   'share_server_id': None,
               },
               'version': '--os-share-api-version 2.49',
               },
              {'cmd_args': '--driver_options opt1=opt1 opt2=opt2'
                           ' --share_type fake_share_type'
                           ' --share_server_id fake_server',
               'valid_params': {
                   'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
                   'share_type': 'fake_share_type',
                   'share_server_id': 'fake_server',
               },
               'version': '--os-share-api-version 2.49',
               },
              {'cmd_args': '--driver_options opt1=opt1 opt2=opt2'
                           ' --share_type fake_share_type'
                           ' --share_server_id fake_server',
               'valid_params': {
                   'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
                   'share_type': 'fake_share_type',
                   'share_server_id': 'fake_server',
               }},
              )
    @ddt.unpack
    def test_manage(self, cmd_args, valid_params, is_public=False,
                    version=None):
        """manage builds the expected POST /shares/manage body from the CLI
        arguments across API versions (driver options, share type,
        share_server_id, publicity)."""
        if version is not None:
            # Prefix the command with an explicit API-version override.
            self.run_command(version
                             + ' manage fake_service fake_protocol '
                             + ' fake_export_path '
                             + cmd_args)
        else:
            self.run_command(' manage fake_service fake_protocol '
                             + ' fake_export_path '
                             + cmd_args)
        expected = {
            'share': {
                'service_host': 'fake_service',
                'protocol': 'fake_protocol',
                'export_path': 'fake_export_path',
                'name': None,
                'description': None,
                'is_public': is_public,
                'share_server_id': valid_params['share_server_id'],
            }
        }
        # Merge per-case parameters (driver_options, share_type, ...).
        expected['share'].update(valid_params)
        self.assert_called('POST', '/shares/manage', body=expected)
def test_manage_invalid_param_share_server_id(self):
    """``manage --share_server_id`` must be rejected before API 2.49."""
    cmd = (
        '--os-share-api-version 2.48'
        ' manage fake_service fake_protocol '
        ' fake_export_path '
        ' --driver_options opt1=opt1 opt2=opt2'
        ' --share_type fake_share_type'
        ' --share_server_id fake_server'
    )
    self.assertRaises(exceptions.CommandError, self.run_command, cmd)
def test_share_server_manage_unsupported_version(self):
    """``share-server-manage`` is unavailable below API version 2.49."""
    cmd = ('--os-share-api-version 2.48 '
           'share-server-manage fake_host fake_share_net_id fake_id')
    self.assertRaises(exceptions.UnsupportedVersion, self.run_command, cmd)
def test_share_server_manage_invalid_param_subnet_id(self):
    """``--share-network-subnet`` is not accepted at API version 2.49."""
    cmd = ('--os-share-api-version 2.49 '
           'share-server-manage fake_host fake_share_net_id fake_id '
           '--share-network-subnet fake_subnet_id')
    self.assertRaises(exceptions.CommandError, self.run_command, cmd)
@ddt.data({'driver_args': '--driver_options opt1=opt1 opt2=opt2',
           'valid_params': {
               'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
           }},
          {'driver_args': '--driver_options opt1=opt1 opt2=opt2',
           'subnet_id': 'fake_subnet_1',
           'valid_params': {
               'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
           }},
          {'driver_args': '--driver_options opt1=opt1 opt2=opt2',
           'valid_params': {
               'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
           },
           'version': '2.51',
           },
          {'driver_args': '--driver_options opt1=opt1 opt2=opt2',
           'subnet_id': 'fake_subnet_1',
           'valid_params': {
               'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
           },
           'version': '2.51',
           },
          {'driver_args': "",
           'valid_params': {
               'driver_options': {}
           },
           'version': '2.51',
           },
          {'driver_args': '--driver_options opt1=opt1 opt2=opt2',
           'valid_params': {
               'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
           },
           'version': '2.49',
           },
          {'driver_args': '',
           'valid_params': {
               'driver_options': {},
           },
           'network_id': 'fake_network_id',
           'version': '2.49',
           },
          {'driver_args': "",
           'valid_params': {
               'driver_options': {}
           },
           'version': '2.49',
           },
          )
@ddt.unpack
def test_share_server_manage(self, driver_args, valid_params,
                             version=None, network_id=None,
                             subnet_id=None):
    """``share-server-manage`` posts the expected /share-servers/manage body.

    Each ddt case provides the driver-options CLI fragment
    (``driver_args``) and the parsed parameters expected in the request
    body (``valid_params``), plus optional API version, share network id
    and share network subnet id.
    """
    # share_network_subnet_id is only part of the body from API 2.51
    # onward; version=None means "latest", which supports it.
    subnet_support = (version is None or
                      api_versions.APIVersion(version) >=
                      api_versions.APIVersion('2.51'))
    network_id = '3456' if network_id is None else network_id
    fake_share_network = type(
        'FakeShareNetwork', (object,), {'id': network_id})
    self.mock_object(
        shell_v2, '_find_share_network',
        mock.Mock(return_value=fake_share_network))
    command = ('share-server-manage '
               '%(host)s '
               '%(share_network_id)s '
               '%(identifier)s '
               '%(driver_args)s ' % {
                   'host': 'fake_host',
                   'share_network_id': fake_share_network.id,
                   'identifier': '88-as-23-f3-45',
                   'driver_args': driver_args,
               })
    command += '--share-network-subnet %s' % subnet_id if subnet_id else ''
    self.run_command(command, version=version)
    # The parsed driver_options dict is supplied via valid_params below.
    # (Previously 'driver_options' was seeded here with the raw CLI
    # string and silently overwritten by update(); the placeholder was
    # dead and misleading, so it has been removed.)
    expected = {
        'share_server': {
            'host': 'fake_host',
            'share_network_id': fake_share_network.id,
            'identifier': '88-as-23-f3-45',
        }
    }
    if subnet_support:
        expected['share_server']['share_network_subnet_id'] = subnet_id
    expected['share_server'].update(valid_params)
    self.assert_called('POST', '/share-servers/manage', body=expected)
@ddt.data(constants.STATUS_ERROR, constants.STATUS_ACTIVE,
          constants.STATUS_MANAGE_ERROR, constants.STATUS_UNMANAGE_ERROR,
          constants.STATUS_DELETING, constants.STATUS_CREATING)
def test_share_server_reset_state(self, status):
    """Resetting a share server's state posts a reset_status action."""
    self.run_command(
        'share-server-reset-state 1234 --state ' + status + ' ')
    body = {'reset_status': {'status': status}}
    self.assert_called('POST', '/share-servers/1234/action', body=body)
def test_unmanage(self):
    """Unmanaging a share posts to the share's action endpoint."""
    command = 'unmanage 1234'
    self.run_command(command)
    self.assert_called('POST', '/shares/1234/action')
def test_share_server_unmanage(self):
    """Default share-server unmanage sends force=False."""
    self.run_command('share-server-unmanage 1234')
    expected_body = {'unmanage': {'force': False}}
    self.assert_called('POST', '/share-servers/1234/action',
                       body=expected_body)
def test_share_server_unmanage_force(self):
    """--force flips the force flag in the unmanage action body."""
    self.run_command('share-server-unmanage 1234 --force')
    expected_body = {'unmanage': {'force': True}}
    self.assert_called('POST', '/share-servers/1234/action',
                       body=expected_body)
@ddt.data({'cmd_args': '--driver_options opt1=opt1 opt2=opt2',
           'valid_params': {
               'driver_options': {'opt1': 'opt1', 'opt2': 'opt2'},
           }},
          {'cmd_args': '',
           'valid_params': {
               'driver_options': {},
           }},
          )
@ddt.unpack
@mock.patch.object(shell_v2, '_find_share', mock.Mock())
def test_snapshot_manage(self, cmd_args, valid_params):
    """snapshot-manage posts the expected /snapshots/manage body."""
    shell_v2._find_share.return_value = 'fake_share'
    command = ('snapshot-manage fake_share fake_provider_location '
               + cmd_args)
    self.run_command(command)
    body = {
        'snapshot': {
            'share_id': 'fake_share',
            'provider_location': 'fake_provider_location',
            'name': None,
            'description': None,
        }
    }
    # valid_params contributes the parsed driver_options.
    body['snapshot'].update(valid_params)
    self.assert_called('POST', '/snapshots/manage', body=body)
def test_snapshot_unmanage(self):
    """snapshot-unmanage posts an unmanage action with a null payload."""
    self.run_command('snapshot-unmanage 1234')
    expected_body = {'unmanage': None}
    self.assert_called('POST', '/snapshots/1234/action',
                       body=expected_body)
def test_revert_to_snapshot(self):
    """revert-to-snapshot posts a revert action on the parent share."""
    snapshot = type(
        'FakeShareSnapshot', (object,), {'id': '5678', 'share_id': '1234'})
    self.mock_object(
        shell_v2, '_find_share_snapshot',
        mock.Mock(return_value=snapshot))
    self.run_command('revert-to-snapshot 5678')
    expected_body = {'revert': {'snapshot_id': '5678'}}
    self.assert_called('POST', '/shares/1234/action', body=expected_body)
def test_delete(self):
    """Deleting a share issues DELETE on its resource URL."""
    command = 'delete 1234'
    self.run_command(command)
    self.assert_called('DELETE', '/shares/1234')
@ddt.data(
    '--group sg1313', '--share-group sg1313', '--share_group sg1313')
@mock.patch.object(shell_v2, '_find_share_group', mock.Mock())
def test_delete_with_share_group(self, sg_cmd):
    """delete accepts every share-group alias and forwards the group id."""
    group_id = sg_cmd.split()[-1]
    fake_sg = type('FakeShareGroup', (object,), {'id': group_id})
    shell_v2._find_share_group.return_value = fake_sg
    self.run_command('delete 1234 ' + sg_cmd)
    self.assert_called('DELETE', '/shares/1234?share_group_id=sg1313')
    self.assertTrue(shell_v2._find_share_group.called)
def test_delete_not_found(self):
    """Deleting an unknown share raises CommandError."""
    self.assertRaises(exceptions.CommandError, self.run_command,
                      'delete fake-not-found')
def test_list_snapshots(self):
    """snapshot-list hits the detailed snapshots endpoint."""
    command = 'snapshot-list'
    self.run_command(command)
    self.assert_called('GET', '/snapshots/detail')
@mock.patch.object(cliutils, 'print_list', mock.Mock())
def test_snapshot_list_select_column(self):
    """--columns restricts the printed snapshot columns."""
    self.run_command('snapshot-list --columns id,name')
    self.assert_called('GET', '/snapshots/detail')
    wanted_columns = ['Id', 'Name']
    cliutils.print_list.assert_called_once_with(
        mock.ANY, wanted_columns, sortby_index=None)
@mock.patch.object(cliutils, 'print_list', mock.Mock())
def test_list_snapshots_all_tenants_only_key(self):
    """Bare --all-tenants implies all_tenants=1 with default columns."""
    self.run_command('snapshot-list --all-tenants')
    self.assert_called('GET', '/snapshots/detail?all_tenants=1')
    default_columns = ['ID', 'Share ID', 'Status', 'Name', 'Share Size',
                       'Project ID']
    cliutils.print_list.assert_called_once_with(
        mock.ANY, default_columns, sortby_index=None)
def test_list_snapshots_all_tenants_key_and_value_1(self):
    """--all-tenants=1 (any separator) requests all tenants."""
    for sep in self.separators:
        self.run_command('snapshot-list --all-tenants%s1' % sep)
        self.assert_called('GET', '/snapshots/detail?all_tenants=1')
def test_list_snapshots_all_tenants_key_and_value_0(self):
    """--all-tenants=0 (any separator) keeps the default tenant scope."""
    for sep in self.separators:
        self.run_command('snapshot-list --all-tenants%s0' % sep)
        self.assert_called('GET', '/snapshots/detail')
def test_list_snapshots_filter_by_name(self):
    """--name (any separator) becomes a name query parameter."""
    for sep in self.separators:
        self.run_command('snapshot-list --name%s1234' % sep)
        self.assert_called('GET', '/snapshots/detail?name=1234')
def test_list_snapshots_filter_by_status(self):
    """--status (any separator) becomes a status query parameter."""
    for sep in self.separators:
        self.run_command('snapshot-list --status%s1234' % sep)
        self.assert_called('GET', '/snapshots/detail?status=1234')
def test_list_snapshots_filter_by_share_id(self):
    """Both share-id aliases (any separator) filter by share_id."""
    for alias in ('--share_id', '--share-id'):
        for sep in self.separators:
            self.run_command('snapshot-list %s%s1234' % (alias, sep))
            self.assert_called('GET', '/snapshots/detail?share_id=1234')
def test_list_snapshots_only_used(self):
    """--usage=used filters for snapshots that are in use."""
    for sep in self.separators:
        self.run_command('snapshot-list --usage%sused' % sep)
        self.assert_called('GET', '/snapshots/detail?usage=used')
def test_list_snapshots_only_unused(self):
    """--usage=unused filters for snapshots that are not in use."""
    for sep in self.separators:
        self.run_command('snapshot-list --usage%sunused' % sep)
        self.assert_called('GET', '/snapshots/detail?usage=unused')
def test_list_snapshots_any(self):
    """--usage=any lists snapshots regardless of usage."""
    for sep in self.separators:
        self.run_command('snapshot-list --usage%sany' % sep)
        self.assert_called('GET', '/snapshots/detail?usage=any')
def test_list_snapshots_with_limit(self):
    """--limit is forwarded as a pagination query parameter."""
    for sep in self.separators:
        self.run_command('snapshot-list --limit%s50' % sep)
        self.assert_called('GET', '/snapshots/detail?limit=50')
def test_list_snapshots_with_offset(self):
    """--offset is forwarded as a pagination query parameter."""
    for sep in self.separators:
        self.run_command('snapshot-list --offset%s50' % sep)
        self.assert_called('GET', '/snapshots/detail?offset=50')
def test_list_snapshots_filter_by_inexact_name(self):
    """--name~ performs an inexact (substring) name match."""
    for sep in self.separators:
        self.run_command('snapshot-list --name~%sfake_name' % sep)
        self.assert_called('GET', '/snapshots/detail?name~=fake_name')
def test_list_snapshots_filter_by_inexact_description(self):
    """--description~ performs an inexact description match."""
    for sep in self.separators:
        self.run_command(
            'snapshot-list --description~%sfake_description' % sep)
        self.assert_called(
            'GET', '/snapshots/detail?description~=fake_description')
def test_list_snapshots_filter_by_inexact_unicode_name(self):
    """Unicode values for --name~ are URL-encoded in the query string."""
    for sep in self.separators:
        self.run_command('snapshot-list --name~%s' % sep + u'ффф')
        self.assert_called(
            'GET', '/snapshots/detail?name~=%D1%84%D1%84%D1%84')
def test_list_snapshots_filter_by_inexact_unicode_description(self):
    """Unicode values for --description~ are URL-encoded in the query."""
    for sep in self.separators:
        self.run_command('snapshot-list --description~%s' % sep + u'ффф')
        self.assert_called(
            'GET', '/snapshots/detail?description~=%D1%84%D1%84%D1%84')
def test_list_snapshots_with_sort_dir_verify_keys(self):
    """Every valid sort direction is accepted via either alias."""
    for alias in ('--sort_dir', '--sort-dir'):
        for key in constants.SORT_DIR_VALUES:
            for sep in self.separators:
                self.run_command(
                    'snapshot-list %s%s%s' % (alias, sep, key))
                self.assert_called(
                    'GET', '/snapshots/detail?sort_dir=' + key)
def test_list_snapshots_with_fake_sort_dir(self):
    """An unknown sort direction is rejected with ValueError."""
    self.assertRaises(ValueError, self.run_command,
                      'snapshot-list --sort-dir fake_sort_dir')
def test_list_snapshots_with_sort_key_verify_keys(self):
    """Every valid snapshot sort key is accepted via either alias."""
    for alias in ('--sort_key', '--sort-key'):
        for key in constants.SNAPSHOT_SORT_KEY_VALUES:
            for sep in self.separators:
                self.run_command(
                    'snapshot-list %s%s%s' % (alias, sep, key))
                self.assert_called(
                    'GET', '/snapshots/detail?sort_key=' + key)
def test_list_snapshots_with_fake_sort_key(self):
    """An unknown sort key is rejected with ValueError."""
    self.assertRaises(ValueError, self.run_command,
                      'snapshot-list --sort-key fake_sort_key')
@mock.patch.object(cliutils, 'print_list', mock.Mock())
def test_extra_specs_list(self):
    """extra-specs-list queries all types and prints the spec columns."""
    self.run_command('extra-specs-list')
    self.assert_called('GET', '/types?is_public=all')
    expected_columns = ['ID', 'Name', 'all_extra_specs']
    cliutils.print_list.assert_called_once_with(
        mock.ANY, expected_columns, mock.ANY)
@mock.patch.object(cliutils, 'print_list', mock.Mock())
def test_extra_specs_list_select_column(self):
    """--columns restricts which extra-spec columns are printed."""
    self.run_command('extra-specs-list --columns id,name')
    self.assert_called('GET', '/types?is_public=all')
    expected_columns = ['id', 'name']
    cliutils.print_list.assert_called_once_with(
        mock.ANY, expected_columns, mock.ANY)
@ddt.data('fake', 'FFFalse', 'trueee')
def test_type_create_invalid_dhss_value(self, value):
    """type-create rejects non-boolean dhss strings."""
    self.assertRaises(exceptions.CommandError, self.run_command,
                      'type-create test ' + value)
@ddt.data('True', 'False')
def test_type_create_duplicate_dhss(self, value):
    """Passing dhss both positionally and as an extra-spec is an error."""
    command = ('type-create test ' + value +
               ' --extra-specs driver_handles_share_servers=' + value)
    self.assertRaises(exceptions.CommandError, self.run_command, command)
@ddt.data(*itertools.product(
['snapshot_support', 'create_share_from_snapshot_support'],
| |
rootNode.CREATE )
self.obj40=MT_pre__Feature(self)
self.obj40.isGraphObjectVisual = True
if(hasattr(self.obj40, '_setHierarchicalLink')):
self.obj40._setHierarchicalLink(False)
# MT_label__
self.obj40.MT_label__.setValue('4')
# MT_pivotOut__
self.obj40.MT_pivotOut__.setValue('')
self.obj40.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj40.MT_subtypeMatching__.setValue(('True', 0))
self.obj40.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj40.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj40.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj40.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj40.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj40.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj40.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj40.MT_pivotIn__.setValue('')
self.obj40.MT_pivotIn__.setNone()
self.obj40.graphClass_= graph_MT_pre__Feature
if self.genGraphics:
new_obj = graph_MT_pre__Feature(520.0,200.0,self.obj40)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__Feature", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj40.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj40)
self.globalAndLocalPostcondition(self.obj40, rootNode)
self.obj40.postAction( rootNode.CREATE )
self.obj51=MT_pre__Feature(self)
self.obj51.isGraphObjectVisual = True
if(hasattr(self.obj51, '_setHierarchicalLink')):
self.obj51._setHierarchicalLink(False)
# MT_label__
self.obj51.MT_label__.setValue('14')
# MT_pivotOut__
self.obj51.MT_pivotOut__.setValue('')
self.obj51.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj51.MT_subtypeMatching__.setValue(('True', 0))
self.obj51.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj51.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj51.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj51.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj51.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj51.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj51.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj51.MT_pivotIn__.setValue('')
self.obj51.MT_pivotIn__.setNone()
self.obj51.graphClass_= graph_MT_pre__Feature
if self.genGraphics:
new_obj = graph_MT_pre__Feature(520.0,280.0,self.obj51)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__Feature", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj51.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj51)
self.globalAndLocalPostcondition(self.obj51, rootNode)
self.obj51.postAction( rootNode.CREATE )
self.obj41=MT_pre__MatchModel(self)
self.obj41.isGraphObjectVisual = True
if(hasattr(self.obj41, '_setHierarchicalLink')):
self.obj41._setHierarchicalLink(False)
# MT_label__
self.obj41.MT_label__.setValue('5')
# MT_pivotOut__
self.obj41.MT_pivotOut__.setValue('')
self.obj41.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj41.MT_subtypeMatching__.setValue(('True', 0))
self.obj41.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj41.MT_pivotIn__.setValue('')
self.obj41.MT_pivotIn__.setNone()
self.obj41.graphClass_= graph_MT_pre__MatchModel
if self.genGraphics:
new_obj = graph_MT_pre__MatchModel(200.0,100.0,self.obj41)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__MatchModel", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj41.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj41)
self.globalAndLocalPostcondition(self.obj41, rootNode)
self.obj41.postAction( rootNode.CREATE )
self.obj48=MT_pre__ApplyModel(self)
self.obj48.isGraphObjectVisual = True
if(hasattr(self.obj48, '_setHierarchicalLink')):
self.obj48._setHierarchicalLink(False)
# MT_label__
self.obj48.MT_label__.setValue('11')
# MT_pivotOut__
self.obj48.MT_pivotOut__.setValue('')
self.obj48.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj48.MT_subtypeMatching__.setValue(('True', 0))
self.obj48.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj48.MT_pivotIn__.setValue('')
self.obj48.MT_pivotIn__.setNone()
self.obj48.graphClass_= graph_MT_pre__ApplyModel
if self.genGraphics:
new_obj = graph_MT_pre__ApplyModel(200.0,380.0,self.obj48)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__ApplyModel", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj48.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj48)
self.globalAndLocalPostcondition(self.obj48, rootNode)
self.obj48.postAction( rootNode.CREATE )
self.obj42=MT_pre__match_contains(self)
self.obj42.isGraphObjectVisual = True
if(hasattr(self.obj42, '_setHierarchicalLink')):
self.obj42._setHierarchicalLink(False)
# MT_label__
self.obj42.MT_label__.setValue('8')
# MT_pivotOut__
self.obj42.MT_pivotOut__.setValue('')
self.obj42.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj42.MT_subtypeMatching__.setValue(('True', 0))
self.obj42.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj42.MT_pivotIn__.setValue('')
self.obj42.MT_pivotIn__.setNone()
self.obj42.graphClass_= graph_MT_pre__match_contains
if self.genGraphics:
new_obj = graph_MT_pre__match_contains(280.5,191.5,self.obj42)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__match_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj42.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj42)
self.globalAndLocalPostcondition(self.obj42, rootNode)
self.obj42.postAction( rootNode.CREATE )
self.obj43=MT_pre__match_contains(self)
self.obj43.isGraphObjectVisual = True
if(hasattr(self.obj43, '_setHierarchicalLink')):
self.obj43._setHierarchicalLink(False)
# MT_label__
self.obj43.MT_label__.setValue('9')
# MT_pivotOut__
self.obj43.MT_pivotOut__.setValue('')
self.obj43.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj43.MT_subtypeMatching__.setValue(('True', 0))
self.obj43.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj43.MT_pivotIn__.setValue('')
self.obj43.MT_pivotIn__.setNone()
self.obj43.graphClass_= graph_MT_pre__match_contains
if self.genGraphics:
new_obj = graph_MT_pre__match_contains(360.5,191.5,self.obj43)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__match_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj43.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj43)
self.globalAndLocalPostcondition(self.obj43, rootNode)
self.obj43.postAction( rootNode.CREATE )
self.obj44=MT_pre__match_contains(self)
self.obj44.isGraphObjectVisual = True
if(hasattr(self.obj44, '_setHierarchicalLink')):
self.obj44._setHierarchicalLink(False)
# MT_label__
self.obj44.MT_label__.setValue('10')
# MT_pivotOut__
self.obj44.MT_pivotOut__.setValue('')
self.obj44.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj44.MT_subtypeMatching__.setValue(('True', 0))
self.obj44.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj44.MT_pivotIn__.setValue('')
self.obj44.MT_pivotIn__.setNone()
self.obj44.graphClass_= graph_MT_pre__match_contains
if self.genGraphics:
new_obj = graph_MT_pre__match_contains(440.5,191.5,self.obj44)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__match_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj44.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj44)
self.globalAndLocalPostcondition(self.obj44, rootNode)
self.obj44.postAction( rootNode.CREATE )
self.obj64=MT_pre__apply_contains(self)
self.obj64.isGraphObjectVisual = True
if(hasattr(self.obj64, '_setHierarchicalLink')):
self.obj64._setHierarchicalLink(False)
# MT_label__
self.obj64.MT_label__.setValue('17')
# MT_pivotOut__
self.obj64.MT_pivotOut__.setValue('')
self.obj64.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj64.MT_subtypeMatching__.setValue(('True', 0))
self.obj64.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj64.MT_pivotIn__.setValue('')
self.obj64.MT_pivotIn__.setNone()
self.obj64.graphClass_= graph_MT_pre__apply_contains
if self.genGraphics:
new_obj = graph_MT_pre__apply_contains(282.5,371.5,self.obj64)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__apply_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj64.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj64)
self.globalAndLocalPostcondition(self.obj64, rootNode)
self.obj64.postAction( rootNode.CREATE )
self.obj65=MT_pre__apply_contains(self)
self.obj65.isGraphObjectVisual = True
if(hasattr(self.obj65, '_setHierarchicalLink')):
self.obj65._setHierarchicalLink(False)
# MT_label__
self.obj65.MT_label__.setValue('18')
# MT_pivotOut__
self.obj65.MT_pivotOut__.setValue('')
self.obj65.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj65.MT_subtypeMatching__.setValue(('True', 0))
self.obj65.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj65.MT_pivotIn__.setValue('')
self.obj65.MT_pivotIn__.setNone()
self.obj65.graphClass_= graph_MT_pre__apply_contains
if self.genGraphics:
new_obj = graph_MT_pre__apply_contains(362.5,371.5,self.obj65)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__apply_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj65.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj65)
self.globalAndLocalPostcondition(self.obj65, rootNode)
self.obj65.postAction( rootNode.CREATE )
self.obj66=MT_pre__apply_contains(self)
self.obj66.isGraphObjectVisual = True
if(hasattr(self.obj66, '_setHierarchicalLink')):
self.obj66._setHierarchicalLink(False)
# MT_label__
self.obj66.MT_label__.setValue('19')
# MT_pivotOut__
self.obj66.MT_pivotOut__.setValue('')
self.obj66.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj66.MT_subtypeMatching__.setValue(('True', 0))
self.obj66.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj66.MT_pivotIn__.setValue('')
self.obj66.MT_pivotIn__.setNone()
self.obj66.graphClass_= graph_MT_pre__apply_contains
if self.genGraphics:
new_obj = graph_MT_pre__apply_contains(442.5,371.5,self.obj66)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__apply_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj66.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj66)
self.globalAndLocalPostcondition(self.obj66, rootNode)
self.obj66.postAction( rootNode.CREATE )
self.obj52=MT_pre__directLink_T(self)
self.obj52.isGraphObjectVisual = True
if(hasattr(self.obj52, '_setHierarchicalLink')):
self.obj52._setHierarchicalLink(False)
# MT_label__
self.obj52.MT_label__.setValue('15')
# MT_pivotOut__
self.obj52.MT_pivotOut__.setValue('')
self.obj52.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj52.MT_subtypeMatching__.setValue(('True', 0))
self.obj52.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj52.MT_pivotIn__.setValue('')
self.obj52.MT_pivotIn__.setNone()
# MT_pre__associationType
self.obj52.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn attr_value == "entities"\n')
self.obj52.MT_pre__associationType.setHeight(15)
self.obj52.graphClass_= graph_MT_pre__directLink_T
if self.genGraphics:
new_obj = graph_MT_pre__directLink_T(360.0,322.0,self.obj52)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__directLink_T", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj52.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj52)
self.globalAndLocalPostcondition(self.obj52, rootNode)
self.obj52.postAction( rootNode.CREATE )
self.obj53=MT_pre__directLink_T(self)
self.obj53.isGraphObjectVisual = True
if(hasattr(self.obj53, '_setHierarchicalLink')):
self.obj53._setHierarchicalLink(False)
# MT_label__
self.obj53.MT_label__.setValue('16')
# MT_pivotOut__
self.obj53.MT_pivotOut__.setValue('')
self.obj53.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj53.MT_subtypeMatching__.setValue(('True', 0))
self.obj53.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj53.MT_pivotIn__.setValue('')
self.obj53.MT_pivotIn__.setNone()
# MT_pre__associationType
self.obj53.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn | |
"not in [0,1)")
if (feedback_end_frac < 0):
raise ValueError(
f"feedback_end_frac = {feedback_end_frac} not > 0")
# If feedback_start_frac exceeds feedback_end_frac, then
# there's no range of the flux ramp cycle over which we're
# applying feedback.
if (feedback_end_frac < feedback_start_frac):
raise ValueError(
f"feedback_end_frac = {feedback_end_frac} " +
"is not less than " +
f"feedback_start_frac = {feedback_start_frac}")
# Done validating feedbackStart and feedbackEnd
## End argument validation
##
if not flux_ramp:
self.log('WARNING: THIS WILL NOT TURN ON FLUX RAMP!')
if make_plot:
if show_plot:
plt.ion()
else:
plt.ioff()
if fraction_full_scale is None:
fraction_full_scale = self._fraction_full_scale
else:
self.fraction_full_scale = fraction_full_scale
# Measure either LMS freq or the flux ramp amplitude
if lms_freq_hz is None:
if meas_lms_freq and meas_flux_ramp_amp:
self.log('Requested measurement of both LMS freq '+
'and flux ramp amplitude. Cannot do both.',
self.LOG_ERROR)
return None, None, None
elif meas_lms_freq:
# attempts to measure the flux ramp frequency and leave the
# flux ramp amplitude the same
lms_freq_hz = self.estimate_lms_freq(
band, reset_rate_khz,
fraction_full_scale=fraction_full_scale,
channel=channel)
elif meas_flux_ramp_amp:
# attempts to measure the the number of phi0 and adjust
# the ampltidue of the flux ramp to achieve the desired number
# of phi0 per flux ramp
fraction_full_scale = self.estimate_flux_ramp_amp(band,
n_phi0,reset_rate_khz=reset_rate_khz, channel=channel)
lms_freq_hz = reset_rate_khz * n_phi0 * 1.0E3
else:
# Load from config
lms_freq_hz = self._lms_freq_hz[band]
self._lms_freq_hz[band] = lms_freq_hz
if write_log:
self.log('Using lms_freq_estimator : ' +
f'{lms_freq_hz:.0f} Hz')
if not flux_ramp:
lms_enable1 = 0
lms_enable2 = 0
lms_enable3 = 0
if write_log:
self.log("Using lmsFreqHz = " +
f"{lms_freq_hz:.0f} Hz",
self.LOG_USER)
self.set_feedback_gain(band, feedback_gain, write_log=write_log)
self.set_lms_gain(band, lms_gain, write_log=write_log)
self.set_lms_enable1(band, lms_enable1, write_log=write_log)
self.set_lms_enable2(band, lms_enable2, write_log=write_log)
self.set_lms_enable3(band, lms_enable3, write_log=write_log)
self.set_lms_freq_hz(band, lms_freq_hz, write_log=write_log)
iq_stream_enable = 0 # must be zero to access f,df stream
self.set_iq_stream_enable(band, iq_stream_enable, write_log=write_log)
if setup_flux_ramp:
self.flux_ramp_setup(reset_rate_khz, fraction_full_scale,
write_log=write_log, new_epics_root=new_epics_root)
else:
self.log("Not changing flux ramp status. Use setup_flux_ramp " +
"boolean to run flux_ramp_setup")
# Doing this after flux_ramp_setup so that if needed we can
# set feedback_end based on the flux ramp settings.
feedback_start = self._feedback_frac_to_feedback(band, feedback_start_frac)
feedback_end = self._feedback_frac_to_feedback(band, feedback_end_frac)
# Set feedbackStart and feedbackEnd
self.set_feedback_start(band, feedback_start, write_log=write_log)
self.set_feedback_end(band, feedback_end, write_log=write_log)
if write_log:
self.log("Applying feedback over "+
f"{(feedback_end_frac-feedback_start_frac)*100.:.1f}% of each "+
f"flux ramp cycle (with feedbackStart={feedback_start} and " +
f"feedbackEnd={feedback_end})", self.LOG_USER)
if flux_ramp:
self.flux_ramp_on(write_log=write_log, new_epics_root=new_epics_root)
# take one dataset with all channels
if return_data or make_plot:
f, df, sync = self.take_debug_data(band, IQstream=iq_stream_enable,
single_channel_readout=0, nsamp=nsamp)
df_std = np.std(df, 0)
df_channels = np.ravel(np.where(df_std >0))
# Intersection of channels that are on and have some flux ramp resp
channels_on = list(set(df_channels) & set(self.which_on(band)))
self.log(f"Number of channels on : {len(self.which_on(band))}",
self.LOG_USER)
self.log("Number of channels on with flux ramp "+
f"response : {len(channels_on)}", self.LOG_USER)
f_span = np.max(f,0) - np.min(f,0)
if make_plot:
timestamp = self.get_timestamp()
fig,ax = plt.subplots(1,3, figsize=(12,5))
fig.suptitle(f'{timestamp} Band {band}')
# Histogram the stddev
ax[0].hist(df_std[channels_on]*1e3, bins=20, edgecolor = 'k')
ax[0].set_xlabel('Flux ramp demod error std (kHz)')
ax[0].set_ylabel('number of channels')
# Histogram the max-min flux ramp amplitude response
ax[1].hist(f_span[channels_on]*1e3, bins=20, edgecolor='k')
ax[1].set_xlabel('Flux ramp amplitude (kHz)')
ax[1].set_ylabel('number of channels')
# Plot df vs resp amplitude
ax[2].plot(f_span[channels_on]*1e3, df_std[channels_on]*1e3, '.')
ax[2].set_xlabel('FR Amp (kHz)')
ax[2].set_ylabel('RF demod error (kHz)')
x = np.array([0, np.max(f_span[channels_on])*1.0E3])
# useful line to guide the eye
y_factor = 100
y = x/y_factor
ax[2].plot(x, y, color='k', linestyle=':',label=f'1:{y_factor}')
ax[2].legend(loc='upper right')
bbox = dict(boxstyle="round", ec='w', fc='w', alpha=.65)
text = f"Reset rate: {reset_rate_khz} kHz" + "\n" + \
f"LMS freq: {lms_freq_hz:.0f} Hz" + "\n" + \
f"LMS gain: {lms_gain}" + "\n" + \
f"FR amp: {self.get_fraction_full_scale():1.3f}" + "\n" + \
f"FB start: {feedback_start_frac}" + "\n" + \
f"FB end: {feedback_end_frac}" + "\n" + \
f"FB enable 1/2/3 : {lms_enable1}/{lms_enable2}/{lms_enable3}" + \
"\n" + \
r"$n_{chan}$:" + f" {len(channels_on)}"
ax[2].text(.05, .97, text, transform=ax[2].transAxes, va='top',
ha='left', fontsize=10, bbox=bbox)
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
if save_plot:
path = os.path.join(self.plot_dir,
timestamp + '_FR_amp_v_err' + plotname_append + '.png')
plt.savefig(path, bbox_inches='tight')
self.pub.register_file(path, 'amp_vs_err', plot=True)
if not show_plot:
# If we don't want a live view, close the plot
plt.close()
if channel is not None:
channel = np.ravel(np.array(channel))
sync_idx = self.make_sync_flag(sync)
for ch in channel:
# Setup plotting
fig, ax = plt.subplots(2, sharex=True, figsize=(9, 4.75))
# Plot tracked component
ax[0].plot(f[:, ch]*1e3)
ax[0].set_ylabel('Tracked Freq [kHz]')
ax[0].text(.025, .93,
f'LMS Freq {lms_freq_hz:.0f} Hz', fontsize=10,
transform=ax[0].transAxes, bbox=bbox, ha='left',
va='top')
ax[0].text(.95, .93, f'Band {band} Ch {ch:03}',
fontsize=10, transform=ax[0].transAxes, ha='right',
va='top', bbox=bbox)
# Plot the untracking part
ax[1].plot(df[:, ch]*1e3)
ax[1].set_ylabel('Freq Error [kHz]')
ax[1].set_xlabel('Samp Num')
ax[1].text(.025, .93,
f'RMS error = {df_std[ch]*1e3:.2f} kHz\n' +
f'FR frac. full scale = {fraction_full_scale:.2f}',
fontsize=10, transform=ax[1].transAxes, bbox=bbox,
ha='left', va='top')
n_sync_idx = len(sync_idx)
for i, s in enumerate(sync_idx):
# Lines for reset
ax[0].axvline(s, color='k', linestyle=':', alpha=.5)
ax[1].axvline(s, color='k', linestyle=':', alpha=.5)
# highlight used regions
if i < n_sync_idx-1:
nsamp = sync_idx[i+1]-sync_idx[i]
start = s + feedback_start_frac*nsamp
end = s + feedback_end_frac*nsamp
ax[0].axvspan(start, end, color='k', alpha=.15)
ax[1].axvspan(start, end, color='k', alpha=.15)
plt.tight_layout()
if save_plot:
path = os.path.join(self.plot_dir, timestamp +
f'_FRtracking_b{band}_ch{ch:03}{plotname_append}.png')
plt.savefig(path, bbox_inches='tight')
self.pub.register_file(path, 'tracking', plot=True)
if not show_plot:
plt.close()
self.set_iq_stream_enable(band, 1, write_log=write_log)
if return_data:
return f, df, sync
@set_action()
def track_and_check(self, band, channel=None, reset_rate_khz=None,
make_plot=False, save_plot=True, show_plot=True,
lms_freq_hz=None, flux_ramp=True, fraction_full_scale=None,
lms_enable1=True, lms_enable2=True, lms_enable3=True, lms_gain=None,
f_min=.015, f_max=.2, df_max=.03, toggle_feedback=True,
relock=True, tracking_setup=True,
feedback_start_frac=None, feedback_end_frac=None, setup_flux_ramp=True):
"""
This runs tracking setup and check_lock to prune bad channels. This has
all the same inputs and tracking_setup and check_lock. In particular the
cut parameters are f_min, f_max, and df_max.
Args
----
band : int
The band to track and check.
channel : int or int array or None, optional, default None
List of channels to plot.
reset_rate_khz : float or None, optional, default None
The flux ramp frequency.
make_plot : bool, optional, default False
Whether to make plots.
save_plot : bool, optional, default True
Whether to save plots.
show_plot : bool, optional, default True
Whether to display the plot.
lms_freq_hz : float or None, optional, default None
The frequency of the tracking algorithm.
flux_ramp : bool, optional, default True
Whether to turn on flux ramp.
fraction_full_scale : float or None, optional, default None
The flux ramp amplitude, as a fraction of the maximum.
lms_enable1 : bool, optional, default True
Whether to use the first harmonic for tracking.
lms_enable2 : bool, optional, default True
Whether to use the second harmonic for tracking.
lms_enable3 : bool, optional, default True
Whether to use the third harmonic for tracking.
lms_gain : int or None, optional, default None
The tracking gain parameters. Default is the value in the
config file
f_min : float, optional, default 0.015
The maximum frequency swing.
f_max : float, optional, default 0.20
The minimium frequency swing.
df_max : float, optional, default 0.03
The maximum value of the stddev of df.
toggle_feedback : bool, optional, default True
Whether or not to reset feedback (both the global band
feedbackEnable and the lmsEnables between tracking_setup
and check_lock.
relock : bool, optional, default True
Whether or not to relock at the start.
tracking_setup : bool, optional, default True
Whether or not to run tracking_setup.
feedback_start_frac : float or None, optional, default None
The fraction of the full flux ramp at which to stop
applying feedback in each flux ramp cycle. Must be in
[0,1). Defaults to whatever's in the cfg file.
feedback_end_frac : float or None, optional, default None
The fraction of the full flux ramp at which to stop
applying feedback in each flux ramp cycle. Must be >0.
Defaults to whatever's in the cfg file.
setup_flux_ramp : bool, optional, default True
Whether to setup the flux ramp at the end.
"""
if reset_rate_khz is None:
reset_rate_khz = self._reset_rate_khz
if lms_gain is None:
lms_gain = self._lms_gain[band]
if relock:
self.relock(band)
# Start tracking
if tracking_setup:
self.tracking_setup(band, channel=channel,
reset_rate_khz=reset_rate_khz, make_plot=make_plot,
save_plot=save_plot, show_plot=show_plot,
lms_freq_hz=lms_freq_hz, flux_ramp=flux_ramp,
fraction_full_scale=fraction_full_scale, lms_enable1=lms_enable1,
lms_enable2=lms_enable2, lms_enable3=lms_enable3,
lms_gain=lms_gain, return_data=False,
feedback_start_frac=feedback_start_frac,
feedback_end_frac=feedback_end_frac,
setup_flux_ramp=setup_flux_ramp)
# Toggle the feedback because sometimes tracking exits in a bad state.
# I'm not sure if this is still the case, but no reason to stop doing
# this. -EY 20191001
if toggle_feedback:
self.toggle_feedback(band)
# Check the lock status and cut channels based on inputs.
self.check_lock(band, f_min=f_min, | |
if x.Name == 'VRFHeatCapFT'])
assert curveobj[0].Name == 'VRFHeatCapFT'
assert curveobj[0].Coefficient1_Constant == 1.014599599
assert curveobj[0].Coefficient2_x == -0.002506703
assert curveobj[0].Coefficient3_x2 == -0.000141599
assert curveobj[0].Coefficient4_y == 0.026931595
assert curveobj[0].Coefficient5_y2 == 1.83538E-06
assert curveobj[0].Coefficient6_xy == -0.000358147
assert curveobj[0].Minimum_Value_of_x == 15
assert curveobj[0].Maximum_Value_of_x == 27
assert curveobj[0].Minimum_Value_of_y == -20
assert curveobj[0].Maximum_Value_of_y == 15
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Temperature'
assert curveobj[0].Input_Unit_Type_for_Y == 'Temperature'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Biquadratic']
if x.Name == 'VRFHeatCapFTHi'])
assert curveobj[0].Name == 'VRFHeatCapFTHi'
assert curveobj[0].Coefficient1_Constant == 1.161134821
assert curveobj[0].Coefficient2_x == 0.027478868
assert curveobj[0].Coefficient3_x2 == -0.00168795
assert curveobj[0].Coefficient4_y == 0.001783378
assert curveobj[0].Coefficient5_y2 == 2.03208E-06
assert curveobj[0].Coefficient6_xy == -6.8969E-05
assert curveobj[0].Minimum_Value_of_x == 15
assert curveobj[0].Maximum_Value_of_x == 27
assert curveobj[0].Minimum_Value_of_y == -10
assert curveobj[0].Maximum_Value_of_y == 15
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Temperature'
assert curveobj[0].Input_Unit_Type_for_Y == 'Temperature'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Biquadratic']
if x.Name == 'VRFHeatEIRFT'])
assert curveobj[0].Name == 'VRFHeatEIRFT'
assert curveobj[0].Coefficient1_Constant == 0.87465501
assert curveobj[0].Coefficient2_x == -0.01319754
assert curveobj[0].Coefficient3_x2 == 0.00110307
assert curveobj[0].Coefficient4_y == -0.0133118
assert curveobj[0].Coefficient5_y2 == 0.00089017
assert curveobj[0].Coefficient6_xy == -0.00012766
assert curveobj[0].Minimum_Value_of_x == 15
assert curveobj[0].Maximum_Value_of_x == 27
assert curveobj[0].Minimum_Value_of_y == -20
assert curveobj[0].Maximum_Value_of_y == 12
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Temperature'
assert curveobj[0].Input_Unit_Type_for_Y == 'Temperature'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Biquadratic']
if x.Name == 'VRFHeatEIRFTHi'])
assert curveobj[0].Name == 'VRFHeatEIRFTHi'
assert curveobj[0].Coefficient1_Constant == 2.504005146
assert curveobj[0].Coefficient2_x == -0.05736767
assert curveobj[0].Coefficient3_x2 == 4.07336E-05
assert curveobj[0].Coefficient4_y == -0.12959669
assert curveobj[0].Coefficient5_y2 == 0.00135839
assert curveobj[0].Coefficient6_xy == 0.00317047
assert curveobj[0].Minimum_Value_of_x == 15
assert curveobj[0].Maximum_Value_of_x == 27
assert curveobj[0].Minimum_Value_of_y == -10
assert curveobj[0].Maximum_Value_of_y == 15
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Temperature'
assert curveobj[0].Input_Unit_Type_for_Y == 'Temperature'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Biquadratic']
if x.Name == 'CoolingLengthCorrectionFactor'])
assert curveobj[0].Name == 'CoolingLengthCorrectionFactor'
assert curveobj[0].Coefficient1_Constant == 1.0693794
assert curveobj[0].Coefficient2_x == -0.0014951
assert curveobj[0].Coefficient3_x2 == 2.56E-06
assert curveobj[0].Coefficient4_y == -0.1151104
assert curveobj[0].Coefficient5_y2 == 0.0511169
assert curveobj[0].Coefficient6_xy == -0.0004369
assert curveobj[0].Minimum_Value_of_x == 8
assert curveobj[0].Maximum_Value_of_x == 175
assert curveobj[0].Minimum_Value_of_y == 0.5
assert curveobj[0].Maximum_Value_of_y == 1.5
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Temperature'
assert curveobj[0].Input_Unit_Type_for_Y == 'Temperature'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Biquadratic']
if x.Name == 'VRF Piping Correction Factor for Length in Heating Mode'])
assert curveobj[0].Name == 'VRF Piping Correction Factor for Length in Heating Mode'
assert curveobj[0].Coefficient1_Constant == .989916
assert curveobj[0].Coefficient2_x == .001961
assert curveobj[0].Coefficient3_x2 == -.000036
assert curveobj[0].Coefficient4_y == 0
assert curveobj[0].Coefficient5_y2 == 0
assert curveobj[0].Coefficient6_xy == 0
assert curveobj[0].Minimum_Value_of_x == 7
assert curveobj[0].Maximum_Value_of_x == 106.5
assert curveobj[0].Minimum_Value_of_y == 1
assert curveobj[0].Maximum_Value_of_y == 1
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Distance'
assert curveobj[0].Input_Unit_Type_for_Y == 'Dimensionless'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Biquadratic']
if x.Name == 'VRF Heat Recovery Cooling Capacity Modifier'])
assert curveobj[0].Name == 'VRF Heat Recovery Cooling Capacity Modifier'
assert curveobj[0].Coefficient1_Constant == .9
assert curveobj[0].Coefficient2_x == 0
assert curveobj[0].Coefficient3_x2 == 0
assert curveobj[0].Coefficient4_y == 0
assert curveobj[0].Coefficient5_y2 == 0
assert curveobj[0].Coefficient6_xy == 0
assert curveobj[0].Minimum_Value_of_x == -100
assert curveobj[0].Maximum_Value_of_x == 100
assert curveobj[0].Minimum_Value_of_y == -100
assert curveobj[0].Maximum_Value_of_y == 100
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Temperature'
assert curveobj[0].Input_Unit_Type_for_Y == 'Temperature'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Biquadratic']
if x.Name == 'VRF Heat Recovery Cooling Energy Modifier'])
assert curveobj[0].Name == 'VRF Heat Recovery Cooling Energy Modifier'
assert curveobj[0].Coefficient1_Constant == 1.1
assert curveobj[0].Coefficient2_x == 0
assert curveobj[0].Coefficient3_x2 == 0
assert curveobj[0].Coefficient4_y == 0
assert curveobj[0].Coefficient5_y2 == 0
assert curveobj[0].Coefficient6_xy == 0
assert curveobj[0].Minimum_Value_of_x == -100
assert curveobj[0].Maximum_Value_of_x == 100
assert curveobj[0].Minimum_Value_of_y == -100
assert curveobj[0].Maximum_Value_of_y == 100
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Temperature'
assert curveobj[0].Input_Unit_Type_for_Y == 'Temperature'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Biquadratic']
if x.Name == 'VRF Heat Recovery Heating Capacity Modifier'])
assert curveobj[0].Name == 'VRF Heat Recovery Heating Capacity Modifier'
assert curveobj[0].Coefficient1_Constant == .9
assert curveobj[0].Coefficient2_x == 0
assert curveobj[0].Coefficient3_x2 == 0
assert curveobj[0].Coefficient4_y == 0
assert curveobj[0].Coefficient5_y2 == 0
assert curveobj[0].Coefficient6_xy == 0
assert curveobj[0].Minimum_Value_of_x == -100
assert curveobj[0].Maximum_Value_of_x == 100
assert curveobj[0].Minimum_Value_of_y == -100
assert curveobj[0].Maximum_Value_of_y == 100
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Temperature'
assert curveobj[0].Input_Unit_Type_for_Y == 'Temperature'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Biquadratic']
if x.Name == 'VRF Heat Recovery Heating Energy Modifier'])
assert curveobj[0].Name == 'VRF Heat Recovery Heating Energy Modifier'
assert curveobj[0].Coefficient1_Constant == 1.1
assert curveobj[0].Coefficient2_x == 0
assert curveobj[0].Coefficient3_x2 == 0
assert curveobj[0].Coefficient4_y == 0
assert curveobj[0].Coefficient5_y2 == 0
assert curveobj[0].Coefficient6_xy == 0
assert curveobj[0].Minimum_Value_of_x == -100
assert curveobj[0].Maximum_Value_of_x == 100
assert curveobj[0].Minimum_Value_of_y == -100
assert curveobj[0].Maximum_Value_of_y == 100
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == 'Temperature'
assert curveobj[0].Input_Unit_Type_for_Y == 'Temperature'
assert curveobj[0].Output_Unit_Type == 'Dimensionless'
curveobj = ([x
for x
in idf1.idfobjects['Curve:Quadratic']
if x.Name == 'VRFACCoolCapFFF'])
assert curveobj[0].Name == 'VRFACCoolCapFFF'
assert curveobj[0].Coefficient1_Constant == 0.8
assert curveobj[0].Coefficient2_x == 0.2
assert curveobj[0].Coefficient3_x2 == 0.0
assert curveobj[0].Minimum_Value_of_x == 0.5
assert curveobj[0].Maximum_Value_of_x == 1.5
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == ''
assert curveobj[0].Output_Unit_Type == ''
curveobj = ([x
for x
in idf1.idfobjects['Curve:Quadratic']
if x.Name == 'CoolingEIRHiPLR'])
assert curveobj[0].Name == 'CoolingEIRHiPLR'
assert curveobj[0].Coefficient1_Constant == 1.0
assert curveobj[0].Coefficient2_x == 0.0
assert curveobj[0].Coefficient3_x2 == 0.0
assert curveobj[0].Minimum_Value_of_x == 1.0
assert curveobj[0].Maximum_Value_of_x == 1.5
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == ''
assert curveobj[0].Output_Unit_Type == ''
curveobj = ([x
for x
in idf1.idfobjects['Curve:Quadratic']
if x.Name == 'VRFCPLFFPLR'])
assert curveobj[0].Name == 'VRFCPLFFPLR'
assert curveobj[0].Coefficient1_Constant == 0.85
assert curveobj[0].Coefficient2_x == 0.15
assert curveobj[0].Coefficient3_x2 == 0.0
assert curveobj[0].Minimum_Value_of_x == 0.0
assert curveobj[0].Maximum_Value_of_x == 1.0
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == ''
assert curveobj[0].Output_Unit_Type == ''
curveobj = ([x
for x
in idf1.idfobjects['Curve:Quadratic']
if x.Name == 'HeatingEIRHiPLR'])
assert curveobj[0].Name == 'HeatingEIRHiPLR'
assert curveobj[0].Coefficient1_Constant == 2.4294355
assert curveobj[0].Coefficient2_x == -2.235887
assert curveobj[0].Coefficient3_x2 == 0.8064516
assert curveobj[0].Minimum_Value_of_x == 1.0
assert curveobj[0].Maximum_Value_of_x == 1.5
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == ''
assert curveobj[0].Output_Unit_Type == ''
curveobj = ([x
for x
in idf1.idfobjects['Curve:Linear']
if x.Name == 'CoolingCombRatio'])
assert curveobj[0].Name == 'CoolingCombRatio'
assert curveobj[0].Coefficient1_Constant == 0.618055
assert curveobj[0].Coefficient2_x == 0.381945
assert curveobj[0].Minimum_Value_of_x == 1.0
assert curveobj[0].Maximum_Value_of_x == 1.5
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == ''
assert curveobj[0].Output_Unit_Type == ''
curveobj = ([x
for x
in idf1.idfobjects['Curve:Linear']
if x.Name == 'HeatingCombRatio'])
assert curveobj[0].Name == 'HeatingCombRatio'
assert curveobj[0].Coefficient1_Constant == 0.96034
assert curveobj[0].Coefficient2_x == 0.03966
assert curveobj[0].Minimum_Value_of_x == 1.0
assert curveobj[0].Maximum_Value_of_x == 1.5
assert curveobj[0].Minimum_Curve_Output == ''
assert curveobj[0].Maximum_Curve_Output == ''
assert curveobj[0].Input_Unit_Type_for_X == ''
assert curveobj[0].Output_Unit_Type == ''
def test_addForscriptSchVRFsystem(IDFobject):
    """Check that FORSCRIPT heating/cooling setpoint schedules are created
    per zone and wired into each zone's dual setpoint thermostat."""
    from eppy.modeleditor import IDF

    IDFobject.addForscriptSchVRFsystem(verboseMode=False)
    IDFobject.saveaccim(verboseMode=False)
    idf1 = IDF('TestModel_MultipleZone_pymod.idf')

    # Zone names as stored in the IDF, and their sanitised variants
    # (colons are not legal in the FORSCRIPT schedule names).
    zonenames_orig = [zone.Name for zone in idf1.idfobjects['ZONE']]
    zonenames = [name.replace(':', '_') for name in zonenames_orig]

    def first_named(objects, name):
        # Same failure mode as the original obj[0] lookup: IndexError if absent.
        matches = [candidate for candidate in objects if candidate.Name == name]
        return matches[0]

    schedules = idf1.idfobjects['Schedule:Compact']
    for zonename in zonenames:
        # Heating (AHST) defaults to 20, cooling (ACST) to 24.
        for prefix, default_setpoint in (('FORSCRIPT_AHST_', '20'),
                                         ('FORSCRIPT_ACST_', '24')):
            sched = first_named(schedules, prefix + zonename)
            assert sched.Name == prefix + zonename
            assert sched.Schedule_Type_Limits_Name == "Any Number"
            assert sched.Field_1 == 'Through: 12/31'
            assert sched.Field_2 == 'For: AllDays'
            assert sched.Field_3 == 'Until: 24:00'
            assert sched.Field_4 == default_setpoint

    dual_setpoints = idf1.idfobjects['ThermostatSetpoint:DualSetpoint']
    for orig_name, safe_name in zip(zonenames_orig, zonenames):
        dual = first_named(dual_setpoints, orig_name + ' Dual SP')
        assert dual.Name == orig_name + ' Dual SP'
        assert dual.Heating_Setpoint_Temperature_Schedule_Name == "FORSCRIPT_AHST_" + safe_name
        assert dual.Cooling_Setpoint_Temperature_Schedule_Name == "FORSCRIPT_ACST_" + safe_name
def test_checkVentIsOn(IDFobject):
from eppy.modeleditor import IDF
IDFobject.checkVentIsOn(verboseMode=False)
IDFobject.saveaccim(verboseMode=False)
idf1 = IDF('TestModel_MultipleZone_pymod.idf')
obj = ([x
for x
in idf1.idfobjects['Schedule:Compact']
if x.Name == "Vent_SP_temp"])
assert obj[0].Name == "Vent_SP_temp"
assert obj[0].Schedule_Type_Limits_Name | |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Crossword Layout
"""
class IntRect:
"""Integer Rectangle, [left, right), [top, bottom)"""
def __init__(self, left=0, top=0, right=1, bottom=1):
self.left = left
self.top = top
self.right = right
self.bottom = bottom
@property
def width(self):
"""Return width"""
return self.right - self.left
@property
def height(self):
"""Return height"""
return self.bottom - self.top
def area(self):
"""Return area"""
return self.width * self.height
def __contains__(self, point):
"""Test if a point inside or not"""
x, y = point
return self.left <= x < self.right and self.top <= y < self.bottom
def __ior__(self, other):
"""Merge another"""
self.left = min(self.left, other.left)
self.top = min(self.top, other.top)
self.right = max(self.right, other.right)
self.bottom = max(self.bottom, other.bottom)
return self
def __or__(self, other):
"""Merge another and return new one"""
return IntRect(
min(self.left, other.left),
min(self.top, other.top),
max(self.right, other.right),
max(self.bottom, other.bottom),
)
def __and__(self, other):
"""Test if two rects have intersection or not"""
return not (self.right <= other.left or
other.right <= self.left or
self.bottom <= other.top or
other.bottom <= self.top
)
def intersect(self, other):
"""Return intersection"""
if self & other:
return IntRect(
max(self.left, other.left),
max(self.top, other.top),
min(self.right, other.right),
min(self.bottom, other.bottom)
)
return None
def __iter__(self):
"""Spiral iteration from center to outside"""
x_offset = (self.left + self.right) / 2.0
y_offset = (self.top + self.bottom) / 2.0
x, y, dx, dy = 0, 0, 0, -1
# 1 round more prevents width/height NOT divided by 2
for _ in range(max(self.width + 2, self.height + 2) ** 2):
point = int(x + x_offset), int(y + y_offset)
if point in self:
yield point
if x == y or (x < 0 and x == -y) or (x > 0 and x == 1 - y):
dx, dy = -dy, dx
x, y = x + dx, y + dy
class CrosswordLayout:
"""Crossword Layout"""
    class Error(Exception):
        """Raised for invalid CrosswordLayout arguments or layout failures."""
class WordLayout:
"""Word layout"""
def __init__(self, word, x, y, horizontal):
self.word = word
self.horizontal = horizontal
self.rect = IntRect(
x, y,
x + [1, len(self.word)][horizontal],
y + [len(self.word), 1][horizontal],
)
# intersect count with other word(s' layouts)
self.intersect_cnt = 0
def can_intersect(self):
"""Can intersect with other or not"""
# Word CAN NOT entirely intersects with other words
# Otherwise this word will hided by others
return self.intersect_cnt + 1 < len(self.word)
def add_intersect(self, other):
"""Two words intersect"""
self.intersect_cnt += 1
other.intersect_cnt += 1
def print_layout(self, board, x, y, word_rewrite_callback):
"""Output layout to a 2D array"""
for char in word_rewrite_callback(self.word):
board[y][x] = char
x, y = x + [0, 1][self.horizontal], y + [1, 0][self.horizontal]
def __getitem__(self, point):
"""Get char by point(x, y)"""
if point in self.rect:
x, y = point
offset = (x - self.rect.left) if self.horizontal else (y - self.rect.top)
return self.word[offset]
return None
def __and__(self, other):
"""Test if two layouts intersect with same char or not"""
common = self.rect.intersect(other.rect)
return common and self[common.left, common.top] == other[common.left, common.top]
def __init__(
self,
layout_count, # Word count need to be layout
key_word, # MUST layout word, seed word
other_words, # Other word choices
max_width=0, # 0 for unlimited
max_height=0 # 0 for unlimited
):
if layout_count < 2:
raise self.Error("layout_count MUST more than 2!")
self.layout_count = layout_count
self.words = list(other_words)
if key_word:
self.words[:0] = [key_word]
if len(self.words) < layout_count:
raise self.Error("layout_count MUST NOT more than words count!")
# Width/height limit for rect
if not (isinstance(max_width, int) and
isinstance(max_height, int) and
max_width >= 0 and
max_height >= 0):
raise self.Error(
"max_width:%d or max_height:%d invalid!" % (max_width, max_height)
)
# Have rect width/height limit or not
self.have_rect_limit = max_width > 0 or max_height > 0
# Real rect width/height limit
self._real_rect_max_width = max_width or 999999999
self._real_rect_max_height = max_height or 999999999
# Rect for words' layouts
self.rect = IntRect()
# [self.WordLayout]
# Layouts of words already layout
self.word_layouts = []
# Clock-wisely layout poses on the outer 4 sides of words' layouts rect
# 0.same direction:
# 1.same direction:
# 2.reversed direction:
self.outside_layout_poses = [0, 0, 0]
self.do_layout()
def layout_words(self):
"""Return words already layout"""
return [layout.word for layout in self.word_layouts]
def check_and_add_word_layout(self, word, x, y, horizontal, inserted_layout=None):
"""Check and layout it if a word can be layout"""
new_layout = self.WordLayout(word, x, y, horizontal)
if self.have_rect_limit:
new_rect = self.rect | new_layout.rect
if new_rect.width > self._real_rect_max_width or \
new_rect.height > self._real_rect_max_height:
return False
passed_layouts = set()
if self.check_word_layout(new_layout, inserted_layout, passed_layouts):
self.word_layouts.append(new_layout)
self.rect |= new_layout.rect
# Add intersect for all these(insert/inserted/passed) words
if inserted_layout:
for layout in [inserted_layout] + list(passed_layouts):
layout.add_intersect(new_layout)
return True
return False
    def check_word_layout(self, new_layout, inserted_layout, passed_layouts):
        """Check whether *new_layout* may legally be placed on the board.

        Args:
            new_layout: candidate WordLayout being tested.
            inserted_layout: already-placed layout the candidate is meant to
                cross, or None when the candidate must not touch anything.
            passed_layouts: out-parameter set; receives the already-placed
                layouts that legally cross the candidate.

        Returns:
            True when the placement is legal, False otherwise.
        """
        new_rect = new_layout.rect
        left, top, right, bottom = \
            new_rect.left, new_rect.top, new_rect.right, new_rect.bottom
        horizontal = new_layout.horizontal
        # Dangerous rects for new layout: one cell of clearance along the
        # word (danger_rect0) and around its sides (danger_rect1).
        #              +--> danger_rect1
        #      (0,-1)  |
        #      +---------+------+
        #      |                |
        # (-1,0) |              |
        # +----+----------------+----+
        # |    |                |    +-----> danger_rect0
        # |    |                |    |
        # +----+----------------+----+
        #      |                |  (+1,0)
        #      |                |
        #      +----------------+
        #       (0,+1)
        if horizontal:
            danger_rects = [
                IntRect(left - 1, top + 0, right + 1, bottom + 0),
                IntRect(left + 0, top - 1, right + 0, bottom + 1),
            ]
        else:
            # Vertical word: the along-word and side clearances swap axes.
            danger_rects = [
                IntRect(left + 0, top - 1, right + 0, bottom + 1),
                IntRect(left - 1, top + 0, right + 1, bottom + 0),
            ]
        inserted = inserted_layout
        # Already-placed layouts' rects CAN NOT intersect with the danger
        # rects; the generator yields only the (layout, danger_rect) pairs
        # that do overlap, i.e. the potential conflicts to adjudicate.
        for layout, danger_rect in (
            (_layout, _danger_rect)
            for _layout in self.word_layouts if _layout is not inserted
            for _danger_rect in danger_rects if _layout.rect & _danger_rect
        ):
            # With no insertion target, any contact at all is illegal.
            if not inserted:
                return False
            if layout.horizontal == horizontal:
                # Exception:
                # Like the following, 'as' should connect with 'saw' in char 'a'
                #
                #   was
                #   a
                #   w
                #
                # Allowed only when the parallel word itself touches the
                # insertion target and the overlap is a single cell.
                if not (inserted.can_intersect() and
                        layout.rect & inserted.rect and
                        layout.rect.intersect(danger_rect).area() == 1):
                    return False
            # Perpendicular case: the old layout must cross the new layout
            # on the same character.
            elif not (layout.can_intersect() and new_layout & layout):
                return False
            else:
                passed_layouts.add(layout)
        return True
def layout_word_not_insert(self, word):
"""Layout a word, WITHOUT inserted layout"""
rect = self.rect
# Search layout point from center to outside spirally
for x, y in rect:
if self.check_and_add_word_layout(word, x, y, horizontal=True) or \
self.check_and_add_word_layout(word, x, y, horizontal=False):
return
left, top, right, bottom = rect.left, rect.top, rect.right, rect.bottom
# Clock-wisely layout on the outer 4 sides of words' layouts rect
positions_array = [
# 0.same direction:
# horizontal layouts horizontally, vertical layouts vertically, connected to rect
#
# 0 1
# +---> +--->
#
# 7+ +--------------------+2
# | | |
# v | v
# | |
# 6+ | +3
# | | |
# v +---->----------+--->v
# 5 4
[
(left, top - 1, True),
(right - len(word), top - 1, True),
(right, top, False),
(right, bottom - len(word), False),
(right - len(word), bottom, True),
(left, bottom, True),
(left - 1, bottom - len(word), False),
(left - 1, top, False),
],
# 1.same direction:
# horizontal layouts horizontally, vertical layouts vertically, NOT connected to rect
# 0
# +---->
# 1
# 3+ +--------------------+ +
# | | | |
# | | | |
# v | | v
# | |
# | |
# +--------------------+
# 2
# +----->
[
(left, top - 2, True),
(right + 1, top, False),
(left, bottom + 1, True),
(left - 2, top, False),
],
# 2.reversed direction:
# horizontal layouts vertically, vertical layouts horizontally, NOT connected to rect
# + 0
# |
# |
# v
# 3 1
# +-----> +--------------------+ +---->
# | |
# | |
# | |
# | |
# | |
# +--------------------+
# 2
# +
# |
# |
# v
[
(left, top - 1 - len(word), False),
(right + 1, top, True),
(left, bottom + 1, False),
(left - 1 - len(word), top, True),
],
]
for pos_index, positions in enumerate(positions_array):
for _ in range(len(positions)):
x, y, horizontal = \
positions[self.outside_layout_poses[pos_index] % len(positions)]
self.outside_layout_poses[pos_index] | |
<reponame>m1griffin/arrayfunc
#!/usr/bin/env python3
##############################################################################
# Project: arrayfunc
# Purpose: Generate the C code for math operators with one variable.
# Language: Python 3.4
# Date: 18-Mar-2018
#
###############################################################################
#
# Copyright 2014 - 2021 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
# ==============================================================================
import itertools
import codegen_common
# ==============================================================================
# Template for the generated C source file header: module banner, Apache-2.0
# license boilerplate, and the #include list.  Substitution key %(funclabel)s
# is the math function's name (e.g. 'neg', 'abs_'); it selects the output
# file name, the purpose line, and the per-platform SIMD headers guarded for
# x86-64, ARMv7 (32 bit NEON), and ARMv8 (AARCH64 NEON).
uniops_head = """//------------------------------------------------------------------------------
// Project: arrayfunc
// Module: %(funclabel)s.c
// Purpose: Calculate the %(funclabel)s of values in an array.
// Language: C
// Date: 15-Nov-2017.
//
//------------------------------------------------------------------------------
//
// Copyright 2014 - 2021 <NAME> <<EMAIL>>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------
/*--------------------------------------------------------------------------- */
// This must be defined before "Python.h" in order for the pointers in the
// argument parsing functions to work properly.
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#include <limits.h>
#include <math.h>
#include "arrayerrs.h"
#include "arrayparams_base.h"
#include "arrayparams_onesimd.h"
#include "simddefs.h"
#ifdef AF_HASSIMD_X86
#include "%(funclabel)s_simd_x86.h"
#endif
#if defined(AF_HASSIMD_ARMv7_32BIT) || defined(AF_HASSIMD_ARM_AARCH64)
#include "arm_neon.h"
#endif
#if defined(AF_HASSIMD_ARMv7_32BIT)
#include "%(funclabel)s_simd_armv7.h"
#endif
#if defined(AF_HASSIMD_ARM_AARCH64)
#include "%(funclabel)s_simd_armv8.h"
#endif
/*--------------------------------------------------------------------------- */
"""
# ==============================================================================
# For floating point.
# Scalar loop template for floating point types.  The generated function
# applies %(copname)s element-by-element, either in place ('data') or into a
# second array ('dataout').  When error checking is enabled, any non-finite
# result (NaN/inf) aborts with ARR_ERR_ARITHMETIC.  %(simd_call)s and
# %(simd_call_close)s optionally wrap the loops with a SIMD fast path.
uniops_op_float = """
/*--------------------------------------------------------------------------- */
/* arraylen = The length of the data arrays.
data = The input data array.
dataout = The output data array.
ignoreerrors = If true, disable arithmetic math error checking (default is false).
hasoutputarray = If true, the output goes into the second array.
*/
signed int %(funclabel)s_%(funcmodifier)s(Py_ssize_t arraylen, int nosimd, %(arraytype)s *data, %(arraytype)s *dataout, unsigned int ignoreerrors, bool hasoutputarray) {
// array index counter.
Py_ssize_t x;
%(simd_call)s
// Math error checking disabled.
if (ignoreerrors) {
if (hasoutputarray) {
for (x = 0; x < arraylen; x++) {
dataout[x] = %(copname)s;
}
} else {
for (x = 0; x < arraylen; x++) {
data[x] = %(copname)s;
}
}
} else {
// Math error checking enabled.
if (hasoutputarray) {
for (x = 0; x < arraylen; x++) {
dataout[x] = %(copname)s;
if (!isfinite(dataout[x])) {return ARR_ERR_ARITHMETIC;}
}
} else {
for (x = 0; x < arraylen; x++) {
data[x] = %(copname)s;
if (!isfinite(data[x])) {return ARR_ERR_ARITHMETIC;}
}
}
}
%(simd_call_close)s
return ARR_NO_ERR;
}
"""
# ==============================================================================
# ==============================================================================
# For negating signed integer.
# Scalar loop template for negating signed integers.  Same structure as the
# float template, but the error check is overflow detection: negating the
# type's minimum value overflows, so each element is tested with the
# generated minval_loop_willoverflow_* macro before negation and the
# function aborts with ARR_ERR_OVFL on a hit.
uniops_neg_int = """
/*--------------------------------------------------------------------------- */
/* arraylen = The length of the data arrays.
data = The input data array.
dataout = The output data array.
ignoreerrors = If true, disable arithmetic math error checking (default is false).
hasoutputarray = If true, the output goes into the second array.
*/
signed int %(funclabel)s_%(funcmodifier)s(Py_ssize_t arraylen, int nosimd, %(arraytype)s *data, %(arraytype)s *dataout, unsigned int ignoreerrors, bool hasoutputarray) {
// array index counter.
Py_ssize_t x;
%(simd_call)s
// Math error checking disabled.
if (ignoreerrors) {
if (hasoutputarray) {
for (x = 0; x < arraylen; x++) {
dataout[x] = -data[x];
}
} else {
for (x = 0; x < arraylen; x++) {
data[x] = -data[x];
}
}
} else {
// Math error checking enabled.
if (hasoutputarray) {
for (x = 0; x < arraylen; x++) {
if ( minval_loop_willoverflow_%(funcmodifier)s(data[x]) ) {return ARR_ERR_OVFL;}
dataout[x] = -data[x];
}
} else {
for (x = 0; x < arraylen; x++) {
if ( minval_loop_willoverflow_%(funcmodifier)s(data[x]) ) {return ARR_ERR_OVFL;}
data[x] = -data[x];
}
}
}
%(simd_call_close)s
return ARR_NO_ERR;
}
"""
# ==============================================================================
# ==============================================================================
# For absolute value of a signed integer.
# Scalar loop template for the absolute value of signed integers.  Uses a
# branchless-looking ternary per element; like negation, abs() of the
# type's minimum value overflows, so the checked variant tests each element
# with minval_loop_willoverflow_* and aborts with ARR_ERR_OVFL.
uniops_abs_int = """
/*--------------------------------------------------------------------------- */
/* arraylen = The length of the data arrays.
data = The input data array.
dataout = The output data array.
ignoreerrors = If true, disable arithmetic math error checking (default is false).
hasoutputarray = If true, the output goes into the second array.
*/
signed int %(funclabel)s_%(funcmodifier)s(Py_ssize_t arraylen, int nosimd, %(arraytype)s *data, %(arraytype)s *dataout, unsigned int ignoreerrors, bool hasoutputarray) {
// array index counter.
Py_ssize_t x;
%(simd_call)s
// Math error checking disabled.
if (ignoreerrors) {
if (hasoutputarray) {
for (x = 0; x < arraylen; x++) {
dataout[x] = data[x] >= 0 ? data[x] : -data[x];
}
} else {
for (x = 0; x < arraylen; x++) {
data[x] = data[x] >= 0 ? data[x] : -data[x];
}
}
} else {
// Math error checking enabled.
if (hasoutputarray) {
for (x = 0; x < arraylen; x++) {
if ( minval_loop_willoverflow_%(funcmodifier)s(data[x]) ) {return ARR_ERR_OVFL;}
dataout[x] = data[x] >= 0 ? data[x] : -data[x];
}
} else {
for (x = 0; x < arraylen; x++) {
if ( minval_loop_willoverflow_%(funcmodifier)s(data[x]) ) {return ARR_ERR_OVFL;}
data[x] = data[x] >= 0 ? data[x] : -data[x];
}
}
}
%(simd_call_close)s
return ARR_NO_ERR;
}
"""
# ==============================================================================
# ==============================================================================
# The operations using SIMD.
# SIMD loop template without overflow checking.  Two variants are emitted:
# '_1' operates in place on 'data', '_2' writes into 'dataout'.  Both walk
# the array in %(simdwidth)s-element slices (load with %(vldinstr)s, apply
# %(simdop)s, store with %(vstinstr1)s/%(vstinstr2)s) and finish the
# trailing remainder with the scalar expression %(simdcleanup)s.
ops_simdsupport = """
/*--------------------------------------------------------------------------- */
/* The following series of functions reflect the different parameter options possible.
arraylen = The length of the data arrays.
data = The input data array.
dataout = The output data array.
*/
// param_arr_none
%(simdplatform)s
void %(funclabel)s_%(funcmodifier)s_1_simd(Py_ssize_t arraylen, %(arraytype)s *data) {
// array index counter.
Py_ssize_t x;
// SIMD related variables.
Py_ssize_t alignedlength;
%(simdattr)s datasliceleft;
%(vsignparam)s
// Calculate array lengths for arrays whose lengths which are not even
// multipes of the SIMD slice length.
alignedlength = calcalignedlength(arraylen, %(simdwidth)s);
// Perform the main operation using SIMD instructions.
for (x = 0; x < alignedlength; x += %(simdwidth)s) {
// Load the data into the vector register.
datasliceleft = %(vldinstr)s &data[x]);
// The actual SIMD operation.
datasliceleft = %(simdop)s;
// Store the result.
%(vstinstr1)s &data[x], %(vstinstr2)s datasliceleft);
}
// Get the max value within the left over elements at the end of the array.
for (x = alignedlength; x < arraylen; x++) {
data[x] = %(simdcleanup)s;
}
}
#endif
// param_arr_arr
%(simdplatform)s
void %(funclabel)s_%(funcmodifier)s_2_simd(Py_ssize_t arraylen, %(arraytype)s *data, %(arraytype)s *dataout) {
// array index counter.
Py_ssize_t x;
// SIMD related variables.
Py_ssize_t alignedlength;
%(simdattr)s datasliceleft;
%(vsignparam)s
// Calculate array lengths for arrays whose lengths which are not even
// multipes of the SIMD slice length.
alignedlength = calcalignedlength(arraylen, %(simdwidth)s);
// Perform the main operation using SIMD instructions.
for (x = 0; x < alignedlength; x += %(simdwidth)s) {
// Load the data into the vector register.
datasliceleft = %(vldinstr)s &data[x]);
// The actual SIMD operation.
datasliceleft = %(simdop)s;
// Store the result.
%(vstinstr1)s &dataout[x], %(vstinstr2)s datasliceleft);
}
// Get the max value within the left over elements at the end of the array.
for (x = alignedlength; x < arraylen; x++) {
dataout[x] = %(simdcleanup)s;
}
}
#endif
"""
# ==============================================================================
# Helper functions for SIMD support. There needs to be one for each data type.
# Helper template: broadcast one value into every lane of a SIMD vector.
# One copy is generated per data type; the overflow-checking SIMD loops use
# it to build a vector of the type's minimum integer value for comparison.
# (The spelling errors in the C comment are in the shipped template text.)
simd_helpers = """
/*--------------------------------------------------------------------------- */
/* Initialise an SIMD vector with a specifired value.
initval = The value to initialise the vector to.
Returns the initalised SIMD vector.
*/
%(simdplatform)s
%(simdattr)s initvec_%(funcmodifier)s(%(arraytype)s initval) {
unsigned int y;
%(arraytype)s initvals[%(simdwidth)s];
%(simdattr)s simdvec;
for (y = 0; y < %(simdwidth)s; y++) {
initvals[y] = initval;
}
simdvec = %(vldinstr)s(initvals));
return simdvec;
}
#endif
"""
# Create this for each signed integers type.
# Macro template: true when 'val' equals the type's minimum value
# (%(intminvalue)s), i.e. when negating it (or taking its absolute value)
# would overflow two's complement.  Used by the scalar loops above.
intov_macros_signed = """
/*--------------------------------------------------------------------------- */
// For %(arraytype)s.
// Use to detect if an overflow condition will occur due to negating a minimum integer.
#define minval_loop_willoverflow_%(funcmodifier)s(val) (val == %(intminvalue)s)
"""
# ==============================================================================
# The template for overflow checks for x86_64. This requires the correct SIMD attribute
# to be insertered before itself being inserted into the next template.
# Per-platform overflow pre-check snippets for the SIMD loops.  Each compares
# the loaded data slice lane-wise against 'ovflvec' (the vector of minimum
# integer values) with %(veqinstr)s, then opens an 'if' block that fires when
# any lane matched.  The 'if' is deliberately left unclosed here; the
# enclosing loop template supplies the body and closing brace.
simd_ovflchk1_x86 = '''// Do an equal compare operation.
ovcheck = %(veqinstr)s (datasliceleft, ovflvec);
// Check for overflow.
if (!(__builtin_ia32_pmovmskb128((v16qi) ovcheck) == 0x0000)) {'''
# ARMv7: reinterpret the 64 bit compare result as an integer and test it.
simd_equ_willoverflow_armv7 = '''// Do an equal compare operation.
ovcheck = %(veqinstr)s (datasliceleft, ovflvec);
// Check for overflow.
if (!(%(vreinterpinstr)s(ovcheck) == 0x0000000000000000)){'''
# ARMv8: the 128 bit compare result is split into two 64 bit lanes, each
# tested separately.
simd_equ_willoverflow_armv8 = '''// Do an equal compare operation.
ovcheck = %(veqinstr)s (datasliceleft, ovflvec);
// Check for overflow.
// Combine the result to two 64 bit vectors.
veccombine = %(vreinterpinstr)s(ovcheck);
// Get the high and low lanes of the combined vector.
lowresult = vgetq_lane_u64(veccombine, 0);
highresult = vgetq_lane_u64(veccombine, 1);
// Check if overflow will happen.
if ((lowresult != 0x0000000000000000) || (highresult != 0x0000000000000000)) {'''
# Extra variables needed for ARMv8.
# Declarations for the lane variables used by the ARMv8 snippet above.
simd_ovflchk_extravars_armv8 = '''uint64x2_t veccombine;
uint64_t highresult, lowresult;'''
# ==============================================================================
# The abs_ operations using SIMD. This version checks for overflow.
ops_simdsupport_ovfl_abs = """
/*--------------------------------------------------------------------------- */
/* The following series of functions reflect the different parameter options possible.
arraylen = The length of the data arrays.
data = The input data array.
dataout = The output data array.
*/
// param_arr_none
%(simdplatform)s
char %(funclabel)s_%(funcmodifier)s_1_simd_ovfl(Py_ssize_t arraylen, %(arraytype)s *data) {
// array index counter.
Py_ssize_t x;
// SIMD related variables.
Py_ssize_t alignedlength;
%(simdattr)s datasliceleft, datasliceright, ovflvec;
%(ovflsimdattr)s ovcheck;
%(vsignparam)s
%(simd_ovflchk_extravars)s
// This is used for detecting a potential overflow condition.
ovflvec = initvec_%(funcmodifier)s(%(intminvalue)s);
// Calculate array lengths for arrays whose lengths which are not even
// multipes of the SIMD slice length.
alignedlength = calcalignedlength(arraylen, %(simdwidth)s);
// Perform the main operation using SIMD instructions.
for (x = 0; x < alignedlength; x += %(simdwidth)s) {
// Load the data | |
"""Simple implementation of single linked lists. """
class Node:
    """A node of a singly linked list: one data payload plus a reference
    to the next node (None at the end of the list).
    """

    def __init__(self, data=None, next=None):
        """Initialise the node.

        Parameters:
            data (...): Payload stored in this node
            next (Node): Reference to the successor node, or None
        """
        self.data = data
        self.next = next

    def __str__(self):
        """Return the string representation of the stored payload."""
        return str(self.data)

    def get_data(self):
        """Return the payload stored in this node."""
        return self.data

    def set_data(self, data):
        """Store a new payload in this node.

        Parameters:
            data (...): Payload to store
        """
        self.data = data

    def get_next(self):
        """Return the successor node, or None if this is the last node."""
        return self.next

    def set_next(self, next):
        """Set the reference to the successor node.

        Parameters:
            next (Node): New successor node, or None
        """
        self.next = next


class SingleLinkedList(object):
    """A singly linked list.

    Keeps a reference to the head node and caches the element count so
    that len() is O(1).

    NOTE: for backward compatibility, the index-based methods *return* an
    IndexError instance on out-of-range access instead of raising it
    (existing callers print the returned value).  Check the return value
    when using these methods.
    """

    def __init__(self):
        """Create an empty list."""
        self._head = None
        self._size = 0

    def __len__(self):
        """Return the number of nodes in the list."""
        return self._size

    def __str__(self):
        """Return the list rendered as 'a -> b -> ... -> None'."""
        parts = []
        node = self._head
        while node:
            parts.append(str(node))
            node = node.get_next()
        parts.append("None")
        return " -> ".join(parts)

    def _increase_size(self):
        """Increment the cached size by one."""
        self._size += 1

    def _decrease_size(self):
        """Decrement the cached size by one, never going below zero."""
        if self._size > 0:
            self._size -= 1

    def size(self):
        """Return the number of nodes in the list."""
        return self._size

    def is_empty(self):
        """Return True if the list contains no nodes."""
        return self._size == 0

    def set_head(self, node):
        """Point the head of the list at the given node.

        Parameters:
            node (Node): New head node, or None for an empty list
        """
        self._head = node

    def insert_at(self, index, data):
        """Insert a new node holding data at position index.

        Parameters:
            index (int): Insertion position (0 == front, size() == append)
            data (...): Payload for the new node

        Returns:
            None on success, or an IndexError instance if index is out of
            range (returned, not raised -- see class note)
        """
        if index < 0 or index > self._size:
            return IndexError('index out of range!')
        node = Node(data)
        if self._head is None or index == 0:
            # New head: link the new node in front of the old head
            # (which is None for an empty list).
            node.set_next(self._head)
            self.set_head(node)
        else:
            # Walk to the predecessor of the insertion point.
            prev = self._head
            for _ in range(index - 1):
                prev = prev.get_next()
            node.set_next(prev.get_next())
            prev.set_next(node)
        self._increase_size()

    def append(self, data):
        """Add a node holding data at the end of the list.

        Parameters:
            data (...): Payload for the new node
        """
        node = Node(data)
        if self._head is None:
            self.set_head(node)
        else:
            # Walk to the current tail and link the new node after it.
            tail = self._head
            while tail.get_next():
                tail = tail.get_next()
            tail.set_next(node)
        self._increase_size()

    def prepend(self, data):
        """Add a node holding data at the front of the list.

        Parameters:
            data (...): Payload for the new node
        """
        node = Node(data)
        node.set_next(self._head)
        self.set_head(node)
        self._increase_size()

    def delete_at(self, index):
        """Remove the node at position index.

        Parameters:
            index (int): Position of the node to remove

        Returns:
            None on success, or an IndexError instance if index is out of
            range (returned, not raised -- see class note)
        """
        if index < 0 or index > self._size - 1:
            return IndexError('index out of range!')
        if index == 0:
            self.set_head(self._head.get_next())
        else:
            # Walk to the predecessor and splice the target node out.
            prev = self._head
            for _ in range(index - 1):
                prev = prev.get_next()
            prev.set_next(prev.get_next().get_next())
        self._decrease_size()

    def pop_front(self):
        """Remove the first node and return its payload.

        Returns:
            The payload of the removed node, or an IndexError instance if
            the list is empty (returned, not raised -- see class note)
        """
        if self._head is None:
            return IndexError("can not pop from empty list!")
        pop_data = self._head.get_data()
        self._head = self._head.get_next()
        self._decrease_size()
        return pop_data

    def pop_back(self):
        """Remove the last node and return its payload.

        Returns:
            The payload of the removed node, or an IndexError instance if
            the list is empty (returned, not raised -- see class note)
        """
        node = self._head
        if node is None:
            return IndexError("can not pop from empty list!")
        if node.get_next() is None:
            # BUGFIX: a single-element list must also clear the head;
            # previously only the size was decremented, leaving a size-0
            # list that still contained a node.
            pop_data = node.get_data()
            self.set_head(None)
        else:
            # Walk to the second-to-last node and cut off the tail.
            while node.get_next().get_next():
                node = node.get_next()
            pop_data = node.get_next().get_data()
            node.set_next(None)
        self._decrease_size()
        return pop_data

    def data_at(self, index):
        """Return the payload of the node at position index.

        Parameters:
            index (int): Position of the node to read

        Returns:
            The payload at index, or an IndexError instance if index is
            out of range (returned, not raised -- see class note)
        """
        if index < 0 or index > self._size - 1:
            return IndexError('index out of range!')
        # A valid index implies a non-empty list, so the walk is safe.
        node = self._head
        for _ in range(index):
            node = node.get_next()
        return node.get_data()

    def find(self, data):
        """Return the index of the first node whose payload equals data.

        Parameters:
            data (...): Payload to search for

        Returns:
            int: Index of the first match, or None if data is not present
        """
        node = self._head
        index = 0
        while node:
            if node.get_data() == data:
                return index
            node = node.get_next()
            index += 1
        return None

    def contains(self, data):
        """Return True if some node's payload equals data.

        Parameters:
            data (...): Payload to search for
        """
        # Identity test ('is not None') is the correct check for the
        # None sentinel; '!= None' invokes __eq__ unnecessarily.
        return self.find(data) is not None

    def remove_first(self, data):
        """Remove the first node whose payload equals data; no-op if the
        data is not present.

        Parameters:
            data (...): Payload to remove
        """
        index = self.find(data)
        # BUGFIX: must compare against None explicitly -- 'if index:'
        # silently skipped removal when the match was at index 0.
        if index is not None:
            self.delete_at(index)

    def remove_all(self, data):
        """Remove every node whose payload equals data.

        Parameters:
            data (...): Payload to remove
        """
        index = self.find(data)
        while index is not None:
            self.delete_at(index)
            index = self.find(data)

    def reverse(self):
        """Reverse the order of the nodes in place."""
        previous = None
        current = self._head
        while current is not None:
            nxt = current.get_next()
            current.set_next(previous)
            previous = current
            current = nxt
        self._head = previous
def main():
    """Demonstrate SingleLinkedList: build a list, query it by index and
    by value, remove and pop elements, exercise the out-of-range error
    paths, and finally reverse the list, printing state at each step.
    """
    print("Init single linked list.")
    sll = SingleLinkedList()
    print("List content: ", sll)
    print("Size: ", sll.size(), "\n")
    # Populate via insert_at/append/prepend, including duplicate values
    # that the removal demos below rely on.
    print("Fill single linked list.")
    sll.insert_at(0, 'first_item')
    sll.insert_at(0, 'second_item')
    sll.insert_at(2, 'third_item')
    sll.append('appended_item')
    sll.append('another_appended_item')
    sll.append('another_appended_item')
    sll.prepend('prepended_item')
    sll.prepend('another_prepended_item')
    sll.prepend('another_prepended_item')
    print("List content: ", sll)
    print("Size: ", sll.size(), "\n")
    # Random access by index.
    print("Show data from the list via using index.")
    print("First entry: ", sll.data_at(0))
    print("Third entry: ", sll.data_at(2))
    print("List content: ", sll)
    print("Size: ", sll.size(), "\n")
    # Search: find() returns an index (or None); contains() a bool.
    print("Find data in the list.")
    print("Find 'prepended_item': ", sll.find('prepended_item'))
    print("Find 'prepended_item_2': ", sll.find('prepended_item_2'))
    print("Contains 'second_item': ", sll.contains('second_item'))
    print("Contains 'second_item_2': ", sll.contains('second_item_2'))
    print("List content: ", sll)
    print("Size: ", sll.size(), "\n")
    # Remove by value: first occurrence only, then every occurrence.
    print("Remove data from the list.")
    print("Remove the first 'another_appended_item': ",
          sll.remove_first('another_appended_item'))
    print("Remove all 'another_prepended_item': ",
          sll.remove_all('another_prepended_item'))
    print("List content: ", sll)
    print("Size: ", sll.size(), "\n")
    # Remove by index.
    print("Delete data from the list using the index.")
    sll.delete_at(0)
    sll.delete_at(2)
    print("List content: ", sll)
    print("Size: ", sll.size(), "\n")
    # Pop from both ends.
    print("Pop data from the list.")
    print("Pop front: ", sll.pop_front())
    print("Pop_back: ", sll.pop_back())
    print("List content: ", sll)
    print("Size: ", sll.size(), "\n")
    # Out-of-range / empty-list operations return IndexError instances,
    # which are printed here rather than raised.
    print("Check 'out of range' insertion and deletion.")
    print(sll.insert_at(5, 'test'))
    print(SingleLinkedList().delete_at(0))
    print(SingleLinkedList().pop_back())
    print(SingleLinkedList().pop_front(), "\n")
    print("List content: ", sll)
    print("Size: ", sll.size(), "\n")
    # In-place reversal.
    print("Add a few items and reverse the list.")
    sll.append('added_back')
    sll.append('added_back_2')
    sll.prepend('added_front')
    sll.prepend('added_front_2')
    print("List content: ", sll)
    sll.reverse()
    print("List content: ", sll)
    print("Size: ", sll.size(), "\n")
if __name__ | |
self.assertIn(path, schema_changes_files)
# instantly create two, which will likely have the same timestamp
pathname = self.empty_schema_changes.make_template()
with self.assertRaisesRegex(MigratorError, "schema changes file '.+' already exists"):
self.empty_schema_changes.make_template()
os.remove(pathname)
schema_changes = SchemaChanges()
with self.assertRaisesRegex(MigratorError, "schema_repo must be initialized"):
schema_changes.make_template()
    def test_make_template_command(self):
        """make_template_command() should write a schema changes template
        into the repo (with '.' defaulting to the cwd) and reject a bad
        directory or commit identifier."""
        # test default: schema_repo_dir='.'
        # save cwd
        cwd = os.getcwd()
        os.chdir(self.git_migration_test_repo.repo_dir)
        with capturer.CaptureOutput(relay=False) as captured:
            schema_changes_template_file = SchemaChanges.make_template_command('.')
        self.assertTrue(os.path.isfile(schema_changes_template_file))
        # restore
        os.chdir(cwd)
        # prevent collision of filenames 1 sec apart
        time.sleep(2)
        # explicit directory argument
        with capturer.CaptureOutput(relay=False) as captured:
            schema_changes_template_file = SchemaChanges.make_template_command(
                self.git_migration_test_repo.repo_dir)
        self.assertTrue(os.path.isfile(schema_changes_template_file))
        # error paths: missing directory, unknown commit
        with self.assertRaisesRegex(MigratorError, "schema_dir is not a directory"):
            SchemaChanges.make_template_command('no such dir')
        with self.assertRaisesRegex(MigratorError,
                                    "commit_or_hash '.+' cannot be converted into a commit"):
            SchemaChanges.make_template_command(self.git_migration_test_repo.repo_dir, 'no such commit')
    @unittest.skip('Fixture must be updated due to changes to obj_tables')
    def test_load_transformations(self):
        """load_transformations() should return the MigrationWrapper that
        the transformations file defines, and complain when the file does
        not define one."""
        schema_changes_kwargs = SchemaChanges.load(self.schema_changes_file)
        schema_changes = SchemaChanges()
        schema_changes.schema_changes_file = schema_changes_kwargs['schema_changes_file']
        schema_changes.transformations_file = schema_changes_kwargs['transformations_file']
        transformations = schema_changes.load_transformations()
        self.assertTrue(isinstance(transformations, MigrationWrapper))
        # a transformations file lacking a 'transformations' wrapper fails
        schema_changes.transformations_file = 'no_transformations_wrapper.py'
        with self.assertRaisesRegex(MigratorError,
                                    r"schema changes file '.+' must define a MigrationWrapper instance called 'transformations'"):
            schema_changes.load_transformations()
    def test_load(self):
        """SchemaChanges.load() should parse a schema changes file into a
        dict, and report unreadable, malformed, incomplete, or unmodified
        template files."""
        schema_changes = SchemaChanges.load(self.schema_changes_file)
        expected_schema_changes = dict(
            commit_hash=self.known_hash_ba1f9d3,
            renamed_attributes=[],
            renamed_models=[['Test', 'TestNew']],
            schema_changes_file=self.schema_changes_file,
            transformations_file='transformations_ba1f9d3.py'
        )
        self.assertEqual(schema_changes, expected_schema_changes)
        # unreadable file
        no_such_file = 'no such file'
        with self.assertRaisesRegex(MigratorError, "could not read schema changes file: '.+'"):
            SchemaChanges.load(no_such_file)
        # detect bad yaml
        with TemporaryDirectory() as temp_dir:
            bad_yaml = os.path.join(temp_dir, 'bad_yaml.yaml')
            with open(bad_yaml, "w") as f:
                f.write("unbalanced brackets: ][")
            with self.assertRaisesRegex(MigratorError,
                                        r"could not parse YAML schema changes file: '\S+':"):
                SchemaChanges.load(bad_yaml)
            # parseable YAML that lacks the required attributes
            with open(bad_yaml, "w") as f:
                f.write("wrong_attr: []")
            with self.assertRaisesRegex(MigratorError,
                                        r"schema changes file '.+' must have a dict with these attributes:"):
                SchemaChanges.load(bad_yaml)
        # an unmodified template is rejected as empty
        pathname = self.empty_schema_changes.make_template()
        with self.assertRaisesRegex(MigratorError,
                                    r"schema changes file '.+' is empty \(an unmodified template\)"):
            SchemaChanges.load(pathname)
    def test_validate(self):
        """validate() should return no errors for a good file, and a
        specific error message for each malformed attribute."""
        # a good file validates cleanly (empty error list is falsy)
        schema_changes_file = os.path.join(self.fixtures_path, 'schema_changes',
                                           'good_schema_changes_2019-03.yaml')
        self.assertFalse(SchemaChanges.validate(SchemaChanges.load(schema_changes_file)))
        # a file with wrong attribute types produces one error per attribute
        schema_changes_file = os.path.join(self.fixtures_path, 'schema_changes',
                                           'bad_types_schema_changes_2019-03.yaml')
        schema_changes_kwargs = SchemaChanges.load(schema_changes_file)
        errors = SchemaChanges.validate(schema_changes_kwargs)
        self.assertTrue(any([re.search('commit_hash must be a str', e) for e in errors]))
        self.assertTrue(any([re.search('transformations_file must be a str', e) for e in errors]))
        self.assertTrue(any([re.search("renamed_models .* a list of pairs of strings, but is '.*'$", e)
                             for e in errors]))
        self.assertTrue(
            any([re.search('renamed_models .*list of pairs of strings, .* examining it raises', e)
                 for e in errors]))
        self.assertTrue(
            any([re.search("renamed_attributes.*list of pairs of pairs of strings, but is '.*'$", e)
                 for e in errors]))
        self.assertTrue(
            any([re.search("renamed_attributes.*list of.*but.*'.*',.*examining it raises.*error$", e)
                 for e in errors]))
        # a hash with the wrong length gets its own error
        schema_changes_file = os.path.join(self.fixtures_path, 'schema_changes',
                                           'short_hash_schema_changes_2019-03.yaml')
        schema_changes_kwargs = SchemaChanges.load(schema_changes_file)
        errors = SchemaChanges.validate(schema_changes_kwargs)
        self.assertRegex(errors[0], "commit_hash is '.*', which isn't the right length for a git hash")
    def test_generate_instance(self):
        """generate_instance() should build a SchemaChanges whose
        attributes match a valid YAML file, and raise on an invalid
        file."""
        with TemporaryDirectory() as temp_dir:
            good_yaml = os.path.join(temp_dir, 'good_yaml.yaml')
            with open(good_yaml, "w") as f:
                f.write(yaml.dump(self.test_data))
            schema_changes = SchemaChanges.generate_instance(good_yaml)
            # every attribute read from the file must round-trip
            for attr in SchemaChanges._CHANGES_FILE_ATTRS:
                self.assertEqual(getattr(schema_changes, attr), self.test_data[attr])
        schema_changes_file = os.path.join(self.fixtures_path, 'schema_changes',
                                           'bad_types_schema_changes_2019-03.yaml')
        with self.assertRaises(MigratorError):
            SchemaChanges.generate_instance(schema_changes_file)
    def test_str(self):
        """str(SchemaChanges) should mention every attribute name."""
        for attr in SchemaChanges._ATTRIBUTES:
            self.assertIn(attr, str(self.schema_changes))
def get_github_api_token():
    """Return the GitHub API token from the 'obj_tables' configuration."""
    return core.get_config()['obj_tables']['github_api_token']
# todo: move RemoteBranch to wc_utils.util.testing
class RemoteBranch(object):
    """ Context manager that makes (and normally deletes) a branch off master on `github.com/KarrLab`

    Creating and deleting branches on GitHub makes it easy to test changes to remote repos
    without permanently modifying them. For example,

    .. code-block:: python

        with RemoteBranch(repo_name, test_branch):
            # make some changes to branch `test_branch` of repo `repo_name`
            # clone the branch
            git_repo = GitRepo()
            git_repo.clone_repo_from_url(repo_url, branch=test_branch)
            # test properties of the repo
        # test_branch has been deleted
    """
    ORGANIZATION = 'KarrLab'

    def __init__(self, repo_name, branch_name, delete=True):
        """ Record the branch parameters and look up the repo's master head

        Args:
            repo_name (:obj:`str`): the name of an existing repo
            branch_name (:obj:`str`): the name of the new branch
            delete (:obj:`bool`, optional): if set, the new branch will be deleted upon exiting a
                `RemoteBranch` context manager; default=`True`
        """
        self.repo_name = repo_name
        self.branch_name = branch_name
        self.delete = delete
        gh_client = Github(get_github_api_token())
        self.repo = gh_client.get_repo("{}/{}".format(self.ORGANIZATION, repo_name))
        # branches are created from the current head of master
        self.head = self.repo.get_branch(branch="master").commit

    def make_branch(self):
        """ Create the branch on GitHub, pointing at master's head

        Returns:
            :obj:`github.GitRef.GitRef`: a ref to the new branch
        """
        ref_path = "refs/heads/{}".format(self.branch_name)
        self.branch_ref = self.repo.create_git_ref(ref_path, self.head.sha)
        return self.branch_ref

    def delete_branch(self):
        """ Remove the branch from GitHub """
        self.branch_ref.delete()

    def __enter__(self):
        """ Create the branch when entering the context

        Returns:
            :obj:`Github.`: a ref to the new branch
        """
        return self.make_branch()

    def __exit__(self, type, value, traceback):
        """ Delete the branch when leaving the context, unless `delete` is False """
        if self.delete:
            self.delete_branch()

    @staticmethod
    def unique_branch_name(branch_name):
        """ Suffix a branch name with a timestamp so concurrent test runs don't collide

        Args:
            branch_name (:obj:`str`): name of the new branch

        Returns:
            :obj:`str`: a branch name made unique by a timestamp suffix
        """
        return branch_name + '-' + SchemaChanges.get_date_timestamp()
@unittest.skipUnless(internet_connected(), "Internet not connected")
class TestRemoteBranch(unittest.TestCase):
    """Exercise the RemoteBranch context manager against a throw-away
    GitHub test repo."""

    def setUp(self):
        # create a wrapper for a fresh GitHub test repo
        self.branch_test_repo = 'branch_test_repo'
        self.branch_test_git_repo_for_testing = GitHubRepoForTests(self.branch_test_repo)

    def tearDown(self):
        # remove the GitHub test repo made by the test
        self.branch_test_git_repo_for_testing.delete_test_repo()

    @unittest.skip('Fixture must be updated due to changes to obj_tables')
    def test_remote_branch_utils(self):
        """RemoteBranch should create a branch, delete it on context exit
        by default, and keep it when delete=False."""
        self.branch_test_git_repo_for_testing.make_test_repo()
        test_branch = RemoteBranch.unique_branch_name('test_branch_x')
        remote_branch = RemoteBranch(self.branch_test_repo, test_branch)
        self.assertTrue(isinstance(remote_branch.make_branch(), github.GitRef.GitRef))
        self.assertFalse(remote_branch.delete_branch())
        with RemoteBranch(self.branch_test_repo, test_branch) as branch_ref:
            self.assertTrue(isinstance(branch_ref, github.GitRef.GitRef))
        # ensure that RemoteBranch context deletes branch on exit
        time.sleep(1.)
        with self.assertRaisesRegex(GithubException, "'Branch not found'"):
            remote_branch.repo.get_branch(branch=test_branch)
        with RemoteBranch(self.branch_test_repo, test_branch):
            pass
        time.sleep(1.)
        with self.assertRaisesRegex(GithubException, "'Branch not found'"):
            remote_branch.repo.get_branch(branch=test_branch)
        # with delete=False RemoteBranch context shouldn't delete branch on exit
        with RemoteBranch(self.branch_test_repo, test_branch, delete=False) as branch_ref:
            self.assertTrue(isinstance(branch_ref, github.GitRef.GitRef))
        self.assertTrue(isinstance(remote_branch.repo.get_branch(branch=test_branch), Branch.Branch))
@unittest.skipUnless(internet_connected(), "Internet not connected")
class TestGitRepo(AutoMigrationFixtures):
    @classmethod
    def setUpClass(cls):
        # fixture creation is delegated to AutoMigrationFixtures
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()

    def setUp(self):
        super().setUp()
        # convenience alias used by the tests below
        self.repo_root = self.test_repo.repo_dir
        # a well-formed git hash that does not identify any commit
        self.no_such_hash = 'ab34419496756675b6e8499e0948e697256f2699'
    @unittest.skip('Fixture must be updated due to changes to obj_tables')
    def test_init(self):
        """GitRepo() should wrap an existing clone, fail on a non-repo
        directory, optionally search parent directories, and clone a
        given branch from a URL."""
        self.assertIsInstance(self.test_repo.repo, git.Repo)
        self.assertEqual(self.repo_root, self.test_repo.repo_dir)
        self.assertEqual(self.test_repo.repo_url, self.test_repo_url)
        git_repo = GitRepo(self.test_repo.repo_dir)
        self.assertIsInstance(git_repo.repo, git.Repo)
        # a directory that is not a git repo must be rejected
        with self.assertRaisesRegex(MigratorError, "instantiating a git.Repo from directory '.+' failed"):
            GitRepo(self.tmp_dir)
        # test search_parent_directories
        git_repo_from_root = GitRepo(repo_location=self.git_migration_test_repo.repo_dir)
        repo_child_dir = os.path.join(self.git_migration_test_repo.repo_dir, 'obj_tables_test_migration_repo')
        git_repo_from_child = GitRepo(repo_location=repo_child_dir, search_parent_directories=True)
        self.assertEqual(git_repo_from_root.repo_dir, git_repo_from_child.repo_dir)
        # test branch
        test_branch = RemoteBranch.unique_branch_name('branch_for_git_repo_init')
        # make new branch
        with RemoteBranch(self.test_repo.repo_name(), test_branch):
            # clone the branch
            git_repo = GitRepo(self.test_repo_url, branch=test_branch)
            self.assertEqual(git_repo.branch, test_branch)
            self.assertEqual(git_repo.repo.active_branch.name, test_branch)
        # without search_parent_directories, a child dir is not a repo
        with self.assertRaisesRegex(MigratorError, "instantiating a git.Repo from directory '.+' failed"):
            GitRepo(repo_location=repo_child_dir, search_parent_directories=False)
def test_get_temp_dir(self):
self.assertTrue(os.path.isdir(self.totally_empty_git_repo.get_temp_dir()))
temp_git_repo = GitRepo()
temp_git_repo.clone_repo_from_url(self.test_repo_url)
dirs = [dir for dir in temp_git_repo.temp_dirs]
self.assertTrue(dirs)
temp_git_repo.del_temp_dirs()
for d in dirs:
self.assertFalse(os.path.isdir(d))
@unittest.skip('Fixture must be updated due to changes to obj_tables')
def test_clone_repo_from_url(self):
repo, dir = self.totally_empty_git_repo.clone_repo_from_url(self.test_repo_url)
self.assertIsInstance(repo, git.Repo)
self.assertTrue(os.path.isdir(os.path.join(dir, '.git')))
repo, dir = self.totally_empty_git_repo.clone_repo_from_url(self.test_repo_url,
directory=self.make_tmp_dir())
self.assertTrue(os.path.isdir(os.path.join(dir, '.git')))
# test branch
test_branch = RemoteBranch.unique_branch_name('branch_for_test_clone_repo_from_url')
# make new branch
with RemoteBranch(self.test_repo.repo_name(), test_branch):
# clone the branch
git_repo = GitRepo()
repo, directory = git_repo.clone_repo_from_url(self.test_repo_url, branch=test_branch)
self.assertEqual(git_repo.branch, test_branch)
self.assertTrue(os.path.isdir(directory))
# cloning branch that no longer exists should raise exception
with self.assertRaisesRegex(MigratorError, "repo cannot be cloned from"):
git_repo.clone_repo_from_url(self.test_repo_url, branch=test_branch)
bad_dir = '/asdfdsf/no such dir'
with self.assertRaisesRegex(MigratorError, "'.+' is not a directory"):
self.totally_empty_git_repo.clone_repo_from_url(self.test_repo_url, directory=bad_dir)
bad_url = 'http://www.ibm.com/nothing_here'
with self.assertRaisesRegex(MigratorError, "repo cannot be cloned from '.+'"):
self.totally_empty_git_repo.clone_repo_from_url(bad_url)
# todo: put in wc_util with unittests
@staticmethod
def are_dir_trees_equal(dir1, dir2, ignore=None):
""" Compare two directories recursively
Files in directories being compared are considered equal if their names and contents are equal.
Based on https://stackoverflow.com/a/6681395.
Args:
dir1 (:obj:`str`): path of left (first) directory
dir2 (:obj:`str`): path of right (second) directory
ignore (:obj:`list`, optional): passed as the `ignore` argument to `filecmp.dircmp()`
Returns:
:obj:`bool`: True if `dir1` and `dir2` are the same and no exceptions were raised while
accessing the directories and files; false otherwise.
"""
dirs_cmp = filecmp.dircmp(dir1, dir2, ignore=ignore)
if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files:
return False
_, mismatch, errors = filecmp.cmpfiles(dir1, dir2, dirs_cmp.common_files, shallow=False)
if mismatch or errors:
return False
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
if not TestGitRepo.are_dir_trees_equal(new_dir1, new_dir2, ignore=ignore):
return False
return True
    def test_copy(self):
        """ Test that `copy` duplicates a repo's content into a different directory """
        repo_copy = self.git_migration_test_repo.copy()
        # the copy matches the original's head and URL, but lives in a different dir
        self.assertEqual(repo_copy.latest_hash(), self.git_migration_test_repo.latest_hash())
        self.assertEqual(repo_copy.repo_url, self.git_migration_test_repo.repo_url)
        self.assertNotEqual(repo_copy.migrations_dir(), self.git_migration_test_repo.migrations_dir())
        self.assertTrue(TestGitRepo.are_dir_trees_equal(self.git_migration_test_repo.repo_dir,
                                                        repo_copy.repo_dir, ignore=[]))
        repo_copy = self.git_migration_test_repo.copy(tmp_dir=self.make_tmp_dir())
        self.assertEqual(repo_copy.latest_hash(), self.git_migration_test_repo.latest_hash())
        # checkout an earlier version of the repo
        repo_copy.checkout_commit(self.migration_test_repo_known_hash)
        self.assertNotEqual(repo_copy.latest_hash(), self.git_migration_test_repo.latest_hash())
        # a copy of the checked-out copy matches it, not the original repo
        repo_copy_copy = repo_copy.copy()
        self.assertEqual(repo_copy.latest_hash(), repo_copy_copy.latest_hash())
        self.assertNotEqual(repo_copy.migrations_dir(), repo_copy_copy.migrations_dir())
        self.assertTrue(TestGitRepo.are_dir_trees_equal(repo_copy.repo_dir, repo_copy_copy.repo_dir, ignore=[]))
        # error cases: bad destination dir, and copying an empty GitRepo
        with self.assertRaisesRegex(MigratorError, "is not a directory"):
            repo_copy.copy(tmp_dir='not a directory')
        empty_git_repo = GitRepo()
        with self.assertRaisesRegex(MigratorError, "cannot copy an empty GitRepo"):
            empty_git_repo.copy()
def test_migrations_dir(self):
self.assertTrue(os.path.isdir(self.test_repo.migrations_dir()))
self.assertEqual(os.path.basename(self.test_repo.migrations_dir()),
DataSchemaMigration._MIGRATIONS_DIRECTORY)
def test_fixtures_dir(self):
self.assertTrue(os.path.isdir(self.test_repo.fixtures_dir()))
self.assertEqual(os.path.basename(self.test_repo.fixtures_dir()), 'fixtures')
def test_repo_name(self):
self.assertEqual(self.test_repo.repo_name(), 'obj_tables_test_schema_repo')
empty_git_repo = GitRepo()
self.assertEqual(empty_git_repo.repo_name(), GitRepo._NAME_UNKNOWN)
tmp_git_repo = GitRepo(self.test_repo.repo_dir)
self.assertIsInstance(tmp_git_repo.repo_name(), str)
    def test_head_commit(self):
        """ `head_commit` must return the repo's head as a git Commit object """
        self.assertIsInstance(self.test_repo.head_commit(), git.objects.commit.Commit)
def test_latest_hash(self):
commit_hash = self.nearly_empty_git_repo.latest_hash()
self.assertIsInstance(commit_hash, str)
self.assertEqual(len(commit_hash), 40)
self.assertEqual(commit_hash, self.nearly_empty_git_repo.repo.head.commit.hexsha)
    def test_get_commit(self):
        """ `get_commit` accepts a hash or a commit, and rejects anything else """
        commit = self.test_repo.get_commit(self.known_hash)
        self.assertIsInstance(commit, git.objects.commit.Commit)
        # passing a commit returns an equal commit
        self.assertEqual(commit, self.test_repo.get_commit(commit))
        # errors: an unknown hash, and an argument of the wrong type
        with self.assertRaisesRegex(MigratorError, "commit_or_hash .* cannot be converted into a commit"):
            self.test_repo.get_commit(self.no_such_hash)
        with self.assertRaisesRegex(MigratorError, "commit_or_hash .* cannot be converted into a commit"):
            self.test_repo.get_commit(1)
def test_get_commits(self):
self.assertEqual(self.test_repo.get_commits([]), [])
commit = self.test_repo.get_commit(self.known_hash)
commits = self.test_repo.get_commits([self.known_hash, self.known_hash])
self.assertEqual(commits, [commit, commit])
# errors
with self.assertRaisesRegex(MigratorError, "No commit found for .+"):
self.test_repo.get_commits([self.known_hash, self.no_such_hash, self.no_such_hash])
| |
of the results,
this method will return an empty list.
Optional parameter:
* `page` – request a specific page from the results set
'''
if self.total_results == '20,000+':
return {'total_results': self.total_results, 'page': None, 'number_of_results': 0, 'results': [], 'error': 'Your search returns too many results.'}
if page:
self.page = page
else:
self.page += 1
if self.page <= self.total_pages:
results = self.search(**self.params, **self.kwargs)
else:
# No more data, so return an empty list
results = {'total_results': self.total_results, 'page': None, 'number_of_results': 0, 'results': []}
return results
def process_list(self, details):
results = []
retrieved = arrow.now(tz='Australia/Sydney').isoformat()
for row in details.find_all('tr')[1:]:
record = self.process_row(row)
record['retrieved'] = retrieved
results.append(record)
return results
def process_page(self, soup, record_detail):
# Do something
return []
def get_total_results(self, soup):
total = 0
if soup.find(id='ContentPlaceHolderSNR_lblToManyRecordsError') is not None:
total = '20,000+'
elif soup.find('span', attrs={'id': re.compile('lblDisplaying$')}) is not None:
total_text = soup.find('span', attrs={'id': re.compile('lblDisplaying$')}).text
total = int(re.search(r'of (\d+)', total_text).group(1))
elif soup.find('span', text='Displaying 1 of 1'):
total = 1
return total
def refresh_cache(self):
'''
Delete data for this search from the cache, then retrieve a fresh version from RecordSearch.
'''
cache_key = self.generate_cache_key()
cache_key = re.sub('_page_\d+$', '', cache_key)
for key in cache_db.keys():
if cache_key in key:
del cache_db[key]
self.page = 0
self.initialise_search()
def generate_cache_key(self):
'''
Use the search parameters to generate a key to use in storing the cached results.
'''
params = self.params.copy()
params.update(self.kwargs)
search_key = '_'.join(sorted([f'{<KEY>, v in params.items() if v is not None]))
search_key = f'{self.entity_type}_{search_key}_page_{self.page}'
return search_key
    def search(self, results_per_page=None, sort=None, record_detail='brief', **kwargs):
        '''
        Return one page of results, preferring the cache over scraping RecordSearch.

        Returns a dict with 'total_results', 'page', 'number_of_results',
        'results', and 'retrieved' keys.
        '''
        # Generate key to use with cache
        cache_key = self.generate_cache_key()
        # Try to get results from cache first
        try:
            results = cache_db[cache_key]
        except KeyError:
            # Not cached, so scrape this page of results from RecordSearch
            # Set the number of results per page (20 appears to be the site default — confirm)
            if results_per_page != 20:
                search_form = self.browser.select_form('#formSNRMaster')
                search_form.set('ctl00$ContentPlaceHolderSNR$ddlResultsPerPage', results_per_page)
                submit_button = self.browser.page.find(id='ContentPlaceHolderSNR_btnSearch')
                self.browser.submit_selected()
            # Apply sort
            if sort:
                r = self.browser.open(f'{self.browser.url}?sort={sort}')
            # Retrieve a specific page in the results set
            # NOTE(review): the site's page query parameter looks 0-based (page-1) — confirm
            if self.page > 1:
                url = self.browser.url.split('?')[0]
                self.browser.open(f'{url}?page={self.page-1}')
            # Get item details from list of search results
            data = self.process_page(self.browser.page, record_detail)
            results = {
                'total_results': self.total_results,
                'page': self.page,
                'number_of_results': len(data),
                'results': data,
                'retrieved': arrow.now(tz='Australia/Sydney').isoformat()
            }
            # store this page of results for later requests
            cache_db[cache_key] = results
        #self.page = results['page']
        return results
    def initialise_search(self):
        '''
        Populates the search form and retrieves the total number of results.

        Sets `self.total_results` and, when the result count is numeric,
        `self.total_pages`.
        '''
        # Start a session
        self.get_url('https://recordsearch.naa.gov.au/scripts/Logon.asp?N=guest')
        # Find the main advanced search link
        main_link = self.browser.find_link(url_regex='AdvSearchMain.aspx')
        self.browser.follow_link(main_link)
        # Find the advanced search link for this entity
        search_link = self.browser.find_link(url_regex=self.search_page)
        self.browser.follow_link(search_link)
        # Get the submit button for the search form
        submit_button = self.browser.page.find(id='ContentPlaceHolderSNR_btnSearch')
        # Get the search form
        search_form = self.browser.select_form('#formSNRMaster')
        # Populate the search form with the supplied params, mapping each kwarg
        # to its form-field id via search_params
        for key, value in self.kwargs.items():
            search_form.set(self.search_params[key]['id'], value)
        # Submit the form
        search_form.choose_submit(submit_button)
        self.browser.submit_selected()
        # There's a 'search is running' page that has a form that needs to be submitted.
        running_form = self.browser.select_form('#Form1')
        self.browser.submit_selected()
        # Save the total number of results
        self.total_results = self.get_total_results(self.browser.page)
        # Calculate the number of pages in the results set
        try:
            self.total_pages = math.ceil(self.total_results / self.params['results_per_page'])
        except TypeError:
            # More than 20,000 results: total_results is the string '20,000+',
            # so the division raises TypeError and total_pages is left unset
            pass
# Cell
class RSItem(RSEntity):
    '''
    Class used for extracting data about an individual item (usually a file, but can be a volume, box, photograph etc) from RecordSearch.

    You need to supply one of the following parameters:

    * `identifier` – the Item ID (aka barcode)
    * `details` – the BeautifulSoup HTML element containing the item details

    You'd only use `details` if you already have a RecordSearch page and want to extract item data from it.
    (There's an example of this in the `RSItemSearch` class.)

    The item data is obtained by accessing the item's `.data` attribute.
    '''
    entity_type = 'item'

    def __init__(self, identifier=None, cache=True, details=None):
        super(RSItem, self).__init__(identifier, cache)
        self.details = details
        if details:
            # item supplied as scraped HTML, so read its id out of the details
            self.identifier = self.get_value('Item ID')
        self.data = self.get_item()

    def get_series(self):
        # the series number is rendered as a link inside its table cell
        cell = self.get_cell('Series number')
        return cell.find('a').string.strip()

    def get_access_reasons(self):
        '''
        Extract the list of reasons why material has been withheld after access examination.
        '''
        cell = self.get_cell('Reason for restriction')
        reasons = []
        if cell:
            # each reason is a separate link in the cell
            for link in cell.find_all('a'):
                reasons.append(link.string.strip())
        return reasons

    def get_digitised_pages(self):
        '''
        Returns the number of pages (images) in a digitised file.

        This is scraped from the RecordSearch digitised file viewer.
        The file viewer is outside of RecordSearch's session system, so it can be requested directly.
        '''
        url = f'https://recordsearch.naa.gov.au/SearchNRetrieve/Interface/ViewImage.aspx?B={self.identifier}'
        response = s.get(url, timeout=30)
        soup = BeautifulSoup(response.text, features='lxml')
        try:
            # The last page number from the navigation will be the total number of pages
            pages = int(soup.find('span', attrs={'id': "lblEndPage"}).string)
        except AttributeError:
            # If there's no navigation it might be a single page
            if soup.find('span', attrs={'id': "lblCitation"}):
                pages = 1
            # Or something else...
            else:
                pages = 0
        return pages

    def check_if_digitised(self):
        '''
        Check to see if the file is digitised, by looking for a link to the digital copy.
        '''
        if self.details.find(text=re.compile("View digital copy")):
            return True
        else:
            return False

    def get_item(self, date_format='iso'):
        '''
        Assemble the item's data, preferring the cache over scraping RecordSearch.

        Returns a dict of item fields, or a dict with an 'error' key when the
        item cannot be found.
        '''
        # NOTE(review): the date_format parameter is currently unused
        # Try to retrieve from cache first
        try:
            item = cache_db[f'item_{self.identifier}']
        # If not in the cache and the details are not supplied, get it from RS.
        except KeyError:
            if not self.details:
                self.get_entity_page()
                self.details = self.get_details()
            if self.details:
                item = {
                    'title': self.get_value('Title'),
                    'identifier': self.identifier,
                    'series': self.get_series(),
                    'control_symbol': self.get_value('Control symbol'),
                    'digitised_status': self.check_if_digitised(),
                    'digitised_pages': self.get_digitised_pages(),
                    'access_status': self.get_value('Access status'),
                    'access_decision_reasons': self.get_access_reasons(),
                    'location': self.get_value('Location'),
                    'retrieved': arrow.now(tz='Australia/Sydney').isoformat()
                }
                # add formatted dates for the contents range and the access decision
                item.update(self.get_formatted_dates('Contents date range', 'contents_'))
                item.update(self.get_formatted_date('Date of decision', 'access_decision_'))
                if self.cache:
                    # Add to the cache
                    cache_db[f'item_{self.identifier}'] = item
            else:
                item = {'identifier': self.identifier, 'error': 'Item not found'}
        return item

    def __repr__(self):
        # render as "NAA: <series>, <control symbol>"
        return f'NAA: {self.data["series"]}, {self.data["control_symbol"]}'
# Cell
class RSItemSearch(RSSearch):
    '''
    Search for items in RecordSearch.

    Supply any of the item search parameters as kwargs to initialise the search.

    Optional parameters:

    * `results_per_page` (default: 20)
    * `sort` (default: 9)
    * `page` – to retrieve a specific page of results
    * `record_detail` – amount of detail to include, options are:
        * 'brief' (default) – just the info in the search results
        * 'digitised' – add the number of pages if the file is digitised (slower)
        * 'full' – get the full individual record for each result (slowest)

    To access a page of results, use the `.get_results()` method.
    This method increments the results page, so you can call it in a loop
    to retrieve the complete result set.

    Useful attributes:

    * `.total_results` – the total number of results in the results set
    * `.total_pages` – the total number of result pages
    * `.kwargs` – a dict containing the supplied search parameters
    * `.params` – a dict containing the values of the optional parameters
    '''
    entity_type = 'item'
    search_params = ITEM_FORM
    search_page = 'AdvSearchItems.aspx'
    entity = RSItem

    def __init__(self, results_per_page=20, sort=9, record_detail='brief', **kwargs):
        super(RSItemSearch, self).__init__(results_per_page=results_per_page, sort=sort, record_detail=record_detail, **kwargs)

    def process_row(self, row):
        '''
        Extract an item record from one row of the search results table.
        '''
        cells = row.find_all('td')
        item = {
            'series': cells[1].string.strip(),
            'control_symbol': cells[2].string.strip(),
            'title': cells[3].contents[0].string.strip(),
            'identifier': cells[6].string.strip()
        }
        # access status and location are embedded in divs inside the title cell
        access_string = cells[3].find('div', 'CombinedTitleBottomLeft').string
        item['access_status'] = re.search(r'Access status: ([\w ]+)', access_string).group(1).strip()
        location_string = cells[3].find('div', 'CombinedTitleBottomRight').string
        item['location'] = re.search(r'Location: ([\w ]+)', location_string).group(1).strip()
        date_str = cells[4].string.strip()
        item.update(self.process_date_string(date_str, 'contents_'))
        # a link in the digitised column indicates a digital copy exists
        item['digitised_status'] = cells[5].find('a') is not None
        return item

    def get_digitised_page_counts(self, items):
        '''
        Add a 'digitised_pages' count to each item; 0 when the item is not digitised.
        '''
        for item in items:
            # truthiness test instead of the previous `== True` comparison
            if item['digitised_status']:
                item['digitised_pages'] = RSItem(item['identifier']).get_digitised_pages()
            else:
                item['digitised_pages'] = 0
        return items

    def process_page(self, soup, record_detail):
        '''
        Extract item data from a search results page.

        Level of item data can be varied using the `record_detail` parameter:

        * 'brief' – just the data in the search results
        * 'digitised' – if the file is digitised, get the number of pages
        * 'full' – retrieve the individual item record to get extra fields
        '''
        # There's a list of items
        if details := soup.find(id=re.compile('tblItemDetails$')):
            items = self.process_list(details)
        # There's a single item
        elif soup.find(id=re.compile('ContentPlaceHolderSNR_ucItemDetails_phDetailsView')) is not None:
            details = soup.find('div', 'detailsTable')
            items = [self.entity(details=details).data]
        # No items?
        else:
            raise Exception('No results found on page!')
        # Add number of pages in digitised files
        if record_detail == 'digitised':
            items = self.get_digitised_page_counts(items)
        # Get full item information
        if record_detail == 'full':
            items = self.get_full_details(items)
        return items
# Cell
class RSSeries(RSEntity):
'''
Class used for | |
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import collections
import ctypes
import email
import getpass
import io
import itertools
import optparse
import os
import platform
import re
import shlex
import shutil
import socket
import struct
import subprocess
import sys
import xml.etree.ElementTree
# Python 2/3 compatibility shims: each try block binds the Python 3 module
# under a compat_* alias, falling back to the Python 2 equivalent on ImportError.
try:
    import urllib.request as compat_urllib_request
except ImportError:  # Python 2
    import urllib2 as compat_urllib_request
try:
    import urllib.error as compat_urllib_error
except ImportError:  # Python 2
    import urllib2 as compat_urllib_error
try:
    import urllib.parse as compat_urllib_parse
except ImportError:  # Python 2
    import urllib as compat_urllib_parse
try:
    from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError:  # Python 2
    from urlparse import urlparse as compat_urllib_parse_urlparse
try:
    import urllib.parse as compat_urlparse
except ImportError:  # Python 2
    import urlparse as compat_urlparse
try:
    import urllib.response as compat_urllib_response
except ImportError:  # Python 2
    import urllib as compat_urllib_response
try:
    import http.cookiejar as compat_cookiejar
except ImportError:  # Python 2
    import cookielib as compat_cookiejar
if sys.version_info[0] == 2:
    # On Python 2, encode text-typed (compat_str) cookie names and values to
    # byte strings before delegating to the stock cookielib Cookie
    class compat_cookiejar_Cookie(compat_cookiejar.Cookie):
        def __init__(self, version, name, value, *args, **kwargs):
            if isinstance(name, compat_str):
                name = name.encode()
            if isinstance(value, compat_str):
                value = value.encode()
            compat_cookiejar.Cookie.__init__(self, version, name, value, *args, **kwargs)
else:
    # Python 3's Cookie handles text natively, so no wrapper is needed
    compat_cookiejar_Cookie = compat_cookiejar.Cookie
try:
    import http.cookies as compat_cookies
except ImportError:  # Python 2
    import Cookie as compat_cookies
if sys.version_info[0] == 2:
    # On Python 2, coerce unicode rawdata to str before SimpleCookie parses it
    class compat_SimpleCookie(compat_cookies.SimpleCookie):
        def load(self, rawdata):
            if isinstance(rawdata, unicode):
                rawdata = str(rawdata)
            return super(compat_SimpleCookie, self).load(rawdata)
else:
    compat_SimpleCookie = compat_cookies.SimpleCookie
try:
    import html.entities as compat_html_entities
except ImportError:  # Python 2
    import htmlentitydefs as compat_html_entities
try: # Python >= 3.3
compat_html_entities_html5 = compat_html_entities.html5
except AttributeError:
# Copied from CPython 3.5.1 html/entities.py
compat_html_entities_html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': | |
<filename>venv/lib/python3.7/site-packages/MDAnalysis/topology/guessers.py
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Guessing unknown Topology information --- :mod:`MDAnalysis.topology.guessers`
=============================================================================
In general `guess_atom_X` returns the guessed value for a single value,
while `guess_Xs` will work on an array of many atoms.
"""
from __future__ import absolute_import
import numpy as np
import warnings
import re
from ..lib import distances
from . import tables
def guess_masses(atom_types):
    """Guess the mass of many atoms based upon their type

    Parameters
    ----------
    atom_types
        Type of each atom

    Returns
    -------
    atom_masses : np.ndarray dtype float64
    """
    validate_atom_types(atom_types)
    # np.fromiter builds the float64 array directly from the generator
    return np.fromiter((get_atom_mass(atom_t) for atom_t in atom_types),
                       dtype=np.float64)
def validate_atom_types(atom_types):
    """Validates the atom types based on whether they are available in our tables

    Emits a warning for every unique atom type that has no mass entry,
    trying the uppercased type name as a fallback.

    Parameters
    ----------
    atom_types
        Type of each atom

    Returns
    -------
    None

    .. versionchanged:: 0.20.0
       Try uppercase atom type name as well
    """
    for atom_type in np.unique(atom_types):
        if atom_type in tables.masses:
            continue
        if atom_type.upper() in tables.masses:
            continue
        warnings.warn("Failed to guess the mass for the following atom types: {}".format(atom_type))
def guess_types(atom_names):
    """Guess the atom type of many atoms based on atom name

    Parameters
    ----------
    atom_names
        Name of each atom

    Returns
    -------
    atom_types : np.ndarray dtype object
    """
    guessed = list(map(guess_atom_element, atom_names))
    return np.array(guessed, dtype=object)
def guess_atom_type(atomname):
    """Guess atom type from the name.

    Currently a thin alias: the guessed type is simply the element as
    returned by :func:`guess_atom_element`.

    See Also
    --------
    :func:`guess_atom_element`
    :mod:`MDAnalysis.topology.tables`
    """
    return guess_atom_element(atomname)
# Pre-compiled patterns used by guess_atom_element() to strip decorations
# from atom names before the element lookup.
NUMBERS = re.compile(r'[0-9]')  # match numbers
SYMBOLS = re.compile(r'[\*\+\-]')  # match *, +, -
def guess_atom_element(atomname):
    """Guess the element of the atom from the name.

    Looks in dict to see if element is found, otherwise it uses the first
    character in the atomname. The table comes from CHARMM and AMBER atom
    types, where the first character is not sufficient to determine the atom
    type. Some GROMOS ions have also been added.

    .. Warning: The translation table is incomplete. This will probably result
                in some mistakes, but it still better than nothing!

    See Also
    --------
    :func:`guess_atom_type`
    :mod:`MDAnalysis.topology.tables`
    """
    if atomname == '':
        return ''
    upper_name = atomname.upper()
    if upper_name in tables.atomelements:
        return tables.atomelements[upper_name]
    # drop *, +, - decorations, then digits, then retry the lookup
    stripped = SYMBOLS.sub('', atomname)
    candidate = NUMBERS.sub('', stripped).upper()
    if candidate in tables.atomelements:
        return tables.atomelements[candidate]
    # progressively shorten from the right, also checking one-character
    # trims on either side against the known element symbols
    while candidate:
        if candidate in tables.elements:
            return candidate
        if candidate[:-1] in tables.elements:
            return candidate[:-1]
        if candidate[1:] in tables.elements:
            return candidate[1:]
        if len(candidate) <= 2:
            return candidate[0]
        candidate = candidate[:-1]
    # name was all digits: fall back to the symbol-stripped form
    return stripped
def guess_bonds(atoms, coords, box=None, **kwargs):
    r"""Guess if bonds exist between two atoms based on their distance.

    A bond between two atoms is created if the two atoms are within

    .. math::

          d < f \cdot (R_1 + R_2)

    of each other, where :math:`R_1` and :math:`R_2` are the VdW radii
    of the atoms and :math:`f` is an ad-hoc *fudge_factor*. This is
    the `same algorithm that VMD uses`_.

    Parameters
    ----------
    atoms : AtomGroup
        atoms for which bonds should be guessed
    coords : array
        coordinates of the atoms (i.e., `AtomGroup.positions)`)
    fudge_factor : float, optional
        The factor by which atoms must overlap each other to be considered
        a bond. Larger values will increase the number of bonds found. [0.55]
    vdwradii : dict, optional
        To supply custom vdwradii for atoms in the algorithm. Must be a dict
        of format {type:radii}. Any user-defined vdwradii passed as an
        argument supersede the default table values. [``None``]
    lower_bound : float, optional
        The minimum bond length; shorter pairs are ignored (useful for PDB
        altloc records). [0.1]
    box : array_like, optional
        If unit cell information is given, periodic boundary conditions are
        considered in the distance search. [``None``]

    Returns
    -------
    tuple
        Tuple of index pairs suitable for use in Universe topology building.

    Raises
    ------
    :exc:`ValueError` if inputs are malformed or `vdwradii` data is missing.

    .. _`same algorithm that VMD uses`:
       http://www.ks.uiuc.edu/Research/vmd/vmd-1.9.1/ug/node26.html

    .. versionadded:: 0.7.7
    .. versionchanged:: 0.9.0
       Updated method internally to use more :mod:`numpy`; *vdwradii*
       argument now augments table list rather than replacing entirely.
    """
    if len(atoms) != len(coords):
        raise ValueError("'atoms' and 'coord' must be the same length")

    fudge_factor = kwargs.get('fudge_factor', 0.55)

    # copy the default table so user-supplied radii never mutate it
    vdwradii = tables.vdwradii.copy()
    user_vdwradii = kwargs.get('vdwradii', None)
    if user_vdwradii:
        vdwradii.update(user_vdwradii)

    atomtypes = atoms.types
    # every type present must have a radius, otherwise bail out early
    if not all(val in vdwradii for val in set(atomtypes)):
        missing = [t for t in set(atomtypes) if not t in vdwradii]
        raise ValueError(("vdw radii for types: " +
                          ", ".join(missing) +
                          ". These can be defined manually using the" +
                          " keyword 'vdwradii'"))

    lower_bound = kwargs.get('lower_bound', 0.1)

    if box is not None:
        box = np.asarray(box)

    # largest possible bond length bounds the pair search radius,
    # letting the capped distance search discard most pairs cheaply
    max_vdw = max(vdwradii[t] for t in atomtypes)

    pairs, dist = distances.self_capped_distance(coords,
                                                 max_cutoff=2.0 * max_vdw,
                                                 min_cutoff=lower_bound,
                                                 box=box)
    bonds = []
    for k, (i, j) in enumerate(pairs):
        cutoff = (vdwradii[atomtypes[i]] + vdwradii[atomtypes[j]]) * fudge_factor
        if dist[k] < cutoff:
            bonds.append((atoms[i].index, atoms[j].index))
    return tuple(bonds)
def guess_angles(bonds):
    """Given a list of Bonds, find all angles that exist between atoms.

    Works by assuming that if atoms 1 & 2 are bonded, and 2 & 3 are bonded,
    then (1,2,3) must be an angle.

    Returns
    -------
    tuple of tuples
        Index triples defining the angles; suitable for use in u._topology.

    See Also
    --------
    :meth:`guess_bonds`

    .. versionadded 0.9.0
    """
    found = set()

    for bond in bonds:
        for middle in bond:
            first = bond.partner(middle)
            for neighbour_bond in middle.bonds:
                if neighbour_bond != bond:  # skip the bond we started from
                    last = neighbour_bond.partner(middle)
                    triple = (first.index, middle.index, last.index)
                    # canonical order: first index always less than last
                    if triple[0] > triple[-1]:
                        triple = triple[::-1]
                    found.add(triple)

    return tuple(found)
def guess_dihedrals(angles):
    """Given a list of Angles, find all dihedrals that exist between atoms.

    Works by assuming that if (1,2,3) is an angle, and 3 & 4 are bonded,
    then (1,2,3,4) must be a dihedral.

    Returns
    -------
    tuple of tuples
        Index quadruples defining the dihedrals; suitable for use in
        u._topology.

    .. versionadded 0.9.0
    """
    found = set()

    for angle in angles:
        indices = tuple(a.index for a in angle)
        # extend from either end of the angle; when extending from the
        # first atom the prefix must be reversed so the new atom appends
        for end_atom, prefix in ((angle.atoms[0], indices[::-1]),
                                 (angle.atoms[-1], indices)):
            for bond in end_atom.bonds:
                if not bond.partner(end_atom) in angle:
                    extra = bond.partner(end_atom)
                    quad = prefix + (extra.index,)
                    # canonical order: first index always less than last
                    if quad[0] > quad[-1]:
                        quad = quad[::-1]
                    found.add(quad)

    return tuple(found)
def guess_improper_dihedrals(angles):
"""Given a list of Angles, find all improper dihedrals that exist between
atoms.
Works by assuming that if (1,2,3) is an angle, | |
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
# Conan 1.29.1 shipped the same default settings.yml as 1.29.0, so this
# constant is an alias rather than a separate copy.
settings_1_29_1 = settings_1_29_0
settings_1_29_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_30_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
# Conan 1.30.1 shipped the same default settings.yml as 1.30.0, so this
# constant is an alias rather than a separate copy.
settings_1_30_1 = settings_1_30_0
settings_1_30_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, | |
self.is_cli)
self.print_err = partial(print_err, view = self.is_cli)
## Check paths images.
if self.paths_images and isinstance(self.paths_images, list):
self.check_output()
self.remind = {}
self.all_icocur_written = {}
groups = zip(self.paths_images, self.paths_icocur, self.names_icocur, self.formats_icocur, self.hotspots)
for indx, (path_image, self.path_icocur, name, frmt, hotspot) in enumerate(groups):
self.print_std('#' * 80)
no_err, paths = True, []
if isinstance(path_image, list):
if not path_image:
no_err = False
message = "Input error: image file/directory path/s missing."
else:
for imapath in path_image:
if not isinstance(imapath, str):
no_err = False
message = "Input error: image file/directory path '%s' not a string." %imapath
else:
if isfile(imapath):
paths.append(imapath)
else:
if isdir(imapath):
paths.extend([join(imapath, file) for file in listdir(imapath)])
else:
no_err = False
message = "Input error: file/directory '%s' not found." %imapath
if len(paths) > 1:
if frmt == '.cur':
if name != "":
no_err = False
message = "Input error: can't create multi-size '.cur'."
else:
# eventually remove duplicate jobs.
paths = list(set(paths))
elif frmt == '.ico':
if name == "":
name = 'multi'
else:
no_err = False
message = "Input error: image file/directory path/s not a list of lists."
## Do job.
if name != "":
self.add_name2path(name, frmt, indx)
if not no_err:
if name == "":
self.add_name2path('noname', frmt, indx)
self.add_errors(message)
else:
self.work(paths, name, frmt, hotspot)
else:
print_err("Input error: image file/directory path/s not a list of lists.")
def convert_8bit_to_4bit(self, bits_8):
    """ Converts 8-bit image data to 4-bit.

    Pairs of input bytes are packed into single output bytes: the high
    nibble is taken from the first byte of each pair and the low nibble
    from the second.  Odd-length input raises IndexError.
    """
    return bytes(
        (bits_8[pos] & 0b11110000) | (bits_8[pos + 1] & 0b00001111)
        for pos in range(0, len(bits_8), 2)
    )
def convert_8bit_to_2bit(self, bits_8):
    """ Converts 8-bit image data to 2-bit.

    Each output byte is assembled by taking a different 2-bit slice
    (from most to least significant) out of four consecutive input
    bytes.  Input whose length is not a multiple of four raises
    IndexError.
    """
    masks = (0b11000000, 0b00110000, 0b00001100, 0b00000011)
    packed = bytearray()
    for pos in range(0, len(bits_8), 4):
        value = 0
        for offset, mask in enumerate(masks):
            value |= bits_8[pos + offset] & mask
        packed.append(value)
    return bytes(packed)
def convert_16bit_to_8bit(self, bits_16):
    """ Converts 16-bit image data to 8-bit """
    # NOTE(review): unimplemented stub -- always returns None.  No caller
    # in this file uses it; implement (presumably by keeping the most
    # significant byte of each 16-bit sample) or remove once confirmed
    # unused.
    pass
def get_bgra(self, image, pad):
    """ Gets image data for RGBA.

    Returns the raw bottom-up BGRA bytes of `image`.  Some earlier PIL
    versions raise SystemError for the direct 'BGRA' raw encoder; in
    that case the channels are reordered manually and the dump retried.
    """
    try:
        return image.tobytes('raw', 'BGRA', pad, -1)
    except SystemError:
        # workaround for earlier versions: swap R and B by hand.
        red, green, blue, alpha = image.split()
        swapped = Image.merge('RGBA', (blue, green, red, alpha))
        return swapped.tobytes('raw', 'BGRA', pad, -1)
def extract(self, path):
    """ Gets parameters input image.

    Opens the image at `path`, re-encodes it in-memory as PNG and reads
    the PNG header to determine bit depth and colour type.  Fills
    ``self.parameters`` ('bWidth', 'bHeight', 'wBitCount') and
    ``self.mode``, and appends a per-image summary dict to
    ``self.all_icocur_written[self.path_icocur]``.

    path: path of the input image file.
    Returns the PIL image re-opened from the in-memory PNG stream.
    Raises EncodeErr(code = 1) when the image cannot be decoded or
    re-encoded as PNG, EncodeErr(code = 2) when the PNG colour type does
    not match the PIL mode.
    """
    ## Open image in-memory as '.png'.
    _, ext = splitext(path)
    try:
        image = Image.open(path, 'r')
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        raise EncodeErr(code = 1, msg = "Image error: format '%s' not recognized or corrupted." %ext)
    imagebyte = BytesIO()
    try:
        image.save(imagebyte, format = 'PNG')
    except Exception:
        # narrowed from a bare `except:` for the same reason as above.
        raise EncodeErr(code = 1, msg = "Image error: format '%s' not recognized or corrupted." %ext)
    dataimage = imagebyte.getvalue()
    image = Image.open(imagebyte)
    self.parameters['bWidth'], self.parameters['bHeight'] = image.size
    self.mode = image.mode
    ## PNG color type represent sums of this values: 1 (palette used), 2 (color used) and 4 (alpha channel used)
    ## Color Option    - Channels -    Bits per channel -    Bits per pixel -   Color type - Interpretation
    ## indexed             1             1,2,4,8               1,2,4,8            3          each pixel is a palette index
    ## grayscale           1             1,2,4,8,16            1,2,4,8,16         0          each pixel is a grayscale sample
    ## grayscale+alpha     2             8,16                  16,32              4          each pixel is a grayscale sample followed by an alpha sample
    ## truecolor           3             8,16                  24,48              2          each pixel is an R,G,B triple
    ## truecolor+alpha     4             8,16                  32,64              6          each pixel is an R,G,B triple followed by an alpha sample
    # read just the fixed-size PNG signature + IHDR prefix; bytes 24-25
    # hold the bit depth and colour type fields.
    with open(path, 'rb') as file:
        data = file.read(30)
    bitdepth, coltyp = unpack_from('<2B', data[24 : 26])
    self.parameters['wBitCount'] = len(image.getbands()) * bitdepth
    if coltyp == 4 and self.mode == 'RGBA':
        # fix this PIL mode: greyscale+alpha PNGs are reported as RGBA.
        self.mode, self.parameters['wBitCount'] = ('LA', int(self.parameters['wBitCount'] / 2))
    dict_colortype = {0 : (['1', 'I', 'L'], 'grayscale'),
                      2 : (['RGB'], 'truecolor'),
                      3 : (['P'], 'indexed'),
                      4 : (['LA'], 'grayscale+alpha'),
                      6 : (['RGBA'], 'truecolor+alpha')
                      }
    # explicit check instead of `assert`: asserts are stripped under -O,
    # which would silently disable this validation.
    if self.mode not in dict_colortype[coltyp][0]:
        raise EncodeErr(code = 2, msg = "Image error: malformed.")
    dizio = {'file' : path,
             'mode' : dict_colortype[coltyp][1],
             'depth' : self.parameters['wBitCount']}
    if self.path_icocur not in self.all_icocur_written:
        self.all_icocur_written[self.path_icocur] = [dizio]
    else:
        self.all_icocur_written[self.path_icocur].extend([dizio])
    return image
def load(self, path_image):
    """ Loads input image data.

    Extracts the image parameters, applies resizing and ICC-profile
    conversion, forces unsupported modes to a supported one, then dumps
    the raw (bottom-up) pixel data for the icon/cursor writer.

    path_image: path of the input image file.
    Returns (image, dataimage): the possibly converted PIL image and its
    raw pixel bytes.
    """
    ## Get parameters.
    image = self.extract(path_image)
    ## Manage resize.
    # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10 -- confirm
    # the pinned PIL/Pillow version before upgrading.
    image = self.ico_resize(image, how = self.type_resize, method = Image.ANTIALIAS)
    ## Manage ICC profile: convert to sRGB when the source embeds a profile.
    if 'icc_profile' in image.info:
        icc = mkstemp(suffix = '.icc')[1]
        with open(icc, 'wb') as iccfile:
            iccfile.write(image.info.get('icc_profile'))
        srgb = ImageCms.createProfile('sRGB')
        image = ImageCms.profileToProfile(image, icc, srgb)
    ## Conversion table: source mode -> mode actually written.
    ##                      | force_to = 'original' | force_to |
    ##--------------------------------------------------------------------
    ## monochrome 1bpp ("1")               |  "1"
    ## grayscale 2bpp ("L;2")              |  "L;2"
    ## grayscale 4bpp ("L;4")              |  "L;4"
    ## grayscale 8bpp ("L")                |  "L"
    ## indexed 1bpp ("P;1")                |  "P;1"
    ## indexed 2bpp ("P;2")                |  "P;2"
    ## indexed 4bpp ("P;4")                |  "P;4"
    ## indexed 8bpp ("P")                  |  "P"
    ## high-color 16bpp ("I;16")           |  "RGBA5551;16"
    ## grayscale+alpha 16bpp ("LA;16")     |  "RGBA;32"
    ## grayscale+alpha 32bpp ("LA;32")     |  "RGBA;32"
    ## true-color 24bpp ("RGB;24")         |  "RGB;24"
    ## deep-color 48bpp ("RGB;48")         |  "RGB;24"
    ## true-color+alpha 32bpp ("RGBA;32")  |  "RGBA;32"
    ## true-color+alpha 64bpp ("RGBA;64")  |  "RGBA;32"
    ## any mode with indexed transparency  |  "RGBA;32"
    # Modes that needs always forced conversion.
    forced = True
    if (self.mode == 'LA' and self.parameters['wBitCount'] in [16, 32]) \
       or ('transparency' in image.info) \
       or (self.mode == 'RGBA' and self.parameters['wBitCount'] == 64):
        image = image.convert('RGBA')
        self.mode, self.parameters['wBitCount'], string_mode = image.mode, 32, 'truecolor+alpha'
    elif (self.mode == 'RGB' and self.parameters['wBitCount'] == 48):
        image = image.convert('RGB')
        self.mode, self.parameters['wBitCount'], string_mode = image.mode, 24, 'truecolor'
    else:
        forced = False
    if forced:
        # record the forced conversion in the per-file report
        # (`self.index` presumably tracks the current job -- set elsewhere).
        dizio = {'new_mode' : string_mode,
                 'new_depth' : self.parameters['wBitCount']
                 }
        self.all_icocur_written[self.path_icocur][self.index].update(dizio)
    ## Continue loading data.
    if self.mode == 'I':
        # 32-bit integer pixels: scale down to 8-bit greyscale via a
        # lookup table before dumping.
        self.mode = 'L'
        table = [i / (2 ** int(self.parameters['wBitCount'] / 2)) for i in range(2 ** self.parameters['wBitCount'])]
        image = image.point(table, self.mode)
    else:
        image = image.convert(self.mode)
    if self.mode in ['1', 'L', 'I']:
        # greyscale family: row stride depends on the target bit count.
        if self.parameters['wBitCount'] in [1, 8]:
            pad = calc_rowsize(self.parameters['wBitCount'], self.parameters['bWidth'])
        elif self.parameters['wBitCount'] in [2, 4, 16]:
            pad = calc_rowsize(8, self.parameters['bWidth'])
        dataimage = image.tobytes('raw', self.mode, pad, -1)
        if self.parameters['wBitCount'] == 2:
            # tobytes() not include a raw L;2
            dataimage = self.convert_8bit_to_2bit(dataimage)
        elif self.parameters['wBitCount'] == 4:
            # tobytes() not include a raw L;4
            dataimage = self.convert_8bit_to_4bit(dataimage)
        elif self.parameters['wBitCount'] == 16:
            # PIL I;16 converted to ABGR1555 format.
            temp = []
            for data in dataimage:
                value = ((data & 0b10000000) << 8) | ((data & 0b11111000) << 7) | ((data & 0b11111000) << 2) | (data >> 3)
                temp.append((value).to_bytes(2, byteorder = 'little'))
            dataimage = b"".join(temp)
    elif self.mode in ['P', 'RGB', 'RGBA']:
        # palette / truecolor family: pick the raw encoder per bit count.
        pad = calc_rowsize(self.parameters['wBitCount'], self.parameters['bWidth'])
        if self.parameters['wBitCount'] == 1:
            dataimage = image.tobytes('raw', 'P;1', pad, -1)
        elif self.parameters['wBitCount'] == 2:
            # tobytes() not include a raw P;2
            pad = calc_rowsize(8, self.parameters['bWidth'])
            dataimage = image.tobytes('raw', self.mode, pad, -1)
            dataimage = self.convert_8bit_to_2bit(dataimage)
        elif self.parameters['wBitCount'] == 4:
            dataimage = image.tobytes('raw', 'P;4', pad, -1)
        elif self.parameters['wBitCount'] == 8:
            dataimage = image.tobytes('raw', 'P', pad, -1)
        elif self.parameters['wBitCount'] == 24:
            dataimage = image.tobytes('raw', 'BGR', pad, -1)
        elif self.parameters['wBitCount'] == 32:
            dataimage = self.get_bgra(image, pad)
    return image, dataimage
def ico_palette_gpl(self, file):
    """ Gets values from `.gpl` file.

    Header lines ("GIMP", "Name", "Columns") and comment lines ("#")
    are skipped; from every remaining line the first three whitespace-
    separated fields are parsed as integers and appended to a flat
    [R, G, B, R, G, B, ...] list, which is returned.
    """
    skip_prefixes = ("gimp", "name", "columns", "#")
    palette = []
    with open(file, 'r') as handle:
        for line in handle:
            if line.lower().startswith(skip_prefixes):
                continue
            palette.extend(int(field) for field in line.strip().split()[:3])
    return palette
def ico_palette_add(self, values):
    """ Adds 4th element (b'\\x00') to RGB palette entries.

    `values` is a flat [R, G, B, R, G, B, ...] list; every complete RGB
    triple gets a trailing zero pad byte (an incomplete trailing group
    is copied through unpadded).  The result is stored as bytes in
    self.parameters['palette'].
    """
    padded = []
    for start in range(0, len(values), 3):
        group = values[start : start + 3]
        padded.extend(group)
        if len(group) == 3:
            padded.append(0)
    self.parameters['palette'] = bytes(padded)
def ico_palette(self, image):
""" Makes some operations on palettes. """
self.parameters['palette'], self.parameters['size_pal'] = b"", 0
adjust, is_fallback = (False for _ in range(2))
## Assign/create palette.
if self.parameters['wBitCount'] <= 8:
if not image.palette:
if self.custom_palettes:
if isinstance(self.custom_palettes, dict):
try:
palvalues = self.custom_palettes[(self.mode, self.parameters['wBitCount'])]
except:
print_err("Input error: option `custom_palettes` not proper defined.")
else:
print_err("Input error: option `custom_palettes` not proper defined.")
else:
is_fallback = True
fallback_palettes = {('1', 1) : self.ico_palette_gpl(join(working_path, 'palettes/11.gpl')),
('L', 2) : self.ico_palette_gpl(join(working_path, 'palettes/L2.gpl')),
('L', 4) : self.ico_palette_gpl(join(working_path, 'palettes/L4.gpl')),
('L', 8) : | |
<filename>infoblox_netmri/api/broker/v3_8_0/scorecard_history_broker.py<gh_stars>10-100
from ..broker import Broker
class ScorecardHistoryBroker(Broker):
controller = "scorecard_histories"
def index(self, **kwargs):
    """Lists the available scorecard histories.

    Any of the inputs listed may be used to narrow the list; other
    inputs will be ignored. Of the various ways to query lists, using
    this method is most efficient.

    :param Component: The issue component (Devices, Configuration, VLANs, etc.). String (api 2.4) or Array of String (api 2.5+).
    :param Timestamp: The date and time this record was collected or calculated. DateTime (api 2.4) or Array of DateTime (api 2.5+).
    :param start: The record number to return in the selected page of data. Default 0.
    :param limit: The size of the page of data. Default 1000, maximum 10000.
    :param sort: The data field(s) to use for sorting the output. Default Component. Valid values are Component, Timestamp, Correctness, Stability, Info, Warn, Error, InfoDetails, WarnDetails, ErrorDetails.
    :param dir: The direction(s) in which to sort the data, 'asc' (default) or 'desc'.
    :param select: The list of attributes to return for each ScorecardHistory; all attributes if empty or omitted.
    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records (api 2.8+).
    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records (api 2.8+).

    :return scorecard_histories: An array of the ScorecardHistory objects
        that match the specified input criteria.
    :rtype scorecard_histories: Array of ScorecardHistory
    """
    method_name = self._get_method_fullname("index")
    return self.api_list_request(method_name, kwargs)
def search(self, **kwargs):
"""Lists the available scorecard histories matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Component: The issue component (Devices, Configuration, VLANs, etc.).
:type Component: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Component: The issue component (Devices, Configuration, VLANs, etc.).
:type Component: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Correctness: The correctness contribution for this issue in the scorecard history.
:type Correctness: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Correctness: The correctness contribution for this issue in the scorecard history.
:type Correctness: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Error: The error of the scorecard history.
:type Error: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Error: The error of the scorecard history.
:type Error: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ErrorDetails: The error details of the scorecard history.
:type ErrorDetails: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ErrorDetails: The error details of the scorecard history.
:type ErrorDetails: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Info: The information details of the scorecard history.
:type Info: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Info: The information details of the scorecard history.
:type Info: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InfoDetails: The information details of the scorecard history.
:type InfoDetails: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InfoDetails: The information details of the scorecard history.
:type InfoDetails: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Stability: The stability contribution for this issue in the scorecard history.
:type Stability: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Stability: The stability contribution for this issue in the scorecard history.
:type Stability: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Timestamp: The date and time this record was collected or calculated.
:type Timestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Timestamp: The date and time this record was collected or calculated.
:type Timestamp: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param Warn: The warning details of the scorecard history.
:type Warn: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param Warn: The warning details of the scorecard history.
:type Warn: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param WarnDetails: The warning details of the scorecard history.
:type WarnDetails: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param WarnDetails: The warning details of the scorecard history.
:type WarnDetails: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more | |
= sketchArc1.addByThreePoints(adsk.core.Point3D.create(nutDistance+endC-L, (endLength/2), fretboardHeight), adsk.core.Point3D.create(nutDistance-L, 0, fretboardHeight),
adsk.core.Point3D.create(nutDistance+endC-L, (endLength/-2), fretboardHeight))
elif createEndCurve.value:
path1 = sketchArc1.addByThreePoints(endTopL, endTopC, endTopR)
else:
path1 = sketchArc1.addByThreePoints(adsk.core.Point3D.create(nutDistance-L, (endLength/2), fretboardHeight-endR), adsk.core.Point3D.create(nutDistance-L, 0, fretboardHeight),
adsk.core.Point3D.create(nutDistance-L, (endLength/-2), fretboardHeight-endR))
# else:
# path1 = line1.addByTwoPoints(adsk.core.Point3D.create((endLength/2), fretboardHeight, nutDistance-L), adsk.core.Point3D.create((endLength/-2), fretboardHeight, nutDistance-L))
openProfile1 = adsk.fusion.Path.create(path1.createForAssemblyContext(newOcc), adsk.fusion.ChainedCurveOptions.noChainedCurves)
if createEndCurve.value:
path2 = sketchArc1.addByThreePoints(endBotL, endBotC, endBotR)
else:
path2 = line1.addByTwoPoints(adsk.core.Point3D.create(nutDistance-L, (endLength/2), 0), adsk.core.Point3D.create(nutDistance-L, (endLength/-2), 0))
openProfile2 = adsk.fusion.Path.create(path2.createForAssemblyContext(newOcc), adsk.fusion.ChainedCurveOptions.noChainedCurves)
if defaultFretboardStyle.selectedItem.name == "Flat/No Radius":
path3 = line1.addByTwoPoints(adsk.core.Point3D.create(nutDistance, (nutLength/-2), fretboardHeight), adsk.core.Point3D.create(nutDistance, (nutLength/2), fretboardHeight))
else:
path3 = sketchArc1.addByThreePoints(nutTopL, nutTopC, nutTopR)
openProfile3 = adsk.fusion.Path.create(path3.createForAssemblyContext(newOcc), adsk.fusion.ChainedCurveOptions.noChainedCurves)
path4 = line1.addByTwoPoints(adsk.core.Point3D.create(nutDistance, (nutLength/-2), 0), adsk.core.Point3D.create(nutDistance, (nutLength/2), 0))
openProfile4 = adsk.fusion.Path.create(path4.createForAssemblyContext(newOcc), adsk.fusion.ChainedCurveOptions.noChainedCurves)
line1.addByTwoPoints(path1.startSketchPoint, path3.startSketchPoint)
line1.addByTwoPoints(path2.startSketchPoint, path4.endSketchPoint)
line1.addByTwoPoints(path1.endSketchPoint, path3.endSketchPoint)
line1.addByTwoPoints(path2.endSketchPoint, path4.startSketchPoint)
sketch1.name = 'Fretboard curves'
sketch1.arePointsShown = False
sketch1.isVisible = False
#Create sketch for fret lines
sketch5 = sketches.add(xyPlane)
sketch5.isComputeDeferred = True
frets = sketch5.sketchCurves.sketchLines;
sketch5.name = 'Fret Lines (Reference)'
sketch5.isVisible = False
#Create sketch for fret cuts
sketch6 = sketches.add(xyPlane)
sketch6.isComputeDeferred = False
cuts = sketch6.sketchCurves.sketchLines;
sketch6.name = 'Fret slots [ ' + str(fretNumber) + ' frets ]'
sketch6.isVisible = False
#create sketch for nut cut
sketch7 = sketches.add(xyPlane)
sketch7.isComputeDeferred = False
nutSketch = sketch7.sketchCurves.sketchLines;
nutSlotsketch = nutSketch.addTwoPointRectangle(adsk.core.Point3D.create(nutDistance, nutLength, fretboardHeight),
adsk.core.Point3D.create(nutDistance+nutSlotWidth, -nutLength, fretboardHeight))
nutProfile = sketch7.profiles.item(0)
sketch7.name = 'Nut slot profile'
sketch7.isVisible = False
#create sketch for nut cut
sketch9 = sketches.add(xyPlane)
sketch9.isComputeDeferred = False
fretMarker = sketch9.sketchCurves.sketchCircles;
sketch9.name = 'Inlays'
sketch9.isVisible = False
sketch10 = sketches.add(xyPlane)
sketch10.isComputeDeferred = True
inplayPoints = sketch10.sketchPoints
sketch10.name = 'Default Marker Positions'
sketch10.isVisible = False
# Create surface for bridge-end of fretboard
loftFeats = fretboardComp.features.loftFeatures
loftInput1 = loftFeats.createInput(adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
loftSections1 = loftInput1.loftSections
loftSections1.add(openProfile2)
loftSections1.add(openProfile1)
loftInput1.isSolid = False
loft1 = loftFeats.add(loftInput1)
l1 = loft1.faces[0]
loft1.name = 'Loft: Fretboard End'
# Create surface for nut-end of fretboard
loftInput2 = loftFeats.createInput(adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
loftSections2 = loftInput2.loftSections
loftSections2.add(openProfile3)
loftSections2.add(openProfile4)
loftInput2.isSolid = False
loft2 = loftFeats.add(loftInput2)
l2 = loft2.faces[0]
loft2.name = 'Loft: Fretboard Start'
# Create new surface using previous surfaces
loftInput3 = loftFeats.createInput(adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
loftSections3 = loftInput3.loftSections
loftSections3.add(l1)
loftSections3.add(l2)
loftInput3.isSolid = False
loft3 = loftFeats.add(loftInput3)
l3 = loft3.faces[0]
loft3.name = 'Loft: Fretboard'
# Get surface bodies and add them to object collection
surface1 = loft1.bodies.item(0)
surface2 = loft2.bodies.item(0)
surface3 = loft3.bodies.item(0)
surfaces = adsk.core.ObjectCollection.create()
surfaces.add(surface1)
surfaces.add(surface2)
surfaces.add(surface3)
# Define tolerance
tolerance = adsk.core.ValueInput.createByString('0.001 in')
# Create a stitch input to be able to define the input needed for an stitch.
stitches = fretboardComp.features.stitchFeatures
stitchInput = stitches.createInput(surfaces, tolerance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
# Create a stitch feature.
stitch = stitches.add(stitchInput)
stitch.name = 'Stitch: Fretboard'
#Select edges of bridge-end of fretboard to make fillets
fretboardEndFace = stitch.bodies.item(0)
fretboardEndEdge1 = fretboardEndFace.edges.item(1)
fretboardEndEdge2 = fretboardEndFace.edges.item(3)
#Create collection
endEdges = adsk.core.ObjectCollection.create()
endEdges.add(fretboardEndEdge1)
endEdges.add(fretboardEndEdge2)
if createFilletRadius.value:
#Creating fillets
fillets = fretboardComp.features.filletFeatures
filletInput = fillets.createInput()
filletInput.addConstantRadiusEdgeSet(endEdges, adsk.core.ValueInput.createByReal(filletRadius), True)
filletInput.isG2 = False
filletInput.isRollingBallCorner = True
fillet = fillets.add(filletInput)
fillet.name = 'Rounded Corners'
else:
pass
# Get the body created by the stitch
face = stitch.bodies.item(0)
if createFilletRadius.value:
topFace = face.faces.item(6)
else:
topFace = face.faces.item(4)
# Create input entities for offset feature
inputEntities = adsk.core.ObjectCollection.create()
inputEntities.add(topFace)
# Create an input for offset feature
offsetFeature = fretboardComp.features.offsetFeatures
offsetInput = offsetFeature.createInput(inputEntities, adsk.core.ValueInput.createByReal(-tangDepth),
adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
#Get the surface body.
extrudeFeature = offsetFeature.add(offsetInput)
extrudeFeature.name = 'Offset: Fret Projections'
offSurf = extrudeFeature.bodies.item(0)
offSurf.name = 'Reference for fret cuts'
offSurf.isVisible = False
#Get an edge from surface, and add it to object collection.
extend = offSurf.edges
ext1 = extend.item(0)
ext2 = extend.item(1)
ext3 = extend.item(2)
ext4 = extend.item(3)
inputEdges = adsk.core.ObjectCollection.create()
inputEdges.add(ext1)
inputEdges.add(ext2)
inputEdges.add(ext3)
inputEdges.add(ext4)
#Define a distance to extend.
distance = adsk.core.ValueInput.createByString('0.5 in')
#Create an extend input to be able to define the input needed for an extend.
extendFeatures = fretboardComp.features.extendFeatures
extendFeatureInput1 = extendFeatures.createInput(inputEdges, distance, adsk.fusion.SurfaceExtendTypes.NaturalSurfaceExtendType)
#Create an extend feature.
extendFeature1 = extendFeatures.add(extendFeatureInput1)
extendFeature1.name = 'Extend: Fret Projections'
#Create loop for fret spacing and creation
fretSpacing = []
for fret in range(1,int(fretNumber)+1):
fretDistance = scaleLength-(scaleLength/(2**(fret/12.0)))
fretSpacing.append(fretDistance)
fretLength = nutLength + 2*sqrt(((fretDistance/(math.cos(math.radians(math.acos((L**2+(sqrt((((endLength-nutLength)/2)**2) +(L**2)))**2-
((endLength-nutLength)/2)**2)/(2*L*(sqrt((((endLength-nutLength)/2)**2)+(L**2)))))*((180)/math.pi)))))**2)
-(fretDistance**2))
if createBlindFrets.value:
#Create fret lines for fret spacing reference
fretLines = frets.addByTwoPoints(adsk.core.Point3D.create((nutDistance-fretDistance), ((fretLength/2)-(blindFrets/2)), fretboardHeight),
adsk.core.Point3D.create((nutDistance-fretDistance), ((-fretLength/2)+(blindFrets/2)), fretboardHeight))
#Create fret cuts
cutLines = cuts.addTwoPointRectangle(adsk.core.Point3D.create((nutDistance-fretDistance-(tangWidth/2)), ((-fretLength/2)+(blindFrets/2)), fretboardHeight),
adsk.core.Point3D.create((nutDistance-fretDistance+(tangWidth/2)), ((fretLength/2)-(blindFrets/2)), fretboardHeight))
else:
#Create fret lines for fret spacing reference
fretLines = frets.addByTwoPoints(adsk.core.Point3D.create((nutDistance-fretDistance), ((fretLength/2)+(1)), fretboardHeight),
adsk.core.Point3D.create((nutDistance-fretDistance), ((-fretLength/2)-(1)), fretboardHeight))
#Create fret cuts
cutLines = cuts.addTwoPointRectangle(adsk.core.Point3D.create((nutDistance-fretDistance-(tangWidth/2)), ((-fretLength/2)-(1)), fretboardHeight),
adsk.core.Point3D.create((nutDistance-fretDistance+(tangWidth/2)), ((fretLength/2)+(1)), fretboardHeight))
inlays = [((y-x)/2)+x for x, y in zip(fretSpacing,fretSpacing[1:])]
for inlayOdd in inlays[1:9:2] + inlays[13:21:2]:
fretMarker.addByCenterRadius(adsk.core.Point3D.create(nutDistance-inlayOdd, 0, fretboardHeight), markerDiameter)
points = adsk.core.Point3D.create(nutDistance-inlayOdd, 0, fretboardHeight)
sketch10 = inplayPoints.add(points)
for inlayOdd in inlays[25:33:2]:
fretMarker.addByCenterRadius(adsk.core.Point3D.create(nutDistance-inlayOdd, 0, fretboardHeight), markerDiameter/2)
points = adsk.core.Point3D.create(nutDistance-inlayOdd, 0, fretboardHeight)
sketch10 = inplayPoints.add(points)
for inlayEven in inlays[10:24:12]:
fretMarker.addByCenterRadius(adsk.core.Point3D.create(nutDistance-inlayEven, markerSpacing/2, fretboardHeight), markerDiameter)
fretMarker.addByCenterRadius(adsk.core.Point3D.create(nutDistance-inlayEven, -markerSpacing/2, fretboardHeight), markerDiameter)
points = adsk.core.Point3D.create(nutDistance-inlayEven, 0, fretboardHeight)
sketch10 = inplayPoints.add(points)
fretMarkers = adsk.core.ObjectCollection.create()
for markers in sketch9.profiles:
fretMarkers.add(markers)
fretboard = stitch.bodies.item(0)
fretboard.name = 'Fretboard' + ' [ ' + str(fretNumber) + ' frets ]'
fretboardFaces = [face for face in fretboard.faces]
if createFilletRadius.value:
fretboardSurf = fretboardFaces[6::2]
else:
fretboardSurf = fretboardFaces[4::2]
fretCurves = [curve for curve in sketch5.sketchCurves]
sketch8 = sketches.add(xyPlane)
sketch8.isComputeDeferred = True
fretProj = sketch8.projectToSurface(fretboardSurf, fretCurves, adsk.fusion.SurfaceProjectTypes.AlongVectorSurfaceProjectType,
fretboardComp.zConstructionAxis)
sketch8.name = 'Fret Lines [ ' + str(fretNumber) + ' frets ]'
sketch8.isVisible = False
#Create an object collection to use an input.
profs = adsk.core.ObjectCollection.create()
#Add all of the profiles to the collection.
for prof in sketch6.profiles:
profs.add(prof)
#Get extrude features
extrudes = fretboardComp.features.extrudeFeatures
if createFretMarkers.value:
#Extrusion for fret markers
markerExtrude = extrudes.addSimple(fretMarkers, adsk.core.ValueInput.createByReal(-markerDepth), adsk.fusion.FeatureOperations.CutFeatureOperation)
markerExtrude.name = 'Extrusion: Fret Markers'
else:
pass
# Extrude Sample 4: Create an extrusion that goes from the profile plane to a specified entity.
extrudeInput1 = extrudes.createInput(profs, adsk.fusion.FeatureOperations.CutFeatureOperation)
extentToEntity = adsk.fusion.ToEntityExtentDefinition.create(extrudeFeature.faces.item(0), True)
# Set the one side extent with the to-entity-extent-definition, and with a taper angle of 0 degree
extrudeInput1.setOneSideExtent(extentToEntity, adsk.fusion.ExtentDirections.PositiveExtentDirection)
#Extrusion for adding material to the nut-end of the fretboard
if createFilletRadius.value:
nutExtend = extrudes.addSimple(fretboardFaces[4], distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
else:
nutExtend = extrudes.addSimple(fretboardFaces[1], distance, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
nutExt = nutExtend.bodies.item(0)
nutExt.name = 'Extension'
nutExtend.name = 'Extrusion: Extension'
nutSlot = extrudes.addSimple(nutProfile, adsk.core.ValueInput.createByReal(-nutSlotDepth), adsk.fusion.FeatureOperations.CutFeatureOperation)
nutSlot.name = 'Extrusion: Nut Slot'
if createFretCuts.value:
#Create the extrusion
extrude1 = extrudes.add(extrudeInput1)
extrude1.name = 'Extrusion: Cutting Frets'
if extensionVisibility.value:
nutExt.isVisible = True
else:
nutExt.isVisible = False
# Get a reference to an appearance in the library.
lib = app.materialLibraries.itemByName('Fusion 360 Appearance Library')
libAppear1 = lib.appearances.itemByName('Paint - Enamel Glossy (Yellow)')
libAppear2 = lib.appearances.itemByName('Wax (White)')
fretboardAppearance1 = fretboardComp.bRepBodies.item(0)
fretboardAppearance1.appearance = libAppear1
fretboardAppearance2 = fretboardComp.bRepBodies.item(1)
fretboardAppearance2.appearance = libAppear1
offSurf.appearance = libAppear2
#Centers the camera to fit the entire fretboard
cam = app.activeViewport.camera
cam.isFitView = True
cam.isSmoothTransition = False
app.activeViewport.camera = cam
# Group everything used to create the fretboard in the timeline.
timelineGroups = design.timeline.timelineGroups
newOccIndex = newOcc.timelineObject.index
if createFretCuts.value:
endIndex = extrude1.timelineObject.index
else:
endIndex = nutSlot.timelineObject.index
timelineGroup = timelineGroups.add(newOccIndex, endIndex)
timelineGroup.name = 'Fretboard [ ' + str(fretNumber) + ' Frets ]'
fretboardComp.name = 'Fretboard' + ' [ ' + str(fretNumber) + ' Frets ]'
return fretboardComp
except:
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def buildBlanks(design, bodyLength, bodyWidth, bodyThickness, headstockLength, headstockWidth, headstockThickness, guitarLength):
try:
design = adsk.fusion.Design.cast(app.activeProduct)
rootComp = design.rootComponent
blanksOccs = rootComp.occurrences
blanksOcc = blanksOccs.addNewComponent(adsk.core.Matrix3D.create())
blanksComp = adsk.fusion.Component.cast(blanksOcc.component)
# Create a new sketch.
sketches = blanksComp.sketches
xzPlane = blanksComp.xYConstructionPlane
#Get extrude features
extrudes = blanksComp.features.extrudeFeatures
#Create sketch for bridge spacing
sketch1 = sketches.add(xzPlane)
sketch1.isComputeDeferred = False
sketch1.name = 'Body Blank'
bodyBlankSketch = sketch1.sketchCurves.sketchLines;
bodyBlank = bodyBlankSketch.addTwoPointRectangle(adsk.core.Point3D.create(0, -bodyWidth/2, 0), adsk.core.Point3D.create(bodyLength, bodyWidth/2, 0))
#Create sketch for bridge spacing
sketch2 = sketches.add(xzPlane)
sketch2.isComputeDeferred = False
sketch2.name = 'Headstock Blank'
headstockBlankSketch = sketch2.sketchCurves.sketchLines;
headstockBlank = headstockBlankSketch.addTwoPointRectangle(adsk.core.Point3D.create(guitarLength-headstockLength, -headstockWidth/2, 0), adsk.core.Point3D.create(guitarLength, headstockWidth/2, 0))
#Get extrude features
extrudes = blanksComp.features.extrudeFeatures
bodyProf = sketch1.profiles.item(0)
headstockProf = sketch2.profiles.item(0)
bodyExtrude = extrudes.addSimple(bodyProf, adsk.core.ValueInput.createByReal(-bodyThickness), adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
# Get the extrusion body
bodyBody = bodyExtrude.bodies.item(0)
bodyExtrude.name = "Extrusion: Body Blank"
bodyBody.name = "Body Blank"
headstockExtrude = extrudes.addSimple(headstockProf, adsk.core.ValueInput.createByReal(-headstockThickness), adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
# Get the extrusion body
headstockBody = headstockExtrude.bodies.item(0)
headstockExtrude.name = "Extrusion: Headstock Blank"
headstockBody.name = "Headstock Blank"
# Get a reference to an appearance in the library.
lib = app.materialLibraries.itemByName('Fusion 360 Appearance Library')
libAppear1 = lib.appearances.itemByName('Wax (White)')
blanksAppearance1 = blanksComp.bRepBodies.item(0)
blanksAppearance1.appearance = libAppear1
blanksAppearance2 = blanksComp.bRepBodies.item(1)
blanksAppearance2.appearance = libAppear1
#Centers the camera to fit the entire fretboard
| |
test_future_tasks_hidden(self):
self.run_test(
todo0 = ["past t:1999-12-31", "present t:2000-01-01", "future t:2000-01-02"],
edit0 = ["i:1 past t:1999-12-31", "i:2 present t:2000-01-01"]
)
def test_future_tasks_not_hidden(self):
self.run_test(
todo0 = ["past t:1999-12-31", "present t:2000-01-01", "future t:2000-01-02"],
edit0 = ["i:1 past t:1999-12-31", "i:2 present t:2000-01-01", "i:3 future t:2000-01-02"],
export = {"TODOTXT_DISABLE_FILTER": "1"}
)
def test_remove_task(self):
self.run_test(
todo0 = ["x"],
edit0 = ["i:1 x"],
edit1 = [],
todo1 = [],
)
def test_edit_task(self):
self.run_test(
todo0 = ["a", "b"],
edit0 = ["i:1 a", "i:2 b"],
edit1 = ["i:1 x", "i:2 b"],
todo1 = ["x", "b"],
)
def test_empty_line_preserved(self):
self.run_test(
todo0 = ["", "orig"],
edit0 = ["i:2 orig"],
edit1 = ["i:2 orig"],
todo1 = ["", "orig"]
)
def test_empty_line_unchanged_if_no_other_edits(self):
self.run_test(
todo0 = ["", "orig"],
edit0 = ["i:2 orig"],
edit1 = ["i:2 orig"],
todo1 = ["", "orig"],
export = {"TODOTXT_PRESERVE_LINE_NUMBERS": "0"}
)
def test_empty_line_not_preserved_when_other_edits(self):
self.run_test(
todo0 = ["", "orig"],
edit0 = ["i:2 orig"],
edit1 = ["i:2 changed"],
todo1 = ["changed"],
export = {"TODOTXT_PRESERVE_LINE_NUMBERS": "0"}
)
# regression test
def test_insert_task_empty_line_preserved(self):
self.run_test(
todo0 = ["", "orig"],
edit0 = ["i:2 orig"],
edit1 = ["i:2 orig", "new"],
todo1 = ["", "orig", "new"]
)
# regression test
def test_insert_task_empty_line_not_preserved(self):
self.run_test(
todo0 = ["", "orig"],
edit0 = ["i:2 orig"],
edit1 = ["i:2 orig", "new"],
todo1 = ["orig", "new"],
export = {"TODOTXT_PRESERVE_LINE_NUMBERS": "0"}
)
def test_leading_tag_order_not_normalized_if_no_other_edits(self):
self.run_test(
todo0 = ["k:v +p @c x"],
edit0 = ["i:1 k:v +p @c x"],
)
def test_leading_tag_order_normalized_if_other_edits(self):
self.run_test(
todo0 = ["k:v +p @c x"],
edit0 = ["i:1 k:v +p @c x"],
edit1 = ["i:1 k:v +p @c y"],
todo1 = ["@c +p k:v y"],
)
def test_trailing_tag_order_not_normalized_if_no_other_edits(self):
self.run_test(
todo0 = ["x k:v +p @c"],
edit0 = ["i:1 x k:v +p @c"],
)
def test_trailing_tag_order_normalized_if_other_edits(self):
self.run_test(
todo0 = ["x k:v +p @c"],
edit0 = ["i:1 x k:v +p @c"],
edit1 = ["i:1 y k:v +p @c"],
todo1 = ["y @c +p k:v"],
)
# regression test
def test_can_manually_make_changes_equal_to_normalization_even_if_no_other_edits(self):
self.run_test(
todo0 = ["x t:2000-01-01"],
edit0 = ["i:1 x t:2000-01-01"],
edit1 = ["i:1 x"],
todo1 = ["x"],
)
# regression test
def test_url_is_not_considered_tag(self):
self.run_test(
todo0 = ["http://example.com @c"],
edit0 = ["i:1 http://example.com @c"]
)
def test_duplicate_id_tag_ignored(self):
self.run_test(
todo0 = ["a"],
edit0 = ["i:1 a"],
edit1 = ["i:1 i:1 x"],
todo1 = ["x"],
expect_warnings = True
)
def test_hides_but_preserves_date(self):
self.run_test(
todo0 = ["(A) 1999-12-31 a"],
edit0 = ["(A) i:1 a"]
)
class SliceAllTest(AbstractSliceAllTest, unittest.TestCase):
    """Run the shared AbstractSliceAllTest suite against the "all" slice."""
    slice_name = "all"
    export = {}
# Any tests for "all" should go in AbstractSliceAllTest instead,
# so the tests for the "terms" and "tags" slices can inherit them too.
# The "terms" and "tags" slices should behave identically when they have no arguments.
class SliceTermsTest(AbstractSliceAllTest, unittest.TestCase):
    """Tests for the "terms" slice, which filters tasks by free-text terms.

    Inherits the shared slice behavior tests from AbstractSliceAllTest
    (with no arguments "terms" behaves like "all") and adds
    inclusion/exclusion cases specific to term matching.
    """

    slice_name = "terms"
    export = {}

    def test_comment_header_included_terms(self):
        """The buffer header names the included terms."""
        self.run_test(
            slice_args=["x", "y"],
            todo0=[],
            edit0=["# Tasks including terms: x y", ""],
            edit1=[],
            todo1=[],
            strip_edit0_comments=False,
        )

    def test_comment_header_excluded_terms(self):
        """The buffer header names the excluded terms."""
        self.run_test(
            slice_args=["-x", "-y"],
            todo0=[],
            edit0=["# Tasks excluding terms: x y", ""],
            edit1=[],
            todo1=[],
            strip_edit0_comments=False,
        )

    def test_comment_header_included_and_excluded_terms(self):
        """The buffer header names both the included and the excluded terms."""
        self.run_test(
            slice_args=["x", "-y"],
            todo0=[],
            edit0=["# Tasks including terms: x and excluding terms: y", ""],
            edit1=[],
            todo1=[],
            strip_edit0_comments=False,
        )

    def test_match_task_with_term(self):
        """Only tasks containing the term are offered for editing."""
        self.run_test(
            slice_args=["x"],
            todo0=["x", "y"],
            edit0=["i:1 x"],
        )

    def test_match_task_with_term_case_insensitive(self):
        """Regression test: term matching ignores case."""
        self.run_test(
            slice_args=["X"],
            todo0=["x", "y"],
            edit0=["i:1 x"],
        )

    def test_match_task_without_excluded_term(self):
        """A leading '-' excludes tasks containing the term."""
        self.run_test(
            slice_args=["-x"],
            todo0=["x", "y"],
            edit0=["i:2 y"],
        )

    def test_match_task_without_excluded_term_case_insensitive(self):
        """Regression test: excluded-term matching ignores case."""
        self.run_test(
            slice_args=["-X"],
            todo0=["x", "y"],
            edit0=["i:2 y"],
        )

    def test_match_task_with_multiple_terms(self):
        """Every given term must be present for a task to match."""
        self.run_test(
            slice_args=["x", "y1"],
            todo0=["x y1", "y1 x", "x y2"],
            edit0=["i:1 x y1", "i:2 y1 x"],
        )

    def test_match_task_with_included_and_without_excluded_terms(self):
        """Included and excluded terms can be combined in one slice."""
        self.run_test(
            slice_args=["x", "-y1"],
            todo0=["x y1", "y1 x", "x y2"],
            edit0=["i:3 x y2"],
        )

    def test_does_not_strip_tag(self):
        """A matched tag stays visible on the edit line (unlike the "tags" slice)."""
        self.run_test(
            slice_args=["@c"],
            todo0=["x", "a @c"],
            edit0=["i:2 a @c"],
        )
class SliceTagsTest(AbstractSliceAllTest, unittest.TestCase):
    """Tests for the "tags" slice, which filters by priority and tags.

    Matched priority markers and tags are hidden from the edit buffer and
    restored on save. Inherits the shared slice behavior tests from
    AbstractSliceAllTest.
    """

    slice_name = "tags"
    export = {}

    def test_comment_header_priority(self):
        """The buffer header names the selected priority."""
        self.run_test(
            slice_args=["A"],
            todo0=[],
            edit0=["# Tasks with priority (A)", ""],
            edit1=[],
            todo1=[],
            strip_edit0_comments=False,
        )

    def test_comment_header_no_level_priority(self):
        """Regression test: the no-priority marker '_' renders as (_)."""
        self.run_test(
            slice_args=["_"],
            todo0=[],
            edit0=["# Tasks with priority (_)", ""],
            edit1=[],
            todo1=[],
            strip_edit0_comments=False,
        )

    def test_comment_header_tags(self):
        """The buffer header names the selected tags."""
        self.run_test(
            slice_args=["@c"],
            todo0=[],
            edit0=["# Tasks with tags: @c", ""],
            edit1=[],
            todo1=[],
            strip_edit0_comments=False,
        )

    def test_comment_header_priority_and_tags(self):
        """The buffer header names both the priority and the tags."""
        self.run_test(
            slice_args=["A", "@c"],
            todo0=[],
            edit0=["# Tasks with priority (A) and tags: @c", ""],
            edit1=[],
            todo1=[],
            strip_edit0_comments=False,
        )

    def test_match_task_with_priority(self):
        """Only tasks with the selected priority match; the marker is hidden."""
        self.run_test(
            slice_args=["A"],
            todo0=["x", "(A) a"],
            edit0=["i:2 a"],
        )

    def test_match_task_with_no_level_priority(self):
        """'_' selects tasks that carry no priority at all."""
        self.run_test(
            slice_args=["_"],
            todo0=["x", "(A) a"],
            edit0=["i:1 x"],
        )

    def test_match_task_with_context(self):
        """A matched @context is hidden from the edit line."""
        self.run_test(
            slice_args=["@c"],
            todo0=["x", "a @c"],
            edit0=["i:2 a"],
        )

    def test_match_task_with_project(self):
        """A matched +project is hidden from the edit line."""
        self.run_test(
            slice_args=["+p"],
            todo0=["x", "a +p"],
            edit0=["i:2 a"],
        )

    def test_match_task_with_kv(self):
        """A matched key:value tag is hidden from the edit line."""
        self.run_test(
            slice_args=["k:v"],
            todo0=["x", "a k:v"],
            edit0=["i:2 a"],
        )

    def test_unparseable_tag(self):
        """Regression test: an argument that is not a tag warns and exits unclean."""
        self.run_test(
            slice_args=["not_a_tag"],
            todo0=[],
            edit0=[],
            expect_warnings=True,
            expect_clean_exit=False,
        )

    def test_forged_id_tag_ignored(self):
        """A hand-written i: id on a new line is ignored; the task is appended."""
        self.run_test(
            slice_args=["A"],
            todo0=["(B) b"],
            edit0=[],
            edit1=["i:1 a"],
            todo1=["(B) b", "(A) a"],
            expect_warnings=True,
        )

    def test_insert_task_with_no_level_priority(self):
        """Inserting under the '_' slice does not add a priority marker."""
        self.run_test(
            slice_args=["_"],
            todo0=[],
            edit0=[],
            edit1=["y"],
            todo1=["y"],
        )

    def test_insert_task_with_duplicate_tag(self):
        """Typing the sliced tag yourself does not duplicate it on save."""
        self.run_test(
            slice_args=["@c"],
            todo0=[],
            edit0=[],
            edit1=["y @c"],
            todo1=["y @c"],
        )

    def test_insert_task_with_multiple_tags(self):
        """An inserted task receives the sliced priority and every sliced tag."""
        self.run_test(
            slice_args=["A", "@c", "+p", "k:v"],
            todo0=[],
            edit0=[],
            edit1=["y"],
            todo1=["(A) y @c +p k:v"],
        )

    def test_edit_task_with_multiple_tags(self):
        """Editing keeps the hidden priority/tags; non-matching tasks stay untouched."""
        self.run_test(
            slice_args=["A", "@c", "+p", "k:v"],
            todo0=["x", "(A) a @c +p k:v", "(A) a +q"],
            edit0=["i:2 a"],
            edit1=["i:2 y"],
            todo1=["x", "(A) y @c +p k:v", "(A) a +q"],
        )

    def test_edit_task_explicitly_readding_hidden_tag(self):
        """Regression test: re-typing the hidden tag does not duplicate it."""
        self.run_test(
            slice_args=["@c"],
            todo0=["a @c"],
            edit0=["i:1 a"],
            edit1=["i:1 a @c"],
            todo1=["a @c"],
        )
class SliceReviewTest(AbstractSliceTest, unittest.TestCase):
slice_name = "review"
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": ""}
def test_comment_header(self):
self.run_test(
todo0 = [],
edit0 = ["# Reviewable tasks (A:1)", ""],
edit1 = [],
todo1 = [],
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": "A:1"},
strip_edit0_comments = False
)
def test_slice_review_intervals_required(self):
self.run_test(
todo0 = [],
edit0 = [],
unset = {"TODOTXT_SLICE_REVIEW_INTERVALS"},
expect_warnings = True
)
def test_reviewable_by_age(self):
self.run_test(
todo0 = ["(A) 1999-12-31 a"],
edit0 = ["(_) i:1 a"],
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": "A:1"},
)
def test_not_reviewable_by_age(self):
self.run_test(
todo0 = ["(A) 1999-12-31 a"],
edit0 = [],
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": "A:2"},
)
def test_reviewable_by_priority(self):
self.run_test(
todo0 = ["(A) 2000-01-01 a", "(B) 2000-01-01 b"],
edit0 = ["(_) i:1 a"],
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": "A:0,B:1"},
)
def test_reviewable_by_no_priority(self):
self.run_test(
todo0 = ["2000-01-01 a", "(B) 2000-01-01 b"],
edit0 = ["(_) i:1 a"],
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": "_:0,B:1"},
)
def test_unconfigured_priority_ignored(self):
self.run_test(
todo0 = ["(A) 2000-01-01 a", "(B) 2000-01-01 b"],
edit0 = [],
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": "A:1"},
expect_warnings = True
)
# regression test
def test_unconfigured_no_priority_ignored(self):
self.run_test(
todo0 = ["(A) 2000-01-01 a", "2000-01-01 b"],
edit0 = [],
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": "A:1"},
expect_warnings = True
)
def test_reviewable_by_start_date(self):
self.run_test(
todo0 = ["1999-12-31 a t:2000-01-01", "1999-12-31 b t:2000-01-02"],
edit0 = ["(_) i:1 a t:2000-01-01"],
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": "_:5"},
)
def test_set_complete_date_does_not_reset_create_date(self):
self.run_test(
todo0 = ["(A) 1999-12-31 a"],
edit0 = ["(_) i:1 a"],
edit1 = ["x 2000-01-01 (_) i:1 a"],
todo1 = ["x 2000-01-01 1999-12-31 a"],
export = {"TODOTXT_SLICE_REVIEW_INTERVALS": "A:1"},
)
def test_set_complete_date_clears_start_date(self):
self.run_test(
todo0 = ["1999-12-31 a t:2000-01-01"],
edit0 = ["(_) i:1 a t:2000-01-01"],
edit1 = ["x 2000-01-01 (_) i:1 a | |
# -*- coding: utf-8 -*-
import unittest
import os
# prepare for test: define ANIMA_TEST_SETUP before the anima imports below
# (presumably checked when anima.env.mayaEnv is imported — confirm)
os.environ['ANIMA_TEST_SETUP'] = ""
from anima.env import mayaEnv  # to setup maya extensions
import pymel.core
from anima.edit import Sequence, Media, Video, Track, Clip, File
class SequenceManagerTestCase(unittest.TestCase):
"""tests the SequenceManagerExtension class
"""
    def setUp(self):
        """Start each test from a fresh Maya scene.

        Forces a new file (discarding any unsaved state from the previous
        test) and caches the scene's sequenceManager1 node on the instance.
        """
        # create a new scene and get the sequenceManager in the scene
        pymel.core.newFile(force=True)
        self.sm = pymel.core.PyNode('sequenceManager1')
def test_from_xml_path_argument_skipped(self):
"""testing if a TypeError will be raised when the path argument is
skipped
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(TypeError) as cm:
sm.from_xml()
self.assertEqual(
cm.exception.message,
'from_xml() takes exactly 2 arguments (1 given)'
)
    def test_from_xml_path_argument_is_not_a_string(self):
        """testing if a TypeError will be raised when the path argument is not
        a string
        """
        sm = pymel.core.PyNode('sequenceManager1')
        with self.assertRaises(TypeError) as cm:
            # pass an int to trigger the type check
            sm.from_xml(30)
        self.assertEqual(
            cm.exception.message,
            'path argument in SequenceManager.from_xml should be a string, '
            'not int'
        )
    def test_from_xml_path_argument_is_not_a_valid_path(self):
        """testing if an IOError will be raised when the path argument is not
        a valid path
        """
        sm = pymel.core.PyNode('sequenceManager1')
        with self.assertRaises(IOError) as cm:
            sm.from_xml('not a valid path')
        self.assertEqual(
            cm.exception.message,
            'Please supply a valid path to an XML file!'
        )
    def test_from_xml_generates_correct_sequencer_hierarchy(self):
        """testing if from_xml method will generate Sequences and shots
        correctly
        """
        path = os.path.abspath('./test_data/test_v001.xml')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.from_xml(path)
        sequences = sm.sequences.get()
        self.assertEqual(len(sequences), 1)
        sequencer = sequences[0]
        self.assertIsInstance(sequencer, pymel.core.nt.Sequencer)
        self.assertEqual(sequencer.duration, 111)
        self.assertEqual(sequencer.sequence_name.get(), 'SEQ001_HSNI_003')
        # check scene fps
        self.assertEqual(pymel.core.currentUnit(q=1, t=1), 'film')
        # check timecode
        time = pymel.core.PyNode('time1')
        self.assertEqual(time.timecodeProductionStart.get(), 0.0)
        shots = sequencer.shots.get()
        self.assertEqual(len(shots), 3)
        shot1 = shots[0]
        shot2 = shots[1]
        shot3 = shots[2]
        # Clip1
        self.assertEqual('0010', shot1.shotName.get())
        self.assertEqual(1024, shot1.wResolution.get())
        self.assertEqual(778, shot1.hResolution.get())
        self.assertEqual(1, shot1.track.get())
        self.assertEqual(1.0, shot1.sequenceStartFrame.get())
        self.assertEqual(34.0, shot1.sequenceEndFrame.get())
        self.assertEqual(34.0, shot1.duration)
        self.assertEqual(10.0, shot1.startFrame.get())
        self.assertEqual(43.0, shot1.endFrame.get())
        self.assertEqual(
            '/tmp/SEQ001_HSNI_003_0010_v001.mov',
            shot1.output.get()
        )
        # Clip2
        self.assertEqual('0020', shot2.shotName.get())
        self.assertEqual(1024, shot2.wResolution.get())
        self.assertEqual(778, shot2.hResolution.get())
        self.assertEqual(1, shot2.track.get())
        self.assertEqual(35.0, shot2.sequenceStartFrame.get())
        self.assertEqual(65.0, shot2.sequenceEndFrame.get())
        self.assertEqual(31.0, shot2.duration)
        self.assertEqual(10.0, shot2.startFrame.get())
        self.assertEqual(40.0, shot2.endFrame.get())
        self.assertEqual(
            '/tmp/SEQ001_HSNI_003_0020_v001.mov',
            shot2.output.get()
        )
        # Clip3
        self.assertEqual('0030', shot3.shotName.get())
        self.assertEqual(1024, shot3.wResolution.get())
        self.assertEqual(778, shot3.hResolution.get())
        self.assertEqual(1, shot3.track.get())
        self.assertEqual(66.0, shot3.sequenceStartFrame.get())
        self.assertEqual(111.0, shot3.sequenceEndFrame.get())
        self.assertEqual(46.0, shot3.duration)
        self.assertEqual(10.0, shot3.startFrame.get())
        self.assertEqual(55.0, shot3.endFrame.get())
        self.assertEqual(
            '/tmp/SEQ001_HSNI_003_0030_v001.mov',
            shot3.output.get()
        )
    def test_from_xml_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
        """testing if from_xml method will update Sequences and shots
        correctly with the xml file
        """
        path = os.path.abspath('./test_data/test_v002.xml')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.set_version('v001')
        # build the initial 3-shot sequence that the XML will update
        seq = sm.create_sequence('SEQ001_HSNI_003')
        shot1 = seq.create_shot('0010')
        shot1.startFrame.set(0)
        shot1.endFrame.set(33)
        shot1.sequenceStartFrame.set(1)
        shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
        shot1.handle.set(10)
        shot1.track.set(1)
        shot2 = seq.create_shot('0020')
        shot2.startFrame.set(34)
        shot2.endFrame.set(64)
        shot2.sequenceStartFrame.set(35)
        shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
        shot2.handle.set(10)
        shot2.track.set(1)
        shot3 = seq.create_shot('0030')
        shot3.startFrame.set(65)
        shot3.endFrame.set(110)
        shot3.sequenceStartFrame.set(66)
        shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
        shot3.handle.set(10)
        shot3.track.set(1)
        self.assertEqual(shot1.track.get(), 1)
        self.assertEqual(shot2.track.get(), 1)
        self.assertEqual(shot3.track.get(), 1)
        # now update it with test_v002.xml
        sm.from_xml(path)
        # check shot data
        # Clip1 (expanded towards both ends)
        self.assertEqual('0010', shot1.shotName.get())
        self.assertEqual(1, shot1.track.get())
        self.assertEqual(1.0, shot1.sequenceStartFrame.get())
        self.assertEqual(54.0, shot1.sequenceEndFrame.get())
        self.assertEqual(-10.0, shot1.startFrame.get())
        self.assertEqual(43.0, shot1.endFrame.get())
        # Clip2
        self.assertEqual('0020', shot2.shotName.get())
        self.assertEqual(1, shot2.track.get())
        self.assertEqual(55.0, shot2.sequenceStartFrame.get())
        self.assertEqual(75.0, shot2.sequenceEndFrame.get())
        self.assertEqual(44.0, shot2.startFrame.get())
        self.assertEqual(64.0, shot2.endFrame.get())
        # Clip3
        self.assertEqual('0030', shot3.shotName.get())
        self.assertEqual(1, shot3.track.get())
        self.assertEqual(76.0, shot3.sequenceStartFrame.get())
        self.assertEqual(131.0, shot3.sequenceEndFrame.get())
        self.assertEqual(65.0, shot3.startFrame.get())
        self.assertEqual(120.0, shot3.endFrame.get())
    def test_from_edl_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
        """testing if from_edl method will update Sequences and shots
        correctly with the edl file
        """
        path = os.path.abspath('./test_data/test_v002.edl')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.set_version('v001')
        # build the initial 3-shot sequence that the EDL will update
        seq = sm.create_sequence('SEQ001_HSNI_003')
        shot1 = seq.create_shot('0010')
        shot1.startFrame.set(0)
        shot1.endFrame.set(33)
        shot1.sequenceStartFrame.set(1)
        shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
        shot1.handle.set(10)
        shot1.track.set(1)
        shot2 = seq.create_shot('0020')
        shot2.startFrame.set(34)
        shot2.endFrame.set(64)
        shot2.sequenceStartFrame.set(35)
        shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
        shot2.handle.set(10)
        shot2.track.set(1)
        shot3 = seq.create_shot('0030')
        shot3.startFrame.set(65)
        shot3.endFrame.set(110)
        shot3.sequenceStartFrame.set(66)
        shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
        shot3.handle.set(10)
        shot3.track.set(1)
        self.assertEqual(shot1.track.get(), 1)
        self.assertEqual(shot2.track.get(), 1)
        self.assertEqual(shot3.track.get(), 1)
        # now update it with test_v002.edl
        sm.from_edl(path)
        # check shot data
        # Clip1
        self.assertEqual('0010', shot1.shotName.get())
        self.assertEqual(1, shot1.track.get())
        self.assertEqual(1.0, shot1.sequenceStartFrame.get())
        self.assertEqual(54.0, shot1.sequenceEndFrame.get())
        self.assertEqual(-10.0, shot1.startFrame.get())
        self.assertEqual(43.0, shot1.endFrame.get())
        # Clip2
        self.assertEqual('0020', shot2.shotName.get())
        self.assertEqual(1, shot2.track.get())
        self.assertEqual(55.0, shot2.sequenceStartFrame.get())
        self.assertEqual(76.0, shot2.sequenceEndFrame.get())
        self.assertEqual(44.0, shot2.startFrame.get())
        self.assertEqual(65.0, shot2.endFrame.get())
        # Clip3
        self.assertEqual('0030', shot3.shotName.get())
        self.assertEqual(1, shot3.track.get())
        self.assertEqual(77.0, shot3.sequenceStartFrame.get())
        self.assertEqual(133.0, shot3.sequenceEndFrame.get())
        self.assertEqual(65.0, shot3.startFrame.get())
        self.assertEqual(121.0, shot3.endFrame.get())
    def test_from_edl_updates_sequencer_hierarchy_with_shots_used_more_than_one_times(self):
        """testing if from_edl method will update Sequences and shots correctly
        with shot are used more than once
        """
        path = os.path.abspath('./test_data/test_v004.edl')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.set_version('v001')
        # build the initial 3-shot sequence; the EDL uses shot 0030 twice
        seq = sm.create_sequence('SEQ001_HSNI_003')
        shot1 = seq.create_shot('0010')
        shot1.startFrame.set(0)
        shot1.endFrame.set(33)
        shot1.sequenceStartFrame.set(1)
        shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
        shot1.handle.set(10)
        shot1.track.set(1)
        shot2 = seq.create_shot('0020')
        shot2.startFrame.set(34)
        shot2.endFrame.set(64)
        shot2.sequenceStartFrame.set(35)
        shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
        shot2.handle.set(10)
        shot2.track.set(1)
        shot3 = seq.create_shot('0030')
        shot3.startFrame.set(65)
        shot3.endFrame.set(110)
        shot3.sequenceStartFrame.set(66)
        shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
        shot3.handle.set(10)
        shot3.track.set(1)
        # set a camera on shot3; the duplicated clip (shot4) should inherit it
        shot3.set_camera('persp')
        self.assertEqual(shot1.track.get(), 1)
        self.assertEqual(shot2.track.get(), 1)
        self.assertEqual(shot3.track.get(), 1)
        # now update it with test_v004.edl
        sm.from_edl(path)
        # check if there are 4 shots
        self.assertEqual(4, len(seq.shots.get()))
        # check shot data
        # Clip1
        self.assertEqual('0010', shot1.shotName.get())
        self.assertEqual(1, shot1.track.get())
        self.assertEqual(1.0, shot1.sequenceStartFrame.get())
        self.assertEqual(54.0, shot1.sequenceEndFrame.get())
        self.assertEqual(-10.0, shot1.startFrame.get())
        self.assertEqual(43.0, shot1.endFrame.get())
        # Clip2
        self.assertEqual('0020', shot2.shotName.get())
        self.assertEqual(1, shot2.track.get())
        self.assertEqual(55.0, shot2.sequenceStartFrame.get())
        self.assertEqual(76.0, shot2.sequenceEndFrame.get())
        self.assertEqual(44.0, shot2.startFrame.get())
        self.assertEqual(65.0, shot2.endFrame.get())
        # Clip3
        self.assertEqual('0030', shot3.shotName.get())
        self.assertEqual(1, shot3.track.get())
        self.assertEqual(77.0, shot3.sequenceStartFrame.get())
        self.assertEqual(133.0, shot3.sequenceEndFrame.get())
        self.assertEqual(65.0, shot3.startFrame.get())
        self.assertEqual(121.0, shot3.endFrame.get())
        # Clip4
        # there should be an extra shot
        shot4 = seq.shots.get()[-1]
        self.assertEqual('0030', shot4.shotName.get())
        self.assertEqual(1, shot4.track.get())
        self.assertEqual(133.0, shot4.sequenceStartFrame.get())
        self.assertEqual(189.0, shot4.sequenceEndFrame.get())
        self.assertEqual(65.0, shot4.startFrame.get())
        self.assertEqual(121.0, shot4.endFrame.get())
        # check if their cameras also the same
        self.assertEqual(
            shot3.get_camera(),
            shot4.get_camera()
        )
    def test_from_xml_updates_sequencer_hierarchy_with_shots_removed(self):
        """testing if from_xml method will update Sequences and shots
        correctly with the xml file
        """
        path = os.path.abspath('./test_data/test_v003.xml')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.set_version('v001')
        # build the initial 3-shot sequence; the XML drops shot 0020
        seq = sm.create_sequence('SEQ001_HSNI_003')
        shot1 = seq.create_shot('0010')
        shot1.startFrame.set(0)
        shot1.endFrame.set(33)
        shot1.sequenceStartFrame.set(1)
        shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
        shot1.handle.set(10)
        shot1.track.set(1)
        shot2 = seq.create_shot('0020')
        shot2.startFrame.set(34)
        shot2.endFrame.set(64)
        shot2.sequenceStartFrame.set(35)
        shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
        shot2.handle.set(10)
        shot2.track.set(1)
        shot3 = seq.create_shot('0030')
        shot3.startFrame.set(65)
        shot3.endFrame.set(110)
        shot3.sequenceStartFrame.set(66)
        shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
        shot3.handle.set(10)
        shot3.track.set(1)
        self.assertEqual(shot1.track.get(), 1)
        self.assertEqual(shot2.track.get(), 1)
        self.assertEqual(shot3.track.get(), 1)
        # now update it with test_v003.xml
        sm.from_xml(path)
        # we should have 2 shots only
        self.assertEqual(2, len(seq.shots.get()))
        # check shot data
        # Clip1
        self.assertEqual('0010', shot1.shotName.get())
        self.assertEqual(1, shot1.track.get())
        self.assertEqual(1.0, shot1.sequenceStartFrame.get())
        self.assertEqual(54.0, shot1.sequenceEndFrame.get())
        self.assertEqual(-10.0, shot1.startFrame.get())
        self.assertEqual(43.0, shot1.endFrame.get())
        # Clip2
        # removed
        # Clip3
        self.assertEqual('0030', shot3.shotName.get())
        self.assertEqual(1, shot3.track.get())
        self.assertEqual(55.0, shot3.sequenceStartFrame.get())
        self.assertEqual(110.0, shot3.sequenceEndFrame.get())
        self.assertEqual(65.0, shot3.startFrame.get())
        self.assertEqual(120.0, shot3.endFrame.get())
    def test_to_xml_will_generate_proper_xml_string(self):
        """testing if a proper xml compatible string will be generated with
        to_xml() method
        """
        path = os.path.abspath('./test_data/test_v001.xml')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
        sm.set_version('v001')
        # build the same 3-shot sequence that test_v001.xml describes
        seq1 = sm.create_sequence('SEQ001_HSNI_003')
        shot1 = seq1.create_shot('0010')
        shot1.startFrame.set(0)
        shot1.endFrame.set(33)
        shot1.sequenceStartFrame.set(1)
        shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
        shot1.handle.set(10)
        shot1.track.set(1)
        shot2 = seq1.create_shot('0020')
        shot2.startFrame.set(34)
        shot2.endFrame.set(64)
        shot2.sequenceStartFrame.set(35)
        shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
        shot2.handle.set(10)
        shot2.track.set(1)
        shot3 = seq1.create_shot('0030')
        shot3.startFrame.set(65)
        shot3.endFrame.set(110)
        shot3.sequenceStartFrame.set(66)
        shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
        shot3.handle.set(10)
        shot3.track.set(1)
        self.assertEqual(shot1.track.get(), 1)
        self.assertEqual(shot2.track.get(), 1)
        self.assertEqual(shot3.track.get(), 1)
        # the generated XML should match the reference file byte for byte
        result = sm.to_xml()
        with open(path) as f:
            expected = f.read()
        self.maxDiff = None
        self.assertEqual(expected, result)
    def test_create_sequence_is_working_properly(self):
        """testing if create_sequence is working properly
        """
        seq = self.sm.create_sequence()
        self.assertEqual(seq.type(), 'sequencer')
        # NOTE(review): maxDiff looks like a leftover from a diff-based test
        self.maxDiff = None
        # the new sequencer must be connected to the sequence manager
        self.assertEqual(self.sm, seq.message.connections()[0])
    def test_create_sequence_is_properly_setting_the_sequence_name(self):
        """testing if create_sequence is properly setting the sequence name
        """
        seq = self.sm.create_sequence('Test Sequence')
        self.assertEqual(
            'Test Sequence',
            seq.sequence_name.get()
        )
def test_to_edl_is_working_properly(self):
"""testing if to_edl method is working properly
"""
import edl
# create a sequence
seq1 = self.sm.create_sequence('sequence1')
seq1.create_shot('shot1')
seq1.create_shot('shot2')
seq1.create_shot('shot3')
l = self.sm.to_edl()
self.assertIsInstance(
l,
edl.List
)
def test_to_edl_will_generate_a_proper_edl_content(self):
"""testing if to_edl will generate a proper edl content
"""
edl_path = os.path.abspath('./test_data/test_v001.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
l = sm.to_edl()
result = l.to_string()
with open(edl_path) as f:
expected_edl_content = f.read()
self.assertEqual(
expected_edl_content,
result
)
    def test_generate_sequence_structure_returns_a_sequence_instance(self):
        """testing if generate_sequence_structure() method will return a
        Sequence instance
        """
        sm = pymel.core.PyNode('sequenceManager1')
        seq1 = sm.create_sequence('sequence1')
        shot1 = seq1.create_shot('shot1')
        shot1.output.set('/tmp/shot1.mov')
        shot2 = seq1.create_shot('shot2')
        shot2.output.set('/tmp/shot2.mov')
        # Sequence here is anima.edit.Sequence (see module imports)
        result = sm.generate_sequence_structure()
        self.assertIsInstance(
            result,
            Sequence
        )
    def test_generate_sequence_structure_will_generate_sequences_and_shots_with_correct_number_of_tracks(self):
        """testing if a proper sequence structure will be generated by using
        the generate_sequence_structure() method with correct number of tracks
        """
        path = os.path.abspath('./test_data/test_v001.xml')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.from_xml(path)
        seq1 = sm.sequences.get()[0]
        shots = seq1.shots.get()
        shot1 = shots[0]
        shot2 = shots[1]
        shot3 = shots[2]
        # all three shots live on track 1, so a single video track is expected
        self.assertEqual(shot1.track.get(), 1)
        self.assertEqual(shot2.track.get(), 1)
        self.assertEqual(shot3.track.get(), 1)
        seq = sm.generate_sequence_structure()
        tracks = seq.media.video.tracks
        self.assertEqual(len(tracks), 1)
        track1 = tracks[0]
        clips = track1.clips
        self.assertEqual(len(clips), 3)
    def test_set_shot_name_template_is_working_properly(self):
        """testing if set_shot_name_template() is working properly
        """
        sm = pymel.core.PyNode('sequenceManager1')
        # the attribute is created lazily by set_shot_name_template()
        self.assertFalse(sm.hasAttr('shot_name_template'))
        test_template = '<Sequence>_<Shot>_<Version>'
        sm.set_shot_name_template(test_template)
        self.assertTrue(sm.hasAttr('shot_name_template'))
        self.assertEqual(sm.shot_name_template.get(), test_template)
    def test_get_shot_name_template_is_working_properly(self):
        """testing if get_shot_name_template() is working properly
        """
        sm = pymel.core.PyNode('sequenceManager1')
        self.assertFalse(sm.hasAttr('shot_name_template'))
        test_template = '<Sequence>_<Shot>_<Version>'
        sm.set_shot_name_template(test_template)
        self.assertTrue(sm.hasAttr('shot_name_template'))
        self.assertEqual(sm.get_shot_name_template(), test_template)
    def test_get_shot_name_template_will_create_shot_name_template_attribute_if_missing(self):
        """testing if get_shot_name_template() will create the
        shot_name_template attribute if missing
        """
        sm = pymel.core.PyNode('sequenceManager1')
        self.assertFalse(sm.hasAttr('shot_name_template'))
        # the getter should create the attribute with its default value
        result = sm.get_shot_name_template()
        self.assertTrue(sm.hasAttr('shot_name_template'))
        self.assertEqual(result, '<Sequence>_<Shot>_<Version>')
def test_set_version_is_working_properly(self):
"""testing if set_version() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
test_version = 'v001'
sm.set_version(test_version)
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(sm.version.get(), test_version)
    def test_get_version_is_working_properly(self):
        """testing if get_version() is working properly
        """
        sm = pymel.core.PyNode('sequenceManager1')
        self.assertFalse(sm.hasAttr('version'))
        test_version = 'v001'
        sm.set_version(test_version)
        self.assertTrue(sm.hasAttr('version'))
        self.assertEqual(sm.get_version(), test_version)
    def test_get_version_will_create_attribute_if_missing(self):
        """testing if get_version() will create the missing version attribute
        """
        sm = pymel.core.PyNode('sequenceManager1')
        self.assertFalse(sm.hasAttr('version'))
        # the getter should create the attribute with an empty default
        result = sm.get_version()
        self.assertTrue(sm.hasAttr('version'))
        self.assertEqual(result, '')
    def test_set_task_name_is_working_properly(self):
        """testing if set_task_name() is working properly
        """
        sm = pymel.core.PyNode('sequenceManager1')
        # the attribute is created lazily by set_task_name()
        self.assertFalse(sm.hasAttr('task_name'))
        test_task_name = 'Animation'
        sm.set_task_name(test_task_name)
        self.assertTrue(sm.hasAttr('task_name'))
        self.assertEqual(sm.task_name.get(), test_task_name)
def test_get_task_name_is_working_properly(self):
"""testing if set_task_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
| |
<reponame>sotte/tf-encrypted<filename>tests/test_convert.py
from typing import List, Tuple
import unittest
import logging
import os
import numpy as np
import tensorflow as tf
import tf_encrypted as tfe
from tf_encrypted.convert import Converter
from tf_encrypted.convert.register import register
from tensorflow.python.platform import gfile
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Conv2D
from tensorflow.keras import backend as K
import pytest
global_filename = ''
class TestConvert(unittest.TestCase):
    """Round-trip tests for the tf-encrypted Converter.

    Each test exports a small TensorFlow graph to a .pb file, evaluates it in
    plain TensorFlow for a reference result, converts the graph to a secure
    protocol (Pond / SecureNN), and asserts the revealed output matches.
    """
    def setUp(self):
        # start every test from a clean TF graph and Keras session
        tf.reset_default_graph()
        K.clear_session()
        self.previous_logging_level = logging.getLogger().level
        logging.getLogger().setLevel(logging.ERROR)
    def tearDown(self):
        global global_filename
        tf.reset_default_graph()
        K.clear_session()
        # remove the .pb file written by the exporter for this test
        logging.debug("Cleaning file: %s" % global_filename)
        os.remove(global_filename)
        logging.getLogger().setLevel(self.previous_logging_level)
    @staticmethod
    def ndarray_input_fn(x):
        # wrap an ndarray in a zero-arg callable, as Converter expects
        def input_fn():
            return tf.constant(x)
        return input_fn
    @staticmethod
    def _assert_successful_conversion(prot, graph_def, actual, *input_fns, decimals=3, **kwargs):
        # convert graph_def under protocol `prot` and compare with `actual`
        prot.clear_initializers()
        converter = Converter(tfe.get_config(), prot, 'model-provider')
        x = converter.convert(graph_def, register(), 'input-provider', list(input_fns))
        with tfe.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # normalize single outputs to a one-element sequence
            if not isinstance(x, (list, tuple)):
                x = [x]
                actual = [actual]
            else:
                assert isinstance(actual, (list, tuple)), "expected output to be tensor sequence"
            try:
                output = sess.run([xi.reveal() for xi in x], tag='reveal')
            except AttributeError:
                # assume all xi are all public
                output = sess.run([xi for xi in x], tag='reveal')
            for o_i, a_i in zip(output, actual):
                np.testing.assert_array_almost_equal(o_i, a_i, decimal=decimals)
    @staticmethod
    def _construct_conversion_test(op_name, *test_inputs, **kwargs):
        # looks up export_<op_name> / run_<op_name> from module globals,
        # exports the graph to <op_name>.pb and computes the reference output
        global global_filename
        global_filename = '{}.pb'.format(op_name)
        exporter = globals()['export_{}'.format(op_name)]
        runner = globals()['run_{}'.format(op_name)]
        protocol = kwargs.pop('protocol')
        path = exporter(global_filename, test_inputs[0].shape, **kwargs)
        tf.reset_default_graph()
        graph_def = read_graph(path)
        tf.reset_default_graph()
        actual = runner(*test_inputs, **kwargs)
        tf.reset_default_graph()
        prot_class = getattr(tfe.protocol, protocol)
        return graph_def, actual, prot_class
    @classmethod
    def _test_with_ndarray_input_fn(cls, op_name, test_input, protocol='Pond', decimals=3, **kwargs):
        # Treat this as an example of how to run tests with a particular kind of input
        graph_def, actual, prot_class = cls._construct_conversion_test(op_name,
                                                                       test_input,
                                                                       protocol=protocol,
                                                                       **kwargs)
        with prot_class() as prot:
            input_fn = cls.ndarray_input_fn(test_input)
            cls._assert_successful_conversion(prot, graph_def, actual, input_fn, decimals=decimals, **kwargs)
    def test_cnn_convert(self):
        test_input = np.ones([1, 1, 28, 28])
        self._test_with_ndarray_input_fn('cnn', test_input, protocol='Pond')
        # exercise both data layouts
        test_input = np.ones([1, 28, 28, 1])
        self._test_with_ndarray_input_fn('cnn', test_input, protocol='Pond', data_format='NHWC')
    def test_matmul_convert(self):
        test_input = np.ones([1, 28])
        self._test_with_ndarray_input_fn('matmul', test_input, protocol='Pond')
    def test_add_convert(self):
        test_input = np.ones([28, 1])
        self._test_with_ndarray_input_fn('add', test_input, protocol='Pond')
    def test_transpose_convert(self):
        test_input = np.ones([1, 2, 3, 4])
        self._test_with_ndarray_input_fn('transpose', test_input, protocol='Pond')
    def test_reshape_convert(self):
        test_input = np.ones([1, 2, 3, 4])
        self._test_with_ndarray_input_fn('reshape', test_input, protocol='Pond')
    def test_expand_dims_convert(self):
        test_input = np.ones([2, 3, 4])
        self._test_with_ndarray_input_fn('expand_dims', test_input, protocol='Pond')
    def test_pad_convert(self):
        test_input = np.ones([2, 3])
        self._test_with_ndarray_input_fn('pad', test_input, protocol='Pond')
    def test_batch_to_space_nd_convert(self):
        test_input = np.ones([8, 1, 3, 1])
        self._test_with_ndarray_input_fn('batch_to_space_nd', test_input, protocol='Pond')
    def test_space_to_batch_nd_convert(self):
        test_input = np.ones([2, 2, 4, 1])
        self._test_with_ndarray_input_fn('space_to_batch_nd', test_input, protocol='Pond')
    def test_squeeze_convert(self):
        test_input = np.ones([1, 2, 3, 1])
        self._test_with_ndarray_input_fn('squeeze', test_input, protocol='Pond')
    def test_sub_convert(self):
        test_input = np.ones([28, 1])
        self._test_with_ndarray_input_fn('sub', test_input, protocol='Pond')
    def test_mul_convert(self):
        test_input = np.array([[1., 2., 3., 4.]])
        self._test_with_ndarray_input_fn('mul', test_input, protocol='Pond')
    def test_strided_slice_convert(self):
        test_input = np.ones((3, 2, 3))
        # test_input = np.array([[[1., 1., 1.], [2., 2., 2.]],
        #                        [[3., 3., 3.], [4., 4., 4.]],
        #                        [[5., 5., 5.], [6., 6., 6.]]])
        self._test_with_ndarray_input_fn('strided_slice', test_input, protocol='Pond')
    def test_slice_convert(self):
        test_input = np.array([[[1., 1., 1.], [2., 2., 2.]],
                               [[3., 3., 3.], [4., 4., 4.]],
                               [[5., 5., 5.], [6., 6., 6.]]])
        self._test_with_ndarray_input_fn('slice', test_input, protocol='Pond')
    def test_batchnorm_convert(self):
        test_input = np.ones([1, 1, 28, 28])
        self._test_with_ndarray_input_fn('batchnorm', test_input, protocol='Pond')
    def test_avgpool_convert(self):
        test_input = np.ones([1, 28, 28, 1])
        self._test_with_ndarray_input_fn('avgpool', test_input, protocol='Pond')
    @pytest.mark.convert_maxpool
    def test_maxpool_convert(self):
        # maxpool needs comparisons, so it requires SecureNN
        test_input = np.ones([1, 4, 4, 1])
        self._test_with_ndarray_input_fn('maxpool', test_input, protocol='SecureNN')
    def test_stack_convert(self):
        # stack takes several inputs, so it cannot use the single-ndarray helper
        input1 = np.array([1, 4])
        input2 = np.array([2, 5])
        input3 = np.array([3, 6])
        test_inputs = [input1, input2, input3]
        graph_def, actual, prot_class = self._construct_conversion_test('stack',
                                                                        *test_inputs,
                                                                        protocol='Pond')
        with prot_class() as prot:
            input_fns = [self.ndarray_input_fn(x) for x in test_inputs]
            self._assert_successful_conversion(prot, graph_def, actual, *input_fns)
    @unittest.skipUnless(tfe.config.tensorflow_supports_int64(), "Too slow on Circle CI otherwise")
    def test_argmax_convert(self):
        test_input = np.array([1., 2., 3., 4.])
        self._test_with_ndarray_input_fn('argmax', test_input, protocol='SecureNN', axis=0)
    def test_required_space_to_batch_paddings_convert(self):
        test_input = np.array([4, 1, 3], dtype=np.int32)
        self._test_with_ndarray_input_fn('required_space_to_batch_paddings', test_input, protocol='Pond')
    def test_flatten_convert(self):
        test_input = np.random.uniform(size=(1, 5, 5, 5)).astype(np.float32)
        self._test_with_ndarray_input_fn('flatten', test_input, decimals=2, protocol='Pond')
def export_argmax(filename, input_shape, axis):
    """Export a tf.argmax graph over a float placeholder of *input_shape*."""
    input = tf.placeholder(tf.float32, shape=input_shape)
    output = tf.argmax(input, axis)
    return export(output, filename)
def run_argmax(input, axis):
    """Reference run: evaluate tf.argmax(*input*, axis) in plain TensorFlow."""
    argmax_op = tf.argmax(tf.constant(input), axis)
    with tf.Session() as sess:
        return sess.run(argmax_op)
def run_stack(input1, input2, input3):
    """Reference run: stack three constants along a new leading axis."""
    stacked = tf.stack([
        tf.constant(input1),
        tf.constant(input2),
        tf.constant(input3),
    ])
    with tf.Session() as sess:
        return sess.run(stacked)
def export_stack(filename: str, input_shape: Tuple[int]):
    """Export a tf.stack graph over three placeholders of *input_shape*."""
    placeholders = [
        tf.placeholder(tf.float32, shape=input_shape) for _ in range(3)
    ]
    return export(tf.stack(placeholders), filename)
def run_avgpool(input):
    """Reference run: 2x2, stride-2, VALID average pooling (NHWC input)."""
    a = tf.placeholder(tf.float32, shape=input.shape, name="input")
    x = tf.nn.avg_pool(a, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
    with tf.Session() as sess:
        output = sess.run(x, feed_dict={a: input})
    return output
def export_avgpool(filename, input_shape):
    """Export a 2x2, stride-2, VALID avg_pool graph to *filename*."""
    input = tf.placeholder(tf.float32, shape=input_shape, name="input")
    x = tf.nn.avg_pool(input, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
    return export(x, filename)
def run_maxpool(input):
    """Reference run: 2x2, stride-2, VALID max pooling (NHWC input)."""
    a = tf.placeholder(tf.float32, shape=input.shape, name="input")
    x = tf.nn.max_pool(a, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
    with tf.Session() as sess:
        output = sess.run(x, feed_dict={a: input})
    return output
def export_maxpool(filename, input_shape):
    """Export a 2x2, stride-2, VALID max_pool graph to *filename*."""
    input = tf.placeholder(tf.float32, shape=input_shape, name="input")
    x = tf.nn.max_pool(input, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
    return export(x, filename)
def run_batchnorm(input):
    """Reference run: batch normalization with fixed per-channel constants.

    mean/variance/offset/scale are the constants 1/2/3/4 broadcast over the
    last (channel) dimension; they must match export_batchnorm.
    """
    x = tf.placeholder(tf.float32, shape=input.shape, name="input")
    dim = input.shape[3]
    mean = np.ones((1, 1, 1, dim)) * 1
    variance = np.ones((1, 1, 1, dim)) * 2
    offset = np.ones((1, 1, 1, dim)) * 3
    scale = np.ones((1, 1, 1, dim)) * 4
    y = tf.nn.batch_normalization(x, mean, variance, offset, scale, 0.00001)
    with tf.Session() as sess:
        output = sess.run(y, feed_dict={x: input})
    return output
def export_batchnorm(filename: str, input_shape: List[int]):
    """Export a batch_normalization graph with the same 1/2/3/4 constants
    used by run_batchnorm."""
    input = tf.placeholder(tf.float32, shape=input_shape, name="input")
    mean = np.ones((1, 1, 1, input_shape[3]), dtype=np.float32) * 1
    variance = np.ones((1, 1, 1, input_shape[3]), dtype=np.float32) * 2
    offset = np.ones((1, 1, 1, input_shape[3]), dtype=np.float32) * 3
    scale = np.ones((1, 1, 1, input_shape[3]), dtype=np.float32) * 4
    x = tf.nn.batch_normalization(input, mean, variance, offset, scale, 0.00001)
    return export(x, filename)
def run_cnn(input, data_format="NCHW"):
    """Reference run: a single 5x5 SAME conv2d with all-ones weights.

    The computation is done in NHWC; NCHW inputs are transposed in and the
    result transposed back, so the returned layout matches *data_format*.
    """
    feed_me = tf.placeholder(tf.float32, shape=input.shape, name="input")
    x = feed_me
    if data_format == "NCHW":
        x = tf.transpose(x, (0, 2, 3, 1))
    filter = tf.constant(np.ones((5, 5, 1, 16)), dtype=tf.float32, name="weights")
    x = tf.nn.conv2d(x, filter, (1, 1, 1, 1), "SAME", name="conv2d")
    with tf.Session() as sess:
        output = sess.run(x, feed_dict={feed_me: input})
    if data_format == "NCHW":
        output = output.transpose(0, 3, 1, 2)
    return output
def export_cnn(filename: str, input_shape: List[int], data_format="NCHW"):
    """Export a single 5x5 SAME conv2d graph (all-ones weights) to *filename*."""
    input = tf.placeholder(tf.float32, shape=input_shape, name="input")
    filter = tf.constant(np.ones((5, 5, 1, 16)), dtype=tf.float32, name="weights")
    x = tf.nn.conv2d(input, filter, (1, 1, 1, 1), "SAME", data_format=data_format, name="conv2d")
    return export(x, filename)
def run_matmul(input):
    """Reference run: multiply *input* by an all-ones (cols, 1) matrix."""
    a = tf.placeholder(tf.float32, shape=input.shape, name="input")
    b = tf.constant(np.ones((input.shape[1], 1)), dtype=tf.float32)
    x = tf.matmul(a, b)
    with tf.Session() as sess:
        output = sess.run(x, feed_dict={a: input})
    return output
def export_matmul(filename: str, input_shape: List[int]):
    """Export a matmul-by-ones graph matching run_matmul to *filename*."""
    a = tf.placeholder(tf.float32, shape=input_shape, name="input")
    b = tf.constant(np.ones((input_shape[1], 1)), dtype=tf.float32)
    x = tf.matmul(a, b)
    return export(x, filename)
def run_add(input):
    """Reference run: add an all-ones (rows, 1) column vector to *input*.

    The bias is shaped from input.shape[0] (rows) to match export_add, so the
    broadcast is valid for any 2-D input.
    """
    a = tf.placeholder(tf.float32, shape=input.shape, name="input")
    # fix: was np.ones((input.shape[1], 1)) — an apparent copy-paste from
    # run_matmul that disagreed with export_add and would fail to broadcast
    # for general (m, n) inputs; numerically identical for the (28, 1) test
    b = tf.constant(np.ones((input.shape[0], 1)), dtype=tf.float32)
    x = tf.add(a, b)
    with tf.Session() as sess:
        output = sess.run(x, feed_dict={a: input})
    return output
def export_add(filename: str, input_shape: List[int]):
    """Export an add-ones graph to *filename*.

    NOTE(review): the bias uses input_shape[0] (rows) while run_add uses
    input.shape[1]; both broadcast to the same result for the (28, 1) test
    input, but the two should agree — confirm the intended shape.
    """
    a = tf.placeholder(tf.float32, shape=input_shape, name="input")
    b = tf.constant(np.ones((input_shape[0], 1)), dtype=tf.float32)
    x = tf.add(a, b)
    return export(x, filename)
def run_transpose(input):
    """Reference run: NHWC -> NCHW transpose (perm 0, 3, 1, 2)."""
    a = tf.placeholder(tf.float32, shape=input.shape, name="input")
    x = tf.transpose(a, perm=(0, 3, 1, 2))
    with tf.Session() as sess:
        output = sess.run(x, feed_dict={a: input})
    return output
def export_transpose(filename: str, input_shape: List[int]):
    """Export a (0, 3, 1, 2) transpose graph to *filename*."""
    a = tf.placeholder(tf.float32, shape=input_shape, name="input")
    x = tf.transpose(a, perm=(0, 3, 1, 2))
    return export(x, filename)
def run_reshape(input):
    """Reference run: flatten *input* to 2-D (batch, features) via tf.reshape."""
    holder = tf.placeholder(tf.float32, shape=input.shape, name="input")
    # number of features = product of all non-batch dimensions
    flat_size = 1
    for dim in input.shape[1:]:
        flat_size *= dim
    flattened = tf.reshape(holder, [-1, flat_size])
    with tf.Session() as sess:
        return sess.run(flattened, feed_dict={holder: input})
def export_reshape(filename: str, input_shape: List[int]):
    """Export a flatten-to-(batch, features) reshape graph to *filename*."""
    a = tf.placeholder(tf.float32, shape=input_shape, name="input")
    # number of features = product of all non-batch dimensions
    last_size = 1
    for i in input_shape[1:]:
        last_size *= i
    x = tf.reshape(a, [-1, last_size])
    return export(x, filename)
def run_expand_dims(input):
    """Reference run: insert a new leading axis with tf.expand_dims."""
    a = tf.placeholder(tf.float32, shape=input.shape, name="input")
    x = tf.expand_dims(a, axis=0)
    with tf.Session() as sess:
        output = sess.run(x, feed_dict={a: input})
    return output
def export_expand_dims(filename: str, input_shape: List[int]):
    """Export an expand_dims(axis=0) graph to *filename*."""
    a = tf.placeholder(tf.float32, shape=input_shape, name="input")
    x = tf.expand_dims(a, axis=0)
    return export(x, filename)
def run_pad(input):
    """Reference run: constant-pad a 2-D input (2 rows top/bottom, 3/4 cols)."""
    a = tf.placeholder(tf.float32, shape=input.shape, name="input")
    x = tf.pad(a, paddings=tf.constant([[2, 2], [3, 4]]), mode="CONSTANT")
    with tf.Session() as sess:
        output = sess.run(x, feed_dict={a: input})
    return output
def export_pad(filename: str, input_shape: List[int]):
    """Export a constant-pad graph (same paddings as run_pad) to *filename*."""
    a = tf.placeholder(tf.float32, shape=input_shape, name="input")
    x = tf.pad(a, paddings=tf.constant([[2, 2], [3, 4]]), mode="CONSTANT")
    return export(x, filename)
def _construct_batch_to_space_nd(input_shape):
    """Build a batch_to_space_nd op; returns (output_op, input_placeholder).

    Shared by the export_* and run_* helpers so both use the identical graph.
    """
    a = tf.placeholder(tf.float32, shape=input_shape, name="input")
    block_shape = tf.constant([2, 2], dtype=tf.int32)
    crops = tf.constant([[0, 0], [2, 0]], dtype=tf.int32)
    x = tf.batch_to_space_nd(a, block_shape=block_shape, crops=crops)
    return x, a
def export_batch_to_space_nd(filename, input_shape):
    """Export the shared batch_to_space_nd graph to *filename*."""
    x, _ = _construct_batch_to_space_nd(input_shape)
    return export(x, filename)
def run_batch_to_space_nd(input):
    """Evaluate the batch_to_space_nd graph on *input*."""
    output, placeholder = _construct_batch_to_space_nd(input.shape)
    with tf.Session() as sess:
        return sess.run(output, feed_dict={placeholder: input})
def _construct_space_to_batch_nd(input_shape):
a = | |
<reponame>XueQinliang/DDB_RPQL
import sys
from typing import List
from concurrent.futures import ThreadPoolExecutor, as_completed
import grpc
import pickle
from time import time
from time import localtime
from time import asctime
#from jr.jr_execute import sql_drop_temp
#import structures
#from structures import Node
sys.path.append("../")
from net import net_pb2, net_pb2_grpc
from server.connect import Conndb
import etcd3
from etcd.etcd import Etcd_S
import client.structures
from client.structures import Node
from client.create_tree import create_a_tree
# Shared etcd client used by every catalog lookup in this module.
# NOTE(review): host/port are hard-coded — presumably the deployment's fixed
# metadata server; confirm before reuse elsewhere.
etcd = etcd3.client(host="10.77.70.61", port=2379)
def Node2dict(node_x):
    """Serialize a query-plan Node into a plain dict (inverse of dict2Node).

    Field meanings: id (node id, from 0), type (fragment/select/projection/
    join/union), parent (-1 when root), children (child ids), tables, site,
    size (bytes), if_valid (False when pruned), columns, f_id/f_name
    (fragment info), select_condi, projection, join, top.
    """
    # Keep the original key order: nodes2str() str()-ifies this dict, so the
    # serialized form should stay stable.
    fields = ('id', 'type', 'parent', 'children', 'tables', 'site', 'size',
              'if_valid', 'columns', 'f_id', 'f_name', 'select_condi',
              'projection', 'join', 'top')
    return {name: getattr(node_x, name) for name in fields}
def dict2Node(dict_x):
    """Rebuild a client.structures.Node from a dict produced by Node2dict.

    NOTE: the Node constructor takes 'columns' BEFORE 'if_valid', unlike the
    dict's key order — the tuple below follows the constructor.
    """
    ctor_order = ('id', 'type', 'parent', 'children', 'tables', 'site',
                  'size', 'columns', 'if_valid', 'f_id', 'f_name',
                  'select_condi', 'projection', 'join', 'top')
    return Node(*(dict_x[key] for key in ctor_order))
def str2nodes(str_x):
    """Deserialize a nodes2str() string back into a list of Node objects.

    NOTE(review): uses eval() on the serialized string — only feed it
    strings produced by this system (trusted peers / etcd), never user input.
    """
    return [dict2Node(entry) for entry in eval(str_x)]
def nodes2str(nodes_x):
    """Serialize a list of Node objects to a string (inverse of str2nodes)."""
    return str([Node2dict(node) for node in nodes_x])
def table_column_from_pj(pj):
    """Split qualified names like 'table.column' into parallel lists.

    Returns (tables, columns). Splits on the FIRST dot only.
    """
    tables, columns = [], []
    for qualified in pj:
        dot = qualified.find('.')
        tables.append(qualified[:dot])
        columns.append(qualified[dot + 1:])
    return tables, columns
def table_column_type(table, column):
    """Look up *column* in etcd metadata for *table*.

    Returns (table, column, sql_type), or None implicitly when the column is
    not listed in the table's metadata.
    """
    meta = eval(etcd.get('/table/' + table + '/columns')[0])
    for entry in meta:
        if entry[0] == column:
            return table, column, entry[1]
def fragment_columns(table, site):
    """Fetch the column list of *table*'s fragment stored at *site* (from etcd)."""
    print('fragment_site: ' + site)
    key = '/table/' + table + '/fragment/' + str(site)
    return eval(etcd.get(key)[0])['columns']
def sql_create(db_conn, query_no, node_no, columns):
    """CREATE the intermediate result table Q<query_no>_N<node_no>.

    *columns* is a list of (table, column, sql_type) triples; each becomes a
    column named table_column.
    """
    table_name = 'Q' + str(query_no) + '_N' + str(node_no)
    col_defs = ', '.join(col[0] + '_' + col[1] + ' ' + col[2] for col in columns)
    dfs_sql(db_conn, 'create table ' + table_name + ' (' + col_defs + ');', True)
def sql_createindex(db_conn, query_no, node_no, columns):
    """Create one index per (table, column) pair on Q<query_no>_N<node_no>."""
    table_name = 'Q' + str(query_no) + '_N' + str(node_no)
    for col in columns:
        prefixed = col[0] + '_' + col[1]
        stmt = ('create index ' + prefixed + '_index on ' + table_name +
                ' (' + prefixed + ');')
        dfs_sql(db_conn, stmt, True)
def valueij(valueij):
    """Render one value as a SQL literal: quote strings, stringify the rest.

    The parameter deliberately keeps its historical name (it shadows the
    function inside the body, which is harmless since we never recurse);
    renaming it could break keyword callers.
    """
    # isinstance() replaces the fragile str(type(...)).find('str') check,
    # which also matched any type whose name merely contained 'str'.
    if isinstance(valueij, str):
        return "'" + valueij + "'"
    return str(valueij)
def value_tuple(value):
    """Render one row as a parenthesized SQL value tuple, e.g. (1, 'a').

    Robustness fix: an empty row now yields '()' instead of raising
    IndexError on value[0] as the original did.
    """
    return '(' + ', '.join(valueij(item) for item in value) + ')'
def sql_insert(query_no, node_no, columns, values):
    """Build an INSERT statement for Q<query_no>_N<node_no>.

    Returns '' when there is nothing to insert. *columns* is a list of
    (table, column, ...) tuples; *values* is a sequence of rows.
    """
    # `not values` covers both [] and () — callers (dfs/dfs_execute) pass
    # tuples, and the old `values == []` check missed the empty tuple and
    # crashed on values[0].
    if not values:
        return ''
    table_name = 'Q' + str(query_no) + '_N' + str(node_no)
    sql = 'insert into ' + table_name + '('
    print('SQL: ' + sql)
    sql += ', '.join(col[0] + '_' + col[1] for col in columns)
    sql += ') values'
    sql += ', '.join(value_tuple(row) for row in values)
    sql += ';'
    print('SQL: insert into ' + table_name + ' generated')
    return sql
def sql_select_fragment(columns):
    """SELECT the raw (unprefixed) columns of a fragment from its base table.

    All entries share the same base table, taken from columns[0][0].
    """
    names = ', '.join(col[1] for col in columns)
    return 'select ' + names + ' from ' + columns[0][0] + ';'
def sql_select_star(query_no, node_no):
    """SELECT * from the intermediate table Q<query_no>_N<node_no>."""
    return 'select * from Q{}_N{};'.format(query_no, node_no)
def sql_select(query_no, node_no, columns, select_condi):
    """Build a filtered SELECT over Q<query_no>_N<node_no>.

    Filters on the first prefixed column using select_condi[1] (operator)
    and select_condi[2] (literal). The star-select is inlined here instead
    of going through sql_select_star.
    """
    table_name = 'Q' + str(query_no) + '_N' + str(node_no)
    prefixed = columns[0][0] + '_' + columns[0][1]
    return ('select * from ' + table_name + ' where ' + table_name + '.' +
            prefixed + ' ' + select_condi[1] + ' ' + select_condi[2] + ';')
def sql_project(query_no, node_no, columns):
    """SELECT the prefixed (table_column) attributes from Q<query_no>_N<node_no>."""
    projected = ', '.join(col[0] + '_' + col[1] for col in columns)
    return ('select ' + projected + ' from Q' + str(query_no) +
            '_N' + str(node_no) + ';')
def sql_union(query_no, children):
    """UNION ALL of the child intermediate tables of one query.

    The star-selects are inlined (rather than calling sql_select_star and
    stripping the trailing ';').
    """
    selects = ['select * from Q' + str(query_no) + '_N' + str(child)
               for child in children]
    return ' UNION ALL '.join(selects) + ';'
def sql_join(db_conn, query_no, nodes_no, columns):
    """Equi-join the two intermediate tables of *nodes_no* and return the rows.

    columns[0]/columns[1] are the (table, column) join keys of the left and
    right child respectively.
    """
    left = 'Q' + str(query_no) + '_N' + str(nodes_no[0])
    right = 'Q' + str(query_no) + '_N' + str(nodes_no[1])
    # Index the left table's join column first to speed up the join.
    sql_createindex(db_conn, query_no, nodes_no[0], [columns[0]])
    sql = ('select * from ' + left + ', ' + right +
           ' where ' + left + '.' + columns[0][0] + '_' + columns[0][1] +
           ' = ' + right + '.' + columns[1][0] + '_' + columns[1][1] + ';')
    return dfs_sql(db_conn, sql, False)
def sql_drop_temp(query_no, children, db_conn):
    """Drop the per-query temporary tables for the given child node ids."""
    for child in children:
        stmt = 'drop table Q' + str(query_no) + '_N' + str(child) + ';'
        dfs_sql(db_conn, stmt, True)
def temp_GC(db_conn):
    """Garbage-collect every leftover temp table (names like Q<q>_N<n>).

    Called on error paths so a failed distributed query does not leak its
    intermediate result tables.
    """
    tables = dfs_sql(db_conn, 'show tables;', False)
    for row in tables:
        name = row[0]
        # startswith() also tolerates an empty name, where name[0] raised.
        if name.startswith('Q') and name.find('_') >= 0:
            dfs_sql(db_conn, 'drop table ' + name + ';', True)
            # Fixed log message: the original concatenation lacked a space
            # ("...success").
            print("drop table " + name + " success")
    print('temp GC!')
def get_site2ipd():
    """Return the site-id -> 'ip:port:db' mapping (index 0 is unused)."""
    # NOTE(review): this hard-coded table is unconditionally overwritten by
    # the etcd lookup below, so it is effectively dead code — kept only as
    # documentation of the expected entry format.
    site2ipd = [None,
                '10.77.70.61:8883:db1',
                '10.77.70.61:8885:db2',
                '10.77.70.62:8883:db1',
                '10.77.70.63:8883:db1'
               ]
    #'''
    # Authoritative mapping comes from etcd ('sitenames' key).
    site2ipd = eval(etcd.get('sitenames')[0])
    #'''
    return site2ipd
def dfs_execute(query_no, node_no, str_nodes, self_site, db_conn, client):
    """Execute plan node *node_no*, locally or via gRPC on its owning site.

    Returns (node_no, columns, values, trans_vol); on failure returns
    (-1, [], (), site) where the last element carries the unreachable site id.
    The child result is materialized into the local temp table Q<q>_N<node>.
    """
    site2ipd = get_site2ipd()
    nodes = str2nodes(str_nodes)
    #print(query_no, node_no)
    #response
    dfs_node_no = None
    str_columns = None
    str_values = None
    trans_vol = None
    print(nodes[node_no].site, self_site)
    if nodes[node_no].site != self_site: # different site: remote call via gRPC
        print('[grpc from Site' + str(self_site) + ' to Site' + str(nodes[node_no].site) + '] Start Node' + str(node_no))
        try:
            conn = client[site2ipd[nodes[node_no].site]]
            response = conn.grpc_dfs(net_pb2.para_grpc_dfs(query_no=query_no, node_no=node_no, str_nodes=str_nodes))
        except KeyError as e:
            # No stub for that site: abort, clean temp tables, report the site.
            print('WARNING! ' + str(e) + ' NOT found')
            temp_GC(db_conn)
            return -1, [], (), nodes[node_no].site
        else:
            dfs_node_no = response.dfs_node_no
            str_columns = response.str_columns
            str_values = response.str_values
            trans_vol = response.trans_vol + sys.getsizeof(str_values) # remote call: accumulate transferred bytes
            #site check
            # On failure trans_vol carries the failing site id, not a volume.
            if dfs_node_no == -1:
                print('WARNING! Site' + str(trans_vol) + ' NOT found')
                temp_GC(db_conn)
                return -1, [], (), trans_vol
    else:
        print('_THIS Site Start Node' + str(node_no)) # same site: recurse locally
        dfs_node_no, str_columns, str_values, trans_vol = dfs(query_no, node_no, str_nodes, db_conn, client)
        if dfs_node_no == -1:
            print('WARNING! Site' + str(trans_vol) + ' NOT found')
            temp_GC(db_conn)
            return -1, [], (), trans_vol
    columns = eval(str_columns)
    values = eval(str_values)
    i = nodes[node_no]
    #print(sql_create(query_no, i.id, columns)) # create intermediate result table
    sql_create(db_conn, query_no, i.id, columns) # create intermediate result table (indexes added separately for speed)
    if values != (): # if the result is non-empty, write it into the intermediate table
        #print(sql_insert(query_no, i.id, columns, values))
        dfs_sql(db_conn, sql_insert(query_no, i.id, columns, values), True)
    print('Finish Node' + str(node_no))
    return node_no, columns, values, trans_vol #return node_no,columns,data,trans_vol data=tuple(tuple)
def grpc_dfs(self, request, context):
    """gRPC service handler: run dfs() locally for a remote caller.

    NOTE(review): written as a method (takes self for its conndb) but relies
    on the module-global `client` stub map set up by the server entry point.
    """
    #get paras from request
    query_no = request.query_no
    node_no = request.node_no
    str_nodes = request.str_nodes
    #call dfs local
    # Hand servermaster.py's conndb and client stubs to dfs.
    global client
    dfs_node_no, str_columns, str_values, trans_vol = dfs(query_no, node_no, str_nodes, self.conndb, client)
    #return
    return net_pb2.ret_grpc_dfs(dfs_node_no=dfs_node_no, str_columns=str_columns, str_values=str_values, trans_vol=trans_vol)
def dfs(query_no, node_no, str_nodes, db_conn, client): #return node_no,columns,data,trans_vol data=tuple(tuple)
#print(node_no)
nodes = str2nodes(str_nodes)
#print('dfs' + str(node_no) + ' on Site' + str(nodes[node_no].site))
#if nodes[node_no].children != []:
# print('dfs' + str(node_no) + ' wait for ' + str(len(nodes[node_no].children)) + ' children')
future_results = []
if nodes[node_no].children != []:
tasks = []
with ThreadPoolExecutor(max_workers=5) as t:
for i in nodes[node_no].children:
task = t.submit(dfs_execute, query_no, i, str_nodes, nodes[node_no].site, db_conn, client)
tasks.append(task)
for future in as_completed(tasks):
future_results.append(future.result())
#site check
print('site check:')
for i in future_results:
#print(i)
if i[0] == -1:
print('WARNING! Site' + str(i[-1]) + ' NOT found')
temp_GC(db_conn)
return i[0], '', '', i[-1]
i = nodes[node_no]
columns = []
values = ()
#values = ((300001, 'Xiaoming', 1), (300002,'Xiaohong', 1)) #实际运行没有这一行
if i.type == 'fragment':
columns = fragment_columns(i.tables[0], i.site)
for j in range(0, len(columns)):
columns[j] = table_column_type(i.tables[0], columns[j])
print('Node' + str(node_no), end=': ')
#print(sql_select_fragment(columns)) #实际运行时,调用db_conn[i.site]
values = dfs_sql(db_conn, sql_select_fragment(columns), False) #values = fragment sql结果
elif i.type == 'projection':
tables, columns = table_column_from_pj(i.projection)
for j in range(0, len(columns)):
columns[j] | |
<reponame>WolfLo/OPTIMIST
import baselines.common.tf_util as U
import tensorflow as tf
import gym
from baselines.common.distributions import MultiGaussianVectorPdType
import numpy as np
from baselines.common import set_global_seeds
import scipy.stats as sts
"""References
PGPE: Sehnke, Frank, et al. "Policy gradients with parameter-based exploration for
control." International Conference on Artificial Neural Networks. Springer,
Berlin, Heidelberg, 2008.
"""
class MultiPeMlpPolicy(object):
"""Multi-layer-perceptron policy with Gaussian parameter-based exploration"""
    def __init__(self, name, *args, **kwargs):
        """Build the policy graph under variable scope *name*, initialize TF
        variables, and sample an initial set of actor parameters.

        Requires a default TF session to already exist.
        """
        with tf.variable_scope(name):
            self._init(*args, **kwargs)
            self.scope = tf.get_variable_scope().name
        U.initialize()
        #Sample initial actor params
        tf.get_default_session().run(self._use_sampled_actor_params)
    def _init(self, ob_space, ac_space, hid_layers=[],
              deterministic=True, diagonal=True,
              use_bias=True, use_critic=False,
              seed=None):
        """Params:
            ob_space: task observation space
            ac_space : task action space
            hid__layers: list with width of each hidden layer
            deterministic: whether the actor is deterministic
            diagonal: whether the higher order policy has a diagonal covariance
            matrix
            use_bias: whether to include bias in neurons
            use_critic: whether to include a critic network
            seed: optional random seed

        Builds the whole TF graph: optional critic, deterministic MLP actor,
        and the Gaussian higher-order (parameter-exploring) policy plus all
        the U.function handles the public methods use.
        NOTE(review): hid_layers defaults to a mutable [] — harmless here
        because it is only read, never mutated.
        """
        assert isinstance(ob_space, gym.spaces.Box)
        assert len(ac_space.shape)==1
        self.diagonal = diagonal
        self.use_bias = use_bias
        batch_length = None #Accepts a sequence of episodes of arbitrary length
        self.ac_dim = ac_space.shape[0]
        self.ob_dim = ob_space.shape[0]
        # A policy with no hidden layers is linear; freeze() relies on this.
        self.linear = not hid_layers
        if seed is not None:
            set_global_seeds(seed)
        self._ob = ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[None] + list(ob_space.shape))
        #Critic (normally not used)
        if use_critic:
            with tf.variable_scope('critic'):
                last_out = ob
                for i, hid_size in enumerate(hid_layers):
                    last_out = tf.nn.tanh(tf.layers.dense(last_out, hid_size, name="fc%i"%(i+1), kernel_initializer=U.normc_initializer(1.0)))
                self.vpred = tf.layers.dense(last_out, 1, name='final', kernel_initializer=U.normc_initializer(1.0))[:,0]
        #Actor (N.B.: weight initialization is irrelevant)
        with tf.variable_scope('actor'):
            last_out = ob
            for i, hid_size in enumerate(hid_layers):
                #Mlp feature extraction
                last_out = tf.nn.tanh(tf.layers.dense(last_out, hid_size,
                                      name='fc%i'%(i+1),
                                      kernel_initializer=U.normc_initializer(1),use_bias=use_bias))
            if deterministic and isinstance(ac_space, gym.spaces.Box):
                #Determinisitc action selection
                self.actor_mean = actor_mean = tf.layers.dense(last_out, ac_space.shape[0],
                                       name='action',
                                       kernel_initializer=U.normc_initializer(0.01),
                                       use_bias=use_bias)
            else:
                raise NotImplementedError #Currently supports only deterministic action policies
        #Higher order policy (Gaussian)
        with tf.variable_scope('actor') as scope:
            self.actor_weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \
                                scope=scope.name)
            # One independent Gaussian per actor weight tensor (flattened).
            self.layers = [tf.reshape(w, [-1]) for w in self.actor_weights]
            self.layer_lens = [w.shape[0].value for w in self.layers]
            print('# Independent Gaussians:', len(self.layer_lens))
            self.flat_actor_weights = tf.concat([tf.reshape(w, [-1]) for w in \
                                self.actor_weights], axis=0) #flatten
            self._n_actor_weights = n_actor_weights = self.flat_actor_weights.shape[0]
        with tf.variable_scope('higher'):
            #Initial means sampled from a normal distribution N(0,1)
            # Zero-valued actor weights (e.g. disabled biases) keep a zero mean.
            higher_mean_init = tf.where(tf.not_equal(self.flat_actor_weights, tf.constant(0, dtype=tf.float32)),
                        tf.random_normal(shape=[n_actor_weights.value], stddev=0.01), tf.zeros(shape=[n_actor_weights]))
            self.higher_mean = higher_mean = tf.get_variable(name='higher_mean',
                                           initializer=higher_mean_init)
            if diagonal:
                #Diagonal covariance matrix; all stds initialized to 0
                self.higher_logstd = higher_logstd = tf.get_variable(name='higher_logstd',
                                               shape=[n_actor_weights],
                                               initializer=tf.initializers.constant(0.))
                pdparam = tf.concat([higher_mean, higher_mean * 0. +
                                     higher_logstd], axis=0)
                self.pdtype = pdtype = MultiGaussianVectorPdType(n_actor_weights.value)
            else:
                raise NotImplementedError
        #Sample actor weights
        self.pd = pdtype.pdfromflat(pdparam, self.layer_lens)
        sampled_actor_params = self.pd.sample()
        symm_sampled_actor_params = self.pd.sample_symmetric()
        self._sample_symm_actor_params = U.function(
            [],list(symm_sampled_actor_params))
        self._sample_actor_params = U.function([], [sampled_actor_params])
        #Assign actor weights
        with tf.variable_scope('actor') as scope:
            actor_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \
                                scope=scope.name)
            self._use_sampled_actor_params = U.assignFromFlat(actor_params,
                                                         sampled_actor_params)
            self._set_actor_params = U.SetFromFlat(actor_params)
            self._get_actor_params = U.GetFlat(actor_params)
        #Act
        self._action = action = actor_mean
        self._act = U.function([ob],[action])
        #Higher policy weights
        with tf.variable_scope('higher') as scope:
            self._higher_params = higher_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \
                                scope=scope.name)
            self.flat_higher_params = tf.concat([tf.reshape(w, [-1]) for w in \
                                self._higher_params], axis=0) #flatten
            self._n_higher_params = self.flat_higher_params.shape[0]
            self._get_flat_higher_params = U.GetFlat(higher_params)
            self._set_higher_params = U.SetFromFlat(self._higher_params)
        #Batch PGPE
        self._actor_params_in = U.get_placeholder(name='actor_params_in',
                                                  dtype=tf.float32,
                                                  shape=[batch_length] + [n_actor_weights])
        self._rets_in = rets_in = U.get_placeholder(name='returns_in',
                                                    dtype=tf.float32,
                                                    shape=[batch_length])
        ret_mean, ret_std = tf.nn.moments(rets_in, axes=[0])
        self._get_ret_mean = U.function([self._rets_in], [ret_mean])
        self._get_ret_std = U.function([self._rets_in], [ret_std])
        #Renyi computation
        self._det_sigma = tf.exp(tf.reduce_sum(self.higher_logstd))
        #Fisher computation (diagonal case)
        mean_fisher_diag = tf.exp(-2*self.higher_logstd)
        cov_fisher_diag = mean_fisher_diag*0 + 2
        self._fisher_diag = tf.concat([mean_fisher_diag, cov_fisher_diag], axis=0)
        self._get_fisher_diag = U.function([], [self._fisher_diag])
        # Caches for lazily-built off-policy / Renyi graphs.
        self._behavioral = None
        self._renyi_other = None
#Black box usage
    def act(self, ob, resample=False):
        """
        Sample weights for the actor network, then sample action(s) from the
        resulting actor depending on state(s)
        Params:
            ob: current state, or a list of states
            resample: whether to resample actor params before acting
        Returns:
            the action when resample is False, otherwise the pair
            (action, actor_param) with the freshly sampled actor params
        """
        if resample:
            actor_param = self.resample()
        action = self._act(np.atleast_2d(ob))[0]
        # actor_param is only bound when resample is True; the conditional
        # expression below never reads it otherwise.
        return (action, actor_param) if resample else action
class _FrozenLinearActor(object):
def __init__(self, higher_params, ob_dim, ac_dim, use_bias):
self.higher_params = np.ravel(higher_params)
self.ob_dim = ob_dim
self.ac_dim = ac_dim
self.use_bias = use_bias
self.higher_mean = self.higher_params[:len(self.higher_params)//2]
self.higher_cov = np.diag(np.exp(2*self.higher_params[len(self.higher_params)//2:]))
self.resample()
def resample(self):
self.actor_params = np.random.multivariate_normal(self.higher_mean, self.higher_cov)
return self.actor_params
def act(self, ob, resample=False):
if resample:
self.resample()
ob = np.ravel(ob)
if self.use_bias:
np.append(ob, 1)
ob = ob.reshape((self.ob_dim + self.use_bias, 1))
theta = self.actor_params.reshape((self.ac_dim, self.ob_dim + self.use_bias))
return np.ravel(np.dot(theta, ob))
def seed(self, seed):
np.random.seed(seed)
    def freeze(self):
        """Return a self-contained snapshot of this policy.

        Non-linear policies are returned as-is; linear ones (no hidden
        layers) are converted to a numpy-only _FrozenLinearActor built from
        the current higher-order parameters.
        """
        if not self.linear:
            return self
        return self._FrozenLinearActor(self.eval_params(),
                                       self.ob_dim,
                                       self.ac_dim,
                                       self.use_bias)
def act_with(self, ob, actor_params):
self.set_actor_params(actor_params)
return self.act(ob)
def resample(self):
"""Resample actor params
Returns:
the sampled actor params
"""
tf.get_default_session().run(self._use_sampled_actor_params)
return self.eval_actor_params()
def eval_params(self):
"""Get current params of the higher order policy"""
return self._get_flat_higher_params()
def set_params(self, new_higher_params):
"""Set higher order policy parameters from flat sequence"""
self._set_higher_params(new_higher_params)
def seed(self, seed):
if seed is not None:
set_global_seeds(seed)
#Direct actor policy manipulation
def draw_actor_params(self):
"""Sample params for an actor (without using them)"""
sampled_actor_params = self._sample_actor_params()[0]
return sampled_actor_params
def draw_symmetric_actor_params(self):
return tuple(self._sample_symm_actor_params())
def eval_actor_params(self):
"""Get actor params as last assigned"""
return self._get_actor_params()
def set_actor_params(self, new_actor_params):
"""Manually set actor policy parameters from flat sequence"""
self._set_actor_params(new_actor_params)
#Distribution properties
def eval_renyi(self, other, order=2):
"""Renyi divergence
Special case: order=1 is kl divergence
Params:
other: policy to evaluate the distance from
order: order of the Renyi divergence
exponentiate: if true, actually returns e^Renyi(self||other)
"""
if other is not self._renyi_other:
print('EXTENDING!!')
self._renyi_order = tf.placeholder(name='renyi_order', dtype=tf.float32, shape=[])
self._renyi_other = other
if order<1:
raise ValueError('Order must be >= 1')
else:
renyi = self.pd.renyi(other.pd, alpha=self._renyi_order)
self._get_renyi = U.function([self._renyi_order], [renyi])
return np.sum(self._get_renyi(order)[0])
def eval_fisher(self):
if not self.diagonal:
raise NotImplementedError(
'Only diagonal covariance currently supported')
return np.ravel(self._get_fisher_diag()[0])
def fisher_product(self, x):
if not self.diagonal:
raise NotImplementedError(
'Only diagonal covariance currently supported')
return x/self.eval_fisher()
#Performance evaluation
def eval_performance(self, actor_params, rets, behavioral=None):
batch_size = len(rets)
if behavioral is None:
#On policy
return self._get_ret_mean(rets)[0], self._get_ret_std(rets)[0]
else:
#Off policy
if behavioral is not self._behavioral:
self._build_iw_graph(behavioral)
self._behavioral = behavioral
return np.sum(self._get_off_ret_mean(rets, actor_params)[0]), \
np.sum(self._get_off_ret_std(rets, actor_params, batch_size)[0])
def eval_iws(self, actor_params, behavioral, normalize=True):
if behavioral is not self._behavioral:
self._build_iw_graph(behavioral)
self._behavioral = behavioral
if normalize:
return self._get_iws(actor_params)[0]
else:
return self._get_unn_iws(actor_params)[0]
def eval_bound(self, actor_params, rets, behavioral, rmax, normalize=True,
use_rmax = True, use_renyi=True, delta=0.2):
if behavioral is not self._behavioral:
self._build_iw_graph(behavioral)
self._behavioral = behavioral
batch_size = len(rets)
"""
if use_rmax:
ppf = sts.norm.ppf(1 - delta)
else:
ppf = sts.t.ppf(1 - delta, batch_size - 1)
"""
ppf = np.sqrt(1./delta - 1)
#"""
index = int(str(int(normalize)) + str(int(use_rmax)) + str(int(use_renyi)), 2)
bound_getter = self._get_bound[index]
bound = bound_getter(actor_params, rets, batch_size, ppf, rmax)[0]
return bound
def eval_bound_and_grad(self, actor_params, rets, behavioral, rmax, normalize=True,
use_rmax=True, use_renyi=True, delta=0.2):
if behavioral is not self._behavioral:
self._build_iw_graph(behavioral)
self._behavioral = behavioral
batch_size = len(rets)
"""
if use_rmax:
ppf = sts.norm.ppf(1 - delta)
else:
ppf = sts.t.ppf(1 - delta, batch_size - 1)
"""
ppf = np.sqrt(1./delta - 1)
#"""
index = int(str(int(normalize)) + str(int(use_rmax)) + str(int(use_renyi)), 2)
bound_and_grad_getter = self._get_bound_grad[index]
bound, grad = bound_and_grad_getter(actor_params, rets, batch_size, ppf, rmax)
return bound, grad
def _build_iw_graph(self, behavioral):
print('EXTENDING!!')
self._batch_size = batch_size = tf.placeholder(name='batchsize', dtype=tf.float32, shape=[])
#Self-normalized importance weights
#unn_iws = self._probs/behavioral._probs
unn_iws = tf.exp(self.pd.logp(self._actor_params_in) -
behavioral.pd.logp(self._actor_params_in))
iws = unn_iws/tf.reduce_sum(unn_iws, axis=0)
self._get_unn_iws = U.function([self._actor_params_in], [unn_iws])
self._get_iws = U.function([self._actor_params_in], [iws])
#Offline performance
ret_mean = tf.reduce_sum(tf.expand_dims(self._rets_in, -1) * iws, axis=0)
unn_ret_mean = tf.reduce_mean(tf.expand_dims(self._rets_in, -1) * unn_iws, axis=0)
self._get_off_ret_mean = U.function([self._rets_in, self._actor_params_in], [ret_mean])
ret_std = tf.sqrt(tf.reduce_sum(iws ** 2 * (tf.expand_dims(self._rets_in, -1) - ret_mean) ** 2) * batch_size)
self._get_off_ret_std = U.function([self._rets_in, self._actor_params_in, self._batch_size], [ret_std])
#Renyi
renyi = self.pd.renyi(behavioral.pd)
renyi = tf.where(tf.is_nan(renyi), tf.constant(np.inf, shape=renyi.shape), renyi)
renyi = tf.where(renyi<0., tf.constant(np.inf, shape=renyi.shape), renyi)
#Weight norm
iws2norm = tf.norm(iws, axis=0)
#Return properties
self._rmax = tf.placeholder(name='R_max', dtype=tf.float32, shape=[])
on_ret_mean, on_ret_var = tf.nn.moments(self._rets_in, axes=[0])
#Penalization coefficient
self._ppf = tf.placeholder(name='penal_coeff', dtype=tf.float32, shape=[])
#All the bounds
bounds = []
bounds.append(unn_ret_mean - self._ppf * tf.sqrt(on_ret_var) * iws2norm) #000
bounds.append(unn_ret_mean - self._ppf * tf.sqrt(on_ret_var) * tf.exp(0.5*renyi)/tf.sqrt(batch_size)) #001
bounds.append(unn_ret_mean - self._ppf * self._rmax * iws2norm) #010
bounds.append(unn_ret_mean - self._ppf * self._rmax * tf.exp(0.5*renyi)/tf.sqrt(batch_size)) #011
bounds.append(ret_mean - self._ppf * tf.sqrt(on_ret_var) * iws2norm) #100
bounds.append(ret_mean - self._ppf * tf.sqrt(on_ret_var) * tf.exp(0.5*renyi)/tf.sqrt(batch_size)) #101
bounds.append(ret_mean - self._ppf * self._rmax * iws2norm) #110
bounds.append(ret_mean - self._ppf * self._rmax * tf.exp(0.5*renyi)/tf.sqrt(batch_size)) #111
inputs = [self._actor_params_in, self._rets_in, self._batch_size, self._ppf, self._rmax]
self._get_bound = [U.function(inputs, [bounds[i]]) | |
r25c4 = request.POST.get('r25c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r25c5 = request.POST.get('r25c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r25c6 = request.POST.get('r25c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r25c7 = request.POST.get('r25c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r25c8 = request.POST.get('r25c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r25c9 = request.POST.get('r25c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r25c10 = request.POST.get('r25c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r25c11 = request.POST.get('r25c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r25c12 = request.POST.get('r25c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c1 = request.POST.get('r26c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c2 = request.POST.get('r26c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c3 = request.POST.get('r26c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c4 = request.POST.get('r26c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c5 = request.POST.get('r26c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c6 = request.POST.get('r26c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c7 = request.POST.get('r26c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c8 = request.POST.get('r26c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c9 = request.POST.get('r26c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c10 = request.POST.get('r26c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c11 = request.POST.get('r26c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r26c12 = request.POST.get('r26c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c1 = request.POST.get('r27c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c2 = request.POST.get('r27c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c3 = request.POST.get('r27c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c4 = request.POST.get('r27c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c5 = request.POST.get('r27c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c6 = request.POST.get('r27c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c7 = request.POST.get('r27c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c8 = request.POST.get('r27c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c9 = request.POST.get('r27c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c10 = request.POST.get('r27c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c11 = request.POST.get('r27c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r27c12 = request.POST.get('r27c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c1 = request.POST.get('r28c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c2 = request.POST.get('r28c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c3 = request.POST.get('r28c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c4 = request.POST.get('r28c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c5 = request.POST.get('r28c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c6 = request.POST.get('r28c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c7 = request.POST.get('r28c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c8 = request.POST.get('r28c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c9 = request.POST.get('r28c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c10 = request.POST.get('r28c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c11 = request.POST.get('r28c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r28c12 = request.POST.get('r28c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c1 = request.POST.get('r29c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c2 = request.POST.get('r29c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c3 = request.POST.get('r29c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c4 = request.POST.get('r29c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c5 = request.POST.get('r29c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c6 = request.POST.get('r29c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c7 = request.POST.get('r29c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c8 = request.POST.get('r29c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c9 = request.POST.get('r29c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c10 = request.POST.get('r29c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c11 = request.POST.get('r29c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r29c12 = request.POST.get('r29c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
body = '<!doctype html>' + \
'<html lang="en">' + \
'<head>' + \
'<meta charset="utf-8">' + \
'<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">' + \
'<link rel="stylesheet"' + \
'href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css"' + \
'integrity="<KEY>"' + \
'crossorigin="anonymous">' + \
'<title>Projected income statement</title>' + \
'</head>' + \
'<body>' + \
'<div class="container">' + \
'<div class="card text-center">' + \
'<div class="card-header text-center">Projected income statement</div>' + \
'<div class="card-body">'
body += '<h6>Comapny name : ' + company_name + '</h6>' + \
'<h6>Share capital : ' + share_capital + '</h6>' + \
'<h6>Head office address : ' + head_office_address + '</h6>' + \
'<h6>Establishment number : ' + establishment_number + '</h6>' + \
'<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \
'<h6>Main activities : ' + main_activities + '</h6>' + \
'<h6>Activity number : ' + activity_number + '</h6>' + \
'<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \
'<h6>President : ' + president + '</h6>' + \
'<h6>Registration date : ' + registration_date + '</h6>' + \
'<br>'
body += '<br>'
body += '<table class="table table-striped table-bordered">' + \
'<thead>' + \
'<tr>' + \
'<th scope="col">Details</th>' + \
'<th scope="col">M1</th>' + \
'<th scope="col">M2</th>' + \
'<th scope="col">M3</th>' + \
'<th scope="col">M4</th>' + \
'<th scope="col">M5</th>' + \
'<th scope="col">M6</th>' + \
'<th scope="col">M7</th>' + \
'<th scope="col">M8</th>' + \
'<th scope="col">M9</th>' + \
'<th scope="col">M10</th>' + \
'<th scope="col">M11</th>' + \
'<th scope="col">M12</th>' + \
'</tr>' + \
'</thead>' + \
'<tbody>' + \
'<tr>' + \
'<td>Total Revenue</td>' + \
'<td>' + r1c1 + '</td>' + \
'<td>' + r1c2 + '</td>' + \
'<td>' + r1c3 + '</td>' + \
'<td>' + r1c4 + '</td>' + \
'<td>' + r1c5 + '</td>' + \
'<td>' + r1c6 + '</td>' + \
'<td>' + r1c7 + '</td>' + \
'<td>' + r1c8 + '</td>' + \
'<td>' + r1c9 + '</td>' + \
'<td>' + r1c10 + '</td>' + \
'<td>' + r1c11 + '</td>' + \
'<td>' + r1c12 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Cost of Goods Sold</td>' + \
'<td>' + r2c1 + '</td>' + \
'<td>' + r2c2 + '</td>' + \
'<td>' + r2c3 + '</td>' + \
'<td>' + r2c4 + '</td>' + \
'<td>' + r2c5 + '</td>' + \
'<td>' + r2c6 + '</td>' + \
'<td>' + r2c7 + '</td>' + \
'<td>' + r2c8 + '</td>' + \
'<td>' + r2c9 + '</td>' + \
'<td>' + r2c10 + '</td>' + \
'<td>' + r2c11 + '</td>' + \
'<td>' + r2c12 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Gross Profit</td>' + \
'<td>' + r3c1 + '</td>' + \
'<td>' + r3c2 + '</td>' + \
'<td>' + r3c3 + '</td>' + \
'<td>' + r3c4 + '</td>' + \
'<td>' + r3c5 + '</td>' + \
'<td>' + r3c6 + '</td>' + \
'<td>' + r3c7 + '</td>' + \
'<td>' + r3c8 + '</td>' + \
'<td>' + r3c9 + '</td>' + \
'<td>' + r3c10 + '</td>' + \
'<td>' + r3c11 + '</td>' + \
'<td>' + r3c12 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Admin Salaries / Bonuses</td>' + \
'<td>' + r4c1 + '</td>' + \
'<td>' + r4c2 + '</td>' + \
'<td>' + r4c3 + '</td>' + \
'<td>' + r4c4 + '</td>' + \
'<td>' + r4c5 + '</td>' + \
'<td>' + r4c6 + '</td>' + \
'<td>' + r4c7 + '</td>' + \
'<td>' + r4c8 + '</td>' + \
'<td>' + r4c9 + '</td>' + \
'<td>' + r4c10 + '</td>' + \
'<td>' + r4c11 + '</td>' + \
'<td>' + r4c12 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Rent / Mortgage</td>' + \
'<td>' + r5c1 + '</td>' + \
'<td>' + r5c2 + '</td>' + \
'<td>' + r5c3 + '</td>' + \
'<td>' + r5c4 + '</td>' + \
'<td>' + r5c5 + '</td>' + \
'<td>' + r5c6 + '</td>' + \
'<td>' + r5c7 + '</td>' + \
'<td>' + r5c8 + '</td>' + \
'<td>' + r5c9 + '</td>' + \
'<td>' + r5c10 + '</td>' + \
'<td>' + r5c11 + '</td>' + \
'<td>' + r5c12 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Equipment Rental</td>' + \
'<td>' + r6c1 + '</td>' + \
'<td>' + r6c2 + '</td>' + \
'<td>' + r6c3 + '</td>' + \
'<td>' + r6c4 + '</td>' + \
'<td>' + r6c5 + '</td>' + | |
<reponame>Southampton-Maritime-Robotics/Autonomous-Ship-and-Wavebuoys<filename>ASV/ASV/nodes/averager.py
#!/usr/bin/python
##############################################################################
#averager.py
#
#This code has been created by <NAME> (<EMAIL>) for
#averaging the main readings required during the QinetiQ tests. These values
#averaged over one minute will be published to an external logfile.
#
#Modifications to code
#16/02/2013 code created
#17/02/2013 removal of the calls to library_highlevel.py because whenever
# one of the nodes was not being published the node exited with
# errors.
#
##############################################################################
#Notes
#
#At the moment this file publishes to an external log file the values for the
#motor demand (rpm, voltage or power), the propeller rpm, the motor voltage or
#power, the battery voltage and the case temperature (hence, 4 values in total
#plus the time at which they have been sampled). Other variables may be added
#as required.
#
##############################################################################
import roslib; roslib.load_manifest('ASV')
import rospy
import time
import csv
import os
import numpy
from datetime import datetime
from std_msgs.msg import Float32
from std_msgs.msg import Int8
from std_msgs.msg import String
from ASV.msg import status
# Defining global variables
# NOTE(review): a `global` statement at module scope is a no-op in Python --
# these lines document intent only and create no bindings. The actual values
# are assigned in the __main__ section below. Several of the names listed here
# (e.g. Motor_setting, Voltage, Power, Thrust) are never assigned under these
# exact spellings -- the subscriber callbacks assign lowercase variants
# (motor_setting, voltage, ...) instead. TODO confirm which spellings are live
# before relying on them.
global time_zero
global counter
global Motor_setting
global Motor_target
global total_motor
global Prop_rpm
global total_rpm
global avg_rpm
global Voltage
global total_voltage
global avg_voltage
global Motor_current
global total_current
global avg_current
global Power
global total_power
global avg_power
global battery_voltage
global total_BatteryVoltage
global avg_BatteryVoltage
global Temperature
global total_temperature
global avg_temperature
global Thrust
global total_thrust
global avg_thrust
###############################################################
#The following functions write the values this node subscribes to into different
#log files in .cvs format within the folder ~/logFiles created within the main
#function.
###############################################################
def printer(setting, target, rpm, voltage, current, power, BatteryVoltage, temperature, thrust):
#The stringtime variable is used in all these functions to store the time of
#the reading (starting from the time of the start-up (zero))-expressed in seconds.
stringtime = time.time()-time_zero
averageList = [stringtime, setting, target, rpm, voltage, current, power, BatteryVoltage, temperature, thrust]
title = ['time', 'setting', 'target', 'rpm', 'volt', 'current', 'power', 'battery', 'temp', 'thrust']
print title
print averageList
with open('%s/averageLog.csv' %(dirname), "a") as f:
try:
Writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
Writer.writerow(title)
Writer.writerow(averageList)
except ValueError:
print 'writerow error'
########################## Callback Functions #################################
def motor_setting_cb(msg):
    #ROS callback: cache the latest motor-target method (Int8) in a module global.
    global motor_setting
    motor_setting = msg.data
def motor_target_cb(msg):
    #ROS callback: cache the latest motor target demand (Float32) in a module global.
    global motor_target
    motor_target = msg.data
def prop_rpm_cb(msg):
    #ROS callback: cache the latest propeller rpm reading in a module global.
    global prop_rpm
    prop_rpm = msg.data
def motor_voltage_cb(msg):
    #ROS callback: cache the latest motor voltage reading in a module global.
    global voltage
    voltage = msg.data
def motor_current_cb(msg):
    #ROS callback: cache the latest motor current reading in a module global.
    global motor_current
    motor_current = msg.data
def motor_power_cb(msg):
    #ROS callback: cache the latest motor power reading in a module global.
    global motor_power
    motor_power = msg.data
def thrust_cb(msg):
    #ROS callback: cache the latest thrust reading in a module global.
    global thrust
    thrust = msg.data
def battery_voltage_cb(msg):
    #ROS callback: cache the latest battery voltage reading in a module global.
    global BatteryVoltage
    BatteryVoltage = msg.data
def temperature_cb(msg):
    #ROS callback: cache the latest case temperature reading in a module global.
    global temperature
    temperature = msg.data
##############################################################
#def shutdown():
#shutdown behaviour - close all files
#print 'shutting down'
# with open('%s/path.kml' %(dirname), "a") as f:
# try:
# f.write('</coordinates>\n </LineString>\n </Placemark>\n </kml>\n')
# except ValueError:
# print 'write error'
################################## MAIN FUNCTION ###############################
if __name__ == '__main__':
#Initialising the node
rospy.init_node('averager')
stringtime = datetime.now()
stringtime = stringtime.strftime('%Y-%m-%d_%H-%M-%S')
rospy.loginfo('Logger started at %s.'%(stringtime))
pub_folder = rospy.Publisher('folder', String)
########################################################################
######## FOLDERS #######################################################
########################################################################
#define files and writers
logfolder = 'AverageValues'
dirname = logfolder + '/' + stringtime
if not os.path.isdir(logfolder):
print 'made logfolder'
os.mkdir(logfolder)
if not os.path.isdir(dirname):
print 'made test folder'
os.mkdir(dirname)
time.sleep(5)
pub_folder.publish(dirname)
########################################################################
#Setting the zero time
time_zero = time.time()
# Initialising global variables
counter =0
motor_setting =0
motor_target =0
prop_rpm =0
voltage =0
motor_current =0
motor_power =0
BatteryVoltage =0
temperature =0
thrust =0
total_motor =0
avg_motor =0
total_rpm =0
avg_rpm =0
total_voltage =0
avg_voltage =0
total_current =0
avg_current =0
total_power =0
avg_power =0
total_BatteryVoltage =0
avg_BatteryVoltage =0
total_temperature =0
avg_temperature =0
total_thrust =0
avg_thrust =0
########################SET UP THE SUBSCRIBERS##########################
rospy.Subscriber('setMotorTargetMethod', Int8, motor_setting_cb)
rospy.Subscriber('setMotorTarget', Float32, motor_target_cb)
rospy.Subscriber('prop_rpm', Float32, prop_rpm_cb)
rospy.Subscriber('motor_voltage', Float32, motor_voltage_cb)
rospy.Subscriber('motor_current', Float32, motor_current_cb)
rospy.Subscriber('motor_power', Float32, motor_power_cb)
rospy.Subscriber('thrust', Float32, thrust_cb)
rospy.Subscriber('battery_voltage', Float32, battery_voltage_cb)
rospy.Subscriber('CaseTemperature', Float32, temperature_cb)
#Publish the propeller rpm demand only when the node is not shutdown
#while not rospy.is_shutdown():
while (time.time()-time_zero)<=20:
counter = counter + 1
total_rpm = prop_rpm + total_rpm
total_voltage = voltage + total_voltage
total_current = motor_current + total_current
total_power = motor_power + total_power
total_BatteryVoltage = BatteryVoltage
total_temperature = temperature + total_temperature
total_thrust = thrust + total_thrust
rospy.sleep(0.1)
#For debugging purposes only
#print counter
avg_rpm = total_rpm / (counter+1)
avg_voltage = total_voltage / (counter+1)
avg_current = total_current / (counter+1)
avg_power = total_power / (counter+1)
avg_BatteryVoltage = total_BatteryVoltage / (counter+1)
avg_temperature = total_temperature / (counter+1)
avg_thrust = total_thrust / (counter+1)
printer(motor_setting, motor_target, avg_rpm, avg_voltage, avg_current, avg_power, avg_BatteryVoltage, avg_temperature, avg_thrust)
while (time.time()-time_zero)>20 and (time.time()-time_zero)<=40:
counter = counter + 1
total_rpm = prop_rpm + total_rpm
total_voltage = voltage + total_voltage
total_current = motor_current + total_current
total_power = motor_power + total_power
total_BatteryVoltage = BatteryVoltage
total_temperature = temperature + total_temperature
total_thrust = thrust + total_thrust
rospy.sleep(0.1)
#For debugging purposes only
#print counter
avg_rpm = total_rpm / (counter+1)
avg_voltage = total_voltage / (counter+1)
avg_current = total_current / (counter+1)
avg_power = total_power / (counter+1)
avg_BatteryVoltage = total_BatteryVoltage / (counter+1)
avg_temperature = total_temperature / (counter+1)
avg_thrust = total_thrust / (counter+1)
printer(motor_setting, motor_target, avg_rpm, avg_voltage, avg_current, avg_power, avg_BatteryVoltage, avg_temperature, avg_thrust)
while (time.time()-time_zero)>40 and (time.time()-time_zero)<=60:
counter = counter + 1
total_rpm = prop_rpm + total_rpm
total_voltage = voltage + total_voltage
total_current = motor_current + total_current
total_power = motor_power + total_power
total_BatteryVoltage = BatteryVoltage
total_temperature = temperature + total_temperature
total_thrust = thrust + total_thrust
rospy.sleep(0.1)
#For debugging purposes only
#print counter
avg_rpm = total_rpm / (counter+1)
avg_voltage = total_voltage / (counter+1)
avg_current = total_current / (counter+1)
avg_power = total_power / (counter+1)
avg_BatteryVoltage = total_BatteryVoltage / (counter+1)
avg_temperature = total_temperature / (counter+1)
avg_thrust = total_thrust / (counter+1)
printer(motor_setting, motor_target, avg_rpm, avg_voltage, avg_current, avg_power, avg_BatteryVoltage, avg_temperature, avg_thrust)
while (time.time()-time_zero)>60 and (time.time()-time_zero)<=80:
counter = counter + 1
total_rpm = prop_rpm + total_rpm
total_voltage = voltage + total_voltage
total_current = motor_current + total_current
total_power = motor_power + total_power
total_BatteryVoltage = BatteryVoltage
total_temperature = temperature + total_temperature
total_thrust = thrust + total_thrust
rospy.sleep(0.1)
#For debugging purposes only
#print counter
avg_rpm = total_rpm / (counter+1)
avg_voltage = total_voltage / (counter+1)
avg_current = total_current / (counter+1)
avg_power = total_power / (counter+1)
avg_BatteryVoltage = total_BatteryVoltage / (counter+1)
avg_temperature = total_temperature / (counter+1)
avg_thrust = total_thrust / (counter+1)
printer(motor_setting, motor_target, avg_rpm, avg_voltage, avg_current, avg_power, avg_BatteryVoltage, avg_temperature, avg_thrust)
while (time.time()-time_zero)>80 and (time.time()-time_zero)<=100:
counter = counter + 1
total_rpm = prop_rpm + total_rpm
total_voltage = voltage + total_voltage
total_current = motor_current + total_current
total_power = motor_power + total_power
total_BatteryVoltage = BatteryVoltage
total_temperature = temperature + total_temperature
total_thrust = thrust + total_thrust
rospy.sleep(0.1)
#For debugging purposes only
#print counter
avg_rpm = total_rpm / (counter+1)
avg_voltage = total_voltage / (counter+1)
avg_current = total_current / (counter+1)
avg_power = total_power / (counter+1)
avg_BatteryVoltage = total_BatteryVoltage / (counter+1)
avg_temperature = total_temperature / (counter+1)
avg_thrust = total_thrust / (counter+1)
printer(motor_setting, motor_target, avg_rpm, avg_voltage, avg_current, avg_power, avg_BatteryVoltage, avg_temperature, avg_thrust)
while (time.time()-time_zero)>100 and (time.time()-time_zero)<=120:
counter = counter + 1
total_rpm = prop_rpm + total_rpm
total_voltage = voltage + total_voltage
total_current = motor_current + total_current
total_power = motor_power + total_power
total_BatteryVoltage = BatteryVoltage
total_temperature = temperature + total_temperature
total_thrust = thrust + total_thrust
rospy.sleep(0.1)
#For debugging purposes only
#print counter
avg_rpm = total_rpm / (counter+1)
avg_voltage = total_voltage / (counter+1)
avg_current = total_current / (counter+1)
avg_power = total_power / (counter+1)
avg_BatteryVoltage = total_BatteryVoltage / (counter+1)
avg_temperature = total_temperature / (counter+1)
avg_thrust = total_thrust / (counter+1)
printer(motor_setting, motor_target, avg_rpm, avg_voltage, avg_current, avg_power, avg_BatteryVoltage, avg_temperature, avg_thrust)
while (time.time()-time_zero)>120 and (time.time()-time_zero)<=140:
counter = counter + 1
total_rpm = prop_rpm + total_rpm
total_voltage = voltage + total_voltage
total_current = motor_current + total_current
total_power = motor_power + total_power
total_BatteryVoltage = BatteryVoltage
total_temperature = temperature + total_temperature
total_thrust = thrust + total_thrust
rospy.sleep(0.1)
#For debugging purposes only
#print counter
avg_rpm = total_rpm / (counter+1)
avg_voltage = total_voltage / (counter+1)
avg_current = total_current / (counter+1)
avg_power = total_power / (counter+1)
avg_BatteryVoltage = total_BatteryVoltage / (counter+1)
avg_temperature = total_temperature / (counter+1)
avg_thrust = total_thrust / (counter+1)
printer(motor_setting, motor_target, avg_rpm, avg_voltage, avg_current, avg_power, avg_BatteryVoltage, avg_temperature, avg_thrust)
while (time.time()-time_zero)>140 and (time.time()-time_zero)<=160:
counter = counter + 1
total_rpm = prop_rpm + total_rpm
total_voltage = voltage + total_voltage
total_current = motor_current + total_current
total_power = motor_power + total_power
total_BatteryVoltage = BatteryVoltage
total_temperature = temperature + total_temperature
total_thrust = | |
<gh_stars>0
import logging
import pprint
import os
import sys
import math
import torch
import time
import random
from sklearn.metrics import average_precision_score, roc_auc_score
from scipy.sparse import coo_matrix
import numpy as np
import utils
import joblib # import Parallel, delayed
from torch.utils.tensorboard import SummaryWriter
class Logger:
def __init__(
self,
args,
num_classes,
num_nodes,
classifier_name="decoder",
minibatch_log_interval=10,
train_encoder=True,
):
if args is not None:
self.log_name = utils.get_log_name(args, classifier_name, train_encoder)
if args.use_logfile:
print(classifier_name, " Log file:", self.log_name)
handler = logging.FileHandler(self.log_name)
utils.remove_log_lock(args)
event_folder = self.log_name.rpartition(".")[0]
os.makedirs(event_folder, exist_ok=True)
self.writer = SummaryWriter(event_folder)
else:
print("Log: STDOUT")
handler = logging.StreamHandler(sys.stdout)
else:
print("Log: STDOUT")
handler = logging.StreamHandler(sys.stdout)
self.logger = logging.getLogger(classifier_name)
self.logger.setLevel(logging.INFO)
self.logger.addHandler(handler)
self.logger.info("*** PARAMETERS ***")
self.logger.info(pprint.pformat(args.__dict__)) # displays the string
self.logger.info("")
self.classifier_name = classifier_name
self.num_classes = num_classes
self.num_nodes = num_nodes
self.minibatch_log_interval = minibatch_log_interval
self.eval_k_list = [10, 100, 1000]
self.args = args
def close(self):
self.logger.info("##### FINISHED")
handlers = self.logger.handlers[:]
for handler in handlers:
handler.close()
self.logger.removeHandler(handler)
def get_log_file_name(self):
return self.log_name
def log_epoch_start(
self, epoch, num_minibatches, partition, minibatch_log_interval=None
):
self.epoch = epoch
self.partition = partition
self.info_preamble = self.partition
self.losses = []
self.errors = []
self.MRRs = []
self.GMAUCs = []
self.LP_MAPs = []
self.LP_AUCs = []
self.MAPs = []
self.AUCs = []
self.neg_samples = []
self.conf_mat_tp = {}
self.conf_mat_fn = {}
self.conf_mat_fp = {}
self.conf_mat_tp_at_k = {}
self.conf_mat_fn_at_k = {}
self.conf_mat_fp_at_k = {}
for k in self.eval_k_list:
self.conf_mat_tp_at_k[k] = {}
self.conf_mat_fn_at_k[k] = {}
self.conf_mat_fp_at_k[k] = {}
for cl in range(self.num_classes):
self.conf_mat_tp[cl] = 0
self.conf_mat_fn[cl] = 0
self.conf_mat_fp[cl] = 0
for k in self.eval_k_list:
self.conf_mat_tp_at_k[k][cl] = 0
self.conf_mat_fn_at_k[k][cl] = 0
self.conf_mat_fp_at_k[k][cl] = 0
if self.partition == "TEST":
self.conf_mat_tp_list = {}
self.conf_mat_fn_list = {}
self.conf_mat_fp_list = {}
for cl in range(self.num_classes):
self.conf_mat_tp_list[cl] = []
self.conf_mat_fn_list[cl] = []
self.conf_mat_fp_list[cl] = []
self.batch_sizes = []
self.minibatch_done = 0
self.num_minibatches = num_minibatches
if minibatch_log_interval is not None:
self.minibatch_log_interval = minibatch_log_interval
self.logger.info(
"################ "
+ self.info_preamble
+ " epoch "
+ str(epoch)
+ " ###################"
+ " time "
+ str(int(time.time()))
)
self.lasttime = time.monotonic()
self.ep_time = self.lasttime
    def log_minibatch(self, loss, predictions, probs, true_classes, **kwargs):
        """Record one minibatch's loss and evaluation metrics.

        Moves all tensors to CPU, computes the metric set appropriate to the
        current partition (VALID computes only ``args.target_measure``; TEST
        with ``args.task == "link_pred"`` computes the full link-prediction
        suite; otherwise MAP/AUC/confusion matrix), accumulates the results in
        the per-epoch lists set up by ``log_epoch_start``, and periodically
        emits a progress report.

        kwargs used (presence depends on partition/task -- see branches below):
            adj, prev_adj: current/previous adjacency, for MRR and the
                new-vs-reappearing link split.
            calc_lp_metrics: set to False to skip link-prediction metrics.
        """
        loss = loss.cpu()
        predictions = predictions.cpu()
        probs = probs.cpu()
        true_classes = true_classes.cpu()
        probs_np = probs.numpy()
        true_classes_np = true_classes.numpy()
        # Initialize everything
        MRR = torch.tensor(0.0)
        GMAUC = torch.tensor(0.0)
        LP_MAP = torch.tensor(0.0)
        LP_AUC = torch.tensor(0.0)
        MAP = torch.tensor(0.0)
        AUC = torch.tensor(0.0)
        error = torch.tensor(0.0)
        conf_mat_per_class = {}
        # NOTE(review): adding torch.tensor(0.0) promotes the integer counters
        # initialized in log_epoch_start to float tensors on first use --
        # presumably intentional so later tensor arithmetic works; confirm.
        for cl in range(self.num_classes):
            self.conf_mat_tp[cl] += torch.tensor(0.0)
            self.conf_mat_fn[cl] += torch.tensor(0.0)
            self.conf_mat_fp[cl] += torch.tensor(0.0)
            for k in self.eval_k_list:
                self.conf_mat_tp_at_k[k][cl] += torch.tensor(0.0)
                self.conf_mat_fn_at_k[k][cl] += torch.tensor(0.0)
                self.conf_mat_fp_at_k[k][cl] += torch.tensor(0.0)
        adj, adj_np, prev_adj, new_probs, new_tc, rec_probs, rec_tc = [None] * 7
        # Ensure that continuous-DGNN link interpolation training
        # doesn't waste time calculating metrics
        calc_lp_metrics = True
        if "calc_lp_metrics" in kwargs and kwargs["calc_lp_metrics"] == False:
            calc_lp_metrics = False
        # Select which metrics to compute for this minibatch: VALID computes
        # only the configured target measure (and returns early further down).
        if self.partition == "VALID":
            target_measure = self.args.target_measure.lower()
            if target_measure == "mrr":
                adj = kwargs["adj"].cpu()
                adj_np = adj.numpy()
            elif target_measure in ["gmauc", "lp_map", "lp_auc"]:
                adj = kwargs["adj"].cpu()
                prev_adj = kwargs["prev_adj"]
                new_probs, new_tc, rec_probs, rec_tc = self.new_reappear_split_tensor(
                    probs, true_classes, adj, prev_adj
                )
            metrics_to_calc = [target_measure]
        elif (
            calc_lp_metrics
            and self.partition in ["TEST", "VALID"]
            and self.args.task == "link_pred"
        ):
            adj = kwargs["adj"].cpu()
            adj_np = adj.numpy()
            prev_adj = kwargs["prev_adj"]
            new_probs, new_tc, rec_probs, rec_tc = self.new_reappear_split_tensor(
                probs, true_classes, adj, prev_adj
            )
            metrics_to_calc = [
                # "mrr", #Caused crash in WSDM-B
                "gmauc",
                "lp_map",
                "lp_auc",
                "map",
                "auc",
                "confmat",
            ]
        else:
            metrics_to_calc = ["map", "auc", "confmat"]

        # Dispatches one metric by name; returns (name, value) so results can
        # be matched up regardless of the order the workers finish in. Runs in
        # a joblib worker thread below.
        def calculate_metric(
            metric_name,
            probs,
            true_classes,
            predictions_torch,
            true_classes_torch,
            new_probs,
            new_tc,
            rec_probs,
            rec_tc,
            adj,
        ):
            metric = torch.tensor(0.0)
            try:
                with np.errstate(divide="ignore", invalid="ignore"):
                    if metric_name == "mrr":
                        metric = self.get_MRR(probs, true_classes, adj)
                    elif metric_name == "gmauc":
                        metric = torch.tensor(
                            self.get_GMAUC(new_probs, new_tc, rec_probs, rec_tc)
                        )
                    elif metric_name == "lp_map":
                        metric = torch.tensor(self.get_MAP(new_probs, new_tc))
                    elif metric_name == "lp_auc":
                        metric = torch.tensor(self.get_AUC(new_probs, new_tc))
                    elif metric_name == "map":
                        metric = torch.tensor(self.get_MAP(probs, true_classes))
                    elif metric_name == "auc":
                        metric = torch.tensor(self.get_AUC(probs, true_classes))
                    elif metric_name == "confmat":
                        metric = self.eval_predicitions(
                            predictions_torch, true_classes_torch, self.num_classes
                        )
                    else:
                        raise Exception("Metric {}, not found".format(metric_name))
            except ValueError as e:
                self.logger.info(
                    "Encountered value error in calculating metric {}, most likely there are no new links or no reoccurring links. Error: {}".format(
                        metric_name, e
                    )
                )
            if type(metric) == type(torch.tensor(0.0)) and torch.isnan(metric):
                # self.logger.info("Metric {} was nan setting to 0".format(metric_name))
                metric[
                    torch.isnan(metric)
                ] = 0  # Set metric to 0 if nan, may happen with GMAUC or LP_MAP
            return (metric_name, metric)

        # Single metric: call directly; several metrics: fan out over threads.
        if len(metrics_to_calc) == 1:
            results = [
                calculate_metric(
                    metrics_to_calc[0],
                    probs_np,
                    true_classes_np,
                    predictions,
                    true_classes,
                    new_probs,
                    new_tc,
                    rec_probs,
                    rec_tc,
                    adj_np,
                )
            ]
        else:
            # Threads here are not great. But the summary writer clashes with the multiprocessing.
            # Simply remove the prefer and summary writer to get true parallelism here.
            parallel = joblib.Parallel(n_jobs=len(metrics_to_calc), prefer="threads")
            results = parallel(
                joblib.delayed(calculate_metric)(
                    metric_name,
                    probs_np,
                    true_classes_np,
                    predictions,
                    true_classes,
                    new_probs,
                    new_tc,
                    rec_probs,
                    rec_tc,
                    adj_np,
                )
                for metric_name in metrics_to_calc
            )
        # Unpack (name, value) pairs back into the local metric variables.
        for metric_name, metric in results:
            if metric_name == "mrr":
                MRR = metric
            elif metric_name == "gmauc":
                GMAUC = metric
            elif metric_name == "lp_map":
                LP_MAP = metric
            elif metric_name == "lp_auc":
                LP_AUC = metric
            elif metric_name == "map":
                MAP = metric
            elif metric_name == "auc":
                AUC = metric
            elif metric_name == "confmat":
                error, conf_mat_per_class = metric
            else:
                raise Exception("Metric {}, not found".format(metric_name))
        # Accumulate per-minibatch results for the epoch summary.
        batch_size = predictions.size(0)
        self.batch_sizes.append(batch_size)
        self.losses.append(loss)
        self.errors.append(error)
        self.MRRs.append(MRR)
        self.GMAUCs.append(GMAUC)
        self.LP_MAPs.append(LP_MAP)
        self.LP_AUCs.append(LP_AUC)
        self.MAPs.append(MAP)
        self.AUCs.append(AUC)
        if self.partition == "VALID":
            # Return early to save time
            self.lasttime = time.monotonic()
            return
        if self.partition == "TEST":
            # Negative sampling metrics
            if (
                hasattr(self.args, "log_negative_sample_range")
                and self.args.log_negative_sample_range == True
            ):
                negative_sampling = self.get_negative_sample_metrics(
                    probs_np, true_classes_np
                )
            else:
                # Placeholder zeros keep the neg_samples list shape-consistent
                # when negative-sample logging is disabled.
                ks = [1000, 100, 10, 1]
                empty_neg_samples = {}
                for k in ks:
                    empty_neg_samples["{}_{}".format(k, "map")] = torch.tensor(0.0)
                    empty_neg_samples["{}_{}".format(k, "auc")] = torch.tensor(0.0)
                negative_sampling = empty_neg_samples
            self.neg_samples.append(negative_sampling)
        conf_mat_per_class_at_k = {}
        for k in self.eval_k_list:
            conf_mat_per_class_at_k[k] = self.eval_predicitions_at_k(
                predictions, true_classes, self.num_classes, k
            )
        for cl in range(self.num_classes):
            self.conf_mat_tp[cl] += conf_mat_per_class.true_positives[cl]
            self.conf_mat_fn[cl] += conf_mat_per_class.false_negatives[cl]
            self.conf_mat_fp[cl] += conf_mat_per_class.false_positives[cl]
            for k in self.eval_k_list:
                self.conf_mat_tp_at_k[k][cl] += conf_mat_per_class_at_k[
                    k
                ].true_positives[cl]
                self.conf_mat_fn_at_k[k][cl] += conf_mat_per_class_at_k[
                    k
                ].false_negatives[cl]
                self.conf_mat_fp_at_k[k][cl] += conf_mat_per_class_at_k[
                    k
                ].false_positives[cl]
            if self.partition == "TEST":
                self.conf_mat_tp_list[cl].append(conf_mat_per_class.true_positives[cl])
                self.conf_mat_fn_list[cl].append(conf_mat_per_class.false_negatives[cl])
                self.conf_mat_fp_list[cl].append(conf_mat_per_class.false_positives[cl])
        self.minibatch_done += 1
        # Periodic progress report every minibatch_log_interval minibatches.
        if self.minibatch_done % self.minibatch_log_interval == 0:
            mb_error = self.calc_epoch_metric(self.batch_sizes, self.errors)
            mb_MRR = self.calc_epoch_metric(self.batch_sizes, self.MRRs)
            mb_GMAUC = self.calc_epoch_metric(self.batch_sizes, self.GMAUCs)
            mb_LP_MAP = self.calc_epoch_metric(self.batch_sizes, self.LP_MAPs)
            mb_LP_AUC = self.calc_epoch_metric(self.batch_sizes, self.LP_AUCs)
            mb_MAP = self.calc_epoch_metric(self.batch_sizes, self.MAPs)
            mb_AUC = self.calc_epoch_metric(self.batch_sizes, self.AUCs)
            partial_losses = torch.stack(self.losses)
            self.logger.info(
                self.info_preamble
                + " batch %d / %d - partial error %0.4f - partial loss %0.4f - partial MRR %0.4f - partial GMAUC %0.4f - partial MAP %0.4f - partial AUC %0.4f"
                % (
                    self.minibatch_done,
                    self.num_minibatches,
                    mb_error,
                    partial_losses.mean(),
                    mb_MRR,
                    mb_GMAUC,
                    mb_MAP,
                    mb_AUC,
                )
            )
            self.logger.info(
                self.info_preamble
                + "LP partial MAP %0.4f - AUC %0.4f" % (mb_LP_MAP, mb_LP_AUC)
            )
            tp = conf_mat_per_class.true_positives
            fn = conf_mat_per_class.false_negatives
            fp = conf_mat_per_class.false_positives
            self.logger.info(
                self.info_preamble
                + " batch %d / %d - partial tp %s,fn %s,fp %s"
                % (self.minibatch_done, self.num_minibatches, tp, fn, fp)
            )
            precision, recall, f1 = self.calc_microavg_eval_measures(tp, fn, fp)
            self.logger.info(
                self.info_preamble
                + " batch %d / %d - measures partial microavg - precision %0.4f - recall %0.4f - f1 %0.4f "
                % (self.minibatch_done, self.num_minibatches, precision, recall, f1)
            )
            for cl in range(self.num_classes):
                cl_precision, cl_recall, cl_f1 = self.calc_eval_measures_per_class(
                    tp, fn, fp, cl
                )
                self.logger.info(
                    self.info_preamble
                    + " batch %d / %d - measures partial for class %d - precision %0.4f - recall %0.4f - f1 %0.4f "
                    % (
                        self.minibatch_done,
                        self.num_minibatches,
                        cl,
                        cl_precision,
                        cl_recall,
                        cl_f1,
                    )
                )
            self.logger.info(
                self.info_preamble
                + " batch %d / %d - Batch time %d "
                % (
                    self.minibatch_done,
                    self.num_minibatches,
                    (time.monotonic() - self.lasttime),
                )
            )
        self.lasttime = time.monotonic()
def log_epoch_done(self):
epoch_metrics = {}
self.losses = torch.stack(self.losses)
epoch_metrics["loss"] = self.losses.mean()
epoch_metrics["error"] = self.calc_epoch_metric(self.batch_sizes, self.errors)
epoch_metrics["mrr"] = self.calc_epoch_metric(self.batch_sizes, self.MRRs)
epoch_metrics["gmauc"] = self.calc_epoch_metric(self.batch_sizes, self.GMAUCs)
epoch_metrics["lp_map"] = self.calc_epoch_metric(self.batch_sizes, self.LP_MAPs)
epoch_metrics["lp_auc"] = self.calc_epoch_metric(self.batch_sizes, self.LP_AUCs)
epoch_metrics["map"] = self.calc_epoch_metric(self.batch_sizes, self.MAPs)
epoch_metrics["auc"] = self.calc_epoch_metric(self.batch_sizes, self.AUCs)
self.logger.info(
"{} {}".format(
self.info_preamble,
" - ".join(
"mean {} {}".format(metric, value)
for metric, value in epoch_metrics.items()
),
)
)
if self.partition == "TEST":
# Reshape neg_sampling metrics for calc_epoch_metric
neg_sampling_lists = {}
for neg_sampling_batch in self.neg_samples:
for key in neg_sampling_batch.keys():
value = neg_sampling_batch[key]
if key not in neg_sampling_lists:
neg_sampling_lists[key] = []
neg_sampling_lists[key].append(value)
neg_sampling_aggregated = {}
for metric, value_list in neg_sampling_lists.items():
if len(self.batch_sizes) == len(value_list):
neg_sampling_aggregated[metric] = self.calc_epoch_metric(
self.batch_sizes, value_list
)
else:
# | |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ApplicationList(ListResource):
def __init__(self, version, account_sid):
"""
Initialize the ApplicationList
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created the resource
:returns: twilio.rest.api.v2010.account.application.ApplicationList
:rtype: twilio.rest.api.v2010.account.application.ApplicationList
"""
super(ApplicationList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, }
self._uri = '/Accounts/{account_sid}/Applications.json'.format(**self._solution)
def create(self, api_version=values.unset, voice_url=values.unset,
voice_method=values.unset, voice_fallback_url=values.unset,
voice_fallback_method=values.unset, status_callback=values.unset,
status_callback_method=values.unset,
voice_caller_id_lookup=values.unset, sms_url=values.unset,
sms_method=values.unset, sms_fallback_url=values.unset,
sms_fallback_method=values.unset, sms_status_callback=values.unset,
message_status_callback=values.unset, friendly_name=values.unset):
"""
Create the ApplicationInstance
:param unicode api_version: The API version to use to start a new TwiML session
:param unicode voice_url: The URL to call when the phone number receives a call
:param unicode voice_method: The HTTP method to use with the voice_url
:param unicode voice_fallback_url: The URL to call when a TwiML error occurs
:param unicode voice_fallback_method: The HTTP method to use with voice_fallback_url
:param unicode status_callback: The URL to send status information to your application
:param unicode status_callback_method: The HTTP method to use to call status_callback
:param bool voice_caller_id_lookup: Whether to lookup the caller's name
:param unicode sms_url: The URL to call when the phone number receives an incoming SMS message
:param unicode sms_method: The HTTP method to use with sms_url
:param unicode sms_fallback_url: The URL to call when an error occurs while retrieving or executing the TwiML
:param unicode sms_fallback_method: The HTTP method to use with sms_fallback_url
:param unicode sms_status_callback: The URL to send status information to your application
:param unicode message_status_callback: The URL to send message status information to your application
:param unicode friendly_name: A string to describe the new resource
:returns: The created ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
"""
data = values.of({
'ApiVersion': api_version,
'VoiceUrl': voice_url,
'VoiceMethod': voice_method,
'VoiceFallbackUrl': voice_fallback_url,
'VoiceFallbackMethod': voice_fallback_method,
'StatusCallback': status_callback,
'StatusCallbackMethod': status_callback_method,
'VoiceCallerIdLookup': voice_caller_id_lookup,
'SmsUrl': sms_url,
'SmsMethod': sms_method,
'SmsFallbackUrl': sms_fallback_url,
'SmsFallbackMethod': sms_fallback_method,
'SmsStatusCallback': sms_status_callback,
'MessageStatusCallback': message_status_callback,
'FriendlyName': friendly_name,
})
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return ApplicationInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def stream(self, friendly_name=values.unset, limit=None, page_size=None):
"""
Streams ApplicationInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode friendly_name: The string that identifies the Application resources to read
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.application.ApplicationInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(friendly_name=friendly_name, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, friendly_name=values.unset, limit=None, page_size=None):
"""
Lists ApplicationInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode friendly_name: The string that identifies the Application resources to read
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.application.ApplicationInstance]
"""
return list(self.stream(friendly_name=friendly_name, limit=limit, page_size=page_size, ))
def page(self, friendly_name=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of ApplicationInstance records from the API.
Request is executed immediately
:param unicode friendly_name: The string that identifies the Application resources to read
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationPage
"""
data = values.of({
'FriendlyName': friendly_name,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return ApplicationPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ApplicationInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ApplicationInstance
:rtype: twilio.rest.api.v2010.account.application.ApplicationPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ApplicationPage(self._version, response, self._solution)
def get(self, sid):
    """
    Constructs an ApplicationContext for one specific resource.

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.api.v2010.account.application.ApplicationContext
    :rtype: twilio.rest.api.v2010.account.application.ApplicationContext
    """
    # No HTTP request happens here; the context is a lazy handle.
    account_sid = self._solution['account_sid']
    return ApplicationContext(self._version, account_sid=account_sid, sid=sid)
def __call__(self, sid):
    """
    Constructs an ApplicationContext; shorthand for :meth:`get`.

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.api.v2010.account.application.ApplicationContext
    :rtype: twilio.rest.api.v2010.account.application.ApplicationContext
    """
    # Delegate to get() so both call styles stay in lockstep.
    return self.get(sid)
def __repr__(self):
    """
    Provide a friendly representation.

    :returns: Machine friendly representation
    :rtype: str
    """
    # List objects carry no per-instance detail worth echoing here.
    return '<Twilio.Api.V2010.ApplicationList>'
class ApplicationPage(Page):
    """One page of Application resources returned by the list API."""

    def __init__(self, version, response, solution):
        """
        Initialize the ApplicationPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param account_sid: The SID of the Account that created the resource
        :returns: twilio.rest.api.v2010.account.application.ApplicationPage
        :rtype: twilio.rest.api.v2010.account.application.ApplicationPage
        """
        super(ApplicationPage, self).__init__(version, response)
        # Path solution carried over from the list that produced this page
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of ApplicationInstance

        :param dict payload: Payload response from the API
        :returns: twilio.rest.api.v2010.account.application.ApplicationInstance
        :rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
        """
        account_sid = self._solution['account_sid']
        return ApplicationInstance(self._version, payload, account_sid=account_sid)

    def __repr__(self):
        """
        Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.ApplicationPage>'
class ApplicationContext(InstanceContext):
def __init__(self, version, account_sid, sid):
    """
    Initialize the ApplicationContext

    :param Version version: Version that contains the resource
    :param account_sid: The SID of the Account that created the resource to fetch
    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.api.v2010.account.application.ApplicationContext
    :rtype: twilio.rest.api.v2010.account.application.ApplicationContext
    """
    super(ApplicationContext, self).__init__(version)

    # Path solution: the identifiers that parameterize this resource's URI.
    self._solution = {'account_sid': account_sid, 'sid': sid}
    self._uri = '/Accounts/{account_sid}/Applications/{sid}.json'.format(**self._solution)
def delete(self):
    """
    Deletes the ApplicationInstance

    :returns: True if delete succeeds, False otherwise
    :rtype: bool
    """
    # Issue the DELETE against this resource's URI; the version layer
    # returns the boolean outcome.
    return self._version.delete(method='DELETE', uri=self._uri)
def fetch(self):
    """
    Fetch the ApplicationInstance

    :returns: The fetched ApplicationInstance
    :rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
    """
    response_payload = self._version.fetch(method='GET', uri=self._uri)
    solution = self._solution
    # Materialize a full instance bound to the same account/sid pair.
    return ApplicationInstance(
        self._version,
        response_payload,
        account_sid=solution['account_sid'],
        sid=solution['sid'],
    )
def update(self, friendly_name=values.unset, api_version=values.unset,
           voice_url=values.unset, voice_method=values.unset,
           voice_fallback_url=values.unset, voice_fallback_method=values.unset,
           status_callback=values.unset, status_callback_method=values.unset,
           voice_caller_id_lookup=values.unset, sms_url=values.unset,
           sms_method=values.unset, sms_fallback_url=values.unset,
           sms_fallback_method=values.unset, sms_status_callback=values.unset,
           message_status_callback=values.unset):
    """
    Update the ApplicationInstance

    :param unicode friendly_name: A string to describe the resource
    :param unicode api_version: The API version to use to start a new TwiML session
    :param unicode voice_url: The URL to call when the phone number receives a call
    :param unicode voice_method: The HTTP method to use with the voice_url
    :param unicode voice_fallback_url: The URL to call when a TwiML error occurs
    :param unicode voice_fallback_method: The HTTP method to use with voice_fallback_url
    :param unicode status_callback: The URL to send status information to your application
    :param unicode status_callback_method: The HTTP method to use to call status_callback
    :param bool voice_caller_id_lookup: Whether to lookup the caller's name
    :param unicode sms_url: The URL to call when the phone number receives an incoming SMS message
    :param unicode sms_method: The HTTP method to use with sms_url
    :param unicode sms_fallback_url: The URL to call when an error occurs while retrieving or executing the TwiML
    :param unicode sms_fallback_method: The HTTP method to use with sms_fallback_url
    :param unicode sms_status_callback: Same as message_status_callback. Deprecated, included for backwards compatibility.
    :param unicode message_status_callback: The URL to send message status information to your application
    :returns: The updated ApplicationInstance
    :rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
    """
    # NOTE(review): values.of presumably filters out entries still set to
    # values.unset — confirm against the twilio helper library.
    form_data = values.of({
        'FriendlyName': friendly_name,
        'ApiVersion': api_version,
        'VoiceUrl': voice_url,
        'VoiceMethod': voice_method,
        'VoiceFallbackUrl': voice_fallback_url,
        'VoiceFallbackMethod': voice_fallback_method,
        'StatusCallback': status_callback,
        'StatusCallbackMethod': status_callback_method,
        'VoiceCallerIdLookup': voice_caller_id_lookup,
        'SmsUrl': sms_url,
        'SmsMethod': sms_method,
        'SmsFallbackUrl': sms_fallback_url,
        'SmsFallbackMethod': sms_fallback_method,
        'SmsStatusCallback': sms_status_callback,
        'MessageStatusCallback': message_status_callback,
    })
    updated_payload = self._version.update(method='POST', uri=self._uri, data=form_data)
    return ApplicationInstance(
        self._version,
        updated_payload,
        account_sid=self._solution['account_sid'],
        sid=self._solution['sid'],
    )
def __repr__(self):
"""
Provide a friendly | |
self.m_staticText_fuse4e0.SetLabel('0x4e0')
self.m_staticText_fuse4f0.SetLabel('0x4f0')
self.m_staticText_fuse500.SetLabel('OTPMK')
self.m_staticText_fuse510.SetLabel('OTPMK')
self.m_staticText_fuse520.SetLabel('OTPMK')
self.m_staticText_fuse530.SetLabel('OTPMK')
self.m_staticText_fuse540.SetLabel('OTPMK')
self.m_staticText_fuse550.SetLabel('OTPMK')
self.m_staticText_fuse560.SetLabel('OTPMK')
self.m_staticText_fuse570.SetLabel('OTPMK')
self.m_staticText_fuse580.SetLabel('SRK0')
self.m_staticText_fuse590.SetLabel('SRK1')
self.m_staticText_fuse5a0.SetLabel('SRK2')
self.m_staticText_fuse5b0.SetLabel('SRK3')
self.m_staticText_fuse5c0.SetLabel('SRK4')
self.m_staticText_fuse5d0.SetLabel('SRK5')
self.m_staticText_fuse5e0.SetLabel('SRK6')
self.m_staticText_fuse5f0.SetLabel('SRK7')
self.m_staticText_fuse600.SetLabel('0x600')
self.m_staticText_fuse610.SetLabel('0x610')
self.m_staticText_fuse620.SetLabel('0x620')
self.m_staticText_fuse630.SetLabel('0x630')
self.m_staticText_fuse640.SetLabel('0x640')
self.m_staticText_fuse650.SetLabel('0x650')
self.m_staticText_fuse660.SetLabel('0x660')
self.m_staticText_fuse670.SetLabel('0x670')
self.m_staticText_fuse680.SetLabel('0x680')
self.m_staticText_fuse690.SetLabel('SwGp2')
self.m_staticText_fuse6a0.SetLabel('SwGp2')
self.m_staticText_fuse6b0.SetLabel('SwGp2')
self.m_staticText_fuse6c0.SetLabel('SwGp2')
self.m_button_fuse6d0.SetLabel('Conf0')
self.m_button_fuse6e0.SetLabel('Conf1')
self.m_staticText_fuse6f0.SetLabel('0x6f0')
self.m_staticText_fuse700.SetLabel('0x700')
self.m_staticText_fuse710.SetLabel('0x710')
self.m_staticText_fuse720.SetLabel('0x720')
self.m_staticText_fuse730.SetLabel('0x730')
self.m_staticText_fuse740.SetLabel('0x740')
self.m_staticText_fuse750.SetLabel('0x750')
self.m_staticText_fuse760.SetLabel('0x760')
self.m_staticText_fuse770.SetLabel('0x770')
self.m_staticText_fuse780.SetLabel('0x780')
self.m_staticText_fuse790.SetLabel('0x790')
self.m_staticText_fuse7a0.SetLabel('0x7a0')
self.m_staticText_fuse7b0.SetLabel('0x7b0')
self.m_staticText_fuse7c0.SetLabel('0x7c0')
self.m_staticText_fuse7d0.SetLabel('0x7d0')
self.m_staticText_fuse7e0.SetLabel('0x7e0')
self.m_staticText_fuse7f0.SetLabel('0x7f0')
self.m_staticText_fuse800.SetLabel('0x800')
self.m_staticText_fuse810.SetLabel('0x810')
self.m_staticText_fuse820.SetLabel('0x820')
self.m_staticText_fuse830.SetLabel('0x830')
self.m_staticText_fuse840.SetLabel('0x840')
self.m_staticText_fuse850.SetLabel('0x850')
self.m_staticText_fuse860.SetLabel('0x860')
self.m_staticText_fuse870.SetLabel('0x870')
self.m_staticText_fuse880.SetLabel('0x880')
self.m_staticText_fuse890.SetLabel('0x890')
self.m_staticText_fuse8a0.SetLabel('0x8a0')
self.m_staticText_fuse8b0.SetLabel('0x8b0')
self.m_staticText_fuse8c0.SetLabel('Gp4')
self.m_staticText_fuse8d0.SetLabel('Gp4')
self.m_staticText_fuse8e0.SetLabel('Gp4')
self.m_staticText_fuse8f0.SetLabel('Gp4')
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
if self.efuseGroupSel == 0:
self.m_button_fuse400.SetLabel('800H')
self.m_staticText_fuse410.SetLabel('0x810')
self.m_staticText_fuse420.SetLabel('0x820')
self.m_staticText_fuse430.SetLabel('0x830')
self.m_staticText_fuse440.SetLabel('0x840')
self.m_button_fuse450.SetLabel('850H')
self.m_button_fuse460.SetLabel('860H')
self.m_button_fuse470.SetLabel('870H')
self.m_staticText_fuse480.SetLabel('0x880')
self.m_staticText_fuse490.SetLabel('0x890')
self.m_staticText_fuse4a0.SetLabel('0x8a0')
self.m_staticText_fuse4b0.SetLabel('0x8b0')
self.m_staticText_fuse4c0.SetLabel('0x8c0')
self.m_staticText_fuse4d0.SetLabel('0x8d0')
self.m_staticText_fuse4e0.SetLabel('0x8e0')
self.m_staticText_fuse4f0.SetLabel('0x8f0')
self.m_staticText_fuse500.SetLabel('UUID0')
self.m_staticText_fuse510.SetLabel('UUID1')
self.m_staticText_fuse520.SetLabel('0x920')
self.m_staticText_fuse530.SetLabel('0x930')
self.m_staticText_fuse540.SetLabel('Cfg0')
self.m_staticText_fuse550.SetLabel('Cfg1')
self.m_staticText_fuse560.SetLabel('0x960')
self.m_staticText_fuse570.SetLabel('0x970')
self.m_staticText_fuse580.SetLabel('0x980')
self.m_staticText_fuse590.SetLabel('0x990')
self.m_staticText_fuse5a0.SetLabel('0x9a0')
self.m_staticText_fuse5b0.SetLabel('0x9b0')
self.m_staticText_fuse5c0.SetLabel('0x9c0')
self.m_staticText_fuse5d0.SetLabel('0x9d0')
self.m_staticText_fuse5e0.SetLabel('0x9e0')
self.m_staticText_fuse5f0.SetLabel('0x9f0')
self.m_staticText_fuse600.SetLabel('0xa00')
self.m_staticText_fuse610.SetLabel('0xa10')
self.m_staticText_fuse620.SetLabel('0xa20')
self.m_staticText_fuse630.SetLabel('0xa30')
self.m_staticText_fuse640.SetLabel('0xa40')
self.m_staticText_fuse650.SetLabel('0xa50')
self.m_staticText_fuse660.SetLabel('0xa60')
self.m_staticText_fuse670.SetLabel('0xa70')
self.m_staticText_fuse680.SetLabel('0xa80')
self.m_staticText_fuse690.SetLabel('0xa90')
self.m_staticText_fuse6a0.SetLabel('0xaa0')
self.m_staticText_fuse6b0.SetLabel('0xab0')
self.m_staticText_fuse6c0.SetLabel('0xac0')
self.m_button_fuse6d0.SetLabel('ad0H')
self.m_button_fuse6e0.SetLabel('ae0H')
self.m_staticText_fuse6f0.SetLabel('0xaf0')
self.m_staticText_fuse700.SetLabel('M7SRK')
self.m_staticText_fuse710.SetLabel('M7SRK')
self.m_staticText_fuse720.SetLabel('M7SRK')
self.m_staticText_fuse730.SetLabel('M7SRK')
self.m_staticText_fuse740.SetLabel('M7SRK')
self.m_staticText_fuse750.SetLabel('M7SRK')
self.m_staticText_fuse760.SetLabel('M7SRK')
self.m_staticText_fuse770.SetLabel('M7SRK')
self.m_staticText_fuse780.SetLabel('M4SRK')
self.m_staticText_fuse790.SetLabel('M4SRK')
self.m_staticText_fuse7a0.SetLabel('M4SRK')
self.m_staticText_fuse7b0.SetLabel('M4SRK')
self.m_staticText_fuse7c0.SetLabel('M4SRK')
self.m_staticText_fuse7d0.SetLabel('M4SRK')
self.m_staticText_fuse7e0.SetLabel('M4SRK')
self.m_staticText_fuse7f0.SetLabel('M4SRK')
self.m_staticText_fuse800.SetLabel('0xc00')
self.m_staticText_fuse810.SetLabel('0xc10')
self.m_staticText_fuse820.SetLabel('0xc20')
self.m_staticText_fuse830.SetLabel('0xc30')
self.m_staticText_fuse840.SetLabel('0xc40')
self.m_staticText_fuse850.SetLabel('0xc50')
self.m_staticText_fuse860.SetLabel('0xc60')
self.m_staticText_fuse870.SetLabel('0xc70')
self.m_staticText_fuse880.SetLabel('0xc80')
self.m_staticText_fuse890.SetLabel('0xc90')
self.m_staticText_fuse8a0.SetLabel('0xca0')
self.m_staticText_fuse8b0.SetLabel('0xcb0')
self.m_staticText_fuse8c0.SetLabel('0xcc0')
self.m_staticText_fuse8d0.SetLabel('0xcd0')
self.m_staticText_fuse8e0.SetLabel('0xce0')
self.m_staticText_fuse8f0.SetLabel('0xcf0')
elif self.efuseGroupSel == 1:
self.m_button_fuse400.SetLabel('OTPMK')
self.m_staticText_fuse410.SetLabel('OTPMK')
self.m_staticText_fuse420.SetLabel('OTPMK')
self.m_staticText_fuse430.SetLabel('OTPMK')
self.m_staticText_fuse440.SetLabel('OTPMK')
self.m_button_fuse450.SetLabel('OTPMK')
self.m_button_fuse460.SetLabel('OTPMK')
self.m_button_fuse470.SetLabel('OTPMK')
self.m_staticText_fuse480.SetLabel('0xd80')
self.m_staticText_fuse490.SetLabel('0xd90')
self.m_staticText_fuse4a0.SetLabel('0xda0')
self.m_staticText_fuse4b0.SetLabel('0xdb0')
self.m_staticText_fuse4c0.SetLabel('0xdc0')
self.m_staticText_fuse4d0.SetLabel('0xdd0')
self.m_staticText_fuse4e0.SetLabel('0xde0')
self.m_staticText_fuse4f0.SetLabel('0xdf0')
self.m_staticText_fuse500.SetLabel('KEY1')
self.m_staticText_fuse510.SetLabel('KEY1')
self.m_staticText_fuse520.SetLabel('KEY1')
self.m_staticText_fuse530.SetLabel('KEY1')
self.m_staticText_fuse540.SetLabel('KEY1')
self.m_staticText_fuse550.SetLabel('KEY1')
self.m_staticText_fuse560.SetLabel('KEY1')
self.m_staticText_fuse570.SetLabel('KEY1')
self.m_staticText_fuse580.SetLabel('KEY2')
self.m_staticText_fuse590.SetLabel('KEY2')
self.m_staticText_fuse5a0.SetLabel('KEY2')
self.m_staticText_fuse5b0.SetLabel('KEY2')
self.m_staticText_fuse5c0.SetLabel('KEY2')
self.m_staticText_fuse5d0.SetLabel('KEY2')
self.m_staticText_fuse5e0.SetLabel('KEY2')
self.m_staticText_fuse5f0.SetLabel('KEY2')
self.m_staticText_fuse600.SetLabel('KEY3')
self.m_staticText_fuse610.SetLabel('KEY3')
self.m_staticText_fuse620.SetLabel('KEY3')
self.m_staticText_fuse630.SetLabel('KEY3')
self.m_staticText_fuse640.SetLabel('KEY3')
self.m_staticText_fuse650.SetLabel('KEY3')
self.m_staticText_fuse660.SetLabel('KEY3')
self.m_staticText_fuse670.SetLabel('KEY3')
self.m_staticText_fuse680.SetLabel('KEY4')
self.m_staticText_fuse690.SetLabel('KEY4')
self.m_staticText_fuse6a0.SetLabel('KEY4')
self.m_staticText_fuse6b0.SetLabel('KEY4')
self.m_staticText_fuse6c0.SetLabel('KEY4')
self.m_button_fuse6d0.SetLabel('KEY4')
self.m_button_fuse6e0.SetLabel('KEY4')
self.m_staticText_fuse6f0.SetLabel('KEY4')
self.m_staticText_fuse700.SetLabel('KEY5')
self.m_staticText_fuse710.SetLabel('KEY5')
self.m_staticText_fuse720.SetLabel('KEY5')
self.m_staticText_fuse730.SetLabel('KEY5')
self.m_staticText_fuse740.SetLabel('KEY5')
self.m_staticText_fuse750.SetLabel('KEY5')
self.m_staticText_fuse760.SetLabel('KEY5')
self.m_staticText_fuse770.SetLabel('KEY5')
self.m_staticText_fuse780.SetLabel('1080H')
self.m_staticText_fuse790.SetLabel('1090H')
self.m_staticText_fuse7a0.SetLabel('10a0H')
self.m_staticText_fuse7b0.SetLabel('10b0H')
self.m_staticText_fuse7c0.SetLabel('10c0H')
self.m_staticText_fuse7d0.SetLabel('10d0H')
self.m_staticText_fuse7e0.SetLabel('10e0H')
self.m_staticText_fuse7f0.SetLabel('10f0H')
self.m_staticText_fuse800.SetLabel('RomP1')
self.m_staticText_fuse810.SetLabel('RomP1')
self.m_staticText_fuse820.SetLabel('RomP1')
self.m_staticText_fuse830.SetLabel('RomP1')
self.m_staticText_fuse840.SetLabel('RomP1')
self.m_staticText_fuse850.SetLabel('RomP1')
self.m_staticText_fuse860.SetLabel('RomP1')
self.m_staticText_fuse870.SetLabel('RomP1')
self.m_staticText_fuse880.SetLabel('RomP1')
self.m_staticText_fuse890.SetLabel('RomP1')
self.m_staticText_fuse8a0.SetLabel('RomP1')
self.m_staticText_fuse8b0.SetLabel('RomP1')
self.m_staticText_fuse8c0.SetLabel('RomP1')
self.m_staticText_fuse8d0.SetLabel('RomP1')
self.m_staticText_fuse8e0.SetLabel('RomP1')
self.m_staticText_fuse8f0.SetLabel('RomP1')
elif self.efuseGroupSel == 2:
self.m_button_fuse400.SetLabel('RomP2')
self.m_staticText_fuse410.SetLabel('RomP2')
self.m_staticText_fuse420.SetLabel('RomP2')
self.m_staticText_fuse430.SetLabel('RomP2')
self.m_staticText_fuse440.SetLabel('RomP2')
self.m_button_fuse450.SetLabel('RomP2')
self.m_button_fuse460.SetLabel('RomP2')
self.m_button_fuse470.SetLabel('RomP2')
self.m_staticText_fuse480.SetLabel('RomP2')
self.m_staticText_fuse490.SetLabel('RomP2')
self.m_staticText_fuse4a0.SetLabel('RomP2')
self.m_staticText_fuse4b0.SetLabel('RomP2')
self.m_staticText_fuse4c0.SetLabel('RomP2')
self.m_staticText_fuse4d0.SetLabel('RomP2')
self.m_staticText_fuse4e0.SetLabel('RomP2')
self.m_staticText_fuse4f0.SetLabel('RomP2')
self.m_staticText_fuse500.SetLabel('GP1')
self.m_staticText_fuse510.SetLabel('GP1')
self.m_staticText_fuse520.SetLabel('GP1')
self.m_staticText_fuse530.SetLabel('GP1')
self.m_staticText_fuse540.SetLabel('GP1')
self.m_staticText_fuse550.SetLabel('GP1')
self.m_staticText_fuse560.SetLabel('GP1')
self.m_staticText_fuse570.SetLabel('GP1')
self.m_staticText_fuse580.SetLabel('GP1')
self.m_staticText_fuse590.SetLabel('GP1')
self.m_staticText_fuse5a0.SetLabel('GP1')
self.m_staticText_fuse5b0.SetLabel('GP1')
self.m_staticText_fuse5c0.SetLabel('GP1')
self.m_staticText_fuse5d0.SetLabel('GP1')
self.m_staticText_fuse5e0.SetLabel('GP1')
self.m_staticText_fuse5f0.SetLabel('GP1')
self.m_staticText_fuse600.SetLabel('GP2')
self.m_staticText_fuse610.SetLabel('GP2')
self.m_staticText_fuse620.SetLabel('GP2')
self.m_staticText_fuse630.SetLabel('GP2')
self.m_staticText_fuse640.SetLabel('GP2')
self.m_staticText_fuse650.SetLabel('GP2')
self.m_staticText_fuse660.SetLabel('GP2')
self.m_staticText_fuse670.SetLabel('GP2')
self.m_staticText_fuse680.SetLabel('GP2')
self.m_staticText_fuse690.SetLabel('GP2')
self.m_staticText_fuse6a0.SetLabel('GP2')
self.m_staticText_fuse6b0.SetLabel('GP2')
self.m_staticText_fuse6c0.SetLabel('GP2')
self.m_button_fuse6d0.SetLabel('GP2')
self.m_button_fuse6e0.SetLabel('GP2')
self.m_staticText_fuse6f0.SetLabel('GP2')
self.m_staticText_fuse700.SetLabel('GP3')
self.m_staticText_fuse710.SetLabel('GP3')
self.m_staticText_fuse720.SetLabel('GP3')
self.m_staticText_fuse730.SetLabel('GP3')
self.m_staticText_fuse740.SetLabel('GP3')
self.m_staticText_fuse750.SetLabel('GP3')
self.m_staticText_fuse760.SetLabel('GP3')
self.m_staticText_fuse770.SetLabel('GP3')
self.m_staticText_fuse780.SetLabel('GP3')
self.m_staticText_fuse790.SetLabel('GP3')
self.m_staticText_fuse7a0.SetLabel('GP3')
self.m_staticText_fuse7b0.SetLabel('GP3')
self.m_staticText_fuse7c0.SetLabel('GP3')
self.m_staticText_fuse7d0.SetLabel('GP3')
self.m_staticText_fuse7e0.SetLabel('GP3')
self.m_staticText_fuse7f0.SetLabel('GP3')
self.m_staticText_fuse800.SetLabel('GP4')
self.m_staticText_fuse810.SetLabel('GP4')
self.m_staticText_fuse820.SetLabel('GP4')
self.m_staticText_fuse830.SetLabel('GP4')
self.m_staticText_fuse840.SetLabel('GP4')
self.m_staticText_fuse850.SetLabel('GP4')
self.m_staticText_fuse860.SetLabel('GP4')
self.m_staticText_fuse870.SetLabel('GP4')
self.m_staticText_fuse880.SetLabel('GP4')
self.m_staticText_fuse890.SetLabel('GP4')
self.m_staticText_fuse8a0.SetLabel('GP4')
self.m_staticText_fuse8b0.SetLabel('GP4')
self.m_staticText_fuse8c0.SetLabel('GP4')
self.m_staticText_fuse8d0.SetLabel('GP4')
self.m_staticText_fuse8e0.SetLabel('GP4')
self.m_staticText_fuse8f0.SetLabel('GP4')
elif self.efuseGroupSel == 3:
self.m_button_fuse400.SetLabel('GP5')
self.m_staticText_fuse410.SetLabel('GP5')
self.m_staticText_fuse420.SetLabel('GP5')
self.m_staticText_fuse430.SetLabel('GP5')
self.m_staticText_fuse440.SetLabel('GP5')
self.m_button_fuse450.SetLabel('GP5')
self.m_button_fuse460.SetLabel('GP5')
self.m_button_fuse470.SetLabel('GP5')
self.m_staticText_fuse480.SetLabel('GP5')
self.m_staticText_fuse490.SetLabel('GP5')
self.m_staticText_fuse4a0.SetLabel('GP5')
self.m_staticText_fuse4b0.SetLabel('GP5')
self.m_staticText_fuse4c0.SetLabel('GP5')
self.m_staticText_fuse4d0.SetLabel('GP5')
self.m_staticText_fuse4e0.SetLabel('GP5')
self.m_staticText_fuse4f0.SetLabel('GP5')
self.m_staticText_fuse500.SetLabel('1800H')
self.m_staticText_fuse510.SetLabel('1810H')
self.m_staticText_fuse520.SetLabel('1820H')
self.m_staticText_fuse530.SetLabel('1830H')
self.m_staticText_fuse540.SetLabel('1840H')
self.m_staticText_fuse550.SetLabel('1850H')
self.m_staticText_fuse560.SetLabel('1860H')
self.m_staticText_fuse570.SetLabel('1870H')
self.m_staticText_fuse580.SetLabel('1880H')
self.m_staticText_fuse590.SetLabel('1890H')
self.m_staticText_fuse5a0.SetLabel('18a0H')
self.m_staticText_fuse5b0.SetLabel('18b0H')
self.m_staticText_fuse5c0.SetLabel('18c0H')
self.m_staticText_fuse5d0.SetLabel('18d0H')
self.m_staticText_fuse5e0.SetLabel('18e0H')
self.m_staticText_fuse5f0.SetLabel('18f0H')
self.m_staticText_fuse600.SetLabel('N/A')
self.m_staticText_fuse610.SetLabel('N/A')
self.m_staticText_fuse620.SetLabel('N/A')
self.m_staticText_fuse630.SetLabel('N/A')
self.m_staticText_fuse640.SetLabel('N/A')
self.m_staticText_fuse650.SetLabel('N/A')
self.m_staticText_fuse660.SetLabel('N/A')
self.m_staticText_fuse670.SetLabel('N/A')
self.m_staticText_fuse680.SetLabel('N/A')
self.m_staticText_fuse690.SetLabel('N/A')
self.m_staticText_fuse6a0.SetLabel('N/A')
self.m_staticText_fuse6b0.SetLabel('N/A')
self.m_staticText_fuse6c0.SetLabel('N/A')
self.m_button_fuse6d0.SetLabel('N/A')
self.m_button_fuse6e0.SetLabel('N/A')
self.m_staticText_fuse6f0.SetLabel('N/A')
self.m_staticText_fuse700.SetLabel('N/A')
self.m_staticText_fuse710.SetLabel('N/A')
self.m_staticText_fuse720.SetLabel('N/A')
self.m_staticText_fuse730.SetLabel('N/A')
self.m_staticText_fuse740.SetLabel('N/A')
self.m_staticText_fuse750.SetLabel('N/A')
self.m_staticText_fuse760.SetLabel('N/A')
self.m_staticText_fuse770.SetLabel('N/A')
self.m_staticText_fuse780.SetLabel('N/A')
self.m_staticText_fuse790.SetLabel('N/A')
self.m_staticText_fuse7a0.SetLabel('N/A')
self.m_staticText_fuse7b0.SetLabel('N/A')
self.m_staticText_fuse7c0.SetLabel('N/A')
self.m_staticText_fuse7d0.SetLabel('N/A')
self.m_staticText_fuse7e0.SetLabel('N/A')
self.m_staticText_fuse7f0.SetLabel('N/A')
self.m_staticText_fuse800.SetLabel('N/A')
self.m_staticText_fuse810.SetLabel('N/A')
self.m_staticText_fuse820.SetLabel('N/A')
self.m_staticText_fuse830.SetLabel('N/A')
self.m_staticText_fuse840.SetLabel('N/A')
self.m_staticText_fuse850.SetLabel('N/A')
self.m_staticText_fuse860.SetLabel('N/A')
self.m_staticText_fuse870.SetLabel('N/A')
self.m_staticText_fuse880.SetLabel('N/A')
self.m_staticText_fuse890.SetLabel('N/A')
self.m_staticText_fuse8a0.SetLabel('N/A')
self.m_staticText_fuse8b0.SetLabel('N/A')
self.m_staticText_fuse8c0.SetLabel('N/A')
self.m_staticText_fuse8d0.SetLabel('N/A')
self.m_staticText_fuse8e0.SetLabel('N/A')
self.m_staticText_fuse8f0.SetLabel('N/A')
else:
pass
else:
pass
def _resetFuseRegionField( self ):
    """
    Restore the default system-window background colour on every fuse
    text field, then refresh the frame so the change is visible.

    Replaces 80 copy-pasted SetBackgroundColour calls with a loop: the
    controls are named after the fuse word offset in lowercase hex,
    m_textCtrl_fuse400 .. m_textCtrl_fuse8f0 in steps of 0x10.
    """
    # Resolve the system colour once instead of once per control.
    default_colour = wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW )
    for offset in range(0x400, 0x900, 0x10):
        # '%x' yields lowercase hex ('400', '4a0', ... '8f0'), matching
        # the widget attribute names exactly.
        getattr(self, 'm_textCtrl_fuse%x' % offset).SetBackgroundColour( default_colour )
    self.Refresh()
def updateFuseRegionField( self ):
self._resetFuseRegionField()
color = None
if self.isToolRunAsEntryMode:
color = wx.SYS_COLOUR_GRAYTEXT
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
self.m_textCtrl_fuse400.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse430.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse440.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse480.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse490.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4a0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4b0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4c0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4d0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4e0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4f0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse500.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse510.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse520.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse530.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse540.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse550.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse560.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse570.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse600.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse610.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse620.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse630.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse640.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse650.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse660.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse670.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse680.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse6f0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse700.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse710.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse720.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse730.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse740.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse750.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse760.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse770.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse780.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse790.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse7a0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse7b0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse7c0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse7d0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse7e0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse7f0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse800.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse810.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse820.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse830.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse840.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse850.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse860.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse870.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse880.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse890.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse8a0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse8b0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
elif self.mcuSeries == uidef.kMcuSeries_iMXRT11yy:
if self.efuseGroupSel == 0:
self.m_textCtrl_fuse400.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse410.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse420.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse430.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse440.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse450.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse460.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse470.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse480.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse490.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4a0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4b0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4c0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4d0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4e0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse4f0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse5c0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse5d0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse5e0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse5f0.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse600.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse610.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse620.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse630.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse640.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse650.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse660.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse670.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse680.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse690.SetBackgroundColour( wx.SystemSettings.GetColour( color ) )
self.m_textCtrl_fuse6a0.SetBackgroundColour( wx.SystemSettings.GetColour( | |
input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed."
past_buckets = past_buckets_states[0]
past_states = past_buckets_states[1]
# get query vector
query_vectors = self.query_key(hidden_states)
query_vectors = self._split_hidden_size_dim(
query_vectors, self.num_attention_heads, self.attention_head_size
)
if past_buckets is not None:
key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets(
query_vectors=query_vectors,
attention_mask=attention_mask,
num_hashes=num_hashes,
hidden_states=hidden_states,
past_states=past_states,
past_buckets=past_buckets,
)
query_key_vectors = self._query_per_attn_head(key_value_hidden_states)
value_vectors = self._value_per_attn_head(key_value_hidden_states)
# split key & value vectors by num hashes to apply
# self attention on each separately
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size,
)
# repeat query vectors across hash dimension
query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1)
else:
key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1)
query_key_vectors = self.query_key(key_value_hidden_states)
value_vectors = self.value(key_value_hidden_states)
else:
# project hidden_states to query_key and value
query_vectors = None
query_key_vectors = self.query_key(hidden_states)
value_vectors = self.value(hidden_states)
# if query key is not already split
if not do_cached_attention or past_buckets is None:
query_key_vectors = self._split_hidden_size_dim(
query_key_vectors, self.num_attention_heads, self.attention_head_size
)
value_vectors = self._split_hidden_size_dim(
value_vectors, self.num_attention_heads, self.attention_head_size
)
# cache buckets for next incremental decoding
if do_cached_attention and past_buckets is None and key_value_hidden_states.shape[1] >= self.chunk_length:
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
# free memory
del hidden_states
assert (
query_key_vectors.shape[-1] == self.attention_head_size
), "last dim of query_key_vectors is {} but should be {}.".format(
query_key_vectors.shape[-1], self.attention_head_size
)
assert (
value_vectors.shape[-1] == self.attention_head_size
), "last dim of value_vectors is {} but should be {}.".format(
value_vectors.shape[-1], self.attention_head_size
)
do_standard_self_attention = (sequence_length <= self.chunk_length) or (
use_cache and past_buckets_states[1] is not None
)
# LSH attention only makes sense if chunked attention should be performed
if not do_standard_self_attention:
# set `num_buckets` on the fly, recommended way to do it
if self.num_buckets is None:
self._set_num_buckets(sequence_length)
# use cached buckets for backprop only
if buckets is None:
# hash query key vectors into buckets
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
else:
# make sure buckets has correct shape for LSH attention
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length)
assert (
int(buckets.shape[-1]) == num_hashes * sequence_length
), "last dim of buckets is {}, but should be {}".format(buckets.shape[-1], num_hashes * sequence_length)
sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(
sequence_length, buckets, num_hashes
)
# make sure bucket idx is not longer then sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length
# cluster query key value vectors according to hashed buckets
query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes)
value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes)
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,
)
if self.chunk_length is None:
assert (
self.num_chunks_before == 0 and self.num_chunks_after == 0
), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0."
elif do_cached_attention and past_buckets is not None:
# use max sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx
else:
# get sequence length indices
sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat(
batch_size, self.num_attention_heads, 1
)
# scale key vectors
key_vectors = self._len_and_dim_norm(query_key_vectors)
# set query_vectors to query key vectors if LSH self attention
query_vectors = query_vectors if query_vectors is not None else query_key_vectors
# free memory
del query_key_vectors
# get attention probs
out_vectors, logits, attention_probs = self._attend(
query_vectors=query_vectors,
key_vectors=key_vectors,
value_vectors=value_vectors,
sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,
attention_mask=attention_mask,
head_mask=head_mask,
do_standard_self_attention=do_standard_self_attention,
do_cached_attention=do_cached_attention,
)
# free memory
del key_vectors, value_vectors
# re-order out_vectors and logits
if not do_standard_self_attention:
# sort clusters back to correct ordering
out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)
if not do_standard_self_attention or (do_cached_attention and past_buckets is not None):
# sum up all hash rounds
if num_hashes > 1:
out_vectors = self._split_seq_length_dim_to(
out_vectors, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size,
)
logits = self._split_seq_length_dim_to(
logits, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size,
).unsqueeze(-1)
probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)
# free memory
del probs_vectors
# free memory
del logits
assert out_vectors.shape == (
batch_size,
self.num_attention_heads,
sequence_length,
self.attention_head_size,
), "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`."
out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
if output_attentions is False:
attention_probs = ()
if buckets is not None:
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1)
return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets)
def _query_per_attn_head(self, hidden_states):
per_head_query_key = self.query_key.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
query_key_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_query_key)
return query_key_vectors
def _value_per_attn_head(self, hidden_states):
per_head_value = self.value.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
value_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_value)
return value_vectors
def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False):
    """Assign each position to an LSH bucket via random-rotation hashing.

    `vectors` is rank-4 (batch, num_heads, seq_len, head_dim), as fixed by
    the "bmtd" einsum below.  Each of the `num_hashes` rounds projects the
    vectors onto random hyperplanes and takes the argmax as the bucket id;
    the rounds are then offset so that their bucket ids do not overlap, and
    flattened into one axis.

    Returns a long tensor of shape (batch, num_heads, num_hashes * seq_len).
    """
    batch_size = vectors.shape[0]

    # See https://arxiv.org/pdf/1509.02897.pdf
    # We sample a different random rotation for each round of hashing to
    # decrease the probability of hash misses.
    if isinstance(self.num_buckets, int):
        assert (
            self.num_buckets % 2 == 0
        ), "There should be an even number of bucktes, but `self.num_bucktes`: {}".format(self.num_buckets)
        rotation_size = self.num_buckets
        num_buckets = self.num_buckets
    else:
        # Factorize the hash if self.num_buckets is a list or tuple:
        # the final bucket id is a mixed-radix combination of one argmax
        # per factor; each factor gets its own slice of the rotation.
        rotation_size, num_buckets = 0, 1
        for bucket_factor in self.num_buckets:
            assert bucket_factor % 2 == 0, "The number of buckets should be even, but `num_bucket`: {}".format(
                bucket_factor
            )
            rotation_size = rotation_size + bucket_factor
            num_buckets = num_buckets * bucket_factor

    # remove gradient — hashing is discrete, nothing to backprop through
    vectors = vectors.detach()

    if self.hash_seed is not None:
        # for determinism: the same seed reproduces the same rotations
        torch.manual_seed(self.hash_seed)

    # Only half the rotation size is sampled: the other half is obtained
    # below by concatenating the negated projections.
    rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2)
    # create a random self.attention_head_size x num_hashes x num_buckets/2
    random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)
    # Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2
    rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations)

    if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1:
        # Single factor: bucket id is the argmax over [proj, -proj].
        rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)
        buckets = torch.argmax(rotated_vectors, dim=-1)
    else:
        # Get the buckets for them and combine.
        buckets, cur_sum, cur_product = None, 0, 1
        for bucket_factor in self.num_buckets:
            # Slice out this factor's share of the projections.
            rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)]
            cur_sum = cur_sum + bucket_factor // 2
            rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1)

            if buckets is None:
                buckets = torch.argmax(rotated_vectors_factor, dim=-1)
            else:
                # Mixed-radix accumulation: shift by the product of the
                # factors already consumed.
                buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1))

            cur_product = cur_product * bucket_factor

    if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]):
        # add an extra bucket for padding tokens only
        num_buckets = num_buckets + 1
        # assign padding tokens extra bucket
        buckets_mask = attention_mask.to(torch.uint8)[:, None, None, :].expand(buckets.shape)
        buckets = torch.where(
            buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device)
        )
    elif increase_num_buckets:
        num_buckets = num_buckets + 1

    # buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len).
    # Next we add offsets so that bucket numbers from different hashing rounds don't overlap.
    offsets = torch.arange(num_hashes, device=vectors.device)
    offsets = (offsets * num_buckets).view((1, 1, -1, 1))

    # expand to batch size and num attention heads
    offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:])
    # Merge the hash-round and sequence axes into one.
    offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)

    return offset_buckets
def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes):
    """Sort positions by bucket id and compute the inverse permutation.

    Returns ``(sorted_bucket_idx, undo_sorted_bucket_idx)``: the first
    orders positions bucket-by-bucket (stable sort, so order within a
    bucket is preserved), the second scatters them back to their original
    order.
    """
    # Pure index bookkeeping; no gradients are needed.
    with torch.no_grad():
        # Stable hash-based sort.
        sorted_bucket_idx = _stable_argsort(buckets, dim=-1)

        # Plain position indices 0..L-1, broadcast over the leading dims.
        positions = (
            torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)
            .view(1, 1, -1)
            .expand(sorted_bucket_idx.shape)
        )

        # Scattering the positions through the sort permutation yields the
        # permutation that undoes the sort.
        undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())
        undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, positions)

    return sorted_bucket_idx, undo_sorted_bucket_idx
def _set_num_buckets(self, sequence_length):
    """Pick and persist `num_buckets` when the config did not provide one.

    The paper recommends roughly ``2 * sequence_length / chunk_length``
    buckets; the value is rounded down to a power of two, and factorized
    into two smaller powers of two once it grows past a size limit.
    """
    # Exponent of the largest power of two <= 2 * (seq_len // chunk_length).
    exponent = (2 * (sequence_length // self.chunk_length)).bit_length() - 1
    num_buckets = 2 ** exponent

    # Beyond this limit, a factorized hash is preferred.
    limit = 2 * max(
        int((self.max_position_embeddings // self.chunk_length) ** 0.5),
        self.chunk_length,
    )
    if num_buckets > limit:
        # Split the exponent into two (nearly) equal halves.
        num_buckets = [2 ** (exponent // 2), 2 ** (exponent - exponent // 2)]

    logger.warning("config.num_buckets is not set. Setting config.num_buckets to {}...".format(num_buckets))

    # Store on the config as well so the chosen value is saved with the model.
    self.config.num_buckets = num_buckets
    self.num_buckets = num_buckets
def _attend(
self,
query_vectors,
key_vectors,
value_vectors,
sorted_bucket_idx_per_hash,
attention_mask,
head_mask,
do_standard_self_attention,
do_cached_attention,
):
# look at previous and following chunks if chunked attention
if not do_standard_self_attention:
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
# get logits and dots
# (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x | |
import numpy as np
def lanc(numwt, haf):
    """
    Generates a numwt + 1 + numwt lanczos cosine low pass filter with -6dB
    (1/4 power, 1/2 amplitude) point at haf

    Parameters
    ----------
    numwt : int
        number of points
    haf : float
        frequency (in 'cpi' of -6dB point, 'cpi' is cycles per interval.
        For hourly data cpi is cph,

    Returns
    -------
    wt : 1D array
        Symmetric filter weights of length 2 * numwt + 1, normalized so
        that they sum to 1.

    Examples
    --------
    >>> from oceans.filters import lanc
    >>> import matplotlib.pyplot as plt
    >>> t = np.arange(500)  # Time in hours.
    >>> h = 2.5 * np.sin(2 * np.pi * t / 12.42)
    >>> h += 1.5 * np.sin(2 * np.pi * t / 12.0)
    >>> h += 0.3 * np.random.randn(len(t))
    >>> wt = lanc(96+1+96, 1./40)
    >>> low = np.convolve(wt, h, mode='same')
    >>> high = h - low
    >>> fig, (ax0, ax1) = plt.subplots(nrows=2)
    >>> _ = ax0.plot(high, label='high')
    >>> _ = ax1.plot(low, label='low')
    >>> _ = ax0.legend(numpoints=1)
    >>> _ = ax1.legend(numpoints=1)
    """
    # One extra point so indices 0..numwt cover the half window.
    numwt += 1

    # Cosine taper over the half window; wt[0] is the central weight.
    ii = np.arange(numwt)
    wt = 0.5 * (1.0 + np.cos(np.pi * ii / numwt))

    # Apply the sinc term, skipping ii == 0 to avoid division by zero.
    ii = np.arange(1, numwt)
    xx = np.pi * 2 * haf * ii
    wt[1:] = wt[1:] * np.sin(xx) / xx

    # Normalize so the full mirrored filter sums to 1 (the center weight
    # appears once, every other weight twice).
    wt /= wt.sum() + wt[1:].sum()

    # Mirror the half window into the full, symmetric filter.
    return np.r_[wt[::-1], wt[1:]]
def smoo1(datain, window_len=11, window="hanning"):
    """
    Smooth the data using a window with requested size.

    Parameters
    ----------
    datain : array_like
        input series
    window_len : int
        size of the smoothing window; should be an odd integer
    window : str
        window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'.
        flat window will produce a moving average smoothing.

    Returns
    -------
    data_out : array_like
        smoothed signal

    Raises
    ------
    ValueError
        If `datain` is not 1-D, is shorter than `window_len`, or `window`
        is not one of the supported names.

    See Also
    --------
    scipy.signal.lfilter

    Notes
    -----
    original from: https://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal (with
    the window size) in both ends so that transient parts are minimized in the
    beginning and end part of the output signal.

    Examples
    --------
    >>> from oceans.filters import smoo1
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> time = np.linspace( -4, 4, 100 )
    >>> series = np.sin(time)
    >>> noise_series = series + np.random.randn( len(time) ) * 0.1
    >>> data_out = smoo1(series)
    >>> ws = 31
    >>> ax = plt.subplot(211)
    >>> _ = ax.plot(np.ones(ws))
    >>> windows = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
    >>> for w in windows[1:]:
    ...     _ = plt.plot(getattr(np, w)(ws))
    >>> _ = ax.axis([0, 30, 0, 1.1])
    >>> leg = ax.legend(windows)
    >>> _ = plt.title('The smoothing windows')
    >>> ax = plt.subplot(212)
    >>> l1, = ax.plot(series)
    >>> l2, = ax.plot(noise_series)
    >>> for w in windows:
    ...     _ = plt.plot(smoo1(noise_series, 10, w))
    >>> l = ['original signal', 'signal with noise']
    >>> l.extend(windows)
    >>> leg = ax.legend(l)
    >>> _ = plt.title('Smoothing a noisy signal')

    TODO: window parameter can be the window itself (i.e. an array)
    instead of a string.
    """
    datain = np.asarray(datain)

    if datain.ndim != 1:
        raise ValueError("Smooth only accepts 1 dimension arrays.")

    if datain.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")

    # Windows shorter than 3 points cannot smooth anything.
    if window_len < 3:
        return datain

    if window not in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
        msg = "Window must be is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"  # noqa
        raise ValueError(msg)

    # Pad both ends with reflected copies of the signal so that transients
    # at the boundaries are minimized.
    s = np.r_[
        2 * datain[0] - datain[window_len:1:-1],
        datain,
        2 * datain[-1] - datain[-1:-window_len:-1],
    ]

    if window == "flat":  # Moving average.
        w = np.ones(window_len, "d")
    else:
        # Look the window function up on the numpy module instead of
        # building a string for `eval` (same result, no code execution).
        w = getattr(np, window)(window_len)

    # Convolve with the normalized window, then trim the padding.
    data_out = np.convolve(w / w.sum(), s, mode="same")
    return data_out[window_len - 1 : -window_len + 1]
def smoo2(A, hei, wid, kind="hann", badflag=-9999, beta=14):
    """
    Calculates the smoothed array 'As' from the original array 'A' using the
    specified window of type 'kind' and shape ('hei', 'wid').

    Usage:
    As = smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14)

    Parameters
    ----------
    A : 2D array
        Array to be smoothed.
    hei : integer
        Window height. Must be odd and greater than or equal to 3.
    wid : integer
        Window width. Must be odd and greater than or equal to 3.
    kind : string, optional
        One of 'hann', 'hamming', 'blackman', 'bartlett', 'kaiser'.
        Refer to Numpy for details about each window type.
    badflag : float, optional
        The bad data flag. Elements of the input array 'A' holding this value are ignored.
    beta : float, optional
        Shape parameter for the kaiser window.

    Returns
    -------
    As : 2D array
        The smoothed array.  Positions that are NaN in 'A' stay NaN.

    <NAME> (<EMAIL>)
    April 2012
    """
    # Checking window type and dimensions
    kinds = ["hann", "hamming", "blackman", "bartlett", "kaiser"]
    if kind not in kinds:
        raise ValueError("Invalid window type requested: %s" % kind)

    if (np.mod(hei, 2) == 0) or (np.mod(wid, 2) == 0):
        raise ValueError("Window dimensions must be odd")

    if (hei <= 1) or (wid <= 1):
        raise ValueError("Window shape must be (3,3) or greater")

    # Creating the 2D window as the outer product of two 1D windows.
    if kind == "kaiser":  # Kaiser is the only window that takes `beta`.
        wdw = np.outer(np.kaiser(hei, beta), np.kaiser(wid, beta))
    else:
        # Map the window name (Hann) to the numpy function name
        # (numpy.hanning); resolve via getattr instead of `eval`.
        func = np.hanning if kind == "hann" else getattr(np, kind)
        wdw = np.outer(func(hei), func(wid))

    A = np.asanyarray(A)
    Fnan = np.isnan(A)
    imax, jmax = A.shape
    # np.nan (not the removed np.NaN alias) for NumPy 2.x compatibility.
    As = np.nan * np.ones((imax, jmax))

    # Integer half-window sizes; float indices (np.floor) would break
    # slicing on modern numpy.  Loop-invariant, so computed once.
    lh = hei // 2
    lw = wid // 2

    for i in range(imax):
        for j in range(jmax):
            # Default window ranges.
            wupp = 0
            wlow = hei
            wlef = 0
            wrig = wid

            # Default array ranges (functions of the i, j indices).
            upp = i - lh
            low = i + lh + 1
            lef = j - lw
            rig = j + lw + 1

            # Clip window and array ranges at the edges.
            if upp < 0:  # Upper edge.
                wupp = wupp - upp
                upp = 0
            if lef < 0:  # Left edge.
                wlef = wlef - lef
                lef = 0
            if low > imax:  # Bottom edge.
                wlow = wlow - (low - imax)
                low = imax
            if rig > jmax:  # Right edge.
                wrig = wrig - (rig - jmax)
                rig = jmax

            # Computing smoothed value at point (i, j).  The slices are
            # copied: they are *views*, and zeroing NaN/bad positions in
            # place would corrupt the input array and permanently zero the
            # shared window for all later iterations.
            Ac = A[upp:low, lef:rig].copy()
            wdwc = wdw[wupp:wlow, wlef:wrig].copy()
            fnan = np.isnan(Ac)
            Ac[fnan] = 0
            wdwc[fnan] = 0  # Eliminating NaNs from mean computation.
            fbad = Ac == badflag
            wdwc[fbad] = 0  # Eliminating bad data from mean computation.
            a = Ac * wdwc
            As[i, j] = a.sum() / wdwc.sum()

    # Assigning NaN to the positions holding NaNs in the original array.
    As[Fnan] = np.nan

    return As
def weim(x, N, kind="hann", badflag=-9999, beta=14):
"""
Calculates the smoothed array 'xs' from the original array 'x' using the
specified window of type 'kind' and size 'N'. 'N' must be an odd number.
Usage:
xs = weim(x, N, kind='hann', badflag=-9999, beta=14)
Parameters
----------
x : 1D array
Array to be smoothed.
N : integer
Window size. Must be odd.
kind : string, optional
Refer to Numpy for details about each window type.
badflag : float, optional
The bad data flag. Elements of the input array 'A' holding this
value are ignored.
beta : float, optional
Shape parameter for the kaiser window. For windows other than the
kaiser window, this parameter does nothing.
Returns
-------
xs : 1D array
The smoothed array.
---------------------------------------
<NAME> (<EMAIL>) June 2012
"""
# Checking window type and dimensions.
kinds = ["hann", "hamming", "blackman", "bartlett", "kaiser"]
if kind not in kinds:
raise ValueError("Invalid | |
finally:
with self.lock:
self.valid_session = valid_session
return valid_session
def rest_tb_target_list(self, all_targets = False, target_id = None):
    """
    List targets in this server

    :param bool all_targets: If True, include also targets that are marked
      as disabled.
    :param str target_id: Only get information for said target id
    :returns: list of target dictionaries, each augmented with *fullid*
      (AKA-prefixed id), *url* and an *rtb* backreference to this object.
    """
    # One specific target vs the whole list.
    if target_id:
        r = self.send_request("GET", "targets/" + target_id)
    else:
        r = self.send_request("GET", "targets/")
    _targets = []
    for rt in r['targets']:
        # Skip disabled targets
        # NOTE(review): this condition only skips disabled targets when a
        # specific target_id *was* requested; when listing all targets
        # (target_id == None) disabled ones are never skipped.  That looks
        # inverted with respect to the docstring (`all_targets` is
        # documented to control inclusion of disabled targets when
        # listing) -- confirm intent before changing.
        if target_id != None and rt.get('disabled', False) == True \
           and all_targets != True:
            continue
        # Augment the server's record with client-side convenience fields.
        rt['fullid'] = self.aka + "/" + rt['id']
        rt["url"] = self._base_url + 'targets/' + rt["id"]
        rt['rtb'] = self
        _targets.append(rt)
    del r
    return _targets
def rest_tb_target_update(self, target_id):
    """
    Refresh and return the cached information about a target.

    :param str target_id: ID of the target to operate on
    :returns: updated target tags
    :raises ValueError: if the target is unknown to this server
    """
    fullid = self.aka + "/" + target_id
    # Query the server for this target only, including disabled ones.
    rts = self.rest_tb_target_list(target_id = target_id, all_targets = True)
    if not rts:
        raise ValueError("%s/%s: unknown target" % (self.aka, target_id))
    rt_dict = self._rt_list_to_dict(rts)
    # Refresh the class-wide target cache with the fresh data.
    type(self)._rts_cache.update(rt_dict)
    return rt_dict[fullid]
def rest_tb_target_acquire(self, rt, ticket = ''):
    """Acquire (lock) a remote target for exclusive use.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    :returns: server response to the acquire request
    """
    url = "targets/%s/acquire" % rt['id']
    return self.send_request("PUT", url, data = { 'ticket': ticket })
def rest_tb_target_active(self, rt, ticket = ''):
    """Mark an acquired target as active (keepalive).

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    """
    url = "targets/%s/active" % rt['id']
    self.send_request("PUT", url, data = { 'ticket': ticket })
def rest_tb_target_enable(self, rt, ticket = ''):
    """Enable a (possibly disabled) remote target.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    :returns: server response
    """
    # `data` is passed positionally, matching send_request's signature.
    return self.send_request("PUT", "targets/%s/enable" % rt['id'],
                             { 'ticket': ticket })
def rest_tb_target_disable(self, rt, ticket = ''):
    """Disable a remote target.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    :returns: server response
    """
    # `data` is passed positionally, matching send_request's signature.
    return self.send_request("PUT", "targets/%s/disable" % rt['id'],
                             { 'ticket': ticket })
def rest_tb_thing_plug(self, rt, thing, ticket = ''):
    """Plug a *thing* (another target) into a target.

    :param dict rt: remote target descriptor
    :param str thing: name of the thing to plug
    :param str ticket: ticket identifying the acquisition owner
    :returns: server response
    """
    payload = { 'thing': thing, 'ticket': ticket }
    return self.send_request("PUT", "targets/%s/thing" % rt['id'],
                             data = payload)
def rest_tb_thing_list(self, rt, ticket = ''):
    """List the things plugged into a target.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    :returns: the server's *result* field
    """
    response = self.send_request("GET", "targets/%s/thing" % rt['id'],
                                 data = { 'ticket': ticket })
    return response['result']
def rest_tb_thing_unplug(self, rt, thing, ticket = ''):
    """Unplug a *thing* from a target.

    :param dict rt: remote target descriptor
    :param str thing: name of the thing to unplug
    :param str ticket: ticket identifying the acquisition owner
    """
    payload = { 'thing': thing, 'ticket': ticket }
    self.send_request("DELETE", "targets/%s/thing" % rt['id'],
                      data = payload)
def rest_tb_target_release(self, rt, ticket = '', force = False):
    """Release a previously acquired target.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    :param bool force: release even if owned by someone else
    """
    payload = { 'force': force, 'ticket': ticket }
    self.send_request("PUT", "targets/%s/release" % rt['id'],
                      data = payload)
def rest_tb_property_set(self, rt, prop, value, ticket = ''):
    """Set a property on a remote target.

    :param dict rt: remote target descriptor
    :param str prop: property name
    :param value: value to set
    :param str ticket: ticket identifying the acquisition owner
    """
    payload = { 'ticket': ticket, 'property': prop, 'value': value }
    self.send_request("PUT", "targets/%s/property_set" % rt['id'],
                      data = payload)
def rest_tb_property_get(self, rt, prop, ticket = ''):
    """Read a property from a remote target.

    :param dict rt: remote target descriptor
    :param str prop: property name
    :param str ticket: ticket identifying the acquisition owner
    :returns: the property's value
    """
    response = self.send_request(
        "PUT", "targets/%s/property_get" % rt['id'],
        data = { 'ticket': ticket, 'property': prop })
    return response['value']
def rest_tb_target_ip_tunnel_add(self, rt, ip_addr, port, proto,
                                 ticket = ''):
    """Create an IP tunnel to a target.

    :param dict rt: remote target descriptor
    :param str ip_addr: target-side IP address to tunnel to
    :param int port: target-side port to tunnel to
    :param str proto: protocol (e.g. tcp/udp)
    :param str ticket: ticket identifying the acquisition owner
    :returns: server-side port of the new tunnel, as an int
    """
    payload = {
        'ip_addr': ip_addr,
        'port': port,
        'proto': proto,
        'ticket': ticket,
    }
    response = self.send_request("POST", "targets/%s/ip_tunnel" % rt['id'],
                                 data = payload)
    # The server reports the allocated port; normalize to int.
    return int(response['port'])
def rest_tb_target_ip_tunnel_remove(self, rt, ip_addr, port, proto,
                                    ticket = ''):
    """Remove an existing IP tunnel to a target.

    :param dict rt: remote target descriptor
    :param str ip_addr: target-side IP address of the tunnel
    :param int port: target-side port of the tunnel
    :param str proto: protocol (e.g. tcp/udp)
    :param str ticket: ticket identifying the acquisition owner
    """
    payload = {
        'ip_addr': ip_addr,
        'port': port,
        'proto': proto,
        'ticket': ticket,
    }
    self.send_request("DELETE", "targets/%s/ip_tunnel" % rt['id'],
                      data = payload)
def rest_tb_target_ip_tunnel_list(self, rt, ticket = ''):
    """List the IP tunnels currently established to a target.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    :returns: the server's *tunnels* field
    """
    response = self.send_request("GET", "targets/%s/ip_tunnel" % rt['id'],
                                 data = { 'ticket': ticket })
    return response['tunnels']
def rest_tb_target_power_on(self, rt, ticket = ''):
    """Power on a remote target.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    """
    url = "targets/%s/power_on" % rt['id']
    self.send_request("PUT", url, data = { 'ticket': ticket })
def rest_tb_target_power_off(self, rt, ticket = ''):
    """Power off a remote target.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    """
    url = "targets/%s/power_off" % rt['id']
    self.send_request("PUT", url, data = { 'ticket': ticket })
def rest_tb_target_reset(self, rt, ticket = ''):
    """Reset a remote target.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    """
    url = "targets/%s/reset" % rt['id']
    self.send_request("PUT", url, data = { 'ticket': ticket })
def rest_tb_target_power_cycle(self, rt, ticket = '', wait = None):
    """Power cycle a remote target, optionally waiting between off and on.

    :param dict rt: remote target descriptor
    :param str ticket: ticket identifying the acquisition owner
    :param float wait: seconds to wait between power off and power on
      (None lets the server pick its default)
    """
    payload = { 'ticket': ticket }
    if wait is not None:
        # The server expects the wait time as a string-formatted float.
        payload['wait'] = "%f" % wait
    self.send_request("PUT", "targets/%s/power_cycle" % rt['id'],
                      data = payload)
def rest_tb_target_power_get(self, rt):
    """Query the power state of target *rt*; return the 'powered' field."""
    response = self.send_request("GET", "targets/%s/power_get" % rt['id'])
    return response['powered']
def rest_tb_target_images_set(self, rt, images, ticket = ''):
    """
    Write/configure images to the targets (depending on the
    target)

    :param images: Dictionary of image types and filenames, like in
      :meth:`ttbl.test_target_images_mixin.images_set`.
    :type images: dict
    :raises: Exception in case of errors
    """
    # keep update order: entries in *images* win over the ticket key
    payload = { 'ticket': ticket }
    payload.update(images)
    self.send_request("PUT", "targets/%s/images_set" % rt['id'],
                      data = payload)
def rest_tb_file_upload(self, remote_filename, local_filename):
    """Upload *local_filename* to the broker's storage as *remote_filename*."""
    with open(local_filename, 'rb') as f:
        self.send_request("POST", "files/" + remote_filename,
                          files = dict(file = f))
def rest_tb_file_dnload(self, remote_filename, local_filename):
    """
    Download a remote file from the broker to a local file

    :param str remote_filename: filename in the broker's user
      storage area
    :param str local_filename: local filename where to download it
    """
    # BUGFIX: open in binary mode; text mode ("w") can corrupt binary
    # payloads on platforms that translate line endings, and the
    # matching upload path already opens its file with 'rb'.
    with open(local_filename, "wb") as lf:
        self.rest_tb_file_dnload_to_fd(lf.fileno(), remote_filename)
def rest_tb_file_dnload_to_fd(self, fd, remote_filename):
    """
    Download a remote file from the broker into a file descriptor.

    :param str remote_filename: filename in the broker's user
      storage area
    :param int fd: file descriptor where to write the data to
    :returns: number of bytes written
    """
    response = self.send_request("GET", "files/%s" % remote_filename,
                                 data = {}, stream = True, raw = True)
    # http://docs.python-requests.org/en/master/user/quickstart/#response-content
    written = 0
    with contextlib.closing(response) as r:
        for chunk in r.iter_content(1024):
            os.write(fd, chunk)
            written += len(chunk)
    return written
def rest_tb_file_delete(self, remote_filename):
    """Delete *remote_filename* from the broker's user storage area."""
    self.send_request("DELETE", url = "files/" + remote_filename,
                      data = {})
def rest_tb_file_list(self):
    """
    Return a dictionary of files names available to the user in the
    broker and their sha256 hash.
    """
    response = self.send_request("GET", "files/")
    return response
def rest_tb_target_console_read(self, rt, console, offset, ticket = ''):
    """Read console output of target *rt* starting at *offset* (raw response)."""
    payload = { 'offset': offset }
    if console:
        payload['console'] = console
    if ticket != '':
        payload['ticket'] = ticket
    return self.send_request("GET", "targets/%s/console/" % rt['id'],
                             data = payload, stream = False, raw = True)
def rest_tb_target_console_size(self, rt, console, ticket = ''):
    """Return the number of bytes available on *console* of target *rt*."""
    response = self.send_request(
        'GET', "targets/%s/console_size" % rt['id'],
        data = dict(console = console, ticket = ticket))
    return response['byte_count']
def rest_tb_target_console_read_to_fd(self, fd, rt, console, offset,
                                      max_size = 0, ticket = ''):
    """
    Stream console output of target *rt* into file descriptor *fd*.

    Stops after *max_size* bytes when *max_size* > 0.
    :returns: number of bytes written
    """
    payload = { 'offset': offset }
    if console:
        payload['console'] = console
    if ticket != '':
        payload['ticket'] = ticket
    response = self.send_request("GET", "targets/%s/console/" % rt['id'],
                                 data = payload, stream = True, raw = True)
    # http://docs.python-requests.org/en/master/user/quickstart/#response-content
    written = 0
    with contextlib.closing(response) as r:
        for chunk in r.iter_content(1024):
            os.write(fd, chunk)
            # count actual bytes; chunks may be shorter than requested
            written += len(chunk)
            if max_size > 0 and written >= max_size:
                break
    return written
def rest_tb_target_console_write(self, rt, console, data, ticket = ''):
    """Write *data* to *console* of target *rt*; return the server response."""
    payload = { 'data': data }
    if console:
        payload['console'] = console
    if ticket != '':
        payload['ticket'] = ticket
    return self.send_request('POST', "targets/%s/console/" % rt['id'],
                             data = payload)
def rest_tb_target_debug_info(self, rt, ticket = ''):
    """Return the debugging information string for target *rt*."""
    response = self.send_request('GET', "targets/%s/debug" % rt['id'],
                                 data = dict(ticket = ticket))
    return response['info']
def rest_tb_target_debug_start(self, rt, ticket = ''):
    """Start debugging support on target *rt*."""
    url = "targets/%s/debug" % rt['id']
    return self.send_request('PUT', url, data = dict(ticket = ticket))
def rest_tb_target_debug_stop(self, rt, ticket = ''):
    """Stop debugging support on target *rt*."""
    url = "targets/%s/debug" % rt['id']
    return self.send_request('DELETE', url, data = dict(ticket = ticket))
def rest_tb_target_debug_halt(self, rt, ticket = ''):
    """Halt target *rt* via the debugging bridge."""
    url = "targets/%s/debug_halt" % rt['id']
    self.send_request("PUT", url, data = dict(ticket = ticket))
def rest_tb_target_debug_reset(self, rt, ticket = ''):
    """Reset target *rt* via the debugging bridge."""
    url = "targets/%s/debug_reset" % rt['id']
    self.send_request("PUT", url, data = dict(ticket = ticket))
def rest_tb_target_debug_reset_halt(self, rt, ticket = ''):
    """Reset and immediately halt target *rt* via the debugging bridge."""
    url = "targets/%s/debug_reset_halt" % rt['id']
    self.send_request("PUT", url, data = dict(ticket = ticket))
def rest_tb_target_debug_resume(self, rt, ticket = ''):
    """Resume execution of target *rt* via the debugging bridge."""
    url = "targets/%s/debug_resume" % rt['id']
    self.send_request("PUT", url, data = dict(ticket = ticket))
def rest_tb_target_debug_openocd(self, rt, command, ticket = ''):
    """Run OpenOCD *command* on target *rt*; return the command output."""
    response = self.send_request(
        "PUT", "targets/%s/debug_openocd" % rt['id'],
        data = dict(ticket = ticket, command = command))
    return response['openocd_output']
def rest_init(path, url, ignore_ssl = False, aka = None):
    """
    Initialize access to a remote target broker.

    :param str path: path prefix where to load state from
    :param str url: URL for which we are loading state
    :returns: the broker object registered for *url*
    """
    broker = rest_target_broker(path, url, ignore_ssl, aka)
    rest_target_brokers[url] = broker
    return broker
def rest_shutdown(path):
    """
    Shutdown REST API, saving state in *path*.

    :param str path: Path to where to save state information
    """
    for broker in rest_target_brokers.itervalues():
        broker.tb_state_save(path)
def rest_login(args):
"""
Login into remote servers.
:param argparse.Namespace args: login arguments like -q (quiet) or
userid.
:returns: True if it can be logged into at least 1 remote server.
"""
logged = False
for rtb in rest_target_brokers.itervalues():
logger.info("%s: checking for a valid session", rtb._url)
if not rtb.valid_session:
if args.quiet:
if rtb.aka:
aka = "_" + rtb.aka
else:
aka = ""
userid = os.environ.get("TCF_USER" + aka,
os.environ.get("TCF_USER", None))
userpass = os.environ.get("TCF_PASSWORD" + aka,
os.environ.get("TCF_PASSWORD", None))
if not userid:
logger.error("Unable to get user from env variable: "
"TCF_USER" + aka)
continue
if | |
The syntax allow both attribute/command execution and the usage of multiple typed arguments:
ACTION(alarm:command,mach/dummy/motor/move,int(1),int(10))
ACTION(reset:attribute,mach/dummy/motor/position,int(0))
Accepted keywords are:
$ALARM : Alarm name
$DESCRIPTION : Description text
$VALUES : last values stored for that alarm
$REPORT : full report sent when the alarm was raised
$DATETIME : current time as YYYYMMDD_hhmm
$MESSAGE : type of alarm event (RESET,ALARM,REMINDER,...)
"""
if fun.isString(alarm): alarm = self.Alarms[alarm]
action = args if fun.isSequence(args) else re.split('[,:;]',args)
self.info('In PyAlarm.trigger_action(%s,%s)'%(alarm.tag,args))
if action[0] in ('command','attribute'):
try:
dev = action[1].rsplit('/',1)[0]
dp = PyTango.DeviceProxy(dev)
if not fandango.tango.check_device(dev):
exc = '%s receiver is not running!'%dev
self.error(exc)
raise Exception(exc)
cmd = [action[1].rsplit('/',1)[1]]+action[2:]
keywords = {
'\$ALARM':alarm.tag,
'\$DESCRIPTION':alarm.description,
'\$MESSAGE':message,
'\$DATETIME':fun.time2str(cad='%Y%m%d_%H%M%S')
}
print keywords
for k,v in keywords.items():
print cmd,k,v
cmd = [fun.clsub(k,v,c,lower=0) for c in cmd]
print cmd
if fun.clsearch('\$values',str(cmd)):
values = str(self.PastValues.get(alarm.tag))
cmd = [fun.clsub('\$values',values,c,lower=0) for c in cmd]
if fun.clsearch('\$report',str(cmd)):
report = '\n'.join(self.GenerateReport(alarm.tag,message=message))
cmd = [fun.clsub('\$report',report,c,lower=0) for c in cmd]
try: #This eval will try to pass numeric/float arguments
arg = [eval(c) for c in cmd[1:]]
except:
arg = [s.strip("' ") for s in cmd[1:]]
if arg and len(arg)==1:
if action[0] == 'command':
t = str(dp.command_query(cmd[0]).in_type)
else:
t = str(dp.attribute_query(cmd[0]).data_format)
if not fun.clsearch('array|spectrum|image',t):
arg = arg[0]
if action[0] == 'command':
self.info('\tlaunching: %s / %s (%s)' % (dev,cmd[0],cmd[1:]))
cargs = [cmd[0],arg] if arg else [cmd[0]]
val = dp.command_inout(*cargs)
self.info('\t'+str(val))
else:
self.info('\tlaunching: %s / %s = %s' % (dev,cmd[0],cmd[1:]))
val = PyTango.DeviceProxy(dev).write_attribute(cmd[0],arg)
self.info('\t'+str(val))
except:
self.warning('Unable to execute action %s'%str(action))
self.warning(traceback.format_exc())
elif action[0]=='system' and action[1] in self.AllowedActions:
try:
os.system(action[1]+'&')
except:
self.warning('Unable to execute action %s'%str(action))
self.warning(traceback.format_exc())
else:
self.warning('\tUnknown Action: %s'%action[0])
##@}
#########################################################################################################
## @name Snap Contexts Methods
#@{
#SnapContext(359,Name,Author,Reason,Attributes[3],Snapshots[0])
def trigger_snapshot(self,tag_name, user_comment=None):
    """
    Take a snapshot of the attributes referenced by alarm *tag_name*.

    First check for existing contexts with name=tag_name.
    If one exists and its reason is ALARM, it is used; otherwise a new
    context is created (when CreateNewContexts is enabled).
    Then a snapshot is taken for the context, with a comment derived
    from *user_comment* (None, 'ALARM', 'DISABLED', 'RECOVERED' or an
    acknowledge message).
    """
    self.info( "In "+self.get_name()+"::trigger_snapshot(%s)"%tag_name)
    try:
        formula = self.Alarms[tag_name].formula
        self.info(formula)
        # collect only those attributes of the formula that the snap
        # service accepts for archiving
        variables = self.Eval.parse_variables(formula)
        existingAttrsAllowed=[]
        for var in variables:
            if (self.snap.check_attribute_allowed(str(var[0]+'/'+var[1]))):
                existingAttrsAllowed.append(str(var[0]+'/'+var[1]))
        if (len(existingAttrsAllowed)==0):
            self.warning('Cannot take a snapshot - alarm attributes list empty!')
            return
        # look for contexts already declared for this alarm tag
        res=self.snap.db.search_context(tag_name)
        res = sorted(c for c in res if c['reason']=='ALARM')
        cids = sorted(c['id_context'] for c in res)
        if not res:
            if not self.CreateNewContexts:
                self.warning('Automatic Context Creation DISABLED!, Sorry, use CreateAlarmContext method')
                return
            self.info('Creating ctx: name: '+tag_name+', descr: '+formula)
            self.info('atts:')
            for a in existingAttrsAllowed:
                self.info(a)
            cid = self.CreateAlarmContext([tag_name]+existingAttrsAllowed)
            ctx = self.snap.get_context(cid)
            self.info('snap.descr: '+formula)
        else:
            # reuse the newest matching context and refresh its attribute list
            if len(res)>1: self.warning('Multiple contexts declared for this Alarm, using newest!: %s'%str(cids))
            ctx=self.snap.get_context(cids[-1])
            self.snap.db.update_context_attributes(self.snap.db.get_context_ids(tag_name)[0], existingAttrsAllowed)
        # choose the snapshot comment depending on the triggering event
        if user_comment and user_comment!='ALARM':
            if user_comment in ('DISABLED','RECOVERED'):
                ctx.take_snapshot(comment=shortstr(user_comment,255))
            else:
                ctx.take_snapshot(comment=shortstr('ACKNOWLEDGED: %s'%user_comment,255))
        else:
            ctx.take_snapshot(comment=shortstr('ALARM: %s'%self.Alarms[tag_name].description,255))
    except Exception,e:
        # snapshots are best-effort; never propagate failures to the caller
        self.warning( 'Exception in trigger snapshot: %s' % traceback.format_exc())
    return
##@}
def AddNewAlarm(self, argin):
    """
    Declare a new alarm from its "tag:formula" declaration string.

    :param argin: alarm declaration as stored in the AlarmList property
    :returns: the updated AlarmList on success, or an error message string
    """
    #@todo: This command has to be refactored and added as Expert command
    self.info( "In "+ self.get_name()+ "::AddNewAlarm()")
    argout = ['FAILED']
    try:
        self.lock.acquire()
        alarm = self.parse_alarm(argin)
        if not alarm:
            # BUGFIX: when parsing fails `alarm` is falsy and cannot be
            # subscripted; report the raw argument instead of alarm['tag']
            argout = 'INVALID ALARM FORMAT: %s'%argin
        elif alarm['tag'] in self.Alarms:
            argout = 'ALARM TAG %s ALREADY EXISTS!'%alarm['tag']
        else:
            # persist the new declaration and re-init to load it
            self.AlarmList.append(argin)
            self.init_device(update_properties=False)
            self.db.put_device_property(self.get_name(),{'AlarmList':self.AlarmList})
            argout = self.AlarmList[:]
    finally:
        self.lock.release()
    return argout
def RemoveAlarm(self, argin):
    """
    Remove the alarm with tag *argin* from the device's AlarmList.

    :param argin: tag name of the alarm to remove
    :returns: the updated AlarmList on success, or an error message string
    """
    #@todo: This command has to be refactored and added as Expert command
    self.info("In "+self.get_name()+"::RemoveAlarm()")
    argout = ['FAILED']
    try:
        self.lock.acquire()
        if argin in self.Alarms and self.Alarms[argin].active: return 'ALARM SHOULD BE ACKNOWLEDGED FIRST!'
        match = [a for a in self.AlarmList if a.lower().startswith(argin.lower()+':')]
        if not match: return 'UNKNOWN ALARM TAG!'
        # BUGFIX: list.pop() takes an index; match[0] is the declaration
        # string, so pop(match[0]) raised TypeError. Use remove() instead.
        self.AlarmList.remove(match[0])
        self.init_device(update_properties=False)
        self.db.put_device_property(self.get_name(),{'AlarmList':self.AlarmList})
        argout = self.AlarmList[:]
    finally:
        self.lock.release()
    return argout
# #########################################################################################################
## @name POGO Generated Methods
#------------------------------------------------------------------
# Device constructor
#------------------------------------------------------------------
def __init__(self,cl, name):
    """
    Device constructor: sets up logging, registers the device in the
    panic proxy table, initializes all persistent state containers and
    finally delegates to init_device().
    """
    PyTango.Device_4Impl.__init__(self,cl,name)
    self.call__init__(fandango.log.Logger,name,format='%(levelname)-8s %(asctime)s %(name)s: %(message)s')
    self.setLogLevel('DEBUG')
    # register this device instance so the panic module can reach it
    panic._proxies[name] = self
    fandango.tango.get_all_devices.set_keeptime(180)
    #Persistent data:
    self.TStarted = time.time() #Used to calcullate StartupDelay and Enabled behavior
    self.Alarms = None #dictionary for Alarm Structs
    self.FailedAlarms = fandango.CaselessDict() #This list will keep a track of those alarms that couldn't be evaluated
    self.SMS_Sent = collections.deque() #Epochs when messages has been sent
    self.Proxies = defaultdict_fromkey(lambda key: PyTango.DeviceProxy(key)) #list of proxies
    self.PastAlarms = defaultdict_fromkey(lambda key: list()) #{date1:[tag1,tag2,tag3,...],date2:...}
    self.PastValues = {} #It will store the attribute values when the alarm was triggered.
    self.AcknowledgedAlarms = set() #This list will keep a track of acknowledged alarms
    self.DisabledAlarms = {} #This list will keep a track of disabled alarms, and its timeouts
    self.LastAlarms = []
    self.SentEmails = collections.defaultdict(int)
    self.SentSMS = collections.defaultdict(int)
    self.Eval = None
    self.EvalTimes = {}
    self.Uncatched = ''
    self.DynamicAttributes = []
    self._initialized = False #Enabled once dyn_attr has finished
    self.worker = None
    # threading primitives for the update thread
    self.lock=threading.RLock();
    self.event=threading.Event();
    self.threadname=name
    self.updateThread = None
    self.last_attribute_check = 0
    self.db = PyTango.Database()
    #A class object will keep all declared alarms to search for children alarms and duplications
    if type(self).Panic is None: type(self).Panic = panic.AlarmAPI()
    self.debug('Out of __init__()')
    # explicit call (not via Tango) so we can pass allow=True
    PyAlarm.init_device(self,allow=True)
#------------------------------------------------------------------
# Device destructor
#------------------------------------------------------------------
def __del__(self):
    """Destructor; delegates all cleanup to delete_device()."""
    self.info( 'In PyAlarm.__del__() method ...')
    self.delete_device(True)
def delete_device(self,allow=False):
    """Stop the device's threads; acts only when *allow* is True."""
    self.warning( "0[Device delete_device method] for device %s"%self.get_name())
    if allow:
        self.set_state(PyTango.DevState.INIT)
        self.stop()
    else:
        self.warning('init() is not allowed, please restart the server')
    # NOTE: device proxies are deliberately NOT deleted here; doing so
    # has caused segmentation faults in the past.
#------------------------------------------------------------------
# Device initialization
#------------------------------------------------------------------
def init_device(self,update_properties=True,allow=True):
    """
    This method will be called first for creating the device.
    It will be called afterwards to force a reloading of Alarms or Properties

    :param update_properties: when True (or on first init), reload the
      alarm declarations and device properties from the database
    :param allow: must be True; re-running init() at runtime is forbidden
    :raises Exception: if *allow* is False or initialization fails
    """
    self.info( "In "+self.get_name()+"::init_device()")
    if not allow: raise Exception('init() is not allowed, please restart the server')
    try:
        if update_properties or not self._initialized:
            try:
                #Reloading the alarm properties
                self.lock.acquire()
                if self.Alarms is None:
                    self.Alarms = panic.AlarmAPI(self.get_name()) #Everything that was active/inactive is erased here?
                #This second part is not called for the first init(); only for the next ones
                #It's just for checking if alarms has been added/removed from the API
                else:
                    self.Alarms.load(self.get_name())
                    alarm_attrs = dict((a,v.get_attribute()) for a,v in self.Alarms.items())
                    # drop dynamic attributes whose alarm is no longer declared
                    for a in self.DynamicAttributes[:]:
                        if a not in alarm_attrs.values():
                            try:
                                self.info('Removing %s attribute'%a)
                                self.remove_attribute(a)
                                self.DynamicAttributes.remove(a)
                                if self.worker: self.worker.pop(a)
                            except:
                                self.warning( traceback.format_exc())
                    # create attributes for alarms added since the last init
                    for a,v in alarm_attrs.items():
                        if v not in self.DynamicAttributes[:]:
                            self.create_alarm_attribute(a)
            except Exception,e:
                raise e
            finally: self.lock.release()
        self.get_device_properties(self.get_device_class())
        self.info("Current Alarm server configuration is:\n\t"+"\n\t".join(
            sorted("%s: %s"%(k,getattr(self,k,None)) for k in
                panic.PyAlarmDefaultProperties)))
        #@TODO: Period should be in SECONDS!: this patch must be DEPRECATED
        if self.PollingPeriod>3600:
            self.warning('PERIODS IN MILLISECONDS ARE DEPRECATED!!!!!!!: '+
                '%s ms >> %s s'%(self.PollingPeriod,self.PollingPeriod*1e-3))
            self.PollingPeriod = self.PollingPeriod*1e-3 #Converting from ms to s
        # normalize string-valued boolean-ish properties
        if str(self.AlertOnRecovery).strip().lower() in ('false','no','none'): self.AlertOnRecovery=''
        if str(self.Reminder).strip().lower()=='false': self.Reminder=0
        if not self.UseTaurus:
            fandango.tango.USE_TAU,fandango.tango.TAU = False,None
        else:
            self.info('UseTaurus = %s'%self.UseTaurus)
        # create the formula evaluator only once (shared with the panic API)
        if self.Eval is None:
            self.Eval = self.Panic._eval = (SingletonTangoEval if self.UseProcess and PROCESS else fandango.tango.TangoEval)(
                timeout=self.EvalTimeout,keeptime=self.PollingPeriod/2.,
                trace=False, #self.LogLevel.upper()=='DEBUG',
                cache=1+self.AlarmThreshold, #Note, a cache too large will filter oscillations in alarms using .delta
                use_tau=False
                )
            [self.Eval.add_macro(*m) for m in self.Panic.macros]
            self.update_locals(check=True,update=True)
        self.Eval.set_timeout(self.EvalTimeout)
        if hasattr(self.Eval,'clear'): self.Eval.clear()
        if self.UseProcess and PROCESS and not self.worker:
            #Do not reduce worker timeout or you may have problems if main thread idles (e.g. Tango database is down)
            self.info('Configuring WorkerProcess ...')
            self.worker = WorkerProcess(self.Eval,start=True,timeout=self.PollingPeriod*max((self.AlarmThreshold,3)))
            self.worker.send('set_timeout','set_timeout',self.EvalTimeout)
            self.worker.command('import threading')
            #,timewait=0.05*self.PollingPeriod/len(self.Alarms))
            self.worker.add('previous',target='dict([(k,str(v)) for k,v in executor.previous.items()])',args='',period=self.PollingPeriod/2.,expire=self.AlarmThreshold*self.PollingPeriod)
            if self.UseTaurus:
                try:
                    self.worker.command('import taurus')
                    self.worker.add('update_polling','[taurus.Attribute(a).changePollingPeriod(%d) for a in taurus.Factory().tango_attrs.keys()]'%max((500,1e3*self.PollingPeriod/2.)),period=self.PollingPeriod)
                except: print traceback.format_exc()
            #raise Exception,'The StartupDelay should be asynchronous!!! It cannot be called before any "command" call or will block!'
            self.worker.pause(self.StartupDelay)
            self.info('Configured WorkerProcess, waiting %s seconds in background ...'%self.StartupDelay)
        self.PhoneBook = self.Alarms.phonebook
        if '$NAME' in self.LogFile:
            self.LogFile = self.LogFile.replace('$NAME',self.get_name().replace('/','-'))
        self.AddressList = dict(self.PhoneBook)
        for tag,alarm in self.Alarms.items():
            self.info('\n\t%s: %s\n\t\tFormula: %s\n\t\tSeverity: %s\n\t\tReceivers: %s'%(tag,alarm.description,alarm.formula,alarm.severity,alarm.receivers))
        #Create Alarm Attributes (not called in first init(), only afterwards
        if self._initialized: self.dyn_attr()
        #Get SnapConfig
        #if SNAP_ALLOWED and self.UseSnap:
        if SNAP_ALLOWED:
            try:
                self.snap = snap.SnapAPI()
            except Exception,e: self.warning('SnapConfig failed: %s'%e)
        # first init only: declare the device ready and start the update thread
        if not self._initialized:
            self.set_state(PyTango.DevState.ON)
            if not self.updateThread or not self.updateThread.isAlive():
                self.start()
            self.info( 'Ready to accept request ...'+'<'*40)
        self.setLogLevel(self.LogLevel)
    except Exception,e:
        self.info( 'Exception in PyAlarm.init_device(): \n%s'%traceback.format_exc())
        self.set_state(PyTango.DevState.FAULT)
        raise e
    return
#------------------------------------------------------------------
# Always excuted hook method
#------------------------------------------------------------------
def always_executed_hook(self):
self.debug("In "+ self.get_name()+ "::always_excuted_hook()")
try:
actives = list(reversed([(v.active,k) for k,v in self.Alarms.items() if v.active]))
if self.last_attribute_check and self.last_attribute_check<(time.time()-600.):
self.set_state(PyTango.DevState.FAULT)
| |
def backup_db_file(copy_flag):
    """
    Back up the current database file into DB_BKP_DIR_NAME.

    :param copy_flag: True to copy the file, False to move (rename) it
    :returns: path of the backup file, or None if the backup failed early
    """
    # BUGFIX: the header line had lost its 'def' keyword (syntax error)
    DB.session.close()
    try:  # generate timestamp from last-modified time of database file
        time_str = datetime.fromtimestamp(os.stat(DB_FILE_NAME).st_mtime).strftime('%Y%m%d_%H%M%S')
    except:  # if error then use 'now' timestamp
        time_str = datetime.now().strftime('%Y%m%d_%H%M%S')
    # BUGFIX: pre-bind so the final 'return' cannot raise NameError if an
    # exception occurs before bkp_name is assigned below
    bkp_name = None
    try:
        (dbname, dbext) = os.path.splitext(DB_FILE_NAME)
        bkp_name = DB_BKP_DIR_NAME + '/' + dbname + '_' + time_str + dbext
        if not os.path.exists(DB_BKP_DIR_NAME):
            os.makedirs(DB_BKP_DIR_NAME)
        if os.path.isfile(bkp_name):  # if target file exists then use 'now' timestamp
            time_str = datetime.now().strftime('%Y%m%d_%H%M%S')
            bkp_name = DB_BKP_DIR_NAME + '/' + dbname + '_' + time_str + dbext
        if copy_flag:
            shutil.copy2(DB_FILE_NAME, bkp_name)
            logger.info('Copied database file to: ' + bkp_name)
        else:
            os.renames(DB_FILE_NAME, bkp_name)
            logger.info('Moved old database file to: ' + bkp_name)
    except Exception:
        logger.exception('Error backing up database file')
    return bkp_name
def get_legacy_table_data(metadata, table_name, filter_crit=None, filter_value=None):
    """
    Read rows from a table of the previous (legacy) database.

    :param metadata: SQLAlchemy MetaData bound to the legacy DB engine
    :param table_name: name of the table to reflect and read
    :param filter_crit: optional column to filter on
    :param filter_value: value the filter column must equal
    :returns: list of row tuples, or None if the table could not be read
    """
    try:
        table = Table(table_name, metadata, autoload=True)
        if filter_crit is None:
            return table.select().execute().fetchall()
        # BUGFIX: the criterion must be applied to the select() statement
        # via where() before execution; a ResultProxy has no filter().
        return table.select().where(filter_crit == filter_value).execute().fetchall()
    except Exception as ex:
        logger.warn('Unable to read "{0}" table from previous database: {1}'.format(table_name, ex))
def restore_table(class_type, table_query_data, **kwargs):
    """
    Restore rows read from a legacy database table into the current DB.

    Existing rows (matched by 'id' when both sides have one) are updated
    in place; others are inserted. Columns missing from the legacy row
    are filled from kwargs['defaults'].

    :param class_type: SQLAlchemy model class of the destination table
    :param table_query_data: row sequence from get_legacy_table_data(),
      may be None/empty (then nothing is done)
    :param kwargs: must contain 'defaults', a dict of per-column fallbacks
    """
    if table_query_data:
        try:
            for row_data in table_query_data:
                # skip placeholder pilot rows ('-' callsign / '-None-' name)
                if (class_type is not Database.Pilot) or getattr(row_data, 'callsign', '') != '-' or \
                    getattr(row_data, 'name', '') != '-None-':
                    # try to locate an existing row with the same id
                    if 'id' in class_type.__table__.columns.keys() and \
                        'id' in row_data.keys():
                        db_update = class_type.query.filter(getattr(class_type,'id')==row_data['id']).first()
                    else:
                        db_update = None
                    if db_update is None:
                        # insert: copy legacy columns, fill the rest from defaults
                        new_data = class_type()
                        for col in class_type.__table__.columns.keys():
                            if col in row_data.keys():
                                if col != 'id':
                                    setattr(new_data, col, row_data[col])
                            else:
                                if col != 'id':
                                    setattr(new_data, col, kwargs['defaults'][col])
                        #logger.info('DEBUG row_data add: ' + str(getattr(new_data, match_name)))
                        DB.session.add(new_data)
                    else:
                        # update: overwrite with legacy values, default the rest
                        #logger.info('DEBUG row_data update: ' + str(getattr(row_data, match_name)))
                        for col in class_type.__table__.columns.keys():
                            if col in row_data.keys():
                                setattr(db_update, col, row_data[col])
                            else:
                                if col != 'id':
                                    setattr(db_update, col, kwargs['defaults'][col])
            DB.session.flush()
            logger.info('Database table "{0}" restored'.format(class_type.__name__))
        except Exception as ex:
            logger.warn('Error restoring "{0}" table from previous database: {1}'.format(class_type.__name__, ex))
            logger.debug(traceback.format_exc())
def recover_database():
    """
    Migrate data from a previous database file into a freshly created one.

    Reads the legacy tables directly from the old file, backs the file
    up, re-creates a clean schema with db_init(), then restores the data
    (applying version-specific migrations keyed on the stored server API).
    """
    try:
        logger.info('Recovering data from previous database')
        # load file directly
        engine = create_engine('sqlite:///%s' % DB_FILE_NAME, convert_unicode=True)
        metadata = MetaData(bind=engine)
        pilot_query_data = get_legacy_table_data(metadata, 'pilot')
        heat_query_data = get_legacy_table_data(metadata, 'heat')
        heatnode_query_data = get_legacy_table_data(metadata, 'heatnode')
        raceFormat_query_data = get_legacy_table_data(metadata, 'race_format')
        profiles_query_data = get_legacy_table_data(metadata, 'profiles')
        raceClass_query_data = get_legacy_table_data(metadata, 'race_class')
        raceMeta_query_data = get_legacy_table_data(metadata, 'saved_race_meta')
        racePilot_query_data = get_legacy_table_data(metadata, 'saved_pilot_race')
        raceLap_query_data = get_legacy_table_data(metadata, 'saved_race_lap')
        engine.dispose() # close connection after loading
        # API level of the server that wrote the legacy DB, drives migrations
        migrate_db_api = int(Options.get('server_api'))
        # UI options that should survive the migration
        carryoverOpts = [
            "timerName",
            "timerLogo",
            "hue_0",
            "sat_0",
            "lum_0_low",
            "lum_0_high",
            "contrast_0_low",
            "contrast_0_high",
            "hue_1",
            "sat_1",
            "lum_1_low",
            "lum_1_high",
            "contrast_1_low",
            "contrast_1_high",
            "currentLanguage",
            "currentProfile",
            "currentFormat",
            "calibrationMode",
            "MinLapSec",
            "MinLapBehavior",
            "ledBrightness",
            "colorNode_0",
            "colorNode_1",
            "colorNode_2",
            "colorNode_3",
            "colorNode_4",
            "colorNode_5",
            "colorNode_6",
            "colorNode_7",
            "osd_lapHeader",
            "osd_positionHeader"
        ]
        carryOver = {}
        for opt in carryoverOpts:
            val = Options.get(opt, None)
            if val is not None:
                carryOver[opt] = val
        # RSSI reduced by half for 2.0.0
        if migrate_db_api < 23:
            for profile in profiles_query_data:
                if profile.enter_ats:
                    enter_ats = json.loads(profile.enter_ats)
                    enter_ats["v"] = [val/2 for val in enter_ats["v"]]
                    profile.enter_ats = json.dumps(enter_ats)
                if profile.exit_ats:
                    exit_ats = json.loads(profile.exit_ats)
                    exit_ats["v"] = [val/2 for val in exit_ats["v"]]
                    profile.exit_ats = json.dumps(exit_ats)
    except Exception as ex:
        logger.warn('Error reading data from previous database: ' + str(ex))
    backup_db_file(False) # rename and move DB file
    db_init()
    # primary data recovery
    try:
        if pilot_query_data:
            DB.session.query(Database.Pilot).delete()
            restore_table(Database.Pilot, pilot_query_data, defaults={
                'name': 'New Pilot',
                'callsign': 'New Callsign',
                'team': DEF_TEAM_NAME,
                'phonetic': ''
            })
        if migrate_db_api < 27:
            # old heat DB structure; migrate node 0 to heat table
            # build list of heat meta
            heat_extracted_meta = []
            for row in heat_query_data:
                if row['node_index'] == 0:
                    heat_extracted_meta.append(row)
            restore_table(Database.Heat, heat_extracted_meta, defaults={
                'class_id': Database.CLASS_ID_NONE,
                'results': None,
                'cacheStatus': Results.CacheStatus.INVALID
            })
            # extract pilots from heats and load into heatnode
            heatnode_extracted_data = []
            for row in heat_query_data:
                heatnode_row = {}
                heatnode_row['heat_id'] = int(row['heat_id'])
                heatnode_row['node_index'] = int(row['node_index'])
                heatnode_row['pilot_id'] = int(row['pilot_id'])
                heatnode_extracted_data.append(heatnode_row)
            DB.session.query(Database.HeatNode).delete()
            restore_table(Database.HeatNode, heatnode_extracted_data, defaults={
                'pilot_id': Database.PILOT_ID_NONE
            })
        else:
            # current heat structure; use basic migration
            restore_table(Database.Heat, heat_query_data, defaults={
                'class_id': Database.CLASS_ID_NONE,
                'results': None,
                'cacheStatus': Results.CacheStatus.INVALID
            })
            restore_table(Database.HeatNode, heatnode_query_data, defaults={
                'pilot_id': Database.PILOT_ID_NONE
            })
        restore_table(Database.RaceFormat, raceFormat_query_data, defaults={
            'name': __("Migrated Format"),
            'race_mode': 0,
            'race_time_sec': 120,
            'start_delay_min': 2,
            'start_delay_max': 5,
            'staging_tones': 2,
            'number_laps_win': 0,
            'win_condition': WinCondition.MOST_LAPS,
            'team_racing_mode': False
        })
        restore_table(Database.Profiles, profiles_query_data, defaults={
            'name': __("Migrated Profile"),
            'frequencies': json.dumps(default_frequencies()),
            'enter_ats': json.dumps({'v': [None, None, None, None, None, None, None, None]}),
            'exit_ats': json.dumps({'v': [None, None, None, None, None, None, None, None]})
        })
        restore_table(Database.RaceClass, raceClass_query_data, defaults={
            'name': 'New class',
            'format_id': 0,
            'results': None,
            'cacheStatus': Results.CacheStatus.INVALID
        })
        # re-apply the UI options captured before the schema reset
        for opt in carryOver:
            Options.set(opt, carryOver[opt])
        logger.info('UI Options restored')
    except Exception as ex:
        logger.warn('Error while writing data from previous database: ' + str(ex))
        logger.debug(traceback.format_exc())
    # secondary data recovery
    try:
        if migrate_db_api < 23:
            # don't attempt to migrate race data older than 2.0
            pass
        else:
            restore_table(Database.SavedRaceMeta, raceMeta_query_data, defaults={
                'results': None,
                'cacheStatus': Results.CacheStatus.INVALID
            })
            restore_table(Database.SavedPilotRace, racePilot_query_data, defaults={
                'history_values': None,
                'history_times': None,
                'penalty_time': None,
                'penalty_desc': None,
                'enter_at': None,
                'exit_at': None
            })
            restore_table(Database.SavedRaceLap, raceLap_query_data, defaults={
                'source': None,
                'deleted': False
            })
    except Exception as ex:
        logger.warn('Error while writing data from previous database: ' + str(ex))
        logger.debug(traceback.format_exc())
    DB.session.commit()
    Events.trigger(Evt.DATABASE_RECOVER)
def expand_heats():
    """Ensure every heat has a HeatNode row for each hardware node index."""
    for heat in Database.Heat.query.all():
        for node_index in range(RACE.num_nodes):
            existing = Database.HeatNode.query.filter_by(
                heat_id=heat.id, node_index=node_index)
            if existing.count() == 0:
                DB.session.add(Database.HeatNode(
                    heat_id=heat.id, node_index=node_index,
                    pilot_id=Database.PILOT_ID_NONE))
    DB.session.commit()
def init_LED_effects():
    """Register LED event effects: built-in defaults overlaid by DB options."""
    effects = {
        Evt.RACE_STAGE: "stripColorOrange2_1",
        Evt.RACE_START: "stripColorGreenSolid",
        Evt.RACE_FINISH: "stripColorWhite4_4",
        Evt.RACE_STOP: "stripColorRedSolid",
        Evt.LAPS_CLEAR: "clear",
        Evt.CROSSING_ENTER: "stripColorSolid",
        Evt.CROSSING_EXIT: "stripColor1_1_4s",
        Evt.STARTUP: "rainbowCycle",
        Evt.SHUTDOWN: "clear",
    }
    # overlay any effects previously stored in the database options
    stored_effects = Options.get('ledEffects')
    if stored_effects:
        effects.update(json.loads(stored_effects))
    # register the manual-color effect, then one effect per event
    led_manager.setEventEffect("manualColor", "stripColor")
    for event_name, effect_name in effects.items():
        led_manager.setEventEffect(event_name, effect_name)
def initVRxController():
    """Create and return the VRxController when enabled; otherwise None."""
    # guard clauses: bail out on any missing/disabled configuration
    try:
        vrx_config = Config.VRX_CONTROL
    except AttributeError:
        logger.info('VRxController disabled: No VRX_CONTROL config option')
        return None
    try:
        vrx_enabled = vrx_config["ENABLED"]
    except KeyError:
        logger.error('VRxController disabled: config needs "ENABLED" key.')
        return None
    if not vrx_enabled:
        logger.info('VRxController disabled by config option')
        return None
    try:
        from VRxController import VRxController
    except ImportError as e:
        logger.error("VRxController unable to be imported")
        logger.error(e)
        return None
    # If got through import success, create the VRxController object
    return VRxController(Events,
                         vrx_config,
                         [node.frequency for node in INTERFACE.nodes])
def killVRxController(*args):
    """Event handler: drop the module-level VRxController reference.

    Accepts and ignores positional args so it can be attached as a generic
    event callback.
    """
    # NOTE(review): the original assigned to a *local* 'vrx_controller',
    # which had no effect outside this function; declared global so the
    # module-level reference is actually cleared -- confirm that
    # 'vrx_controller' is the intended module-level name.
    global vrx_controller
    logger.info('Killing VRxController')
    vrx_controller = None
#
# Program Initialize
#
# Log release/API versions and basic system info at startup
logger.info('Release: {0} / Server API: {1} / Latest Node API: {2}'.format(RELEASE_VERSION, SERVER_API, NODE_API_BEST))
idAndLogSystemInfo()

# log results of module initializations
Config.logInitResultMessage()
Language.logInitResultMessage()

# check if current log file owned by 'root' and change owner to 'pi' user if so
# (happens when the server was previously started via sudo)
if Current_log_path_name and checkSetFileOwnerPi(Current_log_path_name):
    logger.debug("Changed log file owner from 'root' to 'pi' (file: '{0}')".format(Current_log_path_name))

logger.info("Using log file: {0}".format(Current_log_path_name))
# Select the hardware interface implementation via environment variable;
# defaults to the 'RH' (RotorHazard) interface module
interface_type = os.environ.get('RH_INTERFACE', 'RH')
try:
    interfaceModule = importlib.import_module(interface_type + 'Interface')
    INTERFACE = interfaceModule.get_hardware_interface(config=Config)
except (ImportError, RuntimeError, IOError) as ex:
    logger.info('Unable to initialize nodes via ' + interface_type + 'Interface: ' + str(ex))

# If no hardware nodes came up, fall back to the mock interface -- unless
# serial ports were explicitly configured, in which case failing to open
# them is a fatal configuration error
if not INTERFACE or not INTERFACE.nodes or len(INTERFACE.nodes) <= 0:
    if not Config.SERIAL_PORTS or len(Config.SERIAL_PORTS) <= 0:
        interfaceModule = importlib.import_module('MockInterface')
        INTERFACE = interfaceModule.get_hardware_interface(config=Config)
    else:
        try:
            # distinguish "pyserial missing" from "ports unusable"
            importlib.import_module('serial')
            print 'Unable to initialize specified serial node(s): {0}'.format(Config.SERIAL_PORTS)
        except ImportError:
            print "Unable to import library for serial node(s) - is 'pyserial' installed?"
        sys.exit()
# Build the cluster of slave (secondary) timers from configuration
CLUSTER = Cluster()
hasMirrors = False
for index, slave_info in enumerate(Config.GENERAL['SLAVES']):
    if isinstance(slave_info, basestring):
        # bare address string -> promote to a full config dict (timer mode)
        slave_info = {'address': slave_info, 'mode': Slave.TIMER_MODE}
    if 'timeout' not in slave_info:
        slave_info['timeout'] = Config.GENERAL['SLAVE_TIMEOUT']
    # mirror-mode slaves must come after all timer-mode slaves; once a
    # mirror is seen, any later non-mirror entry aborts the remainder
    if 'mode' in slave_info and slave_info['mode'] == Slave.MIRROR_MODE:
        hasMirrors = True
    elif hasMirrors:
        print '** Mirror slaves must be last - ignoring remaining slave config **'
        break
    slave = Slave(index, slave_info)
    CLUSTER.addSlave(slave)
# set callback functions invoked by interface module
INTERFACE.pass_record_callback = pass_record_callback
INTERFACE.new_enter_or_exit_at_callback = new_enter_or_exit_at_callback
INTERFACE.node_crossing_callback = node_crossing_callback

# Save number of nodes found
RACE.num_nodes = len(INTERFACE.nodes)
if RACE.num_nodes == 0:
    logger.warning('*** WARNING: NO RECEIVER NODES FOUND ***')
else:
    logger.info('Number of nodes found: {0}'.format(RACE.num_nodes))

# Delay to get I2C addresses through interface class initialization
gevent.sleep(0.500)
# if no DB file then create it now (before "__()" fn used in 'buildServerInfo()')
db_inited_flag = False
if not os.path.exists(DB_FILE_NAME):
    logger.info('No database.db file found; creating initial database')
    db_init()
    db_inited_flag = True

# check if DB file owned by 'root' and change owner to 'pi' user if so
if checkSetFileOwnerPi(DB_FILE_NAME):
    logger.debug("Changed DB-file owner from 'root' to 'pi' (file: '{0}')".format(DB_FILE_NAME))

# pre-load frequently used option values into the in-memory cache
Options.primeGlobalsCache()

# collect server info for About panel
serverInfo = buildServerInfo()
if serverInfo['node_api_match'] is False:
    logger.info('** WARNING: Node API mismatch. **')

# compare the lowest node firmware API level against what this server
# supports / ships with, and warn accordingly
if RACE.num_nodes > 0:
    if serverInfo['node_api_lowest'] < NODE_API_SUPPORTED:
        logger.info('** WARNING: Node firmware is out of date and may not function properly **')
    elif serverInfo['node_api_lowest'] < NODE_API_BEST:
        logger.info('** NOTICE: Node firmware update is available **')
    elif serverInfo['node_api_lowest'] > NODE_API_BEST:
        logger.warn('** WARNING: Node firmware is newer than this server version supports **')
if not db_inited_flag:
try:
if int(Options.get('server_api')) < SERVER_API:
logger.info('Old server API version; recovering database')
recover_database()
elif not Database.Heat.query.count():
logger.info('Heats are empty; recovering database')
recover_database()
elif not Database.Profiles.query.count():
logger.info('Profiles are empty; recovering database')
recover_database()
elif not Database.RaceFormat.query.count():
logger.info('Formats are empty; recovering database')
recover_database()
except | |
5.905610E+02, 5.953200E+02, 6.001026E+02, 6.049090E+02, 6.097391E+02, 6.145930E+02,
6.194709E+02, 6.243727E+02, 6.292986E+02, 6.342486E+02, 6.392228E+02, 6.442212E+02,
6.492439E+02, 6.542909E+02, 6.593625E+02, 6.644585E+02, 6.695791E+02, 6.747243E+02,
6.798943E+02, 6.850890E+02, 6.903085E+02, 6.955530E+02, 7.008224E+02, 7.061168E+02,
7.114364E+02, 7.167811E+02, 7.221510E+02, 7.275463E+02, 7.329669E+02, 7.384129E+02,
7.438844E+02, 7.493815E+02, 7.549042E+02, 7.604525E+02, 7.660266E+02, 7.716266E+02,
7.772523E+02, 7.829041E+02, 7.885818E+02, 7.942856E+02, 8.000155E+02, 8.057716E+02,
8.115539E+02, 8.173626E+02, 8.231976E+02, 8.290591E+02, 8.349470E+02, 8.408615E+02,
8.468026E+02, 8.527704E+02, 8.587650E+02, 8.647863E+02, 8.708345E+02, 8.769095E+02,
8.830116E+02, 8.891406E+02, 8.952968E+02, 9.014801E+02, 9.076906E+02, 9.139283E+02,
9.201934E+02, 9.264858E+02, 9.328056E+02, 9.391529E+02, 9.455278E+02, 9.519302E+02,
9.583602E+02, 9.648179E+02, 9.713034E+02, 9.778167E+02, 9.843578E+02, 9.909268E+02,
9.975237E+02, 1.004149E+03, 1.010802E+03, 1.017483E+03, 1.024192E+03, 1.030929E+03,
1.037695E+03, 1.044489E+03, 1.051311E+03, 1.058162E+03, 1.065041E+03, 1.071948E+03,
1.078884E+03, 1.085849E+03, 1.092842E+03, 1.099863E+03, 1.106914E+03, 1.113993E+03,
1.121101E+03, 1.128237E+03, 1.135403E+03, 1.142597E+03, 1.149821E+03, 1.157073E+03,
1.164354E+03,
])
# ---------------------- M = 46, I = 1 ---------------------------
M = 46
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.290560E+00, 1.735586E+01, 3.438114E+01, 5.141645E+01, 6.846079E+01, 8.551397E+01,
1.025760E+02, 1.196469E+02, 1.367275E+02, 1.538201E+02, 1.709292E+02, 1.880624E+02,
2.052309E+02, 2.224487E+02, 2.397327E+02, 2.571016E+02, 2.745752E+02, 2.921739E+02,
3.099177E+02, 3.278264E+02, 3.459187E+02, 3.642124E+02, 3.827237E+02, 4.014681E+02,
4.204596E+02, 4.397109E+02, 4.592337E+02, 4.790386E+02, 4.991353E+02, 5.195323E+02,
5.402377E+02, 5.612585E+02, 5.826011E+02, 6.042715E+02, 6.262748E+02, 6.486160E+02,
6.712993E+02, 6.943287E+02, 7.177078E+02, 7.414400E+02, 7.655282E+02, 7.899751E+02,
8.147835E+02, 8.399554E+02, 8.654931E+02, 8.913986E+02, 9.176737E+02, 9.443201E+02,
9.713393E+02, 9.987328E+02, 1.026502E+03, 1.054648E+03, 1.083173E+03, 1.112076E+03,
1.141360E+03, 1.171026E+03, 1.201073E+03, 1.231504E+03, 1.262319E+03, 1.293518E+03,
1.325104E+03, 1.357075E+03, 1.389434E+03, 1.422180E+03, 1.455315E+03, 1.488838E+03,
1.522751E+03, 1.557055E+03, 1.591749E+03, 1.626834E+03, 1.662311E+03, 1.698180E+03,
1.734442E+03, 1.771097E+03, 1.808146E+03, 1.845590E+03, 1.883427E+03, 1.921660E+03,
1.960288E+03, 1.999313E+03, 2.038733E+03, 2.078551E+03, 2.118765E+03, 2.159377E+03,
2.200387E+03, 2.241795E+03, 2.283602E+03, 2.325808E+03, 2.368413E+03, 2.411418E+03,
2.454824E+03, 2.498630E+03, 2.542836E+03, 2.587444E+03, 2.632454E+03, 2.677865E+03,
2.723679E+03, 2.769895E+03, 2.816514E+03, 2.863536E+03, 2.910962E+03, 2.958792E+03,
3.007026E+03, 3.055665E+03, 3.104709E+03, 3.154158E+03, 3.204012E+03, 3.254272E+03,
3.304939E+03, 3.356012E+03, 3.407492E+03, 3.459379E+03, 3.511673E+03, 3.564376E+03,
3.617486E+03, 3.671005E+03, 3.724933E+03, 3.779269E+03, 3.834015E+03, 3.889171E+03,
3.944736E+03, 4.000712E+03, 4.057099E+03, 4.113896E+03, 4.171105E+03, 4.228725E+03,
4.286756E+03, 4.345200E+03, 4.404057E+03, 4.463326E+03, 4.523008E+03, 4.583104E+03,
4.643613E+03, 4.704536E+03, 4.765873E+03, 4.827625E+03, 4.889792E+03, 4.952374E+03,
5.015371E+03, 5.078784E+03, 5.142613E+03, 5.206859E+03, 5.271521E+03, 5.336600E+03,
5.402097E+03, 5.468011E+03, 5.534343E+03, 5.601093E+03, 5.668261E+03, 5.735848E+03,
5.803855E+03, 5.872280E+03, 5.941126E+03, 6.010391E+03, 6.080076E+03, 6.150183E+03,
6.220709E+03, 6.291658E+03, 6.363027E+03, 6.434819E+03, 6.507032E+03, 6.579668E+03,
6.652726E+03, 6.726207E+03, 6.800112E+03, 6.874440E+03, 6.949192E+03, 7.024368E+03,
7.099969E+03, 7.175994E+03, 7.252444E+03, 7.329320E+03, 7.406621E+03, 7.484349E+03,
7.562502E+03, 7.641082E+03,
])
# ---------------------- M = 46, I = 2 ---------------------------
M = 46
I = 2
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.301900E+00, 1.763255E+01, 3.493474E+01, 5.224710E+01, 6.956862E+01, 8.689912E+01,
1.042386E+02, 1.215871E+02, 1.389456E+02, 1.563165E+02, 1.737047E+02, 1.911184E+02,
2.085692E+02, 2.260721E+02, 2.436447E+02, 2.613063E+02, 2.790777E+02, 2.969798E+02,
3.150333E+02, 3.332584E+02, 3.516744E+02, 3.702993E+02, 3.891500E+02, 4.082419E+02,
4.275893E+02, 4.472052E+02, 4.671014E+02, 4.872886E+02, 5.077767E+02, 5.285742E+02,
5.496893E+02, 5.711290E+02, 5.928998E+02, 6.150077E+02, 6.374580E+02, 6.602555E+02,
6.834045E+02, 7.069091E+02, 7.307729E+02, 7.549992E+02, 7.795910E+02, 8.045510E+02,
8.298819E+02, 8.555859E+02, 8.816653E+02, 9.081219E+02, 9.349576E+02, 9.621742E+02,
9.897732E+02, 1.017756E+03, 1.046124E+03, 1.074879E+03, 1.104021E+03, 1.133552E+03,
1.163473E+03, 1.193785E+03, 1.224489E+03, 1.255586E+03, 1.287076E+03, 1.318961E+03,
1.351240E+03, 1.383916E+03, 1.416989E+03, 1.450458E+03, 1.484326E+03, 1.518592E+03,
1.553258E+03, 1.588323E+03, 1.623789E+03, 1.659656E+03, 1.695923E+03, 1.732593E+03,
1.769666E+03, 1.807141E+03, 1.845020E+03, 1.883302E+03, 1.921989E+03, 1.961081E+03,
2.000577E+03, 2.040480E+03, 2.080788E+03, 2.121503E+03, 2.162625E+03, 2.204154E+03,
2.246091E+03, 2.288436E+03, 2.331189E+03, 2.374352E+03, 2.417923E+03, 2.461904E+03,
2.506295E+03, 2.551096E+03, 2.596308E+03, 2.641930E+03, 2.687965E+03, 2.734411E+03,
2.781269E+03, 2.828539E+03, 2.876222E+03, 2.924319E+03, 2.972829E+03, 3.021752E+03,
3.071090E+03, 3.120842E+03, 3.171009E+03, 3.221591E+03, 3.272589E+03, 3.324002E+03,
3.375832E+03, 3.428078E+03, 3.480740E+03, 3.533820E+03, 3.587317E+03, 3.641232E+03,
3.695565E+03, 3.750317E+03, 3.805487E+03, 3.861076E+03, 3.917084E+03, 3.973512E+03,
4.030360E+03, 4.087629E+03, 4.145318E+03, 4.203427E+03, 4.261959E+03, 4.320911E+03,
4.380286E+03, 4.440082E+03, 4.500301E+03, 4.560943E+03, 4.622008E+03, 4.683497E+03,
4.745409E+03, 4.807745E+03, 4.870506E+03, 4.933691E+03, 4.997301E+03, 5.061336E+03,
5.125797E+03, 5.190684E+03, 5.255997E+03, 5.321737E+03, 5.387903E+03, 5.454497E+03,
5.521518E+03, 5.588967E+03, 5.656843E+03, 5.725148E+03, 5.793882E+03, 5.863045E+03,
5.932637E+03, 6.002659E+03, 6.073110E+03, 6.143992E+03, 6.215304E+03, 6.287047E+03,
6.359221E+03, 6.431827E+03, 6.504864E+03, 6.578333E+03, 6.652235E+03, 6.726569E+03,
6.801336E+03, 6.876537E+03, 6.952171E+03, 7.028239E+03, 7.104741E+03, 7.181677E+03,
7.259049E+03, 7.336855E+03, 7.415097E+03, 7.493774E+03, 7.572888E+03, 7.652438E+03,
7.732424E+03, 7.812847E+03,
])
# ---------------------- M = 46, I = 3 ---------------------------
M = 46
I = 3
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
2.665010E+00, 3.673026E+01, 7.280106E+01, 1.088928E+02, 1.450036E+02, 1.811332E+02,
2.172814E+02, 2.534487E+02, 2.896373E+02, 3.258533E+02, 3.621084E+02, 3.984219E+02,
4.348208E+02, 4.713391E+02, 5.080160E+02, 5.448948E+02, 5.820205E+02, 6.194387E+02,
6.571942E+02, 6.953301E+02, 7.338872E+02, 7.729036E+02, 8.124146E+02, 8.524529E+02,
8.930479E+02, 9.342266E+02, 9.760135E+02, 1.018431E+03, 1.061498E+03, 1.105234E+03,
1.149654E+03, 1.194773E+03, 1.240604E+03, 1.287160E+03, 1.334451E+03, 1.382487E+03,
1.431277E+03, 1.480828E+03, 1.531149E+03, 1.582246E+03, 1.634124E+03, 1.686790E+03,
1.740249E+03, 1.794505E+03, 1.849563E+03, 1.905427E+03, 1.962100E+03, 2.019586E+03,
2.077888E+03, 2.137009E+03, 2.196951E+03, 2.257718E+03, 2.319312E+03, 2.381735E+03,
2.444990E+03, 2.509077E+03, 2.574000E+03, 2.639760E+03, 2.706358E+03, 2.773797E+03,
2.842078E+03, 2.911202E+03, 2.981172E+03, 3.051987E+03, 3.123651E+03, 3.196163E+03,
3.269525E+03, 3.343739E+03, 3.418805E+03, 3.494725E+03, 3.571500E+03, 3.649130E+03,
3.727618E+03, 3.806963E+03, 3.887167E+03, 3.968231E+03, 4.050155E+03, 4.132942E+03,
4.216590E+03, 4.301103E+03, 4.386479E+03, 4.472721E+03, 4.559829E+03, 4.647804E+03,
4.736646E+03, 4.826357E+03, 4.916938E+03, 5.008388E+03, 5.100710E+03, 5.193903E+03,
5.287969E+03, 5.382908E+03, 5.478720E+03, 5.575408E+03, 5.672971E+03, 5.771411E+03,
5.870727E+03, 5.970921E+03, 6.071993E+03, 6.173945E+03, 6.276777E+03, 6.380489E+03,
6.485082E+03, 6.590557E+03, 6.696916E+03, 6.804157E+03, 6.912283E+03, 7.021293E+03,
7.131189E+03, 7.241971E+03, 7.353640E+03, 7.466197E+03, 7.579641E+03, 7.693975E+03,
7.809198E+03, 7.925312E+03, 8.042316E+03, 8.160212E+03, 8.279001E+03, 8.398682E+03,
8.519257E+03, 8.640727E+03, 8.763091E+03, 8.886351E+03, 9.010508E+03, 9.135561E+03,
9.261512E+03, 9.388361E+03, 9.516109E+03, 9.644757E+03, 9.774306E+03, 9.904755E+03,
1.003611E+04, 1.016836E+04, 1.030151E+04, 1.043557E+04, 1.057054E+04, 1.070641E+04,
1.084318E+04, 1.098086E+04, 1.111945E+04, 1.125894E+04, 1.139935E+04, 1.154066E+04,
1.168288E+04, 1.182601E+04, 1.197005E+04, 1.211501E+04, 1.226087E+04, 1.240765E+04,
1.255535E+04, 1.270395E+04, 1.285347E+04, 1.300391E+04, 1.315526E+04, 1.330753E+04,
1.346072E+04, 1.361482E+04, 1.376984E+04, 1.392579E+04, 1.408265E+04, 1.424043E+04,
1.439914E+04, 1.455876E+04, 1.471931E+04, 1.488078E+04, 1.504318E+04, 1.520650E+04,
1.537074E+04, 1.553591E+04, 1.570201E+04, 1.586903E+04, 1.603699E+04, 1.620587E+04,
1.637567E+04, 1.654641E+04,
])
# ---------------------- M = 46, I = 4 ---------------------------
M = 46
I = 4
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
5.185410E+00, 6.999021E+01, 1.386585E+02, 2.073673E+02, 2.761123E+02, 3.448931E+02,
4.137093E+02, 4.825616E+02, 5.514533E+02, 6.203936E+02, 6.894014E+02, 7.585087E+02,
8.277606E+02, 8.972156E+02, 9.669423E+02, 1.037017E+03, 1.107521E+03, 1.178536E+03,
1.250145E+03, 1.322428E+03, 1.395459E+03, 1.469311E+03, 1.544051E+03, 1.619739E+03,
1.696433E+03, 1.774184E+03, 1.853038E+03, 1.933040E+03, 2.014227E+03, 2.096634E+03,
2.180293E+03, 2.265233E+03, 2.351479E+03, 2.439054E+03, 2.527982E+03, 2.618279E+03,
2.709964E+03, 2.803053E+03, 2.897560E+03, 2.993499E+03, 3.090881E+03, 3.189718E+03,
3.290019E+03, 3.391794E+03, 3.495052E+03, 3.599800E+03, 3.706046E+03, 3.813797E+03,
3.923059E+03, 4.033837E+03, 4.146138E+03, 4.259966E+03, 4.375326E+03, 4.492223E+03,
4.610661E+03, 4.730643E+03, 4.852175E+03, 4.975258E+03, 5.099898E+03, 5.226096E+03,
5.353856E+03, 5.483181E+03, 5.614074E+03, 5.746537E+03, 5.880573E+03, 6.016185E+03,
6.153375E+03, 6.292145E+03, 6.432497E+03, 6.574434E+03, 6.717958E+03, 6.863070E+03,
7.009774E+03, 7.158070E+03, 7.307961E+03, 7.459448E+03, 7.612533E+03, 7.767219E+03,
7.923506E+03, 8.081397E+03, 8.240893E+03, 8.401995E+03, 8.564707E+03, 8.729028E+03,
8.894960E+03, 9.062506E+03, 9.231667E+03, 9.402443E+03, 9.574837E+03, 9.748851E+03,
9.924485E+03, 1.010174E+04, 1.028062E+04, 1.046112E+04, 1.064325E+04, 1.082701E+04,
1.101240E+04, 1.119942E+04, 1.138807E+04, 1.157835E+04, 1.177027E+04, 1.196382E+04,
1.215901E+04, 1.235584E+04, 1.255431E+04, 1.275442E+04, 1.295617E+04, 1.315956E+04,
1.336460E+04, 1.357129E+04, 1.377962E+04, 1.398961E+04, 1.420124E+04, 1.441453E+04,
1.462946E+04, 1.484606E+04, 1.506430E+04, 1.528421E+04, 1.550577E+04, 1.572899E+04,
1.595387E+04, 1.618041E+04, 1.640861E+04, 1.663848E+04, 1.687002E+04, 1.710322E+04,
1.733808E+04, 1.757462E+04, 1.781283E+04, 1.805271E+04, 1.829426E+04, 1.853748E+04,
1.878238E+04, 1.902896E+04, 1.927721E+04, 1.952715E+04, 1.977876E+04, 2.003205E+04,
2.028703E+04, 2.054369E+04, 2.080203E+04, 2.106207E+04, 2.132378E+04, 2.158719E+04,
2.185229E+04, 2.211908E+04, 2.238756E+04, 2.265773E+04, 2.292960E+04, 2.320316E+04,
2.347842E+04, 2.375538E+04, 2.403404E+04, 2.431440E+04, 2.459646E+04, 2.488023E+04,
2.516569E+04, 2.545287E+04, 2.574175E+04, 2.603234E+04, 2.632464E+04, 2.661864E+04,
2.691436E+04, 2.721180E+04, 2.751094E+04, 2.781181E+04, 2.811438E+04, 2.841868E+04,
2.872470E+04, 2.903243E+04, 2.934189E+04, 2.965306E+04, 2.996597E+04, 3.028059E+04,
3.059695E+04, 3.091503E+04,
])
# ---------------------- M = 47, I = 1 ---------------------------
M = 47
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.414270E+00, 1.057361E+02, 2.984678E+02, 5.479928E+02, 8.436907E+02, 1.180696E+03,
1.558672E+03, 1.981177E+03, 2.454584E+03, 2.987094E+03, 3.588121E+03, 4.268040E+03,
5.038147E+03, 5.910737E+03, 6.899230E+03, 8.018317E+03, 9.284110E+03, 1.071430E+04,
1.232829E+04, 1.414738E+04, 1.619491E+04, 1.849642E+04, 2.107982E+04, 2.397557E+04,
2.721685E+04, 3.083979E+04, 3.488362E+04, 3.939089E+04, 4.440770E+04, 4.998392E+04,
5.617341E+04, 6.303426E+04, 7.062902E+04, 7.884530E+04, 8.807839E+04, 9.825772E+04,
1.094663E+05, 1.217930E+05, 1.353330E+05, 1.501882E+05, 1.664674E+05, 1.842866E+05,
2.037697E+05, 2.250484E+05, 2.482633E+05, 2.735634E+05, 3.011073E+05, 3.310633E+05,
3.636098E+05, 3.989359E+05, 4.372419E+05, 4.787394E+05, 5.236525E+05, 5.722175E+05,
6.246841E+05, 6.813156E+05, 7.423894E+05, 8.081977E+05, 8.790482E+05, 9.552643E+05,
1.037186E+06, 1.125171E+06, 1.219593E+06, 1.320847E+06, 1.429344E+06, 1.545517E+06,
1.669818E+06, 1.802721E+06, 1.944721E+06, 2.096336E+06, 2.258109E+06, 2.430603E+06,
2.614408E+06, 2.810141E+06, 3.018444E+06, 3.239985E+06, 3.475461E+06, 3.725598E+06,
3.991152E+06, 4.272907E+06, 4.571682E+06, 4.888326E+06, 5.223722E+06, 5.578785E+06,
5.954468E+06, 6.351757E+06, 6.771677E+06, 7.215291E+06, 7.683698E+06, 8.178039E+06,
8.699497E+06, 9.249293E+06, 9.828694E+06, 1.043901E+07, 1.108160E+07, 1.175785E+07,
1.246923E+07, 1.321721E+07, 1.400336E+07, 1.482927E+07, 1.569657E+07, 1.660698E+07,
1.756225E+07, 1.856419E+07, 1.961465E+07, 2.071558E+07, 2.186893E+07, 2.307677E+07,
2.434119E+07, 2.566435E+07, 2.704850E+07, 2.849591E+07, 3.000895E+07, 3.159005E+07,
3.324171E+07, 3.496649E+07, 3.676703E+07, 3.864605E+07, 4.060633E+07, 4.265074E+07,
4.478221E+07, 4.700377E+07, 4.931851E+07, 5.172961E+07, 5.424035E+07, 5.685405E+07,
5.957418E+07, 6.240423E+07, 6.534783E+07, 6.840867E+07, 7.159056E+07, 7.489738E+07,
7.833310E+07, 8.190183E+07, 8.560772E+07, 8.945507E+07, 9.344824E+07, 9.759174E+07,
1.018901E+08, 1.063481E+08, 1.109706E+08, 1.157623E+08, 1.207284E+08, 1.258740E+08,
1.312043E+08, 1.367248E+08, 1.424409E+08, 1.483582E+08, 1.544824E+08, 1.608195E+08,
1.673754E+08, 1.741563E+08, 1.811683E+08, 1.884179E+08, 1.959115E+08, 2.036558E+08,
2.116577E+08, 2.199239E+08, 2.284616E+08, 2.372780E+08, 2.463805E+08, 2.557765E+08,
2.654736E+08, 2.754798E+08, 2.858029E+08, 2.964510E+08, 3.074325E+08, 3.187557E+08,
3.304293E+08, 3.424619E+08, 3.548625E+08, 3.676402E+08, 3.808043E+08, 3.943641E+08,
4.083293E+08, 4.227097E+08,
])
# ---------------------- M = 48, I = 1 ---------------------------
M = 48
I = 1
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
2.147898E+01, 3.996775E+02, 7.982281E+02, 1.204932E+03, 1.643306E+03, 2.140834E+03,
2.722097E+03, 3.410126E+03, 4.228639E+03, 5.203092E+03, 6.361663E+03, 7.735468E+03,
9.359297E+03, 1.127186E+04, 1.351617E+04, 1.614009E+04, 1.919639E+04, 2.274374E+04,
2.684666E+04, 3.157632E+04, 3.701089E+04, 4.323629E+04, 5.034661E+04, 5.844492E+04,
6.764383E+04, 7.806626E+04, 8.984612E+04, 1.031293E+05, 1.180743E+05, 1.348531E+05,
1.536525E+05, 1.746748E+05, 1.981385E+05, 2.242802E+05, 2.533549E+05, 2.856378E+05,
3.214253E+05, 3.610363E+05, 4.048136E+05, 4.531253E+05, 5.063665E+05, 5.649605E+05,
6.293605E+05, 7.000516E+05, 7.775523E+05, 8.624165E+05, 9.552351E+05, 1.056639E+06,
1.167298E+06, 1.287929E+06, 1.419291E+06, 1.562193E+06, 1.717494E+06, 1.886106E+06,
2.068996E+06, 2.267189E+06, 2.481773E+06, 2.713896E+06, 2.964776E+06, 3.235699E+06,
3.528026E+06, 3.843193E+06, 4.182715E+06, 4.548192E+06, 4.941312E+06, 5.363849E+06,
5.817677E+06, 6.304765E+06, 6.827188E+06, 7.387122E+06, 7.986861E+06, 8.628813E+06,
9.315504E+06, 1.004959E+07, 1.083385E+07, 1.167121E+07, 1.256472E+07, 1.351760E+07,
1.453319E+07, 1.561502E+07, 1.676676E+07, 1.799226E+07, 1.929555E+07, 2.068082E+07,
2.215247E+07, 2.371510E+07, 2.537348E+07, 2.713262E+07, 2.899773E+07, 3.097426E+07,
3.306787E+07, 3.528449E+07, 3.763024E+07, 4.011156E+07, 4.273513E+07, 4.550790E+07,
4.843709E+07, 5.153021E+07, 5.479511E+07, 5.823992E+07, 6.187306E+07, 6.570334E+07,
6.973986E+07, 7.399210E+07, 7.846990E+07, 8.318344E+07, 8.814330E+07, 9.336047E+07,
9.884634E+07, 1.046127E+08, 1.106718E+08, 1.170363E+08, 1.237193E+08, 1.307344E+08,
1.380958E+08, 1.458179E+08, 1.539159E+08, 1.624053E+08, 1.713024E+08, 1.806237E+08,
1.903866E+08, 2.006089E+08, 2.113091E+08, 2.225062E+08, 2.342199E+08, 2.464705E+08,
2.592791E+08, 2.726673E+08, 2.866575E+08, 3.012728E+08, 3.165370E+08, 3.324747E+08,
3.491113E+08, 3.664730E+08, 3.845867E+08, 4.034802E+08, 4.231821E+08, 4.437221E+08,
4.651304E+08, 4.874384E+08, 5.106785E+08, 5.348838E+08, 5.600887E+08, 5.863282E+08,
6.136387E+08, 6.420574E+08, 6.716228E+08, 7.023745E+08, 7.343529E+08, 7.675998E+08,
8.021584E+08, 8.380725E+08, 8.753878E+08, 9.141511E+08, 9.544098E+08, 9.962137E+08,
1.039613E+09, 1.084660E+09, 1.131409E+09, 1.179913E+09, 1.230230E+09, 1.282416E+09,
1.336532E+09, 1.392638E+09, 1.450797E+09, 1.511073E+09, 1.573532E+09, 1.638241E+09,
1.705270E+09, 1.774690E+09, 1.846572E+09, 1.920993E+09, 1.998028E+09, 2.077756E+09,
2.160258E+09, 2.245616E+09,
])
# ---------------------- M = 48, I = 2 ---------------------------
M = 48
I = 2
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.010601E+01, 1.888698E+02, 3.772493E+02, 5.694819E+02, 7.766830E+02, 1.011842E+03,
1.286579E+03, 1.611778E+03, 1.998651E+03, 2.459229E+03, 3.006830E+03, 3.656162E+03,
4.423668E+03, 5.327646E+03, 6.388423E+03, | |
is there ...
if isinstance(alpha, (int, float)):
pass # regular behavior
else: # dynamic alpha which can be calc'ed by _mean_
self._alpha_ = alpha
kwargs['alpha'] = 1.0
elif 'halflife' in kwargs:
# period cannot be recovered, force the user to specify it
self._pval = kwargs.pop('span') # exception if not there
elif 'com' in kwargs:
self._pval = kwargs.get('com') + 1 # alpha = 1 / (com + 1)
elif 'span' in kwargs:
# must be, period cannot be infered from alpha/halflife
self._pval = kwargs.get('span') # alpha = 2 / (alpha + 1)
else:
self._pval = kwargs.get(parg)
# set alphaperiod which is needed in the future
self._alpha_p = getattr(self._alpha_, '_minperiod', 1)
# Extra processing if special _ewm
if name == '_ewm': # specific behavior for custom _ewm
# exp smoothing in tech analysis uses 'adjust=False'
kwargs.setdefault('adjust', False) # set if not given
# collect special parameters
self._pearly = _pearly = kwargs.pop('_pearly', 0)
self._poffset = kwargs.pop('_poffset', 0)
self._seed = _seed = kwargs.pop('_seed', SEED_AVG)
# Determine where the actual calculation is offset to. _poffset
# is there to support the failure made by ta-lib when offseting
# the fast ema in the macd. _pofffset > _pval
poffset = self._poffset or self._pval
# For a dynamic alpha like in KAMA, the period of the dynamic
# alpha can exceed that of the calculated offset. But ta-lib
# makes a mistake an calculates that without taking that period
# into account if _seed is activated
# If no pval has been provided (span), don't take the alpha
# period, the period of the calling line will be used
if self._pval and self._alpha_p > poffset:
poffset += self._alpha_p - poffset - 1
p2 = self._minperiod - 1 + poffset - _pearly # seed end calc
p1 = p2 - self._pval # beginning of seed calculation
# beginning of result calculation. Includes the calculated seed
# value which is the 1st value to be returned. Except in KAMA,
# where ta-lib uses the value before that as seed for the
# exponential smoothing calculation
self._minidx = pidx = p2 - 1 # beginning of result calculation
trailprefix = pd.Series(np.nan, index=series.index[pidx:p2])
# Determine the actul seed value to use
if _seed == SEED_AVG:
trailprefix[-1] = series[p1:p2].mean()
elif _seed == SEED_LAST:
trailprefix[-1] = series[pidx]
elif _seed == SEED_SUM:
trailprefix[-1] = series[p1:p2].sum()
elif _seed == SEED_NONE:
pass # no seed wished ... do nothing
elif _seed == SEED_ZERO:
trailprefix[-1] = 0.0
elif _seed == SEED_ZFILL:
trailprefix[:] = 0.0
# complete trailer: prefix (seed at end) + series vals to calc
trailer = trailprefix.append(series[p2:])
else:
self._pearly = 0 # it will be checked in getattr
self._minidx = self._minperiod - 1
trailer = series[self._minidx:]
self._multifunc = getattr(trailer, lsname)(*args, **kwargs)
def _mean_exp(self, alpha, beta=None):
    """Exponential smoothing computed with a plain recursive loop.

    *alpha* weighs the incoming sample, *beta* weighs the accumulated
    value; when *beta* is not given (or falsy) it defaults to 1 - alpha.
    """
    beta = beta or (1.0 - alpha)

    def _smooth(vals):
        carry = vals[0]
        for idx in range(1, len(vals)):
            carry = beta * carry + alpha * vals[idx]
            vals[idx] = carry
        return vals

    return self._apply(_smooth)  # trigger __getattr__ for _apply
def _lfilter(self, alpha, beta=None):
    """Exponential smoothing via scipy.signal.lfilter (1-d IIR filter).

    Falls back to the pure-Python loop in _mean_exp when scipy is not
    available. *alpha* weighs new data, *beta* the previous value
    (defaults to 1 - alpha when not given/falsy).
    """
    try:
        import scipy.signal
    except ImportError:  # scipy missing -> use the tight-loop version
        return self._mean_exp(alpha, beta)

    beta = beta or (1.0 - alpha)

    def _filtered(x):
        # Equivalent to seeding the filter with initial conditions:
        #   zi = lfiltic([alpha], [1.0, -beta], y=[x[0]])
        #   x[1:], _ = lfilter([alpha], [1.0, -beta], x[1:], zi=zi)
        # In the 1-d case, pre-scaling x[0] by 1/alpha achieves the same,
        # since the first output multiplies it back by alpha
        x[0] /= alpha
        return scipy.signal.lfilter([alpha], [1.0, -beta], x)

    return self._apply(_filtered)  # trigger __getattr__ for _apply
def _mean(self):  # meant for ewm with dynamic alpha
    """Exponential mean where alpha is itself a per-step series
    (e.g. KAMA-style smoothing)."""
    def _dynalpha(vals):
        # reuse vals: not the original series, it's the trailer above
        # (index 0 carries the seed; the rest are the values to smooth)
        alphas = self._alpha_[self._alpha_p - 1:]  # -1: get array idx
        prev = vals[0]  # seed value, which isn't part of the result
        vals[0] = np.nan  # made 1 tick longer to carry seed, nan it
        for i, alphai in enumerate(alphas, 1):  # tight-loop-calc
            # classic ewm recursion, but with a per-step alpha value
            vals[i] = prev = prev + alphai * (vals[i] - prev)
        return vals  # can return vals, made Series via __getattr__
    return self._apply(_dynalpha)  # triggers __getattr__ for _apply
def __getattr__(self, attr):
    """Proxy any attribute to the underlying pandas windowed object,
    wrapping the call so results are re-aligned into a full-length
    series and re-wrapped as a Line clone.
    """
    if self._pval is not None and not self._is_seeded:
        # window operation overlap with the 1st calc point ... -1
        self._minperiod += self._pval - self._pearly - 1

    # for a dynamic alpha, the period of the alpha can exceed minp
    self._minperiod = max(self._minperiod, self._alpha_p)

    op = getattr(self._multifunc, attr)  # get real op/let exp propag

    def call_op(*args, **kwargs):  # actual op executor
        # full-length NaN series; only [_minidx:] gets real values
        result = pd.Series(np.nan, index=self._series.index)  # prep
        sargs = []  # cov takes an "other" parameter for example
        for arg in args:
            if isinstance(arg, Line):
                # align Line arguments to the same calculation window
                arg = arg._series[self._minidx:]
            sargs.append(arg)
        result[self._minidx:] = r = op(*sargs, **kwargs)  # run/store
        # keep the dtype the op produced (avoid all-NaN float upcast)
        result = result.astype(r.dtype, copy=False)
        return self._line._clone(result, period=self._minperiod)

    return call_op
def __getitem__(self, item):
    """Positional indexing on the wrapped series, returned as a clone."""
    selected = self._series.iloc[item]
    return self._line._clone(selected)
@property
def _seeded(self):
    # NOTE: side-effecting property -- accessing it marks this op as
    # seeded (suppresses the minperiod adjustment in __getattr__)
    self._is_seeded = True  # call if applied after a seed
    return self
def real_multifunc_op(self, *args, **kwargs):
    # the actual attribute installed on Line: instantiate the wrapper
    # operation object defined above for this line
    return _MultiFunc_Op(self, *args, **kwargs)
linesops.install_cls(name=name, attr=real_multifunc_op,
propertize=propertize)
class MetaLine(type):
    """Metaclass for Line.

    Intercepts instance creation to normalize the many accepted input
    types (LinesHolder, Lines, Line, pandas Series/DataFrame, raw values)
    into the instance's _series/_minperiod attributes before __init__.
    """

    def _line_from_dataframe(cls, self, df, colname):
        """Pick the column named *colname* (case-insensitive) from *df*,
        falling back to the configured OHLC index mapping, else column 0."""
        # it must be dataframe(-like) with dimensions
        colnames = [x.lower() for x in df.columns]
        try:
            idx = colnames.index(colname)  # try first by name
        except ValueError:  # else pre-def index ... or default to 0
            idx = config.OHLC_INDICES.get(colname, 0)

        # TBD: In this situation the user could be made aware of the invalid
        # inputindex (warning and reset to 0 or exception)
        if idx >= len(colnames):  # sanity check, not beyond possible
            idx = 0  # default mapping if sanity check fails

        # Finally, assign values
        self._minperiod = 1
        self._series = df.iloc[:, idx]

    def __call__(cls, val=None, name='', index=None, *args, **kwargs):
        self = cls.__new__(cls, *args, **kwargs)  # create instance

        # Process input
        if isinstance(val, linesholder.LinesHolder):
            val = val.outputs[0]  # get 1st line and process
            self._minperiod = val._minperiod
            self._series = val._series
        elif isinstance(val, Lines):
            val = val[0]  # get 1st line and process
            self._minperiod = val._minperiod
            self._series = val._series
        elif isinstance(val, Line):
            # NOTE(review): the original contained a second, identical
            # "elif isinstance(val, Line)" branch right after this one;
            # it was unreachable dead code and has been removed
            self._minperiod = val._minperiod
            self._series = val._series
        elif isinstance(val, pd.Series):
            self._minperiod = 1
            self._series = val
        elif isinstance(val, pd.DataFrame):
            cls._line_from_dataframe(self, val, name)
        else:
            # Don't know how to convert, store and pray
            self._minperiod = 1
            if index is None:
                self._series = val  # 1st column
            else:
                self._series = pd.Series(val, index=index)

        self._name = name  # fix the name of the data series
        self.__init__(*args, **kwargs)  # init instance
        return self  # return the instance
class Line(metaclass=MetaLine):
    # class-level defaults; per-instance values are set by MetaLine.__call__
    _minperiod = 1  # bars needed before values are meaningful
    _series = None  # the wrapped pandas Series
    _name = None  # name assigned to the data series
def __hash__(self):
    # delegate to the default identity-based hash explicitly
    # (kept explicit because comparison operators are installed on this
    # class below, which would otherwise disable hashing)
    return super().__hash__()
# Install the different proxy operations
# (binary_op, reduction_op, standard_op and multifunc_op are factory
# functions defined earlier in this module; each installs a proxy
# attribute on this class via linesops)
for name in linesops._BINOPS:
    binary_op(name)

for name, opargs in linesops._REDOPS.items():
    reduction_op(name, **opargs)

for name, opargs in linesops._STDOPS.items():
    standard_op(name, **opargs)

for name, opargs in linesops._MULTIFUNCOPS.items():
    multifunc_op(name, **opargs)
def __call__(self, ago=0, val=np.nan):
    """Call operator for a line.

    - ago truthy (non-zero): return the line shifted by -ago periods
    - ago is None: treated as "ago not meant"; val is forced to None so
      a copy of the underlying series is returned as a clone
    - otherwise: return a clone carrying *val* over the same index
      (with the default, this yields a NaN-filled clone)
    """
    if ago:
        return self.shift(periods=-ago)

    if ago is None:
        val = None  # called as in (None, ...) ago wasn't meant

    if val is None:
        val = self._series.copy()

    return self._clone(val, index=self._series.index)
def __iter__(self):
    # iterate directly over the underlying series values
    return iter(self._series)
def __len__(self):
    # length of the underlying series (includes warm-up/NaN region)
    return len(self._series)
def __getitem__(self, item):
    """Positional indexing/slicing; the result is wrapped in a clone."""
    sliced = self._series.iloc[item]
    return self._clone(sliced)
def __setitem__(self, item, value):
    # assignment is delegated straight to the underlying series
    self._series[item] = value
def _clone(self, series, period=None, index=None):
    """Wrap *series* in a new Line instance.

    The clone keeps this line's minimum period unless a truthy *period*
    override is supplied; *index* is forwarded to the constructor.
    """
    cloned = self.__class__(series, index=index)
    cloned._minperiod = period if period else self._minperiod
    return cloned
@property
def mpseries(self):
    # view of the series starting at the minimum period
    # (i.e. skipping the warm-up region before values are meaningful)
    return self._series[self._minperiod - 1:]
@property
def series(self):
    """Return the underlying pandas Series, renamed to this line's name.

    Fix: pandas Series.rename(..., inplace=True) returns None, so the
    original "return self._series.rename(...)" returned None instead of
    the series. Rename in place, then return the series object.
    """
    self._series.rename(self._name, inplace=True)
    return self._series
@property
def index(self):
    # expose the underlying series' index directly
    return self._series.index
def _period(self, period, rolling=False, val=None):
# return the line with the period increased by period
inc = period - rolling
if not inc:
return self
if val is not None: # set entire changed period to val
idx0 = self._minperiod - 1
idx1 = idx0 + (inc or 1) # maybe no period inc only setval
if idx1 < idx0: # | |
os.path.join(settings.DEIS_LOG_DIR, self.id + '.log')
if not os.path.exists(path):
raise EnvironmentError('Could not locate logs')
data = subprocess.check_output(['tail', '-n', str(settings.LOG_LINES), path])
return data
def run(self, command):
    """Run a one-off command in an ephemeral app container.

    Picks a random runtime node, builds a 'docker run' invocation using
    the latest release's image and config env vars, and executes it on
    the node. Returns whatever node.run() returns.

    Raises EnvironmentError when no runtime nodes are available.
    """
    # TODO: add support for interactive shell
    # order_by('?') = random node selection
    nodes = self.formation.node_set.filter(layer__runtime=True).order_by('?')
    if not nodes:
        raise EnvironmentError('No nodes available to run command')
    app_id, node = self.id, nodes[0]
    # most recent release for this app
    release = self.release_set.order_by('-created')[0]
    # prepare ssh command
    version = release.version
    image = release.build.image + ":v{}".format(release.version)
    # NOTE(review): '-rm' is the legacy single-dash docker flag --
    # confirm target docker version before changing to '--rm'
    docker_args = ' '.join(['-a', 'stdout', '-a', 'stderr', '-rm', image])
    # NOTE: format(**locals()) inside the list comprehension relies on
    # Python 2 scoping (k/v leak into the enclosing function's locals)
    env_args = ' '.join(["-e '{k}={v}'".format(**locals())
                         for k, v in release.config.values.items()])
    log_event(self, "deis run '{}'".format(command))
    command = "sudo docker run {env_args} {docker_args} {command}".format(**locals())
    return node.run(command)
class ContainerManager(models.Manager):
    """Manager providing scale/balance operations over app containers."""

    def scale(self, app, structure, **kwargs):
        """Scale containers up or down to match requested.

        :param app: the App whose containers are scaled
        :param structure: dict mapping container type -> desired count
        :returns: True if any containers were created or destroyed
        """
        requested_containers = structure.copy()
        formation = app.formation
        # increment new container nums off the most recent container
        all_containers = app.container_set.all().order_by('-created')
        container_num = 1 if not all_containers else all_containers[0].num + 1
        msg = 'Containers scaled ' + ' '.join(
            "{}={}".format(k, v) for k, v in requested_containers.items())
        # iterate and scale by container type (web, worker, etc)
        changed = False
        # Bug fix: iterate over a snapshot of the keys.  The loop body pops
        # entries from requested_containers, and mutating a dict while
        # iterating its .keys() view raises RuntimeError on Python 3.
        for container_type in list(requested_containers.keys()):
            containers = list(app.container_set.filter(type=container_type).order_by('created'))
            requested = requested_containers.pop(container_type)
            diff = requested - len(containers)
            if diff == 0:
                continue
            changed = True
            while diff < 0:
                # get the next node with the most containers
                node = Node.objects.next_runtime_node(
                    formation, container_type, reverse=True)
                # delete a container attached to that node
                for c in containers:
                    if node == c.node:
                        containers.remove(c)
                        c.delete()
                        diff += 1
                        break
            while diff > 0:
                # get the next node with the fewest containers
                node = Node.objects.next_runtime_node(formation, container_type)
                port = Node.objects.next_runtime_port(formation)
                c = Container.objects.create(owner=app.owner,
                                             formation=formation,
                                             node=node,
                                             app=app,
                                             type=container_type,
                                             num=container_num,
                                             port=port)
                containers.append(c)
                container_num += 1
                diff -= 1
        log_event(app, msg)
        return changed

    def balance(self, formation, **kwargs):
        """Even out container counts across a formation's runtime nodes.

        Moves the oldest container off the most-loaded node onto the
        least-loaded node until max-min counts differ by at most 1 per type.

        :returns: True if any containers were moved
        """
        runtime_nodes = formation.node_set.filter(layer__runtime=True).order_by('created')
        all_containers = self.filter(formation=formation).order_by('-created')
        # get the next container number (e.g. web.19)
        container_num = 1 if not all_containers else all_containers[0].num + 1
        changed = False
        app = None
        # iterate by unique container type
        for container_type in set([c.type for c in all_containers]):
            # map node container counts => { 2: [b3, b4], 3: [ b1, b2 ] }
            n_map = {}
            for node in runtime_nodes:
                ct = len(node.container_set.filter(type=container_type))
                n_map.setdefault(ct, []).append(node)
            # loop until diff between min and max is 1 or 0
            while max(n_map.keys()) - min(n_map.keys()) > 1:
                # get the most over-utilized node
                n_max = max(n_map.keys())
                n_over = n_map[n_max].pop(0)
                if len(n_map[n_max]) == 0:
                    del n_map[n_max]
                # get the most under-utilized node
                n_min = min(n_map.keys())
                n_under = n_map[n_min].pop(0)
                if len(n_map[n_min]) == 0:
                    del n_map[n_min]
                # delete the oldest container from the most over-utilized node
                c = n_over.container_set.filter(type=container_type).order_by('created')[0]
                app = c.app  # pull ref to app for recreating the container
                c.delete()
                # create a container on the most under-utilized node
                self.create(owner=formation.owner,
                            formation=formation,
                            app=app,
                            type=container_type,
                            num=container_num,
                            node=n_under,
                            port=Node.objects.next_runtime_port(formation))
                container_num += 1
                # update the n_map accordingly
                for n in (n_over, n_under):
                    ct = len(n.container_set.filter(type=container_type))
                    n_map.setdefault(ct, []).append(n)
                changed = True
        if app:
            log_event(app, 'Containers balanced')
        return changed
@python_2_unicode_compatible
class Container(UuidAuditedModel):
    """
    Docker container used to securely host an application process.
    """
    objects = ContainerManager()
    # ownership and placement
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    formation = models.ForeignKey('Formation')
    node = models.ForeignKey('Node')
    app = models.ForeignKey('App')
    # process type (e.g. 'web', 'worker') and per-type ordinal (web.1, web.2, ...)
    type = models.CharField(max_length=128)
    num = models.PositiveIntegerField()
    port = models.PositiveIntegerField()
    # TODO: add celery beat tasks for monitoring node health
    status = models.CharField(max_length=64, default='up')

    def short_name(self):
        # e.g. "web.1" -- also used as the admin list display name
        return "{}.{}".format(self.type, self.num)
    short_name.short_description = 'Name'

    def __str__(self):
        return "{0} {1}".format(self.formation.id, self.short_name())

    class Meta:
        get_latest_by = '-created'
        ordering = ['created']
        # container num unique per app/type; port unique per formation
        unique_together = (('app', 'type', 'num'),
                           ('formation', 'port'))
@python_2_unicode_compatible
class Config(UuidAuditedModel):
    """
    Set of configuration values applied as environment variables
    during runtime execution of the Application.
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    version = models.PositiveIntegerField()
    # JSON dict of KEY -> value environment pairs
    values = JSONField(default='{}', blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'),)

    def __str__(self):
        return "{0}-v{1}".format(self.app.id, self.version)
@python_2_unicode_compatible
class Push(UuidAuditedModel):
    """
    Instance of a push used to trigger an application build
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    sha = models.CharField(max_length=40)
    # SSH key fingerprint and git-receive metadata, kept for auditing
    fingerprint = models.CharField(max_length=255)
    receive_user = models.CharField(max_length=255)
    receive_repo = models.CharField(max_length=255)
    ssh_connection = models.CharField(max_length=255)
    ssh_original_command = models.CharField(max_length=255)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.sha[:7])
@python_2_unicode_compatible
class Build(UuidAuditedModel):
    """
    Instance of a software build used by runtime nodes
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    sha = models.CharField('SHA', max_length=255, blank=True)
    output = models.TextField(blank=True)
    # docker repository used to run this build
    image = models.CharField(max_length=256, default='deis/slugbuilder')
    procfile = JSONField(blank=True)
    dockerfile = models.TextField(blank=True)
    config = JSONField(blank=True)
    url = models.URLField('URL')
    size = models.IntegerField(blank=True, null=True)
    checksum = models.CharField(max_length=255, blank=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'uuid'),)

    def __str__(self):
        return "{0}-{1}".format(self.app.id, self.sha[:7])
@python_2_unicode_compatible
class Release(UuidAuditedModel):
    """
    Software release deployed by the application platform

    Releases contain a :class:`Build` and a :class:`Config`.
    """
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    app = models.ForeignKey('App')
    version = models.PositiveIntegerField()
    summary = models.TextField(blank=True, null=True)
    config = models.ForeignKey('Config')
    build = models.ForeignKey('Build', blank=True, null=True)

    class Meta:
        get_latest_by = 'created'
        ordering = ['-created']
        unique_together = (('app', 'version'),)

    def __str__(self):
        return "{0}-v{1}".format(self.app.id, self.version)

    def previous(self):
        """
        Return the previous Release to this one.

        :return: the previous :class:`Release`, or None
        """
        releases = self.app.release_set
        if self.pk:
            releases = releases.exclude(pk=self.pk)
        try:
            # Get the Release previous to this one
            prev_release = releases.latest()
        except Release.DoesNotExist:
            prev_release = None
        return prev_release

    def save(self, *args, **kwargs):
        """Fill in a human-readable change summary (if absent), then save.

        Bug fix: ``build`` is a nullable FK, but the original code
        dereferenced ``self.build.sha`` unconditionally, raising
        AttributeError for releases created without a build.
        """
        if not self.summary:
            self.summary = ''
            prev_release = self.previous()
            # compare this build to the previous build
            old_build = prev_release.build if prev_release else None
            # if the build changed, log it and who pushed it
            if self.build != old_build and self.build and self.build.sha:
                self.summary += "{} deployed {}".format(self.build.owner, self.build.sha[:7])
            # compare this config to the previous config
            old_config = prev_release.config if prev_release else None
            # if the config data changed, log the dict diff
            if self.config != old_config:
                dict1 = self.config.values
                dict2 = old_config.values if old_config else {}
                diff = dict_diff(dict1, dict2)
                # try to be as succinct as possible
                added = ', '.join(k for k in diff.get('added', {}))
                added = 'added ' + added if added else ''
                changed = ', '.join(k for k in diff.get('changed', {}))
                changed = 'changed ' + changed if changed else ''
                deleted = ', '.join(k for k in diff.get('deleted', {}))
                deleted = 'deleted ' + deleted if deleted else ''
                changes = ', '.join(i for i in (added, changed, deleted) if i)
                if changes:
                    if self.summary:
                        self.summary += ' and '
                    self.summary += "{} {}".format(self.config.owner, changes)
            if not self.summary:
                if self.version == 1:
                    self.summary = "{} created the initial release".format(self.owner)
                else:
                    self.summary = "{} changed nothing".format(self.owner)
        super(Release, self).save(*args, **kwargs)
@receiver(release_signal)
def new_release(sender, **kwargs):
    """
    Catch a release_signal and create a new release
    using the latest Build and Config for an application.

    Releases start at v1 and auto-increment.
    """
    user, app = kwargs['user'], kwargs['app']
    last_release = app.release_set.latest()
    config = kwargs.get('config', last_release.config)
    build = kwargs.get('build', last_release.build)
    # overwrite config with build.config if the keys don't exist
    if build and build.config:
        new_values = {}
        for k, v in build.config.items():
            # idiomatic membership test ('k not in', was 'not k in')
            if k not in config.values:
                new_values[k] = v
        if new_values:
            # update with current config
            new_values.update(config.values)
            config = Config.objects.create(
                version=config.version + 1, owner=user,
                app=app, values=new_values)
    # create new release and auto-increment version
    new_version = last_release.version + 1
    release = Release.objects.create(
        owner=user, app=app, config=config,
        build=build, version=new_version)
    # publish release to registry as new docker image
    if settings.REGISTRY_URL:
        repository_path = "{}/{}".format(user.username, app.id)
        tag = 'v{}'.format(new_version)
        publish_release(repository_path, config.values, tag)
    return release
# define update/delete callbacks for synchronizing
# models with the configuration management backend
def _publish_to_cm(**kwargs):
    """Push the saved model instance to the configuration management backend."""
    instance = kwargs['instance']
    instance.publish()
def _log_build_created(**kwargs):
    """Log an app event when a Build row is first created (not on updates)."""
    if not kwargs.get('created'):
        return
    build = kwargs['instance']
    log_event(build.app, "Build {} created".format(build))
def _log_release_created(**kwargs):
    """Log an app event when a Release row is first created (not on updates)."""
    if not kwargs.get('created'):
        return
    release = kwargs['instance']
    log_event(release.app, "Release {} created".format(release))
def _log_config_updated(**kwargs):
    """Log an app event whenever a Config row is saved."""
    instance = kwargs['instance']
    log_event(instance.app, "Config {} updated".format(instance))
def _etcd_publish_key(**kwargs):
    """Publish an SSH public key to etcd under /deis/builder/users/<user>/<fp>."""
    key = kwargs['instance']
    path = '/deis/builder/users/{}/{}'.format(
        key.owner.username, fingerprint(key.public))
    _etcd_client.write(path, key.public)
def _etcd_purge_key(**kwargs):
    """Remove a deleted SSH public key from etcd."""
    key = kwargs['instance']
    path = '/deis/builder/users/{}/{}'.format(
        key.owner.username, fingerprint(key.public))
    _etcd_client.delete(path)
def _etcd_purge_user(**kwargs):
    """Recursively remove a deleted user's key directory from etcd."""
    path = '/deis/builder/users/{}'.format(kwargs['instance'].username)
    _etcd_client.delete(path, dir=True, recursive=True)
# Connect Django model signals
# Sync database updates with the configuration management backend
post_save.connect(_publish_to_cm, sender=App, dispatch_uid='api.models')
post_save.connect(_publish_to_cm, sender=Formation, dispatch_uid='api.models')
# Log significant app-related events
post_save.connect(_log_build_created, sender=Build, dispatch_uid='api.models')
post_save.connect(_log_release_created, sender=Release, dispatch_uid='api.models')
post_save.connect(_log_config_updated, sender=Config, dispatch_uid='api.models')
# wire up etcd publishing if we can connect
try:
    _etcd_client = etcd.Client(host=settings.ETCD_HOST, port=int(settings.ETCD_PORT))
    # probe the cluster; raises EtcdException when unreachable
    _etcd_client.get('/deis')
except etcd.EtcdException:
    logger.log(logging.WARNING, 'Cannot synchronize with etcd cluster')
    # leave the client unset so key-sync handlers become no-ops
    _etcd_client = None
if _etcd_client:
| |
<reponame>mburakkalkan/thonny
from thonny.common import (
InputSubmission,
InterruptCommand,
EOFCommand,
parse_message,
ToplevelCommand,
ToplevelResponse,
InlineCommand,
InlineResponse,
UserError,
serialize_message,
BackendEvent,
ValueInfo,
execute_system_command,
)
import sys
import logging
import traceback
import queue
from thonny.plugins.micropython.connection import (
ConnectionClosedException,
ConnectionFailedException,
)
from textwrap import dedent
import ast
import re
from queue import Queue, Empty
import threading
import os
import time
from thonny.misc_utils import find_volumes_by_name, sizeof_fmt
import jedi
import io
import tokenize
from thonny.running import EXPECTED_TERMINATION_CODE
import binascii
import shutil
# See https://github.com/dhylands/rshell/blob/master/rshell/main.py
# for UART_BUFFER_SIZE vs USB_BUFFER_SIZE
# ampy uses 32 bytes: https://github.com/pycampers/ampy/blob/master/ampy/files.py
# I'm not worrying so much, because reader thread reads continuously
# and writer (SerialConnection) has it's own blocks and delays
BUFFER_SIZE = 512
BAUDRATE = 115200
ENCODING = "utf-8"
# Commands (raw-REPL control bytes: Ctrl-A .. Ctrl-D)
RAW_MODE_CMD = b"\x01"
NORMAL_MODE_CMD = b"\x02"
INTERRUPT_CMD = b"\x03"
SOFT_REBOOT_CMD = b"\x04"
# Output tokens delimiting Thonny's own messages inside device output
THONNY_MSG_START = b"\x02<thonny>"
THONNY_MSG_END = b"</thonny>\x04"
EOT = b"\x04"
NORMAL_PROMPT = b">>> "
LF = b"\n"
OK = b"OK"
# first prompt when switching to raw mode (or after soft reboot in raw mode)
# Looks like it's not translatable in CP
# https://github.com/adafruit/circuitpython/blob/master/locale/circuitpython.pot
FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\n>"
FIRST_RAW_PROMPT_SUFFIX = b"\r\n>"
RAW_PROMPT = b">"
# Regex matching any byte sequence that can terminate/split an output block.
BLOCK_CLOSERS = re.compile(
    b"|".join(map(re.escape, [LF, EOT, THONNY_MSG_START, NORMAL_PROMPT, FIRST_RAW_PROMPT]))
)
logger = logging.getLogger("thonny.micropython.backend")
def debug(msg):
    # Debug tracing is disabled: the early return is a deliberate toggle.
    # Remove the `return` to print trace messages to stderr during development.
    return
    print(msg, file=sys.stderr)
class MicroPythonBackend:
    def __init__(self, connection, clean, api_stubs_path):
        """Set up state, start the stdin-reader thread and enter the main loop.

        :param connection: connection object to the MicroPython device
        :param clean: if True, interrupt the device and wipe its environment first
        :param api_stubs_path: directory containing API stub files for completion
        """
        self._connection = connection
        self._local_cwd = None
        self._cwd = None
        # flags set by the reader thread, consumed by the main loop
        self._interrupt_requested = False
        self._cancel_requested = False
        self._command_queue = Queue()  # populated by reader thread
        self._progress_times = {}
        self._api_stubs_path = api_stubs_path
        self._command_reading_thread = threading.Thread(target=self._read_commands, daemon=True)
        self._command_reading_thread.start()
        self._startup_time = time.time()
        self._ctrl_suggestion_given = False
        try:
            self._prepare(clean)
            self._mainloop()
        except ConnectionClosedException as e:
            # device went away: report and shut down gracefully
            self._on_connection_closed(e)
        except Exception:
            logger.exception("Crash in backend")
            traceback.print_exc()
    def _prepare(self, clean):
        """Bring the device to a raw prompt and gather device metadata."""
        if clean:
            self._interrupt_to_raw_prompt()
            self._clear_environment()
        else:
            self._process_until_initial_raw_prompt()
        self._cwd = self._fetch_cwd()
        self._welcome_text = self._fetch_welcome_text()
        self._builtin_modules = self._fetch_builtin_modules()
        self._builtins_info = self._fetch_builtins_info()
        self._send_ready_message()
    def _mainloop(self):
        """Serve commands from the queue until the process exits."""
        while True:
            try:
                # reset per-iteration interrupt flags
                self._cancel_requested = False
                self._interrupt_requested = False
                self._check_for_connection_errors()
                try:
                    cmd = self._command_queue.get(timeout=0.1)
                except Empty:
                    # No command in queue, but maybe a thread produced output meanwhile
                    # or the user resetted the device
                    self._forward_unexpected_output()
                    continue
                if isinstance(cmd, InputSubmission):
                    self._submit_input(cmd.data)
                elif isinstance(cmd, EOFCommand):
                    self._soft_reboot(False)
                elif isinstance(cmd, InterruptCommand):
                    self._interrupt()
                else:
                    self.handle_command(cmd)
            except KeyboardInterrupt:
                self._interrupt()
    def _fetch_welcome_text(self):
        """Capture the normal-REPL banner text, then return to raw mode."""
        self._connection.write(NORMAL_MODE_CMD)
        welcome_text = self._connection.read_until(NORMAL_PROMPT).strip(b"\r\n >")
        if os.name != "nt":
            # normalize device CRLF to LF on POSIX hosts
            welcome_text = welcome_text.replace(b"\r\n", b"\n")
        # Go back to raw prompt
        self._connection.write(RAW_MODE_CMD)
        self._connection.read_until(FIRST_RAW_PROMPT)
        return welcome_text.decode(ENCODING, errors="replace")
def _fetch_uname(self):
res = self._evaluate("__thonny_os.uname()", prelude="import os as __thonny_os")
return {
"sysname": res[0],
"nodename": res[1],
"release": res[2],
"version": res[3],
"machine": res[4],
}
def _fetch_builtin_modules(self):
out, err, _ = self._execute("help('modules')", capture_output=True)
assert not err, "Error was: %r" % err
modules_str_lines = out.strip().splitlines()
last_line = modules_str_lines[-1].strip()
if last_line.count(" ") > 0 and " " not in last_line and "\t" not in last_line:
# probably something like "plus any modules on the filesystem"
# (can be in different languages)
modules_str_lines = modules_str_lines[:-1]
modules_str = (
" ".join(modules_str_lines)
.replace("/__init__", "")
.replace("__main__", "")
.replace("/", ".")
)
return modules_str.split()
    def _fetch_builtins_info(self):
        """Return parsed builtin-name info from api_stubs/builtins.py, or {}.

        NOTE(review): an earlier variant (kept here for reference) scanned
        several stub paths:
            for p in self._get_api_stubs_path():
                builtins_file = os.path.join(p, "__builtins__.py")
                if os.path.exists(builtins_file):
                    return parse_api_information(builtins_file)
        """
        path = os.path.join(self._api_stubs_path, "builtins.py")
        if os.path.exists(path):
            # parse_api_information is defined elsewhere in this module
            return parse_api_information(path)
        else:
            return {}
    def _fetch_cwd(self):
        # Older MicroPython ports may lack os.getcwd; fall back to "" there.
        return self._evaluate(
            "__thonny_os.getcwd() if hasattr(__thonny_os, 'getcwd') else ''",
            prelude="import os as __thonny_os",
        )
    def _send_ready_message(self):
        # Initial ToplevelResponse signals the front-end that the backend is ready.
        self.send_message(ToplevelResponse(welcome_text=self._welcome_text, cwd=self._cwd))
    def _check_send_inline_progress(self, cmd, value, maximum, description=None):
        """Send an InlineProgress event, rate-limited to one per 0.2s per command.

        The final event (value == maximum) is always sent.
        """
        assert "id" in cmd
        prev_time = self._progress_times.get(cmd["id"], 0)
        if value != maximum and time.time() - prev_time < 0.2:
            # Don't notify too often
            return
        else:
            self._progress_times[cmd["id"]] = time.time()
        if description is None:
            description = cmd.get("description", "Working...")
        self.send_message(
            BackendEvent(
                event_type="InlineProgress",
                command_id=cmd["id"],
                value=value,
                maximum=maximum,
                description=description,
            )
        )
    def _interrupt(self):
        # Send Ctrl-C to the device to interrupt running code.
        self._connection.write(INTERRUPT_CMD)
def _check_for_interrupt(self, action_scope):
if action_scope == "device" and self._interrupt_requested:
self._interrupt()
self._interrupt_requested = False
if action_scope == "local" and self._cancel_requested:
self._cancel_requested = False
raise KeyboardInterrupt()
    def _interrupt_to_raw_prompt(self):
        """Force the device into raw-REPL mode, retrying with growing delays.

        Exits the whole backend process if the raw prompt never appears.
        """
        # NB! Sometimes disconnecting and reconnecting (on macOS?)
        # too quickly causes anomalies. See CalliopeMiniProxy for more details
        discarded_bytes = b""
        for delay in [0.05, 0.5, 0.1, 1.0, 3.0, 5.0]:
            # Interrupt several times, because with some drivers first interrupts seem to vanish
            if delay >= 1:
                self._show_error(
                    "Could not enter REPL. Trying again with %d second waiting time..." % delay
                )
            self._connection.reset_output_buffer()
            self._connection.write(INTERRUPT_CMD)
            self._connection.write(RAW_MODE_CMD)
            time.sleep(delay)
            discarded_bytes += self._connection.read_all()
            if discarded_bytes.endswith(FIRST_RAW_PROMPT) or discarded_bytes.endswith(b"\r\n>"):
                break
        else:
            # for-else: loop never broke -> the raw prompt never appeared
            max_tail_length = 500
            if len(discarded_bytes) > max_tail_length:
                # NOTE(review): [:-max_tail_length] keeps the HEAD of the data and
                # drops the tail; a tail slice [-max_tail_length:] may have been
                # intended here -- confirm against upstream.
                discarded_bytes_str = (
                    "[skipping %d bytes] ..." % (len(discarded_bytes) - max_tail_length)
                ) + repr(discarded_bytes[:-max_tail_length])
            else:
                discarded_bytes_str = repr(discarded_bytes)
            self._show_error(
                "Could not enter REPL. Giving up. Read bytes:\n"
                + discarded_bytes_str
                + "\n\nYour options:\n\n"
                + " - check connection properties;\n"
                + " - make sure the device has suitable firmware;\n"
                + " - make sure the device is not in bootloader mode;\n"
                + " - reset the device and try again;\n"
                + " - try other serial clients (Putty, TeraTerm, screen, ...);\n"
                + " - ask for help in Thonny's forum or issue tracker."
            )
            sys.exit()
    def _soft_reboot(self, side_command):
        """Send Ctrl-D to soft-reboot the device.

        :param side_command: True when invoked out-of-band, in which case the
            device is first forced back to a raw prompt and no response is sent.
        """
        if side_command:
            self._interrupt_to_raw_prompt()
        # Need to go to normal mode. MP doesn't run user code in raw mode
        # (CP does, but it doesn't hurt to do it there as well)
        self._connection.write(NORMAL_MODE_CMD)
        self._connection.read_until(NORMAL_PROMPT)
        self._connection.write(SOFT_REBOOT_CMD)
        if not side_command:
            self._process_until_raw_prompt()
            self.send_message(ToplevelResponse(cwd=self._cwd))
    def _read_commands(self):
        """Reader-thread loop: parse stdin lines into commands for the main loop."""
        while True:
            line = sys.stdin.readline()
            if line == "":
                # EOF on stdin: front-end is gone, shut the backend down
                logger.info("Read stdin EOF")
                sys.exit()
            cmd = parse_message(line)
            if isinstance(cmd, InterruptCommand):
                # handled out-of-band via flags (not queued) so that a busy
                # main loop can still notice the interrupt
                self._interrupt_requested = True
                self._cancel_requested = True
            else:
                self._command_queue.put(cmd)
    def handle_command(self, cmd):
        """Dispatch *cmd* to a _cmd_<name> handler and send back a response."""
        assert isinstance(cmd, (ToplevelCommand, InlineCommand))
        if "local_cwd" in cmd:
            self._local_cwd = cmd["local_cwd"]
        def create_error_response(**kw):
            # Build a response of the right flavor; default error is the current traceback
            if not "error" in kw:
                kw["error"] = traceback.format_exc()
            if isinstance(cmd, ToplevelCommand):
                return ToplevelResponse(command_name=cmd.name, **kw)
            else:
                return InlineResponse(command_name=cmd.name, **kw)
        handler = getattr(self, "_cmd_" + cmd.name, None)
        if handler is None:
            response = create_error_response(error="Unknown command: " + cmd.name)
        else:
            try:
                response = handler(cmd)
            except SystemExit:
                # Must be caused by Thonny or plugins code
                if isinstance(cmd, ToplevelCommand):
                    traceback.print_exc()
                response = create_error_response(SystemExit=True)
            except UserError as e:
                sys.stderr.write(str(e) + "\n")
                response = create_error_response()
            except KeyboardInterrupt:
                response = create_error_response(error="Interrupted", interrupted=True)
            except Exception:
                _report_internal_error()
                response = create_error_response(context_info="other unhandled exception")
        if response is None:
            response = {}
        if response is False:
            # Command doesn't want to send any response
            return
        elif isinstance(response, dict):
            # plain dicts get wrapped into the proper response type
            if isinstance(cmd, ToplevelCommand):
                response = ToplevelResponse(command_name=cmd.name, **response)
            elif isinstance(cmd, InlineCommand):
                response = InlineResponse(cmd.name, **response)
        if "id" in cmd and "command_id" not in response:
            # echo the command id so the front-end can correlate
            response["command_id"] = cmd["id"]
        debug("cmd: " + str(cmd) + ", respin: " + str(response))
        self.send_message(response)
    def _submit_input(self, cdata: str) -> None:
        """Forward user input to the device and consume the terminal echo."""
        # TODO: what if there is a previous unused data waiting
        assert self._connection.outgoing_is_empty()
        assert cdata.endswith("\n")
        if not cdata.endswith("\r\n"):
            # submission is done with CRLF
            cdata = cdata[:-1] + "\r\n"
        bdata = cdata.encode(ENCODING)
        self._connection.write(bdata)
        # Try to consume the echo
        try:
            echo = self._connection.read(len(bdata))
        except queue.Empty:
            # leave it.
            logging.warning("Timeout when reading echo")
            return
        if echo != bdata:
            # because of autoreload? timing problems? interruption?
            # Leave it.
            logging.warning("Unexpected echo. Expected %s, got %s" % (bdata, echo))
            self._connection.unread(echo)
    def send_message(self, msg):
        """Serialize *msg* to stdout for the front-end, attaching cwd if absent."""
        if "cwd" not in msg:
            msg["cwd"] = self._cwd
        sys.stdout.write(serialize_message(msg) + "\n")
        sys.stdout.flush()
    def _send_output(self, data, stream_name):
        """Forward program output to the front-end as a ProgramOutput event."""
        if not data:
            return
        data = self._transform_output(data)
        msg = BackendEvent(event_type="ProgramOutput", stream_name=stream_name, data=data)
        self.send_message(msg)
def _transform_output(self, data):
# Any keypress wouldn't work
return data.replace(
"Press any key to enter the REPL. Use CTRL-D to reload.",
"Press Ctrl-C to enter the REPL. Use CTRL-D to reload.",
)
    def _ensure_raw_propmt(self):
        # (sic: "propmt" -- keep the typo, other code calls it by this name)
        # similar to _interrupt_to_raw_prompt, but assumes we are already in a prompt
        self._forward_unexpected_output()
        self._connection.write(RAW_MODE_CMD)
        prompt = self._connection.read_until(FIRST_RAW_PROMPT_SUFFIX, 1, True)
        if not prompt.endswith(FIRST_RAW_PROMPT_SUFFIX):
            raise TimeoutError("Could not ensure raw prompt")
    def _execute(self, script, capture_output=False):
        """Run *script* on the device via the raw REPL.

        Returns whatever _process_until_raw_prompt yields -- presumably an
        (out, err, value) triple, judging by the callers; confirm there.
        """
        self._ensure_raw_propmt()
        # send command
        self._connection.write(script.encode(ENCODING) + EOT)
        debug("Wrote " + script + "\n--------\n")
        # fetch command confirmation
        ok = self._connection.read(2)
        debug("GOTOK")
        assert ok == OK, "Expected OK, got %r, followed by %r" % (ok, self._connection.read_all())
        return self._process_until_raw_prompt(capture_output)
def _execute_without_output(self, script):
out, err, value = self._execute(script, capture_output=True)
if err or out:
raise RuntimeError("Failed MP script: " + str(out) + "\n" + str(err))
return value
    def _execute_print_expr(self, expr, prelude="", cleanup="", capture_output=False):
        # assuming expr really contains an expression
        # separator is for separating side-effect output and printed value
        script = ""
        if prelude:
            script += prelude + "\n"
        # wrap repr(expr) in THONNY_MSG_START/END markers so the reader can
        # split side-effect output from the printed value
        script += "print(%r, repr(%s), sep='', end=%r)" % (
            THONNY_MSG_START.decode(),
            expr,
            THONNY_MSG_END.decode(),
        )
        # assuming cleanup doesn't cause output
        if cleanup:
            script += "\n" + cleanup
        return self._execute(script, capture_output)
def _evaluate(self, expr, prelude="", cleanup=""):
_, _, value_repr = self._execute_print_expr(expr, prelude, cleanup)
if value_repr is None:
return None
else:
return ast.literal_eval(value_repr)
def _process_until_initial_raw_prompt(self):
self._connection.write(RAW_MODE_CMD)
try:
self._process_until_raw_prompt()
| |
cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
from parts import RGB_LED
led = RGB_LED(pgio, cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
"""
Tubデータ件数を色PWMタプル(r, g, b):各0-100に変換する。
件数範囲、PWMタプルはconfig.py上のRECORD_ALERT_COLOR_ARR
を参照している。
引数:
num_records Tubデータ件数
戻り値
色PWMタプル (r, g, b)形式(0-100)
"""
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
    '''
    レコードトラッカ
    '''
    # (Record tracker)
    class RecordTracker:
        """Tracks the recorded-sample count and drives the record-alert color."""
        def __init__(self):
            # last record count we acted on
            self.last_num_rec_print = 0
            # remaining alert-display ticks
            self.dur_alert = 0
            # set to 1 (e.g. by a joystick button) to force an alert
            self.force_alert = 0
        def run(self, num_records):
            if num_records is None:
                return 0
            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records
                if num_records % 10 == 0:
                    print("recorded", num_records, "records")
                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0
            if self.dur_alert > 0:
                self.dur_alert -= 1
            if self.dur_alert != 0:
                # alert active: return the color for the current count
                return get_record_alert_color(num_records)
            return 0
    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])

    '''
    自動記録
    '''
    # (Auto-record on throttle)
    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        """
        自動記録指定されておりかつジョイスティックを使用する場合
        """
        #then we are not using the circle button. hijack that to force a record count indication
        def show_record_acount_status():
            # NOTE(review): 'acount' typo kept -- the name is referenced below
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1
        # Assign a "show record count" button per controller type
        # F710
        if cfg.CONTROLLER_TYPE == 'F710' or cfg.CONTROLLER_TYPE == 'F710_Forklift':
            ctr.set_button_down_trigger('back', show_record_acount_status)
        # JC-U3912T
        elif cfg.CONTROLLER_TYPE == 'JC-U3912T':
            ctr.set_button_down_trigger('7', show_record_acount_status)
        # default
        else:
            ctr.set_button_down_trigger('circle', show_record_acount_status)

    '''
    Sombero HAT
    '''
    #Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        _ = Sombrero()

    # At this point the camera (or 2D map) image is stored in cam/image_array
    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''
        def __init__(self, cfg):
            self.cfg = cfg
        def run(self, img_arr):
            # normalize_and_crop comes from the training utils; presumably
            # crops per cfg ROI settings -- confirm in donkeycar.utils
            return normalize_and_crop(img_arr, self.cfg)
    if "coral" in model_type:
        '''
        coral を使用する場合は画像をそのまま使用
        '''
        # Coral models consume the raw camera image
        inf_input = 'cam/image_array'
    else:
        '''
        自動運転時のみ正規化・CROP処理する
        '''
        # Normalize/crop only while the pilot is running
        inf_input = 'cam/normalized/cropped'
        V.add(ImgPreProcess(cfg),
            inputs=['cam/image_array'],
            outputs=[inf_input],
            run_condition='run_pilot')

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        '''
        behaviorモデルを使用する場合
        '''
        # Behavior model: add the one-hot behavior state to the model inputs
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
        try:
            # bind behavior-state increment to the L1 button
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass
        inputs = [inf_input, "behavior/one_hot_state_array"]
    #IMU
    elif model_type == "imu":
        '''
        IMU(MPU9250/MPU6050)モデル
        '''
        #assert(cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        if cfg.HAVE_IMU:
            # add MPU9250/6050 acceleration and gyro values to the model inputs
            inputs=['cam/image_array',
                'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z',
            ]
        elif cfg.HAVE_HEDGE and (use_hedge or cfg.USE_HEDGE_AS_DEFAULT) and cfg.USE_HEDGE_IMU:
            # add Marvelmind acceleration and velocity values to the model inputs
            inputs=[
                'cam/image_array',
                'imu/ax', 'imu/ay', 'imu/az',
                'imu/vx', 'imu/vy', 'imu/vz'
            ]
        else:
            raise ValueError('can not use imu model without imu data')
    else:
        # model input is the image only
        inputs=[inf_input]
def load_model(kl, model_path):
"""
機械学習モデルに学習済みパラメータをロードする。
引数:
kl 機械学習モデルオブジェクト
model_path 学習済みパラーメータファイルのパス
戻り値:
なし
"""
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
"""
機械学習モデルに学習済みパラメータをロードする。
引数:
kl 機械学習モデルオブジェクト
model_path 学習済みパラーメータファイルのパス
戻り値:
なし
"""
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
    def load_model_json(kl, json_fnm):
        """Rebuild kl.model from a Keras architecture JSON file."""
        start = time.time()
        print('loading model json', json_fnm)
        try:
            from tensorflow.python import keras
        except:
            raise
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' % (str(time.time() - start)) )
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)
    if model_path:
        #When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)
        model_reload_cb = None
        if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
            #when we have a .h5 extension
            #load everything from the model file
            load_model(kl, model_path)
            def reload_model(filename):
                load_model(kl, filename)
            model_reload_cb = reload_model
        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from there and look for a matching
            #.wts file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)
            def reload_weights(filename):
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)
            model_reload_cb = reload_weights
        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        '''
        モデルファイル更新監視
        '''
        # (Watch the model file for updates)
        #this part will signal visual LED, if connected
        V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])
        #these parts will reload the model file, but only when ai is running so we don't interrupt user driving
        V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
        V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
        V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
        outputs=['pilot/angle', 'pilot/throttle']
        '''
        ローカライザモデルの場合の出力編集
        '''
        # (Localizer models also emit a location output)
        if cfg.TRAIN_LOCALIZER:
            outputs.append("pilot/loc")
        V.add(kl, inputs=inputs,
            outputs=outputs,
            run_condition='run_pilot')
    '''
    モデルがない場合の自動運転結果初期値設定
    '''
    # (Defaults for pilot outputs when no model is loaded)
    V.mem['pilot/throttle'] = 0.0
    V.mem['pilot/angle'] = 0.0
    V.mem['pilot/lift_throttle'] = 0.0
    '''
    運転モードから、モータ入力値決定
    '''
    #Choose what inputs should change the car.
class DriveMode:
def run(self, mode,
user_angle, user_throttle, user_lift_throttle,
pilot_angle, pilot_throttle, pilot_lift_throttle):
if mode == 'user':
return user_angle, user_throttle, user_lift_throttle
elif mode == 'local_angle':
return pilot_angle, user_throttle, user_lift_throttle
else:
return pilot_angle, (pilot_throttle * cfg.AI_THROTTLE_MULT), (pilot_lift_throttle * cfg.AI_THROTTLE_MULT)
    V.add(DriveMode(),
          inputs=['user/mode', 'user/angle', 'user/throttle', 'user/lift_throttle',
                  'pilot/angle', 'pilot/throttle', 'pilot/lift_throttle'],
          outputs=['angle', 'throttle', 'lift_throttle'])
    # The final commands to the Donkeycar are the angle/throttle/lift_throttle values

    '''
    AIランチャ
    '''
    # (AI launch boost)
    #to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
    V.add(aiLauncher,
          inputs=['user/mode', 'throttle'],
          outputs=['throttle'])
    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)

    '''
    AIによる自動運転かどうかの判定
    '''
class AiRunCondition:
    '''
    A bool part to let us know when ai is running.
    '''

    def run(self, mode):
        # The AI is considered active in every mode except manual ('user').
        return mode != "user"
# Publish 'ai_running' so other parts (model reload, AI recording) can gate on it.
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
'''
AI運転時の記録モード
'''
# (JP block above: "recording mode while the AI is driving")
#Ai Recording
class AiRecordingCondition:
    '''
    Return True when in an AI mode, otherwise respect the user-mode recording flag.
    '''

    def run(self, mode, recording):
        # Manual driving: honour whatever the user chose; AI driving: always record.
        return recording if mode == 'user' else True
# Force recording on while the AI drives (if configured); otherwise the
# user-controlled 'recording' flag passes through unchanged.
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
'''
モータ駆動
'''
# (JP block above: "motor drive")
#Drive train setup
# One branch per supported hardware configuration; each wires the 'angle' /
# 'throttle' (and where supported 'lift_throttle') channels to actuator parts.
if cfg.DONKEY_GYM:
# Simulator: no physical actuators to wire.
pass
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
# Standard RC servo + ESC via a PCA9685 PWM board.
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
# No lift motor on this drive train: pin the channel to 0.
V.mem['lift_throttle'] = 0
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
# Two brushed DC motors (one steering, one drive) on an H-bridge.
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
V.mem['lift_throttle'] = 0
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
# Differential drive: steering is achieved by splitting throttle/angle
# into independent left/right motor speeds.
from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
# NOTE(review): this branch zeroes 'lift_motor_throttle', not 'lift_throttle'
# as the other branches do - confirm which key downstream parts read.
V.mem['lift_motor_throttle'] = 0
# Forklift drive-motor control (translated from Japanese)
elif cfg.DRIVE_TRAIN_TYPE == "THREE_MOTORS_PIGPIO":
'''
フォークリフト3モータ駆動
'''
# (JP block above: "forklift three-motor drive")
from parts import PIGPIO_OUT, PIGPIO_PWM, ForkliftMotorDriver
# ForkliftMotorDriver turns (throttle, angle, lift_throttle) into per-motor
# vref/in1/in2 signals for two TB6612 H-bridge chips.
motor_driver = ForkliftMotorDriver(
left_balance=cfg.LEFT_PWM_BALANCE,
right_balance=cfg.RIGHT_PWM_BALANCE, debug=False)
V.add(motor_driver,
inputs=['throttle', 'angle', 'lift_throttle'],
outputs=[
'left_motor_vref', 'left_motor_in1', 'left_motor_in2',
'right_motor_vref', 'right_motor_in1', 'right_motor_in2',
'lift_motor_vref', 'lift_motor_in1', 'lift_motor_in2'
])
if use_debug:
# Debug-only part that prints every motor signal each loop.
class Prt:
def run(self, left_vref, left_in1, left_in2, right_vref, right_in1, right_in2, lift_vref, lift_in1, lift_in2):
print('ForkliftMD left vref:{}, in1:{}, in2:{}'.format(str(left_vref), str(left_in1), str(left_in2)))
print('ForkliftMD right vref:{}, in1:{}, in2:{}'.format(str(right_vref), str(right_in1), str(right_in2)))
print('ForkliftMD lift vref:{}, in1:{}, in2:{}'.format(str(lift_vref), str(lift_in1), str(lift_in2)))
V.add(Prt(),inputs=[
'left_motor_vref', 'left_motor_in1', 'left_motor_in2',
'right_motor_vref', 'right_motor_in1', 'right_motor_in2',
'lift_motor_vref', 'lift_motor_in1', 'lift_motor_in2'
])
# TB6612 #1 channel A (right motor; drives when turning left)
V.add(PIGPIO_OUT(
pin=cfg.LEFT_MOTOR_IN1_GPIO, pgio=pgio), inputs=['left_motor_in1'])
V.add(PIGPIO_OUT(
pin=cfg.LEFT_MOTOR_IN2_GPIO, pgio=pgio), inputs=['left_motor_in2'])
V.add(PIGPIO_PWM(
pin=cfg.LEFT_MOTOR_PWM_GPIO, pgio=pgio, freq=cfg.PWM_FREQ,
range=cfg.PWM_RANGE, threshold=cfg.PWM_INPUT_THRESHOLD), inputs = ['left_motor_vref'])
# TB6612 #1 channel B (left motor; drives when turning right)
V.add(PIGPIO_OUT(
pin=cfg.RIGHT_MOTOR_IN1_GPIO, pgio=pgio), inputs = ['right_motor_in1'])
V.add(PIGPIO_OUT(
pin=cfg.RIGHT_MOTOR_IN2_GPIO, pgio=pgio), inputs = ['right_motor_in2'])
V.add(PIGPIO_PWM(
pin=cfg.RIGHT_MOTOR_PWM_GPIO, pgio=pgio, freq=cfg.PWM_FREQ,
range=cfg.PWM_RANGE, threshold=cfg.PWM_INPUT_THRESHOLD), inputs = ['right_motor_vref'])
# TB6612 #2 channel A (lift motor)
V.add(PIGPIO_OUT(
pin=cfg.LIFT_MOTOR_IN1_GPIO, pgio=pgio), inputs = ['lift_motor_in1'])
V.add(PIGPIO_OUT(
pin=cfg.LIFT_MOTOR_IN2_GPIO, pgio=pgio), inputs = ['lift_motor_in2'])
V.add(PIGPIO_PWM(
pin=cfg.LIFT_MOTOR_PWM_GPIO, pgio=pgio, freq=cfg.PWM_FREQ,
range=cfg.PWM_RANGE, threshold=cfg.PWM_INPUT_THRESHOLD), inputs = ['lift_motor_vref'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
# Servo steering via ServoBlaster, DC drive motor via H-bridge.
from donkeycar.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
#PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(motor, inputs=["throttle"])
V.mem['lift_throttle'] = 0
'''
Tubデータ
'''
# (JP block above: "Tub data" - the channels recorded to the data store)
# Base Tub record fields (translated from Japanese)
if write_both_images:
# Record both the main camera image and the forward camera image.
from parts.datastore import FWD_CAMERA_KEY
inputs=['cam/image_array', FWD_CAMERA_KEY]
types=['image_array', 'image_array']
else:
inputs=['cam/image_array']
types=['image_array']
# Append the user-command channels defined earlier in the script.
inputs += user_items
types += user_types
if cfg.TRAIN_BEHAVIORS:
'''
behaviorモデルを使用する場合の入力データを追加
'''
# (JP block above: "add the inputs needed when a behavior model is used")
inputs += ['behavior/state', 'behavior/label', 'behavior/one_hot_state_array']
types += ['int', 'str', 'vector']
'''
IMU(MPU6050)データ追加
'''
# (JP block above: "add the IMU (MPU6050) data")
# Acceleration and angular-velocity channels to store in the Tub (translated)
tub_imu_inputs = [
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z',
]
tub_imu_input_types = [
'float', 'float', 'float',
'float', 'float', 'float',
]
# IMU fallback: when no MPU9250/MPU6050 is present, source the IMU channels
# from the Marvelmind hedge instead (this branch continues below).
if not cfg.HAVE_IMU:
'''
MPU9250/MPU6050を持っていない場合
'''
# (JP block above: "when no MPU9250/MPU6050 is available")
if cfg.HAVE_HEDGE and (use_hedge or cfg.USE_HEDGE_AS_DEFAULT):
'''
Marvelmind が有効な場合
'''
# (JP block above: "when Marvelmind is enabled")
if cfg.USE_HEDGE_IMU:
'''
Marvelmind IMUが使用可能な場合
'''
# (JP block above: "when the Marvelmind IMU is usable")
# Relocate the required Marvelmind IMU channels (translated from Japanese)
# Pass-through part: copies the six hedge IMU values onto new channels.
class MoveIMU:
def run(self, ax, ay, az, gx, gy, gz):
return ax, ay, az, gx, gy, gz
move | |
# Aim is to apply a multiplier to the natural capital scores to reflect the degree of public access
# Challenge is that it is difficult to clip or intersect the complex public access layer with the large and
# detailed OSMM-based base map - it takes days to run and then fails.
# So here we extract a subset of the base map that excludes gardens and manmade features, to cut the processing load.
# Create a public access layer from component datasets and set up a multiplier for recreation
# Intersect the public access layer with the subset and merge back into the base map
# A separate multiplier can then be applied to all gardens to reflect their private value if required
# -----------------------------------------------------------------------------------------------------------------
import time
import arcpy
import os
import MyFunctions
# Geoprocessing environment setup: Spatial Analyst licence plus global arcpy options.
arcpy.CheckOutExtension("Spatial")
print(''.join(["## Started on : ", time.ctime()]))
arcpy.env.overwriteOutput = True # Overwrites files
arcpy.env.qualifiedFieldNames = False # Joined fields will be exported without the join table name
arcpy.env.XYTolerance = "0.001 Meters"
# *** Enter parameters
# --------------------
# The region/method pair selects which geodatabases, boundary layers and field
# names the rest of the script operates on.
# region = "Arc"
# region = "Oxon"
region = "NP"
# Choice of method that has been used to generate the input files - this determines location and names of input files
method = "CROME_PHI"
# method = "LERC"
# method = "HLU"
if region == "Oxon" and method == "HLU":
gdbs = [r"D:\cenv0389\Oxon_GIS\Oxon_county\Data\Public_access.gdb"]
region_boundary = "Oxfordshire"
boundary = "Oxfordshire"
base_map = "OSMM_HLU_CR_ALC_Des_GS"
area_tag = "Oxon"
hab_field = "Interpreted_habitat"
# Name of OSMM fields used for interpretation
MakeField = "Make"
DescGroup = "DescriptiveGroup"
DescTerm = "DescriptiveTerm"
delete_1 = True
elif region == "Arc" or region == "NP" or (region == "Oxon" and method == "CROME_PHI"):
if region == "NP":
folder = r"M:\urban_development_natural_capital"
region_boundary = os.path.join(folder, "Data.gdb\NP_boundary")
else:
folder = r"D:\cenv0389\OxCamArc\NatCap_Arc_FreeData"
region_boundary = os.path.join(folder, "Arc_outline.shp")
arcpy.env.workspace = folder
if region == "Arc":
gdbs = arcpy.ListWorkspaces("*", "FileGDB")
# Or comment out previous line and use this format (one row per gdb) if repeating certain gdbs only
# gdbs = []
# gdbs.append(os.path.join(folder, "AylesburyVale.gdb"))
# gdbs.append(os.path.join(folder, "Chiltern.gdb"))
# gdbs.append(os.path.join(folder, "SouthOxfordshire.gdb"))
area_tag = "Arc"
elif region == "NP":
# Remember Leeds not in the list below because already done
# "Allerdale.gdb", "Barnsley.gdb", "Barrow-in-Furness.gdb", "Blackburn with Darwen.gdb", "Blackpool.gdb",
# "Bolton.gdb", "Bradford.gdb", "Burnley.gdb", "Bury.gdb", "Calderdale.gdb", "Carlisle.gdb",
# "Cheshire East.gdb", "Cheshire West and Chester.gdb", "Chorley.gdb", "Copeland.gdb", "County Durham.gdb",
# "Craven.gdb", "Darlington.gdb", "Doncaster.gdb",
# "East Riding of Yorkshire.gdb", "Eden.gdb", "Fylde.gdb", "Gateshead.gdb",
# "Halton.gdb", "Hambleton.gdb", "Harrogate.gdb", "Hartlepool.gdb", "Hyndburn.gdb", "Kirklees.gdb", "Knowsley.gdb",
# "Lancaster.gdb", "Liverpool.gdb", "Manchester.gdb", "Middlesbrough.gdb", "Newcastle upon Tyne.gdb",
# "North East Lincolnshire.gdb", "North Lincolnshire.gdb", "Northumberland.gdb", "North Tyneside.gdb", "Oldham.gdb",
# "Pendle.gdb", "Preston.gdb", "Redcar and Cleveland.gdb", "Ribble Valley.gdb",
# "Richmondshire.gdb", "Rochdale.gdb", "Rossendale.gdb", "Rotherham.gdb", "Ryedale.gdb", "Salford.gdb",
# "Scarborough.gdb", "Sefton.gdb", "Selby.gdb", "Sheffield.gdb", "South Lakeland.gdb", "South Ribble.gdb",
# "South Tyneside.gdb", "St Helens.gdb", "Stockport.gdb", "Stockton-on-Tees.gdb", "Sunderland.gdb",
# "Tameside.gdb", "Trafford.gdb", "Wakefield.gdb", "Warrington.gdb", "West Lancashire.gdb",
# "Wigan.gdb", "Wirral.gdb", "Wyre.gdb", "York.gdb"
gdb_names = ["East Riding of Yorkshire.gdb"]
gdbs = []
for gdb_name in gdb_names:
# gdb file names on disk have the spaces stripped out
gdbs.append(os.path.join(r"M:\urban_development_natural_capital\LADs", gdb_name.replace(" ", "")))
area_tag = "NP"
elif region == "Oxon":
gdbs = []
LADs = ["Cherwell.gdb", "Oxford.gdb", "SouthOxfordshire.gdb", "ValeofWhiteHorse.gdb", "WestOxfordshire.gdb"]
for LAD in LADs:
gdbs.append(os.path.join(folder, LAD))
boundary = "boundary"
if method == "LERC":
base_map = "LERC_ALC_Desig_GS"
# Name of OSMM fields used for interpretation
MakeField = "make"
DescGroup = "DescGroup"
DescTerm = "DescTerm"
# Do not tidy up by deleting fields containing the string "_1" as there are lots we want to keep in this dataset!
delete_1 = False
# Feature classes to keep - the others will be deleted if you select 'tidy_workspace' = true
keep_fcs = ["boundary", "Designations", "LERC", "LERC_ALC", "LERC_ALC_Desig", "LERC_ALC_Desig_GS",
"LERC_ALC_Desig_GS_PA", "OS_Open_GS", "OS_Open_GS_clip", "OSGS", "New_snap_union_sp_delid_elim_del", "Public_access"]
else:
base_map = "OSMM_CR_PHI_ALC_Desig_GS"
# Name of OSMM fields used for interpretation
# NP data uses lower-case OSMM field names; the other regions use CamelCase.
if region == "NP":
MakeField = "make"
DescGroup = "descriptivegroup"
DescTerm = "descriptiveterm"
else:
MakeField = "Make"
DescGroup = "DescriptiveGroup"
DescTerm = "DescriptiveTerm"
delete_1 = True
# Feature classes to keep - the others will be deleted if you select 'tidy_workspace' = true
keep_fcs = ["ALC_diss_Union", "boundary", "Designations", "LCM_arable", "LCM_improved_grassland",
"OS_Open_GS", "OS_Open_GS_clip", "OSGS",
"OSMM", "OSMM_CROME", "OSMM_CROME_PHI", "OSMM_CR_PHI_ALC", "OSMM_CR_PHI_ALC_Desig",
"OSMM_CR_PHI_ALC_Desig_GS", "OSMM_CR_PHI_ALC_Desig_GS_PA", "PHI", "Public_access"]
hab_field = "Interpreted_habitat"
# Source of public access data and gdb where public access layer will be created
if region == "Oxon":
data_gdb = r"D:\cenv0389\Oxon_GIS\Oxon_county\Data\Public_access.gdb"
elif region == "Arc":
data_gdb = r"D:\cenv0389\Oxon_GIS\OxCamArc\Data\Public_access.gdb"
elif region == "NP":
data_gdb = r"M:\urban_development_natural_capital\Public_access.gdb"
# Do not delete fid field at end (when all other surplus fields are deleted) as this is now the new name for TOID
protected_fields = ["fid"]
des_list = ['CountryPk', 'NT', 'NNR', 'LNR', 'DoorstepGn', 'MillenGn', 'RSPB']
# SQL expression selecting rows with no accessible designation set (all zero or all NULL).
des_list_expression = "(((CountryPk + NT + NNR + LNR + MillenGn + DoorstepGn + RSPB) = 0) OR " \
"(CountryPk IS NULL AND NT IS NULL AND NNR IS NULL AND LNR IS NULL AND MillenGn IS NULL AND DoorstepGn IS " \
"NULL AND RSPB IS NULL))"
# Table containing info for each input layer - user needs to set it up. Note: we also use OS Greenspace, OS Open Greenspace and
# various designations (e.g. nature reserves), but these are already merged into the base map so do not need to be listed in the info table.
InfoTable = os.path.join(data_gdb, "PublicAccessFiles")
AccessTable_name = "AccessMultipliers"
AccessTable = os.path.join(data_gdb, "AccessMultipliers")
# Buffer distance for paths
buffer_distance = "50 Meters"
# Need to dissolve all paths into a single buffer area if networks are complex, otherwise the process may crash
dissolve_paths = True
# Which stages of the process do we want to run? Useful for debugging or updates
create_access_layer = False
# These four stages will only be run if create_access_layer is True
prep_OSM_paths = True
clip_region = True
buffer_paths = True
merge_paths = True
clip_PA_into_LAD_gdb = True # Do not use this if the public access layer is made in the same gdb
extract_relevant_polygons = True
intersect_access = True
# *** note there is currently a temporary correction in the code here that needs to be removed in due course!
NT_correction = True # CORRECTION for Northern Powerhouse only
sp_and_repair = True
interpret_access = True
tidy_fields = True
# Recommend not using tidy_workspace here but using the separate code Delete_fcs_from_gdb instead - it is safer!
# if method == "CROME_PHI" or method == "LERC":
# tidy_workspace = False # DO NOT USE THIS FOR OXON HLU method!! It is not set up yet.
# else:
# tidy_workspace = False
# *** End of parameter entry
# --------------------------
if create_access_layer:
# Create public access layer by merging multiple input files, reading info from a table
# Linear features (paths, cycle routes) are converted to a 50m buffer zone
# Set up Type, Description and Name field for each file, reading info from InfoTable, and populate by copying existing relevant fields
arcpy.env.workspace = data_gdb
InAreas = []
InPaths = []
# NOTE(review): 'ipath' is initialised here but not used in this section - confirm it is needed later.
ipath = 0
# First loop through to find max length for Name and Description fields
# (so the merged layer's text fields are wide enough for every input layer).
max_NameLen = 0
max_DescLen = 0
cursor = arcpy.SearchCursor(InfoTable)
for row in cursor:
# When paths are dissolved their Name/Description are not carried through,
# so only non-path rows contribute to the field lengths in that case.
if dissolve_paths == False or (dissolve_paths == True and row.getValue("Path") == 0):
DescLen = row.getValue("DescLength")
if DescLen > max_DescLen:
max_DescLen = DescLen
NameLen = row.getValue("NameLength")
if NameLen > max_NameLen:
max_NameLen = NameLen
# Deal with paths first.
# If we are dissolving paths, merge all the path input line files first
if dissolve_paths:
if merge_paths:
cursor = arcpy.SearchCursor(InfoTable)
for row in cursor:
if row.getValue("Path") == 1:
in_file = row.getValue("Filename")
if clip_region:
print("Clipping " + in_file)
arcpy.Clip_analysis(in_file, region_boundary, in_file + "_" + area_tag)
if area_tag <> "":
in_file = in_file + "_" + area_tag
InPaths.append(in_file)
print "Merging paths"
arcpy.Merge_management(InPaths, "Paths_merge")
print("Buffering and dissolving merged paths")
arcpy.Buffer_analysis("Paths_merge", "Paths_merge_buffer", buffer_distance, dissolve_option="ALL")
# Add PAType
print("Adding Type field")
MyFunctions.check_and_add_field("Paths_merge_buffer", "PAType", "TEXT", 50)
arcpy.CalculateField_management("Paths_merge_buffer", "PAType", "'Path'", "PYTHON_9.3")
arcpy.MultipartToSinglepart_management("Paths_merge_buffer", "Paths_merge_buffer_sp")
# Now loop through the other areas (and paths if keeping separate) to set up the Type, Description and Name fields
cursor = arcpy.SearchCursor(InfoTable)
for row in cursor:
# exit_flag marks rows to skip (path rows are already handled above when dissolve_paths is set).
exit_flag = False
in_file = row.getValue("Filename")
ShortName = row.getValue("ShortName")
print("Processing " + ShortName)
Type = row.getValue("Type")
Path = row.getValue("Path")
NameField = row.getValue("NameField")
DescField = row.getValue("DescField")
if Path == 1:
if dissolve_paths:
exit_flag = True
else:
exit_flag = False
if exit_flag == False:
if clip_region:
print("Clipping " + in_file)
arcpy.Clip_analysis(in_file, region_boundary, in_file + "_" + area_tag)
| |
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Encode a single sequence (or a sequence pair) for the model.

Normalizes the user-facing ``padding``/``truncation`` arguments into
:class:`PaddingStrategy`/:class:`TruncationStrategy` values, then forwards
every argument unchanged to the subclass hook :meth:`_encode_plus`.
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""Subclass hook backing :meth:`encode_plus`; must be overridden."""
raise NotImplementedError
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Encode a batch of sequences (or sequence pairs) for the model.

Normalizes the user-facing ``padding``/``truncation`` arguments into
strategy enums, then forwards everything to the subclass hook
:meth:`_batch_encode_plus`.
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""Subclass hook backing :meth:`batch_encode_plus`; must be overridden."""
raise NotImplementedError
def pad(
self,
encoded_inputs: Union[
BatchEncoding,
List[BatchEncoding],
Dict[str, EncodedInput],
Dict[str, List[EncodedInput]],
List[Dict[str, EncodedInput]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
verbose: bool = True,
) -> BatchEncoding:
"""
Pad a single encoding or a batch of encodings up to the requested length.

Accepts dicts of lists, lists of dicts (collate_fn style), or tensors from
torch/tf/numpy (converted to python lists first), and returns a
:class:`BatchEncoding` built with ``return_tensors``.
"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding)):
encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
assert "input_ids" in encoded_inputs, (
"You should supply an encoding or a list of encodings to this method. "
"An encoding is the output of one the encoding methods of the tokenizer, i.e. "
"__call__/encode_plus/batch_encode_plus. "
)
# Empty input: nothing to pad; return as-is (with an empty mask if requested).
if not encoded_inputs["input_ids"]:
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
# If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = encoded_inputs["input_ids"][0]
if isinstance(first_element, (list, tuple)) and first_element:
first_element = first_element[0]
if not isinstance(first_element, int):
# Infer the output tensor type from the input's framework if not given.
if is_tf_available() and isinstance(first_element, tf.Tensor):
return_tensors = "tf" if return_tensors is None else return_tensors
elif is_torch_available() and isinstance(first_element, torch.Tensor):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
f"Should be one of a python, numpy, pytorch or tensorflow object."
)
# Recursively convert tensors/arrays to nested python lists.
def to_py_obj(obj):
if isinstance(obj, (list, tuple)):
return [to_py_obj(o) for o in obj]
elif is_tf_available() and isinstance(obj, tf.Tensor):
return obj.numpy().tolist()
elif is_torch_available() and isinstance(obj, torch.Tensor):
return obj.cpu().tolist()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
# Convert padding_strategy in PaddingStrategy
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
# Single (unbatched) encoding: pad it directly.
if encoded_inputs["input_ids"] and not isinstance(encoded_inputs["input_ids"][0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(encoded_inputs["input_ids"])
assert all(
len(v) == batch_size for v in encoded_inputs.values()
), "Some items in the output dictionnary have a different batch size than others."
# LONGEST padding is resolved to MAX_LENGTH at the longest sequence in the batch.
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in encoded_inputs["input_ids"])
padding_strategy = PaddingStrategy.MAX_LENGTH
# Pad each example individually, then re-assemble column-wise.
batch_outputs = {}
for i in range(batch_size):
inputs = dict((k, v[i]) for k, v in encoded_inputs.items())
outputs = self._pad(
inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
def create_token_type_ids_from_sequences(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """Build segment ids: 0 for the first sequence, 1 for the optional second.

    This base implementation does not account for special tokens.
    """
    segment_ids = [0] * len(token_ids_0)
    if token_ids_1 is not None:
        segment_ids += [1] * len(token_ids_1)
    return segment_ids
def build_inputs_with_special_tokens(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """Join one or two id sequences; this base implementation adds no special tokens.

    With a single sequence the input list is returned unchanged (same object);
    with a pair, a new concatenated list is returned.
    """
    if token_ids_1 is None:
        return token_ids_0
    return token_ids_0 + token_ids_1
def prepare_for_model(
self,
ids: List[int],
pair_ids: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
prepend_batch_axis: bool = False,
**kwargs
) -> BatchEncoding:
"""
Turn already-tokenized id sequences into a model-ready :class:`BatchEncoding`:
truncate if needed, add special tokens, build token type ids, optionally pad,
and attach the requested auxiliary outputs (masks, lengths, overflow).
"""
# Deprecated alias: 'return_lengths' maps onto 'return_length'.
if "return_lengths" in kwargs:
if verbose:
warnings.warn(
"The PreTrainedTokenizerBase.prepare_for_model `return_lengths` parameter is deprecated. "
"Please use `return_length` instead.",
FutureWarning,
)
return_length = kwargs["return_lengths"]
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
# Load from model defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
encoded_inputs = {}
# Compute the total size of the returned encodings
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
# Truncation: Handle max sequence length
overflowing_tokens = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
# Build output dictionnary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
if add_special_tokens:
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
else:
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
# Check lengths
# Warn (once per call) when the untruncated result exceeds the model's limit.
if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length and verbose:
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
"for this model ({} > {}). Running this sequence through the model will result in "
"indexing errors".format(len(encoded_inputs["input_ids"]), self.model_max_length)
)
# Padding
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(
encoded_inputs,
max_length=max_length,
padding=padding_strategy.value,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
if return_length:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
batch_outputs = BatchEncoding(
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
)
return batch_outputs
def truncate_sequences(
self,
ids: List[int],
pair_ids: Optional[List[int]] | |
["title","author","notes"] : ## Specific coding !!??
if jsondata['group'] in ['sdl'] :
try:
self.logger.info('Before encoding :\t%s:%s' % (key,jsondata[key]))
jsondata[key]=jsondata[key].encode("iso-8859-1") ## encode to display e.g. 'Umlauts' correctly
self.logger.info('After encoding :\t%s:%s' % (key,jsondata[key]))
except UnicodeEncodeError as e :
self.logger.error("%s : ( %s:%s[...] )" % (e,key,jsondata[key]))
except Exception as e:
self.logger.error('%s : ( %s:%s[...] )' % (e,key,jsondata[key[20]]))
finally:
pass
jsondata['extras']=list()
extrafields=sorted(set(self.b2findfields.keys()) - set(self.b2fckandeffields))
self.logger.debug(' CKAN extra fields')
for key in extrafields :
if key in jsondata :
if key in ['Contact','Format','Language','Publisher','PublicationYear','Checksum', 'Rights','ResourceType']:
value=';'.join(jsondata[key])
elif key in ['oai_identifier']:
if isinstance(jsondata[key],list) or isinstance(jsondata[key],set) :
value=jsondata[key][-1]
else:
value=jsondata[key]
jsondata['extras'].append({
"key" : key,
"value" : value
})
del jsondata[key]
self.logger.debug(' | %-15s | %-25s' % (key,value))
else:
self.logger.debug(' | %-15s | %-25s' % (key,'-- No data available'))
return jsondata
def check(self, jsondata):
## check(UPLOADER object, json data) - method
# Checks the jsondata and returns the correct ones
#
# Parameters:
# -----------
# 1. (dict) jsondata - json dictionary with metadata fields with B2FIND standard
#
# Return Values:
# --------------
# 1. (dict)
# Raise errors:
# -------------
# 0 - critical error occured
# 1 - non-critical error occured
# 2 - no error occured
errmsg = ''
## check ds name (must be lowercase, alphanumeric + ['_-']
if not re.match("^[a-z0-9_-]*$", jsondata['name']):
self.logger.error("The dataset name '%s' must be lowercase and alphanumeric + ['_-']" % jsondata['name'])
jsondata['name']=jsondata['name'].lower()
self.logger.error(" ... and is converted now to '%s'" % jsondata['name'])
## check mandatory fields ...
mandFields=['title','oai_identifier']
for field in mandFields :
if field not in jsondata: ## or jsondata[field] == ''):
self.logger.critical("The mandatory field '%s' is missing" % field)
return None
identFields=['DOI','PID','url']
identFlag=False
for field in identFields :
if field in jsondata:
identFlag=True
if identFlag == False:
self.logger.critical("At least one identifier from %s is mandatory" % identFields)
return None
if 'PublicationYear' in jsondata :
try:
datetime.datetime.strptime(jsondata['PublicationYear'][0], '%Y')
except (ValueError,TypeError) as e:
self.logger.debug("%s : Facet %s must be in format YYYY, given valueis : %s" % (e,'PublicationYear',jsondata['PublicationYear']))
##HEW-D raise Exception("Error %s : Key %s value %s has incorrect data format, should be YYYY" % (e,'PublicationYear',jsondata['PublicationYear']))
# delete this field from the jsondata:
del jsondata['PublicationYear']
# check Date-Times for consistency with UTC format
dt_keys=['PublicationTimestamp', 'TemporalCoverage:BeginDate', 'TemporalCoverage:EndDate']
for key in dt_keys:
if key in jsondata :
try:
datetime.datetime.strptime(jsondata[key], '%Y-%m-%d'+'T'+'%H:%M:%S'+'Z')
except ValueError:
self.logger.error("Value %s of key %s has incorrect data format, should be YYYY-MM-DDThh:mm:ssZ" % (jsondata[key],key))
del jsondata[key] # delete this field from the jsondata
except TypeError:
self.logger.error("Value %s of key %s has incorrect type, must be string YYYY-MM-DDThh:mm:ssZ" % (jsondata[key],key))
del jsondata[key] # delete this field from the jsondata
return jsondata
def upload(self, request):
## upload(UPLOADER object, request) - method
# uploads a JSON dataset to a B2FIND instance (CKAN).
results = collections.defaultdict(int)
# set processing parameters
community=request[0]
source=request[1]
mdprefix=request[3]
mdsubset=request[4] if len(request)>4 else None
target_mdschema=request[8] if len(request)>8 else None
mdschemasfile='%s/mapfiles/mdschemas.json' % (os.getcwd())
with open(mdschemasfile, 'r') as f:
mdschemas=json.loads(f.read())
# available of sub dirs and extention
insubdir='/json'
infformat='json'
# read target_mdschema (degfault : B2FIND_schema) and set mapfile
if (target_mdschema and not target_mdschema.startswith('#')):
print('target_mdschema %s' % target_mdschema)
# community-mdschema root path
cmpath='%s/%s-%s/' % (self.base_outdir,community,mdprefix)
self.logger.info('\t|- Input path:\t%s' % cmpath)
subdirs=next(os.walk(cmpath))[1] ### [x[0] for x in os.walk(cmpath)]
fcount=0 # total counter of processed files
subsettag=re.compile(r'_\d+')
start = time.time()
# loop over all available subdirs
for subdir in sorted(subdirs) :
if mdsubset and not subdir.startswith(mdsubset) :
self.logger.warning('\t |- Subdirectory %s does not match %s - no processing required' % (subdir,mdsubset))
continue
elif self.fromdate :
datematch = re.search(r'\d{4}-\d{2}-\d{2}$', subdir[:-2])
if datematch :
subdirdate = datetime.datetime.strptime(datematch.group(), '%Y-%m-%d').date()
fromdate = datetime.datetime.strptime(self.fromdate, '%Y-%m-%d').date()
if (fromdate > subdirdate) :
self.logger.warning('\t |- Subdirectory %s has timestamp older than fromdate %s - no processing required' % (subdir,self.fromdate))
continue
else :
self.logger.warning('\t |- Subdirectory %s with timestamp newer than fromdate %s is processed' % (subdir,self.fromdate))
else:
self.logger.warning('\t |- Subdirectory %s does not contain a timestamp %%Y-%%m-%%d - no processing required' % subdir)
continue
else:
print('\t |- Subdirectory %s is processed' % subdir)
self.logger.debug('Processing of subdirectory %s' % subdir)
# check input path
inpath='%s/%s/%s' % (cmpath,subdir,insubdir)
if not os.path.exists(inpath):
self.logger.critical('Can not access directory %s' % inpath)
return results
files = list(filter(lambda x: x.endswith(infformat), os.listdir(inpath)))
results['tcount'] += len(list(files))
oldperc=0
err = None
self.logger.debug(' |- Processing of %s files in %s' % (infformat.upper(),inpath))
## start processing loop
startsubdir = time.time()
scount = 0
fcount=0 # counter per sub dir !
for filename in files:
## counter and progress bar
fcount+=1
if (fcount<scount): continue
perc=int(fcount*100/int(len(list(files))))
bartags=int(perc/5)
if perc%10 == 0 and perc != oldperc:
oldperc=perc
print ("\r\t [%-20s] %5d (%3d%%) in %d sec" % ('='*bartags, fcount, perc, time.time()-startsubdir ))
sys.stdout.flush()
self.logger.debug(' | m | %-4d | %-45s |' % (fcount,filename))
jsondata = dict()
datasetRecord = dict()
pathfname= inpath+'/'+filename
if ( os.path.getsize(pathfname) > 0 ):
with open(pathfname, 'r') as f:
try:
jsondata=json.loads(f.read(),encoding = 'utf-8')
except:
self.logger.error(' | [ERROR] Cannot load the json file %s' % pathfname)
results['ecount'] += 1
continue
else:
results['ecount'] += 1
continue
# get dataset id (CKAN name) from filename (a uuid generated identifier):
ds_id = os.path.splitext(filename)[0]
self.logger.warning(' | u | %-4d | %-40s |' % (fcount,ds_id))
# add some general CKAN specific fields to dictionary:
jsondata["name"] = ds_id
jsondata["state"]='active'
jsondata["groups"]=[{ "name" : community }]
jsondata["owner_org"]=self.ckanorg
# get OAI identifier from json data extra field 'oai_identifier':
if 'oai_identifier' not in jsondata :
jsondata['oai_identifier'] = [ds_id]
oai_id = jsondata['oai_identifier'][0]
self.logger.debug(" |-> identifier: %s\n" % (oai_id))
### CHECK JSON DATA for upload
jsondata=self.check(jsondata)
if jsondata == None :
self.logger.critical('File %s failed check and will not been uploaded' % filename)
continue
# generate get record request for field MetaDataAccess:
if (mdprefix == 'json'):
reqpre = source + '/dataset/'
mdaccess = reqpre + oai_id
else:
reqpre = source + '?verb=GetRecord&metadataPrefix=' + mdprefix
mdaccess = reqpre + '&identifier=' + oai_id
##HEW-MV2mapping!!! : urlcheck=self.check_url(mdaccess)
index1 = mdaccess
# exceptions for some communities:
if (community == 'clarin' and oai_id.startswith('mi_')):
mdaccess = 'http://www.meertens.knaw.nl/oai/oai_server.php?verb=GetRecord&metadataPrefix=cmdi&identifier=http://hdl.handle.net/10744/' + oai_id
elif (community == 'sdl'):
mdaccess =reqpre+'&identifier=oai::record/'+oai_id
elif (community == 'b2share'):
if mdsubset.startswith('trng') :
mdaccess ='https://trng-b2share.eudat.eu/api/oai2d?verb=GetRecord&metadataPrefix=marcxml&identifier='+oai_id
else:
mdaccess ='https://b2share.eudat.eu/api/oai2d?verb=GetRecord&metadataPrefix=marcxml&identifier='+oai_id
if self.check_url(mdaccess) == False :
logging.debug('URL to metadata record %s is broken' % (mdaccess))
else:
jsondata['MetaDataAccess']=mdaccess
jsondata['group']=community
## Prepare jsondata for upload to CKAN (decode UTF-8, build CKAN extra dict's, ...)
jsondata=self.json2ckan(jsondata)
# Set the tag ManagerVersion:
ManagerVersion = '2.3.1' ##HEW-??? Gloaal Variable ManagerVersion
jsondata['extras'].append({
"key" : "ManagerVersion",
"value" : '2.3.1' ##HEW-??? Gloaal Variable ManagerVersionManagerVersion
})
datasetRecord["EUDAT/B2FINDVERSION"]=ManagerVersion
### datasetRecord["B2FINDHOST"]=self.iphost
self.logger.debug(' JSON dump\n%s' % json.dumps(jsondata, sort_keys=True))
# determine checksum of json record and append
try:
encoding='utf-8' ##HEW-D 'ISO-8859-15' / 'latin-1'
checksum=hashlib.md5(json.dumps(jsondata, sort_keys=True).encode('latin1')).hexdigest()
except UnicodeEncodeError as err :
self.logger.critical(' %s during md checksum determination' % err)
checksum=None
else:
self.logger.debug('Checksum of JSON record %s' % checksum)
jsondata['version'] = checksum
datasetRecord["CHECKSUM"]=checksum
### check status of dataset (unknown/new/changed/unchanged)
dsstatus="unknown"
# check against handle server
handlestatus="unknown"
pidRecord=dict()
b2findds='http://b2find.eudat.eu/dataset/'+ds_id
ckands='http://'+self.iphost+'/dataset/'+ds_id
datasetRecord["URL"]=b2findds
datasetRecord["EUDAT/ROR"]=ckands
datasetRecord["EUDAT/PPID"]=''
datasetRecord["EUDAT/REPLICA"]=''
datasetRecord["EUDAT/METADATATYPE"]=mdschemas[mdprefix]
datasetRecord["EUDAT/B2FINDSTATUS"]="REGISTERED"
datasetRecord["EUDAT/B2FINDCOMMUNITY"]=community
datasetRecord["EUDAT/B2FINDSUBSET"]=mdsubset
if (self.cred): ##HEW-D??? options.handle_check):
pidAttrs=["URL","CHECKSUM","EUDAT/ROR","EUDAT/PPID","EUDAT/REPLICA","EUDAT/METADATATYPE","EUDAT/B2FINDSTATUS","EUDAT/B2FINDVERSION","EUDAT/B2FINDCOMMUNITY","EUDAT/B2FINDSUBSET"]
##HEW-D pidAttrs=["URL","CHECKSUM","JMDVERSION","B2FINDHOST","IS_METADATA","MD_STATUS","MD_SCHEMA","COMMUNITY","SUBSET"]
try:
pid = self.cred.get_prefix() + '/eudat-jmd_' + ds_id
rec = self.HandleClient.retrieve_handle_record_json(pid)
except Exception as err :
self.logger.error("%s in self.HandleClient.retrieve_handle_record_json(%s)" % (err,pid))
else:
self.logger.debug("Retrieved PID %s" % pid )
chargs={}
if rec : ## Handle exists
for pidAttr in pidAttrs :##HEW-D ["CHECKSUM","JMDVERSION","B2FINDHOST"] :
try:
pidRecord[pidAttr] = self.HandleClient.get_value_from_handle(pid,pidAttr,rec)
except Exception as err:
self.logger.critical("%s in self.HandleClient.get_value_from_handle(%s)" % (err,pidAttr) )
else:
self.logger.debug("Got value %s from attribute %s sucessfully" % (pidRecord[pidAttr],pidAttr))
if ( pidRecord[pidAttr] == datasetRecord[pidAttr] ) :
chmsg="-- not changed --"
if pidAttr == 'CHECKSUM' :
handlestatus="unchanged"
self.logger.info(" |%-12s\t|%-30s\t|%-30s|" % (pidAttr,pidRecord[pidAttr],chmsg))
else:
chmsg=datasetRecord[pidAttr]
handlestatus="changed"
chargs[pidAttr]=datasetRecord[pidAttr]
self.logger.info(" |%-12s\t|%-30s\t|%-30s|" % (pidAttr,pidRecord[pidAttr],chmsg))
else:
handlestatus="new"
dsstatus=handlestatus
if handlestatus == "unchanged" : # no action required :-) !
self.logger.warning('No action required :-) - next record')
results['ncount']+=1
continue
elif handlestatus == "changed" : # update dataset !
self.logger.warning('Update handle and dataset !')
else : # create new handle !
self.logger.warning('Create handle and dataset !')
chargs=datasetRecord
# check against CKAN database
ckanstatus | |
<reponame>Hicham-Belhseine/tarp-detect
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File name: "SL_CIP.py"
Date created: 1/17/2018
Date last modified: 10/4/2018
Python Version: 3.6
A simple script for determining a region of interest based on target color
and shape that switches towards the use of the CAMShift algorithm after a
region of interest is found.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import cv2
import numpy as np
import imutils
import math
class ROIDetection:
    """
    A set of functions useful for searching for and isolating a region of interest
    based on color.

    Two independent HSV color ranges are tracked; a region of interest is
    accepted when one large contour of each color is found close together
    and with comparable areas (two adjacent tarps).
    """

    def __init__(self, rangeMinOne, rangeMaxOne, rangeMinTwo, rangeMaxTwo):
        # Lower/upper HSV bounds for both target colors.
        self.rangeMinOne = rangeMinOne
        self.rangeMaxOne = rangeMaxOne
        self.rangeMinTwo = rangeMinTwo
        self.rangeMaxTwo = rangeMaxTwo

    def searchForROI(self, image):
        """Locate the largest contour of each target color in *image*.

        Stores the contour lists, the largest contour of each color and its
        center point on the instance for later use by checkCenters/getROI.
        """
        # Converting image to HSV
        imageHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # Both masks are eroded and dilated to remove any noise from the image
        # both inside and outside the specified color ranges
        # First mask for color range one
        maskOne = cv2.inRange(imageHSV, self.rangeMinOne, self.rangeMaxOne)
        maskOne = cv2.erode(maskOne, None, iterations=1)
        maskOne = cv2.dilate(maskOne, None, iterations=1)
        # Second mask for color range two
        maskTwo = cv2.inRange(imageHSV, self.rangeMinTwo, self.rangeMaxTwo)
        maskTwo = cv2.erode(maskTwo, None, iterations=1)
        maskTwo = cv2.dilate(maskTwo, None, iterations=1)
        # Get largest contours in the first mask.
        # NOTE(review): the [0]/[1] selection matches OpenCV 2 vs 3 return
        # signatures; OpenCV 4 returns a 2-tuple again — confirm cv2 version.
        contoursOne = cv2.findContours(maskOne, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        self.contoursOne = contoursOne[0] if imutils.is_cv2() else contoursOne[1]
        # Get largest contours in the second mask
        contoursTwo = cv2.findContours(maskTwo, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        self.contoursTwo = contoursTwo[0] if imutils.is_cv2() else contoursTwo[1]
        # Check contour location
        self.centerContourOne, self.contourOne = self.contourLocation(self.contoursOne)
        self.centerContourTwo, self.contourTwo = self.contourLocation(self.contoursTwo)

    def checkCenters(self):
        """Return True when the two contour centers are close enough and the
        contour areas are similar enough to be two adjacent tarps."""
        # First, check if the contours are empty
        if not self.centerContourOne or not self.centerContourTwo:
            return False
        x1, y1 = self.centerContourOne
        x2, y2 = self.centerContourTwo
        # Size of region: twice the center-to-center distance
        regionDimensions = 2 * int(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))
        if regionDimensions > 800:
            return False
        # Check to see if the contour areas are similar
        areaCntOne = cv2.contourArea(max(self.contoursOne, key=cv2.contourArea))
        areaCntTwo = cv2.contourArea(max(self.contoursTwo, key=cv2.contourArea))
        # Top and bottom error bounds (+60% / -60% of area one)
        errorTop = 1.6 * areaCntOne
        errorBot = .4 * areaCntOne
        if areaCntTwo < errorBot or areaCntTwo > errorTop:
            return False
        if areaCntOne == 0 or areaCntTwo == 0:
            return False
        return True

    def contourLocation(self, contourList):
        """Return ([centerX, centerY], contour) for the largest contour in
        *contourList*, or ([], []) when the list is empty or the largest
        contour has zero area."""
        # BUG FIX: the original tested `len(contourList) is 0` — an identity
        # comparison that only works via CPython's small-int caching and
        # raises a SyntaxWarning on modern Python. Use a value comparison.
        if len(contourList) == 0:
            return [], []
        contour = max(contourList, key=cv2.contourArea)
        moment = cv2.moments(contour)
        # Guard against a zero-area contour (division by zero).
        if moment["m00"] != 0:
            centerX = int(moment["m10"] / moment["m00"])
            centerY = int(moment["m01"] / moment["m00"])
            return [centerX, centerY], contour
        return [], []

    def getROI(self, image):
        """Crop *image* around the midpoint of the two contour centers.

        Returns (imageROI, trackWindow) where trackWindow is the
        (x, y, w, h) tuple used to initialize CAMShift.
        """
        # Get the region bounds
        x1 = self.centerContourOne[0]
        x2 = self.centerContourTwo[0]
        y1 = self.centerContourOne[1]
        y2 = self.centerContourTwo[1]
        # Region half-size: four times the center-to-center distance
        regionDimensions = 4 * int(math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))
        midPoint = (int((x1 + x2) / 2), int((y1 + y2) / 2))
        # Create a minimally enclosing rectangle to contain the image
        sideLeft = midPoint[0] - regionDimensions
        sideRight = midPoint[0] + regionDimensions
        sideTop = midPoint[1] - regionDimensions
        sideBottom = midPoint[1] + regionDimensions
        # Ensure the rectangle does not go past the left/top bounds of the
        # image. NOTE(review): only the lower bound is clamped; right/bottom
        # may exceed the image size — numpy slicing tolerates that.
        sideLeft = max(sideLeft, 0)
        sideRight = max(sideRight, 0)
        sideTop = max(sideTop, 0)
        sideBottom = max(sideBottom, 0)
        # Crop original image
        imageROI = image[sideTop:sideBottom, sideLeft:sideRight]
        # region dimension
        trackWindow = (sideLeft, sideTop, regionDimensions, regionDimensions)
        return imageROI, trackWindow
class CAMShift:
    """
    A set of functions useful for initializing a region of interest and continually
    tracking it until certain specifications are met.
    """

    def __init__(self, rangeMinOne, rangeMaxOne, rangeMinTwo, rangeMaxTwo):
        # Termination criteria (kept for API compatibility; tracking uses a
        # local criterion in camShiftTracking)
        self.termCriteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 2, 1)
        # Tarp HSV color ranges
        self.rangeMinOne = rangeMinOne
        self.rangeMaxOne = rangeMaxOne
        self.rangeMinTwo = rangeMinTwo
        self.rangeMaxTwo = rangeMaxTwo

    def getROIMask(self, roi):
        """Return a binary mask of pixels in *roi* that fall in either of the
        two HSV color ranges (used for histogram backprojection)."""
        roiHSV = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        # Masks for both ranges
        maskOne = cv2.inRange(roiHSV, self.rangeMinOne, self.rangeMaxOne)
        maskTwo = cv2.inRange(roiHSV, self.rangeMinTwo, self.rangeMaxTwo)
        # combine masks into a single image
        combinedMask = cv2.bitwise_or(maskOne, maskTwo)
        return combinedMask

    def camShiftTracking(self, roi, roiMask, image, imageStream, trackWindow):
        """Track the region of interest through *imageStream* using CAMShift.

        Stops when the tarps have been lost for too many consecutive frames
        (error budget), when the stream repeatedly fails to deliver frames,
        or when ESC is pressed.
        """
        # Termination criteria for each CamShift iteration
        termCrit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 3, 1)
        self.trackWindow = trackWindow
        roiHSV = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        # Histogram backprojection on the hue channel
        roiHist = cv2.calcHist([roiHSV], [0], roiMask, [16], [0, 180])
        roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
        # initial error is zero (frames where tarps are not detected)
        error = 0
        while error < 8:
            # Get the next image in the image stream
            ret, image = imageStream.read()
            # Check to see if image is not NoneType
            if ret == True:
                # Get the HSV image
                imageHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
                dst = cv2.calcBackProject([imageHSV], [0], roiHist, [0, 180], 1)
                # Find new tracking window
                ret, self.trackWindow = cv2.CamShift(dst, self.trackWindow, termCrit)
                points = np.int0(cv2.boxPoints(ret))
                # BUG FIX: the original guard `points[0] is [0, 0]` compared
                # identity against a fresh list literal and was always False,
                # so degenerate boxes were never skipped. Compare by value.
                if np.array_equal(points[0], [0, 0]):
                    continue
                imageCAMShift = cv2.polylines(image, [points], True, 255, 1)
                # New window of analysis
                windowOfAnalysis = self.getWindow(points)
                # Define new region of interest
                roiNew = image[windowOfAnalysis[2]:windowOfAnalysis[3], windowOfAnalysis[0]:windowOfAnalysis[1]]
                # check if tarps are found
                tarpsFound = self.findTarps(image, roiNew, windowOfAnalysis)
                # Updating error count (successful detection pays back one error)
                if not tarpsFound:
                    error += 1
                else:
                    cv2.imshow("image", image)
                    if error > 0:
                        error -= 1
            else:
                # Frame grab failed; give up after 4 consecutive failures
                error += 1
                if error == 4:
                    break
            # BUG FIX: the original used `is 27` — an identity comparison on
            # an int that only works via CPython small-int caching.
            if cv2.waitKey(1) & 0xFF == 27:
                break

    def getWindow(self, points):
        """Return the (minX, maxX, minY, maxY) bounding window of *points*."""
        xList = [point[0] for point in points]
        yList = [point[1] for point in points]
        return min(xList), max(xList), min(yList), max(yList)

    def findTarps(self, image, roi, points):
        """Search *roi* for one contour of each tarp color and, if both are
        found with similar areas, outline them on *image* and return True."""
        roiHSV = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        # Both masks are eroded and dilated to remove any noise from the image
        # both inside and outside the specified color ranges
        # First mask for color range one
        maskOne = cv2.inRange(roiHSV, self.rangeMinOne, self.rangeMaxOne)
        maskOne = cv2.erode(maskOne, None, iterations=1)
        maskOne = cv2.dilate(maskOne, None, iterations=1)
        # Second mask for color range two
        maskTwo = cv2.inRange(roiHSV, self.rangeMinTwo, self.rangeMaxTwo)
        maskTwo = cv2.erode(maskTwo, None, iterations=1)
        maskTwo = cv2.dilate(maskTwo, None, iterations=1)
        # Get both contours (OpenCV 2 vs 3 return-signature handling)
        contoursOne = cv2.findContours(maskOne, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contoursOne = contoursOne[0] if imutils.is_cv2() else contoursOne[1]
        contoursTwo = cv2.findContours(maskTwo, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contoursTwo = contoursTwo[0] if imutils.is_cv2() else contoursTwo[1]
        # Find the centers and the minimally enclosing radius of the contours
        self.centerContourOne, radiusOne = self.contourLocation(contoursOne)
        self.centerContourTwo, radiusTwo = self.contourLocation(contoursTwo)
        # Check if either contour is empty
        if not self.centerContourOne:
            return False
        if not self.centerContourTwo:
            return False
        # Check similarity in contour area
        areaCntOne = cv2.contourArea(max(contoursOne, key=cv2.contourArea))
        areaCntTwo = cv2.contourArea(max(contoursTwo, key=cv2.contourArea))
        # Error top and bottom bounds (+/-50% of area one)
        errorTop = 1.5 * areaCntOne
        errorBot = .5 * areaCntOne
        if areaCntTwo < errorBot or areaCntTwo > errorTop:
            return False
        if areaCntOne == 0 or areaCntTwo == 0:
            return False
        # Translate contour centers from ROI coordinates to full-image coordinates
        self.centerContourOne[0] = points[0] + self.centerContourOne[0]
        self.centerContourOne[1] = points[2] + self.centerContourOne[1]
        self.centerContourTwo[0] = points[0] + self.centerContourTwo[0]
        self.centerContourTwo[1] = points[2] + self.centerContourTwo[1]
        # Outline the tarps in the image
        image = cv2.circle(image, tuple(self.centerContourOne), int(radiusOne), (0, 255, 0), 2)
        image = cv2.circle(image, tuple(self.centerContourTwo), int(radiusTwo), (0, 255, 0), 2)
        return True

    def contourLocation(self, contourList):
        """Return ([centerX, centerY], radius) for the largest contour in
        *contourList*, or ([], []) when the list is empty or the largest
        contour has zero area."""
        # BUG FIX: `len(contourList) is 0` was an identity comparison on an
        # int; use a value comparison instead.
        if len(contourList) == 0:
            return [], []
        # Take the largest contour
        contour = max(contourList, key=cv2.contourArea)
        # Center of contour via image moments; bypass div-by-zero for
        # zero-area contours
        moment = cv2.moments(contour)
        if moment["m00"] != 0:
            centerX = int(moment["m10"] / moment["m00"])
            centerY = int(moment["m01"] / moment["m00"])
            (circleX, circleY), radius = cv2.minEnclosingCircle(contour)
            return [centerX, centerY], radius
        return [], []
def main():
# Defining tarp predetermined color values
# Peach Ranges
rangeMinOne = np.array([152, 40, 120], dtype=np.uint8)
rangeMaxOne = np.array([190, 101, 180], dtype=np.uint8)
# Blue Ranges
rangeMinTwo = np.array([90, 100, 60], dtype=np.uint8)
rangeMaxTwo = np.array([134, 240, 170], dtype=np.uint8)
# Image Stream
imageStream = cv2.VideoCapture("CIP_test.mp4")
# Take first image from camera and get shape
ret, image = imageStream.read()
height, width, layers = image.shape
# Prep Region of Interest finder
roiDetect = ROIDetection(rangeMinOne, rangeMaxOne, rangeMinTwo, rangeMaxTwo)
# | |
np.zeros([len(loc),len(brd)*len(snsr)+1,129])
cnt = 0
else:
handle_list = []
title_list = []
for bo in brd:
for se in snsr:
if not pool:
# generate individual figure
fig = plt.figure(figsize=figsize)
handle_list.append(fig)
title = 'Spectral Density: %s L1-L6 %s %s %s Board %i, Sensor %i' % \
(cols[0].Gas, cols[0].SensorVoltage, cols[0].FanSpeed, cols[0].Trial, bo, se)
if show_title:
fig.suptitle(title)
title_list.append(title)
else:
# increment psd_list counter
cnt += 1
for i in range(len(loc)):
try:
# get time & board & sensor
# x = cols[i]._Time[win]
board = getattr(cols[i], 'Board%i' % (bo))
sensor = getattr(board, 'Sensor%i' % (se))
except:
print('(psd) -- Missing: L%i, Board %i, Sensor %i' % (i+1, bo, se))
continue
if not sensor.is_valid:
if not include_invalide:
print('(psd) -- Invalid: L%i, Board %i, Sensor %i' % (i+1, bo, se))
continue
# calculate psd using welch's method
y = sensor()[win]
y = sp.signal.filtfilt(b, a, y)
frq, psd = sp.signal.welch(y, fs=100, window=win_fcn)
if not pool:
# plot fft data
ax = plt.subplot(111)
line = plt.semilogy(frq, psd, color=rgb_tuples[i*45], \
linewidth=2)
lines.append(line[0])
else:
# add data to list
psd_list[i][cnt] += psd
if not pool:
# maybe add legend
if show_legend:
ax.legend(iter(lines), ['L%i'%(i) for i in loc], loc='center right')
plt.xlabel('Frequency [Hz]')
plt.ylabel('PSD [V**2/Hz]')
plt.xlim(0, 2.0)
plt.ylim(10e-5, 10e2)
# create custom minor ticks
# locator = AutoMinorLocator()
# ax.xaxis.set_minor_locator(locator)
# ax.tick_params(axis='x',which='minor')
# ax.grid(which='both')
plt.show()
if pool:
# create pooled figure
fig = plt.figure(figsize=figsize)
title = 'Spectral Density: %s L1-L6 %s %s, %i boards, %i sensors' % \
(cols[0].Gas, cols[0].SensorVoltage, cols[0].FanSpeed, len(brd), len(snsr))
sum_psd_list = None
if show_title:
fig.suptitle(title)
sum_psd_list = np.zeros([len(loc),len(frq)])
std_psd_list = np.zeros([len(loc),len(frq)])
for i in range(len(loc)):
ax = plt.subplot(111)
for j in range(1,np.shape(psd_list)[1]):
sum_psd_list[i] += psd_list[i][j]
std_psd_list = np.std(psd_list[i],axis=0) / ((len(brd)*len(snsr))+1)
sum_psd_list[i] /= ((len(brd)*len(snsr))+1)
line = plt.semilogy(frq, sum_psd_list[i], color=rgb_tuples[i*45], \
linewidth=2)
plt.errorbar(frq, sum_psd_list[i], yerr=std_psd_list, color=rgb_tuples[i*45])
lines.append(line[0])
if show_legend:
ax.legend(iter(lines), ['L%i'%(i) for i in loc], loc='center right')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Power spectral density [V**2/Hz]')
plt.xlim(0, 2.0)
plt.ylim(10e-5, 10e2)
locator = AutoMinorLocator()
ax.xaxis.set_minor_locator(locator)
ax.tick_params(axis='x',which='minor')
ax.grid(which='both')
try:
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
except AttributeError:
pass
plt.show()
return fig, title, sum_psd_list
else:
return handle_list, title_list
def power_distrib(ds, **kwargs):
    """
    Plot distribution of normalized spectral power over frequency range.

    Pools the FFT magnitude spectra of all valid sensors over the requested
    parameter combinations, normalizes the pooled spectrum to unit total
    power and plots it.

    Returns figure handle and compiled fft data.

    Keyword arguments:
    gas -- int or list of range(1,12), gas name identifier
    loc -- int or list of range(1,7), column location identifier
    voltage -- int or list of range(1,6) , sensor voltage identifier
    speed -- int or list of range(1,4) , fan speed identifier
    trial -- int or list of range(1,21), experiment repetitions
    board -- int or list of range(1,10), board identifier
    sensor -- int or list of range(1,9), sensor identifier
    win_fcn -- string, fourier transform window function to reduce leakage
    win -- slice, time series selection window
    """
    # TODO: Implement errorbars
    assert isinstance(ds, DataSelector)
    # set default values
    # BUG FIX: the defaults were bare range() objects, which fail the
    # isinstance(..., (int, list)) assertions below under Python 3.
    default_gas = list(range(1, 12))
    default_loc = list(range(1, 7))
    default_voltage = 5
    default_speed = 1
    default_trial = list(range(1, 21))
    default_board = list(range(1, 10))
    default_sensor = list(range(1, 9))
    default_win = slice(10000, 20000)
    # get arguments
    gas = kwargs.get('gas', default_gas)
    loc = kwargs.get('loc', default_loc)
    voltage = kwargs.get('voltage', default_voltage)
    speed = kwargs.get('speed', default_speed)
    trial = kwargs.get('trial', default_trial)
    brd = kwargs.get('board', default_board)
    snsr = kwargs.get('sensor', default_sensor)
    win_fcn = kwargs.get('win_fcn', None)
    win = kwargs.get('win', default_win)
    # assert argument datatype
    assert isinstance(gas, (int, list))
    assert isinstance(loc, (int, list))
    assert isinstance(voltage, (int, list))
    assert isinstance(speed, (int, list))
    assert isinstance(trial, (int, list))
    assert isinstance(brd, (int, list))
    assert isinstance(snsr, (int, list))
    assert isinstance(win_fcn, (str, type(None)))
    assert isinstance(win, slice)
    # cast int entries to list
    if isinstance(gas, int):
        gas = [gas]
    if isinstance(loc, int):
        loc = [loc]
    if isinstance(voltage, int):
        voltage = [voltage]
    if isinstance(speed, int):
        speed = [speed]
    if isinstance(trial, int):
        trial = [trial]
    if isinstance(brd, int):
        brd = [brd]
    if isinstance(snsr, int):
        snsr = [snsr]
    # assert argument value
    assert min(gas) > 0 and max(gas) <= 11
    assert min(loc) > 0 and max(loc) <= 6
    assert min(voltage) > 0 and max(voltage) <= 5
    assert min(speed) > 0 and max(speed) <= 3
    assert min(trial) > 0 and max(trial) <= 20
    assert min(brd) > 0 and max(brd) <= 9
    assert min(snsr) > 0 and max(snsr) <= 8
    assert win.start >= 0 and win.stop <= 26000
    # try to construct window function (unknown names fall back to None,
    # i.e. no window, as in the original)
    if win_fcn is not None:
        window_fcns = {'hamming': np.hamming, 'blackman': np.blackman,
                       'hanning': np.hanning, 'bartlett': np.bartlett}
        fcn = window_fcns.get(win_fcn.lower())
        win_fcn = fcn(win.stop - win.start) if fcn is not None else None
    # 0.01..1 Hz bandpass filter (sampling rate presumably 100 Hz — see fftfreq below)
    b, a = sp.signal.butter(2, (float(0.01) / 50, 1 / float(50)), 'bandpass')
    # pre-allocate one row per possible parameter combination
    n = len(gas) * len(loc) * len(speed) * len(voltage) * len(trial) * len(brd) * len(snsr)
    fft_list = np.zeros([n, int((win.stop - win.start) / 2)])
    cnt = 0
    # iterate over all parameters
    for g in tqdm(gas):
        for l in loc:
            for s in speed:
                for v in voltage:
                    for t in trial:
                        try:
                            # try to get column
                            cols = ds.select(gas=g, loc=l, voltage=v, \
                                             speed=s, trial=t)
                            x = cols[0]._Time[win]
                        except Exception:
                            print('-- Missing: Gas %i, L%i, Trial %i' % (g, l, t))
                            continue
                        for bo in brd:
                            try:
                                # get board
                                board = getattr(cols[0], 'Board%i' % (bo))
                            except Exception:
                                print('-- Missing: Gas %i, L%i, %s, %s, Trial %i, Board %i' % \
                                    (g, l, ds.AltFanSpeeds[s], ds.SensorVoltages[v], t, bo))
                                continue
                            for se in snsr:
                                # get sensor
                                sensor = getattr(board, 'Sensor%i' % (se))
                                if sensor.is_valid:
                                    # band-pass filter, then fourier transform
                                    y = sensor()[win]
                                    y = sp.signal.filtfilt(b, a, y)
                                    if win_fcn is not None:
                                        fft = np.abs(np.fft.fft(y * win_fcn))
                                    else:
                                        fft = np.abs(np.fft.fft(y))
                                    # keep the positive-frequency half only
                                    fft = fft[range(int(len(x) / 2))]
                                    # pool data
                                    fft_list[cnt] = fft
                                    cnt += 1
    # BUG FIX: the original sliced [0:cnt-1], silently dropping the last
    # accumulated spectrum; keep all cnt valid rows.
    fft_list = fft_list[:cnt]
    std_list = np.std(fft_list, axis=0)
    fft_list = np.sum(fft_list, axis=0)
    # normalize pooled data to unit total power
    std_list /= np.sum(fft_list)
    fft_list /= np.sum(fft_list)
    # calculate frequencies (dt = 0.01 s, i.e. 100 Hz sampling)
    # NOTE(review): x is bound inside the selection loop; if every selection
    # failed this raises NameError, as in the original.
    frq = np.fft.fftfreq(len(x), 0.01)
    frq = frq[range(int(len(x) / 2))]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.plot(frq, fft_list, 'k')
    plt.fill_between(frq, fft_list, color='gray')
    # TODO: Implement xlim & ylim as input variables.
    plt.xlim(0.0, 0.5)
    plt.ylim(0.0, 0.25)
    # create custom minor ticks
    locator = AutoMinorLocator()
    ax.xaxis.set_minor_locator(locator)
    ax.tick_params(axis='x', which='minor')
    ax.grid(which='both')
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Power ratio')
    # calculate amount of power in the displayed range (first 50 bins)
    pwr = np.round(sum(fft_list[range(0, 50)]) * 100, decimals=2)
    print('The displayed range contains %s %% of the total power' % (pwr))
    print('%i Columns' % cnt)
    try:
        # maximize the window where the backend supports it
        mng = plt.get_current_fig_manager()
        mng.window.showMaximized()
    except AttributeError:
        pass
    plt.show()
    return fig, fft_list
def peak_phase(ds, **kwargs):
"""
Plot distance-based differences in onset and recovery timing.
Highlight area around the selected window.
Returns figure handle and title.
Keyword arguments:
gas -- int of range(1,12), gas name identifier
loc -- int of range(1,7), column location identifier
voltage -- int of range(1,6) , sensor voltage identifier
speed -- int of range(1,4) , fan speed identifier
trial -- int of range(1,21), experiment repetitions
board -- int of range(1,10), board identifier
sensor -- int of range(1,9), sensor identifier
win -- slice, time series selection window
show_legend -- bool, if true, display legend
"""
assert isinstance(ds, DataSelector)
# set default values
default_gas = 1
default_voltage = 5
default_speed = 1
default_trial = 1
default_board = 5
default_sensor = 5
default_win = slice(10000, 20000)
# get arguments
gas = kwargs.get('gas', default_gas)
voltage = kwargs.get('voltage', default_voltage)
speed = kwargs.get('speed', default_speed)
trial = kwargs.get('trial', default_trial)
brd = kwargs.get('board', default_board)
snsr = kwargs.get('sensor', default_sensor)
win = kwargs.get('win', default_win)
show_legend = kwargs.get('show_legend', True)
# assert arguments type & value
assert isinstance(gas, int) and gas in range(1,12)
assert isinstance(voltage, int) and voltage in range(1,6)
assert isinstance(speed, int) and speed in range(1,4)
assert isinstance(trial, int) and trial in range(1,21)
assert isinstance(brd, int) and brd in range(1,10)
assert isinstance(snsr, int) and snsr in range(1,9)
assert isinstance(win, slice)
assert isinstance(show_legend, bool)
# select column
cols = ds.select(gas = gas, speed = speed, voltage = voltage, trial = trial)
# create title
title = 'Peak-phase: | |
f"Subscription {subscription_id}"
readable_output = tableToMarkdown(title, sub, headerTransform=pascalToSpace)
outputs = {"GoogleCloudPubSubSubscriptions(val && val.name === obj.name)": sub}
return readable_output, outputs, sub
def create_subscription_command(
    client: PubSubClient,
    subscription_id: str,
    topic_id: str,
    project_id: str,
    push_endpoint: str = "",
    push_attributes: str = "",
    ack_deadline_seconds: str = "",
    retain_acked_messages: str = "",
    message_retention_duration: str = "",
    labels: str = "",
    expiration_ttl: str = "",
) -> Tuple[str, dict, dict]:
    """
    Create a new Pub/Sub subscription attached to an existing topic.

    Requires one of the following OAuth scopes:
        https://www.googleapis.com/auth/pubsub
        https://www.googleapis.com/auth/cloud-platform

    :param client: GoogleClient
    :param subscription_id: Name of the created subscription.
    :param topic_id: Name of the topic from which the subscription is receiving messages.
    :param project_id: Name of the project from which the subscription is receiving messages.
    :param push_endpoint: A URL locating the endpoint to which messages should be pushed.
    :param push_attributes: Input format: "key=val" pairs separated by ",".
    :param ack_deadline_seconds: The amount of time Pub/Sub waits for the subscriber to ack.
    :param retain_acked_messages: if 'true' then retain acknowledged messages
    :param message_retention_duration: How long to retain unacknowledged messages
    :param labels: Input format: "key=val" pairs separated by ",".
    :param expiration_ttl: The "time-to-live" duration for the subscription.
    :return: (human readable output, context outputs, raw API response)
    """
    sub_name = GoogleNameParser.get_subscription_project_name(
        project_id, subscription_id
    )
    topic_name = GoogleNameParser.get_topic_name(project_id, topic_id)
    raw_sub = client.create_subscription(
        sub_name,
        topic_name,
        push_endpoint,
        attribute_pairs_to_dict(push_attributes),
        ack_deadline_seconds,
        retain_acked_messages,
        message_retention_duration,
        attribute_pairs_to_dict(labels),
        expiration_ttl,
    )
    sub = dict(raw_sub)
    # Render the markdown table before enriching the entry with local-only
    # fields, so the table mirrors the raw API response.
    readable_output = tableToMarkdown(
        f"Subscription {subscription_id} was created successfully", sub
    )
    sub["projectName"] = project_id
    sub["subscriptionName"] = subscription_id
    sub["deliveryType"] = "Push" if sub.get("pushConfig") else "Pull"
    return readable_output, {"GoogleCloudPubSubSubscriptions": sub}, raw_sub
def update_subscription_command(
    client: PubSubClient,
    subscription_id: str,
    topic_id: str,
    update_mask: str,
    project_id: str,
    push_endpoint: str = "",
    push_attributes: str = "",
    ack_deadline_seconds: str = "",
    retain_acked_messages: str = "",
    message_retention_duration: str = "",
    labels: str = "",
    expiration_ttl: str = "",
) -> Tuple[str, dict, dict]:
    """
    Update an existing subscription (only the fields named in update_mask).

    Requires one of the following OAuth scopes:
        https://www.googleapis.com/auth/pubsub
        https://www.googleapis.com/auth/cloud-platform

    :param client: GoogleClient
    :param subscription_id: Name of the subscription to update.
    :param topic_id: Name of the topic from which the subscription is receiving messages.
    :param update_mask: Indicates which fields in the provided subscription to update.
    :param project_id: Name of the project from which the subscription is receiving messages.
    :param push_endpoint: A URL locating the endpoint to which messages should be pushed.
    :param push_attributes: Input format: "key=val" pairs separated by ",".
    :param ack_deadline_seconds: The amount of time Pub/Sub waits for the subscriber to ack.
    :param retain_acked_messages: if 'true' then retain acknowledged messages
    :param message_retention_duration: How long to retain unacknowledged messages
    :param labels: Input format: "key=val" pairs separated by ",".
    :param expiration_ttl: The "time-to-live" duration for the subscription.
    :return: (human readable output, context outputs, raw API response)
    """
    sub_name = GoogleNameParser.get_subscription_project_name(
        project_id, subscription_id
    )
    topic_name = GoogleNameParser.get_topic_name(project_id, topic_id)
    raw_sub = client.update_subscription(
        sub_name,
        topic_name,
        update_mask,
        push_endpoint,
        attribute_pairs_to_dict(push_attributes),
        ack_deadline_seconds,
        retain_acked_messages,
        message_retention_duration,
        attribute_pairs_to_dict(labels),
        expiration_ttl,
    )
    sub = dict(raw_sub)
    # Build the table before adding the local-only convenience fields.
    readable_output = tableToMarkdown(
        f"Subscription {subscription_id} was updated successfully", sub
    )
    sub["projectName"] = project_id
    sub["subscriptionName"] = subscription_id
    sub["deliveryType"] = "Push" if sub.get("pushConfig") else "Pull"
    outputs = {"GoogleCloudPubSubSubscriptions(val && val.name === obj.name)": sub}
    return readable_output, outputs, raw_sub
def create_topic_command(
    client: PubSubClient,
    topic_id: str,
    project_id: str,
    allowed_persistence_regions: str = "",
    kms_key_name: str = None,
    labels: str = None,
) -> Tuple[str, dict, dict]:
    """
    Create a new topic.

    :param client: PubSub client instance
    :param topic_id: topic ID
    :param project_id: project ID
    :param allowed_persistence_regions: comma-separated string of IDs of GCP regions
    :param kms_key_name: The full name of the Cloud KMS CryptoKey to be used to restrict access on this topic.
    :param labels: "key=val" pairs separated by ",".
    :return: (human readable output, context outputs, raw API response)
    """
    raw_topic = client.create_topic(
        GoogleNameParser.get_topic_name(project_id, topic_id),
        attribute_pairs_to_dict(labels),
        argToList(allowed_persistence_regions),
        kms_key_name,
    )
    readable_output = tableToMarkdown(
        f"Topic **{topic_id}** was created successfully",
        raw_topic,
        headerTransform=pascalToSpace,
    )
    return readable_output, {"GoogleCloudPubSubTopics": raw_topic}, raw_topic
def delete_topic_command(
    client: PubSubClient, project_id: str, topic_id: str
) -> Tuple[str, dict, dict]:
    """
    Delete an existing topic.

    :param client: PubSub client instance
    :param project_id: project ID
    :param topic_id: topic ID
    :return: (success message, empty context, raw API response)
    """
    raw_topic = client.delete_topic(
        GoogleNameParser.get_topic_name(project_id, topic_id)
    )
    return f"Topic **{topic_id}** was deleted successfully", {}, raw_topic
def update_topic_command(
    client: PubSubClient,
    project_id: str,
    topic_id: str,
    update_mask: str,
    allowed_persistence_regions: str = "",
    kms_key_name: str = None,
    labels: str = None,
) -> Tuple[str, dict, dict]:
    """
    Update an existing topic (only the fields named in update_mask).

    :param client: PubSub client instance
    :param project_id: project ID
    :param topic_id: topic ID
    :param update_mask: Indicates which fields in the provided topic to update.
    :param allowed_persistence_regions: comma-separated string of IDs of GCP regions
    :param kms_key_name: The full name of the Cloud KMS CryptoKey to be used to restrict access on this topic.
    :param labels: "key=val" pairs separated by ",".
    :return: (human readable output, context outputs, raw API response)
    """
    raw_topic = client.update_topic(
        GoogleNameParser.get_topic_name(project_id, topic_id),
        attribute_pairs_to_dict(labels),
        argToList(allowed_persistence_regions),
        kms_key_name,
        update_mask,
    )
    readable_output = tableToMarkdown(
        f"Topic {topic_id} was updated successfully",
        raw_topic,
        headerTransform=pascalToSpace,
    )
    outputs = {"GoogleCloudPubSubTopics(val && val.name === obj.name)": raw_topic}
    return readable_output, outputs, raw_topic
def seek_message_command(
    client: PubSubClient,
    project_id: str,
    subscription_id: str,
    time_string: str = None,
    snapshot: str = None,
) -> Tuple[str, dict, dict]:
    """
    Seek a subscription to a given point in time or to a snapshot.

    Requires one of the following OAuth scopes:
        https://www.googleapis.com/auth/pubsub
        https://www.googleapis.com/auth/cloud-platform

    :param client: GoogleClient
    :param project_id: ID of the project that owns the subscription.
    :param subscription_id: ID of the subscription to seek, without project/topic prefix.
    :param time_string: A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
    :param snapshot: The snapshot to seek to.
    :return: (success message, empty context, raw API response)
    """
    # Exactly one of the two targets is required; return_error aborts execution.
    if not time_string and not snapshot:
        return_error("Please provide either a time_string or a snapshot")
    sub_name = GoogleNameParser.get_subscription_project_name(
        project_id, subscription_id
    )
    raw_res = client.subscription_seek_message(sub_name, time_string, snapshot)
    if time_string:
        seek_target = f"time: {time_string}"
    else:
        seek_target = f"snapshot:{snapshot}"
    readable_output = f"Message seek was successful for **{seek_target}**"
    return readable_output, {}, raw_res
def snapshot_list_command(
    client: PubSubClient,
    project_id: str,
    topic_id: str = None,
    page_size: str = None,
    page_token: str = None,
) -> Tuple[str, dict, dict]:
    """
    List snapshots for a topic (when topic_id is given) or for a whole project.

    Requires one of the following OAuth scopes:
        https://www.googleapis.com/auth/pubsub
        https://www.googleapis.com/auth/cloud-platform

    :param client: GoogleClient
    :param project_id: project id
    :param topic_id: optional topic to scope the listing to
    :param page_size: page size
    :param page_token: page token, as returned from the api
    :return: (human readable output, context outputs, raw API response)
    """
    if topic_id:
        res = client.get_topic_snapshots_list(
            GoogleNameParser.get_topic_name(project_id, topic_id),
            page_size,
            page_token,
        )
        title = f"Snapshots for topic {topic_id}"
    else:
        res = client.get_project_snapshots_list(
            GoogleNameParser.get_project_name(project_id), page_size, page_token
        )
        title = f"Snapshots for project {project_id}"
    snapshots = list(res.get("snapshots", []))
    readable_output = tableToMarkdown(title, snapshots, ["name"])
    outputs = {"GoogleCloudPubSubSnapshots(val && val.name === obj.name)": snapshots}
    next_page_token = res.get("nextPageToken")
    if next_page_token:
        outputs["GoogleCloudPubSub.Snapshots.nextPageToken"] = next_page_token
        readable_output += f"**Next Page Token: {next_page_token}**"
    return readable_output, outputs, res
def snapshot_create_command(
    client: PubSubClient,
    project_id: str,
    subscription_id: str,
    snapshot_id: str,
    labels: str = None,
) -> Tuple[str, dict, dict]:
    """
    Create a snapshot of a subscription's backlog.

    Requires one of the following OAuth scopes:
        https://www.googleapis.com/auth/pubsub
        https://www.googleapis.com/auth/cloud-platform

    :param client: GoogleClient
    :param project_id: project id
    :param subscription_id: The subscription whose backlog the snapshot retains.
    :param snapshot_id: The id of the snapshot.
    :param labels: Input format: "key=val" pairs separated by ",".
    :return: (human readable output, context outputs, raw API response)
    """
    raw_snapshot = client.create_snapshot(
        GoogleNameParser.get_subscription_project_name(project_id, subscription_id),
        GoogleNameParser.get_snapshot_project_name(project_id, snapshot_id),
        attribute_pairs_to_dict(labels),
    )
    readable_output = tableToMarkdown(
        f"Snapshot **{snapshot_id}** was created successfully",
        raw_snapshot,
        headerTransform=pascalToSpace,
    )
    return readable_output, {"GoogleCloudPubSubSnapshots": raw_snapshot}, raw_snapshot
def snapshot_update_command(
client: PubSubClient,
project_id: str,
topic_id: str,
snapshot_id: str,
update_mask: str,
expire_time: str = None,
labels: str = None,
) -> Tuple[str, dict, dict]:
"""
Updates a snapshot
Requires one of the following OAuth scopes:
https://www.googleapis.com/auth/pubsub
https://www.googleapis.com/auth/cloud-platform
:param client: GoogleClient
:param project_id: ID of the project from which the subscription is receiving messages.
:param topic_id: The ID of the topic from which this snapshot is retaining messages.
:param snapshot_id: The id of the snapshot.
:param update_mask: Indicates which fields in the provided snapshot to update.
:param expire_time: The snapshot is guaranteed to exist up until this time
:param labels: An object containing a list of "key": value pairs
:return:
"""
snapshot_name = GoogleNameParser.get_snapshot_project_name(project_id, | |
additional key carried
result = cluster.to_dict()
result['action'] = action.id
return result
@request_context
def cluster_update(self, context, identity, name=None, profile_id=None,
                   parent=None, tags=None, timeout=None):
    """Update properties of a cluster and/or switch it to a new profile.

    :param context: RPC request context.
    :param identity: name, ID or short ID of the cluster to update.
    :param name: new cluster name, if given.
    :param profile_id: identity of a new profile to apply, if given.
    :param parent: identity of a new parent cluster, if given.
    :param tags: replacement tags, if given.
    :param timeout: new cluster timeout, if given.
    :return: dict of the cluster; when a profile change is requested,
        the dict also carries an 'action' key holding the ID of the
        CLUSTER_UPDATE action created for the change.
    :raises NotSupported: if a profile change is requested while the
        cluster is in ERROR state.
    :raises ProfileTypeNotMatch: if the new profile's type differs from
        the current profile's type.
    """
    def update_cluster_properties(cluster):
        # Apply the simple (non-profile) property changes in place,
        # persisting only when something actually changed.
        changed = False
        # Check out if fields other than profile_id have to be changed
        if name is not None and name != cluster.name:
            cluster.name = name
            changed = True
        if parent is not None:
            # 'parent' may be a name or short ID; resolve it to a DB record.
            db_parent = self.cluster_find(context, parent)
            if cluster.parent != db_parent.id:
                cluster.parent = db_parent.id
                changed = True
        if tags is not None and tags != cluster.tags:
            cluster.tags = tags
            changed = True
        if timeout is not None and timeout != cluster.timeout:
            # Validate/convert the timeout before storing it.
            cluster.timeout = utils.parse_int_param(consts.CLUSTER_TIMEOUT,
                                                    timeout)
            changed = True
        if changed is True:
            cluster.store(context)
        return cluster.to_dict()
    # Get the database representation of the existing cluster
    db_cluster = self.cluster_find(context, identity)
    cluster = cluster_mod.Cluster.load(context, cluster=db_cluster)
    update_cluster_properties(cluster)
    # No profile change requested (or same profile given): we are done.
    if profile_id is None or profile_id == cluster.profile_id:
        return cluster.to_dict()
    if cluster.status == cluster.ERROR:
        msg = _('Updating a cluster when it is in error state')
        raise exception.NotSupported(feature=msg)
    new_profile = self.profile_find(context, profile_id)
    old_profile = self.profile_find(context, cluster.profile_id)
    # A profile switch is only allowed between profiles of the same type.
    if new_profile.type != old_profile.type:
        msg = _('Cannot update a cluster to a different profile type, '
                'operation aborted.')
        raise exception.ProfileTypeNotMatch(message=msg)
    LOG.info(_LI("Updating cluster '%(cluster)s' to profile "
                 "'%(profile)s'.") % {'cluster': identity,
                                      'profile': profile_id})
    action = action_mod.Action(context, 'CLUSTER_UPDATE',
                               target=cluster.id,
                               cause=action_mod.CAUSE_RPC,
                               inputs={'profile_id': new_profile.id})
    action.store(context)
    # TODO(anyone): Uncomment the following line when update action
    # is implemented.
    # dispatcher.notify(context, self.dispatcher.NEW_ACTION,
    #                   None, action_id=action.id)
    result = cluster.to_dict()
    result['action'] = action.id
    return result
@request_context
def cluster_add_nodes(self, context, identity, nodes):
    """Add existing nodes to a cluster via a CLUSTER_ADD_NODES action.

    :param context: RPC request context.
    :param identity: name, ID or short ID of the target cluster.
    :param nodes: list of node identities (name, ID or short ID) to add.
    :return: dict with the ID of the CLUSTER_ADD_NODES action created.
    :raises SenlinBadRequest: if any node is not ACTIVE, is owned by
        another cluster, cannot be found, or if no usable node remains.
    """
    db_cluster = self.cluster_find(context, identity)
    found = []
    not_found = []
    bad_nodes = []
    owned_nodes = []
    # Classify each requested node; validation errors are reported below
    # in priority order rather than aborting on the first problem seen.
    for node in nodes:
        try:
            db_node = self.node_find(context, node)
            if db_node.status != node_mod.Node.ACTIVE:
                bad_nodes.append(db_node.id)
            elif db_node.cluster_id is not None:
                # Node already belongs to some cluster.
                owned_nodes.append(node)
            else:
                found.append(db_node.id)
        except exception.NodeNotFound:
            # (removed a no-op `pass` that followed this append)
            not_found.append(node)

    error = None
    if bad_nodes:
        error = _("Nodes are not ACTIVE: %s") % bad_nodes
    elif owned_nodes:
        error = _("Nodes %s owned by other cluster, need to delete "
                  "them from those clusters first.") % owned_nodes
    elif not_found:
        error = _("Nodes not found: %s") % not_found
    elif not found:
        error = _("No nodes to add: %s") % nodes

    if error is not None:
        raise exception.SenlinBadRequest(msg=error)

    action_name = 'cluster_add_nodes_%s' % db_cluster.id[:8]
    action = action_mod.Action(context, 'CLUSTER_ADD_NODES',
                               name=action_name,
                               target=db_cluster.id,
                               cause=action_mod.CAUSE_RPC,
                               inputs={'nodes': found})
    action.store(context)
    dispatcher.notify(context, self.dispatcher.NEW_ACTION,
                      None, action_id=action.id)
    return {'action': action.id}
@request_context
def cluster_del_nodes(self, context, identity, nodes):
    """Remove nodes from a cluster via a CLUSTER_DEL_NODES action.

    :param context: RPC request context.
    :param identity: name, ID or short ID of the target cluster.
    :param nodes: list of node identities (name, ID or short ID) to remove.
    :return: dict with the ID of the CLUSTER_DEL_NODES action created.
    :raises SenlinBadRequest: if any node cannot be found, is not a member
        of the cluster, or if no nodes were specified.
    """
    db_cluster = self.cluster_find(context, identity)
    found = []
    not_found = []
    bad_nodes = []
    for node in nodes:
        try:
            db_node = self.node_find(context, node)
            if db_node.cluster_id != db_cluster.id:
                bad_nodes.append(db_node.id)
            else:
                found.append(db_node.id)
        except exception.NodeNotFound:
            # (removed a no-op `pass` that followed this append)
            not_found.append(node)

    error = None
    if not_found:
        # Report only the nodes that were actually missing (previously the
        # whole input list was reported, matching neither the message nor
        # the sibling cluster_add_nodes behavior).
        error = _("Nodes %s not found") % not_found
    elif bad_nodes:
        error = _("Nodes %s not member of specified cluster") % bad_nodes
    elif not found:
        # BUG FIX: the original applied `% nodes` to a format string with
        # no placeholder, which raises TypeError at runtime.
        error = _("No nodes specified")

    if error is not None:
        raise exception.SenlinBadRequest(msg=error)

    action_name = 'cluster_del_nodes_%s' % db_cluster.id[:8]
    action = action_mod.Action(context, 'CLUSTER_DEL_NODES',
                               name=action_name,
                               target=db_cluster.id,
                               cause=action_mod.CAUSE_RPC,
                               inputs={'nodes': found})
    action.store(context)
    dispatcher.notify(context, self.dispatcher.NEW_ACTION,
                      None, action_id=action.id)
    return {'action': action.id}
@request_context
def cluster_scale_out(self, context, identity, count=None):
    """Trigger a CLUSTER_SCALE_OUT action, optionally by a node count."""
    # Validation
    db_cluster = self.cluster_find(context, identity)
    delta = utils.parse_int_param('count', count, allow_zero=False)
    if delta is None:
        LOG.info(_LI('Scaling out cluster %s'), db_cluster.name)
        inputs = {}
    else:
        LOG.info(_LI('Scaling out cluster %(name)s by %(delta)s nodes'),
                 {'name': identity, 'delta': delta})
        inputs = {'count': delta}
    scale_action = action_mod.Action(
        context, 'CLUSTER_SCALE_OUT',
        name='cluster_scale_out_%s' % db_cluster.id[:8],
        target=db_cluster.id,
        inputs=inputs,
        cause=action_mod.CAUSE_RPC)
    scale_action.store(context)
    dispatcher.notify(context, self.dispatcher.NEW_ACTION,
                      None, action_id=scale_action.id)
    return {'action': scale_action.id}
@request_context
def cluster_scale_in(self, context, identity, count=None):
    """Trigger a CLUSTER_SCALE_IN action, optionally by a node count."""
    db_cluster = self.cluster_find(context, identity)
    delta = utils.parse_int_param('count', count, allow_zero=False)
    if delta is None:
        LOG.info(_LI('Scaling in cluster %s'), db_cluster.name)
        inputs = {}
    else:
        LOG.info(_LI('Scaling in cluster %(name)s by %(delta)s nodes'),
                 {'name': identity, 'delta': delta})
        inputs = {'count': delta}
    scale_action = action_mod.Action(
        context, 'CLUSTER_SCALE_IN',
        name='cluster_scale_in_%s' % db_cluster.id[:8],
        target=db_cluster.id,
        inputs=inputs,
        cause=action_mod.CAUSE_RPC)
    scale_action.store(context)
    dispatcher.notify(context, self.dispatcher.NEW_ACTION,
                      None, action_id=scale_action.id)
    return {'action': scale_action.id}
@request_context
def cluster_delete(self, context, identity):
    """Trigger a CLUSTER_DELETE action for the given cluster."""
    cluster = self.cluster_find(context, identity)
    LOG.info(_LI('Deleting cluster %s'), cluster.name)
    delete_action = action_mod.Action(
        context, 'CLUSTER_DELETE',
        name='cluster_delete_%s' % cluster.id[:8],
        target=cluster.id,
        cause=action_mod.CAUSE_RPC)
    delete_action.store(context)
    dispatcher.notify(context, self.dispatcher.NEW_ACTION,
                      None, action_id=delete_action.id)
    return {'action': delete_action.id}
def node_find(self, context, identity, show_deleted=False):
    '''Find a node with the given identity (could be name or ID).

    Lookup order: when identity looks like a UUID, try exact ID first
    (honoring show_deleted), then fall back to name; otherwise try name
    first, then short ID.  Raises NodeNotFound when nothing matches.
    (Docstring fixed: it previously said "cluster" instead of "node".)
    '''
    if uuidutils.is_uuid_like(identity):
        node = db_api.node_get(context, identity,
                               show_deleted=show_deleted)
        if not node:
            node = db_api.node_get_by_name(context, identity)
    else:
        node = db_api.node_get_by_name(context, identity)
        if not node:
            node = db_api.node_get_by_short_id(context, identity)
    if node is None:
        raise exception.NodeNotFound(node=identity)
    return node
@request_context
def node_list(self, context, cluster_id=None, show_deleted=False,
              limit=None, marker=None, sort_keys=None, sort_dir=None,
              filters=None, tenant_safe=True):
    """Return node dicts, optionally scoped to a single cluster."""
    if cluster_id is not None:
        # The given cluster_id may be a name or short ID; resolve it.
        cluster_id = self.cluster_find(context, cluster_id).id
    nodes = node_mod.Node.load_all(context, cluster_id=cluster_id,
                                   show_deleted=show_deleted,
                                   limit=limit, marker=marker,
                                   sort_keys=sort_keys, sort_dir=sort_dir,
                                   filters=filters,
                                   tenant_safe=tenant_safe)
    return [n.to_dict() for n in nodes]
@request_context
def node_create(self, context, name, profile_id, cluster_id=None,
                role=None, tags=None):
    """Create a node and trigger a NODE_CREATE action for it.

    :param context: RPC request context.
    :param name: name of the new node.
    :param profile_id: name, ID or short ID of the profile to use.
    :param cluster_id: optional cluster the node should belong to.
    :param role: optional role of the node inside the cluster.
    :param tags: optional tags dict (defaults to {}).
    :return: node dict with an additional 'action' key carrying the
        NODE_CREATE action ID.
    :raises ProjectNotMatch: if node and cluster projects differ.
    :raises ProfileTypeNotMatch: if node and cluster profile types differ.
    """
    db_profile = self.profile_find(context, profile_id)
    if cluster_id is not None:
        db_cluster = self.cluster_find(context, cluster_id)
        cluster_id = db_cluster.id
        if context.project_id != db_cluster.project:
            msg = _('Node and cluster are from different project, '
                    'operation is disallowed.')
            raise exception.ProjectNotMatch(message=msg)
        if profile_id != db_cluster.profile_id:
            # Reuse the profile already resolved above instead of a second
            # profile_find() round-trip for the same profile_id.
            cluster_profile = self.profile_find(context,
                                                db_cluster.profile_id)
            if db_profile.type != cluster_profile.type:
                msg = _('Node and cluster have different profile type, '
                        'operation aborted.')
                raise exception.ProfileTypeNotMatch(message=msg)
    LOG.info(_LI('Creating node %s'), name)
    # Create a node instance
    tags = tags or {}
    node = node_mod.Node(name, db_profile.id, cluster_id, context,
                         role=role, tags=tags)
    node.store(context)
    action = action_mod.Action(context, 'NODE_CREATE',
                               name='node_create_%s' % node.id[:8],
                               target=node.id,
                               cause=action_mod.CAUSE_RPC)
    action.store(context)
    dispatcher.notify(context, self.dispatcher.NEW_ACTION,
                      None, action_id=action.id)
    # We return a node dictionary with an additional key (action) carried
    result = node.to_dict()
    result['action'] = action.id
    return result
@request_context
def node_get(self, context, identity):
    """Return the dict representation of a single node."""
    db_node = self.node_find(context, identity)
    return node_mod.Node.load(context, node=db_node).to_dict()
@request_context
def node_update(self, context, identity, name=None, profile_id=None,
                role=None, tags=None):
    """Update node properties and optionally start a profile update.

    :param context: RPC request context.
    :param identity: name, ID or short ID of the node to update.
    :param name: new node name, if given.
    :param profile_id: identity of a new profile to apply, if given.
    :param role: new role, if given.
    :param tags: replacement tags, if given.
    :return: None.
    :raises ProfileTypeNotMatch: if the new profile's type differs from
        the node's current profile type.
    """
    db_node = self.node_find(context, identity)
    node = node_mod.Node.load(context, node=db_node)
    changed = False
    if name is not None and name != node.name:
        node.name = name
        changed = True
    if role is not None and role != node.role:
        node.role = role
        changed = True
    if tags is not None and tags != node.tags:
        node.tags = tags
        changed = True
    if changed:
        node.store(context)
    if profile_id is None:
        return
    # The profile_id could be a name or a short ID, check it
    db_profile = self.profile_find(context, profile_id)
    profile_id = db_profile.id
    # check if profile_type matches
    node_profile = self.profile_find(context, node.profile_id)
    if node_profile.type != db_profile.type:
        # BUG FIX: message previously said "cluster" although this path
        # updates a node.
        msg = _('Cannot update a node to a different profile type, '
                'operation aborted.')
        raise exception.ProfileTypeNotMatch(message=msg)
    LOG.info(_LI('Updating node %s'), identity)
    action = action_mod.Action(context, 'NODE_UPDATE',
                               name='node_update_%s' % node.id[:8],
                               target=node.id,
                               cause=action_mod.CAUSE_RPC)
    action.store(context)
    # TODO(someone): uncomment this when it is implemented
    # dispatcher.notify(context, self.dispatcher.NEW_ACTION,
    #                   None, action_id=action.id)
    return
@request_context
def node_delete(self, context, identity, force=False):
    """Trigger a NODE_DELETE action for the given node.

    NOTE(review): the ``force`` flag is accepted but not referenced in
    this body — confirm intended semantics with callers.
    """
    db_node = self.node_find(context, identity)
    LOG.info(_LI('Deleting node %s'), identity)
    node = node_mod.Node.load(context, node=db_node)
    delete_action = action_mod.Action(
        context, 'NODE_DELETE',
        name='node_delete_%s' % node.id[:8],
        target=node.id,
        cause=action_mod.CAUSE_RPC)
    delete_action.store(context)
    dispatcher.notify(context, self.dispatcher.NEW_ACTION,
                      None, action_id=delete_action.id)
    return delete_action.to_dict()
@request_context
def node_join(self, context, identity, cluster_id):
    """Trigger a NODE_JOIN action attaching a node to a cluster.

    :param context: RPC request context.
    :param identity: name, ID or short ID of the node.
    :param cluster_id: name, ID or short ID of the target cluster.
    :return: dict with the ID of the NODE_JOIN action created.
    :raises ProjectNotMatch: if node and cluster projects differ.
    :raises ProfileTypeNotMatch: if node and cluster profile types differ.
    """
    db_node = self.node_find(context, identity)
    db_cluster = self.cluster_find(context, cluster_id)
    if db_node.project != db_cluster.project:
        msg = _('Node and cluster are from different project, operation '
                'is not allowed.')
        raise exception.ProjectNotMatch(message=msg)
    if db_node.profile_id != db_cluster.profile_id:
        # BUG FIX: profile_find() takes the request context as its first
        # argument (as at every other call site); it was omitted here,
        # which would raise a TypeError on this path.
        node_profile = self.profile_find(context, db_node.profile_id)
        cluster_profile = self.profile_find(context, db_cluster.profile_id)
        if node_profile.type != cluster_profile.type:
            msg = _('Node and cluster have different profile type, '
                    'operation aborted.')
            raise exception.ProfileTypeNotMatch(message=msg)
    LOG.info(_LI('Joining node %(node)s to cluster %(cluster)s'),
             {'node': identity, 'cluster': cluster_id})
    action = action_mod.Action(context, 'NODE_JOIN',
                               name='node_join_%s' % db_node.id[:8],
                               target=db_node.id,
                               cause=action_mod.CAUSE_RPC,
                               inputs={'cluster_id': db_cluster.id})
    action.store(context)
    dispatcher.notify(context, self.dispatcher.NEW_ACTION,
                      None, action_id=action.id)
    return {'action': action.id}
@request_context
def node_leave(self, context, identity):
    """Trigger a NODE_LEAVE action so the node departs its cluster."""
    db_node = self.node_find(context, identity)
    LOG.info(_LI('Node %(node)s leaving cluster'), {'node': identity})
    leave_action = action_mod.Action(
        context, 'NODE_LEAVE',
        name='node_leave_%s' % db_node.id[:8],
        target=db_node.id,
        cause=action_mod.CAUSE_RPC)
    leave_action.store(context)
    dispatcher.notify(context, self.dispatcher.NEW_ACTION,
                      None, action_id=leave_action.id)
    return {'action': leave_action.id}
@request_context
def cluster_policy_list(self, context, identity, filters=None,
                        sort_keys=None, sort_dir=None):
    """Return attachment details for every policy bound to a cluster."""
    db_cluster = self.cluster_find(context, identity)
    bindings = db_api.cluster_policy_get_all(context, db_cluster.id,
                                             filters=filters,
                                             sort_keys=sort_keys,
                                             sort_dir=sort_dir)
    # Flatten each binding (plus its related cluster/policy names) into
    # a plain dict for the API layer.
    return [{
        'id': b.id,
        'cluster_id': b.cluster_id,
        'cluster_name': b.cluster.name,
        'policy_id': b.policy_id,
        'policy_name': b.policy.name,
        'policy_type': b.policy.type,
        'priority': b.priority,
        'level': b.level,
        'cooldown': b.cooldown,
        'enabled': b.enabled,
    } for b in bindings]
@request_context
def cluster_policy_get(self, context, identity, policy_id):
db_cluster = self.cluster_find(context, identity)
db_policy = self.policy_find(context, policy_id)
binding = db_api.cluster_policy_get(context, db_cluster.id,
db_policy.id)
return {
| |
"ambassador cast.",
(Characters.Morgan,),
(3 * 60 + 30.0),
None,
(Roles.Ambassador,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"double agent cast.",
(Characters.Salmon,),
(3 * 60 + 30.0),
None,
(Roles.DoubleAgent,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"suspected double agent cast.",
(Characters.Duke,),
(3 * 60 + 30.0),
None,
(Roles.DoubleAgent,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"seduction target cast.",
(Characters.Queen,),
(3 * 60 + 30.0),
None,
(Roles.SeductionTarget,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.Alice,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.Teal,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.Boots,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.Irish,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.Plain,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.Disney,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.Smallman,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.Bling,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.Sikh,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"civilian cast.",
(Characters.General,),
(3 * 60 + 30.0),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.Cast,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"bug ambassador selected.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionSelected,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"contact double agent selected.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionSelected,
Missions.Contact,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"transfer microfilm selected.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionSelected,
Missions.Transfer,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"swap statue selected.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionSelected,
Missions.Swap,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"inspect 3 statues selected.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionSelected,
Missions.Inspect,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"seduce target selected.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionSelected,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"purloin guest list selected.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionSelected,
Missions.Purloin,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"fingerprint ambassador selected.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionSelected,
Missions.Fingerprint,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"bug ambassador enabled.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"contact double agent enabled.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Contact,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"transfer microfilm enabled.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Transfer,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"swap statue enabled.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Swap,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"inspect 3 statues enabled.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Inspect,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"seduce target enabled.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Seduce,
ActionTest.NoAT,
),
(
"spy",
"03:30.0",
"purloin guest list enabled.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Purloin,
ActionTest.NoAT,
),
],
),
(
"6173092994452987597",
[
(
"spy",
"03:30.0",
"fingerprint ambassador enabled.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.MissionEnabled,
Missions.Fingerprint,
ActionTest.NoAT,
),
(
"game",
"03:30.0",
"game started.",
(None,),
(3 * 60 + 30.0),
None,
(None,),
(None,),
TimelineCategory.GameStart,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:28.9",
"spy player takes control from ai.",
(None,),
(3 * 60 + 28.9),
None,
(None,),
(None,),
TimelineCategory.NoCategory,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:22.8",
"took last sip of drink.",
(Characters.Taft,),
(3 * 60 + 22.8),
None,
(Roles.Spy,),
(None,),
TimelineCategory.Drinks,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:16.8",
"get book from bookcase.",
(None,),
(3 * 60 + 16.8),
None,
(None,),
(Books.Blue,),
TimelineCategory.Books,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"03:14.3",
"action triggered: transfer microfilm",
(None,),
(3 * 60 + 14.3),
None,
(None,),
(None,),
TimelineCategory.Books | TimelineCategory.ActionTriggered,
Missions.Transfer,
ActionTest.NoAT,
),
(
"spy",
"03:13.6",
"action test white: transfer microfilm",
(None,),
(3 * 60 + 13.6),
None,
(None,),
(None,),
TimelineCategory.Books | TimelineCategory.ActionTest,
Missions.Transfer,
ActionTest.White,
),
(
"spy",
"03:10.7",
"remove microfilm from book.",
(None,),
(3 * 60 + 10.7),
None,
(None,),
(Books.Blue, Books.Blue),
TimelineCategory.Books | TimelineCategory.MissionPartial,
Missions.Transfer,
ActionTest.NoAT,
),
(
"spy",
"03:09.1",
"action triggered: transfer microfilm",
(None,),
(3 * 60 + 09.1),
None,
(None,),
(None,),
TimelineCategory.Books | TimelineCategory.ActionTriggered,
Missions.Transfer,
ActionTest.NoAT,
),
(
"spy",
"03:08.1",
"action test green: transfer microfilm",
(None,),
(3 * 60 + 08.1),
None,
(None,),
(None,),
TimelineCategory.Books | TimelineCategory.ActionTest,
Missions.Transfer,
ActionTest.Green,
),
(
"spy",
"03:05.4",
"hide microfilm in book.",
(None,),
(3 * 60 + 05.4),
None,
(None,),
(Books.Blue, Books.Blue),
TimelineCategory.Books | TimelineCategory.MissionPartial,
Missions.Transfer,
ActionTest.NoAT,
),
(
"spy",
"02:53.6",
"put book in bookcase.",
(None,),
(2 * 60 + 53.6),
None,
(None,),
(Books.Blue, Books.Green),
TimelineCategory.Books,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"02:53.6",
"transferred microfilm.",
(None,),
(2 * 60 + 53.6),
None,
(None,),
(Books.Blue, Books.Green),
TimelineCategory.Books | TimelineCategory.MissionComplete,
Missions.Transfer,
ActionTest.NoAT,
),
(
"spy",
"02:51.7",
"request drink from waiter.",
(Characters.Taft,),
(2 * 60 + 51.7),
None,
(Roles.Spy,),
(None,),
TimelineCategory.Drinks,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"02:45.3",
"spy picks up briefcase.",
(None,),
(2 * 60 + 45.3),
None,
(None,),
(None,),
TimelineCategory.Briefcase,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"02:45.3",
"picked up fingerprintable briefcase (difficult).",
(None,),
(2 * 60 + 45.3),
None,
(None,),
(None,),
TimelineCategory.Briefcase,
Missions.Fingerprint,
ActionTest.NoAT,
),
(
"spy",
"02:41.6",
"action triggered: fingerprint ambassador",
(None,),
(2 * 60 + 41.6),
None,
(None,),
(None,),
TimelineCategory.ActionTriggered,
Missions.Fingerprint,
ActionTest.NoAT,
),
(
"spy",
"02:41.6",
"started fingerprinting briefcase.",
(None,),
(2 * 60 + 41.6),
None,
(None,),
(None,),
TimelineCategory.Briefcase,
Missions.Fingerprint,
ActionTest.NoAT,
),
(
"spy",
"02:40.6",
"action test red: fingerprint ambassador",
(None,),
(2 * 60 + 40.6),
None,
(None,),
(None,),
TimelineCategory.ActionTest,
Missions.Fingerprint,
ActionTest.Red,
),
(
"spy",
"02:40.6",
"fingerprinting failed.",
(None,),
(2 * 60 + 40.6),
None,
(None,),
(None,),
TimelineCategory.NoCategory,
Missions.Fingerprint,
ActionTest.NoAT,
),
(
"spy",
"02:36.1",
"action triggered: bug ambassador",
(None,),
(2 * 60 + 36.1),
None,
(None,),
(None,),
TimelineCategory.ActionTriggered,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"02:36.1",
"begin planting bug while standing.",
(Characters.Morgan,),
(2 * 60 + 36.1),
None,
(Roles.Ambassador,),
(None,),
TimelineCategory.NoCategory,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"02:34.5",
"bugged ambassador while standing.",
(Characters.Morgan,),
(2 * 60 + 34.5),
None,
(Roles.Ambassador,),
(None,),
TimelineCategory.MissionComplete,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"02:31.6",
"spy puts down briefcase.",
(None,),
(2 * 60 + 31.6),
None,
(None,),
(None,),
TimelineCategory.Briefcase,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"02:31.2",
"missions reset.",
(None,),
(2 * 60 + 31.2),
None,
(None,),
(None,),
TimelineCategory.NoCategory,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"02:28.1",
"action triggered: bug ambassador",
(None,),
(2 * 60 + 28.1),
None,
(None,),
(None,),
TimelineCategory.ActionTriggered,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"02:28.1",
"begin planting bug while walking.",
(Characters.Morgan,),
(2 * 60 + 28.1),
None,
(Roles.Ambassador,),
(None,),
TimelineCategory.NoCategory,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"02:27.0",
"failed planting bug while walking.",
(Characters.Morgan,),
(2 * 60 + 27.0),
None,
(Roles.Ambassador,),
(None,),
TimelineCategory.NoCategory,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"02:24.4",
"action triggered: bug ambassador",
(None,),
(2 * 60 + 24.4),
None,
(None,),
(None,),
TimelineCategory.ActionTriggered,
Missions.Bug,
ActionTest.NoAT,
),
(
"spy",
"02:24.4",
"begin planting bug while walking.",
(Characters.Morgan,),
(2 * 60 + 24.4),
None,
(Roles.Ambassador,),
(None,),
TimelineCategory.NoCategory,
Missions.Bug,
ActionTest.NoAT,
),
],
),
(
"8346478285034783689",
[
(
"spy",
"02:31.0",
"action test white: inspect statues",
(None,),
(2 * 60 + 31.0),
None,
(None,),
(None,),
TimelineCategory.Statues | TimelineCategory.ActionTest,
Missions.Inspect,
ActionTest.White,
),
(
"spy",
"02:27.8",
"held statue inspected.",
(None,),
(2 * 60 + 27.8),
None,
(None,),
(None,),
TimelineCategory.Statues | TimelineCategory.MissionPartial,
Missions.Inspect,
ActionTest.NoAT,
),
(
"spy",
"02:27.8",
"all statues inspected.",
(None,),
(2 * 60 + 27.8),
None,
(None,),
(None,),
TimelineCategory.Statues | TimelineCategory.MissionComplete,
Missions.Inspect,
ActionTest.NoAT,
),
(
"spy",
"02:27.5",
"put back statue.",
(None,),
(2 * 60 + 27.5),
None,
(None,),
(None,),
TimelineCategory.Statues,
Missions.NoMission,
ActionTest.NoAT,
),
(
"sniper",
"02:23.1",
"marked suspicious.",
(Characters.Boots,),
(2 * 60 + 23.1),
None,
(Roles.Civilian,),
(None,),
TimelineCategory.SniperLights,
Missions.NoMission,
ActionTest.NoAT,
),
(
"spy",
"02:22.6",
"spy enters | |
""" PCBOT.
The main module which contains the Client. This is the module
that would be executed.
"""
import asyncio
import inspect
import logging
import sys
import traceback
from argparse import ArgumentParser
from copy import copy
from datetime import datetime
import discord
import plugins
from pcbot import utils, config
# Set and expose the bot version so that other modules can read it via pcbot.config
__version__ = config.set_version("PCBOT V3")
class Client(discord.Client):
    """ Custom discord Client which forwards events to plugin listeners and
    adds a few convenience helpers on top of discord.py. """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Startup timestamp, e.g. for uptime reporting
        self.time_started = datetime.utcnow()
        # The most recently deleted message(s), for plugins that want to inspect them
        self.last_deleted_messages = []

    async def _handle_event(self, func, event, *args, **kwargs):
        """ Run a single plugin event handler and deal with any exception it raises.

        An AssertionError raised by a message handler is treated as user feedback
        and sent back to the channel; any other exception is logged and forwarded
        to on_error. """
        try:
            result = await func(*args, **kwargs)
        except AssertionError as e:
            if event == "message":  # Find the message object and send the proper feedback
                message = args[0]
                await self.send_message(message.channel, str(e))
            else:
                logging.error(traceback.format_exc())
                await self.on_error(event, *args, **kwargs)
        except Exception:  # Narrowed from a bare except so e.g. task cancellation propagates
            logging.error(traceback.format_exc())
            await self.on_error(event, *args, **kwargs)
        else:
            # A message handler returning True means "log this message as handled"
            if result is True and event == "message":
                log_message(args[0], prefix="... ")

    def dispatch(self, event, *args, **kwargs):
        """ Override event dispatch to also dispatch to plugin event listeners. """
        # Exclude blank messages
        if event == "message":
            message = args[0]
            if not message.content and not message.attachments:
                return

        # Find the first discord.User / discord.Message argument so that bot
        # accounts and our own messages can be filtered out below
        member = None
        for arg in list(args) + list(kwargs.values()):
            if isinstance(arg, discord.User):
                member = arg
                break
            if isinstance(arg, discord.Message):
                member = arg.author
                break

        super().dispatch(event, *args, **kwargs)

        # We get the method name and look through our plugins' event listeners
        method = "on_" + event
        if method in plugins.events:
            for func in plugins.events[method]:
                # We'll only ignore bot messages if the event has disabled for bots
                if member and member.bot and not func.bot:
                    continue

                # Same goes for messages sent by ourselves. Naturally this requires func.bot == True
                if member and member == client.user and not func.self:
                    continue

                client.loop.create_task(self._handle_event(func, event, *args, **kwargs))

    @staticmethod
    async def send_message(destination, content=None, *args, **kwargs):
        """ Override to check if content is str and replace mass user mentions. """
        # Convert non-str content to str, and log it since passing non-str here is probably a bug
        if content is not None:
            if not isinstance(content, str):
                # Log the traceback too when the content is an exception (it was probably meant to be
                # converted to string) as to make debugging easier
                tb = ""
                if isinstance(content, Exception):
                    tb = "\n" + "\n".join(traceback.format_exception(type(content), content, content.__traceback__))

                logging.warning("type '%s' was passed to client.send_message: %s%s", type(content), content, tb)

                content = str(content)

            # Replace any @here and @everyone to avoid using them
            if not kwargs.pop("allow_everyone", None):
                content = content.replace("@everyone", "@ everyone").replace("@here", "@ here")

        return await destination.send(content, *args, **kwargs)

    async def send_file(self, destination, fp, *, filename=None, content=None, tts=False):
        """ Override send_file to notify the guild when an attachment could not be sent. """
        try:
            return await destination.send(content=content, tts=tts,
                                          file=discord.File(fp, filename=filename))
        except discord.errors.Forbidden:
            return await self.send_message(destination, "**I don't have the permissions to send my attachment.**")

    async def delete_message(self, message):
        """ Override to add info on the last deleted message. """
        self.last_deleted_messages = [message]
        await message.delete()

    async def delete_messages(self, channel, messages):
        """ Override to add info on the last deleted messages. """
        self.last_deleted_messages = list(messages)
        await channel.delete_messages(messages=messages)

    async def wait_for_message(self, timeout=None, *, check=None, bot=False):
        """ Wrap Client.wait_for("message") with a bot keyword: if bot=False, the
        function won't accept messages from bot accounts, where if bot=True it
        doesn't care.

        check may be omitted (None), in which case only the bot filter applies. """
        def new_check(m):
            # Guard against check=None, which previously raised TypeError on every message
            passes_check = check(m) if check is not None else True
            return passes_check and (True if bot else not m.author.bot)

        return await super().wait_for("message", check=new_check, timeout=timeout)

    @staticmethod
    async def say(message: discord.Message, content: str):
        """ Equivalent to client.send_message(message.channel, content) """
        msg = await client.send_message(message.channel, content)
        return msg
def parse_arguments():
    """ Build the startup argument parser and return the parsed arguments. """
    parser = ArgumentParser(description="Run PCBOT.")
    parser.add_argument("--version", "-V", help="Return the current version.",
                        action="version", version=__version__)

    # Token and email login must not be combined
    login_group = parser.add_mutually_exclusive_group()
    login_group.add_argument("--token", "-t", help="The token to login with. Prompts if omitted.")

    # Sharding options only apply to very large bots
    shard_group = parser.add_argument_group(title="Sharding",
                                            description="Arguments for sharding for bots on 2500+ guilds")
    shard_group.add_argument("--shard-id", help="Shard id. --shard-total must also be specified when used.", type=int,
                             default=None)
    shard_group.add_argument("--shard-total", help="Total number of shards.", type=int, default=None)

    parser.add_argument("--new-pass", "-n", help="Always prompts for password.", action="store_true")
    parser.add_argument("--log-level", "-l",
                        help="Use the specified logging level (see the docs on logging for values).",
                        type=lambda s: getattr(logging, s.upper()), default=logging.INFO, metavar="LEVEL")
    parser.add_argument("--enable-protocol-logging", "-p", help="Enables logging protocol events. THESE SPAM THE LOG.",
                        action="store_true")
    parser.add_argument("--log-file", "-o", help="File to log to. Prints to terminal if omitted.")

    return parser.parse_args()
start_args = parse_arguments()

# Setup our client; sharding requires both --shard-id and --shard-total
if start_args.shard_id is not None:
    if start_args.shard_total is None:
        raise ValueError("--shard-total must be specified")
    # The ProactorEventLoop is required on Windows for subprocess/pipe support
    client = Client(intents=discord.Intents.all(), shard_id=start_args.shard_id, shard_count=start_args.shard_total,
                    loop=asyncio.ProactorEventLoop() if sys.platform == "win32" else None)
else:
    client = Client(intents=discord.Intents.all(),
                    loop=asyncio.ProactorEventLoop() if sys.platform == "win32" else None)

# Seconds between automatic plugin saves (30 minutes); used by autosave()
autosave_interval = 60 * 30

# Migrate deprecated config values to updated values
config.migrate()
async def autosave():
    """ Save plugin data on an interval (default 30 minutes) until the client closes. """
    # is_closed is a method in discord.py 1.0+ (which this file's API usage matches);
    # the previous `not client.is_closed` tested the method object and was always False
    while not client.is_closed():
        await asyncio.sleep(autosave_interval)
        await plugins.save_plugins()
        logging.debug("Plugins saved")
def log_message(message: discord.Message, prefix: str = ""):
    """ Log a command/message, including the guild name for non-private channels. """
    guild_info = ""
    if not isinstance(message.channel, discord.abc.PrivateChannel):
        guild_info = " ({})".format(message.guild.name)

    first_line = message.content.split("\n")[0]
    logging.info("%s@%s%s -> %s", prefix, message.author, guild_info, first_line)
async def execute_command(command: plugins.Command, message: discord.Message, *args, **kwargs):
    """ Execute a command, sending any AssertionError message as user feedback.

    On any other exception, the owner gets the formatted traceback when
    config.owner_error is set; everyone else gets a generic error message
    pointing at the application owner. """
    try:
        await command.function(message, *args, **kwargs)
    except AssertionError as e:
        await client.say(message, str(e) or command.error or plugins.format_help(command, message.guild, message))
    except Exception:  # Narrowed from a bare except so e.g. task cancellation propagates
        logging.error(traceback.format_exc())

        if plugins.is_owner(message.author) and config.owner_error:
            await client.say(message, utils.format_code(traceback.format_exc()))
        else:
            # Only fetch the application info (an API call) when the owner is actually needed,
            # instead of on every single command invocation
            app_info = await client.application_info()
            await client.say(message, "An error occurred while executing this command. If the error persists, "
                                      "please send a PM to {}.".format(app_info.owner))
def default_self(anno, default, message: discord.Message):
    """ Resolve the Annotate.Self default to the message's author or channel,
    depending on the annotation. Any other default is returned unchanged. """
    if default is not utils.Annotate.Self:
        return default

    if anno is utils.Annotate.Member:
        return message.author
    if anno is utils.Annotate.Channel:
        return message.channel

    return default
def override_annotation(anno):
    """ Map a discord object annotation onto the matching Annotate member;
    any other annotation is returned as-is. """
    overrides = {
        discord.Member: utils.Annotate.Member,
        discord.TextChannel: utils.Annotate.Channel,
    }
    return overrides.get(anno, anno)
async def parse_annotation(param: inspect.Parameter, default, arg: str, index: int, message: discord.Message):
    """ Parse a command parameter's annotation and convert the given argument.

    index is the arg's index in shlex.split(message.content). Returns the
    converted value, the parameter's default, or None when conversion fails. """
    if default is param.empty:
        default = None

    if param.annotation is not param.empty:  # Any annotation is a function or Annotation enum
        anno = override_annotation(param.annotation)

        def content(s):
            # Raw content of the message from this argument's position onwards
            return utils.split(s, maxsplit=index)[-1].strip("\" ")

        # Valid enum checks
        if isinstance(anno, utils.Annotate):
            annotate = None
            if anno is utils.Annotate.Content:  # Split and get raw content from this point
                annotate = content(message.content) or default
            elif anno is utils.Annotate.LowerContent:  # Lowercase of above check
                annotate = content(message.content).lower() or default
            elif anno is utils.Annotate.CleanContent:  # Split and get clean raw content from this point
                annotate = content(message.clean_content) or default
            elif anno is utils.Annotate.LowerCleanContent:  # Lowercase of above check
                annotate = content(message.clean_content).lower() or default
            elif anno is utils.Annotate.Member:  # Checks member names or mentions
                annotate = utils.find_member(message.guild, arg) or default_self(anno, default, message)
            elif anno is utils.Annotate.Channel:  # Checks text channel names or mentions
                annotate = utils.find_channel(message.guild, arg) or default_self(anno, default, message)
            elif anno is utils.Annotate.VoiceChannel:  # Checks voice channel names or mentions
                annotate = utils.find_channel(message.guild, arg, channel_type="voice")
            elif anno is utils.Annotate.Code:  # Works like Content but extracts code
                annotate = utils.get_formatted_code(utils.split(message.content, maxsplit=index)[-1]) or default

            return annotate

        try:  # Try running the annotation as a callable
            if getattr(anno, "allow_spaces", False):
                arg = content(message.content)

            # Pass the message if the annotation has this attribute set
            if getattr(anno, "pass_message", False):
                result = anno(message, arg)
            else:
                result = anno(arg)

            # The function can be a coroutine
            if inspect.isawaitable(result):
                result = await result

            return result if result is not None else default
        except TypeError as e:
            raise TypeError(
                "Command parameter annotation must be either pcbot.utils.Annotate, a callable or a coroutine") from e
        except AssertionError:
            # Re-raise unchanged so the original assertion message is kept and can be
            # caught at a lower level (previously `raise AssertionError from e`, which
            # discarded the message shown to the user)
            raise
        except Exception:  # On error, eg when annotation is int and given argument is str
            return None

    return str(arg) or default  # Return str of arg if there was no annotation
async def parse_command_args(command: plugins.Command, cmd_args: list, message: discord.Message):
""" Parse commands from chat and return args and kwargs to pass into the
command's function. """
signature = inspect.signature(command.function)
| |
# ami/flowchart/library/Psalg.py
from pyqtgraph.Qt import QtGui, QtWidgets
from amitypes import DataSource, Detector, Array1d, Array2d
from ami.flowchart.Node import Node, NodeGraphicsItem
from ami.flowchart.Units import ureg
from ami.flowchart.library.common import CtrlNode
from ami.flowchart.library.Editors import ChannelEditor
import ami.graph_nodes as gn
import numpy as np
import typing
try:
import constFracDiscrim as cfd
class CFD(CtrlNode):
"""
Constant fraction descriminator
"""
nodeName = "CFD"
uiTemplate = [('Sample Interval', 'doubleSpin', {'value': 1, 'min': 0.01}),
('horpos', 'doubleSpin', {'value': 0, 'min': 0}),
('gain', 'doubleSpin', {'value': 1, 'min': 0.01}),
('offset', 'doubleSpin', {'value': 0, 'min': 0}),
('delay', 'intSpin', {'value': 1, 'min': 0}),
('walk', 'doubleSpin', {'value': 0, 'min': 0}),
('threshold', 'doubleSpin', {'value': 0, 'min': 0}),
('fraction', 'doubleSpin', {'value': 0.5, 'min': 0})]
def __init__(self, name):
super().__init__(name, terminals={'In': {'io': 'in', 'ttype': Array1d},
'Out': {'io': 'out', 'ttype': float}})
def to_operation(self, **kwargs):
sampleInterval = self.values['Sample Interval']
horpos = self.values['horpos']
gain = self.values['gain']
offset = self.values['offset']
delay = self.values['delay']
walk = self.values['walk']
threshold = self.values['threshold']
fraction = self.values['fraction']
def cfd_func(waveform):
return cfd.cfd(sampleInterval, horpos, gain, offset, waveform, delay, walk, threshold, fraction)
return gn.Map(name=self.name()+"_operation", **kwargs, func=cfd_func)
except ImportError as e:
print(e)
try:
import psana.hexanode.WFPeaks as psWFPeaks
    class WFPeaks(CtrlNode):

        """
        WFPeaks: finds peaks in multi-channel waveforms via psana's WFPeaks,
        configured per channel through the ChannelEditor widget.
        """

        nodeName = "WFPeaks"

        def __init__(self, name):
            super().__init__(name, terminals={'Times': {'io': 'in', 'ttype': Array2d},
                                              'Waveform': {'io': 'in', 'ttype': Array2d},
                                              'Num of Hits': {'io': 'out', 'ttype': Array1d},
                                              'Index': {'io': 'out', 'ttype': Array2d},
                                              'Values': {'io': 'out', 'ttype': Array2d},
                                              'Peak Times': {'io': 'out', 'ttype': Array2d}})
            # Replaced by the ChannelEditor's values dict once display() runs
            self.values = {}

        def display(self, topics, terms, addr, win, **kwargs):
            """ Create the channel-editor widget on first call and return it. """
            if self.widget is None:
                self.widget = ChannelEditor(parent=win)
                self.values = self.widget.values
                self.widget.sigStateChanged.connect(self.state_changed)

            return self.widget

        def to_operation(self, **kwargs):
            """ Build a Map operation wrapping psana's WFPeaks, configured from the
            editor's global and per-channel CFD settings. """
            numchs = len(self.widget.channel_groups)
            cfdpars = {'numchs': numchs,
                       'numhits': self.values['num hits'],
                       'DLD': self.values['DLD'],
                       'version': 4,
                       'cfd_wfbinbeg': self.values['cfd_wfbinbeg'],
                       'cfd_wfbinend': self.values['cfd_wfbinend']}

            # One CFD parameter dict per channel, keyed by channel index
            paramsCFD = {}
            for chn in range(0, numchs):
                paramsCFD[chn] = self.values[f"Channel {chn}"]

            cfdpars['paramsCFD'] = paramsCFD
            wfpeaks = psWFPeaks.WFPeaks(**cfdpars)

            def peakFinder(wts, wfs):
                # NOTE(review): WFPeaks takes (waveforms, times) in that order — confirm against psana docs
                peaks = wfpeaks(wfs, wts)
                return peaks

            return gn.Map(name=self.name()+"_operation", **kwargs, func=peakFinder)
import psana.hexanode.DLDProcessor as psfDLD
class DLDProc():
def __init__(self, **params):
self.params = params
self.proc = None
def __call__(self, nev, nhits, pktsec, calib):
if self.params['consts'] != calib:
self.params['consts'] = calib
self.proc = psfDLD.DLDProcessor(**self.params)
r = self.proc.xyrt_list(nev, nhits, pktsec)
if r:
x, y, r, t = zip(*r)
return (np.array(x), np.array(y), np.array(r), np.array(t))
else:
return (np.array([]), np.array([]), np.array([]), np.array([]))
class Hexanode(CtrlNode):
"""
Hexanode
"""
nodeName = "Hexanode"
uiTemplate = [('num chans', 'combo', {'values': ["5", "7"]}),
('num hits', 'intSpin', {'value': 16, 'min': 1}),
('verbose', 'check', {'checked': False})]
def __init__(self, name):
super().__init__(name, terminals={'Event Number': {'io': 'in', 'ttype': float},
'Num of Hits': {'io': 'in', 'ttype': Array1d},
'Peak Times': {'io': 'in', 'ttype': Array2d},
'Calib': {'io': 'in', 'ttype': typing.Dict},
'X': {'io': 'out', 'ttype': Array1d},
'Y': {'io': 'out', 'ttype': Array1d},
'R': {'io': 'out', 'ttype': Array1d},
'T': {'io': 'out', 'ttype': Array1d}})
def to_operation(self, **kwargs):
dldpars = {'numchs': int(self.values['num chans']),
'numhits': self.values['num hits'],
'verbose': self.values['verbose'],
'consts': None}
return gn.Map(name=self.name()+"_operation", **kwargs, func=DLDProc(**dldpars))
import psana.hexanode.HitFinder as psfHitFinder
    class HitFinder(CtrlNode):

        """
        HitFinder: reconstructs (X, Y, T) hit coordinates from per-channel peak
        times using psana's hexanode HitFinder.
        """

        nodeName = "HitFinder"
        uiTemplate = [('runtime_u', 'doubleSpin'),
                      ('runtime_v', 'doubleSpin'),
                      ('tsum_avg_u', 'doubleSpin'),
                      ('tsum_hw_u', 'doubleSpin'),
                      ('tsum_avg_v', 'doubleSpin'),
                      ('tsum_hw_v', 'doubleSpin'),
                      ('f_u', 'doubleSpin'),
                      ('f_v', 'doubleSpin'),
                      ('Rmax', 'doubleSpin')]

        def __init__(self, name):
            super().__init__(name, terminals={'Num of Hits': {'io': 'in', 'ttype': Array1d},
                                              'Peak Times': {'io': 'in', 'ttype': Array2d},
                                              'X': {'io': 'out', 'ttype': Array1d},
                                              'Y': {'io': 'out', 'ttype': Array1d},
                                              'T': {'io': 'out', 'ttype': Array1d}})

        def to_operation(self, **kwargs):
            """ Build a Map operation around a HitFinder configured from the UI values. """
            HF = psfHitFinder.HitFinder(self.values)

            def func(nhits, pktsec):
                # Each row of pktsec holds one channel's peak times, truncated to
                # that channel's hit count. NOTE(review): channel 4 is passed first
                # (presumably the MCP signal), then channels 0-3 — confirm the
                # expected argument order against psana's HitFinder documentation.
                HF.FindHits(pktsec[4, :nhits[4]],
                            pktsec[0, :nhits[0]],
                            pktsec[1, :nhits[1]],
                            pktsec[2, :nhits[2]],
                            pktsec[3, :nhits[3]])
                return HF.GetXYT()

            return gn.Map(name=self.name()+"_operation", **kwargs, func=func)
except ImportError as e:
print(e)
try:
import psana.xtcav.LasingOnCharacterization as psLOC
    class LOCProc():
        """ Stateful callable wrapping psana's XTCAV LasingOnCharacterization.

        The detectors and processor are (re)built whenever the data source
        changes, tracked via src.key; only 'psana' sources are supported. """

        def __init__(self, **params):
            self.params = params
            self.proc = None      # LasingOnCharacterization instance, built lazily
            self.dets = None      # detectors returned by psLOC.setDetectors
            self.src_key = 0      # key of the source the current processor was built for

        def __call__(self, src, cam, pars):
            time = None
            power = None
            agreement = None
            pulse = None

            # (Re)build the processor on first use or when the source changed
            if self.proc is None or self.src_key != src.key:
                if src.cfg['type'] == 'psana':
                    self.src_key = src.key
                    self.dets = psLOC.setDetectors(src.run, camera=cam.det, xtcavpars=pars.det)
                    self.proc = psLOC.LasingOnCharacterization(self.params, src.run, self.dets)
                else:
                    raise NotImplementedError("XTCAVLasingOn does not support the %s source type!" % src.cfg['type'])

            if self.proc.processEvent(src.evt):
                time, power, agreement, pulse = self.proc.resultsProcessImage()

            # All four values remain None when the event could not be processed
            return time, power, agreement, pulse
class XTCAVLasingOn(CtrlNode):
"""
XTCAVLasingOn
"""
nodeName = "XTCAVLasingOn"
uiTemplate = [('num bunches', 'intSpin', {'value': 1, 'min': 1}),
('snr filter', 'doubleSpin', {'value': 10.0, 'min': 0}),
('roi expand', 'doubleSpin', {'value': 1.0}),
('roi fraction', 'doubleSpin', {'value': 0.001, 'min': 0, 'max': 1}),
('island split method', 'combo', {'values': ["scipyLabel", "contourLabel"]}),
('island split par1', 'doubleSpin', {'value': 3.0}),
('island split par2', 'doubleSpin', {'value': 5.0})]
def __init__(self, name):
super().__init__(name, terminals={'src': {'io': 'in', 'ttype': DataSource},
'cam': {'io': 'in', 'ttype': Detector},
'pars': {'io': 'in', 'ttype': Detector},
'time': {'io': 'out', 'ttype': Array2d, 'unit': ureg.femtosecond},
'power': {'io': 'out', 'ttype': Array2d, 'unit': ureg.gigawatt},
'agreement': {'io': 'out', 'ttype': float},
'pulse': {'io': 'out', 'ttype': Array1d}})
def to_operation(self, **kwargs):
locpars = {'num_bunches': self.values['num bunches'],
'snr_filter': self.values['snr filter'],
'roi_expand': self.values['roi expand'],
'roi_fraction': self.values['roi fraction'],
'island_split_method': self.values['island split method'],
'island_split_par1': self.values['island split par1'],
'island_split_par2': self.values['island split par2']}
return gn.Map(name=self.name()+"_operation", **kwargs, func=LOCProc(**locpars))
except ImportError as e:
print(e)
try:
from numba import jit
    class PeakFinder1D(CtrlNode):

        """
        1D peak finder: returns amplitude-weighted centroids and widths of peaks
        found by two-threshold scanning (numba-compiled).
        """

        nodeName = "PeakFinder1D"
        uiTemplate = [('threshold lo', 'doubleSpin', {'value': 0}),
                      ('threshold hi', 'doubleSpin', {'value': 1})]

        def __init__(self, name):
            super().__init__(name, terminals={"Waveform": {'io': 'in', 'ttype': Array1d},
                                              "Centroid": {'io': 'out', 'ttype': Array1d},
                                              "Width": {'io': 'out', 'ttype': Array1d}})

        def to_operation(self, **kwargs):
            # Copy thresholds into locals so the jitted closure captures plain scalars
            threshold_lo = self.values['threshold lo']
            threshold_hi = self.values['threshold hi']

            @jit(nopython=True)
            def peakfinder1d(waveform):
                centroids = []
                widths = []

                # Scan interior samples: any sample at or above threshold_hi is a
                # candidate peak; its flanks are then expanded while samples stay
                # above threshold_lo without exceeding the candidate's value.
                for i in range(1, waveform.shape[0]-1):
                    if waveform[i] < threshold_hi:
                        continue

                    weighted_sum = 0
                    weights = 0

                    left = i - 1
                    right = i + 1
                    peak = waveform[i]

                    # Expand leftwards, accumulating amplitude-weighted indices
                    left_found = False
                    while threshold_lo < waveform[left] <= peak:
                        left_found = True
                        weighted_sum += waveform[left]*left
                        weights += waveform[left]
                        left -= 1
                        if left < 0:
                            break

                    # Expand rightwards the same way
                    right_found = False
                    while threshold_lo < waveform[right] <= peak:
                        right_found = True
                        weighted_sum += waveform[right]*right
                        weights += waveform[right]
                        right += 1
                        if right > waveform.shape[0] - 1:
                            break

                    # Only peaks with support on both sides count; the centroid is
                    # the amplitude-weighted mean index and the width the flank span
                    if left_found and right_found:
                        weighted_sum += peak*i
                        weights += peak
                        centroids.append(weighted_sum/weights)
                        widths.append(right-left-1)

                return np.array(centroids), np.array(widths)

            return gn.Map(name=self.name()+"_operation", **kwargs, func=peakfinder1d)
except ImportError as e:
print(e)
try:
from psalg_ext import peak_finder_algos
peak_attrs = ['seg', 'row', 'col', 'npix', 'amp_max', 'amp_tot',
'row_cgrav', 'col_cgrav', 'row_sigma', 'col_sigma',
'row_min', 'row_max', 'col_min', 'col_max',
'bkgd', 'noise', 'son']
    class PeakFinderGraphicsItem(NodeGraphicsItem):
        """ Graphics item which extends the node's context menu with an
        "Add <attr>" entry for every peak attribute not yet exposed as a
        terminal. """

        def buildMenu(self, reset=False):
            super().buildMenu(reset)
            actions = self.menu.actions()
            # NOTE(review): assumes the menu entry to insert before is always the
            # third action built by NodeGraphicsItem — confirm if that menu changes
            addInput = actions[2]

            self.output_group = QtWidgets.QActionGroup(self.menu)
            for attr in peak_attrs:
                if attr not in self.node.terminals:
                    add_attr = QtGui.QAction(f"Add {attr}", self.menu)
                    add_attr.attr = attr
                    self.output_group.addAction(add_attr)
                    self.menu.insertAction(addInput, add_attr)

            self.output_group.triggered.connect(self.output_added)

        def output_added(self, action):
            # Expose the chosen attribute as a removable output terminal and
            # rebuild the menu so the attribute's entry disappears
            self.node.addTerminal(action.attr, io='out', ttype=Array1d, removable=True)
            self.buildMenu(reset=True)
class PeakfinderAlgos():
def __init__(self, constructor_params={}, call_params={}, outputs=[]):
self.constructor_params = constructor_params
self.call_params = call_params
self.outputs = outputs
self.proc = None
def __call__(self, img):
if self.proc is None:
self.proc = peak_finder_algos(pbits=0)
self.proc.set_peak_selection_parameters(**self.constructor_params)
mask = np.ones(img.shape, dtype=np.uint16)
peaks = self.proc.peak_finder_v4r3_d2(img, mask, **self.call_params)
outputs = []
for output in self.outputs:
outputs.append(np.array(list(map(lambda peak: getattr(peak, output), peaks))))
return outputs
    class PeakFinderV4R3(CtrlNode):

        """
        psana peakfinder v4r3d2: finds 2D peaks in an image and exposes one
        output array per selected peak attribute.
        """

        nodeName = "PeakFinderV4R3"
        uiTemplate = [('npix min', 'doubleSpin', {'value': 20}),
                      ('npix max', 'doubleSpin', {'value': 25}),
                      ('amax thr', 'doubleSpin', {'value': 0}),
                      ('atot thr', 'doubleSpin', {'value': 0}),
                      ('son min', 'doubleSpin', {'value': 0}),
                      # pass to peak_finder_v4r3_d2
                      ('thr low', 'doubleSpin', {'value': 35}),
                      ('thr high', 'doubleSpin', {'value': 100}),
                      ('rank', 'doubleSpin', {'value': 2}),
                      ('r0', 'doubleSpin', {'value': 4}),
                      ('dr', 'doubleSpin', {'value': 0.05})]

        def __init__(self, name):
            super().__init__(name, terminals={'Image': {'io': 'in', 'ttype': Array2d},
                                              'row_cgrav': {'io': 'out', 'ttype': Array1d, 'removable': True},
                                              'col_cgrav': {'io': 'out', 'ttype': Array1d, 'removable': True},
                                              'npix': {'io': 'out', 'ttype': Array1d, 'removable': True},
                                              'son': {'io': 'out', 'ttype': Array1d, 'removable': True},
                                              'amp_tot': {'io': 'out', 'ttype': Array1d, 'removable': True}})
            # Build the context menu so the custom graphics item can offer "Add <attr>" actions
            self.graphicsItem().buildMenu(reset=True)

        def graphicsItem(self, brush=None):
            # Lazily create the custom graphics item that manages extra output terminals
            if self._graphicsItem is None:
                self._graphicsItem = PeakFinderGraphicsItem(self, brush)
            return self._graphicsItem

        def to_operation(self, **kwargs):
            """ Build a Map operation around PeakfinderAlgos, configured from the UI
            values; one output array is produced per currently-exposed terminal. """
            # Peak selection parameters, passed to set_peak_selection_parameters
            constructor_params = {'npix_min': self.values['npix min'],
                                  'npix_max': self.values['npix max'],
                                  'amax_thr': self.values['amax thr'],
                                  'atot_thr': self.values['atot thr'],
                                  'son_min': self.values['son min']}
            # Parameters passed to peak_finder_v4r3_d2 on each call
            call_params = {'thr_low': self.values['thr low'],
                           'thr_high': self.values['thr high'],
                           'rank': self.values['rank'],
                           'r0': self.values['r0'],
                           'dr': self.values['dr']}

            node = gn.Map(name=self.name()+"_operation", **kwargs,
                          func=PeakfinderAlgos(constructor_params, call_params, list(self.outputs().keys())))

            return node
except ImportError as e:
print(e)
try:
from psana.pyalgos.generic import edgefinder
class EdgeFinderProc():
def __init__(self, calibconsts={}):
self.calibconsts = calibconsts
self.proc = None
def __call__(self, image, iir, calib):
if self.calibconsts.keys() != calib.keys():
self.calibconsts = calib
self.proc = edgefinder.EdgeFinder(self.calibconsts)
elif all(np.array_equal(self.calibconsts[key], calib[key]) for key in calib):
self.calibconsts = calib
self.proc = edgefinder.EdgeFinder(self.calibconsts)
r = self.proc(image, iir)
if r:
return r.edge, r.fwhm, r.amplitude, r.amplitude_next, r.ref_amplitude
return np.nan, np.nan, np.nan, np.nan, np.nan
class EdgeFinder(Node):
"""
psana edgefinder
"""
nodeName = "EdgeFinder"
def __init__(self, name):
| |
import math
import time
import numpy as np
import matplotlib.pyplot as plt
from functions import *
import multiprocessing as mp
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
# History buffers for later analysis/plotting; presumably appended to by the
# driver code elsewhere in this file — not populated in this section
hist_all_nodes = []
hist_remaining_nodes = []
hist_time_for_method = []
hist_time_for_tarjan = []
class BaseGraph:
    """ A directed graph stored as an adjacency dict {vertex: [successors]}.

    Vertices are assumed to be non-negative integers, since SCC() sizes its
    bookkeeping arrays as max(vertices) + 1. """

    def __init__(self, graph_dict=None):
        """ Initialize a graph object.

        If no dictionary or None is given, an empty dictionary will be used. """
        self.Time = 0  # NOTE(review): not referenced in this class — presumably used elsewhere; confirm
        # Maps vertex -> 1 for every vertex found by SCC()/sconnect() to lie on a cycle
        self.scc_field = {}
        if graph_dict == None:
            graph_dict = {}
        self.__graph_dict = graph_dict

    def vertices(self):
        """ returns the vertices of a graph """
        return list(self.__graph_dict.keys())

    def edges(self):
        """ returns the edges of a graph """
        return self.__generate_edges()

    def add_vertex(self, vertex):
        """ If the vertex "vertex" is not in
        self.__graph_dict, a key "vertex" with an empty
        list as a value is added to the dictionary.
        Otherwise nothing has to be done.
        """
        if vertex not in self.__graph_dict:
            self.__graph_dict[vertex] = []

    def has_vertex(self, vertex):
        """ Return 1 if the vertex exists in the graph, otherwise 0. """
        if vertex in self.__graph_dict:
            return 1
        return 0

    def number_of_vertices(self):
        """ Return the number of vertices in the graph. """
        return len(self.vertices())

    def has_edge(self, edge):
        """ Return True if the directed edge edge[0] -> edge[1] exists. """
        key = edge[0]
        value = edge[1]
        return value in self.__graph_dict[key]

    def add_edge(self, edge):
        """ assumes that edge is of type set, tuple or list """
        # Duplicate edges are ignored, so at most one edge exists per ordered pair
        if(self.has_edge(edge) == 0):
            self.__graph_dict[edge[0]].append(edge[1])

    def number_of_edges(self):
        """ Return the total number of directed edges in the graph. """
        number_of_edges = 0
        for key in self.__graph_dict:
            number_of_edges += len(self.__graph_dict[key])
        return number_of_edges

    def __generate_edges(self):
        """ A static method generating the edges of the
        graph "graph". Edges are represented as sets
        with one (a loop back to the vertex) or two
        vertices
        """
        edges = []
        for vertex in self.__graph_dict:
            for neighbour in self.__graph_dict[vertex]:
                edges.append({vertex, neighbour})
        return edges

    def SCC(self):
        """ Run Tarjan's algorithm from every unvisited vertex, filling
        self.scc_field with the vertices that lie on a cycle. """
        next_id = 0  # next_id index.
        # Work arrays are indexed directly by vertex value, hence max(...) + 1
        length = max(self.vertices()) + 1
        index = [None] * length
        lowlink = [None] * length
        onstack = [False] * length
        stack = []

        next_idgroup = 0  # next_id SCC ID.
        groups = []  # SCCs: list of vertices.
        groupid = {}  # Map from vertex to SCC ID.

        # next_id restarts at 0 for each root; comparisons only ever involve
        # vertices still on the current DFS stack, so ids need not be globally unique
        for v in self.vertices():
            if index[v] == None:
                self.sconnect(v, next_id, next_idgroup, index, lowlink, onstack, stack, groups, groupid)

    def sconnect(self, v, next_id, next_idgroup, index, lowlink, onstack, stack, groups, groupid):
        """ Iterative (explicit-stack) Tarjan SCC visit rooted at v.

        Marks self.scc_field[w] = 1 for every vertex w whose component has more
        than one member, or which has a self-loop — i.e. lies on a cycle. """
        work = [(v, 0)]  # NEW: Recursion stack.
        #k = 0
        while work:
            v, i = work[-1]  # i is next_id successor to process.
            del work[-1]
            if i == 0:  # When first visiting a vertex:
                index[v] = next_id
                lowlink[v] = next_id
                next_id += 1
                stack.append(v)
                onstack[v] = True
            recurse = False
            # NOTE(review): this rescans successors from 0 on every resume instead
            # of from i; redundant but harmless, since visited successors never recurse again
            for j in range(len(self.__graph_dict[v])):
                w = self.__graph_dict[v][j]
                if index[w] == None:
                    # CHANGED: Add w to recursion stack.
                    work.append((v, j+1))
                    work.append((w, 0))
                    recurse = True
                    break
                elif onstack[w]:
                    lowlink[v] = min(lowlink[v], index[w])
            if recurse: continue  # NEW
            k = 0
            if index[v] == lowlink[v]:
                # v is the root of an SCC: pop the whole component off the stack
                com = []
                while True:
                    w = stack[-1]
                    del stack[-1]
                    onstack[w] = False
                    com.append(w)
                    # Mark w as lying on a cycle: either the component has more
                    # than one member (w != v or k), or w has a self-loop
                    if(w != v or k or self.has_edge([w, w])):
                        self.scc_field[w] = 1
                    k = 1
                    groupid[w] = next_idgroup
                    if w == v: break
                groups.append(com)
                next_idgroup += 1
            if work:  # NEW: v was recursively visited.
                # Propagate v's lowlink to its parent on the work stack
                w = v
                v, _ = work[-1]
                lowlink[v] = min(lowlink[v], lowlink[w])

    def delete_scc_components(self):
        """ Run Tarjan, then remove every vertex NOT marked as lying on a cycle,
        keeping only vertices in self.scc_field.

        NOTE(review): despite the name, this deletes the vertices *outside*
        non-trivial SCCs, not the SCC members themselves. """
        # timerStart/timeFinish presumably come from the functions star-import — confirm
        start = timerStart()
        self.SCC()
        print("------------ Time for Tarjan: ", timeFinish(start))
        # print(self.__graph_dict)
        start = timerStart()
        for vertex in self.vertices():
            if(vertex not in self.scc_field):
                self.__graph_dict.pop(vertex)
        # print("------------", self.__graph_dict)
        print("------------ Time for deletion: ", timeFinish(start))

    def __str__(self):
        """ Return the adjacency dict followed by the vertex list. """
        res = str(self.__graph_dict)
        res += "\nvertices: "
        for k in self.__graph_dict:
            res += str(k) + " "
        return res
class FinalGraph():
def __init__(self, givenBoundaries, cell_size, cell_count=0, lengthOfSide=(0, 0), Time=0):
    """Grid wrapper around a BaseGraph.

    Args:
        givenBoundaries: two corner points of the area; indexing elsewhere
            suggests [[min_x, max_y], [max_x, min_y]] — TODO confirm.
        cell_size: edge length of one square cell.
        cell_count: optional precomputed number of cells.
        lengthOfSide: optional (horizontal, vertical) cell counts.
        Time: accepted for interface compatibility; not stored here.
    """
    self.graph = BaseGraph()
    self.boundaries = givenBoundaries
    self.cell_size = cell_size
    self.cell_count = cell_count
    # Fixed: the default for lengthOfSide was a mutable list shared across
    # all calls; an immutable tuple is safe and indexes identically.
    self.horizontal_length = lengthOfSide[0]
    self.vertical_length = lengthOfSide[1]
def calculate_cell_count(self):
    """Set cell_count = (width / cell_size) * (height / cell_size) as a
    non-negative float."""
    (x0, y0), (x1, y1) = self.boundaries[0], self.boundaries[1]
    cols = (x1 - x0) / self.cell_size
    rows = (y1 - y0) / self.cell_size
    self.cell_count = abs(cols * rows)
def calculate_sides_of_area(self):
    """Set horizontal_length / vertical_length to whole cell counts along
    the x and y sides of the bounded area."""
    (x0, y0), (x1, y1) = self.boundaries[0], self.boundaries[1]
    self.horizontal_length = int(abs((x1 - x0) / self.cell_size))
    self.vertical_length = int(abs((y1 - y0) / self.cell_size))
def create_cells(self):
    """Add one vertex per grid cell, numbered row-major from 0."""
    total = self.vertical_length * self.horizontal_length
    for number in range(total):
        self.graph.add_vertex(number)
def get_local_coordinates_of_cell_from_number(self, number_of_cell):
    """Return [row, col] for a row-major cell number.

    Fixed: the column was computed as ``number % vertical_length``; cell
    numbers are row-major over horizontal_length (see
    get_number_of_cell_from_local: number = col + row * horizontal_length),
    so both row and column must be derived from horizontal_length.  The old
    code only happened to work on square grids.
    """
    row = int(math.floor(number_of_cell / self.horizontal_length))
    col = number_of_cell % self.horizontal_length
    return [row, col]
def get_local_coordinates_of_cell_from_global(self, coord):
    """Map a global (x, y) point to its [row, col] grid cell.

    Columns grow rightwards from the left edge (boundaries[0][0]);
    rows grow downwards from the top edge (boundaries[0][1]).
    """
    left = self.boundaries[0][0]
    top = self.boundaries[0][1]
    col = math.floor((coord[0] - left) / self.cell_size)
    row = math.floor((top - coord[1]) / self.cell_size)
    return [row, col]
def get_local_coordinates_of_cell_from_global_advanced(self, coord):
    """Like get_local_coordinates_of_cell_from_global, but when the point
    sits exactly on a cell border also return the neighbouring cell(s)
    sharing that border (left, above, and the diagonal when on a corner)."""
    col_exact = (coord[0] - self.boundaries[0][0]) / self.cell_size
    row_exact = (self.boundaries[0][1] - coord[1]) / self.cell_size
    col = math.floor(col_exact)
    row = math.floor(row_exact)
    cells = [[row, col]]
    on_col_border = (col_exact == col)
    on_row_border = (row_exact == row)
    if on_col_border and col - 1 >= 0:
        cells.append([row, col - 1])
    if on_row_border and row - 1 >= 0:
        cells.append([row - 1, col])
    if on_col_border and on_row_border and col - 1 >= 0 and row - 1 >= 0:
        cells.append([row - 1, col - 1])
    return cells
def get_global_coordinates_of_cell_from_number(self, number_of_cell):
    """Return the global [x, y] of a cell's top-left corner.

    Fixed: y was anchored at boundaries[1][0] — the *x* coordinate of the
    bottom-right corner.  The inverse of
    get_local_coordinates_of_cell_from_global (col from boundaries[0][0],
    row from boundaries[0][1]) must anchor y at the top edge,
    boundaries[0][1].  The sibling call is also hoisted so the row/col pair
    is computed once instead of twice.
    """
    row, col = self.get_local_coordinates_of_cell_from_number(number_of_cell)
    x = self.boundaries[0][0] + self.cell_size * col
    y = self.boundaries[0][1] - self.cell_size * row
    return [x, y]
def get_number_of_cell_from_local(self, coord):
    """Row-major cell number for local [row, col] coordinates."""
    row, col = coord[0], coord[1]
    return col + row * self.horizontal_length
def get_number_of_cell_from_global(self, coord):
    """Cell number for a global (x, y) point."""
    local = self.get_local_coordinates_of_cell_from_global(coord)
    return int(self.get_number_of_cell_from_local(local))
def get_number_of_cell_from_global_advanced(self, coord):
    """Cell numbers for every cell touching the global point (border-aware)."""
    return [self.get_number_of_cell_from_local(local)
            for local in self.get_local_coordinates_of_cell_from_global_advanced(coord)]
def check_if_point_inside_boundaries(self, coord):
    """Return 1 when (x, y) lies inside the padded area, else 0.

    The right and bottom edges are extended by one cell_size, mirroring
    clip_point_to_boundaries.
    """
    x, y = coord[0], coord[1]
    (left, top), (right, bottom) = self.boundaries
    outside = (x < left
               or x > right + self.cell_size
               or y > top
               or y < bottom - self.cell_size)
    return 0 if outside else 1
def clip_point_to_boundaries(self, coord):
    """Nudge a point sitting exactly on the right or bottom boundary one
    cell inward; every other point (including the left/top edges) is
    returned unchanged."""
    x, y = coord[0], coord[1]
    # Right edge wins only when x is not also the left edge (degenerate box).
    if x != self.boundaries[0][0] and x == self.boundaries[1][0]:
        x = self.boundaries[1][0] - self.cell_size
    # Bottom edge: move up (inward) by one cell.
    if y != self.boundaries[0][1] and y == self.boundaries[1][1]:
        y = self.boundaries[1][1] + self.cell_size
    return [x, y]
def fill_nodes(self):
    """Compute the grid dimensions, then create one vertex per cell."""
    for step in (self.calculate_cell_count,
                 self.calculate_sides_of_area,
                 self.create_cells):
        step()
def hit_cell(self, coord):
    """Return the cell number the point lands in, or -1 when the point is
    outside the (padded) area or its cell was removed from the graph."""
    clipped = self.clip_point_to_boundaries(coord)
    if not self.check_if_point_inside_boundaries(clipped):
        return -1
    number = self.get_number_of_cell_from_global(clipped)
    return number if self.graph.has_vertex(number) else -1
def hit_cell_advanced(self, coord):
    """Return the list of existing cell numbers touching the point
    (border-aware variant of hit_cell), or -1 when none qualify."""
    clipped = self.clip_point_to_boundaries(coord)
    hit_cells = []
    if self.check_if_point_inside_boundaries(clipped):
        for number in self.get_number_of_cell_from_global_advanced(clipped):
            if self.graph.has_vertex(number):
                hit_cells.append(number)
    # Mirror hit_cell's contract: -1 signals "no cell hit".
    return hit_cells if hit_cells else -1
def point_method(self, increment=1):
    """Sample increment x increment points per cell, map each through the
    global mapping function f, and add an edge from the cell to the cell
    that f's image lands in (skipping misses, which hit_cell reports as -1)."""
    for vertex in self.graph.vertices():
        # Top-left corner of this cell; the coordinate getter is pure, so
        # it is computed once per vertex instead of once per sample.
        corner_x, corner_y = self.get_global_coordinates_of_cell_from_number(vertex)
        for j in range(increment):
            y = corner_y - float(j * self.cell_size) / increment
            for i in range(increment):
                x = corner_x + float(i * self.cell_size) / increment
                target = self.hit_cell(f([x, y]))
                if target != -1:
                    self.graph.add_edge([vertex, int(target)])
def point_method_advanced(self, increment=1):
    """Border-aware variant of point_method: each sampled image point may
    belong to several cells; an edge is added to every one of them."""
    for vertex in self.graph.vertices():
        corner_x, corner_y = self.get_global_coordinates_of_cell_from_number(vertex)
        for j in range(increment):
            y = corner_y - float(j * self.cell_size) / increment
            for i in range(increment):
                x = corner_x + float(i * self.cell_size) / increment
                targets = self.hit_cell_advanced(f([x, y]))
                if targets != -1:
                    for number in targets:
                        self.graph.add_edge([vertex, int(number)])
def hit_area(self, vertex, top_left_number, bot_right_number, top_right_number, bot_left_number):
    """Connect `vertex` to every existing cell inside a slightly padded
    bounding box of the four given corner cells."""
    corners = (top_left_number, top_right_number, bot_left_number, bot_right_number)
    corner_locals = [self.get_local_coordinates_of_cell_from_number(n) for n in corners]
    rows = [lc[0] for lc in corner_locals]
    cols = [lc[1] for lc in corner_locals]
    low_bound = 1   # pad one cell before the minimum ...
    high_bound = 2  # ... and one past the maximum (range() end is exclusive)
    for row in range(int(min(rows)) - low_bound, int(max(rows)) + high_bound):
        for col in range(int(min(cols)) - low_bound, int(max(cols)) + high_bound):
            number_of_cell = self.get_number_of_cell_from_local([row, col])
            if self.graph.has_vertex(number_of_cell):
                self.graph.add_edge([vertex, number_of_cell])
def linear_method(self):
top_left_corner_coord = [0, 0]
bottom_right_corner_coord = [0, 0]
for vertex in self.graph.vertices():
top_left_corner_coord = self.get_global_coordinates_of_cell_from_number(vertex)
bottom_left_corner_coord = [self.get_global_coordinates_of_cell_from_number(vertex)[0],
self.get_global_coordinates_of_cell_from_number(vertex)[1] - self.cell_size]
top_right_corner_coord = [self.get_global_coordinates_of_cell_from_number(vertex)[0] + self.cell_size,
self.get_global_coordinates_of_cell_from_number(vertex)[1]]
bottom_right_corner_coord = [self.get_global_coordinates_of_cell_from_number(vertex)[0] + self.cell_size,
self.get_global_coordinates_of_cell_from_number(vertex)[1] - self.cell_size]
hit_coord_top_left = f(top_left_corner_coord)
hit_coord_top_right = f(top_right_corner_coord)
hit_coord_bot_left = f(bottom_left_corner_coord)
hit_coord_bot_right = f(bottom_right_corner_coord)
#EXPERIMENTAL
amount_of_mapping = 0
for i in range(amount_of_mapping):
hit_coord_top_left = f(hit_coord_top_left)
hit_coord_bot_right = f(hit_coord_bot_right)
hit_coord_top_right = f(hit_coord_top_right)
hit_coord_bot_left = f(hit_coord_bot_left)
number_of_hit_cell_top_left = self.hit_cell(hit_coord_top_left)
number_of_hit_cell_top_right = self.hit_cell(hit_coord_top_right)
number_of_hit_cell_bot_left = self.hit_cell(hit_coord_bot_left)
number_of_hit_cell_bot_right = self.hit_cell(hit_coord_bot_right)
#DEBUG
# if(hit_coord_top_left[0] > 1.1 and hit_coord_top_left[0] < 1.7) and (hit_coord_top_left[1] > 0.9 and hit_coord_top_left[1] < 1.4):
# print("Vertex that maps to that point: ", vertex)
# print("Coordinates of that vertex: ", top_left_corner_coord)
# print("Vertex that is that point: ", number_of_hit_cell_top_left)
# print("Hit coordinates: ", hit_coord_top_left)
# if(hit_coord_bot_right[0] > 1.1 and hit_coord_bot_right[0] < 1.7) and (hit_coord_bot_right[1] > 0.9 and hit_coord_bot_right[1] | |
if model.Meta.table_format in [TableFormat.cell, TableFormat.multiple_cells]:
continue
if model.Meta.table_format == TableFormat.row:
ws_name = model.Meta.verbose_name_plural
else:
ws_name = model.Meta.verbose_name
hyperlinks.append(Hyperlink(len(content), 0,
"internal:'!!{}'!A1".format(ws_name),
tip='Click to view {}'.format(ws_name.lower())))
count_val = len(grouped_objects.get(model, []))
content.append([
ws_name,
model.Meta.description,
count_val,
])
style = WorksheetStyle(
title_rows=1 + (doc_metadata is not None),
head_rows=1,
extra_rows=0,
extra_columns=0,
hyperlinks=hyperlinks,
)
writer.write_worksheet(sheet_name, content, style=style, protected=protected)
def write_model(self, writer, model, objects, schema_name, date, doc_metadata, doc_metadata_model, model_metadata, sheet_models,
                include_all_attributes=True, encoded=None, write_empty_models=True, write_empty_cols=True,
                extra_entries=0, protected=True):
    """ Write a list of model objects to a file

    Args:
        writer (:obj:`wc_utils.workbook.io.Writer`): io writer
        model (:obj:`type`): model
        objects (:obj:`list` of :obj:`Model`): list of instances of :obj:`Model`
        schema_name (:obj:`str`): schema name
        date (:obj:`str`): date
        doc_metadata (:obj:`dict`): dictionary of document metadata to be saved to header row
            (e.g., ``!!!ObjTables ...``)
        doc_metadata_model (:obj:`type`): model whose worksheet contains the document metadata
        model_metadata (:obj:`dict`): dictionary of model metadata
        sheet_models (:obj:`list` of :obj:`Model`): models encoded as separate sheets
        include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes
            including those not explictly included in :obj:`Model.Meta.attribute_order`
        encoded (:obj:`dict`, optional): objects that have already been encoded and their assigned JSON identifiers
        write_empty_models (:obj:`bool`, optional): if :obj:`True`, write models even when there are no instances
        write_empty_cols (:obj:`bool`, optional): if :obj:`True`, write columns even when all values are :obj:`None`
        extra_entries (:obj:`int`, optional): additional entries to display
        protected (:obj:`bool`, optional): if :obj:`True`, protect the worksheet
    """
    if not write_empty_models and not objects:
        return

    attrs, _, headings, merge_ranges, field_validations, metadata_headings = get_fields(
        model, schema_name, date, doc_metadata, doc_metadata_model, model_metadata,
        include_all_attributes=include_all_attributes,
        sheet_models=sheet_models)

    # objects: serialize each instance into one row of cell values
    model.sort(objects)
    data = []
    for obj in objects:
        # comments: emitted as their own single-cell rows, wrapped in %/ ... /%
        for comment in obj._comments:
            data.append(['%/ ' + comment + ' /%'])

        # properties
        obj_data = []
        for attr in attrs:
            val = getattr(obj, attr.name)
            if isinstance(attr, RelatedAttribute):
                if attr.related_class.Meta.table_format == TableFormat.multiple_cells:
                    # related object inlined over multiple columns: one cell per sub-attribute
                    sub_attrs = get_ordered_attributes(attr.related_class, include_all_attributes=include_all_attributes)
                    for sub_attr in sub_attrs:
                        if val:
                            sub_val = getattr(val, sub_attr.name)
                            if isinstance(sub_attr, RelatedAttribute):
                                obj_data.append(sub_attr.serialize(sub_val, encoded=encoded))
                            else:
                                obj_data.append(sub_attr.serialize(sub_val))
                        else:
                            obj_data.append(None)
                else:
                    obj_data.append(attr.serialize(getattr(obj, attr.name), encoded=encoded))
            else:
                obj_data.append(attr.serialize(getattr(obj, attr.name)))
        data.append(obj_data)

    # optionally, remove empty columns
    if not write_empty_cols:
        # find empty columns; comment rows (single %/...%/ cell) span the sheet and are skipped
        are_cols_empty = [True] * len(headings[0])
        for row in data:
            if len(row) == 1 and isinstance(row[0], str) and row[0].startswith('%/') and row[0].endswith('/%'):
                continue
            for i_col, cell in enumerate(row):
                if cell not in ['', None]:
                    are_cols_empty[i_col] = False

        # remove empty columns, iterating right-to-left so pops don't shift later indices
        reversed_enum_are_cols_empty = list(reversed(list(enumerate(are_cols_empty))))
        for rows in [headings, data]:
            for row in rows:
                if len(row) == 1 and isinstance(row[0], str) and row[0].startswith('%/') and row[0].endswith('/%'):
                    continue
                for i_col, is_col_empty in reversed_enum_are_cols_empty:
                    if is_col_empty:
                        row.pop(i_col)

        # map every original column to the index of the merge range covering it (or None)
        merges = [None] * len(are_cols_empty)
        for i_merge, merge_range in enumerate(merge_ranges):
            merge_ranges[i_merge] = list(merge_range)
            _, start_col, _, end_col = merge_range
            for i_col in range(start_col, end_col + 1):
                merges[i_col] = i_merge
        # drop the per-column merge markers of the removed columns
        for i_col, is_col_empty in reversed_enum_are_cols_empty:
            if is_col_empty:
                merges.pop(i_col)

        # recompute each merge range's start/end columns over the surviving columns
        for merge_range in merge_ranges:
            merge_range[1] = None
            merge_range[3] = None
        for i_col, i_merge in enumerate(merges):
            if i_merge is not None:
                if merge_ranges[i_merge][1] is None:
                    merge_ranges[i_merge][1] = i_col
                    merge_ranges[i_merge][3] = i_col
                merge_ranges[i_merge][1] = min(merge_ranges[i_merge][1], i_col)
                merge_ranges[i_merge][3] = max(merge_ranges[i_merge][3], i_col)
        # discard merge ranges whose columns were all removed
        for merge_range in reversed(merge_ranges):
            if merge_range[1] is None:
                merge_ranges.remove(merge_range)

    # validations
    if model.Meta.table_format == TableFormat.column:
        # column-oriented sheets: pad the validations past the metadata header rows
        field_validations = [None] * len(metadata_headings) + field_validations
    validation = WorksheetValidation(orientation=WorksheetValidationOrientation[model.Meta.table_format.name],
                                     fields=field_validations)

    # write sheet for model to file
    self.write_sheet(writer, model, data, headings, metadata_headings, validation,
                     extra_entries=extra_entries, merge_ranges=merge_ranges, protected=protected)
def write_sheet(self, writer, model, data, headings, metadata_headings, validation,
                extra_entries=0, merge_ranges=None, protected=True):
    """ Write data to sheet

    Args:
        writer (:obj:`wc_utils.workbook.io.Writer`): io writer
        model (:obj:`type`): model
        data (:obj:`list` of :obj:`list` of :obj:`object`): list of list of cell values
        headings (:obj:`list` of :obj:`list` of :obj:`str`): list of list of row headings validations
        metadata_headings (:obj:`list` of :obj:`list` of :obj:`str`): model metadata (name, description)
            to print at the top of the worksheet
        validation (:obj:`WorksheetValidation`): validation
        extra_entries (:obj:`int`, optional): additional entries to display
        merge_ranges (:obj:`list` of :obj:`tuple`): list of ranges of cells to merge
        protected (:obj:`bool`, optional): if :obj:`True`, protect the worksheet
    """
    style = self.create_worksheet_style(model, extra_entries=extra_entries)
    if model.Meta.table_format == TableFormat.row:
        # row-oriented: one object per row, headings become column headings
        sheet_name = model.Meta.verbose_name_plural
        row_headings = []
        column_headings = headings
        style.auto_filter = True
        style.title_rows = len(metadata_headings)
        style.head_rows = len(column_headings)
        if merge_ranges:
            style.merge_ranges = merge_ranges
        else:
            style.merge_ranges = []
    else:
        # column-oriented: transpose the data, headings become row headings
        sheet_name = model.Meta.verbose_name
        data = transpose(data)
        style.auto_filter = False
        row_headings = headings
        column_headings = []
        style.title_rows = len(metadata_headings)
        style.head_rows = 0
        style.head_columns = len(row_headings)
        if merge_ranges:
            # transpose the merge ranges too, shifting them past the metadata title rows
            n = len(metadata_headings)
            style.merge_ranges = [(start_col + n, start_row - n, end_col + n, end_row - n)
                                  for start_row, start_col, end_row, end_col in merge_ranges]
        else:
            style.merge_ranges = []

    # merge data, headings: prepend each row's headings to its data row
    for i_row, row_heading in enumerate(transpose(row_headings)):
        if i_row < len(data):
            row = data[i_row]
        else:
            row = []
            data.append(row)

        for val in reversed(row_heading):
            row.insert(0, val)

    for _ in row_headings:
        for column_heading in column_headings:
            column_heading.insert(
                0, None)  # pragma: no cover # unreachable because row_headings and column_headings cannot both be non-empty

    content = metadata_headings + column_headings + data

    # write content to worksheet; Excel worksheets carry the '!!' marker prefix
    if isinstance(writer, wc_utils.workbook.io.ExcelWriter):
        sheet_name = '!!' + sheet_name
    writer.write_worksheet(sheet_name, content, style=style, validation=validation, protected=protected)
@staticmethod
def create_worksheet_style(model, extra_entries=0):
    """ Create worksheet style for model

    Extra entries extend the sheet along the model's instance axis: rows
    for row-oriented models, columns otherwise.

    Args:
        model (:obj:`type`): model class
        extra_entries (:obj:`int`, optional): additional entries to display

    Returns:
        :obj:`WorksheetStyle`: worksheet style
    """
    row_oriented = model.Meta.table_format == TableFormat.row
    return WorksheetStyle(
        extra_rows=extra_entries if row_oriented else 0,
        extra_columns=0 if row_oriented else extra_entries,
    )
class PandasWriter(WorkbookWriter):
    """ Write model instances to a dictionary of :obj:`pandas.DataFrame`

    Attributes:
        _data_frames (:obj:`dict`): maps each model (:obj:`Model`) to a
            :obj:`pandas.DataFrame` of its instances
    """

    def __init__(self):
        self._data_frames = None

    def run(self, objects, schema_name=None, models=None, get_related=True,
            include_all_attributes=True, validate=True,
            protected=False):
        """ Write model instances to a dictionary of :obj:`pandas.DataFrame`

        Args:
            objects (:obj:`Model` or :obj:`list` of :obj:`Model`): object or list of objects
            schema_name (:obj:`str`, optional): schema name
            models (:obj:`list` of :obj:`Model`, optional): desired order of the models;
                models not listed follow in alphabetical order
            get_related (:obj:`bool`, optional): if :obj:`True`, write :obj:`objects` and all their related objects
            include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including those
                not explictly included in :obj:`Model.Meta.attribute_order`
            validate (:obj:`bool`, optional): if :obj:`True`, validate the data
            protected (:obj:`bool`, optional): if :obj:`True`, protect the worksheet

        Returns:
            :obj:`dict`: dictionary that maps models (:obj:`Model`) to their
                instances (:obj:`pandas.DataFrame`)
        """
        self._data_frames = {}
        # Delegate rendering to the workbook writer; write_sheet below
        # captures each rendered table as a DataFrame instead of a file.
        super(PandasWriter, self).run('*.csv', objects,
                                      schema_name=schema_name,
                                      models=models,
                                      get_related=get_related,
                                      include_all_attributes=include_all_attributes,
                                      validate=validate,
                                      write_toc=False, write_schema=False,
                                      protected=protected)
        return self._data_frames

    def write_sheet(self, writer, model, data, headings, metadata_headings, validation,
                    extra_entries=0, merge_ranges=None, protected=False):
        """ Capture one model's table as a :obj:`pandas.DataFrame`

        Args:
            writer (:obj:`wc_utils.workbook.io.Writer`): io writer (unused here)
            model (:obj:`type`): model
            data (:obj:`list` of :obj:`list` of :obj:`object`): list of list of cell values
            headings (:obj:`list` of :obj:`list` of :obj:`str`): list of list of headings
            metadata_headings (:obj:`list` of :obj:`list` of :obj:`str`): model metadata (unused here)
            validation (:obj:`WorksheetValidation`): validation (unused here)
            extra_entries (:obj:`int`, optional): additional entries to display (unused here)
            merge_ranges (:obj:`list` of :obj:`tuple`): cell ranges to merge (unused here)
            protected (:obj:`bool`, optional): if :obj:`True`, protect the worksheet (unused here)
        """
        if len(headings) == 1:
            # single heading row: strip the leading '!' marker from each label
            columns = [heading[1:] for heading in headings[0]]
        else:
            # several heading rows become a MultiIndex; strip the markers in place
            for row in headings:
                row[:] = [cell[1:] if cell else cell for cell in row]
            columns = pandas.MultiIndex.from_tuples(transpose(headings))
        self._data_frames[model] = pandas.DataFrame(data, columns=columns)
class MultiSeparatedValuesWriter(WriterBase):
""" Write model objects to a single text file which contains multiple
comma or tab-separated tables.
"""
def run(self, path, objects, schema_name=None, doc_metadata=None, model_metadata=None,
models=None, get_related=True, include_all_attributes=True, validate=True,
title=None, description=None, keywords=None, version=None, language=None, creator=None,
write_toc=True, write_schema=False, write_empty_models=True, write_empty_cols=True,
extra_entries=0, group_objects_by_model=True, data_repo_metadata=False, schema_package=None,
protected=False):
""" Write model objects to a single text file which contains multiple
comma or tab-separated tables.
Args:
path (:obj:`str`): path to write file(s)
objects (:obj:`Model` or :obj:`list` of :obj:`Model`): :obj:`Model` instance or list of :obj:`Model` instances
schema_name (:obj:`str`, optional): schema name
doc_metadata (:obj:`dict`, optional): dictionary of document metadata to be saved to header row
(e.g., ``!!!ObjTables ...``)
model_metadata (:obj:`dict`, optional): dictionary that maps models to dictionary with their metadata to
be saved to header row (e.g., ``!!ObjTables ...``)
models (:obj:`list` of :obj:`Model`, optional): models in the order that they should
appear as worksheets; all models which are not in :obj:`models` will
follow in alphabetical order
get_related (:obj:`bool`, optional): if :obj:`True`, write :obj:`objects` and all their related objects
include_all_attributes (:obj:`bool`, optional): if :obj:`True`, export all attributes including | |
and the other one will be in the foreground
def merge_images(im1, im2, im_size):
    """Open both image files at im_size x im_size and paste the second on
    top of the first, using the foreground's own alpha channel as mask.
    Returns the composited background image (not saved)."""
    size = [im_size, im_size]
    base = Image.open(im1).resize(size)
    overlay = Image.open(im2).resize(size)
    base.paste(overlay, (0, 0), overlay)
    return base
def merge_image_with_random_noise(image, im_size):
    """Paste the given image file (using its alpha mask) onto a background
    of uniform per-channel RGB noise; returns the composited image."""
    noise_channels = [255 * numpy.random.random((im_size, im_size)) for _ in range(3)]
    random_noise = numpy.dstack(noise_channels)
    background = Image.fromarray(numpy.uint8(random_noise))
    foreground = Image.open(image).resize([im_size, im_size])
    background.paste(foreground, (0, 0), foreground)
    return background
# merge a batch of foreground images with clutter background images
def create_merged_images(im_size=299):
    """Composite each of 5350 foregrounds onto 5 different clutter
    backgrounds and save the results.

    Fixed: the original passed the *save path* string as merge_images'
    im_size argument and discarded the returned image, so resize() would
    fail and nothing was ever written to disk.  The merged image is now
    produced at `im_size` (new optional parameter; default 299 matches the
    preprocessing pipeline's image size — TODO confirm) and saved to `sav`.
    """
    for i in range(1, 5351):
        for j in range(0, 5):
            im1 = 'clutter_backgrounds/preprocessed/preprocessed' + str(i + j * 3000) + '.jpg'
            im2 = 'zylinder_images/leo_images_nur_vorne/preprocessed/Systembediengeraet_A110/transparent' + str(
                i) + '.jpg'
            sav = 'zylinder_images/leo_images/Systembediengeraet_A110/merged' + str(i) + '_' + str(j) + '.jpg'
            merged = merge_images(im1, im2, im_size)
            merged.save(sav, "JPEG")
        if i % 53 == 0:
            print('\r' + str(int(i / 53.4)) + '% done', end='')
# flip images and save the flipped copies under offset indices
def flip_images():
    """Create flipped copies of images 1..32100, saved as 32101..64200.

    NOTE(review): cv2.flip(..., 0) flips around the x-axis (a *vertical*
    flip) even though the surrounding comments say "horizontal" — confirm
    which was intended.
    """
    total = 32100
    for x in range(1, total + 1):
        src = 'zylinder_images/leo_images/Systembediengeraet_A110/preprocessed' + str(x) + '.jpg'
        dst = 'zylinder_images/leo_images/Systembediengeraet_A110/preprocessed' + str(total + x) + '.jpg'
        flipped = cv2.flip(scipy.misc.imread(src), 0)
        scipy.misc.imsave(dst, flipped)
        if x % 32 == 0:
            print('\r' + str(int(x / 320)) + '% done', end='')
def change_brightness(image, save):
    """Randomly scale the image's brightness (factor in [0.6, 1.5)) and
    write the result to `save` as an RGB JPEG."""
    factor = random.uniform(0.6, 1.5)
    enhanced = ImageEnhance.Brightness(image).enhance(factor)
    enhanced.convert('RGB').save(save, "JPEG")
# manipulate HSV-channels of the whole image (one random factor per channel)
def manipulate_hsv(image):
    """Rescale the H, S and V channels of the whole image by random factors
    and return the result converted back to RGB.

    NOTE(review): im_arr is uint8, so `im_arr[..., 1] + randint(1, 30)` can
    wrap past 255 before scaling, and writing the unclipped float `h_vals`
    back into the uint8 array relies on numpy's unsafe cast (wrap mod 256);
    newer numpy versions may warn about this — confirm acceptable.
    """
    im = image.convert('HSV')
    im_arr = numpy.array(im)
    h_vals = random.uniform(0.7, 1.6) * (im_arr[..., 0])
    s_vals = random.uniform(0.3, 2.6) * (im_arr[..., 1] + randint(1,
                                                                  30))  # components have lots of grey colors -> grey means saturation == 0 -> give a little more saturation, so that manipulation is successful
    v_vals = random.uniform(0.7, 1.6) * im_arr[..., 2]
    # S and V channels should not be greater than 255. H channel can be greater, because it starts from beginning and beginning is the continuous successor of the end -> see HSV cone
    s_vals[s_vals > 255] = 255
    v_vals[v_vals > 255] = 255
    im_arr[..., 0] = h_vals
    im_arr[..., 1] = s_vals
    im_arr[..., 2] = v_vals
    manipulated_image = Image.fromarray(im_arr, mode='HSV')
    return manipulated_image.convert('RGB')
# manipulate HSV-channels of the whole image, variant 2: constant offsets
def manipulate_hsv_addition(image):
    """Shift the H, S and V channels by random constant offsets and return
    the image converted back to RGB.

    Fixed: the working array was *unsigned* uint16, so adding a negative
    offset wrapped around to ~65500; the `< 0` guards could never fire and
    the `> 255` guard then clamped those dark/desaturated pixels to 255 —
    the opposite extreme.  A signed dtype makes the clamping behave as
    intended.  Hue stays unclipped (it wraps mod 256 on the final uint8
    cast, matching the circular HSV hue), exactly as before.
    """
    im = image.convert('HSV')
    # Signed 16-bit: negative offsets stay negative and can be clipped to 0.
    im_arr = numpy.array(im, dtype=numpy.int16)
    im_arr[..., 0] = im_arr[..., 0] + randint(-20, 20)  # hue: wraps on cast below
    im_arr[..., 1] = numpy.clip(im_arr[..., 1] + randint(-40, 40), 0, 255)
    im_arr[..., 2] = numpy.clip(im_arr[..., 2] + randint(-40, 40), 0, 255)
    im_arr = im_arr.astype(numpy.uint8)  # Pillow needs an 8bit array to form a picture
    manipulated_image = Image.fromarray(im_arr, mode='HSV')
    # manipulated_image.show()
    return manipulated_image.convert('RGB')
# manipulate every single pixel's HSV-values
def manipulate_every_pixels_hsv(image):
    """Scale every pixel's H, S and V value by an independent random factor
    in [0.7, 1.6), clamp to 255, and return the image converted to RGB.

    Rewritten as a single vectorized numpy expression; the original looped
    over every pixel in Python, which is orders of magnitude slower.  Each
    entry still receives its own independent uniform factor (now drawn from
    numpy.random instead of the `random` module — same distribution), and
    astype(uint8) truncates toward zero exactly like the old per-element
    int assignment.
    """
    hsv_im = image.convert('HSV')
    im_arr = numpy.array(hsv_im)
    factors = numpy.random.uniform(0.7, 1.6, size=im_arr.shape)
    scaled = numpy.minimum(factors * im_arr, 255)
    manipulated_image = Image.fromarray(scaled.astype(numpy.uint8), mode='HSV')
    return manipulated_image.convert('RGB')
def manipulate_rgb(image):
    """Shift the R, G and B channels by random constant offsets (clamped to
    [0, 255]) and return the manipulated RGB image.

    Fixes two defects:
    - the computed image was discarded — the function only called im.show()
      (a debug window) and returned None, unlike the sibling manipulate_*
      helpers which all return the result;
    - the working array was unsigned uint16, so negative offsets wrapped to
      ~65500 and the `< 0` guards never fired; the `> 255` guard then
      clamped those dark pixels to 255.  A signed dtype fixes the clamping.
    """
    rgb_im = image.convert('RGB')
    # int16: offsets may be negative; an unsigned dtype would wrap instead of clip.
    im_arr = numpy.array(rgb_im, dtype=numpy.int16)
    for channel in range(3):  # R, G, B in order, one random offset each
        shifted = im_arr[..., channel] + randint(-20, 20)
        im_arr[..., channel] = numpy.clip(shifted, 0, 255)
    im_arr = im_arr.astype(numpy.uint8)  # Pillow needs an 8bit array
    return Image.fromarray(im_arr, mode='RGB')
# equalize the histogram of the luminance (Y-channel) of an image
def equalize_luminance(image):
    """Equalize the histogram of the Y (luminance) channel via a BGR->YUV
    round trip and return the result as a PIL RGB image."""
    bgr = numpy.array(image.convert('RGB'))[:, :, ::-1].copy()
    yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)
    # equalize only the luminance plane
    yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])
    rgb = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB)
    return Image.fromarray(rgb)
def blur_images(im_dir):
    """Apply a radius-1 Gaussian blur in place to every *.jpg under im_dir,
    reporting (but not raising) per-file errors."""
    for root, dirs, files in os.walk(im_dir):
        for file_name in files:
            if not file_name.endswith(".jpg"):
                continue
            image_path = os.path.join(root, file_name)
            print(image_path)
            try:
                blurred = Image.open(image_path).filter(ImageFilter.GaussianBlur(radius=1))
                blurred.save(image_path, "JPEG")
            except Exception as e:
                # best-effort batch job: report and continue
                print(e)
# convert image from *.png to *.jpg
def convert_to_jpg(image):
    """Convert a *.png file to RGB and save it beside the original with a
    .jpg extension (the .png file is left in place)."""
    rgb_im = Image.open(image).convert('RGB')
    rgb_im.save(image.replace('.png', '.jpg'))
def resizeImages(im_dir):
    """Rotate every *.JPG under im_dir according to its EXIF orientation
    tag and shrink it in place to 1200px width (aspect ratio preserved).

    Fixed: `image._getexif()` returns None for images without EXIF data;
    the old code then raised AttributeError inside the try block, printed
    it, and skipped the file entirely — it was never resized.  Missing
    EXIF is now treated as "no rotation needed".
    """
    # Resolve the numeric tag id for 'Orientation' once, up front.
    orientation = None
    for tag_id in ExifTags.TAGS.keys():
        if ExifTags.TAGS[tag_id] == 'Orientation':
            orientation = tag_id
            break
    for root, dirs, files in os.walk(im_dir):
        for file in files:
            if not file.endswith(".JPG"):
                continue
            try:
                image_path = os.path.join(root, file)
                print(image_path)
                image = Image.open(image_path)
                raw_exif = image._getexif() if hasattr(image, '_getexif') else None
                exif = dict(raw_exif.items()) if raw_exif else {}
                # EXIF orientation values 3/6/8 map to these CCW rotations.
                rotation = {3: 180, 6: 270, 8: 90}.get(exif.get(orientation))
                if rotation:
                    image = image.rotate(rotation, expand=True)
                basewidth = 1200
                wpercent = (basewidth / float(image.size[0]))
                hsize = int((float(image.size[1]) * float(wpercent)))
                image.thumbnail((basewidth, hsize), Image.ANTIALIAS)
                image.save(image_path, "JPEG")
            except Exception as e:
                # best-effort batch job: report and continue with the next file
                print(e)
# do the whole preprocessing process for one image
# preprocess image and merge it with several backgrounds
def preprocess_all(image_path, sav_dir, image_size, background_directory):
    """For one source image: write 6 randomly rotated/cropped square
    variants, then composite each onto a random background with HSV and
    brightness augmentation, saving everything under sav_dir/<class-dir>/.
    """
    # Mirror the source image's class directory (its parent dir name) under sav_dir.
    sav_dir = sav_dir + os.path.basename(os.path.dirname(image_path))
    if not os.path.isdir(sav_dir):  # if directory doesn't exist, create it
        os.mkdir(sav_dir)
    # uri = os.path.dirname(image_path) + '/uri.txt' // uncomment uri path
    # shutil.copyfile(uri, sav_dir + '/uri.txt') // uncomment uri path
    # read image
    im = Image.open(image_path)
    width, height = im.size
    # im.resize([int(0.5 * width), int(0.5 * height)]) # resize image to 50% to accelerate computation
    # Pass 1: save 6 rotated/cropped variants.
    # NOTE(review): the variants are PNG-encoded under a .jpg filename; pass 2
    # re-saves the very same paths as real JPEGs, so these are transient
    # intermediates — confirm nothing else reads them in between.
    for y in range(0, 6):
        save_string = sav_dir + "/" + os.path.basename(image_path)[:-4] + '_' + str(y) + '.jpg'
        image = rotate_randomly_pil(im)
        image = crop_square_pil(image)
        image = crop_random_square_pil(image)
        # image = crop_quarter_square_pil(image, y)
        image = image.resize([image_size, image_size])
        image.save(save_string, "PNG")
    # Pass 2: composite each variant onto a background and augment it,
    # overwriting the variant file with the final JPEG.
    for y in range(0, 6):
        foreground = sav_dir + "/" + os.path.basename(image_path)[:-4] + '_' + str(y) + '.jpg'
        for z in range(0, 1):  # one background per variant; widen the range for more
            background = background_directory + random.choice(
                os.listdir(background_directory))  # randomly choose a background image
            save_string_merged = sav_dir + "/" + os.path.basename(image_path)[:-4] + '_' + str(y) + '.jpg'
            merged_image = merge_images(background, foreground, image_size)
            # merged_image = merge_image_with_random_noise(foreground, image_size)
            # hsv_manipulated_image = manipulate_every_pixels_hsv(merged_image)
            hsv_manipulated_image = manipulate_hsv_addition(merged_image)
            # equalized_image = equalize_luminance(hsv_manipulated_image)
            change_brightness(hsv_manipulated_image, save_string_merged)
# initialize preprocessing for a directory.
# Directory should contain images for all classes in different directories
# structure should be as follows:
#
# im_dir (<- the one you use as parameter)
# │
# │
# └───Class1
# │ │ image1.png
# │ │ image2.png
# │ │ ...
# │
# └───Class2
# │ image1.png
# │ image2.png
# │ ...
def do_preprocessing_for_dir(im_dir, sav_dir, image_size, background_directory):
    """Run the full preprocessing pipeline for every *.png below im_dir
    (one class per sub-directory; see the layout sketch above), writing the
    results into sav_dir and showing a progress bar."""
    if not os.path.exists(sav_dir):
        os.mkdir(sav_dir)
    for root, dirs, files in os.walk(im_dir):
        for idx, file_name in enumerate(files):
            if not file_name.endswith(".png"):
                continue
            start_image_preprocessing(os.path.join(root, file_name), sav_dir, image_size, background_directory)
            printProgressBar(idx + 1, len(files), prefix='Progress:', suffix='Complete')
# start the image preprocessing and take care of possibly occurring ValueErrors
def start_image_preprocessing(image_path, sav_dir, image_size, background_directory):
    """Run preprocess_all, retrying whenever Pillow's sporadic
    "conversion from L to HSV not supported" ValueError occurs (retrying
    has always succeeded in practice); any other ValueError is re-raised."""
    while True:
        try:
            preprocess_all(image_path, sav_dir, image_size, background_directory)
            return
        except ValueError as e:
            if str(e) != "conversion from L to HSV not supported":
                raise
            print(e)  # known sporadic failure: report and retry
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
    """Render a single-line terminal progress bar, refreshed in place via
    carriage return.

    Call in a loop:
        iteration - current iteration (int)
        total     - total iterations (int)
        prefix    - text before the bar (str)
        suffix    - text after the percentage (str)
        decimals  - decimal places of the percentage (int)
        length    - bar width in characters (int)
        fill      - character for the completed part (str)
    A newline is printed once iteration == total.
    """
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    done = length * iteration // total
    bar = fill * done + '-' * (length - done)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='')
    if iteration == total:
        print()
if __name__ == '__main__':
'''
image_size = 299
# background_directory = "/Users/adminsitrator/denkbares/deep_learning_stuff/bilder_deep_learning/clutter_backgrounds/preprocessed_299/"
| |
"cannot be compiled since it inherits from nn.Module"):
torch.jit.script(MyModule)
    def test_view_write(self):
        """Aliasing check: mutating a view pulled out of a list must be seen by
        later reads; checkScript verifies eager and scripted results agree."""
        def fn(x, y):
            l = []
            l.append(x)
            x_view = l[0]
            a = x + x
            x_view.add_(y)
            b = x + x
            return a == b
        self.checkScript(fn, (torch.rand(2, 3), torch.rand(2, 3)))
    def test_module_attrs(self):
        """A ScriptModule holding a Dict attribute and a Parameter: forward()
        must support dict lookup by string key plus parameter arithmetic."""
        class M(torch.jit.ScriptModule):
            def __init__(self, table):
                super(M, self).__init__()
                self.table = torch.jit.Attribute(table, Dict[str, torch.Tensor])
                self.x = torch.nn.Parameter(torch.tensor([100.0]))
            @torch.jit.script_method
            def forward(self, key):
                # type: (str) -> Tensor
                return self.table[key] + self.x
        with torch._jit_internal._disable_emit_hooks():
            # TODO: re-enable module hook when Python printing of attributes is
            # supported
            m = M({char : torch.ones(1) + ord(char) - ord("a") for char in "abcdefg"})
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(m("c"), torch.tensor([103]))
    def test_module_none_attrs(self):
        """A None-valued attribute: the peephole pass should fold the access so
        prim::GetAttr disappears from the graph."""
        class MyMod(torch.jit.ScriptModule):
            def __init__(self):
                super(MyMod, self).__init__()
                self.optional_value = None
            @torch.jit.script_method
            def forward(self):
                return self.optional_value
        graph = MyMod().forward.graph
        FileCheck().check("prim::GetAttr").run(graph)
        self.run_pass('peephole', graph)
        FileCheck().check_not("prim::GetAttr").run(graph)
    def test_tensor_import_export(self):
        """Tensor constants produced by constant propagation must survive an
        export/import round trip of the graph."""
        @torch.jit.script
        def foo(x):
            a = torch.tensor(1)
            b = torch.tensor([1, 2])
            c = [a, b]
            return c
        self.run_pass('constant_propagation', foo.graph)
        m = self.createFunctionFromGraph(foo.graph)
        self.getExportImportCopy(m)
    def get_pickle_values(self):
        """Fixture: (name, value, type) triples covering the attribute types the
        serialization tests below must round-trip."""
        return (('dict', {"I": "am", "a test": "test"}, Dict[str, str]),
                ('float', 2.3, float),
                ('int', 99, int),
                ('bool', False, bool),
                ('tuple', (1, 2, 3, 4), Tuple[int, int, int, int]),
                ('list', [(1, 2), (3, 4)], List[Tuple[int, int]]),
                ('tensor', torch.randn(2, 2), torch.Tensor),
                ('int_list', [1, 2, 3, 4], List[int]),
                ('tensor_list', [torch.ones(2, 2) + i for i in range(4)], List[torch.Tensor]),
                ('bool_list', [True, True, False, True], List[bool]),
                ('float_list', [1., 2., 3., 4.], List[float]),
                ('str_list', ['hello', 'bye'], List[str]),
                ('none', None, Optional[int]),
                ('a_device', torch.device('cpu'), torch.device),
                ('another_device', torch.device('cuda:1'), torch.device))
    def test_attribute_serialization(self):
        """Attributes set via torch.jit.Attribute must round-trip through
        export/import with forward() returning equal values."""
        tester = self
        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                for name, value, the_type in tester.get_pickle_values():
                    setattr(self, name, torch.jit.Attribute(value, the_type))
            @torch.jit.script_method
            def forward(self):
                # NOTE(review): tensor and the device entries are deliberately
                # omitted from the returned tuple -- confirm intent
                return (self.dict, self.float, self.int, self.bool, self.tuple,
                        self.list, self.int_list, self.tensor_list, self.bool_list,
                        self.float_list, self.str_list, self.none)
        m = M()
        imported_m = self.getExportImportCopy(m)
        self.assertEqual(m(), imported_m())
    def test_string_len(self):
        """len() on str must script correctly, including the empty string."""
        def fn(x):
            # type: (str) -> int
            return len(x)
        self.checkScript(fn, ("",))
        self.checkScript(fn, ("h",))
        self.checkScript(fn, ("hello",))
    def test_multiline_optional_future_refinement(self):
        """A multi-line Optional[Future[...]] annotation must parse and script."""
        @torch.jit.script
        def fun() -> int:
            future: Optional[
                torch.jit.Future[Tuple[torch.Tensor]]
            ] = None
            return 1
        self.assertEqual(fun(), 1)
    @unittest.skipIf(IS_WINDOWS or IS_SANDCASTLE, "NYI: TemporaryFileName support for Windows or Sandcastle")
    def test_attribute_unpickling(self):
        """Saving to disk and torch.jit.load-ing back must restore non-tensor
        attributes exactly (tensor-valued entries are skipped in the compare)."""
        tensor = torch.randn(2, 2)
        tester = self
        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                for name, value, the_type in tester.get_pickle_values():
                    setattr(self, "_" + name, torch.jit.Attribute(value, the_type))
            @torch.jit.script_method
            def forward(self):
                return (self._dict, self._float, self._int, self._bool, self._tuple,
                        self._list, self._int_list, self._tensor_list, self._bool_list,
                        self._float_list, self._str_list, self._none)
        with TemporaryFileName() as fname:
            M().save(fname)
            loaded = torch.jit.load(fname)
            # recursively detect tensor(-list) values, which randn makes
            # non-deterministic and therefore uncomparable here
            def is_tensor_value(item):
                if isinstance(item, torch.Tensor):
                    return True
                if isinstance(item, list):
                    return is_tensor_value(item[0])
                return False
            for name, value, the_type in self.get_pickle_values():
                if is_tensor_value(value):
                    continue
                self.assertEqual(value, getattr(loaded, "_" + name))
    @unittest.skipIf(IS_WINDOWS or IS_SANDCASTLE, "NYI: TemporaryFileName support for Windows or Sandcastle")
    @unittest.skipIf(not BUILD_WITH_CAFFE2, "PyTorch is build without Caffe2 support")
    def test_old_models_bc(self):
        """Backward compatibility: a hand-built legacy archive (model.json,
        protoVersion 2) must still load via torch.jit.load and execute."""
        model = {
            'archive/version': b'1',
            'archive/code/archive.py':
                b'''
op_version_set = 0
def forward(self,
    _0: Tensor) -> Tensor:
  _1 = torch.zeros([10], dtype=6, layout=0, device=torch.device("cpu"))
  result = torch.to(torch.fill_(_1, 5), dtype=6, layout=0, device=torch.device("cpu"),
                    non_blocking=False, copy=False)
  result2 = torch.rand([10], dtype=6, layout=0, device=torch.device("cpu"))
  result3 = torch.rand_like(result2, dtype=6, layout=0, device=torch.device("cpu"))
  _2 = torch.add(torch.add(result, result2, alpha=1), result3, alpha=1)
  return _2
''',
            'archive/attributes.pkl': b'\x80\x02](e.',
            'archive/libs.py': b'op_version_set = 0\n',
            'archive/model.json':
                b'''
{
  "protoVersion":"2",
  "mainModule":{
    "torchscriptArena":{
      "key":"code/archive.py"
    },
    "name":"archive",
    "optimize":true
  },
  "producerName":"pytorch",
  "producerVersion":"1.0",
  "libs":{
    "torchscriptArena":{
      "key":"libs.py"
    }
  }
}'''}
        with TemporaryFileName() as fname:
            archive_name = os.path.basename(os.path.normpath(fname))
            with zipfile.ZipFile(fname, 'w') as archive:
                for k, v in model.items():
                    archive.writestr(k, v)
            with open(fname, "rb") as f:
                fn = torch.jit.load(f)
        x = torch.zeros(10)
        fn(x)
    def test_submodule_attribute_serialization(self):
        """Attributes on nested submodules must round-trip through
        export/import together with the parent module's own attributes."""
        class S(torch.jit.ScriptModule):
            def __init__(self, list_data):
                super(S, self).__init__()
                self.table = torch.jit.Attribute({"I": "am", "a test": "test"}, Dict[str, str])
                self.list = torch.jit.Attribute(list_data, List[Tuple[int, int]])
            @torch.jit.script_method
            def forward(self):
                return (self.table, self.list)
        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                self.table = torch.jit.Attribute({"this": "is", "a different": "dict"}, Dict[str, str])
                self.tensor = torch.jit.Attribute(torch.randn(2, 2), torch.Tensor)
                self.s1 = S([(1, 2)])
                self.s2 = S([(4, 5)])
            @torch.jit.script_method
            def forward(self):
                return (self.table, self.tensor, self.s1.table, self.s2.list, self.s1.list)
        m = M()
        imported_m = self.getExportImportCopy(m)
        self.assertEqual(m(), imported_m())
    def test_serialization_big_ints(self):
        """int attributes at the 32- and 64-bit extremes (including 2**32)
        must serialize and round-trip exactly."""
        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                self.int32_max = torch.jit.Attribute(2**31 - 1, int)
                self.int32_min = torch.jit.Attribute(-2**31, int)
                self.uint32_max = torch.jit.Attribute(2**32, int)
                self.int64_max = torch.jit.Attribute(2**63 - 1, int)
                self.int64_min = torch.jit.Attribute(-2**63, int)
                self.tensor = torch.nn.Parameter(torch.ones(2, 2))
            @torch.jit.script_method
            def forward(self, x):
                # type: (int) -> (int)
                return x + (self.int32_max + self.int32_min) + (self.int64_max + self.int64_min)
        m = M()
        imported = self.getExportImportCopy(m)
        self.assertEqual(m(10), imported(10))
        self.assertEqual(m.int32_max, imported.int32_max)
        self.assertEqual(m.int32_min, imported.int32_min)
        self.assertEqual(m.uint32_max, imported.uint32_max)
        self.assertEqual(m.int64_max, imported.int64_max)
        self.assertEqual(m.int64_min, imported.int64_min)
    def test_script_scope(self):
        """Scripting a torch.nn.functional function directly must succeed
        (names resolve from its defining scope)."""
        scripted = torch.jit.script(torch.nn.functional.triplet_margin_loss)
    @unittest.skipIf(IS_WINDOWS, "NYI: TemporaryFileName on Windows")
    def test_serialization_sharing(self):
        """Pickled module state must memoize repeated strings: each string is
        stored once and later occurrences use BINGET references."""
        class M(torch.jit.ScriptModule):
            def __init__(self):
                super(M, self).__init__()
                self.list = torch.jit.Attribute([], List[str])
            @torch.jit.script_method
            def forward(self, key):
                # type: (str) -> List[str]
                self.list.append(key)
                self.list.append(key)
                self.list.append(key)
                return self.list
        # the text of the string should only appear once in the pickling
        m = M()
        s1 = "a long string"
        s2 = "a different, even longer string"
        self.assertEqual(m(s1), [s1] * 3)
        self.assertEqual(m(s2), [s1] * 3 + [s2] * 3)
        with TemporaryFileName() as fname:
            m.save(fname)
            archive_name = os.path.basename(os.path.normpath(fname))
            archive = zipfile.ZipFile(fname, 'r')
            pickled_data = archive.read(os.path.join(archive_name, 'data.pkl'))
            out = io.StringIO()
            pickletools.dis(pickled_data, out=out)
            disassembled = out.getvalue()
            FileCheck().check_count(s1, 1, exactly=True) \
                .check_count("BINGET", 2, exactly=True) \
                .check_count(s2, 1, exactly=True) \
                .check_count("BINGET", 2, exactly=True).run(out.getvalue())
    def test_sys_stdout_override(self):
        """print() inside a scripted function must honor a replaced sys.stdout."""
        @torch.jit.script
        def foo():
            print('foo')
        class Redirect(object):
            def __init__(self):
                self.s = ''
            def write(self, s):
                self.s += s
        old_stdout = sys.stdout
        redirect = Redirect()
        try:
            sys.stdout = redirect
            foo()
        finally:
            # always restore stdout, even if the scripted call raised
            sys.stdout = old_stdout
        FileCheck().check('foo').run(redirect.s)
    def test_dtype_attr(self):
        """A torch.dtype stored as a module attribute must be scriptable."""
        class Foo(torch.nn.Module):
            def __init__(self):
                super(Foo, self).__init__()
                self.dtype = torch.zeros([]).dtype
            def forward(self):
                return torch.zeros(3, 4, dtype=self.dtype)
        f = Foo()
        torch.jit.script(f)
    def test_named_buffers_are_iterable(self):
        """named_buffers() must be iterable inside script, yield only registered
        buffers (not plain attributes like self.z), and survive export/import."""
        class MyMod(torch.nn.Module):
            def __init__(self):
                super(MyMod, self).__init__()
                self.mod = (torch.nn.ReLU())
                self.mod2 = (torch.nn.ReLU())
                self.mod3 = torch.nn.Sequential(torch.nn.Sequential(torch.nn.ReLU()))
                self.register_buffer('x', torch.zeros(3))
                self.register_buffer('y', torch.zeros(3))
                self.z = torch.zeros(3)
            def bleh(self):
                return self.z + 4
            @torch.jit.export
            def method(self):
                names = [""]
                vals = []
                for name, buffer in self.named_buffers():
                    names.append(name)
                    vals.append(buffer + 2)
                return names, vals
            def forward(self, x):
                return x
        model = MyMod()
        x = torch.jit.script(model)
        z = self.getExportImportCopy(x)
        self.assertEqual(z.method(), x.method())
        self.assertEqual(z.method(), model.method())
        self.assertEqual(x.method(), model.method())
        names = x.method()
        # 'z' is a plain tensor attribute, not a registered buffer
        for name in names:
            self.assertNotEqual('z', name)
    def test_static_if_prop(self):
        """hasattr() must be statically resolved at scripting time, for modules
        with conditionally set attributes, overloads, and scripted classes."""
        class MaybeHasAttr(torch.nn.Module):
            def __init__(self, add_attr):
                super(MaybeHasAttr, self).__init__()
                if add_attr:
                    self.maybe_attr = 1
            def forward(self):
                if hasattr(self, "maybe_attr") and True:
                    return self.maybe_attr
                else:
                    return 0
        class MaybeHasAttr2(torch.nn.Module):
            def __init__(self, add_attr):
                super(MaybeHasAttr2, self).__init__()
                if add_attr:
                    self.maybe_attr = 1
            def forward(self):
                if not hasattr(self, "maybe_attr") or False:
                    return 0
                else:
                    return self.maybe_attr
        torch.jit.script(MaybeHasAttr(True))
        torch.jit.script(MaybeHasAttr(False))
        torch.jit.script(MaybeHasAttr2(True))
        torch.jit.script(MaybeHasAttr2(False))
        class MyMod(torch.nn.Module):
            def forward(self):
                if hasattr(self, "foo"):
                    return 1
                else:
                    return 0
            @torch.jit.export
            def fee(self):
                return 1
        self.checkModule(MyMod(), ())
        class HasAttrMod(torch.nn.Module):
            __constants__ = ["fee"]
            def __init__(self):
                super().__init__()
                self.fee = 3
            def forward(self):
                a = hasattr(self, "fee")
                b = hasattr(self, "foo")
                c = hasattr(self, "hi")
                d = hasattr(self, "nonexistant")
                return (a, b, c, d)
            def foo(self):
                return 1
            @torch.jit._overload_method
            def hi(self, x: Tensor): ...  # noqa: E704
            def hi(self, x):  # noqa: F811
                return 2
        self.checkModule(HasAttrMod(), ())
        @torch.jit.script
        class FooTest(object):
            def __init__(self):
                self.x = 1
            def foo(self, y):
                return self.x + y
        def foo():
            a = FooTest()
            val1 = hasattr(a, "foo"), hasattr(a, "x"), hasattr(a, "bla")
            val2 = hasattr(FooTest, "foo"), hasattr(FooTest, "a")
            return val1, val2
        self.assertEqual(foo(), torch.jit.script(foo)())
    def _test_pickle_checkpoint(self, device):
        """Helper: torch.save called from inside a scripted forward must write a
        tensor that torch.load restores equal to the computed value."""
        with TemporaryFileName() as fname:
            class M(torch.jit.ScriptModule):
                __constants__ = ['fname']
                def __init__(self, tensor):
                    super(M, self).__init__()
                    self.fname = fname
                    self.tensor = torch.nn.Parameter(tensor)
                @torch.jit.script_method
                def forward(self, x):
                    y = self.tensor + x
                    torch.save(y, self.fname)
                    return y
            param = torch.randn(2, 2).to(device)
            input = torch.randn(2, 2).to(device)
            m = M(param)
            m(input)
            # NOTE(review): 'handle' is opened but unused; torch.load reads by
            # path -- confirm whether the open() is intentional
            with open(fname, "rb") as handle:
                loaded_tensor = torch.load(fname)
                self.assertEqual(loaded_tensor, input + param)
    def _test_pickle_checkpoint_views(self, device):
        """Helper: saving (y, y_view, y) from scripted code must preserve view
        aliasing -- mutating the loaded view mutates both loaded base tensors."""
        with TemporaryFileName() as fname:
            class M(torch.jit.ScriptModule):
                __constants__ = ['fname']
                def __init__(self, tensor):
                    super(M, self).__init__()
                    self.fname = fname
                    self.tensor = torch.nn.Parameter(tensor)
                @torch.jit.script_method
                def forward(self, x):
                    y = self.tensor + x
                    y_view = y.view(4)
                    torch.save((y, y_view, y), self.fname)
                    return y
            param = torch.randn(2, 2).to(device)
            input = torch.randn(2, 2).to(device)
            m = M(param)
            m(input)
            with open(fname, "rb") as handle:
                loaded_y, loaded_y_view, loaded_y_2 = torch.load(fname)
            self.assertEqual(loaded_y, input + param)
            with torch.no_grad():
                loaded_y_view[1] += 20
                # assert that loaded_y changed as well
                self.assertEqual(loaded_y.view(4), loaded_y_view)
                self.assertEqual(loaded_y_2.view(4), loaded_y_view)
    @unittest.skipIf(not RUN_CUDA, "no CUDA")
    def test_pickle_checkpoint_cuda(self):
        """Run the pickle-checkpoint helpers on a CUDA device."""
        self._test_pickle_checkpoint('cuda')
        self._test_pickle_checkpoint_views('cuda')
    def test_pickle_checkpoint(self):
        """Run the pickle-checkpoint helpers on CPU."""
        self._test_pickle_checkpoint('cpu')
        self._test_pickle_checkpoint_views('cpu')
    def test_pickle_checkpoint_tup(self):
        """torch.save of a tuple from scripted code must load back equal."""
        @torch.jit.script
        def foo(fname):
            # type: (str) -> None
            torch.save((3, 4), fname)
        with TemporaryFileName() as name:
            foo(name)
            self.assertEqual(torch.load(name), (3, 4))
def test_string_list(self):
def | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
from random import randint
from random import uniform
from random import choices
from random import seed
from multiprocessing import Process, Queue
import numpy as np
import numbers
# 'Model' controls single individuals,
# Whereas 'Treibhaus' controls multiple 'Model's:
from .model import Model
__author__ = "<NAME> <<EMAIL>>"
class Treibhaus():
def __init__(self, model_generator, fitness_evaluator, population, generations,
params, random_seed=None, new_individuals=0, exploration_damping=10000,
keep_parents=0, dynamic_exploration=1.1, workers=1, stopping_kriterion_gens=4,
stopping_kriterion_fitness=None, verbose=False, ignore_errors=False,
learning_rate=0.1, momentum=0.1, initial_population=[]):
"""
Finds the best model using evolutionary techniques.
Creates offspring based on the current population, and performs
selection on a merged population of offspring and parents such that
the size of the population remains at size of the hyperparameter.
Parents are selected by random, but selecting them becomes more likely when
they performed well. Children of well performed parents mutete only slightly,
those of worse performing mutate more.
Genes of parents are combined randomly.
Parameters
----------
model_generator : callable
A model that takes an array of model hyperparameters/weights
as parameter and e.g. forwards that to the constructor.
example:
def model_generator(params):
return Model(params[0], params[1])
fitness_evaluator : callable
a function that evaluates the model quality.
Parameter will be various Models
example:
def fitness_evaluator(Model):
return Model.crossValidate(testData)
population : int
how many models to combine and mutate each generation
generations : int
after how many generations to stop the algorithm
params : array
array of 3-tuples
[(200, 255, int),
(0.3, 1.0, float)]
first element in tuple:
the random initial model generation and the mutation need
bounds of how much to randomize. This is the upper bound.
[-10, -10]. It randomizes excluding those numbers, so
it will never be randomized to -10.
TODO add -inf as possibility, which will cause a gamma or
gauss (? maybe something for which the parameters work similar to beta and gamma would be better,
or translate alpha and beta to mean and variance. or whatever make a function that handles that
given position and exploration_damping. exploration_damping has to be the variance in case of gauss. yes that's
the solution that gives the user the most control. and position is just used as the mean)
distribution to be used instead of beta, depending on
upper being inf
second element in tuple:
this is the lower bound, just like in the first element
of the tuple
third element in tuple:
determines how to mutate. ints will be in/decremented
floats will be added with a random float
example:
float or int
TODO fourth element in tuple:
boolean logspace or linear, so that the
mutation probability distribution decreases its
variance when closer to 0
TODO: another possibility:
{'param1': (lower, upper, type), 'param2': etc.}
autodetect it. really try to make the code clean
for that one, because in my experience autodetection
stuff can get quite large.
1.: is it an array of 3-tuples? model receives *params
2.: is it a dict of 3-tuples? model receives **params
random_seed : number
random seed. setting this to the same number each time
means that the results will be the same each time. Setting
and remembering the random seeds will give the possibility
to reproduce results later.
new_individuals : float
float between 0 and 1, how much percent of the population
should be new random individuals in each generation. A value
of 1 corresponds to complete noise in each generation.
Default: 0
exploration_damping : number
the lower, the more severe mutations will happen. The higher,
the slower they will move around in minimas. Default: 5
can also be an array for rates individual
for parameters. exploration_damping = [2, 11, 5]
> 0
this is the sharpness of the distribution that is
used to mutate. parameters of the distribution add up
to exploration_damping
keep_parents : float
how many of the best parents to take into the next generation.
float between 0 and 1
dynamic_exploration : float
will make more exploration when no better performing
individuals were found in a generation. Default: 1.1
Set to 1 for no dynamic_exploration
workers : number
How many processes will be spawned to train models in parallel.
Default is 1, which means that the multiprocessing package will
not be used. Can be set to os.cpu_count() for example
stopping_kriterion_gens : number
after this number of generations that were not able to produce
a new best individual, the training is stopped. Default: 4
Set to None to not stop until last generation is completed.
stopping_kriterion_fitness : number
when the fitness of the best model is above this value, stop.
verbose : boolean
If True, will print when new generation starts. Default: False
ignore_errors : boolean
If True, will not stop the optimization when one of the
individuals throws an error. Defualt: False
learning_rate : float
If > 0, will see if moving into direction delta_X away from parents
resulted into an improvement of the fitness. If yes, children of this
individual will continue to move into that direction, if not, children
will move into opposite direction (additionally to the mutation)
momentum : float
Will weight the parents estimated loss-function derivatives and
add them to the currently estimated derivative, which makes the
individual move faster and faster down the loss function (or rather,
up the fitness mountain), when the gradient doesn't change. Same as
in classic gradient descent.
initial_population : list of Treibhaus Model objects
This is going to be used instead of randomly initialized
individuals. If lower than population, will fill up the
remaining individuals with random ones.
Default: [] empty list.
Raises
------
ValueError
when paramsTypes, paramsLower and paramsUpper
don't have the same length
ValueError
when one of the values in paramsLower and paramsUpper
is not of float or int
"""
params = np.array(params).T
paramsLower = params[0]
paramsUpper = params[1]
paramsTypes = params[2]
assert population > 1
# has to be int or float:
for i in range(len(paramsLower)):
if type(paramsLower[i]) != int and type(paramsUpper[i]) != int and type(paramsLower[i]) != float and type(paramsUpper[i]) != float:
raise ValueError(str(i)+"-th element should be int or float, but is:", paramsLower[i], paramsUpper[i])
# autodetect types:
if paramsTypes is None:
paramsTypes = []
for i in range(len(paramsLower)):
# prefer float in autodetection
# if both are ints, well then maybe those are ints and not floats
if type(paramsLower[i]) == float or type(paramsUpper[i]) == float:
paramsTypes += [float]
else:
paramsTypes += [type(paramsLower[0])]
# create exploration_damping for each param
if np.array([exploration_damping]).shape == (1,):
exploration_damping = [exploration_damping] * len(paramsTypes)
# otherwise only noise will be produced
# for rate in exploration_damping:
# assert rate > 0
# edit: well maybe that is desired for
# one of the optimized parameters
# should all be of the same length:
if not len(paramsLower) == len(paramsTypes) == len(paramsUpper) == len(exploration_damping):
raise ValueError("paramsTypes, paramsLower and paramsUpper should be of the same length:",
len(paramsLower), len(paramsTypes), len(paramsUpper))
# some basic settings
self.population = population
self.model_generator = model_generator
# if no model_generator, then just pass through
# for the fitness_evaluator
if model_generator is None:
self.model_generator = self.pass_through_generator
self.fitness_evaluator = fitness_evaluator
self.new_individuals = new_individuals
self.dynamic_exploration = dynamic_exploration
self.stopping_kriterion_gens = stopping_kriterion_gens
self.stopping_kriterion_fitness = stopping_kriterion_fitness
self.verbose = verbose
self.ignore_errors = ignore_errors
# exploration is dynamic, exploration_damping can change but will be reset sometimes
# make sure it's a numpy array for fancy math operations
self.exploration_damping = np.array(exploration_damping, float)
# percent to number of parents that are taken into the next generation
# always keep the very best one
self.keep_parents = max(1, int(keep_parents*population))
# parameter ranges
self.paramsUpper = paramsUpper
self.paramsLower = paramsLower
self.paramsTypes = paramsTypes
self.learning_rate = learning_rate
self.momentum = momentum
self.initial_population = initial_population
# multiprocessing
self.workers = workers
self.queueParams = None
self.queueResults = None
self.processes = []
# state
# arrays that contain tuples of (params, quality)
self.models = []
self.history = []
self.best = None
# TODO randomstate for numpy
seed(random_seed)
# now start
if generations > 0:
self.train(generations)
def pass_through_generator(self, *x):
return x
| |
# -*- coding: utf-8 -*-
"""
This module needs serious refactoring and testing
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import shelve
import six
import uuid
import json
import codecs
import os
#import lru
#git+https://github.com/amitdev/lru-dict
#import atexit
#import inspect
import contextlib
import collections
from six.moves import cPickle as pickle
from six.moves import range, zip
from os.path import join, normpath, basename, exists
from functools import partial
from itertools import chain
import zipfile
from utool import util_arg
from utool import util_hash
from utool import util_inject
from utool import util_path
from utool import util_io
from utool import util_str
from utool import util_cplat
from utool import util_inspect
from utool import util_list
from utool import util_class
from utool import util_type
from utool import util_decor
from utool import util_dict
from utool._internal import meta_util_constants
print, rrr, profile = util_inject.inject2(__name__)
# TODO: Remove globalness
VERBOSE = util_arg.VERBOSE
QUIET = util_arg.QUIET
VERBOSE_CACHE = util_arg.NOT_QUIET
USE_CACHE = not util_arg.get_argflag('--nocache')
__APPNAME__ = meta_util_constants.default_appname # the global application name
class CacheMissException(Exception):
    """Raised when a requested key is not present in a cache (see ShelfCacher.load)."""
    pass
#class YACacher(object):
# @six.add_metaclass(util_class.ReloadingMetaclass)
@util_class.reloadable_class
class ShelfCacher(object):
    """Dict-like cache backed by a `shelve` file on disk.

    When `enabled` is False no shelf is opened and every load raises
    CacheMissException.
    """
    def __init__(self, fpath, enabled=True):
        self.verbose = True
        if self.verbose:
            print('[shelfcache] initializing()')
        self.fpath = fpath
        # shelf stays None when disabled; load()/save() check for that
        self.shelf = None if not enabled else shelve.open(fpath)
    def __del__(self):
        # NOTE(review): if shelve.open raised in __init__, self.shelf was never
        # assigned and this close() raises AttributeError during gc -- verify
        self.close()
    def __getitem__(self, cachekey):
        return self.load(cachekey)
    def __setitem__(self, cachekey, data):
        return self.save(cachekey, data)
    def keys(self):
        # NOTE(review): raises AttributeError when the cacher is disabled
        # (self.shelf is None) -- confirm callers guard against this
        return self.shelf.keys()
    def load(self, cachekey):
        """Return the stored value for cachekey or raise CacheMissException."""
        if self.verbose:
            print('[shelfcache] loading %s' % (cachekey,))
        # NOTE(review): shelve keys are str on py3; encoding to bytes looks
        # like a py2-ism -- confirm this still works under py3
        cachekey = cachekey.encode('ascii')
        if self.shelf is None or cachekey not in self.shelf:
            raise CacheMissException(
                'Cache miss cachekey=%r self.fpath=%r' % (cachekey, self.fpath))
        else:
            return self.shelf[cachekey]
    def save(self, cachekey, data):
        """Store data under cachekey and flush to disk (no-op when disabled)."""
        if self.verbose:
            print('[shelfcache] saving %s' % (cachekey,))
        cachekey = cachekey.encode('ascii')
        if self.shelf is not None:
            self.shelf[cachekey] = data
            self.shelf.sync()
    def clear(self):
        """Remove all entries and flush."""
        if self.verbose:
            print('[shelfcache] clearing cache')
        self.shelf.clear()
        self.shelf.sync()
    def close(self):
        """Close the underlying shelf (safe to call when disabled)."""
        if self.verbose:
            print('[shelfcache] closing()')
        if self.shelf is not None:
            self.shelf.close()
def get_default_appname():
    """Return the module-level default application name (__APPNAME__)."""
    # reading a module global needs no `global` declaration
    return __APPNAME__
def text_dict_read(fpath):
    """Read a dict that was stored on disk as a python literal.

    A missing file yields {}. Unparsable text also yields {} unless
    util_arg.SUPER_STRICT is set, in which case the SyntaxError is re-raised.
    SECURITY NOTE: this eval()s the file contents -- only use on trusted files.
    """
    try:
        with open(fpath, 'r') as file_:
            dict_text = file_.read()
    except IOError:
        dict_text = '{}'
    try:
        return eval(dict_text, {}, {})
    except SyntaxError as ex:
        import utool as ut
        print(dict_text)
        ut.printex(ex, 'Bad Syntax', keys=['dict_text'])
        if util_arg.SUPER_STRICT:
            raise
        return {}
def text_dict_write(fpath, dict_):
    """Store dict_ on disk as a human-readable python literal.

    Very naive, but readable way of storing a dictionary on disk.
    FIXME: This broke on RoseMary's big dataset. Not sure why. It gave bad
    syntax. And the SyntaxError did not seem to be excepted.
    """
    serialized = util_str.repr4(dict_, strvals=False)
    if VERBOSE:
        print('[cache] ' + str(serialized))
    util_io.write_to(fpath, serialized)
def consensed_cfgstr(prefix, cfgstr, max_len=128, cfgstr_hashlen=16):
    """Return prefix + cfgstr, condensing cfgstr to a short hash when the
    combination would exceed max_len characters."""
    if len(prefix) + len(cfgstr) <= max_len:
        return prefix + cfgstr
    hashed = util_hash.hashstr27(cfgstr, hashlen=cfgstr_hashlen)
    # Hack for prettier names: only insert a separator when needed
    sep = '' if prefix.endswith('_') else '_'
    return prefix + sep + hashed
def _args2_fpath(dpath, fname, cfgstr, ext):
    r"""
    Ensures that the filename is not too long
    Internal util_cache helper function
    Windows MAX_PATH=260 characters
    Absolute length is limited to 32,000 characters
    Each filename component is limited to 255 characters
    Args:
        dpath (str):
        fname (str):
        cfgstr (str):
        ext (str):
    Returns:
        str: fpath
    CommandLine:
        python -m utool.util_cache --test-_args2_fpath
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_cache import *  # NOQA
        >>> from utool.util_cache import _args2_fpath
        >>> import utool as ut
        >>> dpath = 'F:\\data\\work\\PZ_MTEST\\_ibsdb\\_ibeis_cache'
        >>> fname = 'normalizer_'
        >>> cfgstr = u'PZ_MTEST_DSUUIDS((9)67j%dr%&bl%4oh4+)_QSUUIDS((9)67j%dr%&bl%4oh4+)zebra_plains_vsone_NN(single,K1+1,last,cks1024)_FILT(ratio<0.625;1.0,fg;1.0)_SV(0.01;2;1.57minIn=4,nRR=50,nsum,)_AGG(nsum)_FLANN(4_kdtrees)_FEATWEIGHT(ON,uselabel,rf)_FEAT(hesaff+sift_)_CHIP(sz450)'
        >>> ext = '.cPkl'
        >>> fpath = _args2_fpath(dpath, fname, cfgstr, ext)
        >>> result = str(ut.ensure_unixslash(fpath))
        >>> target = 'F:/data/work/PZ_MTEST/_ibsdb/_ibeis_cache/normalizer_xfylfboirymmcpfg.cPkl'
        >>> ut.assert_eq(result, target)
    """
    if ext and not ext.startswith('.'):
        raise ValueError('Please be explicit and use a dot in ext')
    # condense overlong cfgstrs to a hash so each path component stays short
    # (should hashlen be larger?)
    fname_cfgstr = consensed_cfgstr(fname, cfgstr, max_len=128,
                                    cfgstr_hashlen=16)
    return normpath(join(dpath, fname_cfgstr + ext))
def save_cache(dpath, fname, cfgstr, data, ext='.cPkl', verbose=None):
    """Save data via util_io under a smartly constructed cache filename.

    Returns the path the data was written to.
    """
    target = _args2_fpath(dpath, fname, cfgstr, ext)
    util_io.save_data(target, data, verbose=verbose)
    return target
def load_cache(dpath, fname, cfgstr, ext='.cPkl', verbose=None, enabled=True):
    """
    Loads data using util_io, but smartly constructs a filename

    Raises IOError when caching is disabled (errno 3), when the cache file
    does not exist (errno 2), or when the file appears corrupted.
    """
    if verbose is None:
        verbose = VERBOSE_CACHE
    # caching can be disabled globally (--nocache) or per call
    if not USE_CACHE or not enabled:
        if verbose > 1:
            print('[util_cache] ... cache disabled: dpath=%s cfgstr=%r' %
                  (basename(dpath), cfgstr,))
        raise IOError(3, 'Cache Loading Is Disabled')
    fpath = _args2_fpath(dpath, fname, cfgstr, ext)
    if not exists(fpath):
        if verbose > 0:
            print('[util_cache] ... cache does not exist: dpath=%r fname=%r cfgstr=%r' % (
                basename(dpath), fname, cfgstr,))
        raise IOError(2, 'No such file or directory: %r' % (fpath,))
    else:
        if verbose > 2:
            print('[util_cache] ... cache exists: dpath=%r fname=%r cfgstr=%r' % (
                basename(dpath), fname, cfgstr,))
        import utool as ut
        nbytes = ut.get_file_nBytes(fpath)
        # NOTE(review): both arms of this `or` reduce to `verbose > 2`, making
        # the nbytes clause redundant -- possibly `verbose > 1` was intended
        big_verbose = (nbytes > 1E6 and verbose > 2) or verbose > 2
        if big_verbose:
            print('[util_cache] About to read file of size %s' % (ut.byte_str2(nbytes),))
    try:
        with ut.Timer(fpath, verbose=big_verbose and verbose > 3):
            data = util_io.load_data(fpath, verbose=verbose > 2)
    except (EOFError, IOError, ImportError) as ex:
        print('CORRUPTED? fpath = %s' % (fpath,))
        if verbose > 1:
            print('[util_cache] ... cache miss dpath=%s cfgstr=%r' % (
                basename(dpath), cfgstr,))
        # NOTE(review): re-wrapping loses the original errno/strerror structure
        raise IOError(str(ex))
    except Exception:
        print('CORRUPTED? fpath = %s' % (fpath,))
        raise
    else:
        if verbose > 2:
            print('[util_cache] ... cache hit')
    return data
def tryload_cache(dpath, fname, cfgstr, verbose=None):
    """Best-effort load: return the cached data, or None on any IOError."""
    with contextlib.suppress(IOError):
        return load_cache(dpath, fname, cfgstr, verbose=verbose)
    return None
@profile
def tryload_cache_list(dpath, fname, cfgstr_list, verbose=False):
    """Load a list of similarly cached datas.

    Returns (data_list, ismiss_list); ismiss_list flags entries whose cached
    data could not be loaded (None) and still needs to be computed.
    """
    data_list = []
    ismiss_list = []
    for cfgstr in cfgstr_list:
        data = tryload_cache(dpath, fname, cfgstr, verbose)
        data_list.append(data)
        ismiss_list.append(data is None)
    return data_list, ismiss_list
@profile
def tryload_cache_list_with_compute(use_cache, dpath, fname, cfgstr_list,
                                    compute_fn, *args):
    """
    Tries to load cached data per cfgstr; computes whatever is missing.

    compute_fn receives (ismiss_list, *args) and must return the computed
    values for the flagged entries (in cfgstr order). With use_cache=False
    nothing is loaded or saved; everything is computed.
    Returns the full data_list aligned with cfgstr_list.
    """
    if use_cache is False:
        # Don't load or save, just compute everything
        # (bugfix: removed a dead `[None] * len(cfgstr_list)` pre-assignment
        # that was immediately overwritten)
        ismiss_list = [True] * len(cfgstr_list)
        data_list = compute_fn(ismiss_list, *args)
        return data_list
    else:
        # Load precomputed values
        data_list, ismiss_list = tryload_cache_list(dpath, fname, cfgstr_list,
                                                    verbose=False)
        num_total = len(cfgstr_list)
        if any(ismiss_list):
            # Compute missing values
            newdata_list = compute_fn(ismiss_list, *args)
            newcfgstr_list = util_list.compress(cfgstr_list, ismiss_list)
            index_list = util_list.list_where(ismiss_list)
            print('[cache] %d/%d cache hits for %s in %s' % (num_total -
                                                             len(index_list),
                                                             num_total, fname,
                                                             util_path.tail(dpath)))
            # Cache write
            for newcfgstr, newdata in zip(newcfgstr_list, newdata_list):
                save_cache(dpath, fname, newcfgstr, newdata, verbose=False)
            # Populate missing result
            for index, newdata in zip(index_list, newdata_list):
                data_list[index] = newdata
        else:
            print('[cache] %d/%d cache hits for %s in %s' % (num_total, num_total,
                                                             fname,
                                                             util_path.tail(dpath)))
        return data_list
class Cacher(object):
"""
old non inhertable version of cachable
"""
def __init__(self, fname, cfgstr=None, cache_dir='default',
appname='utool', ext='.cPkl', verbose=None,
enabled=True):
if verbose is None:
verbose = VERBOSE
if cache_dir == 'default':
cache_dir = util_cplat.get_app_resource_dir(appname)
util_path.ensuredir(cache_dir)
self.dpath = cache_dir
self.fname = fname
self.cfgstr = cfgstr
self.verbose = verbose
self.ext = ext
self.enabled = enabled
def get_fpath(self):
fpath = _args2_fpath(self.dpath, self.fname, self.cfgstr, self.ext)
return fpath
def existing_versions(self):
"""
Returns data with different cfgstr values that were previously computed
with this cacher.
"""
import glob
pattern = self.fname + '_*' + self.ext
for fname in glob.glob1(self.dpath, pattern):
fpath = join(self.dpath, fname)
yield fpath
    def exists(self, cfgstr=None):
        # NOTE(review): the cfgstr argument is accepted but ignored -- the path
        # always comes from self.cfgstr via get_fpath(); confirm callers expect
        # this before relying on per-call cfgstr overrides here
        return exists(self.get_fpath())
    def load(self, cfgstr=None):
        """Load cached data from disk; raises on a cache miss.

        Args:
            cfgstr (str): optional config string; falls back to the one
                given at construction. If neither is available a warning
                is emitted and the empty string is used.

        Returns:
            object: the cached data loaded by load_cache.
        """
        cfgstr = self.cfgstr if cfgstr is None else cfgstr
        # assert cfgstr is not None, 'must specify cfgstr in constructor or call'
        if cfgstr is None:
            import warnings
            warnings.warn('No cfgstr given in Cacher constructor or call')
            cfgstr = ''
        assert self.fname is not None, 'no fname'
        assert self.dpath is not None, 'no dpath'
        # TODO: use the computed fpath from this object instead
        data = load_cache(self.dpath, self.fname, cfgstr, self.ext,
                          verbose=self.verbose, enabled=self.enabled)
        if self.verbose > 1:
            print('[cache] ... ' + self.fname + ' Cacher hit')
        return data
    def tryload(self, cfgstr=None):
        """
        Like load, but returns None if the load fails
        (cache miss, unreadable file, or caching disabled).
        """
        if cfgstr is None:
            cfgstr = self.cfgstr
        if cfgstr is None:
            import warnings
            warnings.warn('No cfgstr given in Cacher constructor or call')
            cfgstr = ''
        # assert cfgstr is not None, (
        #     'must specify cfgstr in constructor or call')
        if not self.enabled:
            # Caching turned off: behave like a permanent miss
            if self.verbose > 0:
                print('[cache] ... %s Cacher disabled' % (self.fname))
            return None
        try:
            if self.verbose > 1:
                print('[cache] tryload fname=%s' % (self.fname,))
            # if self.verbose > 2:
            #     print('[cache] cfgstr=%r' % (cfgstr,))
            return self.load(cfgstr)
        except IOError:
            # load signals a miss with IOError; fall through and return None
            if self.verbose > 0:
                print('[cache] ... %s Cacher miss' % (self.fname))
def ensure(self, func, *args, **kwargs):
data = self.tryload()
| |
mem_aval_pct = util.get_value_from_second_level_of_dict(ns_stats, ("memory_free_pct", "free-pct-memory"),
default_value=0, return_type=int)
mem_aval = util.pct_to_value(mem_size, mem_aval_pct)
cl_nodewise_mem_size = util.add_dicts(cl_nodewise_mem_size, mem_size)
cl_nodewise_mem_aval = util.add_dicts(cl_nodewise_mem_aval, mem_aval)
summary_dict["FEATURES"]["NAMESPACE"][ns]["memory_total"] = sum(mem_size.values())
summary_dict["FEATURES"]["NAMESPACE"][ns]["memory_aval"] = sum(mem_aval.values())
summary_dict["FEATURES"]["NAMESPACE"][ns]["memory_available_pct"] = (float(sum(mem_aval.values())) / float(
sum(mem_size.values()))) * 100.0
device_size = util.get_value_from_second_level_of_dict(ns_stats, ("device_total_bytes", "total-bytes-disk"),
default_value=0, return_type=int)
device_used = util.get_value_from_second_level_of_dict(ns_stats, ("device_used_bytes", "used-bytes-disk"),
default_value=0, return_type=int)
device_aval_pct = util.get_value_from_second_level_of_dict(ns_stats, ("device_available_pct", "available_pct"),
default_value=0, return_type=int)
device_aval = util.pct_to_value(device_size, device_aval_pct)
cl_nodewise_device_size = util.add_dicts(cl_nodewise_device_size, device_size)
cl_nodewise_device_used = util.add_dicts(cl_nodewise_device_used, device_used)
cl_nodewise_device_aval = util.add_dicts(cl_nodewise_device_aval, device_aval)
device_size_total = sum(device_size.values())
if device_size_total > 0:
summary_dict["FEATURES"]["NAMESPACE"][ns]["disk_total"] = device_size_total
summary_dict["FEATURES"]["NAMESPACE"][ns]["disk_used"] = sum(device_used.values())
summary_dict["FEATURES"]["NAMESPACE"][ns]["disk_aval"] = sum(device_aval.values())
summary_dict["FEATURES"]["NAMESPACE"][ns]["disk_used_pct"] = (float(sum(device_used.values())) / float(
device_size_total)) * 100.0
summary_dict["FEATURES"]["NAMESPACE"][ns]["disk_available_pct"] = (float(sum(device_aval.values())) / float(
device_size_total)) * 100.0
summary_dict["FEATURES"]["NAMESPACE"][ns]["repl_factor"] = list(set(
util.get_value_from_second_level_of_dict(ns_stats, ("repl-factor", "replication-factor"), default_value=0,
return_type=int).values()))
data_in_memory = \
util.get_value_from_second_level_of_dict(ns_stats, ("storage-engine.data-in-memory", "data-in-memory"),
default_value=False, return_type=bool).values()[0]
if data_in_memory:
cache_read_pcts = util.get_value_from_second_level_of_dict(ns_stats, ("cache_read_pct", "cache-read-pct"),
default_value="N/E", return_type=int).values()
if cache_read_pcts:
try:
summary_dict["FEATURES"]["NAMESPACE"][ns]["cache_read_pct"] = sum(cache_read_pcts) / len(
cache_read_pcts)
except Exception:
pass
master_objects = sum(
util.get_value_from_second_level_of_dict(ns_stats, ("master_objects", "master-objects"), default_value=0,
return_type=int).values())
summary_dict["CLUSTER"]["ns_count"] += 1
if master_objects > 0:
summary_dict["FEATURES"]["NAMESPACE"][ns]["master_objects"] = master_objects
summary_dict["CLUSTER"]["active_ns"] += 1
try:
rack_ids = util.get_value_from_second_level_of_dict(ns_stats, ("rack-id",), default_value=None,
return_type=int)
rack_ids = list(set(rack_ids.values()))
if len(rack_ids) > 1 or rack_ids[0] is not None:
if any((i is not None and i > 0) for i in rack_ids):
summary_dict["FEATURES"]["NAMESPACE"][ns]["rack-aware"] = True
else:
summary_dict["FEATURES"]["NAMESPACE"][ns]["rack-aware"] = False
except Exception:
pass
cl_device_counts = sum(cl_nodewise_device_counts.values())
if cl_device_counts:
summary_dict["CLUSTER"]["device"]["count"] = cl_device_counts
summary_dict["CLUSTER"]["device"]["count_per_node"] = int((float(cl_device_counts) / float(total_nodes)) + 0.5)
if len(set(cl_nodewise_device_counts.values())) > 1:
summary_dict["CLUSTER"]["device"]["count_same_across_nodes"] = False
cl_memory_size_total = sum(cl_nodewise_mem_size.values())
if cl_memory_size_total > 0:
summary_dict["CLUSTER"]["memory"]["total"] = cl_memory_size_total
summary_dict["CLUSTER"]["memory"]["aval"] = sum(cl_nodewise_mem_aval.values())
summary_dict["CLUSTER"]["memory"]["aval_pct"] = (float(sum(cl_nodewise_mem_aval.values())) / float(
cl_memory_size_total)) * 100.0
cl_device_size_total = sum(cl_nodewise_device_size.values())
if cl_device_size_total > 0:
summary_dict["CLUSTER"]["device"]["total"] = cl_device_size_total
summary_dict["CLUSTER"]["device"]["used"] = sum(cl_nodewise_device_used.values())
summary_dict["CLUSTER"]["device"]["aval"] = sum(cl_nodewise_device_aval.values())
summary_dict["CLUSTER"]["device"]["used_pct"] = (float(sum(cl_nodewise_device_used.values())) / float(
cl_device_size_total)) * 100.0
summary_dict["CLUSTER"]["device"]["aval_pct"] = (float(sum(cl_nodewise_device_aval.values())) / float(
cl_device_size_total)) * 100.0
return summary_dict
#############################
########## Histogram ##########
def _create_histogram_percentiles_output(histogram_name, histogram_data):
    """Convert raw histogram buckets into 10th..100th percentile values.

    Args:
        histogram_name (str): histogram type; "objsz" gets a special
            off-by-one adjustment of the bucket boundary.
        histogram_data: {host: {namespace: {'data': [...], 'width': int}}};
            flipped to namespace-first below.

    Returns:
        dict: the flipped mapping with a 'percentiles' list (10 entries,
        bucket values scaled by width) added per host entry.
    """
    histogram_data = util.flip_keys(histogram_data)
    # .items() works on both Python 2 and 3 (iteritems is py2-only)
    for namespace, host_data in histogram_data.items():
        for host_id, data in host_data.items():
            hist = data['data']
            width = data['width']
            cum_total = 0
            total = sum(hist)
            percentile = 0.1
            result = []
            for i, v in enumerate(hist):
                cum_total += float(v)
                if total > 0:
                    portion = cum_total / total
                else:
                    portion = 0.0
                # A single bucket can cover several percentile steps
                while portion >= percentile:
                    percentile += 0.1
                    result.append(i + 1)
                if percentile > 1.0:
                    break
            if result == []:
                # Empty histogram: report zeros for all ten percentiles
                result = [0] * 10
            # BUGFIX: was `histogram_name is "objsz"` -- identity comparison
            # with a string literal only works by CPython interning accident.
            if histogram_name == "objsz":
                data['percentiles'] = [(r * width) - 1 if r > 0 else r for r in result]
            else:
                data['percentiles'] = [r * width for r in result]
    return histogram_data
def _create_bytewise_histogram_percentiles_output(histogram_data, bucket_count, builds):
    """Merge raw per-node object-size histograms into at most `bucket_count`
    byte-range columns per namespace.

    Args:
        histogram_data: {host: {namespace: {'data': [...], 'width': int}}};
            flipped to namespace-first below.
        bucket_count (int): maximum number of merged range columns to show.
        builds: {host_id: server version string}, used to pick the record
            block size (128 vs 512 bytes) for each node.

    Returns:
        dict: namespace -> host data augmented with per-range 'values' plus
        a shared 'columns' list of ranges with a non-zero count somewhere.
    """
    histogram_data = util.flip_keys(histogram_data)
    for namespace, host_data in histogram_data.iteritems():
        result = []
        rblock_size_bytes = 128
        width = 1
        # Pass 1: collect every bucket index holding data on any node
        for host_id, data in host_data.iteritems():
            try:
                as_version = builds[host_id]
                # Older server lines used 512-byte record blocks
                if (LooseVersion(as_version) < LooseVersion("2.7.0")
                        or (LooseVersion(as_version) >= LooseVersion("3.0.0")
                            and LooseVersion(as_version) < LooseVersion("3.1.3"))):
                    rblock_size_bytes = 512
            except Exception:
                pass
            hist = data['data']
            width = data['width']
            for i, v in enumerate(hist):
                if v and v > 0:
                    result.append(i)
        result = list(set(result))
        result.sort()
        start_buckets = []
        if len(result) <= bucket_count:
            # if asinfo buckets with values>0 are less than
            # show_bucket_count then we can show all single buckets as it
            # is, no need to merge to show big range
            for res in result:
                start_buckets.append(res)
                start_buckets.append(res + 1)
        else:
            # dividing volume buckets (from min possible bucket with
            # value>0 to max possible bucket with value>0) into same range
            # NOTE: integer division under Python 2; bucket_width is an int
            start_bucket = result[0]
            size = result[len(result) - 1] - result[0] + 1
            bucket_width = size / bucket_count
            additional_bucket_index = bucket_count - (size % bucket_count)
            bucket_index = 0
            while bucket_index < bucket_count:
                start_buckets.append(start_bucket)
                if bucket_index == additional_bucket_index:
                    # Distribute the remainder: later ranges get one extra bucket
                    bucket_width += 1
                start_bucket += bucket_width
                bucket_index += 1
            start_buckets.append(start_bucket)
        # Build the human-readable column headers for each merged range
        columns = []
        need_to_show = {}
        for i, bucket in enumerate(start_buckets):
            if i == len(start_buckets) - 1:
                break
            key = _get_bucket_range(bucket, start_buckets[i + 1], width, rblock_size_bytes)
            need_to_show[key] = False
            columns.append(key)
        # Pass 2: accumulate each node's counts into the merged ranges
        for host_id, data in host_data.iteritems():
            rblock_size_bytes = 128
            try:
                as_version = builds[host_id]
                if (LooseVersion(as_version) < LooseVersion("2.7.0")
                        or (LooseVersion(as_version) >= LooseVersion("3.0.0")
                            and LooseVersion(as_version) < LooseVersion("3.1.3"))):
                    rblock_size_bytes = 512
            except Exception:
                pass
            hist = data['data']
            width = data['width']
            data['values'] = {}
            for i, s in enumerate(start_buckets):
                if i == len(start_buckets) - 1:
                    break
                b_index = s
                key = _get_bucket_range(s, start_buckets[i + 1], width, rblock_size_bytes)
                if key not in columns:
                    columns.append(key)
                if key not in data["values"]:
                    data["values"][key] = 0
                # Sum all raw buckets that fall inside this merged range
                while b_index < start_buckets[i + 1]:
                    data["values"][key] += hist[b_index]
                    b_index += 1
                if data["values"][key] > 0:
                    # At least one node has data in this range: show the column
                    need_to_show[key] = True
                else:
                    if key not in need_to_show:
                        need_to_show[key] = False
        # Only ranges that are non-empty somewhere make it into the output
        host_data["columns"] = []
        for column in columns:
            if need_to_show[column]:
                host_data["columns"].append(column)
    return histogram_data
def _get_bucket_range(current_bucket, next_bucket, width, rblock_size_bytes):
    """Render a human-readable byte-range label for a merged histogram bucket.

    Bucket indices are scaled by `width` and the per-version record block
    size; bucket 99 (or anything past it) is rendered as an open-ended
    ">X" range.
    """
    start_label = "0 B"
    if current_bucket > 0:
        start_bytes = ((current_bucket * width) - 1) * rblock_size_bytes
        # Shift the boundary by one byte so consecutive ranges do not overlap
        start_bytes = 0 if start_bytes < 1 else start_bytes + 1
        start_label = filesize.size(start_bytes, filesize.byte)
        if current_bucket == 99 or next_bucket > 99:
            # Last representable bucket: open-ended range
            return ">%s" % (start_label.replace(" ", ""))
    end_bytes = ((next_bucket * width) - 1) * rblock_size_bytes
    end_label = filesize.size(end_bytes, filesize.byte)
    return "%s to %s" % (start_label.replace(" ", ""), end_label.replace(" ", ""))
def create_histogram_output(histogram_name, histogram_data, **params):
    """Dispatch histogram post-processing.

    Default output is percentile buckets; when `byte_distribution` is
    truthy (and `bucket_count` plus `builds` are supplied), a bytewise
    range breakdown is produced instead.
    """
    # Missing or falsy byte_distribution means plain percentile output
    if not params.get("byte_distribution"):
        return _create_histogram_percentiles_output(histogram_name, histogram_data)
    if "bucket_count" not in params or "builds" not in params:
        # Bytewise mode requested but required inputs are absent
        return {}
    return _create_bytewise_histogram_percentiles_output(
        histogram_data, params["bucket_count"], params["builds"])
#################################
########## System Collectinfo ##########
def _get_metadata(response_str, prefix='', old_response=''):
    """Recursively walk the AWS instance-metadata listing and flatten it.

    Args:
        response_str (str): newline-separated listing for the current level.
        prefix (str): URL path accumulated so far under the metadata root.
        old_response (str): the parent's response body, used to detect leaf
            values (a response identical to its parent means no deeper level).

    Returns:
        str: flattened "key\\nvalue" sections separated by blank lines.
    """
    aws_c = ''
    aws_metadata_base_url = 'http://169.254.169.254/latest/meta-data'
    # set of values which will give same old_response, so no need to go further
    last_values = []
    for rsp in response_str.split("\n"):
        if rsp[-1:] == '/':
            # Directory entry: recurse at the same prefix with '/' stripped
            rsp_p = rsp.strip('/')
            aws_c += _get_metadata(rsp_p, prefix, old_response=old_response)
        else:
            meta_url = aws_metadata_base_url + prefix + rsp
            req = urllib2.Request(meta_url)
            r = urllib2.urlopen(req)
            # r = requests.get(meta_url,timeout=aws_timeout)
            if r.code != 404:
                response = r.read().strip()
                if response == old_response:
                    # Leaf repeated its parent's body: record it and stop here
                    last_values.append(rsp.strip())
                    continue
                try:
                    aws_c += _get_metadata(response, prefix + rsp + "/", old_response=response)
                except Exception:
                    # Not traversable any further: emit as a key/value pair
                    aws_c += (prefix + rsp).strip('/') + '\n' + response + "\n\n"
    if last_values:
        aws_c += prefix.strip('/') + '\n' + '\n'.join(last_values) + "\n\n"
    return aws_c
def _collect_awsdata(cmd=''):
    """Probe the EC2 metadata endpoint and dump everything it exposes.

    Args:
        cmd (str): unused; kept for uniformity with the other collectors.

    Returns:
        tuple: (report text, None). The report notes failure when the node
        does not appear to be running in AWS (unreachable / non-200).
    """
    aws_rsp = ''
    aws_timeout = 1
    # Keep the probe fast: the endpoint only exists on EC2 instances
    socket.setdefaulttimeout(aws_timeout)
    aws_metadata_base_url = 'http://169.254.169.254/latest/meta-data'
    out = "['AWS']"
    try:
        req = urllib2.Request(aws_metadata_base_url)
        r = urllib2.urlopen(req)
        # r = requests.get(aws_metadata_base_url,timeout=aws_timeout)
        if r.code == 200:
            rsp = r.read()
            # Recursively expand the whole metadata tree
            aws_rsp += _get_metadata(rsp, '/')
            out += "\n" + "Requesting... {0} \n{1} \t Successful".format(aws_metadata_base_url, aws_rsp)
        else:
            aws_rsp = " Not likely in AWS"
            out += "\n" + "Requesting... {0} \t FAILED {1} ".format(aws_metadata_base_url, aws_rsp)
    except Exception as e:
        out += "\n" + "Requesting... {0} \t {1} ".format(aws_metadata_base_url, e)
        out += "\n" + "FAILED! Node Is Not likely In AWS"
    return out, None
def _get_gce_metadata(response_str, fields_to_ignore=None, prefix=''):
    """Recursively walk the GCE metadata listing and flatten it to text.

    Args:
        response_str (str): newline-separated listing for the current level.
        fields_to_ignore (list): metadata entries to skip entirely.
            BUGFIX: the default was a shared mutable list (`[]`); it is
            now None with a fresh list created per call. Behavior for all
            existing callers is unchanged.
        prefix (str): URL path accumulated so far under the metadata root.

    Returns:
        str: flattened "key\\nvalue" sections separated by blank lines.
    """
    if fields_to_ignore is None:
        fields_to_ignore = []
    res_str = ''
    gce_metadata_base_url = 'http://metadata.google.internal/computeMetadata/v1/instance/'
    for rsp in response_str.split("\n"):
        rsp = rsp.strip()
        if not rsp or rsp in fields_to_ignore:
            continue
        meta_url = gce_metadata_base_url + prefix + rsp
        try:
            req = urllib2.Request(meta_url, headers={"Metadata-Flavor" : "Google"})
            r = urllib2.urlopen(req)
            if r.code != 404:
                response = r.read().strip()
                if rsp[-1:] == '/':
                    # Directory entry: recurse one level deeper
                    res_str += _get_gce_metadata(response, fields_to_ignore=fields_to_ignore, prefix=prefix+rsp)
                else:
                    res_str += prefix + rsp + "\n" + response + "\n\n"
        except Exception:
            # Keys that cannot be fetched are silently skipped (best-effort)
            pass
    return res_str
def _collect_gcedata(cmd=''):
    """Probe the GCE metadata endpoint and dump the instance metadata tree.

    Args:
        cmd (str): unused; kept for uniformity with the other collectors.

    Returns:
        tuple: (report text, None). The report notes failure when the node
        does not appear to be running in GCE (unreachable / non-200).
    """
    gce_timeout = 1
    # Keep the probe fast: the endpoint only exists on GCE instances
    socket.setdefaulttimeout(gce_timeout)
    gce_metadata_base_url = 'http://metadata.google.internal/computeMetadata/v1/instance/'
    out = "['GCE']"
    # 'attributes/' can contain user data we do not want to collect
    fields_to_ignore = ['attributes/']
    try:
        req = urllib2.Request(gce_metadata_base_url, headers={"Metadata-Flavor" : "Google"})
        r = urllib2.urlopen(req)
        if r.code == 200:
            rsp = r.read()
            gce_rsp = _get_gce_metadata(rsp, fields_to_ignore=fields_to_ignore)
            out += "\n" + "Requesting... {0} \n{1} \t Successful".format(gce_metadata_base_url, gce_rsp)
        else:
            gce_rsp = " Not likely in GCE"
            out += "\n" + "Requesting... {0} \t FAILED {1} ".format(gce_metadata_base_url, gce_rsp)
    except Exception as e:
        out += "\n" + "Requesting... {0} \t {1} ".format(gce_metadata_base_url, e)
        out += "\n" + "FAILED! Node Is Not likely In GCE"
    return out, None
def _collect_azuredata(cmd=''):
azure_timeout = 1
socket.setdefaulttimeout(azure_timeout)
azure_metadata_base_url = 'http://169.254.169.254/metadata/instance?api-version=2017-04-02'
out = "['Azure']"
try:
req = urllib2.Request(azure_metadata_base_url, headers={"Metadata" : "true"})
r = urllib2.urlopen(req)
if r.code == 200:
rsp = r.read()
rsp = rsp.decode("utf-8")
jsonObj = json.loads(rsp)
out += "\n" + "Requesting... {0} \n{1} \t Successful".format(azure_metadata_base_url,
json.dumps(jsonObj, sort_keys=True, indent=4, separators=(',', ': ')))
else:
rsp = " Not likely in Azure"
out += "\n" + "Requesting... {0} \t FAILED {1} ".format(azure_metadata_base_url, rsp)
except Exception as e:
out += "\n" + "Requesting... {0} \t {1} ".format(azure_metadata_base_url, e)
out += "\n" | |
<filename>Codes/CNN_More_complex_example.py
# File: CNN_More_complex_example.py
# Description: Neural Networks for computer vision in autonomous vehicles and robotics
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 <NAME>
# github.com/sichkar-valentyn
#
# Reference to:
# <NAME>. Neural Networks for computer vision in autonomous vehicles and robotics // GitHub platform. DOI: 10.5281/zenodo.1317904
# Implementing more complex example of convolution
# Input image is GrayScale with three identical channels
# Hyperparameters is as following:
# Filter (kernel) size, K_size = 3
# Step for sliding (stride), Step = 1
# Processing edges (zero valued frame around image), Pad = 1
# Consequently, output image size is as following:
# Width_Out = (Width_In - K_size + 2*Pad) / Step + 1
# Height_Out = (Height_In - K_size + 2*Pad) / Step + 1
# If an input image is 50x50 spatial size (width and height), then output image:
# Width_Out = Height_Out = (50 - 3 + 2*1)/1 + 1 = 50
# The shape of output image is the same with input image according to the chosen Hyperparameters
# Importing needed libraries
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# Creating function for 2D convolution operation
def convolution_2d(image, filter, pad, step):
    """Convolve a single-channel 2D image with a square kernel.

    The input is assumed to be pre-padded by the caller; the output
    therefore excludes the `pad` border from the computed spatial size.
    Sliding uses a stride of 1 regardless of `step` (callers pass step=1).
    """
    k_size = filter.shape[0]
    # Standard convolution arithmetic: Out = (In - K + 2*Pad) / Step + 1
    width_out = int((image.shape[0] - k_size + 2 * pad) / step + 1)
    height_out = int((image.shape[1] - k_size + 2 * pad) / step + 1)
    # Output drops the padded border (the caller padded the input already)
    output_image = np.zeros((width_out - 2 * pad, height_out - 2 * pad))
    rows = image.shape[0] - k_size + 1
    cols = image.shape[1] - k_size + 1
    for row in range(rows):
        for col in range(cols):
            # Elementwise multiply-and-sum of the window against the kernel
            window = image[row:row + k_size, col:col + k_size]
            output_image[row, col] = (window * filter).sum()
    return output_image
# Creating function for CNN Layer
def cnn_layer(image_volume, filter, pad=1, step=1):
    """Apply a bank of 3D filters to an input volume (or feature-map stack).

    filter has shape (n_filters, k, k, channels); each filter is convolved
    against every input channel and the per-channel results are summed into
    one output feature map, giving an output depth of n_filters.
    """
    # Zero-pad every channel of the input volume
    padded = np.zeros((image_volume.shape[0] + 2 * pad,
                       image_volume.shape[1] + 2 * pad,
                       image_volume.shape[2]))
    for channel in range(image_volume.shape[2]):
        # Pad=0 would leave the channel untouched
        padded[:, :, channel] = np.pad(image_volume[:, :, channel],
                                       (pad, pad), mode='constant',
                                       constant_values=0)
    # Out = (In - K + 2*Pad) / Step + 1 ; output depth equals the filter count
    k_size = filter.shape[1]
    n_filters = filter.shape[0]
    width_out = int((image_volume.shape[0] - k_size + 2 * pad) / step + 1)
    height_out = int((image_volume.shape[1] - k_size + 2 * pad) / step + 1)
    feature_maps = np.zeros((width_out, height_out, n_filters))
    for f_idx in range(n_filters):
        # Sum the per-channel convolutions into a single feature map
        accumulated = np.zeros((width_out, height_out))
        for channel in range(padded.shape[-1]):
            accumulated += convolution_2d(padded[:, :, channel],
                                          filter[f_idx, :, :, channel],
                                          pad, step)
        feature_maps[:, :, f_idx] = accumulated
    return feature_maps
# Creating function for replacing pixel values that are more than 255 with 255
def image_pixels_255(maps):
    """Cap pixel values at the 8-bit maximum of 255.

    Values less than or equal to 255 are kept; everything else becomes 255
    (including NaN, matching the original elementwise comparison).

    Args:
        maps: NumPy array of pixel values. Works for any shape (the
            original looped explicitly over a 3D volume only).

    Returns:
        float64 array of the same shape with values capped at 255.
    """
    # Vectorized replacement of the original triple Python loop; astype
    # reproduces the original float output (it filled an np.zeros array).
    return np.where(maps <= 255, maps, 255).astype(np.float64)
# Creating function for ReLU Layer
def relu_layer(maps):
    """Element-wise ReLU activation: negative values become zero."""
    # Compare against a zero array of matching shape/dtype; values that
    # exceed the floor are kept, everything else falls back to zero.
    floor = np.zeros_like(maps)
    return np.where(maps > floor, maps, floor)
# Creating function for Pooling Layer
def pooling_layer(maps, size=2, step=2):
    """Max-pool each channel of a feature-map volume.

    With the default 2x2 window and stride 2 the spatial size is halved;
    depth (number of channels) is preserved.
    """
    # Out = (In - Size) / Step + 1
    width_out = int((maps.shape[0] - size) / step + 1)
    height_out = int((maps.shape[1] - size) / step + 1)
    pooled = np.zeros((width_out, height_out, maps.shape[2]))
    for channel in range(maps.shape[2]):
        # enumerate gives the output index while range strides the input
        for out_i, i in enumerate(range(0, maps.shape[0] - size + 1, step)):
            for out_j, j in enumerate(range(0, maps.shape[1] - size + 1, step)):
                window = maps[i:i + size, j:j + size, channel]
                pooled[out_i, out_j, channel] = np.max(window)
    return pooled
# Opening grayscale input image and putting data into array
input_image = Image.open("images/eagle_grayscale.jpeg")
image_np = np.array(input_image)
# Checking the shape of the array
print(image_np.shape) # (270, 480, 3)
# Checking if all channels are the same
print(np.array_equal(image_np[:, :, 0], image_np[:, :, 1])) # True
print(np.array_equal(image_np[:, :, 1], image_np[:, :, 2])) # True
# Option #1 for filters
# Creating 4 first filters for the first CNN Layer with random integer numbers in range [-1, 1]
# The depth of each filter has to match the number of channels (depth) in input image
# In our case it is 3 as image has three identical grayscale channels
filter_1 = np.random.random_integers(low=-1, high=1, size=(4, 3, 3, image_np.shape[-1]))
# 4 corresponds to number of filters
# 3 and another 3 corresponds to spatial size of filters - width and height
# image_np.shape[-1] corresponds to the depth of each volume of filters
# Checking the shape of the filters
print(filter_1.shape) # (4, 3, 3, 3)
# Option #2 for filters
# Creating filters manually
# The depth of each filter has to match the number of channels (depth) in input image
# In our case it is 3 as image has three identical grayscale channels
filter_1 = np.zeros((4, 3, 3, 3))
# First filter
filter_1[0, :, :, 0] = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
filter_1[0, :, :, 1] = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
filter_1[0, :, :, 2] = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
# Second filter
filter_1[1, :, :, 0] = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
filter_1[1, :, :, 1] = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
filter_1[1, | |
# -*- coding: utf-8 -*-
import unreal
import os
from Utilities.Utils import Singleton
from Utilities.Utils import cast
import Utilities
import QueryTools
import re
import types
import collections
from .import Utils
global _r
COLUMN_COUNT = 2
class DetailData(object):
    """State for one detail panel (left or right) of the object viewer."""

    def __init__(self):
        self.filter_str = ""             # current search/filter text
        self.filteredIndexToIndex = []   # filtered row index -> raw attribute index
        self.hisCrumbObjsAndNames = []   # breadcrumb history: list[(obj, propertyName)]
        self.attributes = None           # full list of attribute descriptors
        self.filtered_attributes = None  # attributes surviving the current filter
        self.plains = []                 # flattened plain-text cells (row-major pairs)
        self.riches = []                 # flattened rich-text cells, parallel to plains
        self.selected = set()            # indices of selected rows

    def check_line_id(self, line_id, column_count):
        """Return True when `line_id` addresses a valid row of `column_count` cells."""
        from_line = line_id * column_count
        to_line = (line_id + 1) * column_count
        assert len(self.plains) == len(self.riches), "len(self.plains) != len(self.riches)"
        if 0 <= from_line < len(self.plains) and 0 <= to_line <= len(self.plains):
            return True
        else:
            unreal.log_error(f"Check Line Id Failed: {line_id}, plains: {len(self.plains)}, rich: {len(self.riches)}")
            return False

    def get_plain(self, line_id, column_count):
        """Return the plain-text cells of row `line_id`."""
        assert self.check_line_id(line_id, column_count), "check line id failed."
        # BUGFIX: previously sliced with a hard-coded 2 instead of column_count
        start = line_id * column_count
        return self.plains[start : start + column_count]

    def get_rich(self, line_id, column_count):
        """Return the rich-text cells of row `line_id`."""
        assert self.check_line_id(line_id, column_count), "check line id failed."
        # BUGFIX: previously sliced with a hard-coded 2 instead of column_count
        start = line_id * column_count
        return self.riches[start : start + column_count]
class ObjectDetailViewer(metaclass=Singleton):
    def __init__(self, jsonPath):
        """Bind the viewer to its Chameleon UI description.

        Args:
            jsonPath (str): path of the Chameleon tool json; used to fetch
                the ChameleonData handle that drives all widget updates.
        """
        self.jsonPath = jsonPath
        self.data = unreal.PythonBPLib.get_chameleon_data(self.jsonPath)
        # Widget identifiers as declared in the tool's json layout
        self.ui_checkbox_single_mode = "CheckBoxSingleMode"
        self.ui_checkbox_compare_mode = "CheckBoxCompareMode"
        self.ui_left_group = "LeftDetailGroup"
        self.ui_right_group = "RightDetailGroup"
        self.ui_button_refresh = "RefreshCompareButton"
        self.ui_detailListLeft = "ListViewLeft"
        self.ui_detailListRight = "ListViewRight"
        self.ui_hisObjsBreadcrumbLeft = 'ObjectHisBreadcrumbLeft'
        self.ui_hisObjsBreadcrumbRight = 'ObjectHisBreadcrumbRight'
        # self.ui_headRowLeft = "HeaderRowLeft"
        # self.ui_headRowRight = "HeaderRowRight"
        self.ui_labelLeft = "LabelLeft"
        self.ui_labelRight = "LabelRight"
        self.ui_info_output = "InfoOutput"
        self.ui_rightButtonsGroup = "RightButtonsGroup" # used for compare mode
        self.ui_rightListGroup = "RightListGroup"
        self.ui_refreshButtonGroup = "RefreshButtonGroup"
        self.reset()
    def on_close(self):
        # Drop all cached state when the tool window closes.
        self.reset()
    def reset(self):
        """Restore all view state to defaults (all filters on, single mode)."""
        # Attribute-category visibility toggles
        self.showBuiltin = True
        self.showOther = True
        self.showProperties = True
        self.showEditorProperties = True
        self.showParamFunction = True
        self.compareMode = False
        # Per-side panel data (DetailData); created lazily on first query
        self.left = None
        self.right = None
        self.leftSearchText = ""
        self.rightSearchText = ""
        self.left_rich = None
        self.left_plain = None
        self.var = None
        self.diff_count = 0
def update_log_text(self, bRight):
bShowRight = self.compareMode
result = ""
for side_str in ["left", "right"] if bShowRight else ["left"]:
bRight = side_str != "left"
ui_breadcrumb = self.ui_hisObjsBreadcrumbRight if bRight else self.ui_hisObjsBreadcrumbLeft
breadcrumbs = self.right.hisCrumbObjsAndNames if bRight else self.left.hisCrumbObjsAndNames
crumbCount = self.data.get_breadcrumbs_count_string(ui_breadcrumb)
if bRight:
result += "\t\t\t"
result += "{} crumb: {} hisObj: {}".format(side_str, crumbCount, len(breadcrumbs))
if self.compareMode:
result = f"{result}\t\t\tdiff count: {self.diff_count}"
self.data.set_text(self.ui_info_output, result)
def get_color_by(self, attr : Utils.attr_detail):
if attr.bCallable_builtin:
return "DarkTurquoise".lower()
if attr.bCallable_other:
return "RoyalBlue".lower()
if attr.bEditorProperty:
return "LimeGreen".lower()
if attr.bOtherProperty:
return "yellow"
def get_color(self, typeStr):
if typeStr == "property":
return 'white'
if typeStr == "return_type":
return 'gray'
if typeStr == "param":
return 'gray'
    def get_name_with_rich_text(self, attr:Utils.attr_detail):
        """Build the rich-text (Slate markup) cell for an attribute name.

        Properties render as a plain colored name; callables append their
        parameter list or return type in a secondary color.
        """
        name_color = self.get_color_by(attr)
        param_color = self.get_color("param")
        return_type_color = self.get_color("return_type")
        if attr.bProperty:
            return "\t<RichText.{}>{}</>".format(name_color, attr.name)
        else:
            if attr.param_str:
                # name(<params>) with the parameter list dimmed
                return "\t<RichText.{}>{}(</><RichText.{}>{}</><RichText.{}>)</>".format(name_color, attr.name
                                                               , param_color, attr.param_str
                                                               , name_color)
            else:
                if attr.bCallable_other:
                    return "\t<RichText.{}>{}</>".format(name_color, attr.name)
                else:
                    # no-arg callable: show "name()" plus its return type
                    return "\t<RichText.{}>{}()</><RichText.{}> {}</>".format(name_color, attr.name
                                                               , return_type_color, attr.return_type_str)
def get_name_with_plain_text(self, attr:Utils.attr_detail):
if attr.bProperty:
return "\t{}".format(attr.name)
else:
if attr.param_str:
return "\t{}({})".format( attr.name, attr.param_str)
else:
if attr.bCallable_other:
return "\t{}".format( attr.name)
else:
return "\t{}() {}".format(attr.name,attr.return_type_str)
    def filter(self, data:DetailData):
        """Apply the category toggles and search text to data.attributes.

        Returns:
            (list, list): the surviving attribute descriptors and, in
            parallel, their indices into the unfiltered attribute list.
        """
        result = []
        indices = []
        for i, attr in enumerate(data.attributes):
            # Category visibility toggles
            if not self.showEditorProperties and attr.bEditorProperty:
                continue
            if not self.showProperties and attr.bOtherProperty:
                continue
            if not self.showParamFunction and attr.bHasParamFunction:
                continue
            if not self.showBuiltin and attr.bCallable_builtin:
                continue
            if not self.showOther and attr.bCallable_other:
                continue
            if data.filter_str:
                # NOTE(review): the first test lower-cases the filter string
                # but the second does not, so display_name matching is
                # effectively case-sensitive -- confirm whether intended.
                if data.filter_str.lower() not in attr.display_result.lower() and data.filter_str not in attr.display_name.lower() :
                    continue
            result.append(attr)
            indices.append(i)
        return result, indices
    def show_data(self, data:DetailData, ui_listView):
        """Push data.filtered_attributes into a two-column list view.

        Fills data.riches / data.plains with a flattened
        [name_cell, value_cell, ...] sequence and clears the selection.
        """
        flatten_list_items = []
        flatten_list_items_plain = []
        for i, attr in enumerate(data.filtered_attributes):
            # print(f"{i}: {attr.name} {attr.display_name}, {attr.display_result} ")
            attr.check()
            assert attr.display_name, f"display name null {attr.display_name}"
            assert isinstance(attr.display_result, str), f"display result null {attr.display_result}"
            result_str = attr.display_result
            if len(result_str) > 200:
                # Truncate very long values to keep the list view responsive
                result_str = result_str[:200] + "......"
            flatten_list_items.extend([self.get_name_with_rich_text(attr), result_str])
            flatten_list_items_plain.extend([self.get_name_with_plain_text(attr), result_str])
        data.riches = flatten_list_items
        data.plains = flatten_list_items_plain
        data.selected.clear()
        self.data.set_list_view_multi_column_items(ui_listView, flatten_list_items, 2)
    def query_and_push(self, obj, propertyName, bPush, bRight): #bPush: whether add Breadcrumb nor not, call by property
        """List `obj`'s attributes into one side's panel, optionally pushing
        a breadcrumb entry for navigation history.

        Args:
            obj: object (or plain python value) whose attributes to show.
            propertyName (str): breadcrumb label; falls back to the object's
                own name/str() when empty.
            bPush (bool): append (obj, propertyName) to the breadcrumb trail.
            bRight (bool): target the right-hand panel instead of the left.
        """
        if bRight:
            ui_Label = self.ui_labelRight
            ui_listView = self.ui_detailListRight
            ui_breadcrumb = self.ui_hisObjsBreadcrumbRight
        else:
            ui_Label = self.ui_labelLeft
            ui_listView = self.ui_detailListLeft
            ui_breadcrumb = self.ui_hisObjsBreadcrumbLeft
        data = self.right if bRight else self.left
        data.attributes = Utils.ll(obj)
        data.filtered_attributes, data.filteredIndexToIndex = self.filter(data)
        self.show_data(data, ui_listView)
        # set breadcrumb
        if propertyName and len(propertyName) > 0:
            label = propertyName
        else:
            # No explicit property name: derive a label from the object itself
            if isinstance(obj, unreal.Object):
                label = obj.get_name()
            else:
                label = obj.__str__()
        if bPush: # push
            # print(f"%%% push: {propertyName}, label {label}")
            data.hisCrumbObjsAndNames.append((obj, propertyName))
            self.data.push_breadcrumb_string(ui_breadcrumb, label, label)
        self.data.set_text(ui_Label, "{} type: {}".format(label, type(obj)) )
        # Sanity check: breadcrumb widget and python-side history must agree
        crumbCount = self.data.get_breadcrumbs_count_string(ui_breadcrumb)
        if bRight:
            assert len(self.right.hisCrumbObjsAndNames) == crumbCount, "hisCrumbObjsAndNames count not match {} {}".format(len(self.right.hisCrumbObjsAndNames), crumbCount)
        else:
            assert len(self.left.hisCrumbObjsAndNames) == crumbCount, "hisCrumbObjsAndNames count not match {} {}".format(len(self.left.hisCrumbObjsAndNames), crumbCount)
        self.update_log_text(bRight)
    def clear_and_query(self, obj, bRight):
        """Start a fresh inspection of `obj`: wipe the side's breadcrumb
        history, list its attributes, then re-run the diff if comparing."""
        # first time query
        self.data.clear_breadcrumbs_string(self.ui_hisObjsBreadcrumbRight if bRight else self.ui_hisObjsBreadcrumbLeft)
        # Lazily create the per-side data holders
        if not self.right:
            self.right = DetailData()
        if not self.left:
            self.left = DetailData()
        data = self.right if bRight else self.left
        data.hisCrumbObjsAndNames = [] #clear his-Object at first time query
        if bRight:
            assert len(self.right.hisCrumbObjsAndNames) == 0, "len(self.right.hisCrumbObjsAndNames) != 0"
        else:
            assert len(self.left.hisCrumbObjsAndNames) == 0, "len(self.left.hisCrumbObjsAndNames) != 0"
        self.query_and_push(obj, "", bPush=True, bRight=bRight)
        self.apply_compare_if_needed()
        self.update_log_text(bRight)
def update_ui_by_mode(self):
self.data.set_is_checked(self.ui_checkbox_compare_mode, self.compareMode)
self.data.set_is_checked(self.ui_checkbox_single_mode, not self.compareMode)
bCollapsed = not self.compareMode
self.data.set_collapsed(self.ui_rightButtonsGroup, bCollapsed)
self.data.set_collapsed(self.ui_right_group, bCollapsed)
self.data.set_collapsed(self.ui_button_refresh, bCollapsed)
    def on_checkbox_SingleMode_Click(self, state):
        # Switch to single-panel mode; `state` is unused -- the mode is
        # implied by which checkbox handler fired.
        self.compareMode = False
        self.update_ui_by_mode()
    def on_checkbox_CompareMode_Click(self, state):
        # Switch to two-panel compare mode; `state` is unused -- the mode
        # is implied by which checkbox handler fired.
        self.compareMode = True
        self.update_ui_by_mode()
    def on_button_Refresh_click(self):
        # Re-run the left/right diff with the currently loaded data.
        self.apply_compare_if_needed()
    def on_button_SelectAsset_click(self, bRightSide):
        """Inspect the first asset currently selected in the content browser."""
        selectedAssets = Utilities.Utils.get_selected_assets()
        if len(selectedAssets) == 0:
            # Nothing selected: leave the panel unchanged
            return
        self.clear_and_query(selectedAssets[0], bRightSide)
    def on_button_QuerySelected_click(self, bRightSide):
        """Inspect the current viewport selection (component preferred)."""
        # query component when any component was selected, otherwise actor
        obj = Utilities.Utils.get_selected_comp()
        if not obj:
            obj = Utilities.Utils.get_selected_actor()
        if obj:
            self.clear_and_query(obj, bRightSide)
def on_drop(self, bRightSide, *args, **kwargs):
if "assets" in kwargs and kwargs["assets"]:
asset = unreal.load_asset(kwargs["assets"][0])
if asset:
self.clear_and_query(asset, bRightSide)
return
if "actors" in kwargs and kwargs["actors"]:
actor = unreal.PythonBPLib.find_actor_by_name(kwargs["actors"][0], unreal.EditorLevelLibrary.get_editor_world())
if actor:
print(actor)
self.clear_and_query(actor, bRightSide)
return
item_count = 0
for k, v in kwargs.items():
item_count += len(v)
if item_count == 0:
selected_comp = Utilities.Utils.get_selected_comp()
if selected_comp:
self.clear_and_query(selected_comp, bRightSide)
    def log_r_warning(self):
        """Log a hint telling the user how to populate the global '_r' helper variable."""
        unreal.log_warning("Assign the global var: '_r' with the MenuItem: 'select X --> _r' on Python Icon menu")
def on_button_Query_R_click(self, r_obj, bRightSide=False):
print("on_button_Query_R_click call")
if not r_obj:
return
self.clear_and_query(r_obj, bRightSide)
def on_list_double_click_do(self, index, bRight):
# print ("on_listview_DetailList_mouse_button_double_click {} bRight: {}".format(index, bRight))
data = self.right if bRight else self.left
typeBlacklist = [int, float, str, bool] #, types.NotImplementedType]
real_index = data.filteredIndexToIndex[index] if data.filteredIndexToIndex else index
assert 0 <= real_index < len(data.attributes)
currentObj, _ = data.hisCrumbObjsAndNames[len(data.hisCrumbObjsAndNames) - 1]
attr_name = data.attributes[real_index].name
objResult, propertyName = self.try_get_object(data, currentObj, attr_name)
if not objResult or objResult is currentObj: # equal
return
if isinstance(objResult, str) and "skip call" in objResult.lower():
return
if type(objResult) in typeBlacklist:
return
if isinstance(objResult, collections.Iterable):
if type(objResult[0]) in typeBlacklist:
return
nextObj = objResult[0]
nextPropertyName = str(propertyName) + "[0]"
else:
nextObj = objResult
nextPropertyName = str(propertyName)
self.query_and_push(nextObj, nextPropertyName, bPush=True, bRight=bRight)
self.apply_compare_if_needed()
self.update_log_text(bRight)
def on_listview_DetailListRight_mouse_button_double_click(self, index):
self.on_list_double_click_do(index, bRight=True)
def on_listview_DetailListLeft_mouse_button_double_click(self, index):
self.on_list_double_click_do(index, bRight=False)
def on_breadcrumbtrail_click_do(self, item, bRight):
ui_hisObjsBreadcrumb = self.ui_hisObjsBreadcrumbRight if bRight else self.ui_hisObjsBreadcrumbLeft
data = self.right if bRight else self.left
count = self.data.get_breadcrumbs_count_string(ui_hisObjsBreadcrumb)
print ("on_breadcrumbtrail_ObjectHis_crumb_click: {} count: {} len(data.hisCrumbObjsAndNames): {}".format(item, count, len(data.hisCrumbObjsAndNames)))
while len(data.hisCrumbObjsAndNames) > count:
data.hisCrumbObjsAndNames.pop()
nextObj, name = data.hisCrumbObjsAndNames[len(data.hisCrumbObjsAndNames) - 1]
if not bRight:
assert self.left.hisCrumbObjsAndNames == data.hisCrumbObjsAndNames, "self.left.hisCrumbObjsAndNames = data.hisCrumbObjsAndNames"
self.query_and_push(nextObj, name, bPush=False, bRight=bRight)
self.apply_compare_if_needed()
self.update_log_text(bRight=bRight)
def on_breadcrumbtrail_ObjectHisLeft_crumb_click(self, item):
self.on_breadcrumbtrail_click_do(item, bRight=False)
def on_breadcrumbtrail_ObjectHisRight_crumb_click(self, item):
self.on_breadcrumbtrail_click_do(item, bRight=True)
def remove_address_str(self, strIn):
return re.sub(r'\(0x[0-9,A-F]{16}\)', '', strIn)
def apply_compare_if_needed(self):
if not self.compareMode:
return
lefts = self.left.filtered_attributes if self.left.filtered_attributes else self.left.attributes
rights = self.right.filtered_attributes if self.right.filtered_attributes else self.right.attributes
if not lefts:
lefts = []
if not rights:
rights = []
leftIDs = []
rightIDs = []
for i, left_attr in enumerate(lefts):
for j, right_attr in enumerate(rights):
if right_attr.name == left_attr.name:
if right_attr.result != left_attr.result:
leftIDs.append(i)
rightIDs.append(j)
break
self.data.set_list_view_multi_column_selections(self.ui_detailListLeft, leftIDs)
self.data.set_list_view_multi_column_selections(self.ui_detailListRight, rightIDs)
self.diff_count = len(leftIDs)
def apply_search_filter(self, text, bRight):
_data = self.right if bRight else self.left
_data.filter_str = text if len(text) else ""
_data.filtered_attributes, _data.filteredIndexToIndex = self.filter(_data)
ui_listView = self.ui_detailListRight if bRight else self.ui_detailListLeft
self.show_data(_data, ui_listView)
self.apply_compare_if_needed()
def on_searchbox_FilterLeft_text_changed(self, text):
self.apply_search_filter(text, bRight=False)
def on_searchbox_FilterLeft_text_committed(self, text):
self.apply_search_filter(text, bRight=False)
def on_searchbox_FilterRight_text_changed(self, text):
self.apply_search_filter(text, bRight=True)
def on_searchbox_FilterRight_text_committed(self, text):
self.apply_search_filter(text, bRight=True)
def apply_filter(self):
_datas = [self.left, self.right]
_isRight = [False, True]
for data, bRight in zip(_datas, _isRight):
if len(data.hisCrumbObjsAndNames) > 0:
nextObj, name = data.hisCrumbObjsAndNames[len(data.hisCrumbObjsAndNames)-1]
self.query_and_push(nextObj, name, bPush=False, bRight=bRight)
self.apply_compare_if_needed()
self.update_log_text(bRight=False) #
def try_get_object(self, data, obj, name:str):
index = -1
attribute = None
for i, attr in enumerate(data.attributes):
if attr.name == name:
index = i
attribute = attr
assert index >= 0
return attribute.result, name
def ui_on_checkbox_ShowBuiltin_state_changed(self, bEnabled):
self.showBuiltin = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.