text stringlengths 957 885k |
|---|
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qcore.asserts import assert_eq, assert_is, AssertRaises
from qcore.testing import (
Anything,
decorate_all_test_methods,
decorate_func_or_method_or_class,
GreaterEq,
disabled,
SkipTest,
TEST_PREFIX,
)
def test_Anything():
    """Anything must compare equal to every value, in both directions."""
    for value in (None, []):
        assert_eq(Anything, value)
        assert_eq(value, Anything)
        assert not (Anything != value)
        assert not (value != Anything)
    assert_eq('<Anything>', repr(Anything))
def test_GreaterEq():
    """GreaterEq(n) equals any value >= n and rejects smaller values."""
    for candidate in (3, 2):
        assert_eq(GreaterEq(2), candidate)
        assert not GreaterEq(2) != candidate
    with AssertRaises(AssertionError):
        assert_eq(GreaterEq(3), 2)
    assert_eq('<GreaterEq(3)>', repr(GreaterEq(3)))
def _check_disabled(fn):
    """Assert that *fn* behaves like a disabled test.

    When SkipTest is available, calling the disabled test must raise it;
    otherwise a disabled test is a no-op returning None.
    """
    if SkipTest is not None:
        with AssertRaises(SkipTest):
            fn()
    else:
        assert_is(None, fn())
def test_disabled():
    """@disabled must skip test functions and test methods, leave normal methods alone."""
    # Disabling a plain function.
    @disabled
    def fn():
        pass
    _check_disabled(fn)
    # Disabling a whole class disables its test_* methods only.
    @disabled
    class TestCls(object):
        def test_method(self):
            return marker
        def normal_method(self):
            return marker
    _check_disabled(TestCls().test_method)
    assert_is(marker, TestCls().normal_method())
    # Disabling a single method leaves sibling methods untouched.
    class TestCls2(object):
        def test_method(self):
            return marker
        @disabled
        def test_method_disabled(self):
            return marker
        def normal_method(self):
            return marker
    assert_is(marker, TestCls2().test_method())
    _check_disabled(TestCls2().test_method_disabled)
    assert_is(marker, TestCls2().normal_method())
    # Only functions, methods, or classes may be disabled.
    with AssertRaises(AssertionError):
        disabled(None)
def normal_method(self):
    """Plain (non-test) method, attached to fixture classes by the decoration tests."""
    pass
# Sentinel object returned by the fixture decorator so identity can be asserted.
marker = object()
# Attribute names used to attach a test method / non-callable member to fixture classes.
test_method_name = TEST_PREFIX + '_method'
test_member_name = TEST_PREFIX + '_member'
def decorator(method):
    """Fixture decorator: discards *method* and returns the sentinel."""
    return marker
def _get_decoratable_class():
    """Build a fresh class carrying a normal method, a test method, and a
    non-callable test-prefixed member, for the decoration tests."""
    class Cls(object):
        pass
    Cls.normal_method = normal_method
    test_method = lambda self: None
    setattr(Cls, test_method_name, test_method)
    # Sanity-check the attachment before handing the class out.
    assert_eq(test_method.__get__(None, Cls), getattr(Cls, test_method_name))
    # Non-callable members with the test prefix must never be decorated.
    setattr(Cls, test_member_name, 'not a method')
    return Cls
def _assert_is_decorated(new_cls, cls):
    """Assert that only the test method of *cls* was replaced by `decorator`."""
    assert_is(new_cls, cls)
    # Normal methods are untouched ...
    assert_eq(normal_method.__get__(None, new_cls), new_cls.normal_method)
    # ... the test method was swapped for the sentinel ...
    assert_is(marker, getattr(new_cls, test_method_name))
    # ... and non-callable test-prefixed members are left alone.
    assert_eq('not a method', getattr(new_cls, test_member_name))
def test_decorate_all_test_methods():
    """decorate_all_test_methods applies the decorator to test methods only."""
    cls = _get_decoratable_class()
    _assert_is_decorated(decorate_all_test_methods(decorator)(cls), cls)
def test_decorate_func_or_method_or_class():
    """Decorating works for classes and functions, and rejects anything else."""
    cls = _get_decoratable_class()
    decorated_cls = decorate_func_or_method_or_class(decorator)(cls)
    _assert_is_decorated(decorated_cls, cls)
    assert_is(marker, decorate_func_or_method_or_class(decorator)(normal_method))
    with AssertRaises(AssertionError):
        decorate_func_or_method_or_class(decorator)(None)
|
def generateXaxis():
    """Create the x-axis labels for the board.

    Returns:
        LIST: The integers 1 through 10.
    """
    # list(range(...)) is the idiomatic form of the identity comprehension.
    return list(range(1, 11))
def generateYaxis():
    """Create the y-axis labels for the board.

    Returns:
        LIST: The letters 'A' through 'J'.
    """
    return [chr(code) for code in range(ord('A'), ord('J') + 1)]
def generateBoard():
    """Generate a dictionary representing a blank battleship board.

    Returns:
        DICT: Maps every 'ROW:COL' position (e.g. 'A:1') to '0'.
    """
    return {f'{row}:{col}': '0'
            for row in generateYaxis()
            for col in generateXaxis()}
def clean_board(shipDict):
    """Replace every '0' value in the board with a single space, in place.

    Args:
        shipDict (DICT): A dictionary representing the battleship board.

    Returns:
        DICT: The same dictionary, with all '0' cells blanked out.
    """
    blank_positions = [key for key, value in shipDict.items() if value == '0']
    for key in blank_positions:
        shipDict[key] = ' '
    return shipDict
def print_hunt_board(ship_dict):
    """Print the given board dictionary as an easy to read grid.

    Args:
        ship_dict (DICT): Dictionary representing the battleship board.
    """
    ship_dict = clean_board(ship_dict)
    separator = ' --- --- --- --- --- --- --- --- --- ---'
    print(' 1 2 3 4 5 6 7 8 9 10')
    print(separator)
    # One loop replaces ten near-identical print statements; missing cells
    # fall back to a blank, exactly as the original .get(..., ' ') calls did.
    for row in 'ABCDEFGHIJ':
        cells = ' | '.join(str(ship_dict.get(f'{row}:{col}', ' ')) for col in range(1, 11))
        print(f'{row} | {cells} |')
        print(separator)
def print_empty_board():
    """Print a blank battleship board."""
    separator = ' --- --- --- --- --- --- --- --- --- ---'
    empty_row = ' |' * 11
    print(' 1 2 3 4 5 6 7 8 9 10')
    print(separator)
    # One loop replaces ten duplicated row/separator print pairs.
    for row in 'ABCDEFGHIJ':
        print(row + empty_row)
        print(separator)
def print_prob_board(ship_dict):
    """Print the given board dictionary, with wider cells so that double
    digit numbers line up correctly.

    Args:
        ship_dict (DICT): Dictionary representing the battleship board.
    """
    ship_dict = clean_board(ship_dict)
    separator = ' ----- ----- ----- ----- ----- ----- ----- ----- ----- -----'
    print(' 1 2 3 4 5 6 7 8 9 10')
    print(separator)
    # Same layout as print_hunt_board but with the wider '-----' separator;
    # str() matches the original f-string conversion of non-string cells.
    for row in 'ABCDEFGHIJ':
        cells = ' | '.join(str(ship_dict.get(f'{row}:{col}', ' ')) for col in range(1, 11))
        print(f'{row} | {cells} |')
        print(separator)
from allennlp.predictors.predictor import Predictor
import os
import sys
from sayhello import app
from nltk.stem.wordnet import WordNetLemmatizer
from sayhello.commonDataProcess import CommonDatabase
class OpenInfoPredictor:
    """Wrapper around the AllenNLP Open Information Extraction model."""

    def __init__(self):
        # Model archive is fetched from the public AllenNLP bucket; a local
        # copy under sayhello/source/ can be substituted here instead.
        self.source_tgz = "https://storage.googleapis.com/allennlp-public-models/openie-model.2020.03.26.tar.gz"
        # Loading downloads/unpacks the archive — this is slow on first use.
        self.predictor = Predictor.from_path(self.source_tgz)

    def query(self, comment):
        """Run open information extraction on *comment* (a sentence string).

        Returns the raw AllenNLP prediction dict.
        """
        output = self.predictor.predict(sentence=comment)
        return output
class EntailmentPredictor:
    """Wrapper around the AllenNLP decomposable-attention (ELMo) entailment model."""

    def __init__(self):
        # Model archive is fetched from the public AllenNLP bucket; a local
        # copy under sayhello/source/ can be substituted here instead.
        self.source_tgz = "https://storage.googleapis.com/allennlp-public-models/decomposable-attention-elmo-2020.04.09.tar.gz"
        self.predictor = Predictor.from_path(self.source_tgz)

    def query(self, pre, hyp):
        """Return the entailment label for premise *pre* and hypothesis *hyp*."""
        output = self.predictor.predict(premise=pre, hypothesis=hyp)['label']
        return output
class NameEntityPredictor:
    """Wrapper around the AllenNLP ELMo named-entity recognition model."""

    def __init__(self):
        # Model archive is fetched from the public AllenNLP bucket; a local
        # copy under sayhello/source/ can be substituted here instead.
        self.source_tgz = "https://storage.googleapis.com/allennlp-public-models/ner-elmo.2021-02-12.tar.gz"
        self.predictor = Predictor.from_path(self.source_tgz)
        print('load finish ner-elmo.2021-02-12.tar.gz')

    def query(self, comment):
        """Run NER on *comment*; returns the raw AllenNLP prediction dict."""
        output = self.predictor.predict(sentence=comment)
        return output
class UserPredict:
    """Turn a natural-language comment into a logic atom clause.

    Pipeline: OpenIE extracts verb frames, the best frame is lemmatised and
    its arguments are classified by NER; non-entity arguments are folded into
    the predicate name, entity arguments become the clause arguments.

    Fixes vs. the previous revision:
    - entity_processing stored ORG and LOC entities in the PER database.
    - query no longer crashes when OpenIE returns no verb frames.
    """

    def __init__(self):
        self.openInfoEngine = OpenInfoPredictor()
        # self.entailEngine = EntailmentPredictor()
        self.nameEntityEngine = NameEntityPredictor()
        self.commonDB = CommonDatabase("nen.cmdata")
        self.verb_database = []
        self.entity_database_per = []
        self.entity_database_org = []
        self.entity_database_loc = []

    def query(self, comment):
        """Convert *comment* into an atom clause.

        Returns:
            (clause_dict, clause_string, entity_result_list) on success,
            or False when no argument is recognised as a named entity
            (or OpenIE finds no verb frame at all).
        """
        result_dict = self.openInfoEngine.query(comment)
        print(result_dict)
        # Pick the verb frame with the fewest 'O' (outside) tags, i.e. the
        # frame covering the largest part of the sentence.
        tags_0 = [verb_dict['tags'].count('O') for verb_dict in result_dict["verbs"]]
        if not tags_0:
            # No verb frames: nothing to build a clause from.
            print('This may not be a fact')
            return False
        index_desire = tags_0.index(min(tags_0))
        best_verb_dict = result_dict["verbs"][index_desire]
        print(best_verb_dict)
        self.verb_database.append(best_verb_dict['verb'])
        string_list = self._extract_bracketed(best_verb_dict['description'])
        # Convert the verb into its standard (lemma) form.
        verb_standard = WordNetLemmatizer().lemmatize(best_verb_dict['verb'], 'v')
        this_atom_clauses = {'verb': verb_standard, 'neg': False, 'args': None}
        arg_list = []
        for segment in string_list:
            if 'NEG' in segment:
                this_atom_clauses['neg'] = True
            elif 'ARG' in segment:
                # Segments look like 'ARG0: Tom' — keep the text after the tag.
                arg_list.append(segment.split(': ')[1])
        this_atom_clauses['args'] = arg_list
        # Classify every argument; at least one must be a named entity for the
        # comment to count as a fact.
        final_result = False
        entity_result_list = []
        for arg in arg_list:
            result = self.entity_processing(arg)
            entity_result_list.append(result)
            print(result)
            if result:
                final_result = True
        if not final_result:
            print('This may not be a fact')
            return False
        print("----------")
        this_atom_clauses_dict = this_atom_clauses
        # Fold non-entity arguments into the predicate name as '_'-joined suffixes.
        for idx in range(len(arg_list)):
            print(arg_list)
            if not entity_result_list[idx]:
                args_split = arg_list[idx].split(' ')
                print(args_split)
                print(this_atom_clauses_dict['verb'])
                this_list = [this_atom_clauses_dict['verb']] + args_split
                print(this_list)
                this_atom_clauses_dict['verb'] = '_'.join(this_list)
                print(this_atom_clauses_dict['verb'])
        # Keep only the arguments recognised as named entities.
        new_list = [arg for arg, is_entity in zip(arg_list, entity_result_list) if is_entity]
        this_atom_clauses['args'] = new_list
        print("----")
        print(this_atom_clauses['args'])
        print("----")
        atom_clause = ''
        if this_atom_clauses_dict['neg']:
            atom_clause += '!'
        atom_clause = atom_clause + this_atom_clauses_dict['verb'] + '(' + ','.join(this_atom_clauses_dict['args']) + ')'
        return this_atom_clauses_dict, atom_clause, entity_result_list

    @staticmethod
    def _extract_bracketed(description):
        """Return the '[...]'-delimited segments of an OpenIE description string."""
        segments = []
        inside = False
        current = ''
        for ch in description:
            if ch == '[':
                inside = True
            elif ch == ']':
                inside = False
                segments.append(current)
                current = ''
            elif inside:
                current += ch
        return segments

    def entity_processing(self, arg):
        """Classify *arg* via NER.

        Returns 'PER', 'ORG', or 'LOC' when *every* token of *arg* carries
        that entity tag (and records the entity in the matching database),
        otherwise False.
        """
        tag_result = self.nameEntityEngine.query(arg)['tags']
        print(tag_result)
        for label, database in (('PER', self.entity_database_per),
                                ('ORG', self.entity_database_org),
                                ('LOC', self.entity_database_loc)):
            if all(label in tag for tag in tag_result):
                # BUG FIX: ORG/LOC entities used to be appended to the PER list.
                database.append(arg)
                return label
        return False

    def breakdown(self, comment):
        """Placeholder: split compound comments (e.g. on 'or') into clauses."""
        if "or" in comment:
            pass
        else:
            pass
"""test = UserPredict(True)
# print(test.query("Tom accuses Bob."))
test.query("Tom does not accuse Bob of stealing the money.")
""" |
<gh_stars>1-10
#!/usr/bin/env python
# Standard Ansible module metadata: community-supported, preview status.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
import os
from ansible.module_utils.basic import AnsibleModule, json
from ansible.module_utils.viptela import viptelaModule, viptela_argument_spec
def run_module():
    """Create, update, or delete vManage policy lists.

    Builds the module argument spec, normalises single-entry input into the
    aggregate form, then diffs each desired list against what vManage already
    has and issues POST/PUT/DELETE requests as needed (honouring check mode).
    """
    # Define the available arguments/parameters a user can pass to the module.
    argument_spec = viptela_argument_spec()
    argument_spec.update(
        state=dict(type='str', choices=['absent', 'present'], default='present'),
        aggregate=dict(type='list'),
        name=dict(type='str'),
        description=dict(type='str'),
        type=dict(type='str', required=True,
                  choices=['color', 'vpn', 'site', 'app', 'dataprefix', 'prefix',
                           'aspath', 'class', 'community', 'extcommunity', 'mirror',
                           'tloc', 'sla', 'policer', 'ipprefixall', 'dataprefixall']),
        entries=dict(type='list'),
        push=dict(type='bool', default=False),
        force=dict(type='bool', default=False),
    )
    # Seed the result dict; 'changed' reports whether the target was modified.
    result = dict(
        changed=False,
    )
    # The AnsibleModule object is our abstraction for working with Ansible:
    # argument handling plus check-mode support.
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           )
    viptela = viptelaModule(module)
    # Always work as an aggregate: wrap a single entry in a list.
    if viptela.params['aggregate']:
        policy_list = viptela.params['aggregate']
    else:
        policy_list = [
            {
                "name": viptela.params['name'],
                "description": viptela.params['description'],
                "type": viptela.params['type'],
                "entries": viptela.params['entries'],
            }
        ]
    policy_list_dict = viptela.get_policy_list_dict(viptela.params['type'], remove_key=False)
    # Renamed loop variable from 'list' — it shadowed the builtin.
    for policy_entry in policy_list:
        if viptela.params['state'] == 'present':
            if policy_entry['name'] in policy_list_dict:
                existing = policy_list_dict[policy_entry['name']]
                # FIXME Just compare the entries for now.
                if (policy_entry['entries'] != existing['entries']) or viptela.params['force']:
                    policy_entry['listId'] = existing['listId']
                    viptela.result['new_entries'] = policy_entry['entries']
                    viptela.result['existing_entries'] = existing['entries']
                    # If description is not specified, reuse the existing one.
                    if not policy_entry['description']:
                        policy_entry['description'] = existing['description']
                    viptela.result['changed'] = True
                    if not module.check_mode:
                        viptela.result['put_payload'] = policy_entry
                        response = viptela.request(
                            '/dataservice/template/policy/list/{0}/{1}'.format(
                                policy_entry['type'].lower(), policy_entry['listId']),
                            method='PUT', payload=policy_entry)
                        viptela.result['response'] = response.json
                        if response.json:
                            # Updating the policy list returns a 'processId' that locks the
                            # list and 'masterTemplatesAffected' listing affected templates.
                            if 'processId' in response.json:
                                process_id = response.json['processId']
                                viptela.result['put_payload'] = response.json['processId']
                                if viptela.params['push']:
                                    # Push the change by re-attaching each affected template.
                                    for template_id in response.json['masterTemplatesAffected']:
                                        action_id = viptela.reattach_device_template(template_id)
                                # Delete the lock on the policy list.
                                # FIXME: the list does not seem to update when we unlock too
                                # soon; we may need to wait for the attachment to finish.
                                response = viptela.request(
                                    '/dataservice/template/lock/{0}'.format(process_id),
                                    method='DELETE')
                                viptela.result['lock_response'] = response.json
                            else:
                                viptela.fail_json(msg="Did not get a process id when updating policy list")
            else:
                if not module.check_mode:
                    viptela.request(
                        '/dataservice/template/policy/list/{0}/'.format(policy_entry['type'].lower()),
                        method='POST', payload=policy_entry)
                viptela.result['changed'] = True
        else:
            if policy_entry['name'] in policy_list_dict:
                if not module.check_mode:
                    # BUG FIX: the incoming entry never carries 'listId' in the
                    # absent path — look it up from the existing lists instead
                    # (the old code raised KeyError here).
                    viptela.request(
                        '/dataservice/template/policy/list/{0}/{1}'.format(
                            policy_entry['type'].lower(),
                            policy_list_dict[policy_entry['name']]['listId']),
                        method='DELETE')
                viptela.result['changed'] = True
    viptela.logout()
    viptela.exit_json(**viptela.result)
def main():
    """Module entry point."""
    run_module()


if __name__ == '__main__':
    main()
# Example lock-release URL seen during development:
# https://172.16.31.10:8443/dataservice/template/lock/push_feature_template_configuration-2e133445-ae15-49ab-b74a-c1fe65e263b6
from __future__ import annotations
from typing import Set
from collections import defaultdict
from heapq import heapify, heappop, heappush
import os
'''
coarse matrix
0, 0, 0, 0, 0, 1, 1, 1, 0, 1,
0, 0, 1, 0, 0, 0, 1, 1, 0, 0,
0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
1, 0, 1, 0, 0, 0, 0, 1, 0, 1,
1, 1, 0, 0, 0, 0, 0, 0, 1, 0,
1, 1, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 1, 0, 0, 1,
1, 0, 1, 0, 0, 1, 0, 0, 1, 0,
'''
class Client:
    """A pizzeria client, described by liked and disliked ingredient sets."""

    def __init__(self, like: Set[str], dislike: Set[str]):
        self.likes = like
        self.dislikes = dislike

    def is_compatible_with(self, other: Client):
        """Two clients are compatible when neither dislikes what the other likes."""
        ours_ok = self.likes.isdisjoint(other.dislikes)
        theirs_ok = self.dislikes.isdisjoint(other.likes)
        return ours_ok and theirs_ok

    def __str__(self):
        return f'likes = {self.likes}, dislikes = {self.dislikes}'

    def __repr__(self):
        return self.__str__()
class Graph:
    """Undirected incompatibility graph over client indices.

    adj_list: per-vertex set of conflicting vertices; indegree: per-vertex
    degree counts. Vertices can be deactivated so later clique searches
    ignore them.
    """

    def __init__(self, adj_list, indegree):
        self.adj_list = adj_list
        self.indegree = indegree
        self.active_vertices = [True] * len(self.adj_list)

    def find_maximal_cliques(self):
        """Return all maximal cliques among active vertices, sorted ascending
        by size (so .pop() yields the largest)."""
        vertices = {idx for idx, v in enumerate(self.active_vertices) if v}
        result = []
        self._bron_kerbosch_algo(set(), vertices, set(), result)
        result.sort(key=lambda x: len(x), reverse=False)
        return result

    def _bron_kerbosch_algo(self, current, candidate, exclusion, result):
        """Bron-Kerbosch maximal-clique enumeration (variant without pivoting).

        current: clique built so far; candidate: vertices that may extend it;
        exclusion: vertices already processed; result: output list.
        NOTE(review): candidate is consumed destructively by pop(); that is
        how this variant moves vertices out of the candidate set.
        """
        if not candidate and not exclusion:
            # Nothing can extend 'current' and nothing supersedes it: maximal.
            result.append(current)
            return
        while candidate:
            vertex = candidate.pop()
            self._bron_kerbosch_algo(current.union({vertex}), candidate.intersection(self.adj_list[vertex]),
                                     exclusion.intersection(self.adj_list[vertex]), result)
            exclusion.update({vertex})

    def inactivate_vertices(self, vertices):
        """Exclude *vertices* from future clique searches."""
        for vertex in vertices:
            self.active_vertices[vertex] = False

    def find_compatible_cliques(self):
        """Greedily peel off the largest maximal clique until only the empty
        clique remains, then search the peeled cliques for the best set."""
        result = []
        maximal_incompatible_cliques = self.find_maximal_cliques()
        while maximal_incompatible_cliques[0] != set():
            print(f'find compatible cliques process left {len(maximal_incompatible_cliques)}')
            current_clique = maximal_incompatible_cliques.pop()
            result.append(current_clique)
            self.inactivate_vertices(current_clique)
            maximal_incompatible_cliques = self.find_maximal_cliques()
        return self.find_max_compatible_vertex(result)

    def find_max_compatible_vertex(self, compatible_cliques):
        """Exhaustively search for the largest mutually compatible vertex set.

        Returns a list whose entries are all maximum-size solutions found.
        """
        print(f'start of find max compatible vertex')
        result = [set()]
        self._helper(compatible_cliques, result, set(), 0)
        return result

    def _helper(self, cliques, result, ds, current):
        """Backtracking over cliques: optionally take each clique member that
        conflicts with nothing already chosen, maximising |ds|."""
        if current == len(cliques):
            if len(ds) > len(result[0]):
                # Strictly better: discard all previous best solutions.
                while result:
                    result.pop()
                result.append(ds.copy())
            elif len(ds) == len(result[0]):
                result.append(ds.copy())
            print(f'current max result {len(result[0])}')
            return
        for vertex in cliques[current]:
            if ds.isdisjoint(self.adj_list[vertex]):
                # Take the vertex, recurse, then undo (classic backtracking).
                ds.add(vertex)
                self._helper(cliques, result, ds, current + 1)
                ds.remove(vertex)
            else:
                self._helper(cliques, result, ds, current + 1)

    def find_max_compatible_vertex_v2(self):
        """Greedy independent-set heuristic: repeatedly take the vertex with
        the lowest current degree and discard its neighbours."""
        priority_q = []
        visited = [False] * len(self.adj_list)
        result = set()
        for vertex, indegree in enumerate(self.indegree):
            heappush(priority_q, (indegree, vertex))
        while priority_q:
            degree, vertex = heappop(priority_q)
            if visited[vertex]:
                # Stale heap entry (lazy deletion) or vertex already discarded.
                continue
            result.add(vertex)
            visited[vertex] = True
            for adj_vertex in self.adj_list[vertex]:
                # Neighbours of a chosen vertex can never be chosen.
                visited[adj_vertex] = True
                for adj_vertex_adj in self.adj_list[adj_vertex]:
                    if self.indegree[adj_vertex_adj] > 1:
                        self.indegree[adj_vertex_adj] -= 1
                        # Re-push with the decreased degree; the old entry is
                        # skipped later via the visited check.
                        heappush(priority_q, (self.indegree[adj_vertex_adj], adj_vertex_adj))
        return result
class OnePizza:
    """Driver for the Hash Code 'One Pizza' problem: pick ingredients that
    satisfy the largest set of mutually compatible clients."""

    def __init__(self, file_name):
        self.clients = self.get_clients(self.parser(file_name))
        self.incompatible_graph = Graph(*self.build_incompatible_client_graph())
        self.file_name = file_name

    def final_result(self):
        """Compute the answer, print a summary, and write the output file."""
        compatible_clients = self.get_max_compatible_clients()
        compatible_clients_ingredients = self.get_max_compatible_clients_ingredients(compatible_clients)
        result = len(compatible_clients_ingredients), compatible_clients_ingredients
        print('-' * 50)
        print(self.file_name)
        print(f'total compatible customers - {len(compatible_clients)}')
        print(result)
        print('-' * 50)
        self.result_file_creator(self.file_name, result)

    def result_file_creator(self, file_name, result):
        """Write '<count> <ingredient...>' next to the input file, .in -> .out."""
        base, file = os.path.split(file_name)
        # NOTE(review): str.replace swaps *every* 'in' substring in the name,
        # not just the extension — verify against the actual file names used.
        file = file.replace('in', 'out')
        with open(os.path.join(base, file), 'w+') as file:
            file.write(f'{result[0]} {" ".join(result[1])}')

    def get_clients(self, data):
        """Build Client objects from parsed lines.

        Lines alternate likes/dislikes; each line is [count, item1, ...] and
        the leading count token is dropped.
        """
        clients = []
        for idx in range(2, len(data), 2):
            dislikes = set(data[idx][1:])
            likes = set(data[idx - 1][1:])
            clients.append(Client(likes, dislikes))
        return clients

    def build_incompatible_client_graph(self):
        """Return (adj_list, indegree) linking every incompatible client pair."""
        adj_list = [set() for _ in range(len(self.clients))]
        # adj_matrix is populated but only adj_list/indegree are returned.
        adj_matrix = [[0] * len(self.clients) for _ in range(len(self.clients))]
        indegree = [0] * len(adj_list)
        for idx1 in range(len(self.clients)):
            client1 = self.clients[idx1]
            for idx2 in range(idx1 + 1, len(self.clients)):
                client2 = self.clients[idx2]
                if not client1.is_compatible_with(client2):
                    adj_list[idx1].add(idx2)
                    adj_list[idx2].add(idx1)
                    adj_matrix[idx1][idx2] = 1
                    adj_matrix[idx2][idx1] = 1
                    indegree[idx2] += 1
                    indegree[idx1] += 1
        print(f'graph building completed. Total vertex {len(adj_list)}')
        return adj_list, indegree

    def get_max_compatible_clients(self):
        """Pick a large set of pairwise-compatible clients (greedy heuristic)."""
        result = self.incompatible_graph.find_max_compatible_vertex_v2()
        return result

    def mark_cliques(self, cliques_visited, groups, client, visited, cliques):
        """Mark every clique containing *client* and bump its members' counters."""
        for item in groups[client]:
            cliques_visited[item] = True
            for c in cliques[item]:
                visited[c] += 1

    def unmark_cliques(self, cliques_visited, groups, client, visited: Set[int], cliques):
        """Undo mark_cliques for *client* (counters floor at zero)."""
        for item in groups[client]:
            cliques_visited[item] = False
            for c in cliques[item]:
                visited[c] = 0 if visited[c] == 1 else visited[c] - 1

    def create_client_clique_groups(self, cliques):
        """Map each client to the indices of the cliques it belongs to."""
        result = defaultdict(list)
        for idx, clique in enumerate(cliques):
            for client in clique:
                result[client].append(idx)
        return result

    def get_max_compatible_clients_ingredients(self, clients):
        """Union the chosen clients' likes, removing each client's dislikes.

        NOTE(review): difference_update runs per client, so the outcome is
        order-dependent; for pairwise-compatible clients no like can clash
        with a dislike, so this should be moot — confirm.
        """
        result = set()
        for client in clients:
            client = self.clients[client]
            result.update(client.likes)
            result.difference_update(client.dislikes)
        return result

    def parser(self, file_name):
        """Read *file_name* and return a list of whitespace-split line tokens."""
        data = []
        with open(file_name, 'r') as file:
            line = file.readline()
            while line:
                data.append(line.strip().split())
                line = file.readline()
        return data

    def __str__(self):
        return f'total clients - {len(self.clients)}'
# Run every Hash Code input; each call reads data/<name>.in.txt and writes the
# matching output file.
# NOTE(review): these run on import — consider an `if __name__ == '__main__':` guard.
OnePizza(f'data/a_an_example.in.txt').final_result()
OnePizza(f'data/b_basic.in.txt').final_result()
OnePizza(f'data/c_coarse.in.txt').final_result()
OnePizza(f'data/d_difficult.in.txt').final_result()
OnePizza(f'data/e_elaborate.in.txt').final_result()
|
# coding: utf-8
import pprint
import re
import six
class ResizeInstanceReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'new_spec_code': 'str',
'new_storage_space': 'int'
}
attribute_map = {
'new_spec_code': 'new_spec_code',
'new_storage_space': 'new_storage_space'
}
def __init__(self, new_spec_code=None, new_storage_space=None):
"""ResizeInstanceReq - a model defined in huaweicloud sdk"""
self._new_spec_code = None
self._new_storage_space = None
self.discriminator = None
if new_spec_code is not None:
self.new_spec_code = new_spec_code
if new_storage_space is not None:
self.new_storage_space = new_storage_space
@property
def new_spec_code(self):
"""Gets the new_spec_code of this ResizeInstanceReq.
规格变更后的规格ID。 若只扩展磁盘大小,则规格ID保持和原实例不变。 规格ID请参考[查询实例的扩容规格列表](https://support.huaweicloud.com/api-kafka/ShowInstanceExtendProductInfo.html)接口。
:return: The new_spec_code of this ResizeInstanceReq.
:rtype: str
"""
return self._new_spec_code
@new_spec_code.setter
def new_spec_code(self, new_spec_code):
"""Sets the new_spec_code of this ResizeInstanceReq.
规格变更后的规格ID。 若只扩展磁盘大小,则规格ID保持和原实例不变。 规格ID请参考[查询实例的扩容规格列表](https://support.huaweicloud.com/api-kafka/ShowInstanceExtendProductInfo.html)接口。
:param new_spec_code: The new_spec_code of this ResizeInstanceReq.
:type: str
"""
self._new_spec_code = new_spec_code
@property
def new_storage_space(self):
"""Gets the new_storage_space of this ResizeInstanceReq.
规格变更后的消息存储空间,单位:GB。 若扩展实例基准带宽,则new_storage_space不能低于基准带宽规定的最小磁盘大小。 磁盘空间大小请参考[查询实例的扩容规格列表](https://support.huaweicloud.com/api-kafka/ShowInstanceExtendProductInfo.html)接口。
:return: The new_storage_space of this ResizeInstanceReq.
:rtype: int
"""
return self._new_storage_space
@new_storage_space.setter
def new_storage_space(self, new_storage_space):
"""Sets the new_storage_space of this ResizeInstanceReq.
规格变更后的消息存储空间,单位:GB。 若扩展实例基准带宽,则new_storage_space不能低于基准带宽规定的最小磁盘大小。 磁盘空间大小请参考[查询实例的扩容规格列表](https://support.huaweicloud.com/api-kafka/ShowInstanceExtendProductInfo.html)接口。
:param new_storage_space: The new_storage_space of this ResizeInstanceReq.
:type: int
"""
self._new_storage_space = new_storage_space
def to_dict(self):
    """Return the model's properties as a plain dict.

    Nested model objects (anything exposing ``to_dict``) are serialised
    recursively, and attributes listed in ``self.sensitive_list`` are
    masked with ``"****"``.
    """
    result = {}
    for attr in self.openapi_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            # Recursively serialise models stored inside lists.
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Recursively serialise models stored as dict values.
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            # Plain value: mask it when flagged as sensitive.
            result[attr] = "****" if attr in self.sensitive_list else value
    return result
def to_str(self):
    """Return a pretty-printed string representation of the model."""
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """Delegate `repr` (and hence `print`/`pprint`) to to_str()."""
    return self.to_str()
def __eq__(self, other):
    """Return True when *other* is the same model type with equal state."""
    return isinstance(other, ResizeInstanceReq) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Return True when the two models are not equal (negation of __eq__)."""
    return not self == other
|
from typing import Any, Dict, Optional, Tuple, Text, List
import pytest
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.nlu.constants import TOKENS_NAMES
from rasa.shared.nlu.constants import (
TEXT,
INTENT,
RESPONSE,
INTENT_RESPONSE_KEY,
ACTION_TEXT,
ACTION_NAME,
)
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizerGraphComponent
def create_whitespace_tokenizer(
    config: Optional[Dict[Text, Any]] = None
) -> WhitespaceTokenizerGraphComponent:
    """Build a whitespace tokenizer, overlaying *config* onto the defaults."""
    merged_config = dict(WhitespaceTokenizerGraphComponent.get_default_config())
    merged_config.update(config or {})
    return WhitespaceTokenizerGraphComponent(merged_config)
def test_tokens_comparison():
    """Tokens support equality and ordering; mixing with other types fails."""
    lower = Token("hello", 0)
    upper = Token("Hello", 0)

    # A token equals itself, and capitalisation affects ordering.
    assert lower == lower
    assert upper < lower

    # Tokens never compare equal to non-token values.
    assert lower != 1

    # Ordering against a non-token is a TypeError.
    with pytest.raises(TypeError):
        assert upper < "a"
@pytest.mark.parametrize(
    "text, expected_tokens, expected_indices",
    [("Forecast for lunch", ["Forecast", "for", "lunch"], [(0, 8), (9, 12), (13, 18)])],
)
def test_train_tokenizer(
    text: Text, expected_tokens: List[Text], expected_indices: List[Tuple[int, int]]
):
    # Training should tokenize TEXT and RESPONSE into words with correct
    # character offsets, while INTENT stays a single token.
    tk = create_whitespace_tokenizer()
    message = Message.build(text=text)
    message.set(RESPONSE, text)
    message.set(INTENT, text)
    training_data = TrainingData()
    training_data.training_examples = [message]
    tk.process_training_data(training_data)
    for attribute in [RESPONSE, TEXT]:
        tokens = training_data.training_examples[0].get(TOKENS_NAMES[attribute])
        assert [t.text for t in tokens] == expected_tokens
        assert [t.start for t in tokens] == [i[0] for i in expected_indices]
        assert [t.end for t in tokens] == [i[1] for i in expected_indices]
    # check intent attribute: kept as one token
    tokens = training_data.training_examples[0].get(TOKENS_NAMES[INTENT])
    assert [t.text for t in tokens] == [text]
@pytest.mark.parametrize(
    "text, expected_tokens, expected_indices",
    [("Forecast for lunch", ["Forecast", "for", "lunch"], [(0, 8), (9, 12), (13, 18)])],
)
def test_train_tokenizer_e2e_actions(
    text: Text, expected_tokens: List[Text], expected_indices: List[Tuple[int, int]]
):
    # End-to-end training messages: ACTION_TEXT and TEXT are tokenized
    # into words with character offsets.
    tk = create_whitespace_tokenizer()
    message = Message.build(text=text)
    message.set(ACTION_TEXT, text)
    message.set(ACTION_NAME, text)
    training_data = TrainingData()
    training_data.training_examples = [message]
    tk.process_training_data(training_data)
    for attribute in [ACTION_TEXT, TEXT]:
        tokens = training_data.training_examples[0].get(TOKENS_NAMES[attribute])
        assert [t.text for t in tokens] == expected_tokens
        assert [t.start for t in tokens] == [i[0] for i in expected_indices]
        assert [t.end for t in tokens] == [i[1] for i in expected_indices]
@pytest.mark.parametrize(
    "text, expected_tokens, expected_indices",
    [("Forecast for lunch", ["Forecast", "for", "lunch"], [(0, 8), (9, 12), (13, 18)])],
)
def test_train_tokenizer_action_name(
    text: Text, expected_tokens: List[Text], expected_indices: List[Tuple[int, int]]
):
    # NOTE(review): expected_tokens and expected_indices are supplied by the
    # shared parametrize fixture but unused here -- ACTION_NAME is asserted
    # to remain a single token.
    tk = create_whitespace_tokenizer()
    message = Message.build(text=text)
    message.set(ACTION_NAME, text)
    training_data = TrainingData()
    training_data.training_examples = [message]
    tk.process_training_data(training_data)
    # check action_name attribute
    tokens = training_data.training_examples[0].get(TOKENS_NAMES[ACTION_NAME])
    assert [t.text for t in tokens] == [text]
@pytest.mark.parametrize(
    "text, expected_tokens, expected_indices",
    [("Forecast for lunch", ["Forecast", "for", "lunch"], [(0, 8), (9, 12), (13, 18)])],
)
def test_process_tokenizer(
    text: Text, expected_tokens: List[Text], expected_indices: List[Tuple[int, int]]
):
    # Inference-time processing tokenizes the TEXT attribute into words
    # with character offsets.
    tk = create_whitespace_tokenizer()
    message = Message.build(text=text)
    tk.process([message])
    tokens = message.get(TOKENS_NAMES[TEXT])
    assert [t.text for t in tokens] == expected_tokens
    assert [t.start for t in tokens] == [i[0] for i in expected_indices]
    assert [t.end for t in tokens] == [i[1] for i in expected_indices]
@pytest.mark.parametrize(
    "text, expected_tokens", [("action_listen", ["action", "listen"])]
)
def test_process_tokenizer_action_name(text: Text, expected_tokens: List[Text]):
    """Action names containing '_' are split into parts during inference."""
    tokenizer = create_whitespace_tokenizer({"intent_tokenization_flag": True})
    msg = Message.build(text=text)
    msg.set(ACTION_NAME, text)
    tokenizer.process([msg])
    action_tokens = msg.get(TOKENS_NAMES[ACTION_NAME])
    assert [token.text for token in action_tokens] == expected_tokens
@pytest.mark.parametrize(
    "text, expected_tokens", [("I am hungry", ["I", "am", "hungry"])]
)
def test_process_tokenizer_action_test(text: Text, expected_tokens: List[Text]):
    # With ACTION_TEXT set, it is tokenized into words.
    tk = create_whitespace_tokenizer({"intent_tokenization_flag": True})
    message = Message.build(text=text)
    message.set(ACTION_NAME, text)
    message.set(ACTION_TEXT, text)
    tk.process([message])
    tokens = message.get(TOKENS_NAMES[ACTION_TEXT])
    assert [t.text for t in tokens] == expected_tokens
    # After clearing ACTION_TEXT, processing presumably tokenizes
    # ACTION_NAME instead; "I am hungry" has no '_' to split on, so it
    # stays a single token.
    message.set(ACTION_TEXT, "")
    tk.process([message])
    tokens = message.get(TOKENS_NAMES[ACTION_NAME])
    assert [t.text for t in tokens] == [text]
@pytest.mark.parametrize(
    "text, expected_tokens",
    [
        ("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]),
        ("Forecast for LUNCH", ["Forecast for LUNCH"]),
    ],
)
def test_split_intent(text: Text, expected_tokens: List[Text]):
    """Intents split only on the configured symbol ('+'), never on spaces."""
    tokenizer = create_whitespace_tokenizer(
        {"intent_tokenization_flag": True, "intent_split_symbol": "+"}
    )
    message = Message.build(text=text)
    message.set(INTENT, text)
    split_tokens = tokenizer._split_name(message, INTENT)
    assert [token.text for token in split_tokens] == expected_tokens
@pytest.mark.parametrize(
    "text, expected_tokens",
    [
        ("faq/ask_language", ["faq", "ask_language"]),
        ("faq/ask+language", ["faq", "ask", "language"]),
    ],
)
def test_split_intent_response_key(text: Text, expected_tokens: List[Text]):
    """Retrieval intent keys split on '/' and then on the split symbol.

    Type hints added for consistency with the sibling tests in this module.
    """
    component_config = {"intent_tokenization_flag": True, "intent_split_symbol": "+"}
    tk = create_whitespace_tokenizer(component_config)
    message = Message.build(text=text)
    message.set(INTENT_RESPONSE_KEY, text)
    assert [
        t.text for t in tk._split_name(message, attribute=INTENT_RESPONSE_KEY)
    ] == expected_tokens
@pytest.mark.parametrize(
    "token_pattern, tokens, expected_tokens",
    [
        (
            None,
            [Token("hello", 0), Token("there", 6)],
            [Token("hello", 0), Token("there", 6)],
        ),
        (
            "",
            [Token("hello", 0), Token("there", 6)],
            [Token("hello", 0), Token("there", 6)],
        ),
        (
            r"(?u)\b\w\w+\b",
            [Token("role-based", 0), Token("access-control", 11)],
            [
                Token("role", 0),
                Token("based", 5),
                Token("access", 11),
                Token("control", 18),
            ],
        ),
        (
            r".*",
            [Token("role-based", 0), Token("access-control", 11)],
            [Token("role-based", 0), Token("access-control", 11)],
        ),
        (
            r"(test)",
            [Token("role-based", 0), Token("access-control", 11)],
            [Token("role-based", 0), Token("access-control", 11)],
        ),
    ],
)
def test_apply_token_pattern(
    token_pattern: Optional[Text], tokens: List[Token], expected_tokens: List[Token]
):
    """Sub-token splitting via ``token_pattern`` keeps text/offsets in sync.

    ``None``/empty patterns and non-splitting patterns leave tokens
    untouched. (Annotation fixed: the parametrization includes ``None``,
    so the parameter is ``Optional[Text]``, not ``Text``.)
    """
    component_config = {"token_pattern": token_pattern}
    tokenizer = create_whitespace_tokenizer(component_config)
    actual_tokens = tokenizer._apply_token_pattern(tokens)
    assert len(actual_tokens) == len(expected_tokens)
    for actual_token, expected_token in zip(actual_tokens, expected_tokens):
        assert actual_token.text == expected_token.text
        assert actual_token.start == expected_token.start
        assert actual_token.end == expected_token.end
@pytest.mark.parametrize(
    "text, expected_tokens",
    [
        ("Forecast_for_LUNCH", ["Forecast_for_LUNCH"]),
        ("Forecast for LUNCH", ["Forecast for LUNCH"]),
        ("Forecast+for+LUNCH", ["Forecast", "for", "LUNCH"]),
    ],
)
def test_split_action_name(text: Text, expected_tokens: List[Text]):
    """Action names split on the configured '+' symbol only."""
    tokenizer = create_whitespace_tokenizer(
        {"intent_tokenization_flag": True, "intent_split_symbol": "+"}
    )
    message = Message.build(text=text)
    message.set(ACTION_NAME, text)
    name_tokens = tokenizer._split_name(message, ACTION_NAME)
    assert [token.text for token in name_tokens] == expected_tokens
def test_token_fingerprints_are_unique():
    """Tests that distinct tokens produce distinct fingerprints.

    Any difference in text, positions, data payload or lemma must change
    the fingerprint, so the set of fingerprints has one entry per token.
    (Docstring fixed: the previous wording claimed cross-run consistency,
    which this test does not assert.)
    """
    tokens = [
        Token("testing", 2, 9, {"x": 3}, "test"),
        Token("testing", 3, 10, {"x": 3}, "test"),
        Token("working", 2, 9, {"x": 3}, "work"),
        Token("testing", 2, 9, None, "test"),
        Token("testing", 2, 9),
        Token("testing", 3),
    ]
    fingerprints = {t.fingerprint() for t in tokens}
    assert len(fingerprints) == len(tokens)
|
<reponame>Coding618/Django_base
from django.http import HttpResponse
from django.shortcuts import render
from book.models import BookInfo
# Create your views here.
def index(request):
    """Demo view: fetch all books, log the queryset, return a fixed response."""
    books = BookInfo.objects.all()
    print(books)
    return HttpResponse('index')
######### INSERT ################
from book.models import BookInfo

# Method 1: build a model instance, then save() to run the INSERT.
book = BookInfo(
    name='Django',
    pub_date='2000-01-01',
    readcount=10
)
book.save()

# Method 2
# objects manager -- provides create/read/update/delete in one call
BookInfo.objects.create(
    name='测试开发入门',
    pub_date='2020-2-1',
    readcount=100
)

########## UPDATE #########
## Method 1
# select * from book_bookinfo where id=6;
book = BookInfo.objects.get(id=6)
book.name = '爬虫入门'
book.save()  # the change only reaches the database once save() runs

## Method 2: update directly through a filter() queryset
book = BookInfo.objects.filter(id=5).update(name='大数据开发入门', commentcount=999)

########### DELETE ####################
book = BookInfo.objects.get(id=6)
book.delete()
book = BookInfo.objects.get(id=5).delete()
book = BookInfo.objects.filter(id=5).delete()

############## SELECT ##################
# get -- exactly one result, or an exception
try:
    book = BookInfo.objects.get(id=6)  # fixed typo: variable was `booK`
except BookInfo.DoesNotExist:
    print("查询结果不存在")

# all -- every row as a queryset
book = BookInfo.objects.all()
from book.models import PeopleInfo
PeopleInfo.objects.all()

# count -- number of matching rows
BookInfo.objects.all().count()
BookInfo.objects.count()

############### Filtered queries #############################
# filter  -- n matching results
# exclude -- everything that does NOT match the condition
# get     -- a single result
# Model.objects.filter(field__lookup=value)  -> n results, n = 1, 2, 3, ...
# Model.objects.exclude(field__lookup=value) -> n results, n = 1, 2, 3, ...
# Model.objects.get(field__lookup=value)     -> 1 result, or an exception

# The book with id 1
BookInfo.objects.get(id=1)  # shorthand form (field=value)
BookInfo.objects.get(id__exact=1)  # full lookup form
BookInfo.objects.get(pk=1)
BookInfo.objects.get(id=1)
BookInfo.objects.filter(id=1)

# Books whose name contains '湖'
BookInfo.objects.filter(name__contains='湖')
# Books whose name ends with '部'
BookInfo.objects.filter(name__endswith='部')
# Books whose name is NULL
BookInfo.objects.filter(name__isnull=True)
# Books whose id is 1, 3 or 5
BookInfo.objects.filter(id__in=[1, 3, 5])
# Books with id greater than 3
# gt  -- greater than
# gte -- greater than or equal
# lt  -- less than
# lte -- less than or equal
BookInfo.objects.filter(id__gt=3)
# Books whose id is not 3
BookInfo.objects.exclude(id=3)
# Books published in 1980
BookInfo.objects.filter(pub_date__year='1980')
# Books published after 1990-01-01
BookInfo.objects.filter(pub_date__gt='1990-01-01')
BookInfo.objects.filter(id__gt=3)

# Books with more reads than comments (F() compares two columns)
from django.db.models import F
BookInfo.objects.filter(readcount__gt=F('commentcount'))
# Books whose reads are at least double the comments
BookInfo.objects.filter(readcount__gte=F('commentcount') * 2)

# AND queries
# Books with readcount >= 20 AND id <= 3
BookInfo.objects.filter(readcount__gte=20)
BookInfo.objects.filter(readcount__gte=20).filter(id__lte=3)
# equivalently, both conditions inside one filter()
BookInfo.objects.filter(readcount__gte=20, id__lte=3)

# OR queries (Q objects)
# Books with readcount >= 20 OR id <= 3
BookInfo.objects.filter(readcount__gte=20)
from django.db.models import Q
BookInfo.objects.filter(Q(readcount__gte=20) | Q(id__lte=3))

############# Aggregates #########################
from django.db.models import Sum, Max, Min, Avg, Count
# Aggregate functions: Model.objects.aggregate(Func('field'))
BookInfo.objects.aggregate(Sum('readcount'))
BookInfo.objects.order_by('readcount')

# Character info for book 1 via the forward relation
# book = BookInfo.objects.filter(id=1)
people = PeopleInfo.objects.get(id=1)
people.book.name
people.book.readcount

############# Related-object filters ###############
# Books featuring the character "郭靖"
BookInfo.objects.filter(peopleinfo__name__exact='郭靖')
BookInfo.objects.filter(peopleinfo__name='郭靖')
# Books where a character description contains "八"
BookInfo.objects.filter(peopleinfo__description__contains='八')
# Books featuring the character "郭靖"
BookInfo.objects.filter(peopleinfo__name__exact='郭靖')
# All characters of the book named "天龙八部"
PeopleInfo.objects.filter(book__name='天龙八部')
# Characters whose book has readcount > 30
PeopleInfo.objects.filter(book__readcount__gt=30)
# Putting disk data into memory is called caching, e.g. redis
<gh_stars>0
"""
Database connection.
Functions to insert, update and select data.
Needs config setted at authenticate.py file. Exemple present in authenticate.example.py
WRITE OPERATIONS: insert_user, insert_tweet, update_tweet, update_tweet_text_after, auto_update_tweet, update_user
READ OPERATIONS: tweet_list, last_tweets_list, users_list, tweets_attr
"""
import sys
import pymysql
import pymysql.cursors
from helper.authenticate import db_connection_data
class DbConnection(object):
    """MySQL access layer for the `tweet` and `user` tables.

    Connection settings come from helper.authenticate.db_connection_data().
    Write methods commit on success and roll back on failure; read methods
    return fetched rows.
    """

    # # # # # # # # # # # # # # # # CONNECTION # # # # # # # # # # # # # # # #
    def __init__(self):
        try:
            conn_data = db_connection_data()
            self.mysqlCon = pymysql.connect(
                host=conn_data['host'],
                user=conn_data['user'],
                # BUG FIX: the password must come from the connection config
                # like the other fields (a placeholder had replaced it).
                password=conn_data['password'],
                db=conn_data['db'],
                charset=conn_data['charset'],
                cursorclass=pymysql.cursors.DictCursor
            )
        except Exception as e:
            print("Não foi possível estabelecer conexão com o banco!\nERROR:",
                  str(e))
            exit()

    # # # # # # # # # # # # # # # WRITE OPERATIONS # # # # # # # # # # # # # #
    def insert_user(self, user):
        """Insert a user row; duplicate user_ids are ignored.

        Returns True when the statement ran, False when `user` has no 'id'.
        """
        if 'id' not in user:
            return False
        user_id = user["id"]
        user_name = user["name"]
        user_screen_name = user["screen_name"]
        user_following = user["friends_count"]
        user_language = user["lang"]
        # The context manager closes the cursor; no explicit close needed.
        with self.mysqlCon.cursor() as cur:
            sql = """INSERT INTO user (user_id, user_name,
                     user_screen_name, user_following, user_language)
                     VALUES (%s, %s, %s, %s, %s)
                     ON DUPLICATE KEY UPDATE user_id = user_id"""
            cur.execute(sql, (user_id, user_name, user_screen_name,
                              user_following, user_language))
            self.mysqlCon.commit()
        return True

    def insert_tweet(self, tweet):
        """Insert a tweet row plus derived text features.

        Existing tweet_ids are left untouched. Returns True when `tweet`
        carries an 'id', False otherwise.
        """
        if 'id' not in tweet:
            return False
        tweet_id = tweet["id"]
        tweet_text = tweet["text"]
        tweet_datetime = tweet["created_at"]
        tweet_language = tweet["lang"]
        tweet_retweets = tweet["retweet_count"]
        tweet_likes = tweet["favorite_count"]
        tweet_media = tweet['has_media']
        tweet_streamed = tweet['streamed']
        tweet_polarity = round(tweet["polarity"], 6)
        tweet_subjectivity = round(tweet["subjectivity"], 6)
        # Derived features: URL/hashtag presence, "RT" prefix, text length.
        tweet_url = 0 if tweet_text.find('http') == -1 else 1
        tweet_hashtag = 0 if tweet_text.find('#') == -1 else 1
        tweet_RT = 0 if tweet_text.find('RT', 0, 2) == -1 else 1
        tweet_size = len(tweet_text)
        user_id = tweet["user_id"]
        user_tweet_counter = tweet["statuses_count"]
        user_followers = tweet["followers_count"]
        with self.mysqlCon.cursor() as cur:
            sql = "SELECT `tweet_id` FROM `tweet` WHERE `tweet_id` = %s"
            # FIX: execute() parameters should be a sequence; (tweet_id)
            # is just a parenthesised scalar, (tweet_id,) is a tuple.
            cur.execute(sql, (tweet_id,))
            result = cur.fetchone()
        if result is None:
            with self.mysqlCon.cursor() as cur:
                sql = """INSERT INTO tweet (tweet_id, tweet_text,
                         tweet_datetime, tweet_language, tweet_retweets,
                         tweet_likes, tweet_polarity, tweet_subjectivity,
                         tweet_url, tweet_hashtag, tweet_media,
                         tweet_streamed, tweet_RT, tweet_size, user_id,
                         user_tweet_counter, user_followers)
                         VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
                         %s, %s, %s, %s, %s)
                         ON DUPLICATE KEY UPDATE tweet_id=tweet_id"""
                cur.execute(sql, (tweet_id, tweet_text, tweet_datetime,
                                  tweet_language, tweet_retweets,
                                  tweet_likes, tweet_polarity,
                                  tweet_subjectivity, tweet_url,
                                  tweet_hashtag, tweet_media,
                                  tweet_streamed, tweet_RT, tweet_size,
                                  user_id, user_tweet_counter,
                                  user_followers))
                self.mysqlCon.commit()
        return True

    def update_tweet(self, tweet_id, deleted=0, media=0, retweets=-1, likes=-1,
                     text='', text_after='', ban_100=-1, ban_1k=-1, ban_3k=-1):
        """Overwrite a tweet row's mutable columns.

        Returns "Ok" on success, otherwise an error description (and the
        transaction is rolled back).
        """
        sql = """UPDATE tweet SET deleted = %s, tweet_media = %s,
                 tweet_retweets = %s, tweet_likes = %s,
                 tweet_text = %s, tweet_text_after = %s,
                 tweet_ban_100 = %s, tweet_ban_1000 = %s,
                 tweet_ban_3000 = %s
                 WHERE tweet_id = %s"""
        cur = self.mysqlCon.cursor()
        try:
            cur.execute(sql, (deleted, media, retweets, likes, text,
                              text_after, ban_100, ban_1k, ban_3k, tweet_id))
            self.mysqlCon.commit()
            result = "Ok"
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed; message typo fixed.
            result = 'EXCEPTION occurred!' + str(sys.exc_info()[1])
            self.mysqlCon.rollback()
        cur.close()
        return result

    def update_tweet_text_after(self, tweet_id, text_after=''):
        """Update only the tweet_text_after column for one tweet.

        Returns "Ok" on success, otherwise an error description.
        """
        sql = "UPDATE tweet SET tweet_text_after = %s WHERE tweet_id = %s"
        cur = self.mysqlCon.cursor()
        try:
            cur.execute(sql, (text_after, tweet_id))
            self.mysqlCon.commit()
            result = "Ok"
        except Exception:
            self.mysqlCon.rollback()
            result = 'EXCEPTION occurred!' + str(sys.exc_info()[1])
        cur.close()
        return result

    def auto_update_tweet(self):
        """Recompute derived tweet columns (url/hashtag/RT flags, size buckets)
        for the whole table in bulk SQL, committing once at the end.
        """
        cur = self.mysqlCon.cursor()
        try:
            print("Updating tweet text to remove the borring emoji '⃣'...")
            cur.execute("UPDATE tweet SET \
                tweet_text_after = REPLACE(tweet_text_after, '⃣', '') \
                WHERE tweet_text_after like '%⃣%'")
            print("Updating usage of URLs...")
            cur.execute("UPDATE tweet AS t SET tweet_url = 0 \
                WHERE tweet_text NOT LIKE '%http%'")
            cur.execute("UPDATE tweet AS t SET tweet_url = 1 \
                WHERE tweet_text LIKE '%http%'")
            print("Updating usage of Hashtags...")
            cur.execute("UPDATE tweet SET tweet_hashtag = 0 \
                WHERE tweet_text NOT LIKE '%#%'")
            cur.execute("UPDATE tweet SET tweet_hashtag = 1 \
                WHERE tweet_text LIKE '%#%'")
            print("Updating tweets when they are retweets...")
            cur.execute("UPDATE tweet SET tweet_RT = 0 \
                WHERE tweet_text NOT LIKE 'RT @%'")
            cur.execute("UPDATE tweet SET tweet_RT = 1 \
                WHERE tweet_text LIKE 'RT @%'")
            print("Updating the size range of each message...")
            cur.execute("UPDATE tweet SET tweet_size = 0 \
                WHERE LENGTH(tweet_text) = 0")
            # Bucket sizes in steps of 10 characters up to 250 ...
            for i in range(1, 26):
                j = i * 10
                cur.execute("UPDATE tweet SET tweet_size = {0} \
                    WHERE LENGTH(tweet_text) <= {0} \
                    AND LENGTH(tweet_text) > {1}".format(j, j - 10))
            # ... and cap the last bucket at 255.
            cur.execute("UPDATE tweet SET tweet_size = 255 \
                WHERE LENGTH(tweet_text) <= 255 AND LENGTH(tweet_text) > 250")
            self.mysqlCon.commit()
        except Exception:
            self.mysqlCon.rollback()
            print('EXCEPTION occurred! ' + str(sys.exc_info()[1]))
        cur.close()

    def update_user(self, user_id, info):
        """Update a user's profile columns from the `info` dict.

        Returns "Ok" on success, otherwise an error description.
        """
        sql = """UPDATE user SET user_following = %s,
                 user_followers = %s, user_created_at = %s,
                 user_location = %s, user_description = %s
                 WHERE user_id = %s"""
        cur = self.mysqlCon.cursor()
        try:
            cur.execute(sql, (info['following'], info['followers'],
                              info['created_at'], info['location'],
                              info['description'], user_id))
            self.mysqlCon.commit()
            result = "Ok"
        except Exception:
            result = 'EXCEPTION occurred!' + str(sys.exc_info()[1])
            self.mysqlCon.rollback()
        cur.close()
        return result

    # # # # # # # # # # # # # # # READ OPERATIONS # # # # # # # # # # # # # # #
    def tweet_list(self, where=''):
        """Return tweet rows; `where` is appended verbatim to the query.

        SECURITY NOTE: `where` is concatenated into the SQL string, so it
        must never contain untrusted input.
        """
        sql = """SELECT tweet_id as id, tweet_text as txt,
                 tweet_language as lang, tweet_retweets as retweets,
                 tweet_likes as likes, deleted
                 FROM tweet """ + where
        cur = self.mysqlCon.cursor()
        cur.execute(sql)
        result = cur.fetchall()
        cur.close()
        return result

    def last_tweets_list(self):
        """Return, per user, their most recent streamed tweet together with
        counters over their non-streamed tweets."""
        sql = """SELECT u.user_id, u.user_name, t.tweet_id,
                 t.user_tweet_counter as tweet_counter,
                 (SELECT MAX(t3.user_tweet_counter) FROM tweet AS t3 WHERE t3.user_id = t.user_id AND t3.tweet_streamed = 0) AS counter_max,
                 (SELECT COUNT(*) FROM tweet AS t3 WHERE t3.user_id = t.user_id AND t3.tweet_streamed = 0) AS counter_diff,
                 (SELECT MIN(t3.tweet_id) FROM tweet AS t3 WHERE t3.user_id = t.user_id AND t3.tweet_streamed = 0) AS max_id
                 FROM tweet as t
                 JOIN user as u on u.user_id = t.user_id
                 WHERE tweet_id in (select MAX(t2.tweet_id) from tweet as t2 where t2.tweet_streamed = 1 group by t2.user_id)
                 GROUP BY user_id"""
        cur = self.mysqlCon.cursor()
        cur.execute(sql)
        result = cur.fetchall()
        cur.close()
        return result

    def users_list(self, where=''):
        """Return a {user_id: user_name} dict.

        SECURITY NOTE: `where` is concatenated verbatim into the SQL string.
        """
        sql = """SELECT * FROM user """ + where
        cur = self.mysqlCon.cursor()
        cur.execute(sql)
        allUsers = cur.fetchall()
        cur.close()
        return {user['user_id']: user['user_name'] for user in allUsers}

    def tweets_attr(self, rate, user_id=0, counter=0):
        """Return tweet feature rows for English tweets, labelling a tweet
        'popular' when (likes+retweets)/followers*100 exceeds `rate`.

        With `counter` truthy, returns popularity class counts instead.
        """
        # Users that must be ignored for some reason
        ignored_users = "822215679726100480, 128372940, 25521487"
        sql = """SELECT
                 t.tweet_text as full_txt,
                 t.tweet_text_after as txt,
                 IF(t.tweet_polarity IS NOT NULL, CAST(t.tweet_polarity AS DEC(4,2)), 0.00) AS polarity,
                 IF(t.tweet_url = 1, 1, 0) as url,
                 IF(t.tweet_hashtag = 1, 1, 0) as hashtag,
                 IF(t.tweet_RT = 1, 1, 0) as RT,
                 t.tweet_size,
                 IF(t.tweet_ban_3000 IS NOT NULL, CAST(t.tweet_ban_3000 AS DEC(4,2)), 0.00) AS banality,
                 IF(((t.tweet_likes+t.tweet_retweets)/u.user_followers*100)>{}, 1, 0) as popular
                 FROM tweet as t
                 JOIN user as u ON t.user_id = u.user_id
                 WHERE t.tweet_language = 'en'
                 AND t.tweet_text_after != ''
                 AND t.user_id not in ({})""".format(rate, ignored_users)
        if user_id != 0:
            sql = sql + " AND t.user_id = {} ".format(user_id)
        if counter:
            sql = "SELECT popular, count(*) as count \
                   FROM (" + sql + ") as test GROUP BY popular ORDER BY popular"
        cur = self.mysqlCon.cursor()
        cur.execute(sql)
        result = cur.fetchall()
        cur.close()
        return result
|
# Title: Utilities for talking to a Corelatus GTH from python
# Author: <NAME> (<EMAIL>)
#
import sys
from sys import stderr
from transport import API_socket
import socket
class API:
    """Wrapper around the Corelatus GTH text/XML command API.

    NOTE: this module is written for Python 2 (tuple-style replies come
    from transport.API_socket); changes below stay compatible with both
    Python 2 and 3 where the surrounding code allows it.
    """

    def __init__(self, gth_ip_or_hostname, verbosity=0):
        """
        verbosity=0: keep mostly quiet
        verbosity=1: print event counts
        verbosity=2: print events
        verbosity=3: print all commands, responses and events
        """
        self.verbosity = verbosity
        self.socket = API_socket(gth_ip_or_hostname)

    def bye(self):
        "Terminate the API session"
        self.send("<bye/>")
        self.check_ok("bye")

    def delete(self, ID):
        "Delete the given job"
        self.send("<delete id='%s'/>" % ID)
        self.check_ok("delete")

    def disable(self, name):
        "Disable an E1/T1 or SDH/SONET interface"
        self.send("<disable name='%s'/>" % name)
        self.check_ok("disable")

    def enable(self, name, attributes):
        "Enable an E1/T1 or SDH/SONET interface"
        self.send("<enable name='%s'>%s</enable>"
                  % (name, format_attributes(attributes)))
        self.check_ok("enable")

    def map(self, Type, Name):
        "Map (assign a name) an E1/T1 carried on an SDH/SONET interface"
        if Type != "pcm_source":
            raise SemanticError("tried to map something other than a pcm_source")
        self.send("<map target_type='pcm_source'>" \
                  "<sdh_source name='%s'/></map>" % Name)
        reply, _events = self.next_non_event()
        if reply[0] != "resource":
            # BUG FIX: `reply` is a parsed structure, not a string, and the
            # old code referenced an undefined name `command` here.
            stderr.write("%s\n" % (reply,))
            se = ("should have returned a resource", "map", reply)
            raise SemanticError(se)
        # NOTE(review): assumes the parsed reply exposes a .name attribute;
        # confirm against the parser in transport.py.
        print(reply.name)

    def new_atm_aal5_monitor(self, span, timeslot_list, vpi_vci, opts=None):
        """Returns a (job_id, socket) tuple.

        Monitor ATM AAL5 on a GTH. `vpi_vci` is a (vpi, vci) pair. The
        socket returned uses the format defined in the GTH API manual,
        under new_atm_aal5_monitor.
        """
        # BUG FIX: `opts={}` was a shared mutable default; the ip/port/
        # vpi/vci written below leaked into subsequent calls. Also, the
        # former `(vpi, vci)` tuple parameter is Python-2-only syntax;
        # unpacking in the body keeps callers unchanged on both versions.
        if opts is None:
            opts = {}
        vpi, vci = vpi_vci
        IP, _api_port = self.socket._socket.getsockname()
        port, ls = tcp_listen()
        opts['ip_addr'] = IP
        opts['ip_port'] = "%d" % port
        opts['vpi'] = "%d" % vpi
        opts['vci'] = "%d" % vci
        self.send("<new><atm_aal5_monitor %s>%s" \
                  "</atm_aal5_monitor></new>"\
                  % (options(opts), sources(span, timeslot_list)))
        aal5_id, _ignored_events = self.receive_job_id()
        data, _remote_address = ls.accept()
        ls.close()
        return (aal5_id, data)

    def new_fr_monitor(self, span, timeslots):
        """Returns a (job_id, socket) tuple. Monitor Frame Relay on a
        GTH. Socket returned uses the format defined in the GTH API
        manual, under new_fr_monitor."""
        IP, _api_port = self.socket._socket.getsockname()
        port, ls = tcp_listen()
        self.send("<new><fr_monitor ip_addr='%s' ip_port='%s'>"\
                  "%s"\
                  "</fr_monitor></new>"\
                  % (IP, port, sources(span, timeslots)))
        fr_id, _ignored_events = self.receive_job_id()
        data, _remote_address = ls.accept()
        ls.close()
        return (fr_id, data)

    def new_mtp2_monitor(self, span, timeslot):
        """Returns a (job_id, socket) tuple.
        Monitor MTP-2 on a GTH. Socket returned uses the format defined in
        the GTH API manual, under new_mtp2_monitor."""
        IP, _api_port = self.socket._socket.getsockname()
        port, ls = tcp_listen()
        self.send("<new><mtp2_monitor ip_addr='%s' ip_port='%s'>"\
                  "<pcm_source span='%s' timeslot='%d'/>"\
                  "</mtp2_monitor></new>"\
                  % (IP, port, span, timeslot))
        mtp2_id, _ignored_events = self.receive_job_id()
        data, _remote_address = ls.accept()
        ls.close()
        return (mtp2_id, data)

    def new_player(self, span, timeslot):
        """Returns a (job_id, socket) tuple.
        Create a timeslot player on a GTH."""
        IP, _api_port = self.socket._socket.getsockname()
        port, ls = tcp_listen()
        self.send("<new><player>" \
                  "<tcp_source ip_addr='%s' ip_port='%d'/>"\
                  "<pcm_sink span='%s' timeslot='%d'/>" \
                  "</player></new>"\
                  % (IP, port, span, timeslot))
        player_id, _ignored_events = self.receive_job_id()
        data, _remote_address = ls.accept()
        ls.close()
        return (player_id, data)

    def new_recorder(self, span, timeslot):
        """Returns a (job_id, socket) tuple.
        Create a timeslot recorder on a GTH."""
        IP, _api_port = self.socket._socket.getsockname()
        port, ls = tcp_listen()
        self.send("<new><recorder>"\
                  "<pcm_source span='%s' timeslot='%d'/>"\
                  "<tcp_sink ip_addr='%s' ip_port='%d'/>"\
                  "</recorder></new>"\
                  % (span, timeslot, IP, port))
        recorder_id, _ignored_events = self.receive_job_id()
        data, _remote_address = ls.accept()
        ls.close()
        return (recorder_id, data)

    def new_v110_monitor(self, span, timeslot, first_bit, n_bits, ra0="no"):
        """Returns a (job_id, socket) tuple.
        Monitor V.110. Socket returned uses the format defined in
        the GTH API manual, under new_v110_monitor."""
        IP, _api_port = self.socket._socket.getsockname()
        port, ls = tcp_listen()
        self.send("<new>"\
                  "<v110_monitor ip_addr='%s' ip_port='%s' rate='%d' ra0='%s'>"\
                  "<pcm_source span='%s' timeslot='%d'"\
                  " first_bit='%d' bandwidth='%d'/>"\
                  "</v110_monitor></new>"\
                  % (IP, port, 4800 * n_bits, ra0,\
                     span, timeslot, first_bit, n_bits * 8))
        id, _ignored_events = self.receive_job_id()
        data, _remote_address = ls.accept()
        ls.close()
        return (id, data)

    def query_resource(self, name):
        """Returns a dict of attributes
        Query a GTH resource. For the special resource "inventory",
        returns a list of resource names instead."""
        self.send("<query><resource name='%s'/></query>" % name)
        reply, _events = self.next_non_event()
        if reply[0] != "state":
            raise SemanticError(("query failed", reply))
        if name == "inventory":
            # The reply alternates markers and (tag, name) pairs; collect
            # every second element's name field.
            result = []
            reply.pop(0)
            while len(reply) >= 2:
                reply.pop(0)
                result.append(reply.pop(0)[1])
            return result
        else:
            return reply[3]

    def reset(self):
        "Reset (reboot) the GTH"
        self.send("<reset><resource name='cpu'/></reset>")
        self.check_ok("reset")

    def set(self, name, attributes):
        "Set attributes on a resource"
        self.send("<set name='%s'>%s</set>"
                  % (name, format_attributes(attributes)))
        self.check_ok("set")

    def unmap(self, Name):
        "Unmap a resource"
        self.send("<unmap name='%s'/>" % Name)
        self.check_ok("unmap")

    def zero_job(self, id):
        "Clear the counters on a job"
        self.send("<zero><job id='%s'/></zero>" % id)
        self.check_ok("zero")

    def zero_resource(self, name):
        "Clear the counters on a resource"
        self.send("<zero><resource name='%s'/></zero>" % name)
        self.check_ok("zero")

    #---- The remaining functions are primarily intended for internal
    #     use. They're also useful for implementing new commands.

    def send(self, XML):
        "Send a raw XML command, echoing it at verbosity >= 3"
        if self.verbosity >= 3:
            stderr.write("C: %s\n" % XML)
        self.socket.send(XML)

    def next_non_event(self):
        """Return a tuple (answer, events).
        Answer is the next reply from the GTH and events is a list of all
        asynchronous data before that"""
        events = []
        while True:
            answer = self.socket.receive()
            if answer[0] == 'event':
                if self.verbosity >= 2:
                    stderr.write("G: %s\n" % answer)
                events.append(answer)
            else:
                if self.verbosity == 1:
                    stderr.write("G: skipping %d events\n" % len(events))
                if self.verbosity >= 3:
                    stderr.write("G: %s\n" % answer)
                return (answer, events)

    def next_event(self):
        """Block, waiting for an event
        Return that event"""
        return self.socket.receive()

    def check_ok(self, command):
        "Raise SemanticError unless the next reply is an ok"
        reply, _events = self.next_non_event()
        if reply[0] != "ok":
            stderr.write("expected OK, got %s\n" % reply)
            se = ("should have returned OK", command, reply)
            raise SemanticError(se)

    def receive_job_id(self):
        """Return a tuple (ID, events)
        If the next reply from the GTH is not a jobId, we raise SemanticError"""
        answer, events = self.next_non_event()
        if answer[0] == 'job':
            return (answer[1][1], events)
        else:
            raise SemanticError(answer)
def options(opts):
    """Return an XML attribute string (" k='v' ...") for a dict of options.

    The parameter and accumulator were renamed: the originals shadowed the
    builtins `dict` and `list`.
    """
    result = ""
    for key, val in opts.items():
        result += " " + key + "='" + val + "'"
    return result
def sources(span, timeslots):
    """Return concatenated XML <pcm_source> elements, one per timeslot."""
    elements = ["<pcm_source span='%s' timeslot='%d'/>" % (span, ts)
                for ts in timeslots]
    return "".join(elements)
def tcp_listen():
    """Create a server socket, i.e. one which listens.

    Returns (port_number, socket); the caller owns the socket and must
    close it.
    """
    # Explicit SOCK_STREAM instead of relying on the default socket type.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))  # port 0: let the OS pick a free ephemeral port
    s.listen(1)
    _addr, port = s.getsockname()
    return (port, s)
def udp_listen():
    """Bind a UDP socket to an OS-chosen port; returns (port_number, socket)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("", 0))
    _host, bound_port = sock.getsockname()
    return (bound_port, sock)
def format_attribute(pair):
    """Return an XML <attribute> element for a (key, value) pair.

    The original used Python-2-only tuple parameter unpacking; unpacking
    inside the body keeps the same call signature on Python 2 and 3.
    """
    key, value = pair
    return "<attribute name='" + key + "' value='" + value + "'/>"
def format_attributes(attributes):
    """Concatenate XML <attribute> elements for a list of (key, value) pairs.

    Parameter renamed: the original shadowed the builtin `list`.
    """
    return "".join(format_attribute(pair) for pair in attributes)
class SemanticError(Exception):
    """Raised when the GTH returns an unexpected or failure reply.

    The offending reply (or a descriptive tuple) is kept in ``clue``.
    """

    def __init__(self, clue):
        # Forward to Exception so str(error) and tracebacks show the clue
        # (the original left the base class uninitialised, so str() was empty).
        super(SemanticError, self).__init__(clue)
        self.clue = clue
|
"""
Website tester utility for magna.
"""
import httpx
# API_URL is the online service of the api
# Update this with your own.
# [REMOVE TRAILING SLASH]
API_URL = "https://magna-sc.cf"
# set of sample website links to request
# if the api returns OK
# Each entry maps a scraper name to one known-good manga page and one
# known-good chapter page on that site. Commented-out entries are sites
# that are currently disabled or have no sample URLs yet.
sample_requests = {
    "manganelo": {
        "manga": "https://manganelo.com/manga/pn918005",
        "chapter": "https://manganelo.com/chapter/pn918005/chapter_140",
    },
    "mangakakalot": {
        "manga": "https://mangakakalot.com/read-ce9by158524526578",
        "chapter": "https://mangakakalot.com/chapter/ke922068/chapter_116",
    },
    "bulumanga": {
        "manga": "https://ww6.bulumanga.net/solo-leveling",
        "chapter": "https://ww6.bulumanga.net/solo-leveling-chap-140",
    },
    "manhwa18": {
        "manga": "https://manhwa18.com/manga-that-mans-epilepsy.html",
        "chapter": "https://manhwa18.com/read-sex-exercise-chapter-46.html",
    },
    "hiperdex": {
        "manga": "https://hiperdex.com/manga/black-lagoon-engl/",
        "chapter": "https://hiperdex.com/manga/black-lagoon-engl/chapter-108/",
    },
    "webtoon": {
        "manga": "https://www.webtoon.xyz/read/terror-man/",
        "chapter": "https://www.webtoon.xyz/read/terror-man/chapter-176/",
    },
    "isekaiscan": {
        "manga": "https://isekaiscan.com/manga/drifters/",
        "chapter": "https://isekaiscan.com/manga/drifters/vol-07/ch-081/",
    },
    # "toonily": {
    #     "manga": "https://toonily.com/webtoon/president-is-my-neighbor-cousin/",
    #     "chapter": "https://toonily.com/webtoon/president-is-my-neighbor-cousin/chapter-41/",
    # },
    # "dark-scans": {
    #     "manga": "", "chapter": ""
    # },
    "mangatx": {
        "manga": "https://mangatx.com/manga/i-became-the-ugly-lady/",
        "chapter": "https://mangatx.com/manga/i-became-the-ugly-lady/chapter-21/",
    },
    "pmscans": {
        "manga": "https://www.pmscans.com/manga/shark/",
        "chapter": "https://www.pmscans.com/manga/shark/chapter-21/",
    },
    "asurascans": {
        "manga": "https://asurascans.com/comics/worn-and-torn-newbie/",
        "chapter": "https://asurascans.com/worn-and-torn-newbie-chapter-30/",
    },
    "leviatanscans": {
        "manga": "https://leviatanscans.com/manga/the-throne/",
        "chapter": "https://leviatanscans.com/manga/the-throne/30/",
    },
    "reaperscans": {
        "manga": "https://reaperscans.com/comics/371413-return-to-player",
        "chapter": "https://reaperscans.com/comics/371413-return-to-player/1/25",
    },
    "skscans": {
        "manga": "https://skscans.com/manga/chronicles-of-heavenly-demon/",
        "chapter": "https://skscans.com/manga/chronicles-of-heavenly-demon/137/",
    },
    "merakiscans": {
        "manga": "https://merakiscans.com/manga/the-last-human/",
        "chapter": "https://merakiscans.com/manga/the-last-human/300/",
    },
    "manhwatop": {
        "manga": "https://manhwatop.com/manga/the-evil-lady-will-change",
        "chapter": "https://manhwatop.com/manga/the-evil-lady-will-change/chapter-59/",
    },
    "mangapark": {
        "manga": "https://mangapark.net/manga/kaguya-sama-wa-kokurasetai-tensai-tachi-no-renai-zunousen-akasaka-aka",
        "chapter": "https://mangapark.net/manga/kaguya-sama-wa-kokurasetai-tensai-tachi-no-renai-zunousen-akasaka-aka/i2655523/c217/1",
    },
    "methodscans": {
        "manga": "https://methodscans.com/comics/741529-murim-login",
        "chapter": "https://methodscans.com/comics/741529-murim-login/1/58",
    },
    "flamescans": {
        "manga": "https://www.flame-scans.com/manga/mookhyang-dark-lady/",
        "chapter": "https://www.flame-scans.com/mookhyang-chapter-78/",
    },
    "aloalivn": {
        "manga": "https://aloalivn.com/manga/forced-to-become-the-villains-son-in-law/",
        "chapter": "https://aloalivn.com/manga/forced-to-become-the-villains-son-in-law/chapter-66/",
    },
    "manhuafast": {
        "manga": "https://manhuafast.com/manga/your-ancestor-is-online/",
        "chapter": "https://manhuafast.com/manga/your-ancestor-is-online/chapter-19/",
    },
    # "s2manga": {
    #     "manga": "https://s2manga.com/manga/this-is-an-obvious-fraudulent-marriage/",
    #     "chapter": "https://s2manga.com/manga/this-is-an-obvious-fraudulent-marriage/chapter-68/",
    # },
    "manga68": {
        "manga": "https://manga68.com/manga/gang-of-school/",
        "chapter": "https://manga68.com/manga/gang-of-school/chapter-8/",
    },
    "manhwamanga": {
        "manga": "https://manhwamanga.net/my-friend-came-back-from-the-future-to-fuck-me.html",
        "chapter": "https://manhwamanga.net/my-friend-came-back-from-the-future-to-fuck-me/chapter-22.html",
    },
    "1stkissmanga": {
        "manga": "https://1stkissmanga.com/manga/legend-of-the-northern-blade/",
        "chapter": "https://1stkissmanga.com/manga/legend-of-the-northern-blade/chapter-77/",
    },
    "mangarockteam": {
        "manga": "https://mangarockteam.com/manga/ta-sui-xian-he/",
        "chapter": "https://mangarockteam.com/manga/ta-sui-xian-he/chapter-101/",
    },
    "secretscans": {
        "manga": "https://secretscans.co/comics/460420-poison-dragon-the-legend-of-an-asura",
        "chapter": "https://secretscans.co/comics/460420-poison-dragon-the-legend-of-an-asura/1/14",
    },
    "zeroscans": {
        "manga": "https://zeroscans.com/comics/188504-second-life-ranker",
        "chapter": "https://zeroscans.com/comics/188504-second-life-ranker/1/77",
    },
    "mm-scans": {
        "manga": "https://mm-scans.com/manga/white-blood",
        "chapter": "https://mm-scans.com/manga/white-blood/chapter-62/",
    },
}
def get_manga(website: str) -> bool:
    """
    API Manga website scraper checker.

    Queries the API's /manga endpoint for *website* and reports whether the
    request (and JSON decoding of its response) succeeded.
    """
    try:
        httpx.get(f"{API_URL}/manga?q={website}").json()
    except Exception:
        # request failed or the response was not valid JSON -> scraper broken
        return False
    return True
def get_chapter(website: str) -> bool:
    """
    API Manga Chapter website scraper checker.

    Queries the API's /manga/chapters endpoint for *website* and reports
    whether the request (and JSON decoding of its response) succeeded.
    """
    try:
        httpx.get(f"{API_URL}/manga/chapters?q={website}").json()
    except Exception:
        # request failed or the response was not valid JSON -> scraper broken
        return False
    return True
def test_website(name: str, samples: dict) -> bool:
    """
    Test if the API's website scraper works with different sample urls and links.

    Prints a colored OK/FAILED line for the manga and the chapter query and
    returns True only when both succeed.
    """
    ok_label = "\033[32m OK \033[00m"
    fail_label = "\033[31m FAILED \033[00m"
    # check manga query
    manga_ok = get_manga(samples["manga"])
    print(f"{name} =>\n\t\t ====> manga: {ok_label if manga_ok else fail_label}")
    # check chapter query
    chapter_ok = get_chapter(samples["chapter"])
    print(f"{name} =>\n\t\t ====> chapter: {ok_label if chapter_ok else fail_label}")
    return manga_ok and chapter_ok
if __name__ == "__main__":
    # Run every scraper check when the script is invoked directly.
    # (A comment replaces the original bare triple-quoted string, which was
    # a no-op statement evaluated and discarded at runtime.)
    fails = 0
    for name, samples in sample_requests.items():
        print(f"\n\033[93mChecking:: {name.upper()}\033[00m\n")
        if not test_website(name, samples):
            fails += 1
    # check if there are fails
    if fails > 0:
        raise ValueError("Some website scrapers have failed...")
|
# parental-control.py
# NOTE: This file should be copied to /etc/parental-control directory
import sys
import datetime
import subprocess
import os
# Column indices into each row of the configuration table (cfg_table below).
COL_USER = 0  # username the row applies to
COL_FUNC = 1  # limited function name: "login" or "http"
COL_WKDY = 2  # first of the seven per-day allowance columns
# Per-day columns, labelled Sunday..Saturday.
# NOTE(review): get_timeallowed_user indexes these with datetime.weekday(),
# which is Monday=0 — so Monday reads the COL_SU column. Confirm whether the
# config file's day columns actually start with Monday.
COL_SU = COL_WKDY
COL_MO = COL_SU + 1
COL_TU = COL_SU + 2
COL_WE = COL_SU + 3
COL_TH = COL_SU + 4
COL_FR = COL_SU + 5
COL_SA = COL_SU + 6
# F U N C T I O N S
def get_cfg_file_path(cfg_file):
    """Return the directory part of the configuration file's path."""
    directory, _basename = os.path.split(cfg_file)
    return directory
def stop_internet_connection():
    """Cut network access by flushing all iptables rules.

    NOTE(review): `iptables -F` removes every rule; this only blocks traffic
    if the chains' default policy is DROP — confirm against the rules file
    used by restore_internet_connection().
    """
    subprocess.run(["iptables", "-F"])
def restore_internet_connection(res_file):
    """Restore iptables rules (with packet counters) from *res_file*.

    Flushes the current rules first, then loads the saved rule set.
    """
    print("res_file = ", res_file)
    stop_internet_connection()
    # BUG FIX: the original passed an argv list containing a "<" token while
    # also setting shell=True — with a list, the shell receives only the first
    # element as the command, so the redirection never happened and no rules
    # were loaded. iptables-restore reads from stdin, so feed it the file.
    with open(res_file) as rules:
        subprocess.run(["iptables-restore", "-c"], stdin=rules, timeout=1)
def get_current_user():
    """Return the username from the last line of `who` output."""
    who_output = subprocess.run(["who"], stdout=subprocess.PIPE).stdout.decode('utf-8')
    last_line = who_output.splitlines()[-1]
    return last_line.split()[0]
def logout_user(user):
    """Force-logout *user* by killing all of their processes via skill."""
    kill_cmd = ["skill", "-KILL", "-u", user]
    subprocess.run(kill_cmd)
def get_timespent_by_user(user, ac_output):
    """Return the minutes *user* has been logged in, parsed from `ac -dp` output.

    Scans the output bottom-up, skips the "Today" summary line and converts
    the hours field of the first matching entry to minutes; returns 0 when
    the user does not appear.
    NOTE(review): `user in entry` is a substring test, so a username that is
    a prefix of another could match the wrong line — confirm acceptable.
    """
    for entry in reversed(ac_output):
        if "Today" in entry:
            continue
        if user in entry:
            return float(entry.split()[1]) * 60
    return 0
def get_timeallowed_user(user, function, cfg_table):
    """Look up today's allowed minutes for (*user*, *function*) in the table.

    Falls back to a full week's worth of minutes (effectively unlimited)
    when no matching row exists.
    NOTE(review): datetime.weekday() is Monday=0, so Monday selects the
    COL_SU column — verify the config file's day-column order.
    """
    today_column = datetime.datetime.today().weekday() + COL_WKDY
    for row in cfg_table:
        if row[COL_USER] == user and row[COL_FUNC] == function:
            return float(row[today_column])
    return 7 * 24 * 60.0
# Commandline Argument: this, config-file, log-file
CMDL_ARGS = 2
if len(sys.argv) >= CMDL_ARGS:
    cfg_file = sys.argv[1]
    cfg_path = get_cfg_file_path(cfg_file)
    username = get_current_user()
else:
    print("Not enough arguments passed (", len(sys.argv), " >= ", CMDL_ARGS, ")")
    print(sys.argv)
    # NOTE(review): exit(-1) reports status 255 to the shell; sys.exit(1) is conventional
    exit(-1)
# Read configurations from configuration file
with open(cfg_file) as f:
    contents = f.readlines()
cfg_table = []
for line in contents:
    # whitespace is stripped only to detect blank and comment lines; the
    # unstripped line is what actually gets tokenized into the table row
    sline = line.replace(" ", "").replace("\t", "").replace("\n", "")
    if len(sline) == 0:
        continue
    if sline[0] == '#':
        continue
    cfg_table.append(line.split())
# Find out how many minutes the user had already spent
ac_output = subprocess.run(["ac", "-dp"], stdout=subprocess.PIPE).stdout.decode('utf-8').split('\n')
time_spent = get_timespent_by_user(username, ac_output)
print("time_spent by", username, "=", time_spent, "minutes")
# Find out how many minutes the user is allowed today
login_allowed = get_timeallowed_user(username, "login", cfg_table)
brows_allowed = get_timeallowed_user(username, "http", cfg_table)
print("time_allowed: login =", login_allowed, "browsing =", brows_allowed, "minutes")
# Check and take actions
# Only users with an actual daily limit (< 24h) get the restore/enforce cycle;
# unrestricted users (the 7*24*60 default) are left untouched.
if login_allowed < 24 * 60.0:
    print("Restore connections (if stopped in previous login)")
    restore_internet_connection(cfg_path + "/parental-control.ip.rules.v4")
    if time_spent > brows_allowed:
        print("stopping internet!")
        stop_internet_connection()
    if time_spent > login_allowed:
        print("logging out user")
        logout_user(username)
# Log a timestamped trace of this run for debugging.
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d")
time = now.strftime("%H:%M")
print("username = ", username)
print(date, " ", time)
|
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
import numba
# Small concrete arrays used as default values for the optional parameters
# below (sigma, w, vinv) so numba can infer their types at compile time.
_mock_identity = np.eye(2, dtype=np.float32)
_mock_ones = np.ones(2, dtype=np.float32)
@numba.njit(fastmath=True)
def euclidean(x, y):
    r"""Standard euclidean distance.

    .. math::
        D(x, y) = \sqrt{\sum_i (x_i - y_i)^2}
    """
    result = 0.0
    for i in range(x.shape[0]):
        result += (x[i] - y[i]) ** 2
    return np.sqrt(result)
@numba.njit(fastmath=True)
def standardised_euclidean(x, y, sigma=_mock_ones):
    r"""Euclidean distance standardised against a vector of standard
    deviations per coordinate.

    .. math::
        D(x, y) = \sqrt{\sum_i \frac{(x_i - y_i)^2}{v_i}}

    where ``v_i`` is ``sigma[i]`` (divides the squared difference directly).
    """
    result = 0.0
    for i in range(x.shape[0]):
        result += ((x[i] - y[i]) ** 2) / sigma[i]
    return np.sqrt(result)
@numba.njit(fastmath=True)
def manhattan(x, y):
    r"""Manhattan, taxicab, or l1 distance.

    .. math::
        D(x, y) = \sum_i |x_i - y_i|
    """
    result = 0.0
    for i in range(x.shape[0]):
        result += np.abs(x[i] - y[i])
    return result
@numba.njit(fastmath=True)
def chebyshev(x, y):
    r"""Chebyshev or l-infinity distance.

    .. math::
        D(x, y) = \max_i |x_i - y_i|
    """
    result = 0.0
    for i in range(x.shape[0]):
        # running maximum of per-coordinate absolute differences
        result = max(result, np.abs(x[i] - y[i]))
    return result
@numba.njit(fastmath=True)
def minkowski(x, y, p=2):
    r"""Minkowski distance.

    .. math::
        D(x, y) = \left(\sum_i |x_i - y_i|^p\right)^{\frac{1}{p}}

    This is a general distance. For p=1 it is equivalent to
    manhattan distance, for p=2 it is Euclidean distance, and
    for p=infinity it is Chebyshev distance. In general it is better
    to use the more specialised functions for those distances.
    """
    result = 0.0
    for i in range(x.shape[0]):
        result += (np.abs(x[i] - y[i])) ** p
    return result ** (1.0 / p)
@numba.njit(fastmath=True)
def weighted_minkowski(x, y, w=_mock_ones, p=2):
    r"""A weighted version of Minkowski distance.

    .. math::
        D(x, y) = \left(\sum_i w_i |x_i - y_i|^p\right)^{\frac{1}{p}}

    If weights w_i are inverse standard deviations of data in each dimension
    then this represented a standardised Minkowski distance (and is
    equivalent to standardised Euclidean distance for p=2).
    """
    result = 0.0
    for i in range(x.shape[0]):
        result += (w[i] * np.abs(x[i] - y[i])) ** p
    return result ** (1.0 / p)
@numba.njit(fastmath=True)
def mahalanobis(x, y, vinv=_mock_identity):
    r"""Mahalanobis distance.

    .. math::
        D(x, y) = \sqrt{(x - y)^T V^{-1} (x - y)}

    ``vinv`` is the inverse of the covariance matrix; the identity default
    reduces this to euclidean distance.
    """
    result = 0.0
    diff = np.empty(x.shape[0], dtype=np.float32)
    # first pass: the difference vector
    for i in range(x.shape[0]):
        diff[i] = x[i] - y[i]
    # second pass: diff^T . vinv . diff
    for i in range(x.shape[0]):
        tmp = 0.0
        for j in range(x.shape[0]):
            tmp += vinv[i, j] * diff[j]
        result += tmp * diff[i]
    return np.sqrt(result)
@numba.njit(fastmath=True)
def hamming(x, y):
    r"""Hamming distance: the fraction of coordinates in which x and y differ."""
    result = 0.0
    for i in range(x.shape[0]):
        if x[i] != y[i]:
            result += 1.0
    return float(result) / x.shape[0]
@numba.njit(fastmath=True)
def canberra(x, y):
    r"""Canberra distance.

    .. math::
        D(x, y) = \sum_i \frac{|x_i - y_i|}{|x_i| + |y_i|}

    Terms where both coordinates are zero are skipped.
    """
    result = 0.0
    for i in range(x.shape[0]):
        denominator = np.abs(x[i]) + np.abs(y[i])
        if denominator > 0:
            result += np.abs(x[i] - y[i]) / denominator
    return result
@numba.njit(fastmath=True)
def bray_curtis(x, y):
    r"""Bray-Curtis dissimilarity.

    .. math::
        D(x, y) = \frac{\sum_i |x_i - y_i|}{\sum_i |x_i + y_i|}

    Returns 0.0 when the denominator is zero (e.g. both vectors all zero).
    """
    numerator = 0.0
    denominator = 0.0
    for i in range(x.shape[0]):
        numerator += np.abs(x[i] - y[i])
        denominator += np.abs(x[i] + y[i])
    if denominator > 0.0:
        return float(numerator) / denominator
    else:
        return 0.0
@numba.njit(fastmath=True)
def jaccard(x, y):
    r"""Jaccard distance on the non-zero patterns of x and y: one minus the
    ratio of positions non-zero in both to positions non-zero in either.
    Returns 0.0 when both vectors are entirely zero.
    """
    num_non_zero = 0.0
    num_equal = 0.0
    for i in range(x.shape[0]):
        x_true = x[i] != 0
        y_true = y[i] != 0
        num_non_zero += x_true or y_true
        num_equal += x_true and y_true
    if num_non_zero == 0.0:
        return 0.0
    else:
        return float(num_non_zero - num_equal) / num_non_zero
@numba.njit(fastmath=True)
def matching(x, y):
    r"""Matching distance: the fraction of positions whose non-zero
    indicator differs between x and y.
    """
    num_not_equal = 0.0
    for i in range(x.shape[0]):
        x_true = x[i] != 0
        y_true = y[i] != 0
        num_not_equal += x_true != y_true
    return float(num_not_equal) / x.shape[0]
@numba.njit(fastmath=True)
def dice(x, y):
    r"""Dice dissimilarity of the binary (non-zero) patterns of x and y:
    R / (2 * n_TT + R), where R is the number of disagreeing positions and
    n_TT the number of positions non-zero in both.
    Returns 0.0 when the patterns are identical.
    """
    num_true_true = 0.0
    num_not_equal = 0.0
    for i in range(x.shape[0]):
        x_true = x[i] != 0
        y_true = y[i] != 0
        num_true_true += x_true and y_true
        num_not_equal += x_true != y_true
    if num_not_equal == 0.0:
        return 0.0
    else:
        return num_not_equal / (2.0 * num_true_true + num_not_equal)
@numba.njit(fastmath=True)
def kulsinski(x, y):
    r"""Kulsinski dissimilarity of the binary (non-zero) patterns of x and y:
    (R - n_TT + n) / (R + n), where R is the number of disagreeing positions,
    n_TT the number of positions non-zero in both, and n the dimension.
    Returns 0.0 when the patterns are identical.
    """
    num_true_true = 0.0
    num_not_equal = 0.0
    for i in range(x.shape[0]):
        x_true = x[i] != 0
        y_true = y[i] != 0
        num_true_true += x_true and y_true
        num_not_equal += x_true != y_true
    if num_not_equal == 0:
        return 0.0
    else:
        return float(num_not_equal - num_true_true + x.shape[0]) / (
            num_not_equal + x.shape[0]
        )
@numba.njit(fastmath=True)
def rogers_tanimoto(x, y):
    r"""Rogers-Tanimoto dissimilarity: 2R / (n + R), where R is the number
    of positions whose non-zero indicator differs and n the dimension.
    """
    num_not_equal = 0.0
    for i in range(x.shape[0]):
        x_true = x[i] != 0
        y_true = y[i] != 0
        num_not_equal += x_true != y_true
    return (2.0 * num_not_equal) / (x.shape[0] + num_not_equal)
@numba.njit(fastmath=True)
def russellrao(x, y):
    r"""Russell-Rao dissimilarity: (n - n_TT) / n, where n_TT is the number
    of positions non-zero in both vectors and n the dimension.

    NOTE(review): returns 0.0 when x and y have identical non-zero support —
    this deviates from the textbook formula; presumably intentional, confirm.
    """
    num_true_true = 0.0
    for i in range(x.shape[0]):
        x_true = x[i] != 0
        y_true = y[i] != 0
        num_true_true += x_true and y_true
    if num_true_true == np.sum(x != 0) and num_true_true == np.sum(y != 0):
        return 0.0
    else:
        return float(x.shape[0] - num_true_true) / (x.shape[0])
@numba.njit(fastmath=True)
def sokal_michener(x, y):
    r"""Sokal-Michener dissimilarity: 2R / (n + R), where R is the number
    of positions whose non-zero indicator differs and n the dimension.

    NOTE(review): this formula is identical to rogers_tanimoto above.
    """
    num_not_equal = 0.0
    for i in range(x.shape[0]):
        x_true = x[i] != 0
        y_true = y[i] != 0
        num_not_equal += x_true != y_true
    return (2.0 * num_not_equal) / (x.shape[0] + num_not_equal)
@numba.njit(fastmath=True)
def sokal_sneath(x, y):
    r"""Sokal-Sneath dissimilarity: R / (0.5 * n_TT + R), where R is the
    number of disagreeing positions and n_TT the number of positions
    non-zero in both. Returns 0.0 when the patterns are identical.
    """
    num_true_true = 0.0
    num_not_equal = 0.0
    for i in range(x.shape[0]):
        x_true = x[i] != 0
        y_true = y[i] != 0
        num_true_true += x_true and y_true
        num_not_equal += x_true != y_true
    if num_not_equal == 0.0:
        return 0.0
    else:
        return num_not_equal / (0.5 * num_true_true + num_not_equal)
@numba.njit(fastmath=True)
def haversine(x, y):
    r"""Haversine (great-circle) distance between two (latitude, longitude)
    points given in radians, on a unit sphere.

    Raises ValueError for inputs that are not 2-dimensional.
    """
    if x.shape[0] != 2:
        raise ValueError("haversine is only defined for 2 dimensional data")
    sin_lat = np.sin(0.5 * (x[0] - y[0]))
    sin_long = np.sin(0.5 * (x[1] - y[1]))
    result = np.sqrt(sin_lat ** 2 + np.cos(x[0]) * np.cos(y[0]) * sin_long ** 2)
    return 2.0 * np.arcsin(result)
@numba.njit(fastmath=True)
def yule(x, y):
    r"""Yule dissimilarity of the binary (non-zero) patterns of x and y:
    2 * n_TF * n_FT / (n_TT * n_FF + n_TF * n_FT).
    Returns 0.0 when either cross term (n_TF or n_FT) is zero.
    """
    num_true_true = 0.0
    num_true_false = 0.0
    num_false_true = 0.0
    for i in range(x.shape[0]):
        x_true = x[i] != 0
        y_true = y[i] != 0
        num_true_true += x_true and y_true
        num_true_false += x_true and (not y_true)
        num_false_true += (not x_true) and y_true
    # n_FF follows from the other three counts and the dimension
    num_false_false = x.shape[0] - num_true_true - num_true_false - num_false_true
    if num_true_false == 0.0 or num_false_true == 0.0:
        return 0.0
    else:
        return (2.0 * num_true_false * num_false_true) / (
            num_true_true * num_false_false + num_true_false * num_false_true
        )
@numba.njit(fastmath=True)
def cosine(x, y):
    r"""Cosine distance: 1 minus the cosine similarity of x and y.

    Convention for zero vectors: 0.0 when both are zero (treated as
    identical), 1.0 when exactly one is zero.
    """
    result = 0.0
    norm_x = 0.0
    norm_y = 0.0
    for i in range(x.shape[0]):
        result += x[i] * y[i]
        norm_x += x[i] ** 2
        norm_y += y[i] ** 2
    if norm_x == 0.0 and norm_y == 0.0:
        return 0.0
    elif norm_x == 0.0 or norm_y == 0.0:
        return 1.0
    else:
        return 1.0 - (result / np.sqrt(norm_x * norm_y))
@numba.njit(fastmath=True)
def correlation(x, y):
    r"""Correlation distance: cosine distance of the mean-centered vectors
    (i.e. 1 minus the Pearson correlation of x and y).

    Convention: 0.0 when both centered vectors are zero; 1.0 when the
    centered dot product is zero (uncorrelated or one side constant).
    """
    mu_x = 0.0
    mu_y = 0.0
    norm_x = 0.0
    norm_y = 0.0
    dot_product = 0.0
    # first pass: means of x and y
    for i in range(x.shape[0]):
        mu_x += x[i]
        mu_y += y[i]
    mu_x /= x.shape[0]
    mu_y /= x.shape[0]
    # second pass: norms and dot product of the centered vectors
    for i in range(x.shape[0]):
        shifted_x = x[i] - mu_x
        shifted_y = y[i] - mu_y
        norm_x += shifted_x ** 2
        norm_y += shifted_y ** 2
        dot_product += shifted_x * shifted_y
    if norm_x == 0.0 and norm_y == 0.0:
        return 0.0
    elif dot_product == 0.0:
        return 1.0
    else:
        return 1.0 - (dot_product / np.sqrt(norm_x * norm_y))
@numba.njit()
def hellinger(x, y):
    r"""Hellinger distance between non-negative vectors (e.g. histograms),
    normalised by their l1 norms:

    .. math::
        D(x, y) = \sqrt{1 - \frac{\sum_i \sqrt{x_i y_i}}
                              {\sqrt{\sum_i x_i \sum_i y_i}}}

    Convention: 0.0 when both vectors are all zero, 1.0 when exactly one is.
    """
    result = 0.0
    l1_norm_x = 0.0
    l1_norm_y = 0.0
    for i in range(x.shape[0]):
        result += np.sqrt(x[i] * y[i])
        l1_norm_x += x[i]
        l1_norm_y += y[i]
    if l1_norm_x == 0 and l1_norm_y == 0:
        return 0.0
    elif l1_norm_x == 0 or l1_norm_y == 0:
        return 1.0
    else:
        return np.sqrt(1 - result / np.sqrt(l1_norm_x * l1_norm_y))
# Lookup table mapping user-facing metric names (including aliases) to the
# numba-compiled implementations defined above.
named_distances = {
    # general minkowski distances
    "euclidean": euclidean,
    "l2": euclidean,
    "manhattan": manhattan,
    "taxicab": manhattan,
    "l1": manhattan,
    "chebyshev": chebyshev,
    "linfinity": chebyshev,
    "linfty": chebyshev,
    "linf": chebyshev,
    "minkowski": minkowski,
    # Standardised/weighted distances
    "seuclidean": standardised_euclidean,
    "standardised_euclidean": standardised_euclidean,
    "wminkowski": weighted_minkowski,
    "weighted_minkowski": weighted_minkowski,
    "mahalanobis": mahalanobis,
    # Other distances
    "canberra": canberra,
    "cosine": cosine,
    "correlation": correlation,
    "hellinger": hellinger,
    "haversine": haversine,
    "braycurtis": bray_curtis,
    # Binary distances
    "hamming": hamming,
    "jaccard": jaccard,
    "dice": dice,
    "matching": matching,
    "kulsinski": kulsinski,
    "rogerstanimoto": rogers_tanimoto,
    "russellrao": russellrao,
    "sokalsneath": sokal_sneath,
    "sokalmichener": sokal_michener,
    "yule": yule,
}
|
#!/usr/bin/env python3
#
## @file
# list_repos_command.py
#
# Copyright (c) 2019 - 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
import collections
import os
#from git import Repo
from colorama import Fore, Style
# Our modules
from edkrepo.commands.edkrepo_command import EdkrepoCommand
from edkrepo.commands.edkrepo_command import ColorArgument
import edkrepo.commands.arguments.list_repos_args as arguments
import edkrepo.commands.humble.list_repos_humble as humble
from edkrepo.common.edkrepo_exception import EdkrepoInvalidParametersException, EdkrepoManifestInvalidException
from edkrepo.common.ui_functions import init_color_console
from edkrepo.common.workspace_maintenance.manifest_repos_maintenance import pull_all_manifest_repos
from edkrepo.common.workspace_maintenance.manifest_repos_maintenance import list_available_manifest_repos
from edkrepo.config.tool_config import CI_INDEX_FILE_NAME
from edkrepo_manifest_parser.edk_manifest import CiIndexXml, ManifestXml
class ListReposCommand(EdkrepoCommand):
    """Implements `edkrepo list-repos`: prints every git repository used by
    the available manifest repositories, the branches of each, and the
    project/combination pairs that use each branch."""
    def __init__(self):
        super().__init__()
        # Ordered mapping of display name -> (repo_url, name_frequency),
        # populated by generate_repo_names().
        self.repo_names = None
    def get_metadata(self):
        """Describe the command's name, help text and arguments for the CLI."""
        metadata = {}
        metadata['name'] = 'list-repos'
        metadata['help-text'] = arguments.COMMAND_DESCRIPTION
        args = []
        metadata['arguments'] = args
        args.append({'name': 'repos',
                     'positional': False,
                     'required': False,
                     'action': 'store',
                     'nargs': '+',
                     'help-text': arguments.REPOS_HELP})
        args.append({'name': 'archived',
                     'short-name': 'a',
                     'positional': False,
                     'required': False,
                     'help-text': arguments.ARCHIVED_HELP})
        args.append(ColorArgument)
        return metadata
    def run_command(self, args, config):
        """Entry point: gather manifests from all manifest repositories, then
        print each repository with its branches and the combinations using them."""
        print()
        init_color_console(args.color)
        pull_all_manifest_repos(config['cfg_file'], config['user_cfg_file'])
        print()
        cfg_manifest_repos, user_config_manifest_repos, conflicts = list_available_manifest_repos(config['cfg_file'], config['user_cfg_file'])
        found_manifests = {}
        manifests = {}
        repo_urls = set()
        config_manifest_repos_project_list = []
        user_config_manifest_repos_project_list = []
        for manifest_repo in cfg_manifest_repos:
            # Get path to global manifest file
            global_manifest_directory = config['cfg_file'].manifest_repo_abs_path(manifest_repo)
            if args.verbose:
                print(humble.MANIFEST_DIRECTORY)
                print(global_manifest_directory)
                print()
            #Create a dictionary containing all the manifests listed in the CiIndex.xml file
            # (a stray debug print of index_path was removed here; directory
            # output is gated by --verbose like in the user-config loop below)
            index_path = os.path.join(global_manifest_directory, CI_INDEX_FILE_NAME)
            ci_index_xml = CiIndexXml(index_path)
            config_manifest_repos_project_list = ci_index_xml.project_list
            if args.archived:
                config_manifest_repos_project_list.extend(ci_index_xml.archived_project_list)
            for project in config_manifest_repos_project_list:
                xml_file = ci_index_xml.get_project_xml(project)
                manifest = ManifestXml(os.path.normpath(os.path.join(global_manifest_directory, xml_file)))
                found_manifests['{}:{}'.format(manifest_repo, project)] = manifest
                combo_list = [c.name for c in manifest.combinations]
                if args.archived:
                    combo_list.extend([c.name for c in manifest.archived_combinations])
                for combo in combo_list:
                    sources = manifest.get_repo_sources(combo)
                    for source in sources:
                        repo_urls.add(self.get_repo_url(source.remote_url))
        for manifest_repo in user_config_manifest_repos:
            # Get path to global manifest file
            global_manifest_directory = config['user_cfg_file'].manifest_repo_abs_path(manifest_repo)
            if args.verbose:
                print(humble.MANIFEST_DIRECTORY)
                print(global_manifest_directory)
                print()
            #Create a dictionary containing all the manifests listed in the CiIndex.xml file
            index_path = os.path.join(global_manifest_directory, CI_INDEX_FILE_NAME)
            ci_index_xml = CiIndexXml(index_path)
            user_config_manifest_repos_project_list = ci_index_xml.project_list
            if args.archived:
                user_config_manifest_repos_project_list.extend(ci_index_xml.archived_project_list)
            for project in user_config_manifest_repos_project_list:
                xml_file = ci_index_xml.get_project_xml(project)
                manifest = ManifestXml(os.path.normpath(os.path.join(global_manifest_directory, xml_file)))
                found_manifests['{}:{}'.format(manifest_repo, project)] = manifest
                combo_list = [c.name for c in manifest.combinations]
                if args.archived:
                    combo_list.extend([c.name for c in manifest.archived_combinations])
                for combo in combo_list:
                    sources = manifest.get_repo_sources(combo)
                    for source in sources:
                        repo_urls.add(self.get_repo_url(source.remote_url))
        #Remove the manifest repo portion of the key if there is not a duplicate project name
        key_list = list(found_manifests)
        for entry in key_list:
            new_key = entry.split(':')[1]
            value = found_manifests[entry]
            del found_manifests[entry]
            for found_manifest in list(found_manifests):
                if found_manifest.split(':')[1] == new_key:
                    new_key = 'Manifest Repository: {} Project: {}'.format(entry.split(':')[0], entry.split(':')[1])
                    break
            if new_key in manifests.keys():
                # BUG FIX: was entry.split(':'[0]) — ':'[0] is just ':', so the
                # whole split list was formatted instead of the repo name
                new_key = 'Manifest Repository: {} Project: {}'.format(entry.split(':')[0], entry.split(':')[1])
            manifests[new_key] = value
        #Sort the manifests so projects will be displayed alphabetically
        manifests = collections.OrderedDict(sorted(manifests.items()))
        project_justify = len(max(manifests.keys(), key=len))
        #Determine the names of the repositories
        self.generate_repo_names(repo_urls, manifests, args.archived)
        print(humble.REPOSITORIES)
        #If the user provided a list of repositories to view, check to make sure
        #at least one repository will be shown, if not provide an error
        if args.repos and len([x for x in self.repo_names if x in args.repos]) <= 0:
            raise EdkrepoInvalidParametersException(humble.REPO_NOT_FOUND_IN_MANIFEST.format(','.join(args.repos)))
        #For each git repository...
        for repo_name in self.repo_names:
            if args.repos and repo_name not in args.repos:
                continue
            repo = self.repo_names[repo_name][0]
            print(humble.REPO_NAME_AND_URL.format(repo_name, repo))
            print(humble.BRANCHES)
            #Determine the list of branches that used by any branch combination in any manifest
            branches = set()
            for project_name in manifests:
                combo_list = [c.name for c in manifests[project_name].combinations]
                if args.archived:
                    combo_list.extend([c.name for c in manifests[project_name].archived_combinations])
                for combo in combo_list:
                    sources = manifests[project_name].get_repo_sources(combo)
                    for source in sources:
                        if self.get_repo_url(source.remote_url) == repo:
                            branches.add(source.branch)
            #Sort the branch names so they will be displayed alphabetically
            #with the exception that if a branch named "master" exists, then it
            #will be displayed first
            branches = sorted(branches, key=str.casefold)
            if 'master' in branches:
                branches.remove('master')
                branches.insert(0, 'master')
            #For each interesting branch in the current git repository...
            for branch in branches:
                print(humble.BRANCH_FORMAT_STRING.format(branch))
                #Determine the branch combinations that use that branch
                for project_name in manifests:
                    combos = []
                    combo_list = [c.name for c in manifests[project_name].combinations]
                    if args.archived:
                        combo_list.extend([c.name for c in manifests[project_name].archived_combinations])
                    for combo in combo_list:
                        sources = manifests[project_name].get_repo_sources(combo)
                        for source in sources:
                            if self.get_repo_url(source.remote_url) == repo and source.branch == branch:
                                combos.append(combo)
                                break
                    if len(combos) > 0:
                        #Sort the branch combinations so they will be displayed alphabetically
                        #with the exception that the default branch combination for the manifest
                        #file will be displayed first
                        combos = sorted(combos, key=str.casefold)
                        default_combo = manifests[project_name].general_config.default_combo
                        if default_combo in combos:
                            combos.remove(default_combo)
                            combos.insert(0, default_combo)
                        first_combo = True
                        for combo in combos:
                            #Print the project name
                            if first_combo:
                                project_name_print = humble.PROJECT_NAME_FORMAT_STRING.format(project_name.ljust(project_justify))
                                first_combo = False
                            else:
                                project_name_print = '{} '.format((' ' * len(project_name)).ljust(project_justify))
                            #Print the branch combination name, if this is the default branch combination,
                            #then print it in green color with *'s around it
                            if default_combo == combo:
                                print(humble.DEFAULT_COMBO_FORMAT_STRING.format(project_name_print, combo))
                            else:
                                print(humble.COMBO_FORMAT_STRING.format(project_name_print, combo))
    def get_repo_url(self, repo_url):
        """Return *repo_url* with a trailing '.git' suffix stripped."""
        if repo_url[-4:].lower() == '.git':
            return repo_url[:-4]
        return repo_url
    def get_repo_name(self, repo_url, manifests):
        """Return the display name previously assigned to *repo_url*."""
        for name in self.repo_names:
            if self.repo_names[name][0] == repo_url:
                return name
        raise EdkrepoInvalidParametersException(humble.REPO_NAME_NOT_FOUND)
    def generate_repo_names(self, repo_urls, manifests, archived=False):
        """Populate self.repo_names with a display name per repository URL,
        sorted alphabetically but with edk2* and then intel* names first."""
        #Determine the names of the repositories
        self.repo_names = collections.OrderedDict()
        for repo_url in repo_urls:
            self.__repo_name_worker(repo_url, manifests, archived)
        #Sort the git repositories so they will be displayed alphabetically
        self.repo_names = collections.OrderedDict(sorted(self.repo_names.items()))
        names_to_move = []
        for repo_name in self.repo_names:
            if repo_name.lower().find('edk2') == 0:
                names_to_move.append(repo_name)
        names_to_move = sorted(names_to_move, reverse=True)
        for name_to_move in names_to_move:
            self.repo_names.move_to_end(name_to_move, False)
        names_to_move = []
        for repo_name in self.repo_names:
            if repo_name.lower().find('intel') == 0:
                names_to_move.append(repo_name)
        names_to_move = sorted(names_to_move, reverse=True)
        for name_to_move in names_to_move:
            self.repo_names.move_to_end(name_to_move, False)
    def __repo_name_worker(self, repo_url, manifests, archived=False):
        """Assign a unique display name to *repo_url*, recursing to rename a
        previously-named repository if it loses a name collision."""
        #This is a heuristic that guesses the "name" of a repository by looking
        #at the name given to it by the most manifest files.
        names = collections.defaultdict(int)
        for project_name in manifests:
            combo_list = [c.name for c in manifests[project_name].combinations]
            if archived:
                combo_list.extend([c.name for c in manifests[project_name].archived_combinations])
            for combo in combo_list:
                sources = manifests[project_name].get_repo_sources(combo)
                for source in sources:
                    if self.get_repo_url(source.remote_url) == repo_url:
                        names[source.root] += 1
        found_unique_name = False
        original_best_name = None
        original_best_name_frequency = 0
        while not found_unique_name:
            best_name = None
            best_name_frequency = 0
            if len(names) <= 0:
                if original_best_name_frequency == 1:
                    #If only 1 project uses this name, then append the project
                    #name to the directory name to create the repo name
                    for project_name in manifests:
                        combo_list = [c.name for c in manifests[project_name].combinations]
                        if archived:
                            combo_list.extend([c.name for c in manifests[project_name].archived_combinations])
                        for combo in combo_list:
                            sources = manifests[project_name].get_repo_sources(combo)
                            for source in sources:
                                if self.get_repo_url(source.remote_url) == repo_url and source.root == original_best_name:
                                    best_name = "{}-{}".format(original_best_name, project_name)
                                    best_name_frequency = original_best_name_frequency
                else:
                    best_name = repo_url
                    best_name_frequency = 0
                break
            for name in names:
                if names[name] > best_name_frequency:
                    best_name = name
                    best_name_frequency = names[name]
            if best_name is None:
                raise EdkrepoManifestInvalidException(humble.REPO_NOT_FOUND_IN_MANIFEST.format(repo_url))
            if original_best_name is None:
                original_best_name = best_name
                original_best_name_frequency = best_name_frequency
            if best_name in self.repo_names:
                if self.repo_names[best_name][0] == repo_url:
                    found_unique_name = True
                else:
                    #If there is a name collision, then which repo has the most
                    #Usage of the name owns the name
                    if best_name_frequency > self.repo_names[best_name][1]:
                        # BUG FIX: was self.repo_names[name][0] — `name` is a
                        # stale loop variable; the displaced URL belongs to
                        # the colliding entry, i.e. best_name
                        old_repo_url = self.repo_names[best_name][0]
                        del self.repo_names[best_name]
                        found_unique_name = True
                        self.repo_names[best_name] = (repo_url, best_name_frequency)
                        self.__repo_name_worker(old_repo_url, manifests, archived)
                    else:
                        #Use the name given by the second most manifest files
                        del names[best_name]
            else:
                found_unique_name = True
                self.repo_names[best_name] = (repo_url, best_name_frequency)
|
# gh_stars: 0
# system
import math
from collections import namedtuple, Counter
from functools import reduce
from pprint import pprint
from typing import List, Dict, Tuple
# internal
from .day import Day
"""
===============================================================================
Day 3 Puzzle 1
The gravity assist was successful, and you're well on your way to the Venus
refuelling station. During the rush back on Earth, the fuel management system
wasn't completely installed, so that's next on the priority list.
Opening the front panel reveals a jumble of wires. Specifically, two wires are
connected to a central port and extend outward on a grid. You trace the path
each wire takes as it leaves the central port, one wire per line of text (your
puzzle input).
The wires twist and turn, but the two wires occasionally cross paths. To fix
the circuit, you need to find the intersection point closest to the central
port. Because the wires are on a grid, use the Manhattan distance for this
measurement. While the wires do technically cross right at the central port
where they both start, this point does not count, nor does a wire count as
crossing with itself.
For example, if the first wire's path is R8,U5,L5,D3, then starting from the
central port (o), it goes right 8, up 5, left 5, and finally down 3:
...........
...........
...........
....+----+.
....|....|.
....|....|.
....|....|.
.........|.
.o-------+.
...........
Then, if the second wire's path is U7,R6,D4,L4, it goes up 7, right 6, down 4,
and left 4:
...........
.+-----+...
.|.....|...
.|..+--X-+.
.|..|..|.|.
.|.-X--+.|.
.|..|....|.
.|.......|.
.o-------+.
...........
These wires cross at two locations (marked X), but the lower-left one is closer to the
central port: its distance is 3 + 3 = 6.
Here are a few more examples:
R75,D30,R83,U83,L12,D49,R71,U7,L72
U62,R66,U55,R34,D71,R55,D58,R83 = distance 159
R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51
U98,R91,D20,R16,D67,R40,U7,R15,U6,R7 =
distance 135
What is the Manhattan distance from the central port to the closest intersection?
===============================================================================
Day 3 Puzzle 2
It turns out that this circuit is very timing-sensitive; you actually need to
minimize the signal delay.
To do this, calculate the number of steps each wire takes to reach each
intersection; choose the intersection where the sum of both wires' steps is
lowest. If a wire visits a position on the grid multiple times, use the steps
value from the first time it visits that position when calculating the total
value of a specific intersection.
The number of steps a wire takes is the total number of grid squares the wire
has entered to get to that location, including the intersection being
considered. Again consider the example from above:
...........
.+-----+...
.|.....|...
.|..+--X-+.
.|..|..|.|.
.|.-X--+.|.
.|..|....|.
.|.......|.
.o-------+.
...........
In the above example, the intersection closest to the central port is reached
after 8+5+5+2 = 20 steps by the first wire and 7+6+4+3 = 20 steps by the second
wire for a total of 20+20 = 40 steps.
However, the top-right intersection is better: the first wire takes only 8+5+2
= 15 and the second wire takes only 7+6+2 = 15, a total of 15+15 = 30 steps.
Here are the best steps for the extra examples from above:
R75,D30,R83,U83,L12,D49,R71,U7,L72 U62,R66,U55,R34,D71,R55,D58,R83 = 610 steps
R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51
U98,R91,D20,R16,D67,R40,U7,R15,U6,R7 = 410 steps What is the fewest combined
steps the wires must take to reach an intersection?
"""
# A visited grid cell: coordinates plus its Manhattan distance from origin.
Point = namedtuple("Point", ["x", "y", "dist"])
class Day3(Day):
    """Advent of Code 2019, day 3: crossed wires (see module docstring)."""
    # changes in x & y based on direction
    _moves = {"R": (1, 0), "L": (-1, 0), "U": (0, 1), "D": (0, -1)}
    def _get_turns(self, path: str) -> List[Tuple[str, int]]:
        """Parse a line like "R8,U5,L5,D3" into [("R", 8), ("U", 5), ...]."""
        path = path.rstrip("\n")
        return [(turn[0], int(turn[1:])) for turn in path.split(",")]
    def _get_points_from_turns(self, turns: List[Tuple[str, int]]) -> List[Point]:
        """Expand turns into every grid Point visited, in walk order
        (the origin itself is not included)."""
        points = []
        point = Point(0, 0, 0)
        for direction, length in turns:
            x, y = self._moves[direction]
            for _ in range(length):
                nx = point.x + x
                ny = point.y + y
                # create a list of all points on the path
                # (walrus keeps `point` tracking the current walk position)
                points.append(point := Point(nx, ny, abs(nx) + abs(ny)))
        return points
    def _get_intersection_points(self, wires) -> Dict:
        """Keep only points whose counter is > 1, i.e. visited by both wires."""
        return dict(filter(lambda item: item[1] > 1, wires.items()))
    def _get_steps(self, point, steps):
        """Total steps both wires take to first reach *point*.

        index() finds the FIRST visit, as the puzzle requires; +1 because
        the origin is not stored in each wire's point list.
        """
        return sum(map(lambda points: points.index(point) + 1, steps))
    def _puzzle1(self):
        """Manhattan distance from the origin to the closest intersection."""
        answer = ""
        # Point is a named tuple which stores the x, y and manhattan distance
        # It is named so I can track in a dictionary using Counter from collections
        wires = Counter()
        # apparently pylint doesn't know about 3.8 iterables on streams
        # pylint: disable=not-an-iterable
        for path in self._input_stream:
            # get turns from path
            turns = self._get_turns(path)
            # get points on the path from turns
            points = self._get_points_from_turns(turns)
            # create a dict from the points assign a default value of 1. This will be added to
            # the wires Counter, causing any crossing of unique points to be increased by 1
            wire = dict.fromkeys(points, 1)
            wires.update(wire)
        # find all intersection of points on the wires, their value will be 2 if 2 wires cross
        intersections = self._get_intersection_points(wires)
        # reduce the dict to the Point with the shortest distance
        # (no initial value: assumes at least one intersection exists)
        shortest = reduce(
            lambda shortest, point: shortest if shortest.dist < point.dist else point,
            intersections.keys(),
        )
        answer = str(shortest.dist)
        return answer
    def _puzzle2(self):
        """Fewest combined steps for both wires to reach an intersection."""
        answer = ""
        # Point is a named tuple which stores the x, y and manhattan distance
        # It is named so I can track in a dictionary using Counter from collections
        wires = Counter()
        steps = []
        # apparently pylint doesn't know about 3.8 iterables on streams
        # pylint: disable=not-an-iterable
        for path in self._input_stream:
            # get turns from path
            turns = self._get_turns(path)
            # get points on the path from turns
            points = self._get_points_from_turns(turns)
            # store points for step count
            steps.append(points)
            # create a dict from the points assign a default value of 1. This will be added to
            # the wires Counter, causing any crossing of unique points to be increased by 1
            wire = dict.fromkeys(points, 1)
            wires.update(wire)
        # find all intersection of points on the wires, their value will be 2 if 2 wires cross
        intersections = self._get_intersection_points(wires)
        # reduce the dict to the Point with the closest steps
        closest = reduce(
            lambda closest, point: closest
            if self._get_steps(closest, steps) < self._get_steps(point, steps)
            else point,
            intersections.keys(),
        )
        answer = str(self._get_steps(closest, steps))
        return answer
|
# repo: CardinalNumberFromReddit/robinhood_trailingstop (gh_stars: 0)
#!/usr/bin/python
# trailing_stop.py
# Install python package:
# > pip install https://github.com/swgr424/Robinhood/archive/master.zip
# swgr424 includes a PR for gathering quotes for multiple symbols at once
# Wait...
from Robinhood import Robinhood
import time
import argparse
from six.moves.urllib.parse import unquote
# Debug
#from pprint import pprint
''' Python example of trailing stop loss simulation for Robinhood
Example:
./trailing_stop.py -u=cardinalnumber -p=Ujp43wJi0fsajk54ew
'''
# Assign description to the help doc
# Assign description to the help doc
parser = argparse.ArgumentParser(
    description='Demo stop loss orders for Robinhood')
# Add arguments
parser.add_argument(
    '-u', '--username', type=str, help='User name', required=True)
parser.add_argument(
    '-p', '--password', type=str, help='Password', required=True)
# Trailing-stop distance as a percentage below the current bid
parser.add_argument(
    '--percent', type=float, help='Trailing percent', required=False, default=3)
#parser.add_argument(
#    '--tight', type=float, help='Trailing percent when below average price', required=False, default=2)
# Array for all arguments passed to script
args = parser.parse_args()
# Assign args to variables
# server = args.server
#pprint(args)
#
# Log in at import time; NOTE(review): `logged_in` is never checked afterwards —
# confirm whether a failed login should abort the script here.
rh = Robinhood()
logged_in = rh.login(username=args.username, password=args.password)
account = rh.get_account()
def load_positions():
    """Fetch all nonzero positions and their open sell orders from Robinhood.

    Follows the API's pagination until the 'next' link is exhausted.

    Returns:
        tuple(dict, dict): (symbol -> position payload,
        symbol -> list of still-cancellable sell orders for that instrument).
    """
    _pos = {}
    _ord = {}
    # 'url' instead of 'next' so we don't shadow the builtin next().
    url = rh.endpoints['positions'] + '?nonzero=true'
    while True:
        positions = rh.session.get(url).json()
        for position in positions.get('results'):
            instrument = rh.session.get(position['instrument']).json()
            _pos[instrument['symbol']] = position
            # Keep only sell orders that still expose a cancel URL
            # (i.e. orders we are allowed to cancel and re-place).
            _ord[instrument['symbol']] = list(filter(
                lambda x: x['side'] == 'sell' and x['cancel'] is not None,
                rh.session.get(rh.endpoints['orders'] + '?instrument=' + position['instrument']).json().get('results')))
        # Identity comparison with None per PEP 8 (was '== None').
        if positions['next'] is None:
            break
        url = positions['next']
    return _pos, _ord
def to_price(price):
    """Round *price* to Robinhood tick precision: 4 decimals under $1, else 2."""
    value = float(price)
    fmt = '%.4f' if value < 1 else '%.2f'
    return float(fmt % value)
# Poll loop: every 15s, recompute the trailing stop for each position and
# replace any existing stop order that sits below the new trailing price.
while True:
    positions, orders = load_positions()
    # NOTE(review): assumes the quotes list comes back in the same order as
    # the requested symbols (positions dict insertion order) — confirm the
    # multi-symbol quotes endpoint guarantees this.
    quotes = rh.session.get(rh.endpoints['quotes'] + '?symbols=' + ',' .join(list(positions.keys()))).json().get('results')
    for idx,symbol in enumerate(positions):
        #print("Average price of ${0}: ${1} | bid ${2}" . format(symbol, positions[symbol]['average_buy_price'], quotes[idx]['bid_price']))
        # Trailing price = current bid minus --percent of the bid.
        trailing_price = to_price((to_price(quotes[idx]['bid_price']) - (to_price(quotes[idx]['bid_price']) * float(args.percent / 100))))
        #if to_price(positions[symbol]['average_buy_price']) > trailing_price:
        #    trailing_price = to_price(
        #        (to_price(positions[symbol]['average_buy_price'])
        #        - (to_price(positions[symbol]['average_buy_price']) * float(args.tight / 100))
        #        )
        #        )
        #print (" trailing price: %f" % trailing_price)
        # Shares not already committed to an open sell order.
        quantity = float(positions[symbol]['quantity']) - float(positions[symbol]['shares_held_for_sells'])
        # Cancel stale stop orders whose stop sits below the new trailing
        # price, reclaiming their shares for the replacement order.
        for order in list(filter(lambda order: float(order.get('stop_price')) < trailing_price, orders[symbol] )):
            #pprint(order)
            quantity += float(order['quantity'])
            rh.session.post(order['cancel']).json() # TODO: verify
        if quantity:
            print('Setting stop loss at ${0} for {1} shares of ${2}' . format(trailing_price, quantity, symbol))
            # Place a GTC stop-market sell at the trailing price.
            res = rh.session.post(
                rh.endpoints['orders'],
                data = {
                    'account': unquote(account['url']),
                    'instrument': unquote(positions[symbol]['instrument']),
                    'price': float(quotes[idx]['bid_price']),
                    'stop_price' : float(trailing_price),
                    'quantity': quantity,
                    'side': 'sell',
                    'symbol': symbol,
                    'time_in_force': 'gtc',
                    'trigger': 'stop',
                    'type': 'market'
                }
            )
            res.raise_for_status()
            # TODO: verify
    time.sleep(15)
|
<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime as DT
import subprocess
import sys
from netmiko import ConnectHandler
# SSH private key used for both the netmiko login and the scp copies below.
keyfile = "vmanage"
logfile = "backupjob.log"
backup_path = "./backupdata"
# netmiko connection parameters; vmanage is reached as a plain linux host.
login_info = {
    "device_type": "linux",
    "host": "10.75.58.50",
    "username": "admin",
    "use_keys": True,
    "key_file": keyfile,
}
# Today's date stamps the new backup; the week-old stamp names the
# zero-byte placeholder that overwrites (effectively expires) an old backup.
date = str(DT.date.today())
week_ago = DT.datetime.today() - DT.timedelta(days=7)
week_ago = str(week_ago.date())
zerofile = "/tmp/confdb_backup" + week_ago + ".tar.gz"
logtitle = "=" * 15 + "Day of " + date + "=" * 15 + "\n"
class SSHjob:
    """SSHjob defines a class for a job running through SSH by
    calling the module netmiko.
    ...
    Attributes
    ----------
    net_connect : netmiko return object.
    backup_ret : str
        The return of running backup on vmanage.
    ret1 : str
        The first return, copy backup file.
    ret2 : str
        The second return, copy zero size file.
    Methods
    -------
    connect():
        Call the netmiko to connect.
    run_backup():
        Run backup request on vmanage.
    copy_backup_file():
        Copy backup file through scp.
    copy_zero_file():
        Copy zero size file to vmanage.
    disconnect():
        Disconnect vmanage
    """
    def __init__(self):
        # All results start unset; filled in by the corresponding methods.
        self.net_connect = None
        self.backup_ret = None
        self.ret1 = None
        self.ret2 = None
    def connect(self):
        # Opens the SSH session using the module-level login_info dict.
        self.net_connect = ConnectHandler(**login_info)
    def run_backup(self):
        # The backslash-newline inside the literal continues the string, so
        # the command reads ".../path /home/admin/confdb_backup<date>".
        backup_cmd = (
            "request nms configuration-db backup path \
/home/admin/confdb_backup"
            + date
        )
        self.backup_ret = self.net_connect.send_command(backup_cmd)
    def copy_backup_file(self):
        # scp today's backup archive from vmanage into backup_path.
        # NOTE(review): shell=True is safe here only because every piece of
        # the command is a local constant; timeout=5 may be tight for large
        # archives — confirm.
        runcmd = (
            "scp -i "
            + keyfile
            + " "
            + login_info["username"]
            + "@"
            + login_info["host"]
            + ":"
            + "/home/admin/confdb_backup"
            + date
            + ".tar.gz "
            + backup_path
        )
        self.ret1 = str(
            subprocess.run(
                runcmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                encoding="utf-8",
                timeout=5,
            )
        )
    def copy_zero_file(self):
        # Overwrite the week-old backup on vmanage with an empty archive,
        # reclaiming its disk space while keeping the filename.
        runcmd = (
            "touch "
            + zerofile
            + " && "
            + "scp -i vmanage "
            + zerofile
            + " admin@"
            + login_info["host"]
            + ":/home/admin/"
            + " && "
            + "rm "
            + zerofile
        )
        self.ret2 = str(
            subprocess.run(
                runcmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                encoding="utf-8",
                timeout=5,
            )
        )
    def disconnect(self):
        # Close the netmiko SSH session.
        self.net_connect.disconnect()
def main():
    """Run one vmanage configuration-db backup cycle and append a log entry."""
    started_at = str(DT.datetime.now())
    job = SSHjob()
    job.connect()
    job.run_backup()
    job.copy_backup_file()
    job.copy_zero_file()
    job.disconnect()
    finished_at = str(DT.datetime.now())
    # Assemble the log entry; each captured command result gets its own line.
    report = "".join([
        logtitle,
        started_at,
        " Job started...\n",
        job.backup_ret,
        "\n",
        job.ret1,
        "\n",
        job.ret2,
        "\n",
        finished_at,
        " Job ended...\n",
    ])
    with open(logfile, "a") as fobj:
        fobj.write(report)
    sys.exit(0)
if __name__ == "__main__":
    main()
|
<reponame>alan-turing-institute/pcit
import matplotlib.pyplot as plt
import numpy as np
# Sample sizes: log-spaced from e^6 (~403) to just under e^10 (~22026).
n_list = np.round(np.exp(list(np.arange(6,10,0.1)))).astype(int)
size_mat = 10
# presumably B is the number of bootstrap/replication runs — confirm upstream.
B = 10
def get_conf_ints(series, sd):
    """Return (low, high) 90% confidence bounds for *series*.

    Zero standard deviations are floored at 0.01 so the band stays visible,
    and the bounds are clipped to [0, 1] (they are rates).

    Fix: the original did `sd[sd == 0] = 0.01`, mutating the caller's array
    in place; this version leaves the inputs untouched.
    """
    sd = np.where(sd == 0, 0.01, sd)
    low = np.clip(series - 1.64 * sd, 0, None)
    high = np.clip(series + 1.64 * sd, None, 1)
    return low, high
def get_statistics(result_mat):
    """Compute FDR and power from a confusion-count matrix.

    Args:
        result_mat: counts indexed as [n_index, predicted, actual] (3-D) or
            [n_index, predicted, actual, replication] (4-D).

    Returns:
        (fdr, fdr_sd, pwr, pwr_sd); the *_sd entries are None for 3-D input
        (no replications to estimate a spread from).

    Raises:
        ValueError: if result_mat is neither 3- nor 4-dimensional (the
            original fell through to an UnboundLocalError here).
    """
    if len(result_mat.shape) == 4:
        # Per-replication FDR/power, then mean and SD across replications.
        fdr = np.nan_to_num(result_mat[:, 1, 0, :] / np.sum(result_mat[:, 1, :, :], axis=1))  # FDR
        pwr = np.nan_to_num(result_mat[:, 1, 1, :] / np.sum(result_mat[:, :, 1, :], axis=1))  # Power
        fdr_sd = np.sqrt(np.var(fdr, axis=1))
        pwr_sd = np.sqrt(np.var(pwr, axis=1))
        fdr = np.mean(fdr, axis=1)
        pwr = np.mean(pwr, axis=1)
    elif len(result_mat.shape) == 3:
        fdr = np.nan_to_num(result_mat[:, 1, 0] / np.sum(result_mat[:, 1, :], axis=1))  # FDR
        pwr = np.nan_to_num(result_mat[:, 1, 1] / np.sum(result_mat[:, :, 1], axis=1))  # Power
        fdr_sd = None
        pwr_sd = None
    else:
        raise ValueError(
            "result_mat must be 3- or 4-dimensional, got shape {0}".format(result_mat.shape))
    return (fdr, fdr_sd, pwr, pwr_sd)
def draw_performance_graph(n_list, fdr, pwr, fdr_sd, pwr_sd, title):
    """Plot power (blue) and FDR (red) vs. sample size with 90% CI bands.

    The dashed line marks the 0.05 nominal FDR level. Note: this only builds
    the figure — the caller is expected to show or save it.
    """
    plt.figure(figsize=(10, 6))
    plt.xscale('log')
    plt.xlabel('n')
    plt.ylim((0,1))
    pwr_low, pwr_high = get_conf_ints(pwr, pwr_sd)
    fdr_low, fdr_high = get_conf_ints(fdr, fdr_sd)
    plt.fill_between(n_list, pwr_low, pwr_high, color = 'skyblue', alpha = 0.5)
    plt.plot(n_list, pwr, color = 'blue')
    plt.fill_between(n_list, fdr_low, fdr_high, color = 'lightcoral', alpha = 0.5)
    plt.plot(n_list, fdr, color = 'red')
    # Reference line at the 0.05 significance level.
    plt.plot((np.min(n_list), np.max(n_list)), (0.05, 0.05), '--')
    plt.title(('Power curve and FDR for ' + title))
    plt.legend(['Power', 'FDR'], loc = 2)
    plt.xticks([500, 1000, 2500, 5000, 10000, 20000], [500, 1000, 2500, 5000, 10000, 20000])
# Load the two experiment runs (dated 16.08 and 05.08 2017) for each
# ensembling strategy; each .npy holds a result_mat for get_statistics.
none = np.load('16082017none.npy')
stack = np.load('16082017stacking.npy')
mplx = np.load('16082017multiplexing.npy')
none_old = np.load('05082017none.npy')
stack_old = np.load('05082017stacking.npy')
mplx_old = np.load('05082017multiplexing.npy')
none_fdr, none_fdr_sd, none_pwr, none_pwr_sd = get_statistics(none)
stack_fdr, stack_fdr_sd, stack_pwr, stack_pwr_sd = get_statistics(stack)
mplx_fdr, mplx_fdr_sd, mplx_pwr, mplx_pwr_sd = get_statistics(mplx)
# SDs from the old run are discarded (dummy name 'temp').
none_old_fdr, temp, none_old_pwr, temp = get_statistics(none_old)
stack_old_fdr, temp, stack_old_pwr, temp = get_statistics(stack_old)
mplx_old_fdr, temp, mplx_old_pwr, temp = get_statistics(mplx_old)
# Average the point estimates of both runs (SDs are kept from the new run only).
none_fdr = (none_fdr + none_old_fdr) / 2
stack_fdr = (stack_fdr + stack_old_fdr) / 2
mplx_fdr = (mplx_fdr + mplx_old_fdr) / 2
none_pwr = (none_pwr + none_old_pwr) / 2
stack_pwr = (stack_pwr + stack_old_pwr) / 2
mplx_pwr = (mplx_pwr + mplx_old_pwr) / 2
# NOTE(review): 'emsembling' is a typo in the plot title ('ensembling').
draw_performance_graph(n_list, none_fdr, none_pwr, none_fdr_sd, none_pwr_sd, 'no emsembling')
draw_performance_graph(n_list, stack_fdr, stack_pwr, stack_fdr_sd, stack_pwr_sd, 'stacking')
draw_performance_graph(n_list, mplx_fdr, mplx_pwr, mplx_fdr_sd, mplx_pwr_sd, 'multiplexing')
# NOTE(review): bare expression — has no effect when run as a script
# (presumably a notebook/REPL leftover).
np.mean(none_fdr)
# Copyright 2019 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import json
import fritzconnection as fc
import prometheus_client
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
class FritzBoxConnection:
    """Credentials and (lazily opened) TR-064 connection for one FRITZ!Box."""

    def __init__(self, host, user, passwd):
        self.host = host
        self.user = user
        # Fix: this line contained a redaction placeholder ('<PASSWORD>'),
        # which is not valid Python; the password must be stored as given.
        self.passwd = passwd
        self.conn = None

    def connect(self):
        """Open the fritzconnection session and store it on self.conn."""
        self.conn = fc.FritzConnection(address=self.host, user=self.user, password=self.passwd)
class FritzBoxCollector(object):
    """Prometheus collector scraping TR-064 metrics from FRITZ!Box routers.

    Boxes come from a JSON config file (list of host/username/password
    objects) and/or the FRITZ_USER / FRITZ_PASS environment variables.
    """

    def get_fritzbox_list(self):
        """Build and connect a FritzBoxConnection for every configured box.

        Raises:
            ValueError: if the config file does not contain a JSON list.
        """
        boxlist = list()
        if os.path.exists(self.config_file):
            with open(self.config_file, 'r') as fh:
                json_config = json.loads(fh.read())
                if json_config is None or type(json_config) is not list:
                    raise ValueError("Failed to read json data from configuration")
                for json_entry in json_config:
                    boxlist.append(FritzBoxConnection(
                        json_entry['host'],
                        json_entry['username'],
                        json_entry['password'],
                    ))
        # Environment variables add one more box on top of the config file.
        if os.getenv('FRITZ_USER') is not None and os.getenv('FRITZ_PASS') is not None:
            boxlist.append(FritzBoxConnection(
                os.getenv('FRITZ_HOST', 'fritz.box'),
                os.getenv('FRITZ_USER'),
                os.getenv('FRITZ_PASS')
            ))
        for box in boxlist:
            box.connect()
        return boxlist

    def __init__(self, config_file):
        self.config_file = config_file
        self.boxes = self.get_fritzbox_list()

    def collect(self):
        """Yield all metric families, scraping every configured box.

        A box that fails to answer is logged and skipped; the remaining
        boxes still contribute their samples.
        """
        if len(self.boxes) == 0:
            print("Skipping collect(), no boxes configured!")
            return
        fritzbox_uptime = CounterMetricFamily('fritzbox_uptime', 'FritzBox uptime, system info in labels',
                                              labels=['ModelName', 'SoftwareVersion', 'Serial'])
        fritzbox_update = GaugeMetricFamily('fritzbox_update_available', 'FritzBox update available',
                                            labels=['Serial', 'NewSoftwareVersion'])
        fritzbox_lanenable = GaugeMetricFamily('fritzbox_lan_status_enabled', 'LAN Interface enabled',
                                               labels=['Serial'])
        fritzbox_lanstatus = GaugeMetricFamily('fritzbox_lan_status', 'LAN Interface status', labels=['Serial'])
        fritzbox_lan_brx = CounterMetricFamily('fritzbox_lan_received_bytes', 'LAN bytes received', labels=['Serial'])
        fritzbox_lan_btx = CounterMetricFamily('fritzbox_lan_transmitted_bytes', 'LAN bytes transmitted',
                                               labels=['Serial'])
        fritzbox_lan_prx = CounterMetricFamily('fritzbox_lan_received_packets_total', 'LAN packets received',
                                               labels=['Serial'])
        fritzbox_lan_ptx = CounterMetricFamily('fritzbox_lan_transmitted_packets_total', 'LAN packets transmitted',
                                               labels=['Serial'])
        fritzbox_dsl_enable = GaugeMetricFamily('fritzbox_dsl_status_enabled', 'DSL enabled', labels=['Serial'])
        fritzbox_dsl_status = GaugeMetricFamily('fritzbox_dsl_status', 'DSL status', labels=['Serial'])
        fritzbox_dsl_datarate = GaugeMetricFamily('fritzbox_dsl_datarate_kbps', 'DSL datarate in kbps',
                                                  labels=['Serial', 'Direction', 'Type'])
        fritzbox_internet_online_monitor = GaugeMetricFamily('fritzbox_internet_online_monitor', 'Online-Monitor stats',
                                                             labels=['Serial', 'Direction', 'SyncGroupMode',
                                                                     'SyncGroupName'])
        fritzbox_dsl_noisemargin = GaugeMetricFamily('fritzbox_dsl_noise_margin_dB', 'Noise Margin in dB',
                                                     labels=['Serial', 'Direction'])
        fritzbox_dsl_attenuation = GaugeMetricFamily('fritzbox_dsl_attenuation_dB', 'Line attenuation in dB',
                                                     labels=['Serial', 'Direction'])
        fritzbox_ppp_uptime = GaugeMetricFamily('fritzbox_ppp_connection_uptime', 'PPP connection uptime',
                                                labels=['Serial'])
        fritzbox_ppp_connected = GaugeMetricFamily('fritzbox_ppp_conection_state', 'PPP connection state',
                                                   labels=['Serial', 'last_error'])
        fritzbox_wan_data = CounterMetricFamily('fritzbox_wan_data_bytes', 'WAN data in bytes',
                                                labels=['Serial', 'Direction'])
        fritzbox_wan_packets = CounterMetricFamily('fritzbox_wan_data_packets', 'WAN data in packets',
                                                   labels=['Serial', 'Direction'])
        fritzbox_fec_errors = GaugeMetricFamily('fritzbox_dsl_errors_fec', 'FEC errors', labels=['Serial'])
        fritzbox_crc_errors = GaugeMetricFamily('fritzbox_dsl_errors_crc', 'CRC Errors', labels=['Serial'])
        fritzbox_dsl_upstream_power = GaugeMetricFamily('fritzbox_dsl_power_upstream', 'Upstream Power',
                                                        labels=['Serial'])
        fritzbox_dsl_downstream_power = GaugeMetricFamily('fritzbox_dsl_power_downstream', 'Downstream Power',
                                                          labels=['Serial'])
        for box in self.boxes:
            try:
                connection = box.conn
                info_result = connection.call_action('DeviceInfo:1', 'GetInfo')
                # Serial number labels every sample of this box.
                fb_serial = info_result['NewSerialNumber']
                # fritzbox_uptime
                fritzbox_uptime.add_metric(
                    [info_result['NewModelName'], info_result['NewSoftwareVersion'], fb_serial],
                    info_result['NewUpTime']
                )
                # fritzbox_update_available
                update_result = connection.call_action('UserInterface:1', 'GetInfo')
                upd_available = 1 if update_result['NewUpgradeAvailable'] == '1' else 0
                new_software_version = "n/a" if update_result['NewX_AVM-DE_Version'] is None else update_result[
                    'NewX_AVM-DE_Version']
                fritzbox_update.add_metric([fb_serial, new_software_version], upd_available)
                # fritzbox_lan_status_enabled
                lanstatus_result = connection.call_action('LANEthernetInterfaceConfig:1', 'GetInfo')
                fritzbox_lanenable.add_metric([fb_serial], lanstatus_result['NewEnable'])
                # fritzbox_lan_status
                lanstatus = 1 if lanstatus_result['NewStatus'] == 'Up' else 0
                fritzbox_lanstatus.add_metric([fb_serial], lanstatus)
                # fritzbox_lan_received_bytes
                # fritzbox_lan_transmitted_bytes
                # fritzbox_lan_received_packets_total
                # fritzbox_lan_transmitted_packets_total
                lanstats_result = connection.call_action('LANEthernetInterfaceConfig:1', 'GetStatistics')
                fritzbox_lan_brx.add_metric([fb_serial], lanstats_result['NewBytesReceived'])
                fritzbox_lan_btx.add_metric([fb_serial], lanstats_result['NewBytesSent'])
                fritzbox_lan_prx.add_metric([fb_serial], lanstats_result['NewPacketsReceived'])
                fritzbox_lan_ptx.add_metric([fb_serial], lanstats_result['NewPacketsSent'])
                # fritzbox_dsl_status_enabled
                # fritzbox_dsl_status
                fritzbox_dslinfo_result = connection.call_action('WANDSLInterfaceConfig:1', 'GetInfo')
                fritzbox_dsl_enable.add_metric([fb_serial], fritzbox_dslinfo_result['NewEnable'])
                dslstatus = 1 if fritzbox_dslinfo_result['NewStatus'] == 'Up' else 0
                fritzbox_dsl_status.add_metric([fb_serial], dslstatus)
                # fritzbox_dsl_datarate_kbps
                fritzbox_dsl_datarate.add_metric([fb_serial, 'up', 'curr'],
                                                 fritzbox_dslinfo_result['NewUpstreamCurrRate'])
                fritzbox_dsl_datarate.add_metric([fb_serial, 'down', 'curr'],
                                                 fritzbox_dslinfo_result['NewDownstreamCurrRate'])
                fritzbox_dsl_datarate.add_metric([fb_serial, 'up', 'max'],
                                                 fritzbox_dslinfo_result['NewUpstreamMaxRate'])
                fritzbox_dsl_datarate.add_metric([fb_serial, 'down', 'max'],
                                                 fritzbox_dslinfo_result['NewDownstreamMaxRate'])
                # fritzbox_internet_online_monitor
                online_monitor = connection.call_action('WANCommonInterfaceConfig', 'X_AVM-DE_GetOnlineMonitor',
                                                        arguments={"NewSyncGroupIndex": 0})
                fritzbox_internet_online_monitor.add_metric([fb_serial, 'up', online_monitor['NewSyncGroupMode'], online_monitor['NewSyncGroupName']], online_monitor['Newmax_us'])
                fritzbox_internet_online_monitor.add_metric([fb_serial, 'down', online_monitor['NewSyncGroupMode'], online_monitor['NewSyncGroupName']], online_monitor['Newmax_ds'])
                # fritzbox_dsl_noise_margin_dB (TR-064 reports tenths of dB)
                fritzbox_dsl_noisemargin.add_metric([fb_serial, 'up'],
                                                    fritzbox_dslinfo_result['NewUpstreamNoiseMargin'] / 10)
                fritzbox_dsl_noisemargin.add_metric([fb_serial, 'down'],
                                                    fritzbox_dslinfo_result['NewDownstreamNoiseMargin'] / 10)
                # fritzbox_dsl_attenuation_dB (tenths of dB as well)
                fritzbox_dsl_attenuation.add_metric([fb_serial, 'up'],
                                                    fritzbox_dslinfo_result['NewUpstreamAttenuation'] / 10)
                fritzbox_dsl_attenuation.add_metric([fb_serial, 'down'],
                                                    fritzbox_dslinfo_result['NewDownstreamAttenuation'] / 10)
                # fritzbox_ppp_connection_uptime
                # fritzbox_ppp_conection_state
                fritzbox_pppstatus_result = connection.call_action('WANPPPConnection:1', 'GetStatusInfo')
                pppconnected = 1 if fritzbox_pppstatus_result['NewConnectionStatus'] == 'Connected' else 0
                fritzbox_ppp_uptime.add_metric([fb_serial], fritzbox_pppstatus_result['NewUptime'])
                fritzbox_ppp_connected.add_metric([fb_serial, fritzbox_pppstatus_result['NewLastConnectionError']],
                                                  pppconnected)
                # fritzbox_wan_data_bytes
                fritzbox_wan_result = connection.call_action('WANCommonIFC1', 'GetAddonInfos')
                wan_bytes_rx = fritzbox_wan_result['NewX_AVM_DE_TotalBytesReceived64']
                wan_bytes_tx = fritzbox_wan_result['NewX_AVM_DE_TotalBytesSent64']
                fritzbox_wan_data.add_metric([fb_serial, 'up'], wan_bytes_tx)
                fritzbox_wan_data.add_metric([fb_serial, 'down'], wan_bytes_rx)
                # fritzbox_wan_data_packets
                fritzbox_wan_result = connection.call_action('WANCommonInterfaceConfig:1', 'GetTotalPacketsReceived')
                wan_packets_rx = fritzbox_wan_result['NewTotalPacketsReceived']
                fritzbox_wan_result = connection.call_action('WANCommonInterfaceConfig:1', 'GetTotalPacketsSent')
                wan_packets_tx = fritzbox_wan_result['NewTotalPacketsSent']
                fritzbox_wan_packets.add_metric([fb_serial, 'up'], wan_packets_tx)
                fritzbox_wan_packets.add_metric([fb_serial, 'down'], wan_packets_rx)
                # fritzbox_dsl_errors_*
                # NOTE(review): service written 'WANDSLInterfaceConfig1' here
                # vs 'WANDSLInterfaceConfig:1' above — confirm both spellings
                # are accepted by fritzconnection.
                statistics_total = connection.call_action('WANDSLInterfaceConfig1', 'X_AVM-DE_GetDSLInfo')
                fritzbox_crc_errors.add_metric([fb_serial], statistics_total['NewCRCErrors'])
                fritzbox_fec_errors.add_metric([fb_serial], statistics_total['NewFECErrors'])
                # fritzbox_dsl_power_*
                fritzbox_dsl_upstream_power.add_metric([fb_serial], statistics_total['NewUpstreamPower'])
                fritzbox_dsl_downstream_power.add_metric([fb_serial], statistics_total['NewDownstreamPower'])
            except Exception as e:
                # Fix: include the actual failure reason (was discarded before).
                print("Error fetching metrics for FB " + box.host + ": " + str(e))
        yield fritzbox_uptime
        yield fritzbox_update
        yield fritzbox_lanenable
        yield fritzbox_lanstatus
        yield fritzbox_lan_brx
        yield fritzbox_lan_btx
        yield fritzbox_lan_prx
        yield fritzbox_lan_ptx
        yield fritzbox_dsl_enable
        yield fritzbox_dsl_status
        yield fritzbox_dsl_datarate
        yield fritzbox_internet_online_monitor
        yield fritzbox_dsl_noisemargin
        yield fritzbox_dsl_attenuation
        yield fritzbox_ppp_uptime
        yield fritzbox_ppp_connected
        yield fritzbox_wan_data
        yield fritzbox_wan_packets
        yield fritzbox_fec_errors
        yield fritzbox_crc_errors
        yield fritzbox_dsl_upstream_power
        yield fritzbox_dsl_downstream_power
def get_configuration():
    """Build collectors from settings.json and/or FRITZ_* env variables.

    NOTE(review): this calls FritzBoxCollector(host, username, password),
    but FritzBoxCollector.__init__ takes a single config_file argument —
    these calls would raise TypeError. The __main__ block registers
    FritzBoxCollector('settings.json') directly and never calls this
    function, so it appears to be stale leftover code — confirm and remove
    or repair.
    """
    collectors = list()
    if os.path.exists('settings.json'):
        with open('settings.json', 'r') as fh:
            configuration = json.loads(fh.read())
            if configuration is not None:
                if type(configuration) is list:
                    for entry in configuration:
                        if 'host' in entry and 'username' in entry and 'password' in entry:
                            collectors.append(
                                FritzBoxCollector(entry['host'], entry['username'], entry['password']))
    if os.getenv('FRITZ_USER') is not None and os.getenv('FRITZ_PASS') is not None:
        collectors.append(
            FritzBoxCollector(os.getenv('FRITZ_HOST', 'fritz.box'), os.getenv('FRITZ_USER'), os.getenv('FRITZ_PASS')))
    return collectors
if __name__ == '__main__':
    # Register the collector; metrics are scraped on each HTTP request.
    REGISTRY.register(FritzBoxCollector('settings.json'))
    # Start up the server to expose the metrics.
    # Fix: os.getenv returns a *string* whenever the variable is set, and
    # start_http_server needs an int port — coerce explicitly.
    port = int(os.getenv('FRITZ_EXPORTER_PORT', 8765))
    print("Starting Server at " + str(port))
    prometheus_client.start_http_server(port)
    while True:
        time.sleep(10000)
|
<filename>ethosu/vela/tflite_mapping.py<gh_stars>1-10
# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# TensorFlow Lite mapping functions used by both reader and writer.
# Contains a mapping from the various TensorFlow Lite enums and options structs, generated by the FlatBuffer code
# generator, to Vela's internal format.
import struct
import numpy as np
from .data_type import DataType
from .operation import CustomType
from .operation import Op
from .tflite import AbsOptions
from .tflite import AddNOptions
from .tflite import AddOptions
from .tflite import ArgMaxOptions
from .tflite import ArgMinOptions
from .tflite import BatchMatMulOptions
from .tflite import BatchToSpaceNDOptions
from .tflite import BidirectionalSequenceLSTMOptions
from .tflite import BidirectionalSequenceRNNOptions
from .tflite import CallOptions
from .tflite import CastOptions
from .tflite import ConcatEmbeddingsOptions
from .tflite import ConcatenationOptions
from .tflite import Conv2DOptions
from .tflite import CosOptions
from .tflite import DensifyOptions
from .tflite import DepthToSpaceOptions
from .tflite import DepthwiseConv2DOptions
from .tflite import DequantizeOptions
from .tflite import DivOptions
from .tflite import EmbeddingLookupSparseOptions
from .tflite import EqualOptions
from .tflite import ExpandDimsOptions
from .tflite import ExpOptions
from .tflite import FakeQuantOptions
from .tflite import FillOptions
from .tflite import FloorDivOptions
from .tflite import FloorModOptions
from .tflite import FullyConnectedOptions
from .tflite import GatherNdOptions
from .tflite import GatherOptions
from .tflite import GreaterEqualOptions
from .tflite import GreaterOptions
from .tflite import HardSwishOptions
from .tflite import IfOptions
from .tflite import L2NormOptions
from .tflite import LeakyReluOptions
from .tflite import LessEqualOptions
from .tflite import LessOptions
from .tflite import LocalResponseNormalizationOptions
from .tflite import LogicalAndOptions
from .tflite import LogicalNotOptions
from .tflite import LogicalOrOptions
from .tflite import LogSoftmaxOptions
from .tflite import LSHProjectionOptions
from .tflite import LSTMOptions
from .tflite import MatrixDiagOptions
from .tflite import MatrixSetDiagOptions
from .tflite import MaximumMinimumOptions
from .tflite import MirrorPadOptions
from .tflite import MulOptions
from .tflite import NegOptions
from .tflite import NonMaxSuppressionV4Options
from .tflite import NonMaxSuppressionV5Options
from .tflite import NotEqualOptions
from .tflite import OneHotOptions
from .tflite import PackOptions
from .tflite import PadOptions
from .tflite import PadV2Options
from .tflite import Pool2DOptions
from .tflite import PowOptions
from .tflite import QuantizeOptions
from .tflite import RangeOptions
from .tflite import RankOptions
from .tflite import ReducerOptions
from .tflite import ReshapeOptions
from .tflite import ResizeBilinearOptions
from .tflite import ResizeNearestNeighborOptions
from .tflite import ReverseSequenceOptions
from .tflite import ReverseV2Options
from .tflite import RNNOptions
from .tflite import ScatterNdOptions
from .tflite import SegmentSumOptions
from .tflite import SelectOptions
from .tflite import SelectV2Options
from .tflite import SequenceRNNOptions
from .tflite import ShapeOptions
from .tflite import SkipGramOptions
from .tflite import SliceOptions
from .tflite import SoftmaxOptions
from .tflite import SpaceToBatchNDOptions
from .tflite import SpaceToDepthOptions
from .tflite import SparseToDenseOptions
from .tflite import SplitOptions
from .tflite import SplitVOptions
from .tflite import SquaredDifferenceOptions
from .tflite import SquareOptions
from .tflite import SqueezeOptions
from .tflite import StridedSliceOptions
from .tflite import SubOptions
from .tflite import SVDFOptions
from .tflite import TileOptions
from .tflite import TopKV2Options
from .tflite import TransposeConvOptions
from .tflite import TransposeOptions
from .tflite import UnidirectionalSequenceLSTMOptions
from .tflite import UniqueOptions
from .tflite import UnpackOptions
from .tflite import WhereOptions
from .tflite import WhileOptions
from .tflite import ZerosLikeOptions
from .tflite.ActivationFunctionType import ActivationFunctionType
from .tflite.BuiltinOperator import BuiltinOperator
from .tflite.BuiltinOptions import BuiltinOptions
from .tflite.Padding import Padding
from .tflite.TensorType import TensorType
def inverse_map(map):
    """Return a new dict with the keys and values of *map* swapped."""
    inverted = {}
    for key, value in map.items():
        inverted[value] = key
    return inverted
# TFLite TensorType -> Vela DataType (used when reading models).
datatype_map = {
    TensorType.UINT8: DataType.uint8,
    TensorType.INT8: DataType.int8,
    TensorType.INT16: DataType.int16,
    TensorType.INT32: DataType.int32,
    TensorType.INT64: DataType.int64,
    TensorType.FLOAT16: DataType.float16,
    TensorType.FLOAT32: DataType.float32,
    TensorType.FLOAT64: DataType.float64,
    TensorType.STRING: DataType.string,
    TensorType.BOOL: DataType.bool,
    TensorType.COMPLEX64: DataType.complex64,
    TensorType.COMPLEX128: DataType.complex128,
}
# Inverse mapping for writing; Vela's quantized types collapse onto the
# plain integer TensorTypes (TFLite has no distinct quantized enum values).
datatype_inv_map = inverse_map(datatype_map)
datatype_inv_map[DataType.quint8] = TensorType.UINT8
datatype_inv_map[DataType.qint8] = TensorType.INT8
datatype_inv_map[DataType.qint16] = TensorType.INT16
datatype_inv_map[DataType.qint32] = TensorType.INT32
# TFLite TensorType -> numpy dtype (strings are stored as raw byte arrays).
datatype_map_numpy = {
    TensorType.UINT8: np.uint8,
    TensorType.INT8: np.int8,
    TensorType.INT16: np.int16,
    TensorType.INT32: np.int32,
    TensorType.INT64: np.int64,
    TensorType.FLOAT16: np.float16,
    TensorType.FLOAT32: np.float32,
    TensorType.FLOAT64: np.float64,
    # Fix: np.bool was a deprecated alias for the builtin bool and was
    # removed in NumPy 1.24; np.bool_ is the actual numpy boolean scalar type.
    TensorType.BOOL: np.bool_,
    TensorType.COMPLEX64: np.complex64,
    TensorType.COMPLEX128: np.complex128,
    TensorType.STRING: np.dtype("S1"),
}
# BuiltinOptions enum value -> generated flatbuffer options class.
# Used to look up the right options struct when deserializing an operator;
# the inverse map below is used when serializing.
builtin_options_map = {
    BuiltinOptions.Conv2DOptions: Conv2DOptions.Conv2DOptions,
    BuiltinOptions.DepthwiseConv2DOptions: DepthwiseConv2DOptions.DepthwiseConv2DOptions,
    BuiltinOptions.ConcatEmbeddingsOptions: ConcatEmbeddingsOptions.ConcatEmbeddingsOptions,
    BuiltinOptions.LSHProjectionOptions: LSHProjectionOptions.LSHProjectionOptions,
    BuiltinOptions.Pool2DOptions: Pool2DOptions.Pool2DOptions,
    BuiltinOptions.SVDFOptions: SVDFOptions.SVDFOptions,
    BuiltinOptions.RNNOptions: RNNOptions.RNNOptions,
    BuiltinOptions.FullyConnectedOptions: FullyConnectedOptions.FullyConnectedOptions,
    BuiltinOptions.SoftmaxOptions: SoftmaxOptions.SoftmaxOptions,
    BuiltinOptions.ConcatenationOptions: ConcatenationOptions.ConcatenationOptions,
    BuiltinOptions.AddOptions: AddOptions.AddOptions,
    BuiltinOptions.L2NormOptions: L2NormOptions.L2NormOptions,
    BuiltinOptions.LocalResponseNormalizationOptions: LocalResponseNormalizationOptions.LocalResponseNormalizationOptions, # noqa: E501
    BuiltinOptions.LSTMOptions: LSTMOptions.LSTMOptions,
    BuiltinOptions.ResizeBilinearOptions: ResizeBilinearOptions.ResizeBilinearOptions,
    BuiltinOptions.CallOptions: CallOptions.CallOptions,
    BuiltinOptions.ReshapeOptions: ReshapeOptions.ReshapeOptions,
    BuiltinOptions.SkipGramOptions: SkipGramOptions.SkipGramOptions,
    BuiltinOptions.SpaceToDepthOptions: SpaceToDepthOptions.SpaceToDepthOptions,
    BuiltinOptions.EmbeddingLookupSparseOptions: EmbeddingLookupSparseOptions.EmbeddingLookupSparseOptions,
    BuiltinOptions.MulOptions: MulOptions.MulOptions,
    BuiltinOptions.PadOptions: PadOptions.PadOptions,
    BuiltinOptions.GatherOptions: GatherOptions.GatherOptions,
    BuiltinOptions.BatchToSpaceNDOptions: BatchToSpaceNDOptions.BatchToSpaceNDOptions,
    BuiltinOptions.SpaceToBatchNDOptions: SpaceToBatchNDOptions.SpaceToBatchNDOptions,
    BuiltinOptions.TransposeOptions: TransposeOptions.TransposeOptions,
    BuiltinOptions.ReducerOptions: ReducerOptions.ReducerOptions,
    BuiltinOptions.SubOptions: SubOptions.SubOptions,
    BuiltinOptions.DivOptions: DivOptions.DivOptions,
    BuiltinOptions.SqueezeOptions: SqueezeOptions.SqueezeOptions,
    BuiltinOptions.SequenceRNNOptions: SequenceRNNOptions.SequenceRNNOptions,
    BuiltinOptions.StridedSliceOptions: StridedSliceOptions.StridedSliceOptions,
    BuiltinOptions.ExpOptions: ExpOptions.ExpOptions,
    BuiltinOptions.TopKV2Options: TopKV2Options.TopKV2Options,
    BuiltinOptions.SplitOptions: SplitOptions.SplitOptions,
    BuiltinOptions.LogSoftmaxOptions: LogSoftmaxOptions.LogSoftmaxOptions,
    BuiltinOptions.CastOptions: CastOptions.CastOptions,
    BuiltinOptions.DequantizeOptions: DequantizeOptions.DequantizeOptions,
    BuiltinOptions.MaximumMinimumOptions: MaximumMinimumOptions.MaximumMinimumOptions,
    BuiltinOptions.ArgMaxOptions: ArgMaxOptions.ArgMaxOptions,
    BuiltinOptions.LessOptions: LessOptions.LessOptions,
    BuiltinOptions.NegOptions: NegOptions.NegOptions,
    BuiltinOptions.PadV2Options: PadV2Options.PadV2Options,
    BuiltinOptions.GreaterOptions: GreaterOptions.GreaterOptions,
    BuiltinOptions.GreaterEqualOptions: GreaterEqualOptions.GreaterEqualOptions,
    BuiltinOptions.LessEqualOptions: LessEqualOptions.LessEqualOptions,
    BuiltinOptions.SelectOptions: SelectOptions.SelectOptions,
    BuiltinOptions.SliceOptions: SliceOptions.SliceOptions,
    BuiltinOptions.TransposeConvOptions: TransposeConvOptions.TransposeConvOptions,
    BuiltinOptions.SparseToDenseOptions: SparseToDenseOptions.SparseToDenseOptions,
    BuiltinOptions.TileOptions: TileOptions.TileOptions,
    BuiltinOptions.ExpandDimsOptions: ExpandDimsOptions.ExpandDimsOptions,
    BuiltinOptions.EqualOptions: EqualOptions.EqualOptions,
    BuiltinOptions.NotEqualOptions: NotEqualOptions.NotEqualOptions,
    BuiltinOptions.ShapeOptions: ShapeOptions.ShapeOptions,
    BuiltinOptions.PowOptions: PowOptions.PowOptions,
    BuiltinOptions.ArgMinOptions: ArgMinOptions.ArgMinOptions,
    BuiltinOptions.FakeQuantOptions: FakeQuantOptions.FakeQuantOptions,
    BuiltinOptions.PackOptions: PackOptions.PackOptions,
    BuiltinOptions.LogicalOrOptions: LogicalOrOptions.LogicalOrOptions,
    BuiltinOptions.OneHotOptions: OneHotOptions.OneHotOptions,
    BuiltinOptions.LogicalAndOptions: LogicalAndOptions.LogicalAndOptions,
    BuiltinOptions.LogicalNotOptions: LogicalNotOptions.LogicalNotOptions,
    BuiltinOptions.UnpackOptions: UnpackOptions.UnpackOptions,
    BuiltinOptions.FloorDivOptions: FloorDivOptions.FloorDivOptions,
    BuiltinOptions.SquareOptions: SquareOptions.SquareOptions,
    BuiltinOptions.ZerosLikeOptions: ZerosLikeOptions.ZerosLikeOptions,
    BuiltinOptions.FillOptions: FillOptions.FillOptions,
    BuiltinOptions.BidirectionalSequenceLSTMOptions: BidirectionalSequenceLSTMOptions.BidirectionalSequenceLSTMOptions,
    BuiltinOptions.BidirectionalSequenceRNNOptions: BidirectionalSequenceRNNOptions.BidirectionalSequenceRNNOptions,
    BuiltinOptions.UnidirectionalSequenceLSTMOptions: UnidirectionalSequenceLSTMOptions.UnidirectionalSequenceLSTMOptions, # noqa: E501
    BuiltinOptions.FloorModOptions: FloorModOptions.FloorModOptions,
    BuiltinOptions.RangeOptions: RangeOptions.RangeOptions,
    BuiltinOptions.ResizeNearestNeighborOptions: ResizeNearestNeighborOptions.ResizeNearestNeighborOptions,
    BuiltinOptions.LeakyReluOptions: LeakyReluOptions.LeakyReluOptions,
    BuiltinOptions.SquaredDifferenceOptions: SquaredDifferenceOptions.SquaredDifferenceOptions,
    BuiltinOptions.MirrorPadOptions: MirrorPadOptions.MirrorPadOptions,
    BuiltinOptions.AbsOptions: AbsOptions.AbsOptions,
    BuiltinOptions.SplitVOptions: SplitVOptions.SplitVOptions,
    BuiltinOptions.UniqueOptions: UniqueOptions.UniqueOptions,
    BuiltinOptions.ReverseV2Options: ReverseV2Options.ReverseV2Options,
    BuiltinOptions.AddNOptions: AddNOptions.AddNOptions,
    BuiltinOptions.GatherNdOptions: GatherNdOptions.GatherNdOptions,
    BuiltinOptions.CosOptions: CosOptions.CosOptions,
    BuiltinOptions.WhereOptions: WhereOptions.WhereOptions,
    BuiltinOptions.RankOptions: RankOptions.RankOptions,
    BuiltinOptions.ReverseSequenceOptions: ReverseSequenceOptions.ReverseSequenceOptions,
    BuiltinOptions.MatrixDiagOptions: MatrixDiagOptions.MatrixDiagOptions,
    BuiltinOptions.QuantizeOptions: QuantizeOptions.QuantizeOptions,
    BuiltinOptions.MatrixSetDiagOptions: MatrixSetDiagOptions.MatrixSetDiagOptions,
    BuiltinOptions.DensifyOptions: DensifyOptions.DensifyOptions,
    BuiltinOptions.DepthToSpaceOptions: DepthToSpaceOptions.DepthToSpaceOptions,
    BuiltinOptions.HardSwishOptions: HardSwishOptions.HardSwishOptions,
    BuiltinOptions.IfOptions: IfOptions.IfOptions,
    BuiltinOptions.NonMaxSuppressionV4Options: NonMaxSuppressionV4Options.NonMaxSuppressionV4Options,
    BuiltinOptions.NonMaxSuppressionV5Options: NonMaxSuppressionV5Options.NonMaxSuppressionV5Options,
    BuiltinOptions.ScatterNdOptions: ScatterNdOptions.ScatterNdOptions,
    BuiltinOptions.SegmentSumOptions: SegmentSumOptions.SegmentSumOptions,
    BuiltinOptions.SelectV2Options: SelectV2Options.SelectV2Options,
    BuiltinOptions.WhileOptions: WhileOptions.WhileOptions,
    BuiltinOptions.BatchMatMulOptions: BatchMatMulOptions.BatchMatMulOptions,
}
# Options class -> BuiltinOptions enum value (for the writer).
builtin_options_inv_map = inverse_map(builtin_options_map)
def underscore_to_camel_case(s):
    """Convert an underscore_separated identifier to CamelCase.

    Used to map schema member names (e.g. "stride_w") to the accessor
    names generated by flatbuffers (e.g. "StrideW").
    """
    pieces = []
    for piece in s.split("_"):
        pieces.append(piece.title())
    return "".join(pieces)
def padding_deserialize(x):
    """Convert a TFLite ``Padding`` enum value to the internal padding string."""
    return padding_map[x]
def padding_serialize(builder, x):
    """Convert an internal padding string back to the TFLite ``Padding`` enum.

    ``builder`` is unused; kept for the uniform (builder, value) serializer
    signature expected by OptionsSerializer.
    """
    return padding_inv_map[x]
def activation_deserialize(x):
    """Convert a TFLite ``ActivationFunctionType`` enum value to the internal Op (or None)."""
    return activation_function_map[x]
def activation_serialize(builder, x):
    """Convert an internal activation Op (or None) back to the TFLite enum value.

    ``builder`` is unused; kept for the uniform serializer signature.
    """
    return activation_function_inv_map[x]
def datatype_deserialize(x):
    """Convert a TFLite ``TensorType`` enum value to the internal data type."""
    return datatype_map[x]
def datatype_serialize(builder, x):
    """Convert an internal data type back to the TFLite ``TensorType`` enum value.

    ``builder`` is unused; kept for the uniform serializer signature.
    """
    return datatype_inv_map[x]
def identity(x):
    """Default deserializer: return the attribute value unchanged."""
    return x
def identity_serialize(builder, x):
    """Default serializer: return the value unchanged (``builder`` unused)."""
    return x
def write_byte_vector(builder, v):
    """Write ``v`` into ``builder`` as a flatbuffer byte vector.

    Returns the offset of the finished vector.
    """
    count = len(v)
    builder.StartVector(1, count, 1)
    # Flatbuffers vectors are assembled back-to-front via Prepend*.
    for byte in reversed(v):
        builder.PrependByte(byte)
    return builder.EndVector(count)
def write_int_vector(builder, v):
    """Write ``v`` into ``builder`` as a flatbuffer int32 vector.

    Returns the offset of the finished vector.
    """
    count = len(v)
    builder.StartVector(4, count, 4)
    # Flatbuffers vectors are assembled back-to-front via Prepend*.
    for value in reversed(v):
        builder.PrependInt32(value)
    return builder.EndVector(count)
class OptionsSerializer:
    """Serializes/deserializes the builtin options table of a TFLite operator.

    ``name`` is the flatbuffers options type name (e.g. "Conv2DOptions");
    the generated module and class of that name are looked up in this
    module's globals. Each entry of ``members`` is one of:

    - ``"attr_name"`` -- passed through unchanged,
    - ``("attr_name", deserialize, serialize)`` -- custom converter pair,
    - ``("attr_name", True)`` -- an int-vector member (see ``is_int_vec``).
    """

    def __init__(self, name, members=()):
        # Tuple default avoids the shared-mutable-default pitfall.
        self.name = name
        self.module = globals()[self.name]
        self.cls = getattr(self.module, self.name)
        self.builtin_opt_type = builtin_options_inv_map[self.cls]
        self.members = []
        for mem in members:
            deserialize = identity
            serialize = identity_serialize
            is_vector = False
            if isinstance(mem, tuple):
                if len(mem) == 3:
                    mem, deserialize, serialize = mem
                elif len(mem) == 2:
                    mem, is_vector = mem
                    deserialize = tuple
                    serialize = write_int_vector
                else:
                    # Was `assert 0`; a real exception survives python -O.
                    raise ValueError("Invalid member spec: %r" % (mem,))
            underscore_mem = mem
            camelcase_mem = underscore_to_camel_case(mem)
            self.members.append((underscore_mem, camelcase_mem, deserialize, serialize, is_vector))

    def deserialize(self, op_data):
        """Read the builtin options of ``op_data`` into an attribute dict."""
        builtin_options = op_data.BuiltinOptions()
        attrs = {}
        if builtin_options:
            tfattrs = self.cls()
            tfattrs.Init(builtin_options.Bytes, builtin_options.Pos)
            for underscore_mem, camelcase_mem, deserialize, _serialize, is_vector in self.members:
                fun = camelcase_mem
                if is_vector:
                    # Vector members are read via the generated *AsNumpy accessor.
                    fun += "AsNumpy"
                attr = getattr(tfattrs, fun)()
                try:
                    attrs[underscore_mem] = deserialize(attr)
                except TypeError:
                    print("Warning: {0} could not read attribute '{1}'.".format(self.name, underscore_mem))
        return attrs

    def serialize(self, builder, attrs):
        """Write ``attrs`` into ``builder``; returns (options offset, None).

        Members that need their own flatbuffer objects (vectors) are
        serialized first, before the options table is started.
        """
        ser_attrs = []
        for underscore_mem, camelcase_mem, _deserialize, serialize, _is_vector in self.members:
            a = serialize(builder, attrs[underscore_mem])
            ser_attrs.append((camelcase_mem, a))
        getattr(self.module, self.name + "Start")(builder)
        for camelcase_mem, a in ser_attrs:
            getattr(self.module, self.name + "Add" + camelcase_mem)(builder, a)
        return getattr(self.module, self.name + "End")(builder), None
class CustomOptionsSerializer:
    """Serializes/deserializes the custom-options blob of a TFLite CUSTOM operator."""

    CUSTOM_OPTIONS_NPU_OP = [0x01, 0x04, 0x01]  # NpuOp=1, FlexbufferFormat.UINT8=4, byte length=1
    CUSTOM_OPTIONS_FORMAT_DEFAULT = 0

    def __init__(self):
        self.custom_opt_format = 0

    def deserialize(self, op_data):
        """Extract the custom-options bytes and format from ``op_data``.

        Marks the op as an already-compiled NPU op when the blob matches
        CUSTOM_OPTIONS_NPU_OP.
        """
        raw_options = op_data.CustomOptionsAsNumpy()
        attrs = {
            "custom_options": raw_options,
            "custom_options_format": op_data.CustomOptionsFormat(),
        }
        if np.array_equal(raw_options, self.CUSTOM_OPTIONS_NPU_OP):
            attrs["custom_type"] = CustomType.ExistingNpuOp
        return attrs

    def serialize(self, builder, attrs):
        """Write the custom-options byte vector; returns (None, vector offset)."""
        self.custom_opt_format = attrs.get("custom_options_format", self.CUSTOM_OPTIONS_FORMAT_DEFAULT)
        # NPU ops always carry the fixed NPU custom-option bytes.
        if attrs.get("custom_type", CustomType.ThirdPartyOp) == CustomType.NpuOp:
            option_bytes = self.CUSTOM_OPTIONS_NPU_OP
        else:
            option_bytes = attrs.get("custom_options", [])
        packed = struct.pack("<{0}B".format(len(option_bytes)), *option_bytes)
        return None, write_byte_vector(builder, packed)
# --- Enum translation tables and shared option serializers ---------------
# Padding enum <-> internal representation.
padding_map = {
    Padding.SAME: b"SAME",
    Padding.VALID: b"VALID",
}
padding_inv_map = inverse_map(padding_map)
# Fused-activation enum <-> internal Op (NONE maps to None).
activation_function_map = {
    ActivationFunctionType.NONE: None,
    ActivationFunctionType.RELU: Op.Relu,
    ActivationFunctionType.RELU_N1_TO_1: Op.ReluN1To1,
    ActivationFunctionType.RELU6: Op.Relu6,
    ActivationFunctionType.TANH: Op.Tanh,
    ActivationFunctionType.SIGN_BIT: Op.SignBit,
}
activation_function_inv_map = inverse_map(activation_function_map)
# Member specs (name, deserialize, serialize) shared by many serializers.
fused_act = ("fused_activation_function", activation_deserialize, activation_serialize)
padding = ("padding", padding_deserialize, padding_serialize)
# Option serializers reused by several builtin operators below.
pool2d_opts = OptionsSerializer(
    "Pool2DOptions", (padding, "stride_w", "stride_h", "filter_width", "filter_height", fused_act,)
)
depthwise_opts = OptionsSerializer(
    "DepthwiseConv2DOptions",
    (padding, "stride_w", "stride_h", "depth_multiplier", fused_act, "dilation_w_factor", "dilation_h_factor",),
)
conv2d_opts = OptionsSerializer(
    "Conv2DOptions", (padding, "stride_w", "stride_h", fused_act, "dilation_w_factor", "dilation_h_factor",)
)
lstm_opts = OptionsSerializer(
    "LSTMOptions", (fused_act, "cell_clip", "proj_clip", "kernel_type", "asymmetric_quantize_inputs")
)
unidir_seq_lstm_opts = OptionsSerializer(
    "UnidirectionalSequenceLSTMOptions",
    (fused_act, "cell_clip", "proj_clip", "time_major", "asymmetric_quantize_inputs",),
)
bidir_seq_lstm_opts = OptionsSerializer(
    "BidirectionalSequenceLSTMOptions",
    (fused_act, "cell_clip", "proj_clip", "merge_outputs", "time_major", "asymmetric_quantize_inputs"),
)
rnn_opts = OptionsSerializer("RNNOptions", (fused_act, "asymmetric_quantize_inputs"))
seq_rnn_opts = OptionsSerializer("SequenceRNNOptions", ("time_major", fused_act, "asymmetric_quantize_inputs",))
bidir_seq_rnn_opts = OptionsSerializer(
    "BidirectionalSequenceRNNOptions", ("time_major", fused_act, "merge_outputs", "asymmetric_quantize_inputs")
)
reducer_opts = OptionsSerializer("ReducerOptions", ("keep_dims",))
# Marker used in member specs to flag an int-vector member.
is_int_vec = True
builtin_operator_map = {
BuiltinOperator.ADD: (Op.Add, OptionsSerializer("AddOptions", (fused_act, "pot_scale_int16"))),
BuiltinOperator.AVERAGE_POOL_2D: (Op.AvgPool, pool2d_opts),
BuiltinOperator.CONCATENATION: (Op.ConcatTFLite, OptionsSerializer("ConcatenationOptions", ("axis", fused_act))),
BuiltinOperator.CONV_2D: (Op.Conv2DBias, conv2d_opts),
BuiltinOperator.DEPTHWISE_CONV_2D: (Op.DepthwiseConv2DBias, depthwise_opts),
BuiltinOperator.DEPTH_TO_SPACE: (Op.DepthToSpace, OptionsSerializer("DepthToSpaceOptions", ("block_size",))),
BuiltinOperator.DEQUANTIZE: (Op.Dequantize, OptionsSerializer("DequantizeOptions")),
BuiltinOperator.EMBEDDING_LOOKUP: (Op.EmbeddingLookup, None),
BuiltinOperator.FLOOR: (Op.Floor, None),
BuiltinOperator.FULLY_CONNECTED: (
Op.FullyConnected,
OptionsSerializer("FullyConnectedOptions", (fused_act, "weights_format", "asymmetric_quantize_inputs")),
),
BuiltinOperator.HASHTABLE_LOOKUP: (Op.HashtableLookup, None),
BuiltinOperator.L2_NORMALIZATION: (Op.L2Norm, OptionsSerializer("L2NormOptions", (fused_act,))),
BuiltinOperator.L2_POOL_2D: (Op.L2Pool2D, pool2d_opts),
BuiltinOperator.LOCAL_RESPONSE_NORMALIZATION: (
Op.LRN,
OptionsSerializer("LocalResponseNormalizationOptions", ("radius", "bias", "alpha", "beta")),
),
BuiltinOperator.LOGISTIC: (Op.Sigmoid, None),
BuiltinOperator.LSH_PROJECTION: (Op.LSHProjection, OptionsSerializer("LSHProjectionOptions", ("type",))),
BuiltinOperator.LSTM: (Op.Lstm, lstm_opts),
BuiltinOperator.MAX_POOL_2D: (Op.MaxPool, pool2d_opts),
BuiltinOperator.MUL: (Op.Mul, OptionsSerializer("MulOptions", (fused_act,))),
BuiltinOperator.RELU: (Op.Relu, None),
BuiltinOperator.RELU_N1_TO_1: (Op.ReluN1To1, None),
BuiltinOperator.RELU6: (Op.Relu6, None),
BuiltinOperator.RESHAPE: (Op.Reshape, OptionsSerializer("ReshapeOptions", (("new_shape", is_int_vec),))),
BuiltinOperator.RESIZE_BILINEAR: (
Op.ResizeBilinear,
OptionsSerializer("ResizeBilinearOptions", ("align_corners", "half_pixel_centers")),
),
BuiltinOperator.RNN: (Op.Rnn, rnn_opts),
BuiltinOperator.SOFTMAX: (Op.Softmax, OptionsSerializer("SoftmaxOptions", ("beta",))),
BuiltinOperator.SPACE_TO_DEPTH: (Op.SpaceToDepth, OptionsSerializer("SpaceToDepthOptions", ("block_size",))),
BuiltinOperator.SVDF: (
Op.Svdf,
OptionsSerializer("SVDFOptions", ("rank", fused_act, "asymmetric_quantize_inputs")),
),
BuiltinOperator.TANH: (Op.Tanh, None),
BuiltinOperator.CONCAT_EMBEDDINGS: (
Op.ConcatEmbeddings,
OptionsSerializer(
"ConcatEmbeddingsOptions",
(
"num_channels",
"num_columns_per_channel",
"num_columns_per_channel_as_numpy",
"num_columns_per_channel_as_length",
"embedding_dim_per_channel",
"embedding_dim_per_channel_as_numpy",
"embedding_dim_per_channel_as_length",
),
),
),
BuiltinOperator.SKIP_GRAM: (
Op.SkipGram,
OptionsSerializer("SkipGramOptions", ("ngram_size", "max_skip_size", "include_all_ngrams")),
),
BuiltinOperator.CALL: (Op.Call, OptionsSerializer("CallOptions", ("subgraph",))),
BuiltinOperator.EMBEDDING_LOOKUP_SPARSE: (
Op.EmbeddingLookupSparse,
OptionsSerializer("EmbeddingLookupSparseOptions", ("combiner",)),
),
BuiltinOperator.PAD: (Op.Pad, OptionsSerializer("PadOptions")),
BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_RNN: (Op.UnidirectionalSequenceRnn, seq_rnn_opts),
BuiltinOperator.GATHER: (Op.GatherV2, OptionsSerializer("GatherOptions", ("axis",))),
BuiltinOperator.BATCH_TO_SPACE_ND: (Op.BatchToSpaceND, OptionsSerializer("BatchToSpaceNDOptions")),
BuiltinOperator.SPACE_TO_BATCH_ND: (Op.SpaceToBatchND, OptionsSerializer("SpaceToBatchNDOptions")),
BuiltinOperator.TRANSPOSE: (Op.Transpose, OptionsSerializer("TransposeOptions")),
BuiltinOperator.MEAN: (Op.Mean, None),
BuiltinOperator.SUB: (Op.Sub, OptionsSerializer("SubOptions", (fused_act, "pot_scale_int16",))),
BuiltinOperator.DIV: (Op.Div, OptionsSerializer("DivOptions", (fused_act,))),
BuiltinOperator.SQUEEZE: (Op.Squeeze, OptionsSerializer("SqueezeOptions", (("squeeze_dims", is_int_vec),))),
BuiltinOperator.UNIDIRECTIONAL_SEQUENCE_LSTM: (Op.UnidirectionalSequenceLstm, unidir_seq_lstm_opts),
BuiltinOperator.STRIDED_SLICE: (
Op.StridedSlice,
OptionsSerializer(
"StridedSliceOptions", ("begin_mask", "end_mask", "ellipsis_mask", "new_axis_mask", "shrink_axis_mask")
),
),
BuiltinOperator.BIDIRECTIONAL_SEQUENCE_RNN: (Op.BidirectionalSequenceRnn, bidir_seq_rnn_opts),
BuiltinOperator.EXP: (Op.Exp, OptionsSerializer("ExpOptions")),
BuiltinOperator.TOPK_V2: (Op.TopKV2, OptionsSerializer("TopKV2Options")),
BuiltinOperator.SPLIT: (Op.Split, OptionsSerializer("SplitOptions", ("num_splits",))),
BuiltinOperator.LOG_SOFTMAX: (Op.LogSoftmax, OptionsSerializer("LogSoftmaxOptions")),
BuiltinOperator.DELEGATE: (Op.Delegate, None),
BuiltinOperator.BIDIRECTIONAL_SEQUENCE_LSTM: (Op.BidirectionalSequenceLstm, bidir_seq_lstm_opts),
BuiltinOperator.CAST: (
Op.Cast,
OptionsSerializer(
"CastOptions",
(
("in_data_type", datatype_deserialize, datatype_serialize),
("out_data_type", datatype_deserialize, datatype_serialize),
),
),
),
BuiltinOperator.PRELU: (Op.Prelu, None),
BuiltinOperator.MAXIMUM: (Op.Maximum, OptionsSerializer("MaximumMinimumOptions")),
BuiltinOperator.ARG_MAX: (
Op.ArgMax,
OptionsSerializer("ArgMaxOptions", (("output_type", datatype_deserialize, datatype_serialize),)),
),
BuiltinOperator.MINIMUM: (Op.Minimum, OptionsSerializer("MaximumMinimumOptions")),
BuiltinOperator.LESS: (Op.Less, OptionsSerializer("LessOptions")),
BuiltinOperator.NEG: (Op.Neg, OptionsSerializer("NegOptions")),
BuiltinOperator.PADV2: (Op.PadV2, OptionsSerializer("PadV2Options")),
BuiltinOperator.GREATER: (Op.Greater, OptionsSerializer("GreaterOptions")),
BuiltinOperator.GREATER_EQUAL: (Op.GreaterEqual, OptionsSerializer("GreaterEqualOptions")),
BuiltinOperator.LESS_EQUAL: (Op.LessEqual, OptionsSerializer("LessEqualOptions")),
BuiltinOperator.SELECT: (Op.Select, OptionsSerializer("SelectOptions")),
BuiltinOperator.SLICE: (Op.Slice, OptionsSerializer("SliceOptions")),
BuiltinOperator.SIN: (Op.Sin, None),
BuiltinOperator.TRANSPOSE_CONV: (
Op.Conv2DBackpropInput,
OptionsSerializer("TransposeConvOptions", (padding, "stride_w", "stride_h")),
),
BuiltinOperator.SPARSE_TO_DENSE: (
Op.SparseToDense,
OptionsSerializer("SparseToDenseOptions", ("validate_indices",)),
),
BuiltinOperator.TILE: (Op.Tile, OptionsSerializer("TileOptions")),
BuiltinOperator.EXPAND_DIMS: (Op.ExpandDims, OptionsSerializer("ExpandDimsOptions")),
BuiltinOperator.EQUAL: (Op.Equal, OptionsSerializer("EqualOptions")),
BuiltinOperator.NOT_EQUAL: (Op.NotEqual, OptionsSerializer("NotEqualOptions")),
BuiltinOperator.LOG: (Op.Log, None),
BuiltinOperator.SUM: (Op.Sum, None),
BuiltinOperator.SQRT: (Op.Sqrt, None),
BuiltinOperator.RSQRT: (Op.Rsqrt, None),
BuiltinOperator.SHAPE: (
Op.Shape,
OptionsSerializer("ShapeOptions", (("out_type", datatype_deserialize, datatype_serialize),)),
),
BuiltinOperator.POW: (Op.Pow, OptionsSerializer("PowOptions")),
BuiltinOperator.ARG_MIN: (
Op.ArgMin,
OptionsSerializer("ArgMinOptions", (("output_type", datatype_deserialize, datatype_serialize),)),
),
BuiltinOperator.FAKE_QUANT: (
Op.FakeQuantWithMinMaxArgs,
OptionsSerializer("FakeQuantOptions", ("min", "max", "num_bits", "narrow_range")),
),
BuiltinOperator.REDUCE_PROD: (Op.Prod, reducer_opts),
BuiltinOperator.REDUCE_MAX: (Op.Max, reducer_opts),
BuiltinOperator.PACK: (Op.Pack, OptionsSerializer("PackOptions", ("values_count", "axis"))),
BuiltinOperator.LOGICAL_OR: (Op.LogicalOr, OptionsSerializer("LogicalOrOptions")),
BuiltinOperator.ONE_HOT: (Op.OneHot, OptionsSerializer("OneHotOptions", ("axis",))),
BuiltinOperator.LOGICAL_AND: (Op.LogicalAnd, OptionsSerializer("LogicalAndOptions")),
BuiltinOperator.LOGICAL_NOT: (Op.LogicalNot, OptionsSerializer("LogicalNotOptions")),
BuiltinOperator.UNPACK: (Op.Unpack, OptionsSerializer("UnpackOptions", ("num", "axis"))),
BuiltinOperator.REDUCE_MIN: (Op.Min, reducer_opts),
BuiltinOperator.FLOOR_DIV: (Op.FloorDiv, OptionsSerializer("FloorDivOptions")),
BuiltinOperator.REDUCE_ANY: (Op.Any, reducer_opts),
BuiltinOperator.SQUARE: (Op.Square, OptionsSerializer("SquareOptions")),
BuiltinOperator.ZEROS_LIKE: (Op.ZerosLike, OptionsSerializer("ZerosLikeOptions")),
BuiltinOperator.FILL: (Op.Fill, OptionsSerializer("FillOptions")),
BuiltinOperator.FLOOR_MOD: (Op.FloorMod, OptionsSerializer("FloorModOptions")),
BuiltinOperator.RANGE: (Op.Range, OptionsSerializer("RangeOptions")),
BuiltinOperator.RESIZE_NEAREST_NEIGHBOR: (
Op.ResizeNearestNeighbor,
OptionsSerializer("ResizeNearestNeighborOptions", ("align_corners", "half_pixel_centers")),
),
BuiltinOperator.LEAKY_RELU: (Op.LeakyRelu, OptionsSerializer("LeakyReluOptions", ("alpha",))),
BuiltinOperator.SQUARED_DIFFERENCE: (Op.SquaredDifference, OptionsSerializer("SquaredDifferenceOptions")),
BuiltinOperator.MIRROR_PAD: (Op.MirrorPad, OptionsSerializer("MirrorPadOptions", ("mode",))),
BuiltinOperator.ABS: (Op.Abs, OptionsSerializer("AbsOptions")),
BuiltinOperator.SPLIT_V: (Op.SplitV, OptionsSerializer("SplitVOptions", ("num_splits",))),
BuiltinOperator.UNIQUE: (
Op.Unique,
OptionsSerializer("UniqueOptions", (("idx_out_type", datatype_deserialize, datatype_serialize),)),
),
BuiltinOperator.CEIL: (Op.Ceil, None),
BuiltinOperator.REVERSE_V2: (Op.ReverseV2, OptionsSerializer("ReverseV2Options")),
BuiltinOperator.ADD_N: (Op.AddN, OptionsSerializer("AddNOptions")),
BuiltinOperator.GATHER_ND: (Op.GatherNd, OptionsSerializer("GatherNdOptions")),
BuiltinOperator.COS: (Op.Cos, OptionsSerializer("CosOptions")),
BuiltinOperator.WHERE: (Op.Where, OptionsSerializer("WhereOptions")),
BuiltinOperator.RANK: (Op.Rank, OptionsSerializer("RankOptions")),
BuiltinOperator.ELU: (Op.Elu, None),
BuiltinOperator.REVERSE_SEQUENCE: (
Op.ReverseSequence,
OptionsSerializer("ReverseSequenceOptions", ("seq_dim", "batch_dim")),
),
BuiltinOperator.MATRIX_DIAG: (Op.MatrixDiag, OptionsSerializer("MatrixDiagOptions")),
BuiltinOperator.QUANTIZE: (Op.Quantize, OptionsSerializer("QuantizeOptions")),
BuiltinOperator.MATRIX_SET_DIAG: (Op.MatrixSetDiag, OptionsSerializer("MatrixSetDiagOptions")),
BuiltinOperator.ROUND: (Op.Round, None),
BuiltinOperator.HARD_SWISH: (Op.HardSwish, OptionsSerializer("HardSwishOptions")),
BuiltinOperator.IF: (Op.If, OptionsSerializer("IfOptions", ("then_subgraph_index", "else_subgraph_index"))),
BuiltinOperator.WHILE: (
Op.While,
OptionsSerializer("WhileOptions", ("cond_subgraph_index", "body_subgraph_index")),
),
BuiltinOperator.NON_MAX_SUPPRESSION_V4: (Op.NonMaxSuppressionV4, OptionsSerializer("NonMaxSuppressionV4Options")),
BuiltinOperator.NON_MAX_SUPPRESSION_V5: (Op.NonMaxSuppressionV5, OptionsSerializer("NonMaxSuppressionV5Options")),
BuiltinOperator.SCATTER_ND: (Op.ScatterNd, OptionsSerializer("ScatterNdOptions")),
BuiltinOperator.SELECT_V2: (Op.SelectV2, OptionsSerializer("SelectV2Options")),
BuiltinOperator.DENSIFY: (Op.Densify, OptionsSerializer("DensifyOptions")),
BuiltinOperator.SEGMENT_SUM: (Op.SegmentSum, OptionsSerializer("SegmentSumOptions")),
BuiltinOperator.BATCH_MATMUL: (Op.BatchMatMul, OptionsSerializer("BatchMatMulOptions", ("adj_x", "adj_y"))),
BuiltinOperator.CUSTOM: (Op.Custom, CustomOptionsSerializer()),
}
# Reverse lookup: internal Op -> (BuiltinOperator enum value, options serializer).
builtin_operator_inv_map = {v[0]: (k, v[1]) for k, v in builtin_operator_map.items()}
# The high-level NPU op also serializes as a TFLite CUSTOM operator.
builtin_operator_inv_map[Op.CustomNpuOp] = (BuiltinOperator.CUSTOM, CustomOptionsSerializer())
# Name reported for operators with no known builtin mapping.
BUILTIN_OPERATOR_UNKNOWN = "UNKNOWN"
def builtin_type_name(builtin):
    """Return the ``BuiltinOperator`` attribute name for enum value ``builtin``.

    Falls back to ``BUILTIN_OPERATOR_UNKNOWN`` for values with no matching
    attribute (previously this raised an uninformative StopIteration).
    """
    return next((k for k, v in vars(BuiltinOperator).items() if v == builtin), BUILTIN_OPERATOR_UNKNOWN)
def optype_to_builtintype(op_type):
    """Map an internal Op to its TFLite builtin operator name, or UNKNOWN."""
    entry = builtin_operator_inv_map.get(op_type)
    if entry is None:
        return BUILTIN_OPERATOR_UNKNOWN
    return builtin_type_name(entry[0])
|
# -*- coding: utf-8 -*-
r"""
Ambient spaces of modular symbols
This module defines the following classes. There is an abstract base
class ``ModularSymbolsAmbient``, derived from
``space.ModularSymbolsSpace`` and ``hecke.AmbientHeckeModule``. As
this is an abstract base class, only derived classes should be
instantiated. There are five derived classes:
- ``ModularSymbolsAmbient_wtk_g0``, for modular symbols of general
weight `k` for `\Gamma_0(N)`;
- ``ModularSymbolsAmbient_wt2_g0`` (derived from
``ModularSymbolsAmbient_wtk_g0``), for modular symbols of weight 2
for `\Gamma_0(N)`;
- ``ModularSymbolsAmbient_wtk_g1``, for modular symbols of general
weight `k` for `\Gamma_1(N)`;
- ``ModularSymbolsAmbient_wtk_gamma_h``, for modular symbols of
general weight `k` for `\Gamma_H`, where `H` is a subgroup of
`\ZZ/N\ZZ`;
- ``ModularSymbolsAmbient_wtk_eps``, for modular symbols of general
weight `k` and character `\epsilon`.
EXAMPLES:
We compute a space of modular symbols modulo 2. The dimension is
different from that of the corresponding space in characteristic
0::
sage: M = ModularSymbols(11,4,base_ring=GF(2)); M
Modular Symbols space of dimension 7 for Gamma_0(11) of weight 4
with sign 0 over Finite Field of size 2
sage: M.basis()
([X*Y,(1,0)], [X*Y,(1,8)], [X*Y,(1,9)], [X^2,(0,1)], [X^2,(1,8)], [X^2,(1,9)], [X^2,(1,10)])
sage: M0 = ModularSymbols(11,4,base_ring=QQ); M0
Modular Symbols space of dimension 6 for Gamma_0(11) of weight 4
with sign 0 over Rational Field
sage: M0.basis()
([X^2,(0,1)], [X^2,(1,6)], [X^2,(1,7)], [X^2,(1,8)], [X^2,(1,9)], [X^2,(1,10)])
The characteristic polynomial of the Hecke operator `T_2` has an extra
factor `x`.
::
sage: M.T(2).matrix().fcp('x')
(x + 1)^2 * x^5
sage: M0.T(2).matrix().fcp('x')
(x - 9)^2 * (x^2 - 2*x - 2)^2
"""
################################################################################
# Sage: Open Source Mathematical Software
#
# Copyright (C) 2005 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
################################################################################
# Sage packages
from sage.misc.search import search
import sage.misc.latex as latex
import sage.misc.misc as misc
import sage.matrix.matrix_space as matrix_space
from sage.matrix.matrix_integer_2x2 import MatrixSpace_ZZ_2x2 as M2Z
import sage.modules.free_module_element as free_module_element
import sage.modules.free_module as free_module
import sage.misc.misc as misc
import sage.modular.arithgroup.all as arithgroup
import sage.modular.dirichlet as dirichlet
import sage.modular.hecke.all as hecke
import sage.rings.rational_field as rational_field
import sage.rings.integer_ring as integer_ring
import sage.rings.all as rings
import sage.rings.arith as arith
import sage.structure.formal_sum as formal_sum
import sage.categories.all as cat
from sage.modular.cusps import Cusp
import sage.structure.all
import boundary
import element
import heilbronn
import manin_symbols
import modular_symbols
import modsym
import p1list
import relation_matrix
import space
import subspace
# Shorthand aliases used throughout this module.
# NOTE(review): these bind rings.Rational / rings.Integers (the classes),
# not the field QQ / ring ZZ objects -- confirm this is intended.
QQ = rings.Rational
ZZ = rings.Integers
class ModularSymbolsAmbient(space.ModularSymbolsSpace, hecke.AmbientHeckeModule):
r"""
An ambient space of modular symbols for a congruence subgroup of
`SL_2(\ZZ)`.
This class is an abstract base class, so only derived classes
should be instantiated.
INPUT:
- ``weight`` - an integer
- ``group`` - a congruence subgroup.
- ``sign`` - an integer, either -1, 0, or 1
- ``base_ring`` - a commutative ring
- ``custom_init`` - a function that is called with self as input
before any computations are done using self; this could be used
to set a custom modular symbols presentation.
"""
def __init__(self, group, weight, sign, base_ring,
             character=None, custom_init=None):
    """
    Initialize a space of modular symbols.

    INPUT:

    - ``weight`` - an integer

    - ``group`` - a congruence subgroup.

    - ``sign`` - an integer, either -1, 0, or 1

    - ``base_ring`` - a commutative ring

    EXAMPLES::

        sage: ModularSymbols(2,2)
        Modular Symbols space of dimension 1 for Gamma_0(2) of weight 2 with sign 0 over Rational Field
    """
    weight = int(weight)
    if weight <= 1:
        raise ValueError("Weight (=%s) Modular symbols of weight <= 1 not defined."%weight)
    if not arithgroup.is_CongruenceSubgroup(group):
        raise TypeError("group must be a congruence subgroup")
    sign = int(sign)
    # NOTE(review): this only rejects inputs that are not Rings *and* claim
    # to be fields; presumably the intent was to require a commutative
    # ring -- left unchanged to avoid altering accepted inputs.
    if not isinstance(base_ring, rings.Ring) and base_ring.is_field():
        raise TypeError("base_ring must be a commutative ring")
    # Gamma0 spaces implicitly carry the trivial character.
    if character is None and arithgroup.is_Gamma0(group):
        character = dirichlet.TrivialCharacter(group.level(), base_ring)
    space.ModularSymbolsSpace.__init__(self, group, weight,
                                       character, sign, base_ring)
    if custom_init is not None:
        custom_init(self)
    try:
        formula = self._dimension_formula()
    except NotImplementedError:
        formula = None
    rank = self.rank()
    if formula is not None:
        # Bug fix: the failure message previously referenced an undefined
        # name ``d`` (NameError on assertion failure); report ``rank``.
        assert rank == formula, \
              "Computed dimension (=%s) of ambient space \"%s\" doesn't match dimension formula (=%s)!\n"%(rank, self, formula) + \
              "ModularSymbolsAmbient: group = %s, weight = %s, sign = %s, base_ring = %s, character = %s"%(
                  group, weight, sign, base_ring, character)
    hecke.AmbientHeckeModule.__init__(self, base_ring, rank, group.level(), weight)
def __cmp__(self, other):
    """
    Standard comparison function.

    Python 2 three-way comparison: returns a negative value, 0, or a
    positive value.

    EXAMPLES::

        sage: ModularSymbols(11,2) == ModularSymbols(11,2) # indirect doctest
        True
        sage: ModularSymbols(11,2) == ModularSymbols(11,4) # indirect doctest
        False
    """
    # Objects of a different kind compare by type.
    if not isinstance(other, space.ModularSymbolsSpace):
        return cmp(type(self), type(other))
    # Two ambient spaces compare property by property.
    if isinstance(other, ModularSymbolsAmbient):
        return misc.cmp_props(self, other, ['group', 'weight', 'sign', 'base_ring', 'character'])
    # ``other`` is non-ambient: compare self with its ambient module first.
    c = cmp(self, other.ambient_hecke_module())
    if c: return c
    # Same ambient module: equal only if the underlying free modules agree;
    # otherwise the ambient space sorts first.
    if self.free_module() == other.free_module():
        return 0
    return -1
def new_submodule(self, p=None):
    r"""
    Return the new or `p`-new submodule of this modular symbols ambient space.

    INPUT:

    - ``p`` - (default: None); if not None, return only
      the `p`-new submodule.

    OUTPUT:

    The new or `p`-new submodule of this modular symbols ambient space.

    EXAMPLES::

        sage: ModularSymbols(100).new_submodule()
        Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 31 for Gamma_0(100) of weight 2 with sign 0 over Rational Field
        sage: ModularSymbols(389).new_submodule()
        Modular Symbols space of dimension 65 for Gamma_0(389) of weight 2 with sign 0 over Rational Field
    """
    # Easy special case: in weight 2 and prime level the whole space is new.
    if self.weight() == 2 and self.level().is_prime():
        return self
    # Otherwise defer to the generic Hecke-module implementation.
    return hecke.AmbientHeckeModule.new_submodule(self, p=p)
def manin_symbols(self):
    """
    Return the list of Manin symbols for this modular symbols ambient space.

    Abstract method: concrete subclasses must override this.

    EXAMPLES::

        sage: ModularSymbols(11,2).manin_symbols()
        Manin Symbol List of weight 2 for Gamma0(11)
    """
    raise NotImplementedError
def manin_generators(self):
    """
    Return list of all Manin symbols for this space. These are the
    generators in the presentation of this space by Manin symbols.

    EXAMPLES::

        sage: M = ModularSymbols(2,2)
        sage: M.manin_generators()
        [(0,1), (1,0), (1,1)]

    ::

        sage: M = ModularSymbols(1,6)
        sage: M.manin_generators()
        [[Y^4,(0,0)], [X*Y^3,(0,0)], [X^2*Y^2,(0,0)], [X^3*Y,(0,0)], [X^4,(0,0)]]
    """
    # Set by compute_presentation(); unlike manin_basis(), no lazy
    # computation happens here -- presumably callers trigger the
    # presentation first (TODO confirm).
    return self._manin_generators
def manin_basis(self):
    r"""
    Return a list of indices into the list of Manin generators (see
    ``self.manin_generators()``) such that those symbols
    form a basis for the quotient of the `\QQ`-vector
    space spanned by Manin symbols modulo the relations.

    EXAMPLES::

        sage: M = ModularSymbols(2,2)
        sage: M.manin_basis()
        [1]
        sage: [M.manin_generators()[i] for i in M.manin_basis()]
        [(1,0)]
        sage: M = ModularSymbols(6,2)
        sage: M.manin_basis()
        [1, 10, 11]
        sage: [M.manin_generators()[i] for i in M.manin_basis()]
        [(1,0), (3,1), (3,2)]
    """
    # Lazily compute the presentation, which caches _manin_basis.
    if not hasattr(self, "_manin_basis"):
        self.compute_presentation()
    return self._manin_basis
def p1list(self):
    """
    Return a P1list of the level of this modular symbol space.

    The list is computed once and cached on the instance.

    EXAMPLES::

        sage: ModularSymbols(11,2).p1list()
        The projective line over the integers modulo 11
    """
    try:
        return self.__p1list
    except AttributeError:
        # First access: build and cache the projective line.
        self.__p1list = p1list.P1List(self.level())
        return self.__p1list
# See the file relation_matrix.py
#
# def relation_matrix(self):
# raise NotImplementedError
def compute_presentation(self):
    r"""
    Compute and cache the presentation of this space.

    Caches the Manin generator list, the basis indices, the
    generators-to-basis matrix and the 2-term relation data.

    EXAMPLES::

        sage: ModularSymbols(11,2).compute_presentation() # no output
    """
    B, basis, mod = relation_matrix.compute_presentation(
        self.manin_symbols(), self.sign(),
        self.base_ring())
    self._mod2term = mod
    self._manin_gens_to_basis = B
    self._manin_basis = basis
    self._manin_generators = self.manin_symbols().manin_symbol_list()
def manin_gens_to_basis(self):
    r"""
    Return the matrix expressing the manin symbol generators in terms of the basis.

    EXAMPLES::

        sage: ModularSymbols(11,2).manin_gens_to_basis()
        [-1  0  0]
        [ 1  0  0]
        [ 0  0  0]
        [ 0  0  1]
        [ 0 -1  1]
        [ 0 -1  0]
        [ 0  0 -1]
        [ 0  0 -1]
        [ 0  1 -1]
        [ 0  1  0]
        [ 0  0  1]
        [ 0  0  0]
    """
    # Lazily compute the presentation, which caches _manin_gens_to_basis.
    if not hasattr(self, "_manin_gens_to_basis"):
        self.compute_presentation()
    return self._manin_gens_to_basis
#####################################################################
# Coercion
#####################################################################
def __call__(self, x, computed_with_hecke=False):
    r"""
    Coerce `x` into this modular symbols space. The result is
    either an element of self or a subspace of self.

    INPUT:

    The allowed input types for `x` are as follows:

    - ``Vector`` - a vector of the same degree. This
      defines the corresponding linear combination of the basis of self.

    - ``ManinSymbol`` - a Manin symbol of the same weight
      as the space

    - ``ModularSymbolsElement`` - a modular symbol whose
      ambient parent is this space of modular symbols. (TODO: make more
      sophisticated)

    - 0 - the integer 0; results in the 0 modular symbol.

    - 3-tuple - Given a 3-tuple (i,u,v), returns the modular symbol
      element defined by the Manin symbol
      `[X^{i}\cdot Y^{k-2-i}, (u,v)]`, where k is the weight.
      Note that we must have `0\leq i \leq k-2`.

    - 2-tuple - Given a 2-tuple (u,v), returns the element defined by
      the Manin symbol `[X^0 \cdot Y^{k-2}, (u,v)]`.

    - 2-elements list - Given a list ``[alpha, beta]``,
      where `\alpha` and `\beta` are (coercible to)
      cusps, return the modular symbol `\{\alpha, \beta\}`. When
      the weight `k > 2` return
      `Y^{k-2} \{\alpha, \beta\}`.

    - 3-element list - Given a list ``[i, alpha, beta]``,
      where `i` is an integer, and `\alpha`,
      `\beta` are (coercible to) cusps, return the modular symbol
      `X^i Y^{k-2-i} \{\alpha, \beta\}`.

      If our list is ``[f, alpha, beta]``, where `f`
      is a homogeneous polynomial in two variables of degree k-2 with
      integer coefficients, and alpha and beta are cusps, return the
      corresponding sum of modular symbols as an element of self. So if
      `f = \sum_{i=0}^{k-2} a_i X^i Y^{k-2-i}`, return
      `\sum_{i=0}^{k-2} a_i * [ i, alpha, beta ]`.

    EXAMPLES::

        sage: M = ModularSymbols(37,2)

    M(0) is the 0 element of the space::

        sage: M(0)
        0
        sage: type(M(0))
        <class 'sage.modular.modsym.element.ModularSymbolsElement'>

    From a vector of the correct dimension we construct the
    corresponding linear combination of the basis elements::

        sage: M.dimension()
        5
        sage: M.basis()
        ((1,0), (1,23), (1,32), (1,34), (1,35))
        sage: M(vector([1,2,3,4,5]))
        (1,0) + 2*(1,23) + 3*(1,32) + 4*(1,34) + 5*(1,35)
        sage: M(vector([1/2,2/3,3/4,4/5,5/6]))
        1/2*(1,0) + 2/3*(1,23) + 3/4*(1,32) + 4/5*(1,34) + 5/6*(1,35)

    Manin symbols can be converted to elements of the space::

        sage: from sage.modular.modsym.manin_symbols import ManinSymbol
        sage: ManinSymbol(M.manin_symbols(),(0,2,3))
        (2,3)
        sage: M(ManinSymbol(M.manin_symbols(),(0,2,3)))
        (1,34) - (1,35)

    However, it is easier to use one of the following forms.
    Either a 3-tuple `(i,u,v)` or a 2-tuple `(u,v)` with `i=0`
    assumed::

        sage: M((0,2,3))
        (1,34) - (1,35)
        sage: M((2,3))
        (1,34) - (1,35)

    Or a 3-list `[i,\alpha,\beta]` where `i` is the degree and
    `\alpha` and `\beta` are cusps, or a 2-tuple `[\alpha,\beta]`
    with `i=0` assumed::

        sage: M([0,Cusp(1/2),Cusp(0)])
        (1,35)
        sage: M([Cusp(1/2),Cusp(0)])
        (1,35)
    """
    # Dispatch on the type of x; see the docstring for the accepted forms.
    if isinstance(x, free_module_element.FreeModuleElement):
        if x.degree() != self.dimension():
            raise TypeError("Incompatible degrees: x has degree %s\
but modular symbols space has dimension %s"%(
                x.degree(), self.dimension()))
        #if x.parent().base_ring() != self.base_ring():
        #    raise TypeError, "Vector x is over %s, but modular symbols space is over %s."%(
        #        x.parent().base_ring(), self.base_ring())
        return element.ModularSymbolsElement(self, x)
    elif isinstance(x, (manin_symbols.ManinSymbol, element.ModularSymbolsElement)):
        return self.element(x)
    elif isinstance(x, modular_symbols.ModularSymbol):
        # Formal modular symbol: go through its Manin symbol representation.
        return self(x.manin_symbol_rep())
    elif isinstance(x, (int, rings.Integer)) and x==0:
        return element.ModularSymbolsElement(self, self.free_module()(0))
    elif isinstance(x, tuple):
        return self.manin_symbol(x)
    elif isinstance(x, formal_sum.FormalSum):
        # Coerce each summand and add, starting from the zero element.
        return sum([c*self(y) for c, y in x], self(0))
    elif isinstance(x, list):
        # [f, alpha, beta] with f a polynomial, vs. [i, alpha, beta] / [alpha, beta].
        if len(x) == 3 and rings.is_MPolynomial(x[0]):
            return self.modular_symbol_sum(x)
        else:
            return self.modular_symbol(x)
    raise TypeError("No coercion of %s into %s defined."%(x, self))
def change_ring(self, R):
    r"""
    Change the base ring to R.

    EXAMPLES::

        sage: ModularSymbols(Gamma1(13), 2).change_ring(GF(17))
        Modular Symbols space of dimension 15 for Gamma_1(13) of weight 2 with sign 0 and over Finite Field of size 17
        sage: M = ModularSymbols(DirichletGroup(5).0, 7); MM=M.change_ring(CyclotomicField(8)); MM
        Modular Symbols space of dimension 6 and level 5, weight 7, character [zeta8^2], sign 0, over Cyclotomic Field of order 8 and degree 4
        sage: MM.change_ring(CyclotomicField(4)) == M
        True
        sage: M.change_ring(QQ)
        Traceback (most recent call last):
        ...
        ValueError: cannot coerce element of order 4 into self
    """
    # A space with a character is keyed by that character; otherwise by
    # its congruence subgroup.
    eps = self.character()
    key = self.group() if eps is None else eps
    return modsym.ModularSymbols(key, self.weight(), self.sign(), R)
def base_extend(self, R):
    r"""
    Canonically change the base ring to R.

    Unlike ``change_ring``, this requires a coercion (not just a
    conversion) from the current base ring to R.

    EXAMPLES::

        sage: M = ModularSymbols(DirichletGroup(5).0, 7); MM = M.base_extend(CyclotomicField(8)); MM
        Modular Symbols space of dimension 6 and level 5, weight 7, character [zeta8^2], sign 0, over Cyclotomic Field of order 8 and degree 4
        sage: MM.base_extend(CyclotomicField(4))
        Traceback (most recent call last):
        ...
        ValueError: No coercion defined
    """
    if R.has_coerce_map_from(self.base_ring()):
        return self.change_ring(R)
    raise ValueError("No coercion defined")
def _action_on_modular_symbols(self, g):
    r"""
    Return the matrix of the action of a 2x2 matrix on this space.

    INPUT:

    - ``g`` (list) -- `g=[a,b,c,d]` where `a,b,c,d` are integers
      defining a `2\times2` integer matrix.

    OUTPUT:

    (matrix) The matrix of the action of `g` on this Modular
    Symbol space, with respect to the standard basis.

    .. note::

        Use _matrix_of_operator_on_modular_symbols for more general
        operators.

    EXAMPLES::

        sage: M = ModularSymbols(11,4,1)
        sage: M._action_on_modular_symbols([1,2,3,7])
        [ 0 0 5/2 -3/2]
        [ 0 0 5/2 -3/2]
        [ 0 1 0 0]
        [ 0 1 -1/2 1/2]
    """
    # Validate the input shape before delegating.
    if not isinstance(g, list):
        raise TypeError("g must be a list")
    if len(g) != 4:
        raise TypeError("g must be a list of length 4")
    # A single matrix is just the general operator machinery applied to
    # the one-element list [g], with this space as both domain and codomain.
    return self._matrix_of_operator_on_modular_symbols(self, [g])
def manin_symbol(self, x, check=True):
    r"""
    Construct a Manin Symbol from the given data.

    INPUT:

    - ``x`` (list) -- either `[u,v]` or `[i,u,v]`, where `0\le
      i\le k-2` where `k` is the weight, and `u`,`v` are integers
      defining a valid element of `\mathbb{P}^1(N)`, where `N` is
      the level.

    - ``check`` (bool, default True) -- if True, validate the length of
      ``x`` and the range of its first entry; use ``check=False`` only
      when ``x`` is already a valid length-3 tuple.

    OUTPUT:

    (ManinSymbol) the monomial Manin Symbol associated to
    `[i;(u,v)]`, with `i=0` if not supplied, corresponding to the
    symbol `[X^i*Y^{k-2-i}, (u,v)]`.

    EXAMPLES::

        sage: M = ModularSymbols(11,4,1)
        sage: M.manin_symbol([2,5,6])
        [X^2,(1,10)]
    """
    if check:
        if len(x) == 2:
            # Default the polynomial degree i to 0 for a bare (u,v) pair.
            x = (0,x[0],x[1])
        if len(x) == 3:
            # Manin symbol of the form (i, u, v), which corresponds to [X^i*Y^(k-2-i), (u,v)].
            if x[0] < 0 or x[0] > self.weight()-2:
                raise ValueError("The first entry of the tuple (=%s)\
                    must be an integer between 0 and k-2 (=%s)."%(
                    x, self.weight()-2))
        else:
            raise ValueError("x (=%s) must be of length 2 or 3"%x)
    # end check
    # Normalize (u,v) modulo the level so cache keys are canonical.
    N = self.level()
    x = (x[0], x[1]%N, x[2]%N)
    # Lazily-created per-instance cache mapping normalized tuples to
    # elements of this space.
    try:
        return self.__manin_symbol[x]
    except AttributeError:
        # First call: the cache dict does not exist yet.
        self.__manin_symbol = {}
    except KeyError:
        # Cache exists but this symbol has not been computed before.
        pass
    y = manin_symbols.ManinSymbol(self.manin_symbols(), x)
    # Coerce the raw Manin symbol into this space and memoize it.
    z = self(y)
    self.__manin_symbol[x] = z
    return z
def _modular_symbol_0_to_alpha(self, alpha, i=0):
    r"""
    Return the modular symbol `\{0,\alpha\}` in this space.

    INPUT:

    - ``alpha`` (rational or Infinity) -- a cusp

    - ``i`` (int, default 0) -- the degree of the symbol.

    OUTPUT:

    (ModularSymbol) The modular symbol `X^iY^{k-2-i}\{0,\alpha\}`.

    EXAMPLES::

        sage: M = ModularSymbols(11,4,1)
        sage: M._modular_symbol_0_to_alpha(Cusp(3/5))
        11*[X^2,(1,7)] + 33*[X^2,(1,9)] - 20*[X^2,(1,10)]
        sage: M._modular_symbol_0_to_alpha(Cusp(3/5),1)
        15/2*[X^2,(1,7)] + 35/2*[X^2,(1,9)] - 10*[X^2,(1,10)]
        sage: M._modular_symbol_0_to_alpha(Cusp(Infinity))
        -[X^2,(1,10)]
        sage: M._modular_symbol_0_to_alpha(Cusp(Infinity),1)
        0
    """
    # {0, oo} is itself a single Manin symbol.
    if alpha.is_infinity():
        return self.manin_symbol((i,0,1), check=False)
    # v = continued fraction of alpha; c = list of partial convergents
    # (p_k, q_k).  The symbol {0, alpha} unwinds as a telescoping sum of
    # unimodular symbols between consecutive convergents.
    v, c = arith.continued_fraction_list(alpha._rational_(), partial_convergents=True)
    a = self(0)
    zero = rings.ZZ(0)
    one = rings.ZZ(1)
    two = rings.ZZ(2)
    if self.weight() > two:
        R = rings.ZZ['X']
        X = R.gen(0)
        ## need to add first two terms, which aren't necessarily
        ## zero in this case. we do the first here, and the
        ## second in the k=0 case below, so as to avoid code
        ## duplication
        a += self.manin_symbol((i,0,1), check=False)
        for k in range(0,len(c)):
            ## matrix entries associated to this partial sum
            if k == 0:
                x = c[0][0]
                y = -1
                z = 1
                w = 0
            else:
                x = c[k][0]
                y = c[k-1][0]
                z = c[k][1]
                w = c[k-1][1]
                if k%2 == 0:
                    # alternate signs so each [[x,y],[z,w]] has determinant 1
                    y = -y
                    w = -w
            ## two options here: write out the polynomial directly,
            ## and deal with all the separate cases, or create two
            ## polynomials and then exponentiate and multiply them.
            ## given how fast ntl/flint/etc are, the second may
            ## be faster.
            ## method 1: write out solution. this is currently
            ## incorrect, because it ends up doing 0^0 in the sum,
            ## so I'll fix it and do timings soon.
            ## for s in range(0,self.weight()-two+1):
            ## coeff = sum([ binomial(i,t)*binomial(self.weight()-two-i,s-t)*
            ## x**t * y**(i-t) * z**(s-t) *
            ## w**(self.weight()-two-i-s+t) for t in range(0,s) ])
            ## m = coeff * self.manin_symbol((s, y, w), check=False)
            ## a += m
            ## method 2
            p1 = x*X+y
            p2 = z*X+w
            # Avoid 0^0: replace a factor raised to exponent 0 by 1.
            if i == 0:
                p1 = R(one)
            if (self.weight()-2-i == 0):
                p2 = R(one)
            poly = (p1**i) * (p2**(self.weight()-2-i))
            # Distribute the polynomial coefficients over monomial symbols.
            for s in range(0,self.weight()-1): ## k-2+1 = k-1
                a += poly[s] * self.manin_symbol((s,z,w), check=False)
    else:
        # Weight 2: no polynomial part, just sum the unimodular symbols
        # given by the denominators of consecutive convergents.
        for k in range(1,len(c)):
            u = c[k][1]
            v = c[k-1][1]
            if k % 2 == 0:
                v = -v
            x = self.manin_symbol((i, u, v), check=False)
            a += x
    return a
def modular_symbol(self, x, check=True):
    r"""
    Create a modular symbol in this space.

    INPUT:

    - ``x`` (list) -- a list of either 2 or 3 entries:

      - 2 entries: `[\alpha, \beta]` where `\alpha` and `\beta`
        are cusps;

      - 3 entries: `[i, \alpha, \beta]` where `0\le i\le k-2`
        and `\alpha` and `\beta` are cusps;

    - ``check`` (bool, default True) -- flag that determines
      whether the input ``x`` needs processing: use check=False
      for efficiency if the input ``x`` is a list of length 3 whose
      first entry is an Integer, and whose second and third
      entries are Cusps (see examples).

    OUTPUT:

    (Modular Symbol) The modular symbol `Y^{k-2}\{\alpha,
    \beta\}`. or `X^i Y^{k-2-i}\{\alpha,\beta\}`.

    EXAMPLES::

        sage: set_modsym_print_mode('modular')
        sage: M = ModularSymbols(11)
        sage: M.modular_symbol([2/11, oo])
        -{-1/9, 0}
        sage: M.1
        {-1/8, 0}
        sage: M.modular_symbol([-1/8, 0])
        {-1/8, 0}
        sage: M.modular_symbol([0, -1/8, 0])
        {-1/8, 0}
        sage: M.modular_symbol([10, -1/8, 0])
        Traceback (most recent call last):
        ...
        ValueError: The first entry of the tuple (=[10, -1/8, 0]) must be an integer between 0 and k-2 (=0).

    ::

        sage: N = ModularSymbols(6,4)
        sage: set_modsym_print_mode('manin')
        sage: N([1,Cusp(-1/4),Cusp(0)])
        17/2*[X^2,(2,3)] - 9/2*[X^2,(2,5)] + 15/2*[X^2,(3,1)] - 15/2*[X^2,(3,2)]
        sage: N([1,Cusp(-1/2),Cusp(0)])
        1/2*[X^2,(2,3)] + 3/2*[X^2,(2,5)] + 3/2*[X^2,(3,1)] - 3/2*[X^2,(3,2)]

    Use check=False for efficiency if the input x is a list of length 3
    whose first entry is an Integer, and whose second and third entries
    are cusps::

        sage: M.modular_symbol([0, Cusp(2/11), Cusp(oo)], check=False)
        -(1,9)

    ::

        sage: set_modsym_print_mode() # return to default.
    """
    if check:
        if len(x) == 2:
            # Default the degree i to 0 when only the two cusps are given.
            x = [0,x[0],x[1]]
        elif len(x) == 3:
            if x[0] < 0 or x[0] > self.weight()-2:
                raise ValueError("The first entry of the tuple (=%s)\
                    must be an integer between 0 and k-2 (=%s)."%(
                    x, self.weight()-2))
        else:
            raise ValueError("x (=%s) must be of length 2 or 3"%x)
        # Coerce the entries to their canonical types.
        i = rings.Integer(x[0])
        alpha = Cusp(x[1])
        beta = Cusp(x[2])
    else:
        # Caller promises x = [Integer, Cusp, Cusp] already.
        i = x[0]
        alpha = x[1]
        beta = x[2]
    # Compute {0,beta} - {0,alpha}
    a = self._modular_symbol_0_to_alpha(alpha, i)
    b = self._modular_symbol_0_to_alpha(beta, i)
    return b - a
def modular_symbol_sum(self, x, check=True):
    r"""
    Construct a modular symbol sum.

    INPUT:

    - ``x`` (list) -- `[f, \alpha, \beta]` where `f =
      \sum_{i=0}^{k-2} a_i X^i Y^{k-2-i}` is a homogeneous
      polynomial over `\ZZ` of degree `k` and `\alpha` and `\beta`
      are cusps.

    - ``check`` (bool, default True) -- if True check the validity
      of the input tuple ``x``

    OUTPUT:

    The sum `\sum_{i=0}^{k-2} a_i [ i, \alpha, \beta ]` as an
    element of this modular symbol space.

    EXAMPLES:

        sage: M = ModularSymbols(11,4)
        sage: R.<X,Y>=QQ[]
        sage: M.modular_symbol_sum([X*Y,Cusp(0),Cusp(Infinity)])
        -3/14*[X^2,(1,6)] + 1/14*[X^2,(1,7)] - 1/14*[X^2,(1,8)] + 1/2*[X^2,(1,9)] - 2/7*[X^2,(1,10)]
    """
    if check:
        if len(x) != 3:
            raise ValueError("%s must have length 3"%x)
        f = x[0]
        # Work in the bivariate polynomial ring over the base ring.
        R = self.base_ring()['X','Y']
        X = R.gen(0)
        try:
            f = R(f)
        except TypeError:
            raise ValueError("f must be coercible to a polynomial \
            over %s"%self.base_ring())
        if (not f.is_homogeneous()) or (f.degree() != self.weight()-2):
            raise ValueError("f must be a homogeneous polynomial of degree k-2")
        alpha = Cusp(x[1])
        beta = Cusp(x[2])
    else:
        f = x[0]
        # NOTE(review): here R is the base ring itself, not the polynomial
        # ring, so X = R.gen(0) is presumably only correct when callers pass
        # f whose parent's first generator coincides with R.gen(0) -- confirm,
        # since term.degree(X) below requires X to be a variable of f's parent.
        R = self.base_ring()
        X = R.gen(0)
        alpha = x[1]
        beta = x[2]
    # Accumulate sum_{i} a_i * (X^i Y^{k-2-i}{alpha,beta}) term by term.
    s = self(0)
    for term in f.monomials():
        deg = term.degree(X)
        a = self._modular_symbol_0_to_alpha(alpha, deg)
        b = self._modular_symbol_0_to_alpha(beta, deg)
        s += f.monomial_coefficient(term) * (b-a)
    return s
def _compute_dual_hecke_matrix(self, n):
    r"""
    Return the matrix of the dual Hecke operator `T(n)`.

    INPUT:

    - ``n`` (int) -- a positive integer

    OUTPUT:

    (matrix) The matrix of the dual of `T(n)`, i.e. the transpose
    of the matrix of `T(n)` on this space.

    EXAMPLES::

        sage: M = ModularSymbols(11,4,1)
        sage: M._compute_dual_hecke_matrix(5)
        [126 0 0 0]
        [ 2 63 38 22]
        [ 11 33 82 121]
        [-13 30 6 -17]
    """
    # The dual operator acts on the linear dual, so its matrix is the
    # transpose of the matrix of T(n).
    Tn = self.hecke_matrix(n)
    return Tn.transpose()
def _compute_hecke_matrix_prime(self, p, rows=None):
    """
    Return the matrix of the Hecke operator `T(p)`.

    INPUT:

    - ``p`` (int) -- a prime number.

    - ``rows`` (list or None (default)) -- if not None, a list of
      the rows which should be computed; otherwise the complete
      matrix will be computed,

    .. note::

        `p` does not have to be prime, despite the function name.

    OUTPUT:

    (matrix) The matrix of the Hecke operator `T(p)` on this
    space, with respect to its standard basis.

    ALGORITHM:

    Use Heilbronn-Cremona matrices if `p` is prime, else use
    Heilbronn-Merel matrices.

    EXAMPLES:

    We first compute some examples for Gamma0(N)::

        sage: m = ModularSymbols(2, weight=4)
        sage: m._compute_hecke_matrix_prime(2).charpoly('x')
        x^2 - 9*x + 8

    ::

        sage: m = ModularSymbols(1,weight=12)
        sage: m._compute_hecke_matrix_prime(2).charpoly('x')
        x^3 - 2001*x^2 - 97776*x - 1180224
        sage: m._compute_hecke_matrix_prime(13).charpoly('x')
        x^3 - 1792159238562*x^2 - 2070797989680255444*x - 598189440899986203208472

    ::

        sage: m = ModularSymbols(1,weight=12, sign=-1)
        sage: m._compute_hecke_matrix_prime(5)
        [4830]
        sage: m._compute_hecke_matrix_prime(23)
        [18643272]

    ::

        sage: m = ModularSymbols(3,4)
        sage: m._compute_hecke_matrix_prime(2).charpoly('x')
        x^2 - 18*x + 81

    ::

        sage: m = ModularSymbols(6,4)
        sage: m._compute_hecke_matrix_prime(2).charpoly('x')
        x^6 - 14*x^5 + 29*x^4 + 172*x^3 - 124*x^2 - 320*x + 256
        sage: m._compute_hecke_matrix_prime(3).charpoly('x')
        x^6 - 50*x^5 + 511*x^4 + 3012*x^3 - 801*x^2 - 9234*x + 6561

    ::

        sage: m = ModularSymbols(15,4, sign=-1)
        sage: m._compute_hecke_matrix_prime(3).charpoly('x')
        x^4 - 2*x^3 + 18*x^2 + 18*x - 243

    ::

        sage: m = ModularSymbols(6,4)
        sage: m._compute_hecke_matrix_prime(7).charpoly('x')
        x^6 - 1344*x^5 + 666240*x^4 - 140462080*x^3 + 8974602240*x^2 + 406424518656*x + 3584872677376

    ::

        sage: m = ModularSymbols(4,4)
        sage: m._compute_hecke_matrix_prime(3).charpoly('x')
        x^3 - 84*x^2 + 2352*x - 21952

    We now compute some examples for modular symbols on Gamma1(N)::

        sage: m = ModularSymbols(Gamma1(13),2, sign=-1)
        sage: m._compute_hecke_matrix_prime(2).charpoly('x')
        x^2 + 3*x + 3

    The following is an example with odd weight::

        sage: m = ModularSymbols(Gamma1(5),3)
        sage: m._compute_hecke_matrix_prime(2).charpoly('x')
        x^4 - 10*x^3 + 50*x^2 - 170*x + 289

    This example has composite conductor and weight 2 dividing the
    conductor and nontrivial sign::

        sage: m = ModularSymbols(Gamma1(9),3, sign=1)
        sage: m._compute_hecke_matrix_prime(3).charpoly('x')
        x^6 + 3*x^4 - 19*x^3 + 24*x^2 - 9*x

    In some situations we do not need all the rows of the result, and can thereby save time::

        sage: m = ModularSymbols(1,weight=12)
        sage: m._compute_hecke_matrix_prime(2)
        [ -24 0 0]
        [ 0 -24 0]
        [4860 0 2049]
        sage: m._compute_hecke_matrix_prime(2,rows=[0,1])
        [-24 0 0]
        [ 0 -24 0]
        sage: m._compute_hecke_matrix_prime(2,rows=[1,2])
        [ 0 -24 0]
        [4860 0 2049]
    """
    # note -- p doesn't have to be prime despite the function name
    p = int(rings.Integer(p)) # go through Integer so p = 2.5 gives an error.
    if isinstance(rows, list):
        rows = tuple(rows)  # lists are unhashable; use tuple as cache key
    # Per-instance cache of computed Hecke matrices, keyed by (p, rows).
    try:
        return self._hecke_matrices[(p,rows)]
    except AttributeError:
        # First call: create the cache.
        self._hecke_matrices = {}
    except KeyError:
        pass
    tm = misc.verbose("Computing Hecke operator T_%s"%p)
    # Choose the family of Heilbronn matrices of determinant p.
    if arith.is_prime(p):
        H = heilbronn.HeilbronnCremona(p)
    else:
        H = heilbronn.HeilbronnMerel(p)
    B = self.manin_basis()
    if not rows is None:
        # Restrict to the requested subset of basis rows.
        B = [B[i] for i in rows]
    cols = []
    mod2term = self._mod2term
    R = self.manin_gens_to_basis()
    K = self.base_ring()
    # W accumulates, row by row, the action of T_p on each basis Manin
    # symbol expressed in terms of all Manin generators.
    W = R.new_matrix(nrows=len(B), ncols = R.nrows())
    syms = self.manin_symbols()
    n = len(syms)
    j = 0
    for i in B:
        for h in H:
            # entries is a list of (generator index, coefficient) pairs.
            entries = syms.apply(i,h)
            for k, x in entries:
                # mod2term reduces modulo the 2-term (S and possibly I)
                # relations: generator k equals s times generator f.
                f, s = mod2term[k]
                if s != 0:
                    W[j,f] = W[j,f] + s*K(x)
        j += 1
    tm = misc.verbose("start matrix multiply",tm)
    # Convert from generator coordinates to basis coordinates via R.
    if hasattr(W, '_matrix_times_matrix_dense'):
        Tp = W._matrix_times_matrix_dense(R)
        misc.verbose("done matrix multiply and computing Hecke operator",tm)
    else:
        Tp = W * R
        tm = misc.verbose("done matrix multiply",tm)
        Tp = Tp.dense_matrix()
        misc.verbose("done making Hecke operator matrix dense",tm)
    self._hecke_matrices[(p,rows)] = Tp
    return Tp
def __heilbronn_operator(self, M, H, t=1):
    r"""
    Return the matrix function to the space `M` defined by `H`, `t`.

    .. note::

        Users will instead use the simpler interface defined, for
        example, by ``hecke_matrix()`` (see examples).

    INPUT:

    - ``M`` (ModularSymbols) -- codomain (a space of modular
      symbols);

    - ``H`` (list) -- a list of matrices in `M_2(\ZZ)`;

    - ``t`` (int, default 1) -- an integer.

    OUTPUT:

    (free module morphism) A function from the Modular Symbol
    space to the Modular Symbol space `M` defined by `t` and the
    matrices in `H`.

    EXAMPLES::

        sage: M = ModularSymbols(37,2)
        sage: M._ModularSymbolsAmbient__heilbronn_operator(M,HeilbronnCremona(3))
        Hecke module morphism Heilbronn operator(The Cremona-Heilbronn matrices of determinant 3,1) defined by the matrix
        [ 4 0 0 0 -1]
        [ 0 -1 2 2 -2]
        [ 0 2 -1 2 0]
        [ 0 0 0 -3 2]
        [ 0 0 0 0 1]
        Domain: Modular Symbols space of dimension 5 for Gamma_0(37) of weight ...
        Codomain: Modular Symbols space of dimension 5 for Gamma_0(37) of weight ...
        sage: M.hecke_matrix(3)
        [ 4 0 0 0 -1]
        [ 0 -1 2 2 -2]
        [ 0 2 -1 2 0]
        [ 0 0 0 -3 2]
        [ 0 0 0 0 1]
    """
    MS = matrix_space.MatrixSpace(self.base_ring(), self.dimension(), M.dimension())
    hom = self.Hom(M)
    # Degenerate case: the zero map is the only possibility.
    if self.dimension() == 0 or M.dimension() == 0:
        A = MS(0)
        phi = hom(A, "Heilbronn operator(%s,%s)"%(H,t))
        return phi
    rows = []
    B = self.manin_basis()
    syms = self.manin_symbols()
    k = self.weight()
    for n in B:
        z = M(0)
        i, u, v = syms[n]
        # We apply each Heilbronn matrix to the
        # Manin symbol [X^i*Y^(k-2-i), (u,v)]
        for h in H:
            # Apply h to the polynomial part
            (a,b,c,d) = tuple(h)
            # P gives the ordered coefficients of (a*X+b*Y)^i*(c*X+d*Y)^(j-i)
            P = manin_symbols.apply_to_monomial(i, k-2, a,b,c,d)
            # Apply h to the (u,v) part of the Manin symbol
            (uu,vv) = (u*a+v*c, u*b+v*d)
            # For the generalized Heilbronn operator, we throw away any
            # symbols for which the (u,v) part of the symbol doesn't have
            # both entries divisible by t.
            if t != 1:
                if uu%t != 0 or vv%t != 0:
                    continue
                uu = uu//t
                vv = vv//t
            # Now coerce each Manin symbol
            #
            # P[m]*[X^m*Y^(k-2-m), (uu,vv)], for m=0,...,len(P)
            #
            # into the image space M and add that to z.
            # Note that we coerce in Manin symbols as tuples.
            for m in range(len(P)):
                x = M((m,uu,vv))
                z += x*P[m]
        rows.append(z.element())
    A = MS(rows)
    return hom(A, "Heilbronn operator(%s,%s)"%(H,t))
def _repr_(self):
    r"""
    String representation of this Modular Symbols space.

    .. note::

        NOTE(review): this base implementation interpolates
        ``self.character()._repr_short_()``, which would fail when
        ``character()`` returns ``None``; the doctest output below
        (which shows no character) presumably comes from a subclass
        override of ``_repr_`` -- confirm.

    EXAMPLES::

        sage: m = ModularSymbols(1,weight=12)
        sage: m # indirect doctest
        Modular Symbols space of dimension 3 for Gamma_0(1) of weight 12 with sign 0 over Rational Field
    """
    return "Modular Symbols space of dimension %s and weight %s for %s with sign %s and character %s over %s"%(
        self.dimension(), self.weight(), self.group(), self.sign(), self.character()._repr_short_(), self.base_ring())
def _latex_(self):
    r"""
    Latex representation of this Modular Symbols space.

    EXAMPLES::

        sage: m = ModularSymbols(11,weight=12)
        sage: latex(m) # indirect doctest
        \mathrm{ModSym}_{12}(\Gamma_0(11),\left[1\right];\Bold{Q})
        sage: chi = DirichletGroup(7).0
        sage: m = ModularSymbols(chi)
        sage: latex(m)
        \mathrm{ModSym}_{2}(\Gamma_1(7),\left[\zeta_{6}\right];\Bold{Q}(\zeta_{6}))
    """
    # Render as ModSym_k(group, character values; base ring).
    char_values = list(self.character().values_on_gens())
    parts = (self.weight(),
             latex.latex(self.group()),
             latex.latex(char_values),
             latex.latex(self.base_ring()))
    return "\\mathrm{ModSym}_{%s}(%s,%s;%s)"%parts
def _matrix_of_operator_on_modular_symbols(self, codomain, R):
    r"""
    Returns the matrix of a modular symbols operator.

    .. note::

        Users will usually instead use the simpler interface
        defined, for example, by ``hecke_matrix()`` (see examples),
        though this function allows one to compute much more
        general operators.

    INPUT:

    - ``codomain`` - space of modular symbols

    - ``R`` (list) -- a list of lists `[a,b,c,d]` of length 4,
      which we view as elements of `GL_2(`QQ)`.

    OUTPUT:

    -- (matrix) The matrix of the operator

    .. math::

        x \mapsto \sum_{g in R} g.x,

    where `g.x` is the formal linear fractional transformation on modular
    symbols, with respect to the standard basis.

    EXAMPLES::

        sage: M = ModularSymbols(37,2)
        sage: M._matrix_of_operator_on_modular_symbols(M,HeilbronnCremona(3))
        [ 4 0 0 0 0]
        [ 0 -3 1 1 0]
        [ 0 3 0 5 -2]
        [ 0 -3 1 -5 3]
        [ 0 0 2 3 -3]
    """
    rows = []
    # One row per basis element: apply every g in R to each term of the
    # modular-symbol representation of the basis element, then express
    # the image in the codomain's coordinates.
    for b in self.basis():
        v = formal_sum.FormalSum(0, check=False)
        for c, x in b.modular_symbol_rep():
            for g in R:
                y = x.apply(g)
                v += y*c
        w = codomain(v).element()
        rows.append(w)
    M = matrix_space.MatrixSpace(self.base_ring(), len(rows), codomain.degree(), sparse=False)
    return M(rows)
def _compute_atkin_lehner_matrix(self, d):
    r"""
    Return the matrix of the Atkin-Lehner involution `W_d`.

    INPUT:

    - ``d`` (int) -- an integer that divides the level.

    OUTPUT:

    (matrix) The matrix of the involution `W_d` with respect to
    the standard basis.

    EXAMPLES: An example at level 29::

        sage: M = ModularSymbols((DirichletGroup(29,QQ).0), 2,1); M
        Modular Symbols space of dimension 4 and level 29, weight 2, character [-1], sign 1, over Rational Field
        sage: w = M._compute_atkin_lehner_matrix(29)
        sage: w^2 == 1
        True
        sage: w.fcp()
        (x - 1)^2 * (x + 1)^2

    This doesn't work since the character has order 2::

        sage: M = ModularSymbols((DirichletGroup(13).0), 2,1); M
        Modular Symbols space of dimension 0 and level 13, weight 2, character [zeta12], sign 1, over Cyclotomic Field of order 12 and degree 4
        sage: M._compute_atkin_lehner_matrix(13)
        Traceback (most recent call last):
        ...
        ValueError: Atkin-Lehner only leaves space invariant when character is trivial or quadratic. In general it sends M_k(chi) to M_k(1/chi)

    Note that Atkin-Lehner does make sense on `\Gamma_1(13)`,
    but doesn't commute with the Hecke operators::

        sage: M = ModularSymbols(Gamma1(13),2)
        sage: w = M.atkin_lehner_operator(13).matrix()
        sage: t = M.T(2).matrix()
        sage: t*w == w*t
        False
        sage: w^2 == 1
        True
    """
    # W_d only preserves the space when the character is at most quadratic.
    chi = self.character()
    if chi is not None and chi.order() > 2:
        raise ValueError("Atkin-Lehner only leaves space invariant when character is trivial or quadratic. In general it sends M_k(chi) to M_k(1/chi)")
    N = self.level()
    k = self.weight()
    R = self.base_ring()
    if N%d != 0:
        raise ValueError("d must divide N")
    # Solve d*x - (N/d)*y = 1, then [d*x, y; N, d] is an integer matrix of
    # determinant d^2*x - N*y = d, representing W_d.
    g, x, y = arith.xgcd(d, -N//d)
    g = [d*x, y, N, d]
    A = self._action_on_modular_symbols(g)
    # Normalize so that W_d is an involution in weight k.
    scale = R(d)**(1 - k//2)
    Wmat = scale * A
    return Wmat
def boundary_map(self):
    r"""
    Return the boundary map to the corresponding space of boundary modular
    symbols.

    EXAMPLES::

        sage: ModularSymbols(20,2).boundary_map()
        Hecke module morphism boundary map defined by the matrix
        [ 1 -1 0 0 0 0]
        [ 0 1 -1 0 0 0]
        [ 0 1 0 -1 0 0]
        [ 0 0 0 -1 1 0]
        [ 0 1 0 -1 0 0]
        [ 0 0 1 -1 0 0]
        [ 0 1 0 0 0 -1]
        Domain: Modular Symbols space of dimension 7 for Gamma_0(20) of weight ...
        Codomain: Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(20) ...
        sage: type(ModularSymbols(20,2).boundary_map())
        <class 'sage.modular.hecke.morphism.HeckeModuleMorphism_matrix'>
    """
    # Cached per instance in self.__boundary_map.
    try:
        return self.__boundary_map
    except AttributeError:
        # compute boundary map
        B = self.boundary_space()
        # Image of each basis element in the boundary space.
        I = [B(b) for b in self.basis()]
        W = matrix_space.MatrixSpace(self.base_ring(), len(I), B.rank(), sparse=True)
        # Note -- the underlying elements have degree the number of distinct
        # cusps known when the element was computed. This isn't constant,
        # so we pad the elements.
        E = [x.element() for x in I]
        zero = self.base_ring()(0)
        n = int(B.dimension())
        # Pad each coordinate vector with zeros up to length n, then
        # flatten all rows into a single list for the matrix constructor.
        E = sum([ list(x) + [zero]*(n - len(x)) for x in E ], [])
        A = W( E )
        H = cat.Hom(self, B)
        self.__boundary_map = H(A, "boundary map")
        return self.__boundary_map
def cusps(self):
    r"""
    Return the list of cusps of this modular symbols space.

    EXAMPLES::

        sage: ModularSymbols(20,2).cusps()
        [Infinity, 0, -1/4, 1/5, -1/2, 1/10]
    """
    try:
        return self.__cusps
    except AttributeError:
        pass
    # The boundary space records the cusps it has seen while the
    # boundary map was computed.
    C = self.boundary_map().codomain()._known_cusps()
    self.__cusps = C
    return C
def boundary_space(self):
    r"""
    Return the subspace of boundary modular symbols of this modular
    symbols ambient space.

    .. note::

        Abstract in this base class; concrete derived classes must
        override this method.

    EXAMPLES::

        sage: M = ModularSymbols(20, 2)
        sage: B = M.boundary_space(); B
        Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(20) of weight 2 and over Rational Field
        sage: M.cusps()
        [Infinity, 0, -1/4, 1/5, -1/2, 1/10]
        sage: M.dimension()
        7
        sage: B.dimension()
        6
    """
    # Each concrete ambient space knows its own boundary space.
    raise NotImplementedError
def cuspidal_submodule(self):
    """
    The cuspidal submodule of this modular symbols ambient space.

    EXAMPLES::

        sage: M = ModularSymbols(12,2,0,GF(5)) ; M
        Modular Symbols space of dimension 5 for Gamma_0(12) of weight 2 with sign 0 over Finite Field of size 5
        sage: M.cuspidal_submodule()
        Modular Symbols subspace of dimension 0 of Modular Symbols space of dimension 5 for Gamma_0(12) of weight 2 with sign 0 over Finite Field of size 5
        sage: ModularSymbols(1,24,-1).cuspidal_submodule()
        Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 2 for Gamma_0(1) of weight 24 with sign -1 over Rational Field

    The cuspidal submodule of the cuspidal submodule is itself::

        sage: M = ModularSymbols(389)
        sage: S = M.cuspidal_submodule()
        sage: S.cuspidal_submodule() is S
        True
    """
    # Cached per instance in self.__cuspidal_submodule.
    try:
        return self.__cuspidal_submodule
    except AttributeError:
        # If this space is already flagged cuspidal, it is its own
        # cuspidal submodule.
        try:
            if self.__is_cuspidal:
                return self
        except AttributeError:
            pass
        # The cuspidal subspace is the kernel of the boundary map.
        S = self.boundary_map().kernel()
        S._set_is_cuspidal(True)
        S._is_full_hecke_module = True
        ## We know the cuspidal subspace is stable, so
        ## if it's one-dimensional, it must be simple
        if S.dimension() == 1:
            S._is_simple = True
        # Sanity check against the dimension formula (characteristic 0 only).
        if self.base_ring().characteristic() == 0:
            d = self._cuspidal_submodule_dimension_formula()
            if not d is None:
                assert d == S.dimension(), "According to dimension formulas the cuspidal subspace of \"%s\" has dimension %s; however, computing it using modular symbols we obtained %s, so there is a bug (please report!)."%(self, d, S.dimension())
        self.__cuspidal_submodule = S
    return self.__cuspidal_submodule
def _degeneracy_raising_matrix(self, M, t):
    r"""
    Return the matrix of the level-raising degeneracy map from self to M,
    of index t. This is calculated by composing the level-raising matrix
    for `t = 1` with a Hecke operator.

    INPUT:

    - ``M`` (int) -- a space of modular symbols whose level is an integer
      multiple of the the level of self

    - ``t`` (int) -- a positive integer dividing the quotient of the two
      levels.

    OUTPUT:

    (matrix) The matrix of the degeneracy map of index `t` from this space
    of level `N` to the space `M` (of level a multiple of `N`). Here `t` is
    a divisor of the quotient.

    EXAMPLES::

        sage: A = ModularSymbols(11, 2); B = ModularSymbols(22, 2)
        sage: A._degeneracy_raising_matrix(B, 1)
        [ 1 0 0 0 0 -1 -1]
        [ 0 1 0 -3 1 1 -1]
        [ 0 1 1 -1 -1 0 0]
        sage: A._degeneracy_raising_matrix(B, 2)
        [ 2 0 0 0 1 0 -1]
        [ 0 0 -1 3 -1 -1 1]
        [ 0 -1 -1 1 0 1 -1]

    Check that :trac:`13198` is fixed::

        sage: M22 = ModularSymbols(Gamma1(22), sign=1)
        sage: M2 = ModularSymbols(Gamma1(2))
        sage: d1 = M2.degeneracy_map(M22,1)
        sage: d2 = M2.degeneracy_map(M22,11)
        sage: M22.hecke_matrix(17).restrict((d1.image() + d2.image()).free_module())
        [18 0]
        [ 0 18]
        sage: S = M22.cuspidal_submodule()
        sage: S.new_submodule().intersection(S.old_submodule()) == S.zero_submodule()
        True
    """
    if t == 1:
        return self._degeneracy_raising_matrix_1(M)
    else:
        # use Hecke operator and t=1 case.
        d1 = self.degeneracy_map(M, 1).matrix()
        T = M.hecke_matrix(t)
        # ~self.base_ring()(t) is the inverse of t in the base ring,
        # compensating for the normalization of T_t.
        return (~self.base_ring()(t)) * d1 * T
def _degeneracy_raising_matrix_1(self, M):
    r"""
    Return the matrix of the index-1 degeneracy map to the given level
    (which must be a multiple of the level of self).

    .. note::

        Abstract in this base class; implemented only in the derived
        classes.

    EXAMPLES::

        sage: M = ModularSymbols(37,4)
        sage: M._degeneracy_raising_matrix_1(ModularSymbols(74, 4))
        20 x 58 dense matrix over Rational Field
    """
    # Derived classes supply the concrete computation.
    raise NotImplementedError
def _degeneracy_lowering_matrix(self, M, t):
    r"""
    Return the matrix of the level-lowering degeneracy map from self to M.

    INPUT:

    - ``M`` -- a modular symbols space whose level divides the level of
      self

    - ``t`` (int) -- a positive integer dividing the quotient of the
      levels.

    OUTPUT:

    (matrix) The matrix of the degeneracy map from this space to the space
    `M` of index `t`, where `t` is a divisor of the quotient of the levels
    of self and `M`.

    EXAMPLES::

        sage: M = ModularSymbols(22,2)
        sage: M._degeneracy_lowering_matrix(ModularSymbols(11, 2), 2)
        [ 1 0 0]
        [ 0 1 -1]
        [ 0 0 -1]
        [ 0 1 0]
        [ 0 0 0]
        [-1 0 1]
        [-1 0 0]
    """
    # Use Proposition 2.6.15 in Merel's 1585 paper (or Prop 15 in
    # electronic version of that paper): the index-t lowering map is the
    # generalized Heilbronn operator for the Heilbronn-Merel matrices of
    # determinant t.
    operator = self.__heilbronn_operator(M, heilbronn.HeilbronnMerel(t), t)
    return operator.matrix()
def rank(self):
    """
    Return the rank of this modular symbols ambient space.

    OUTPUT:

    (int) The rank of this space of modular symbols.

    EXAMPLES::

        sage: M = ModularSymbols(389)
        sage: M.rank()
        65

    ::

        sage: ModularSymbols(11,sign=0).rank()
        3
        sage: ModularSymbols(100,sign=0).rank()
        31
        sage: ModularSymbols(22,sign=1).rank()
        5
        sage: ModularSymbols(1,12).rank()
        3
        sage: ModularSymbols(3,4).rank()
        2
        sage: ModularSymbols(8,6,sign=-1).rank()
        3
    """
    # The rank equals the number of basis Manin symbols; cache it.
    try:
        rk = self.__rank
    except AttributeError:
        rk = self.__rank = len(self.manin_basis())
    return rk
def eisenstein_submodule(self):
    """
    Return the Eisenstein submodule of this space of modular symbols.

    EXAMPLES::

        sage: ModularSymbols(20,2).eisenstein_submodule()
        Modular Symbols subspace of dimension 5 of Modular Symbols space of dimension 7 for Gamma_0(20) of weight 2 with sign 0 over Rational Field
    """
    try:
        return self.__eisenstein_submodule
    except AttributeError:
        pass
    # The Eisenstein part is the complement of the cuspidal part.
    E = self.cuspidal_submodule().complement()
    self.__eisenstein_submodule = E
    return E
def element(self, x):
    """
    Creates and returns an element of self from a modular symbol, if
    possible.

    INPUT:

    - ``x`` - an object of one of the following types:
      ModularSymbol, ManinSymbol.

    OUTPUT:

    ModularSymbol - a modular symbol with parent self.

    EXAMPLES::

        sage: M = ModularSymbols(11,4,1)
        sage: M.T(3)
        Hecke operator T_3 on Modular Symbols space of dimension 4 for Gamma_0(11) of weight 4 with sign 1 over Rational Field
        sage: M.T(3)(M.0)
        28*[X^2,(0,1)] + 2*[X^2,(1,7)] - [X^2,(1,9)] - [X^2,(1,10)]
        sage: M.T(3)(M.0).element()
        (28, 2, -1, -1)
    """
    if isinstance(x, manin_symbols.ManinSymbol):
        if not x.parent().weight() == self.weight():
            # (Previously this message contained a long run of embedded
            # spaces from a backslash continuation inside the literal.)
            raise ArithmeticError("incompatible weights: Manin symbol has weight %s, but modular symbols space has weight %s"%(
                x.parent().weight(), self.weight()))
        # Express x in terms of the basis of this space.
        t = self.manin_symbols().index(x.tuple())
        if isinstance(t, tuple):
            # index() returned (row, scalar): x is scalar times a generator.
            i, scalar = t
            v = self.manin_gens_to_basis().row(i) * scalar
        else:
            v = self.manin_gens_to_basis().row(t)
        return element.ModularSymbolsElement(self, v)
    elif isinstance(x, element.ModularSymbolsElement):
        M = x.parent()
        if M.ambient_hecke_module() != self:
            # TODO -- sometimes do something more sophisticated here.
            raise TypeError("Modular symbol (%s) does not lie in this space."%x)
        return self(x.element())
    else:
        # Bug fix: the format arguments were swapped (x, self), which
        # produced "Cannot create element of <x> from <space>".
        raise ValueError("Cannot create element of %s from %s."%(self, x))
def dual_star_involution_matrix(self):
    """
    Return the matrix of the dual star involution, which is induced by
    complex conjugation on the linear dual of modular symbols.

    EXAMPLES::

        sage: ModularSymbols(20,2).dual_star_involution_matrix()
        [1 0 0 0 0 0 0]
        [0 1 0 0 0 0 0]
        [0 0 0 0 1 0 0]
        [0 0 0 1 0 0 0]
        [0 0 1 0 0 0 0]
        [0 0 0 0 0 1 0]
        [0 0 0 0 0 0 1]
    """
    try:
        return self.__dual_star_involution_matrix
    except AttributeError:
        pass
    # The dual involution is the transpose of the star involution.
    D = self.star_involution().matrix().transpose()
    self.__dual_star_involution_matrix = D
    return D
def factorization(self):
    r"""
    Returns a list of pairs `(S,e)` where `S` is spaces
    of modular symbols and self is isomorphic to the direct sum of the
    `S^e` as a module over the *anemic* Hecke algebra adjoin
    the star involution. The cuspidal `S` are all simple, but
    the Eisenstein factors need not be simple.

    EXAMPLES::

        sage: ModularSymbols(Gamma0(22), 2).factorization()
        (Modular Symbols subspace of dimension 1 of Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field)^2 *
        (Modular Symbols subspace of dimension 1 of Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field)^2 *
        (Modular Symbols subspace of dimension 3 of Modular Symbols space of dimension 7 for Gamma_0(22) of weight 2 with sign 0 over Rational Field)

    ::

        sage: ModularSymbols(1,6,0,GF(2)).factorization()
        (Modular Symbols subspace of dimension 1 of Modular Symbols space of dimension 2 for Gamma_0(1) of weight 6 with sign 0 over Finite Field of size 2) *
        (Modular Symbols subspace of dimension 1 of Modular Symbols space of dimension 2 for Gamma_0(1) of weight 6 with sign 0 over Finite Field of size 2)

    ::

        sage: ModularSymbols(18,2).factorization()
        (Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 7 for Gamma_0(18) of weight 2 with sign 0 over Rational Field) *
        (Modular Symbols subspace of dimension 5 of Modular Symbols space of dimension 7 for Gamma_0(18) of weight 2 with sign 0 over Rational Field)

    ::

        sage: M = ModularSymbols(DirichletGroup(38,CyclotomicField(3)).0^2, 2, +1); M
        Modular Symbols space of dimension 7 and level 38, weight 2, character [zeta3], sign 1, over Cyclotomic Field of order 3 and degree 2
        sage: M.factorization() # long time (about 8 seconds)
        (Modular Symbols subspace of dimension 1 of Modular Symbols space of dimension 7 and level 38, weight 2, character [zeta3], sign 1, over Cyclotomic Field of order 3 and degree 2) *
        (Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 7 and level 38, weight 2, character [zeta3], sign 1, over Cyclotomic Field of order 3 and degree 2) *
        (Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 7 and level 38, weight 2, character [zeta3], sign 1, over Cyclotomic Field of order 3 and degree 2) *
        (Modular Symbols subspace of dimension 2 of Modular Symbols space of dimension 7 and level 38, weight 2, character [zeta3], sign 1, over Cyclotomic Field of order 3 and degree 2)
    """
    ## EXAMPLES:
    ## sage: M = ModularSymbols(Gamma0(22), 2); M
    ## Modular Symbols space of dimension 7 for Gamma_0(22) of weight 2 with sign 0 over Rational Field
    ## sage: M.factorization():
    ## ... print b.dimension(), b.level(), e
    ## 1 11 2
    ## 1 11 2
    ## 1 11 2
    ## 1 22 1
    ## An example with sign 1:
    ## sage: M = ModularSymbols(Gamma0(22), 2, sign=1); M
    ## Modular Symbols space of dimension 5 for Gamma_0(22) of weight 2 with sign 1 over Rational Field
    ## sage: for b, e in M.factorization():
    ## ... print b.dimension(), b.level(), e
    ## 1 11 2
    ## 1 11 2
    ## 1 22 1
    ## An example for Gamma1:
    ## sage: M = ModularSymbols(Gamma1(26), 2, sign=1); M
    ## Modular Symbols space of dimension 33 for Gamma_1(26) of weight 2 with sign 1 and over Rational Field
    ## sage: for b, e in M.factorization():
    ## ... print b.dimension(), b.level(), e
    ## 1 13 2
    ## 1 13 2
    ## 1 13 2
    ## 2 13 2
    ## 2 13 2
    ## 2 13 2
    ## 2 13 2
    ## 2 13 2
    ## 1 26 1
    ## 1 26 1
    ## 1 26 1
    ## 2 26 1
    ## 2 26 1
    ## An example with level divisible by a square:
    ## sage: M = ModularSymbols(Gamma0(2*9),2); M
    ## ???
    ## sage: for b, e in M.factorization():
    ## ... print b.dimension(), b.level(), e
    ## ???
    # Cached per instance in self._factorization.
    try:
        return self._factorization
    except AttributeError:
        pass
    # A simple space is its own (trivial) factorization.
    try:
        if self._is_simple:
            return [self]
    except AttributeError:
        pass
    D = []
    # Treat the cuspidal and eisenstein parts separately. The
    # cuspidal part is very straightforward because of
    # Atkin-Lehner-Li theory. The eisenstein part is trickier,
    # because of E2 and that the new and old Eisenstein subspaces
    # can intersect (e.g., they do for M_2(Gamma_0(6))), even
    # in a way that involves forms other than E_2 (i.e., twists
    # of E2).
    # 1. Cuspidal part -- compute the factors and their multiplicities
    # using Atkin-Lehner-Li.
    # 2. Eisenstein part -- just call normal decomposition.
    # In the special case of weight 2 we have to do a bunch of
    # annoying extra work below to deal with the Eisenstein series E_2.
    k = self.weight()
    ## If the characteristic of the base ring is 2,
    ## the star involution is the identity, so we
    ## want to avoid adding each cuspidal submodule
    ## twice.
    if self.base_ring().characteristic() == 2:
        skip_minus = True
    else:
        skip_minus = False
    # The cuspidal part
    # We only run through spaces of level a multiple of the conductor of the character, which
    # we compute below, or set to 1 in case of Gamma_H or Gamma_1
    chi = self.character()
    cond = 1 if chi is None else chi.conductor()
    # Now actually run through the divisor levels, taking only the ones with that are
    # a multiple of the conductor.
    for d in reversed(arith.divisors(self.level())):
        if d%cond != 0: continue
        # Multiplicity of each newform factor at level d is the number of
        # divisors of self.level()/d (Atkin-Lehner-Li).
        n = arith.number_of_divisors(self.level() // d)
        M = self.modular_symbols_of_level(d)
        N = M.new_submodule().cuspidal_submodule().decomposition()
        for A in N:
            if self.sign() == 0:
                # Sign 0: split each factor into plus and minus eigenspaces
                # of the star involution.
                V = A.plus_submodule()
                V._is_simple = True
                D.append((V,n))
                if skip_minus:
                    continue
                V = A.minus_submodule()
                V._is_simple = True
                D.append((V,n))
            else:
                A._is_simple = True
                D.append((A,n))
    # The eisenstein part
    for E in self.eisenstein_submodule().decomposition(anemic=True):
        D.append((E,1))
    # Sanity check: dimensions must add up to the dimension of self.
    r = self.dimension()
    s = sum([A.rank()*mult for A, mult in D])
    D = sage.structure.all.Factorization(D, cr=True, sort=False)
    # NOTE(review): relies on the builtin ``cmp`` (Python 2 only) --
    # confirm this still runs under the Python version in use.
    D.sort(_cmp = cmp)
    assert r == s, "bug in factorization -- self has dimension %s, but sum of dimensions of factors is %s\n%s"%(r, s, D)
    self._factorization = D
    return self._factorization
# Alias: M.factor() is the same as M.factorization().
factor = factorization
def is_cuspidal(self):
    r"""
    Return True if this space is cuspidal, else False.

    A space counts as cuspidal exactly when its dimension equals that
    of the cuspidal submodule of its ambient Hecke module.

    EXAMPLES::

        sage: M = ModularSymbols(20,2)
        sage: M.is_cuspidal()
        False
        sage: S = M.cuspidal_subspace()
        sage: S.is_cuspidal()
        True
        sage: S = M.eisenstein_subspace()
        sage: S.is_cuspidal()
        False
    """
    # Answer is cached on first computation.
    try:
        return self.__is_cuspidal
    except AttributeError:
        pass
    ambient_cusp = self.ambient_hecke_module().cuspidal_submodule()
    self.__is_cuspidal = (ambient_cusp.dimension() == self.dimension())
    return self.__is_cuspidal
def is_eisenstein(self):
    r"""
    Return True if this space is Eisenstein, else False.

    A space counts as Eisenstein exactly when its dimension equals that
    of the Eisenstein submodule of its ambient Hecke module.

    EXAMPLES::

        sage: M = ModularSymbols(20,2)
        sage: M.is_eisenstein()
        False
        sage: S = M.eisenstein_submodule()
        sage: S.is_eisenstein()
        True
        sage: S = M.cuspidal_subspace()
        sage: S.is_eisenstein()
        False
    """
    # Answer is cached on first computation.
    try:
        return self.__is_eisenstein
    except AttributeError:
        pass
    ambient_eis = self.ambient_hecke_module().eisenstein_submodule()
    self.__is_eisenstein = (self.dimension() == ambient_eis.dimension())
    return self.__is_eisenstein
def manin_symbols_basis(self):
    """
    A list of Manin symbols that form a basis for the ambient space
    self.

    INPUT:

    - ``ModularSymbols self`` - an ambient space of
      modular symbols

    OUTPUT:

    - ``list`` - a list of 2-tuples (if the weight is 2)
      or 3-tuples, which represent the Manin symbols basis for self.

    EXAMPLES::

        sage: m = ModularSymbols(23)
        sage: m.manin_symbols_basis()
        [(1,0), (1,17), (1,19), (1,20), (1,21)]
        sage: m = ModularSymbols(6, weight=4, sign=-1)
        sage: m.manin_symbols_basis()
        [[X^2,(2,1)]]
    """
    # Look up each basis index in the full Manin symbol list.
    syms = self.manin_symbols()
    return list(map(syms.manin_symbol, self.manin_basis()))
def modular_symbols_of_sign(self, sign):
    r"""
    Returns a space of modular symbols with the same defining
    properties (weight, level, etc.) as this space except with given
    sign.

    INPUT:

    - ``sign`` (int) -- A sign (`+1`, `-1` or `0`).

    OUTPUT:

    (ModularSymbolsAmbient) A space of modular symbols with the
    same defining properties (weight, level, etc.) as this space
    except with given sign.

    EXAMPLES::

        sage: M = ModularSymbols(Gamma0(11),2,sign=0)
        sage: M
        Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field
        sage: M.modular_symbols_of_sign(-1)
        Modular Symbols space of dimension 1 for Gamma_0(11) of weight 2 with sign -1 over Rational Field
        sage: M = ModularSymbols(Gamma1(11),2,sign=0)
        sage: M.modular_symbols_of_sign(-1)
        Modular Symbols space of dimension 1 for Gamma_1(11) of weight 2 with sign -1 and over Rational Field
    """
    # If the sign does not change, reuse this very space.
    if sign != self.sign():
        return modsym.ModularSymbols(self.group(), self.weight(),
                                     sign=sign, base_ring=self.base_ring())
    return self
def modular_symbols_of_weight(self, k):
    r"""
    Returns a space of modular symbols with the same defining
    properties (weight, sign, etc.) as this space except with weight
    `k`.

    INPUT:

    - ``k`` (int) -- A positive integer.

    OUTPUT:

    (ModularSymbolsAmbient) A space of modular symbols with the
    same defining properties (level, sign) as this space
    except with given weight.

    EXAMPLES::

        sage: M = ModularSymbols(Gamma1(6),2,sign=0)
        sage: M.modular_symbols_of_weight(3)
        Modular Symbols space of dimension 4 for Gamma_1(6) of weight 3 with sign 0 and over Rational Field
    """
    # If the weight does not change, reuse this very space.
    if k != self.weight():
        return modsym.ModularSymbols(self.group(), weight=k,
                                     sign=self.sign(), base_ring=self.base_ring())
    return self
def _compute_sign_submodule(self, sign, compute_dual=True):
    r"""
    Return the subspace of self on which the star involution acts as
    ``sign``.

    INPUT:

    - ``sign`` - int (either -1 or +1)

    - ``compute_dual`` - bool (default: True) also
      compute dual subspace. This are useful for many algorithms.

    OUTPUT:

    A subspace of modular symbols

    EXAMPLES::

        sage: ModularSymbols(1,12,0,GF(5)).minus_submodule() ## indirect doctest
        Modular Symbols subspace of dimension 1 of Modular Symbols space of dimension 3 for Gamma_0(1) of weight 12 with sign 0 over Finite Field of size 5
    """
    # The sign eigenspace is the kernel of (star - sign).
    shifted = self.star_involution().matrix() - self.base_ring()(sign)
    fixed = shifted.kernel()
    if compute_dual:
        # The dual subspace is cut out by the transposed operator.
        sub = self.submodule(fixed, shifted.transpose().kernel(), check=False)
    else:
        sub = self.submodule(fixed, check=False)
    sub._set_sign(sign)
    return sub
def star_involution(self):
    r"""
    Return the star involution on this modular symbols space.

    OUTPUT:

    (matrix) The matrix of the star involution on this space,
    which is induced by complex conjugation on modular symbols,
    with respect to the standard basis.

    EXAMPLES::

        sage: ModularSymbols(20,2).star_involution()
        Hecke module morphism Star involution on Modular Symbols space of dimension 7 for Gamma_0(20) of weight 2 with sign 0 over Rational Field defined by the matrix
        [1 0 0 0 0 0 0]
        [0 1 0 0 0 0 0]
        [0 0 0 0 1 0 0]
        [0 0 0 1 0 0 0]
        [0 0 1 0 0 0 0]
        [0 0 0 0 0 1 0]
        [0 0 0 0 0 0 1]
        Domain: Modular Symbols space of dimension 7 for Gamma_0(20) of weight ...
        Codomain: Modular Symbols space of dimension 7 for Gamma_0(20) of weight ...
    """
    try:
        # Cached on first computation.
        return self.__star_involution
    except AttributeError:
        # The involution is realized as the Heilbronn-type operator
        # attached to the single matrix [-1,0; 0,1].
        star = self.__heilbronn_operator(self, [[-1, 0, 0, 1]], 1)
        star.name("Star involution on %s" % self)
        self.__star_involution = star
        return self.__star_involution
def _compute_diamond_matrix(self, d):
    r"""
    Return the diamond bracket d operator on this modular symbols space.

    INPUT:

    - `d` -- integer

    OUTPUT:

    - ``matrix`` - the matrix of the diamond bracket operator
      on this space.

    EXAMPLES::

        sage: e = kronecker_character(7)
        sage: M = ModularSymbols(e,2,sign=1)
        sage: D = M.diamond_bracket_operator(5); D
        Diamond bracket operator <5> on Modular Symbols space ...
        sage: D.matrix() # indirect doctest
        [-1  0  0  0]
        [ 0 -1  0  0]
        [ 0  0 -1  0]
        [ 0  0  0 -1]
        sage: [M.diamond_bracket_operator(d).matrix()[0,0] for d in [0..6]]
        [0, 1, 0, 1, 0, -1, 0]
        sage: [e(d) for d in [0..6]]
        [0, 1, 0, 1, 0, -1, 0]

    We test that the sign issue at #8620 is fixed::

        sage: M = Newforms(Gamma1(13),names = 'a')[0].modular_symbols(sign=0)
        sage: M.diamond_bracket_operator(4).matrix()
        [ 0  0  1 -1]
        [-1 -1  0  1]
        [-1 -1  0  0]
        [ 0 -1  1 -1]

    We check that the result is correctly normalised for weight > 2::

        sage: ModularSymbols(Gamma1(13), 5).diamond_bracket_operator(6).charpoly().factor()
        (x^2 + 1)^8 * (x^4 - x^2 + 1)^10
    """
    # The scalar matrix [d,0; 0,d] acts as the diamond operator up to a
    # power of d; the factor d^(2-k) normalises it for weight k > 2.
    raw = self.__heilbronn_operator(self, [[d, 0, 0, d]], 1).matrix()
    normalisation = d ** (2 - self.weight())
    return raw * normalisation
def submodule(self, M, dual_free_module=None, check=True):
    r"""
    Return the submodule with given generators or free module `M`.

    INPUT:

    - ``M`` - either a submodule of this ambient free module, or
      generators for a submodule;

    - ``dual_free_module`` (bool, default None) -- this may be
      useful to speed up certain calculations; it is the
      corresponding submodule of the ambient dual module;

    - ``check`` (bool, default True) -- if True, check that `M` is
      a submodule, i.e. is invariant under all Hecke operators.

    OUTPUT:

    A subspace of this modular symbol space.

    EXAMPLES::

        sage: M = ModularSymbols(11)
        sage: M.submodule([M.0])
        Traceback (most recent call last):
        ...
        ValueError: The submodule must be invariant under all Hecke operators.
        sage: M.eisenstein_submodule().basis()
        ((1,0) - 1/5*(1,9),)
        sage: M.basis()
        ((1,0), (1,8), (1,9))
        sage: M.submodule([M.0 - 1/5*M.2])
        Modular Symbols subspace of dimension 1 of Modular Symbols space of dimension 3 for Gamma_0(11) of weight 2 with sign 0 over Rational Field

    .. note::

       It would make more sense to only check that `M` is invariant
       under the Hecke operators with index coprime to the level.
       Unfortunately, I do not know a reasonable algorithm for
       determining whether a module is invariant under just the
       anemic Hecke algebra, since I do not know an analogue of
       the Sturm bound for the anemic Hecke algebra. - <NAME>, 2007-07-27
    """
    # When checking, coerce a list/tuple of generators (or module
    # elements) into an honest submodule of the ambient free module.
    if check and not free_module.is_FreeModule(M):
        V = self.free_module()
        gens = M if isinstance(M, (list, tuple)) else M.gens()
        M = V.span([V(g.element()) for g in gens])
    return subspace.ModularSymbolsSubspace(self, M,
                                           dual_free_module=dual_free_module,
                                           check=check)
def twisted_winding_element(self, i, eps):
    r"""
    Return the twisted winding element of given degree and character.

    INPUT:

    - ``i`` (int) -- an integer, `0\le i\le k-2` where `k` is the weight.

    - ``eps`` (character) -- a Dirichlet character

    OUTPUT:

    (modular symbol) The so-called 'twisted winding element':

    .. math::

        \sum_{a \in (\ZZ/m\ZZ)^\times} \varepsilon(a) * [ i, 0, a/m ].

    .. note::

       This will only work if the base ring of the modular symbol
       space contains the character values.

    EXAMPLES::

        sage: eps = DirichletGroup(5)[2]
        sage: K = eps.base_ring()
        sage: M = ModularSymbols(37,2,0,K)
        sage: M.twisted_winding_element(0,eps)
        2*(1,23) - 2*(1,32) + 2*(1,34)
    """
    if not dirichlet.is_DirichletCharacter(eps):
        raise TypeError("eps must be a Dirichlet character.")
    if i < 0 or i > self.weight() - 2:
        raise ValueError("i must be between 0 and k-2.")
    m = eps.modulus()
    # Accumulate the sum over residues prime to the modulus, starting
    # from the zero element of this space.
    total = self(0)
    for a in range(1, m):
        if rings.gcd(a, m) != 1:
            continue
        total += eps(a) * self.modular_symbol([i, Cusp(0), Cusp(a / m)])
    return total
######################################################################
# Z-module of integral modular symbols.
#######################################################################
def integral_structure(self, algorithm='default'):
    r"""
    Return the `\ZZ`-structure of this modular symbols
    space, generated by all integral modular symbols.

    INPUT:

    - ``algorithm`` - string (default: 'default' - choose
      heuristically)

      - ``'pari'`` - use pari for the HNF computation

      - ``'padic'`` - use p-adic algorithm (only good for
        dense case)

    ALGORITHM: It suffices to consider lattice generated by the free
    generating symbols `X^iY^{k-2-i}.(u,v)` after quotienting
    out by the `S` (and `I`) relations, since the
    quotient by these relations is the same over any ring.

    EXAMPLES: In weight 2 the rational basis is often integral.

    ::

        sage: M = ModularSymbols(11,2)
        sage: M.integral_structure()
        Free module of degree 3 and rank 3 over Integer Ring
        Echelon basis matrix:
        [1 0 0]
        [0 1 0]
        [0 0 1]

    This is rarely the case in higher weight::

        sage: M = ModularSymbols(6,4)
        sage: M.integral_structure()
        Free module of degree 6 and rank 6 over Integer Ring
        Echelon basis matrix:
        [  1   0   0   0   0   0]
        [  0   1   0   0   0   0]
        [  0   0 1/2 1/2 1/2 1/2]
        [  0   0   0   1   0   0]
        [  0   0   0   0   1   0]
        [  0   0   0   0   0   1]

    Here is an example involving `\Gamma_1(N)`.

    ::

        sage: M = ModularSymbols(Gamma1(5),6)
        sage: M.integral_structure()
        Free module of degree 10 and rank 10 over Integer Ring
        Echelon basis matrix:
        [     1      0      0      0      0      0      0      0      0      0]
        [     0      1      0      0      0      0      0      0      0      0]
        [     0      0  1/102      0  5/204  1/136  23/24   3/17 43/136 69/136]
        [     0      0      0   1/48      0   1/48  23/24    1/6    1/8  17/24]
        [     0      0      0      0   1/24      0  23/24    1/3    1/6    1/2]
        [     0      0      0      0      0   1/24  23/24    1/3  11/24   5/24]
        [     0      0      0      0      0      0      1      0      0      0]
        [     0      0      0      0      0      0      0    1/2      0    1/2]
        [     0      0      0      0      0      0      0      0    1/2    1/2]
        [     0      0      0      0      0      0      0      0      0      1]
    """
    # Only implemented over QQ (the lattice is cleared of denominators
    # below, which assumes a rational base ring).
    if not self.base_ring() == rational_field.RationalField():
        raise NotImplementedError
    # Cached on first computation.
    try:
        return self.__integral_structure
    except AttributeError:
        pass

    # The attribute _mod2term is set by self.compute_presentation().
    # It is a list of pairs (n, c), such that the ith element of the list
    # is equivalent to c times the n-th basis Manin symbol.
    G = set([i for i, _ in self._mod2term])

    # Now G is a set of integer i such that these integers gives
    # indices of Manin symbols that together generate the integral
    # structure.  We next obtain the corresponding list of elements
    # by passing to the quotient by the remaining relations
    # via the _manin_gens_to_basis attribute.

    # Next we take each element of X, which gives a linear combination
    # of the basis of the underlying vector space of self, and compute
    # the Z-module they span.
    G = list(G)
    G.sort()
    B = self._manin_gens_to_basis.matrix_from_rows(list(G)).dense_matrix()
    # Clear denominators so the echelon form can be computed over ZZ;
    # d is the common denominator, undone via scaling at the end.
    B, d = B._clear_denom()
    if algorithm == 'default':
        # pari is much better in the weight 2 case when the input
        # matrix is extremely sparse; the p-adic algorithm is
        # terrible in the sparse case.
        if self.weight() == 2:
            algorithm = 'pari'
        else:
            algorithm = 'padic'
    if algorithm == 'pari':
        B = B.echelon_form(algorithm='pari', include_zero_rows=False)
    elif algorithm == 'padic':
        B = B.echelon_form(algorithm='padic', include_zero_rows=False)
    else:
        raise ValueError("unknown algorithm '%s'"%algorithm)
    W = B.row_module()
    if d != 1:
        # Undo the earlier clearing of denominators.
        W = W.scale(1/d)
    self.__integral_structure = W
    # Sanity check: the integral structure must be a full-rank lattice.
    assert W.rank() == self.rank(), "there is a bug in computing integral structure on modular symbols"
    return self.__integral_structure
######################################################################
# Eigenvalues
#######################################################################
def compact_newform_eigenvalues(self, v, names='alpha'):
    r"""
    Return compact systems of eigenvalues for each Galois conjugacy
    class of cuspidal newforms in this ambient space.

    INPUT:

    - ``v`` - list of positive integers

    OUTPUT:

    - ``list`` - of pairs (E, x), where E\*x is a vector
      with entries the eigenvalues `a_n` for
      `n \in v`.

    EXAMPLES::

        sage: M = ModularSymbols(43,2,1)
        sage: X = M.compact_newform_eigenvalues(prime_range(10))
        sage: X[0][0] * X[0][1]
        (-2, -2, -4, 0)
        sage: X[1][0] * X[1][1]
        (alpha1, -alpha1, -alpha1 + 2, alpha1 - 2)

    ::

        sage: M = ModularSymbols(DirichletGroup(24,QQ).1,2,sign=1)
        sage: M.compact_newform_eigenvalues(prime_range(10),'a')
        [([-1/2 -1/2]
        [ 1/2 -1/2]
        [  -1    1]
        [  -2    0], (1, -2*a0 - 1))]
        sage: a = M.compact_newform_eigenvalues([1..10],'a')[0]
        sage: a[0]*a[1]
        (1, a0, a0 + 1, -2*a0 - 2, -2*a0 - 2, -a0 - 2, -2, 2*a0 + 4, -1, 2*a0 + 4)
        sage: M = ModularSymbols(DirichletGroup(13).0^2,2,sign=1)
        sage: M.compact_newform_eigenvalues(prime_range(10),'a')
        [([ -zeta6 - 1]
        [ 2*zeta6 - 2]
        [-2*zeta6 + 1]
        [           0], (1))]
        sage: a = M.compact_newform_eigenvalues([1..10],'a')[0]
        sage: a[0]*a[1]
        (1, -zeta6 - 1, 2*zeta6 - 2, zeta6, -2*zeta6 + 1, -2*zeta6 + 4, 0, 2*zeta6 - 1, -zeta6, 3*zeta6 - 3)
    """
    if self.sign() == 0:
        raise ValueError("sign must be nonzero")
    v = list(v)

    # Get decomposition of this space
    D = self.cuspidal_submodule().new_subspace().decomposition()
    for A in D:
        # since the sign is nonzero (checked above) and we are on the
        # new cuspidal subspace, each factor is definitely simple.
        A._is_simple = True
    B = [A.dual_free_module().basis_matrix().transpose() for A in D]

    # Normalize the names strings.
    names = ['%s%s'%(names,i) for i in range(len(B))]

    # Find an integer i such that the i-th columns of the basis for the
    # dual modules corresponding to the factors in D are all nonzero.
    nz = None
    for i in range(self.dimension()):
        # Decide if this i works, i.e., ith row of every element of B is nonzero.
        bad = False
        for C in B:
            if C.row(i) == 0:
                # i is bad -- no need to look at the remaining matrices.
                bad = True
                break
        if bad:
            continue
        # It turns out that i is not bad.
        nz = i
        break

    if nz is not None:
        # One Hecke-image computation serves every factor.
        R = self.hecke_images(nz, v)
        return [(R*m, D[i].dual_eigenvector(names=names[i], lift=False, nz=nz))
                for i, m in enumerate(B)]
    else:
        # No single i works, so we do something less uniform: use a
        # per-factor nonzero index, caching Hecke images per index.
        ans = []
        cache = {}
        for i in range(len(D)):
            nz = D[i]._eigen_nonzero()
            if nz in cache:
                R = cache[nz]
            else:
                R = self.hecke_images(nz, v)
                cache[nz] = R
            ans.append((R*B[i], D[i].dual_eigenvector(names=names[i], lift=False, nz=nz)))
        return ans
class ModularSymbolsAmbient_wtk_g0(ModularSymbolsAmbient):
    r"""
    Modular symbols for `\Gamma_0(N)` of integer weight
    `k > 2` over the field `F`.

    For weight `2`, it is faster to use ``ModularSymbols_wt2_g0``.

    INPUT:

    - ``N`` - int, the level

    - ``k`` - integer weight `>= 2`

    - ``sign`` - int, either -1, 0, or 1

    - ``F`` - field

    EXAMPLES::

        sage: ModularSymbols(1,12)
        Modular Symbols space of dimension 3 for Gamma_0(1) of weight 12 with sign 0 over Rational Field
        sage: ModularSymbols(1,12, sign=1).dimension()
        2
        sage: ModularSymbols(15,4, sign=-1).dimension()
        4
        sage: ModularSymbols(6,6).dimension()
        10
        sage: ModularSymbols(36,4).dimension()
        36
    """
    def __init__(self, N, k, sign, F, custom_init=None):
        r"""
        Initialize a space of modular symbols of weight `k` for
        `\Gamma_0(N)`, over `\QQ`.

        For weight `2`, it is faster to use
        ``ModularSymbols_wt2_g0``.

        INPUT:

        - ``N`` - int, the level

        - ``k`` - integer weight `>= 2`

        - ``sign`` - int, either -1, 0, or 1

        - ``F`` - field

        EXAMPLES::

            sage: ModularSymbols(1,12)
            Modular Symbols space of dimension 3 for Gamma_0(1) of weight 12 with sign 0 over Rational Field
            sage: ModularSymbols(1,12, sign=1).dimension()
            2
            sage: ModularSymbols(15,4, sign=-1).dimension()
            4
            sage: ModularSymbols(6,6).dimension()
            10
            sage: ModularSymbols(36,4).dimension()
            36
        """
        # Normalize the defining invariants to plain ints before validating.
        N = int(N)
        k = int(k)
        sign = int(sign)
        if not sign in [-1,0,1]:
            raise TypeError("sign must be an int in [-1,0,1]")
        ModularSymbolsAmbient.__init__(self, weight=k, group=arithgroup.Gamma0(N),
                                       sign=sign, base_ring=F, custom_init=custom_init)

    def _dimension_formula(self):
        r"""
        Return the dimension of this space using the formula.

        EXAMPLES::

            sage: M = ModularSymbols(37,6)
            sage: M.dimension()
            32
            sage: M._dimension_formula()
            32
        """
        if self.base_ring().characteristic() == 0:
            N, k, sign = self.level(), self.weight(), self.sign()
            # The formula below is only valid for sign 0; return None so
            # the caller falls back to another method.
            if sign != 0: return None
            if k%2 == 1:
                return 0
            elif k > 2:
                return 2*self.group().dimension_cusp_forms(k) + self.group().ncusps()
            else:
                # In weight 2 there is one fewer Eisenstein symbol.
                return 2*self.group().dimension_cusp_forms(k) + self.group().ncusps() - 1
        else:
            raise NotImplementedError

    def _repr_(self):
        r"""
        Return the string representation of this Modular Symbols space.

        EXAMPLES::

            sage: M = ModularSymbols(37,6)
            sage: M # indirect doctest
            Modular Symbols space of dimension 32 for Gamma_0(37) of weight 6 with sign 0 over Rational Field
        """
        return ("Modular Symbols space of dimension %s for Gamma_0(%s) of weight %s with sign %s " + \
                "over %s")%(self.dimension(), self.level(),self.weight(), self.sign(),
                            self.base_ring())

    def _cuspidal_submodule_dimension_formula(self):
        r"""
        Return the dimension of the cuspidal subspace, using the formula.

        EXAMPLES::

            sage: M = ModularSymbols(37,4)
            sage: M.cuspidal_subspace().dimension()
            18
            sage: M._cuspidal_submodule_dimension_formula()
            18
        """
        if self.base_ring().characteristic() == 0:
            N, k, sign = self.level(), self.weight(), self.sign()
            # For sign 0 each cusp form contributes a plus and a minus symbol.
            if sign == 0:
                m = 2
            else:
                m = 1
            return m * self.group().dimension_cusp_forms(k)
        else:
            raise NotImplementedError

    def _degeneracy_raising_matrix_1(self, M):
        r"""
        Return the matrix of the degeneracy map (with t = 1) to level
        `N`, where `N` is a multiple of the level.

        INPUT:

        - ``M`` -- A space of Gamma0 modular symbols of the same weight as
          self, with level an integer multiple of the level of self.

        OUTPUT:

        (matrix) The matrix of the degeneracy raising map to `M`.

        EXAMPLES::

            sage: M = ModularSymbols(37,4)
            sage: M._degeneracy_raising_matrix_1(ModularSymbols(74, 4))
            20 x 58 dense matrix over Rational Field
            sage: M.dimension()
            20
            sage: ModularSymbols(74,4).dimension()
            58
        """
        level = int(M.level())
        N = self.level()

        # 1. Find coset representatives H for Gamma_0(M.level()) \ Gamma_0(self.level())
        #    (need to be careful in some small levels, cf. #13198)
        if arithgroup.is_Gamma0(M.group()):
            H = arithgroup.degeneracy_coset_representatives_gamma0(level, N, 1)
        elif arithgroup.is_Gamma1(M.group()):
            H = arithgroup.degeneracy_coset_representatives_gamma1(level, N, 1)
        else:
            raise NotImplementedError("Degeneracy raising maps not implemented for GammaH levels")

        # 2. The map is
        #    [P,pi(g)] |--> sum_{h in H} [P, pi(h*g)]
        #
        MS = matrix_space.MatrixSpace(self.base_ring(), self.dimension(), M.dimension())
        if self.dimension() == 0 or M.dimension() == 0:
            return MS(0)
        rows = []
        B = self.manin_basis()
        syms = self.manin_symbols()
        k = self.weight()
        G = M2Z()
        H = [G(h) for h in H]
        for n in B:
            z = M(0)
            s = syms.manin_symbol(n)
            # Lift the Manin symbol to a matrix in SL_2(Z) at our level.
            g = G(list(s.lift_to_sl2z(N)))
            i = s.i
            # We apply each matrix in H according to the above formula
            for h in H:
                hg = h*g
                z += M((i, hg[1,0], hg[1,1]))
            rows.append(z.element())
        A = MS(rows)
        return A

    def _cuspidal_new_submodule_dimension_formula(self):
        r"""
        Return the dimension of the new cuspidal subspace, via the formula.

        EXAMPLES::

            sage: M = ModularSymbols(100,2)
            sage: M._cuspidal_new_submodule_dimension_formula()
            2
            sage: M.cuspidal_subspace().new_subspace().dimension()
            2
        """
        if self.base_ring().characteristic() == 0:
            N, k, sign = self.level(), self.weight(), self.sign()
            # For sign 0 each newform contributes a plus and a minus symbol.
            if sign == 0:
                m = 2
            else:
                m = 1
            return m * self.group().dimension_new_cusp_forms(k)
        else:
            raise NotImplementedError

    def boundary_space(self):
        r"""
        Return the space of boundary modular symbols for this space.

        EXAMPLES::

            sage: M = ModularSymbols(100,2)
            sage: M.boundary_space()
            Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(100) of weight 2 and over Rational Field
        """
        # Cached on first computation.
        try:
            return self.__boundary_space
        except AttributeError:
            pass
        self.__boundary_space = boundary.BoundarySpace_wtk_g0(
            self.level(), self.weight(), self.sign(), self.base_ring())
        return self.__boundary_space

    def manin_symbols(self):
        r"""
        Return the Manin symbol list of this modular symbol space.

        EXAMPLES::

            sage: M = ModularSymbols(100,4)
            sage: M.manin_symbols()
            Manin Symbol List of weight 4 for Gamma0(100)
            sage: len(M.manin_symbols())
            540
        """
        # Cached on first computation.
        try:
            return self.__manin_symbols
        except AttributeError:
            self.__manin_symbols = manin_symbols.ManinSymbolList_gamma0(
                level=self.level(), weight=self.weight())
        return self.__manin_symbols

    def modular_symbols_of_level(self, N):
        r"""
        Returns a space of modular symbols with the same parameters as
        this space except with level `N`.

        INPUT:

        - ``N`` (int) -- a positive integer.

        OUTPUT:

        (Modular Symbol space) A space of modular symbols with the
        same defining properties (weight, sign, etc.) as this space
        except with level `N`.

        For example, if self is the space of modular symbols of weight `2`
        for `\Gamma_0(22)`, and level is `11`, then this function returns
        the modular symbol space of weight `2` for `\Gamma_0(11)`.

        EXAMPLES::

            sage: M = ModularSymbols(11)
            sage: M.modular_symbols_of_level(22)
            Modular Symbols space of dimension 7 for Gamma_0(22) of weight 2 with sign 0 over Rational Field
            sage: M = ModularSymbols(Gamma1(6))
            sage: M.modular_symbols_of_level(12)
            Modular Symbols space of dimension 9 for Gamma_1(12) of weight 2 with sign 0 and over Rational Field
        """
        return modsym.ModularSymbols(arithgroup.Gamma0(rings.Integer(N)),
                                     self.weight(), sign=self.sign(),
                                     base_ring=self.base_ring())

    def _hecke_images(self, i, v):
        """
        Return matrix whose rows are the images of the `i`-th
        standard basis vector under the Hecke operators `T_p` for
        all integers in `v`.

        INPUT:

        - ``i`` - nonnegative integer

        - ``v`` - a list of positive integer

        OUTPUT:

        - ``matrix`` - whose rows are the Hecke images

        EXAMPLES::

            sage: M = ModularSymbols(11,4,1)
            sage: M._hecke_images(0,[1,2,3,4])
            [ 1  0  0  0]
            [ 9  0  1 -1]
            [28  2 -1 -1]
            [73  2  5 -7]
            sage: M.T(1)(M.0).element()
            (1, 0, 0, 0)
            sage: M.T(2)(M.0).element()
            (9, 0, 1, -1)
            sage: M.T(3)(M.0).element()
            (28, 2, -1, -1)
            sage: M.T(4)(M.0).element()
            (73, 2, 5, -7)
            sage: M = ModularSymbols(12,4)
            sage: M._hecke_images(0,[1,2,3,4])
            [  1   0   0   0   0   0   0   0   0   0   0   0]
            [  8   1  -1  -2   2   2  -3   1  -2   3  -1   0]
            [ 27   4  -4  -8   8  10 -14   4  -9  14  -5   0]
            [ 64  10 -10 -20  20  26 -36  10 -24  38 -14   0]
            sage: M.T(1)(M.0).element()
            (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
            sage: M.T(2)(M.0).element()
            (8, 1, -1, -2, 2, 2, -3, 1, -2, 3, -1, 0)
            sage: M.T(3)(M.0).element()
            (27, 4, -4, -8, 8, 10, -14, 4, -9, 14, -5, 0)
            sage: M.T(4)(M.0).element()
            (64, 10, -10, -20, 20, 26, -36, 10, -24, 38, -14, 0)
        """
        # Find basis vector for ambient space such that it is not in
        # the kernel of the dual space corresponding to self.
        c = self.manin_generators()[self.manin_basis()[i]]
        N = self.level()
        # Delegate to the fast weight-k Heilbronn-matrix implementation.
        return heilbronn.hecke_images_gamma0_weight_k(c.u,c.v, c.i, N, self.weight(),
                                                      v, self.manin_gens_to_basis())
class ModularSymbolsAmbient_wt2_g0(ModularSymbolsAmbient_wtk_g0):
    r"""
    Modular symbols for `\Gamma_0(N)` of integer weight `2` over the field
    `F`.

    INPUT:

    - ``N`` - int, the level

    - ``sign`` - int, either -1, 0, or 1

    OUTPUT:

    The space of modular symbols of weight `2`, trivial character, level
    `N` and given sign.

    EXAMPLES::

        sage: ModularSymbols(Gamma0(12),2)
        Modular Symbols space of dimension 5 for Gamma_0(12) of weight 2 with sign 0 over Rational Field
    """
    def __init__(self, N, sign, F, custom_init=None):
        """
        Initialize a space of modular symbols of weight `2`, trivial
        character and level `N`.

        INPUT:

        - ``N`` - int, the level

        - ``sign`` - int, either -1, 0, or 1

        OUTPUT:

        The space of modular symbols of weight 2, trivial character,
        level N and given sign.

        EXAMPLES::

            sage: M = ModularSymbols(Gamma0(12),2)
        """
        # Delegate to the general weight-k constructor with k fixed to 2.
        ModularSymbolsAmbient_wtk_g0.__init__(self,
                                              N=N, k=2, sign=sign, F=F,
                                              custom_init=custom_init)

    def _dimension_formula(self):
        r"""
        Return the dimension of this space using the formula.

        EXAMPLES::

            sage: M = ModularSymbols(37,6)
            sage: M.dimension()
            32
            sage: M._dimension_formula()
            32
        """
        if self.base_ring().characteristic() == 0:
            N, sign = self.level(), self.sign()
            # The formula below is only valid for sign 0; return None so
            # the caller falls back to another method.
            if sign != 0: return None
            return 2*self.group().dimension_cusp_forms(2) + self.group().ncusps() - 1
        else:
            raise NotImplementedError

    def _cuspidal_submodule_dimension_formula(self):
        r"""
        Return the dimension of the cuspidal subspace, using the formula.

        EXAMPLES::

            sage: M = ModularSymbols(37,4)
            sage: M.cuspidal_subspace().dimension()
            18
            sage: M._cuspidal_submodule_dimension_formula()
            18
        """
        if self.base_ring().characteristic() == 0:
            # For sign 0 each cusp form contributes a plus and a minus symbol.
            if self.sign() == 0:
                m = 2
            else:
                m = 1
            return m * self.group().dimension_cusp_forms(2)
        else:
            raise NotImplementedError

    def _cuspidal_new_submodule_dimension_formula(self):
        r"""
        Return the dimension of the new cuspidal subspace, via the formula.

        EXAMPLES:

            sage: M = ModularSymbols(100,2)
            sage: M._cuspidal_new_submodule_dimension_formula()
            2
            sage: M.cuspidal_subspace().new_subspace().dimension()
            2
        """
        if self.base_ring().characteristic() == 0:
            # For sign 0 each newform contributes a plus and a minus symbol.
            if self.sign() == 0:
                m = 2
            else:
                m = 1
            return m * self.group().dimension_new_cusp_forms(2)
        else:
            raise NotImplementedError

    def _compute_hecke_matrix_prime(self, p, rows=None):
        r"""
        Compute and return the matrix of the `p`-th Hecke operator.

        INPUT:

        - ``p`` - integer index of the Hecke operator (see note below)

        - ``rows`` - optional list of row indices; if given, only those
          rows of the Hecke matrix are computed (and the result is not
          cached)

        EXAMPLES::

            sage: m = ModularSymbols(37,2)
            sage: m._compute_hecke_matrix_prime(2).charpoly('x')
            x^5 + x^4 - 8*x^3 - 12*x^2
        """
        # note -- p doesn't have to be prime.
        if isinstance(rows, list):
            # Tuples are hashable, so they can serve as cache keys.
            rows = tuple(rows)
        try:
            return self._hecke_matrices[(p,rows)]
        except AttributeError:
            # First Hecke matrix computed on this space: create the cache.
            self._hecke_matrices = {}
        except KeyError:
            pass
        tm = misc.verbose("Computing Hecke operator T_%s"%p)

        H = heilbronn.HeilbronnCremona(p)
        ##H = heilbronn.HeilbronnMerel(p)

        B = self.manin_basis()
        if not rows is None:
            B = [B[i] for i in rows]
        cols = []
        N = self.level()
        P1 = self.p1list()
        mod2term = self._mod2term
        R = self.manin_gens_to_basis()
        W = R.new_matrix(nrows=len(B), ncols = R.nrows()) # the 0 with given number of rows and cols.
        j = 0
        tm = misc.verbose("Matrix non-reduced", tm)
        for i in B:
            # The following step is where most of the time is spent.
            c,d = P1[i]
            v = H.apply(c,d, N)

            # v is now a list of pairs ((c,d),m), where m is the
            # number of times that (c,d) appears in the image of x
            # under the matrices in H.  Also, the pairs (c,d) are
            # normalized.
            # Let ind(c,d) denote the index of the normalized pair
            # (c,d) in the fixed ordered list of elements of
            # P1(Z/NZ).  Then the list of pairs (ind(c,d), m)
            # obtained from the above list defines a sparse vector
            # s, and the image of x under T_p is the product
            # of s with the matrix R defined above.
            for z, m in v:
                k = P1.index_of_normalized_pair(z[0],z[1])
                if k != -1:
                    f, s = mod2term[k]
                    if s != 0:
                        W[j,f] = W[j,f] + s*m
            j += 1
        tm = misc.verbose("done making non-reduced matrix",tm)
        misc.verbose("start matrix-matrix (%s x %s) times (%s x %s) multiply to get Tp"%(W.nrows(), W.ncols(),
                                                                                         R.nrows(), R.ncols()))
        if hasattr(W, '_matrix_times_matrix_dense'):
            # Use the dense multiply fast path when the matrix type has it.
            Tp = W._matrix_times_matrix_dense(R)
            misc.verbose("done matrix multiply and computing Hecke operator",tm)
        else:
            Tp = W * R
            tm = misc.verbose("done multiplying",tm)
        Tp = Tp.dense_matrix()
        misc.verbose("done making hecke operator dense",tm)
        if rows is None:
            # Only the full Hecke matrix is cached; row-sliced results are
            # not stored.
            self._hecke_matrices[(p,rows)] = Tp
        return Tp

    def boundary_space(self):
        r"""
        Return the space of boundary modular symbols for this space.

        EXAMPLES::

            sage: M = ModularSymbols(100,2)
            sage: M.boundary_space()
            Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(100) of weight 2 and over Rational Field
        """
        # Cached on first computation.
        try:
            return self.__boundary_space
        except AttributeError:
            pass
        self.__boundary_space = boundary.BoundarySpace_wtk_g0(
            self.level(), self.weight(), self.sign(), self.base_ring())
        return self.__boundary_space

    def _hecke_image_of_ith_basis_vector(self, n, i):
        """
        Return `T_n(e_i)`, where `e_i` is the
        `i`th basis vector of this ambient space.

        INPUT:

        - ``n`` - an integer which should be prime.

        OUTPUT:

        - ``modular symbol`` - element of this ambient space

        EXAMPLES::

            sage: M = ModularSymbols(43,2,1)
            sage: M._hecke_image_of_ith_basis_vector(2, 0)
            3*(1,0) - 2*(1,33)
            sage: M.hecke_operator(2)(M.0)
            3*(1,0) - 2*(1,33)
            sage: M._hecke_image_of_ith_basis_vector(6, 1)
            -2*(1,33)
            sage: M.hecke_operator(6)(M.1)
            -2*(1,33)
        """
        c = self.manin_generators()[self.manin_basis()[i]]
        N = self.level()
        # Compute only the single image for index n, then coerce the
        # resulting row vector back into this space.
        I = heilbronn.hecke_images_gamma0_weight2(c.u,c.v,N,[n], self.manin_gens_to_basis())
        return self(I[0])

    def _hecke_images(self, i, v):
        """
        Return images of the `i`-th standard basis vector under the
        Hecke operators `T_p` for all integers in `v`.

        INPUT:

        - ``i`` - nonnegative integer

        - ``v`` - a list of positive integer

        OUTPUT:

        - ``matrix`` - whose rows are the Hecke images

        EXAMPLES::

            sage: M = ModularSymbols(46,2,-1)
            sage: M._hecke_images(1,[3,4,5,6])
            [ 0  1 -2  2  0]
            [ 2 -3  0  0  1]
            [ 0 -2  2 -2  0]
            [-5  3 -1  1  0]
            sage: v = M.basis()[1]
            sage: M.T(3)(v).element()
            (0, 1, -2, 2, 0)
            sage: M.T(4)(v).element()
            (2, -3, 0, 0, 1)
            sage: M.T(5)(v).element()
            (0, -2, 2, -2, 0)
            sage: M.T(6)(v).element()
            (-5, 3, -1, 1, 0)
        """
        # Find basis vector for ambient space such that it is not in
        # the kernel of the dual space corresponding to self.
        c = self.manin_generators()[self.manin_basis()[i]]
        N = self.level()
        # Delegate to the fast weight-2 Heilbronn-matrix implementation.
        return heilbronn.hecke_images_gamma0_weight2(c.u,c.v,N, v, self.manin_gens_to_basis())
class ModularSymbolsAmbient_wtk_g1(ModularSymbolsAmbient):
r"""
INPUT:
- ``level`` - int, the level
- ``weight`` - int, the weight = 2
- ``sign`` - int, either -1, 0, or 1
- ``F`` - field
EXAMPLES::
sage: ModularSymbols(Gamma1(17),2)
Modular Symbols space of dimension 25 for Gamma_1(17) of weight 2 with sign 0 and over Rational Field
sage: [ModularSymbols(Gamma1(7),k).dimension() for k in [2,3,4,5]]
[5, 8, 12, 16]
::
sage: ModularSymbols(Gamma1(7),3)
Modular Symbols space of dimension 8 for Gamma_1(7) of weight 3 with sign 0 and over Rational Field
"""
def __init__(self, level, weight, sign, F, custom_init=None):
    r"""
    Initialize a space of modular symbols for Gamma1(N).

    INPUT:

    - ``level`` - int, the level

    - ``weight`` - int, the weight `>= 2`

    - ``sign`` - int, either -1, 0, or 1

    - ``F`` - field

    EXAMPLES::

        sage: ModularSymbols(Gamma1(17),2)
        Modular Symbols space of dimension 25 for Gamma_1(17) of weight 2 with sign 0 and over Rational Field
        sage: [ModularSymbols(Gamma1(7),k).dimension() for k in [2,3,4,5]]
        [5, 8, 12, 16]

    ::

        sage: M = ModularSymbols(Gamma1(7),3)
    """
    # Build the congruence subgroup first, then hand everything to the
    # generic ambient-space constructor.
    group = arithgroup.Gamma1(level)
    ModularSymbolsAmbient.__init__(self,
                                   weight=weight,
                                   group=group,
                                   sign=sign,
                                   base_ring=F,
                                   custom_init=custom_init)
def _dimension_formula(self):
    r"""
    Return the dimension of this space using the formula.

    EXAMPLES::

        sage: M = ModularSymbols(Gamma1(7),6)
        sage: M.dimension()
        20
        sage: M._dimension_formula()
        20
    """
    if self.base_ring().characteristic() != 0:
        raise NotImplementedError
    level, weight, sign = self.level(), self.weight(), self.sign()
    # The formula below only applies to the full (sign 0) space.
    if sign != 0:
        return None
    full = 2 * self.group().dimension_cusp_forms(weight) + self.group().ncusps()
    if level == 1 and weight % 2 == 1:
        return 0
    if weight == 2:
        return full - 1
    if weight % 2 == 0:
        return full
    # TODO: I don't know a formula for dim ModSym_k(Gamma_1(N)) for odd k!!!
    return None
def _repr_(self):
    r"""
    Return a string representation of this space.

    EXAMPLES::

        sage: M = ModularSymbols(Gamma1(7),3)
        sage: M # indirect doctest
        Modular Symbols space of dimension 8 for Gamma_1(7) of weight 3 with sign 0 and over Rational Field
    """
    fmt = ("Modular Symbols space of dimension %s for Gamma_1(%s) "
           "of weight %s with sign %s and over %s")
    return fmt % (self.dimension(), self.level(), self.weight(),
                  self.sign(), self.base_ring())
def _cuspidal_submodule_dimension_formula(self):
    r"""
    Return the dimension of the cuspidal subspace, using the formula.

    EXAMPLES::

        sage: M = ModularSymbols(Gamma1(11),4)
        sage: M.cuspidal_subspace().dimension()
        20
        sage: M._cuspidal_submodule_dimension_formula()
        20
    """
    # For sign 0 each cusp form contributes a plus and a minus symbol.
    mult = 2 if self.sign() == 0 else 1
    return mult * self.group().dimension_cusp_forms(self.weight())
def _cuspidal_new_submodule_dimension_formula(self):
r"""
Return the dimension of the new cuspidal subspace, via the formula.
EXAMPLES:
sage: M = ModularSymbols(Gamma1(22),2)
sage: M._cuspidal_new_submodule_dimension_formula()
8
sage: M.cuspidal_subspace().new_subspace().dimension()
8
"""
if self.sign() == 0:
m = 2
else:
m = 1
return m * self.group().dimension_new_cusp_forms(self.weight())
def _compute_hecke_matrix_prime_power(self, p, r):
r"""
Compute and return the matrix of the Hecke operator `T(p^r)`.
EXAMPLES::
sage: m = ModularSymbols(Gamma1(11),2)
sage: m._compute_hecke_matrix_prime_power(3,4).charpoly('x')
x^11 - 291*x^10 + 30555*x^9 - 1636145*x^8 + 59637480*x^7 + 1983040928*x^6 - 401988683888*x^5 - 14142158875680*x^4 + 3243232720819520*x^3 - 103658398669404480*x^2 + 197645665452381696*x - 97215957397309696
"""
return self._compute_hecke_matrix_prime(p**r)
def _degeneracy_raising_matrix_1(self, M):
r"""
Return the matrix of the degeneracy raising map to `M`.
INPUT:
- ``M`` -- an ambient space of Gamma1 modular symbols, of level a
multiple of the level of self
OUTPUT:
(matrix) The matrix of the degeneracy raising matrix to the higher level.
EXAMPLES::
sage: M = ModularSymbols(Gamma1(7),3)
sage: N = ModularSymbols(Gamma1(21), 3)
sage: M._degeneracy_raising_matrix_1(N)
8 x 64 dense matrix over Rational Field
sage: M.dimension()
8
sage: N.dimension()
64
"""
level = int(M.level())
N = self.level()
# 1. Find coset representatives H for Gamma_1(M.level()) \ Gamma_1(self.level())
H = arithgroup.degeneracy_coset_representatives_gamma1(M.level(), N, 1)
# 2. The map is
# [P,pi(g)] |--> sum_{h in H} [P, pi(h*g)]
#
MS = matrix_space.MatrixSpace(self.base_ring(), self.dimension(), M.dimension())
if self.dimension() == 0 or M.dimension() == 0:
return MS(0)
rows = []
B = self.manin_basis()
syms = self.manin_symbols()
k = self.weight()
G = matrix_space.MatrixSpace(integer_ring.IntegerRing(),2)
H = [G(h) for h in H]
for n in B:
z = M(0)
s = syms.manin_symbol(n)
g = G(list(s.lift_to_sl2z(N)))
i = s.i
# We apply each matrix in H according to the above formula
for h in H:
hg = h*g
z += M((i, hg[1,0], hg[1,1]))
rows.append(z.element())
A = MS(rows)
return A
def boundary_space(self):
r"""
Return the space of boundary modular symbols for this space.
EXAMPLES::
sage: M = ModularSymbols(100,2)
sage: M.boundary_space()
Space of Boundary Modular Symbols for Congruence Subgroup Gamma0(100) of weight 2 and over Rational Field
"""
try:
return self.__boundary_space
except AttributeError:
pass
self.__boundary_space = boundary.BoundarySpace_wtk_g1(
self.level(), self.weight(), self.sign(), self.base_ring())
return self.__boundary_space
def manin_symbols(self):
r"""
Return the Manin symbol list of this modular symbol space.
EXAMPLES::
sage: M = ModularSymbols(Gamma1(30),4)
sage: M.manin_symbols()
Manin Symbol List of weight 4 for Gamma1(30)
sage: len(M.manin_symbols())
1728
"""
try:
return self.__manin_symbols
except AttributeError:
self.__manin_symbols = manin_symbols.ManinSymbolList_gamma1(
level=self.level(), weight=self.weight())
return self.__manin_symbols
def modular_symbols_of_level(self, N):
r"""
Returns a space of modular symbols with the same parameters as
this space except with level `N`.
INPUT:
- ``N`` (int) -- a positive integer.
OUTPUT:
(Modular Symbol space) A space of modular symbols with the
same defining properties (weight, sign, etc.) as this space
except with level `N`.
For example, if self is the space of modular symbols of weight `2`
for `\Gamma_0(22)`, and level is `11`, then this function returns
the modular symbol space of weight `2` for `\Gamma_0(11)`.
EXAMPLES::
sage: M = ModularSymbols(Gamma1(30),4); M
Modular Symbols space of dimension 144 for Gamma_1(30) of weight 4 with sign 0 and over Rational Field
sage: M.modular_symbols_of_level(22)
Modular Symbols space of dimension 90 for Gamma_1(22) of weight 4 with sign 0 and over Rational Field
"""
return modsym.ModularSymbols(arithgroup.Gamma1(N), self.weight(),self.sign(), self.base_ring())
class ModularSymbolsAmbient_wtk_gamma_h(ModularSymbolsAmbient):
    def __init__(self, group, weight, sign, F, custom_init=None):
        r"""
        Initialize a space of modular symbols for `\Gamma_H(N)`.

        INPUT:

        - ``group`` - a congruence subgroup `\Gamma_H(N)`.

        - ``weight`` - int, the weight = 2

        - ``sign`` - int, either -1, 0, or 1

        - ``F`` - field

        EXAMPLES::

            sage: ModularSymbols(GammaH(15,[4]),2)
            Modular Symbols space of dimension 9 for Congruence Subgroup Gamma_H(15) with H generated by [4] of weight 2 with sign 0 and over Rational Field
        """
        ModularSymbolsAmbient.__init__(self,
                weight=weight, group=group,
                sign=sign, base_ring=F, custom_init=custom_init)

    def _dimension_formula(self):
        r"""
        Return None: we have no dimension formulas for `\Gamma_H(N)` spaces.

        EXAMPLES::

            sage: M = ModularSymbols(GammaH(15,[4]),2)
            sage: M.dimension()
            9
            sage: M._dimension_formula()
        """
        return None

    def _repr_(self):
        r"""
        Return a string representation of this space.

        EXAMPLES::

            sage: M = ModularSymbols(GammaH(15,[4]),2)
            sage: M # indirect doctest
            Modular Symbols space of dimension 9 for Congruence Subgroup Gamma_H(15) with H generated by [4] of weight 2 with sign 0 and over Rational Field
        """
        return ("Modular Symbols space of dimension %s for %s of weight %s with sign %s " + \
                "and over %s")%(self.dimension(), self.group(),self.weight(),
                                self.sign(), self.base_ring())

    def _cuspidal_submodule_dimension_formula(self):
        r"""
        Return None: we have no dimension formulas for `\Gamma_H(N)` spaces.

        EXAMPLES::

            sage: ModularSymbols(GammaH(15,[4]),2)._cuspidal_submodule_dimension_formula() is None
            True
        """
        return None

    def _cuspidal_new_submodule_dimension_formula(self):
        r"""
        Return None: we have no dimension formulas for `\Gamma_H(N)` spaces.

        EXAMPLES::

            sage: ModularSymbols(GammaH(15,[4]),2)._cuspidal_new_submodule_dimension_formula() is None
            True
        """
        return None

    def _compute_hecke_matrix_prime_power(self, p, r):
        r"""
        Return matrix of a prime-power Hecke operator `T(p^r)`.

        EXAMPLES::

            sage: M = ModularSymbols(GammaH(15,[4]),2)
            sage: M._compute_hecke_matrix_prime_power(2, 3)
            [10  0  5  0  1  0  0  4  0]
            [ 0 10  0  0 -4 -5  0 -1  6]
            [ 5  0 10  0 -4  0  0 -1  0]
            [ 0  0  0  5 -7  0 10 -3  4]
            [ 0  0  0  0 -1  0  0 -4  0]
            [ 0 -5  0  0 -1 10  0 -4 -6]
            [ 0  0  0 10 -3  0  5 -7 -4]
            [ 0  0  0  0 -4  0  0 -1  0]
            [ 0  0  0  0  0  0  0  0  3]
            sage: M.hecke_matrix(7)^2 == M.hecke_matrix(49) + 7 * M.diamond_bracket_operator(7).matrix()  # indirect doctest
            True
        """
        # The prime code handles arbitrary indices, so delegate directly.
        return self._compute_hecke_matrix_prime(p**r)

    def _degeneracy_raising_matrix_1(self, level):
        r"""
        Return matrix of a degeneracy raising map.  Not implemented for
        `\Gamma_H(N)` spaces (the ``level`` argument is therefore unused).

        EXAMPLES::

            sage: ModularSymbols(GammaH(15,[4]),2)._degeneracy_raising_matrix_1(ModularSymbols(GammaH(30, [19]), 2))
            Traceback (most recent call last):
            ...
            NotImplementedError
        """
        raise NotImplementedError

    def boundary_space(self):
        r"""
        Return the space of boundary modular symbols for this space.

        EXAMPLES::

            sage: M = ModularSymbols(GammaH(15,[4]),2)
            sage: M.boundary_space()
            Boundary Modular Symbols space for Congruence Subgroup Gamma_H(15) with H generated by [4] of weight 2 over Rational Field
        """
        # Construct lazily and cache on the instance.
        try:
            return self.__boundary_space
        except AttributeError:
            pass
        self.__boundary_space = boundary.BoundarySpace_wtk_gamma_h(
            self.group(), self.weight(), self.sign(), self.base_ring())
        return self.__boundary_space

    def manin_symbols(self):
        r"""
        Return the Manin symbol list of this modular symbol space.

        EXAMPLES::

            sage: M = ModularSymbols(GammaH(15,[4]),2)
            sage: M.manin_symbols()
            Manin Symbol List of weight 2 for Congruence Subgroup Gamma_H(15) with H generated by [4]
            sage: len(M.manin_symbols())
            96
        """
        # Construct lazily and cache on the instance.
        try:
            return self.__manin_symbols
        except AttributeError:
            self.__manin_symbols = manin_symbols.ManinSymbolList_gamma_h(
                group=self.group(), weight=self.weight())
        return self.__manin_symbols

    def modular_symbols_of_level(self, N):
        r"""
        Returns a space of modular symbols with the same parameters as
        this space except with level `N`, which should be either a divisor or a
        multiple of the level of self.

        TESTS::

            sage: M = ModularSymbols(GammaH(15,[7]),6)
            sage: M.modular_symbols_of_level(5)
            Modular Symbols space of dimension 4 for Gamma_0(5) of weight 6 with sign 0 over Rational Field
            sage: M.modular_symbols_of_level(30)
            Traceback (most recent call last):
            ...
            ValueError: N (=30) should be a factor of the level of this space (=15)
            sage: M.modular_symbols_of_level(73)
            Traceback (most recent call last):
            ...
            ValueError: N (=73) should be a factor of the level of this space (=15)
        """
        if self.level() % N == 0:
            return modsym.ModularSymbols(self.group().restrict(N), self.weight(), self.sign(), self.base_ring())
        # We deliberately don't allow N to be a multiple of the level here,
        # because there are many possibilities for what H could be at the
        # higher level (and we don't implement the degeneracy raising maps
        # anyway)
        else:
            # Note: use adjacent-literal concatenation (not a backslash
            # continuation inside the literal), so the message contains a
            # single space, as the doctest above expects.
            raise ValueError("N (=%s) should be a factor of the level "
                             "of this space (=%s)" % (N, self.level()))
class ModularSymbolsAmbient_wtk_eps(ModularSymbolsAmbient):
    def __init__(self, eps, weight, sign, base_ring, custom_init=None):
        """
        Space of modular symbols with given weight, character, base ring and
        sign.

        INPUT:

        - ``eps`` - dirichlet.DirichletCharacter, the
          "Nebentypus" character.

        - ``weight`` - int, the weight = 2

        - ``sign`` - int, either -1, 0, or 1

        - ``base_ring`` - the base ring. It must be possible to change the ring
          of the character to this base ring (not always canonically).

        EXAMPLES::

            sage: eps = DirichletGroup(4).gen(0)
            sage: eps.order()
            2
            sage: ModularSymbols(eps, 2)
            Modular Symbols space of dimension 0 and level 4, weight 2, character [-1], sign 0, over Rational Field
            sage: ModularSymbols(eps, 3)
            Modular Symbols space of dimension 2 and level 4, weight 3, character [-1], sign 0, over Rational Field

        We next create a space with character of order bigger than 2.

        ::

            sage: eps = DirichletGroup(5).gen(0)
            sage: eps # has order 4
            Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4
            sage: ModularSymbols(eps, 2).dimension()
            0
            sage: ModularSymbols(eps, 3).dimension()
            2

        Here is another example::

            sage: G, e = DirichletGroup(5).objgen()
            sage: M = ModularSymbols(e,3)
            sage: loads(M.dumps()) == M
            True
        """
        # The level of the space is the modulus of the nebentypus character.
        level = eps.modulus()
        ModularSymbolsAmbient.__init__(self,
                weight=weight,
                group=arithgroup.Gamma1(level),
                sign=sign,
                base_ring=base_ring,
                character=eps.change_ring(base_ring),
                custom_init=custom_init)

    def _repr_(self):
        r"""
        Return a string representation of this space.

        EXAMPLES::

            sage: G, e = DirichletGroup(5).objgen()
            sage: M = ModularSymbols(e,3)
            sage: M # indirect doctest
            Modular Symbols space of dimension 2 and level 5, weight 3, character [zeta4], sign 0, over Cyclotomic Field of order 4 and degree 2
        """
        return ("Modular Symbols space of dimension %s and level %s, weight %s, character %s, sign %s, " + \
                "over %s")%(self.dimension(), self.level(), self.weight(),
                            self.character()._repr_short_(), self.sign(), self.base_ring())

    def _cuspidal_submodule_dimension_formula(self):
        r"""
        Return the dimension for the cuspidal subspace of this space, given by the formula.

        EXAMPLES::

            sage: G, e = DirichletGroup(50).objgen()
            sage: M = ModularSymbols(e^2,2)
            sage: M.dimension()
            16
            sage: M._cuspidal_submodule_dimension_formula()
            12
        """
        if self.base_ring().characteristic() != 0:
            raise NotImplementedError
        # Each cusp form contributes two symbols when sign is 0, one otherwise.
        if self.sign() == 0:
            m = 2
        else:
            m = 1
        return m * self.group().dimension_cusp_forms(self.weight(), eps=self.character())

    def _cuspidal_new_submodule_dimension_formula(self):
        r"""
        Return the dimension for the new cuspidal subspace of this space, given by the formula.

        EXAMPLES::

            sage: G, e = DirichletGroup(50).objgen()
            sage: M = ModularSymbols(e,3)
            sage: M.dimension()
            30
            sage: M._cuspidal_new_submodule_dimension_formula()
            10
        """
        if self.base_ring().characteristic() != 0:
            raise NotImplementedError
        # Each new cusp form contributes two symbols when sign is 0, one otherwise.
        if self.sign() == 0:
            m = 2
        else:
            m = 1
        return m * self.group().dimension_new_cusp_forms(self.weight(), eps=self.character())

    def _matrix_of_operator_on_modular_symbols(self, codomain, R, character_twist=False):
        """
        INPUT:

        - ``self`` - this space of modular symbols

        - ``codomain`` - space of modular symbols

        - ``R`` - list of lists [a,b,c,d] of length 4, which
          we view as elements of GL_2(Q).

        OUTPUT: a matrix, which represents the operator

        .. math::

            x \mapsto \sum_{g in R} g.x

        where g.x is the formal linear fractional transformation on modular
        symbols.

        EXAMPLES::

            sage: G, e = DirichletGroup(5).objgen()
            sage: M = ModularSymbols(e,3)
            sage: M.dimension()
            2
            sage: M._matrix_of_operator_on_modular_symbols(M,HeilbronnCremona(3))
            [ 6  6]
            [ 0 10]
        """
        eps = self.character()
        rows = []
        for b in self.basis():
            v = formal_sum.FormalSum(0, check=False)
            for c, x in b.modular_symbol_rep():
                for g in R:
                    y = x.apply(g)
                    if character_twist:
                        # Twist each term by the character evaluated at the
                        # upper-left entry of g.
                        v += y*c*eps(g[0])
                    else:
                        v += y*c
            w = codomain(v).element()
            rows.append(w)
        M = matrix_space.MatrixSpace(self.base_ring(), len(rows), codomain.degree(), sparse=False)
        return M(rows)

    def _degeneracy_raising_matrix_1(self, M):
        r"""
        Return the matrix of the degeneracy raising map to ``M``, which should
        be a space of modular symbols with level a multiple of the level of
        self and with compatible character.

        INPUT:

        - ``M`` -- a space of modular symbols with character, whose level
          should be an integer multiple of the level of self, and whose
          character should be the Dirichlet character at that level obtained by
          extending the character of self.

        The input is *not* sanity-checked in any way -- use with care!

        OUTPUT:

        (matrix) The matrix of the degeneracy raising matrix to the higher level.

        EXAMPLES::

            sage: eps = DirichletGroup(4).gen(0)
            sage: M = ModularSymbols(eps, 3); M
            Modular Symbols space of dimension 2 and level 4, weight 3, character [-1], sign 0, over Rational Field
            sage: M._degeneracy_raising_matrix_1(ModularSymbols(eps.extend(20), 3))
            [ 1  0  0  0 -1 -1  3  1  0  2 -3  0]
            [ 0  5  1 -2 -3  3  0  4 -1  5 -7 -1]
        """
        N = self.level()
        # 1. Find coset representatives H for Gamma_0(M.level()) \ Gamma_0(self.level())
        H = arithgroup.degeneracy_coset_representatives_gamma0(M.level(), N, 1)
        # 2. The map is
        #  [P,pi(g)] |--> sum_{h in H} [P, pi(h*g)]
        #
        MS = matrix_space.MatrixSpace(self.base_ring(), self.dimension(), M.dimension())
        if self.dimension() == 0 or M.dimension() == 0:
            return MS(0)
        rows = []
        B = self.manin_basis()
        syms = self.manin_symbols()
        G = matrix_space.MatrixSpace(integer_ring.IntegerRing(),2)
        H = [G(h) for h in H]
        eps = self.character()   # note: in my thesis I twisted by eps^(-1), which is definitely a mistake
                                 # since twisting by eps gives the right answer and by eps^(-1) does not.
        for n in B:
            z = M(0)
            s = syms.manin_symbol(n)
            g = G(list(s.lift_to_sl2z(N)))
            i = s.i
            # We apply each matrix in H according to the above formula
            for h in H:
                hg = h*g
                z += eps(h[0,0])*M((i, hg[1,0], hg[1,1]))
            rows.append(z.element())
        A = MS(rows)
        return A

    def _dimension_formula(self):
        r"""
        Return None: we have no dimension formula for spaces with character.

        EXAMPLES::

            sage: eps = DirichletGroup(5).gen(0)
            sage: M = ModularSymbols(eps, 2)
            sage: M.dimension()
            0
            sage: M._dimension_formula()
        """
        return None

    def boundary_space(self):
        r"""
        Return the space of boundary modular symbols for this space.

        EXAMPLES::

            sage: eps = DirichletGroup(5).gen(0)
            sage: M = ModularSymbols(eps, 2)
            sage: M.boundary_space()
            Boundary Modular Symbols space of level 5, weight 2, character [zeta4] and dimension 0 over Cyclotomic Field of order 4 and degree 2
        """
        # Construct lazily and cache on the instance.
        try:
            return self.__boundary_space
        except AttributeError:
            pass
        self.__boundary_space = boundary.BoundarySpace_wtk_eps(
            self.character(), self.weight(), self.sign())
        return self.__boundary_space

    def manin_symbols(self):
        r"""
        Return the Manin symbol list of this modular symbol space.

        EXAMPLES::

            sage: eps = DirichletGroup(5).gen(0)
            sage: M = ModularSymbols(eps, 2)
            sage: M.manin_symbols()
            Manin Symbol List of weight 2 for Gamma1(5) with character [zeta4]
            sage: len(M.manin_symbols())
            6
        """
        # Construct lazily and cache on the instance.
        try:
            return self.__manin_symbols
        except AttributeError:
            self.__manin_symbols = manin_symbols.ManinSymbolList_character(
                character=self.character(), weight=self.weight())
        return self.__manin_symbols

    def modular_symbols_of_level(self, N):
        r"""
        Returns a space of modular symbols with the same parameters as
        this space except with level `N`.

        INPUT:

        - ``N`` (int) -- a positive integer.

        OUTPUT:

        (Modular Symbol space) A space of modular symbols with the
        same defining properties (weight, sign, etc.) as this space
        except with level `N`.

        EXAMPLES::

            sage: eps = DirichletGroup(5).gen(0)
            sage: M = ModularSymbols(eps, 2); M
            Modular Symbols space of dimension 0 and level 5, weight 2, character [zeta4], sign 0, over Cyclotomic Field of order 4 and degree 2
            sage: M.modular_symbols_of_level(15)
            Modular Symbols space of dimension 0 and level 15, weight 2, character [1, zeta4], sign 0, over Cyclotomic Field of order 4 and degree 2
        """
        # Restrict or extend the character to the new modulus as appropriate.
        if self.level() % N == 0:
            eps = self.character().restrict(N)
        elif N % self.level() == 0:
            eps = self.character().extend(N)
        else:
            raise ValueError("The level N (=%s) must be a divisor or multiple of the modulus of the character (=%s)"%(N, self.level()))
        return modsym.ModularSymbols(eps, self.weight(), self.sign(), self.base_ring())

    def modular_symbols_of_sign(self, sign):
        r"""
        Returns a space of modular symbols with the same defining
        properties (weight, level, etc.) as this space except with given
        sign.

        INPUT:

        - ``sign`` (int) -- A sign (`+1`, `-1` or `0`).

        OUTPUT:

        (ModularSymbolsAmbient) A space of modular symbols with the
        same defining properties (weight, level, etc.) as this space
        except with given sign.

        EXAMPLES::

            sage: eps = DirichletGroup(5).gen(0)
            sage: M = ModularSymbols(eps, 2); M
            Modular Symbols space of dimension 0 and level 5, weight 2, character [zeta4], sign 0, over Cyclotomic Field of order 4 and degree 2
            sage: M.modular_symbols_of_sign(0) == M
            True
            sage: M.modular_symbols_of_sign(+1)
            Modular Symbols space of dimension 0 and level 5, weight 2, character [zeta4], sign 1, over Cyclotomic Field of order 4 and degree 2
            sage: M.modular_symbols_of_sign(-1)
            Modular Symbols space of dimension 0 and level 5, weight 2, character [zeta4], sign -1, over Cyclotomic Field of order 4 and degree 2
        """
        return modsym.ModularSymbols(self.character(), self.weight(), sign, self.base_ring())

    def modular_symbols_of_weight(self, k):
        r"""
        Returns a space of modular symbols with the same defining
        properties (weight, sign, etc.) as this space except with weight
        `k`.

        INPUT:

        - ``k`` (int) -- A positive integer.

        OUTPUT:

        (ModularSymbolsAmbient) A space of modular symbols with the
        same defining properties (level, sign) as this space
        except with given weight.

        EXAMPLES::

            sage: eps = DirichletGroup(5).gen(0)
            sage: M = ModularSymbols(eps, 2); M
            Modular Symbols space of dimension 0 and level 5, weight 2, character [zeta4], sign 0, over Cyclotomic Field of order 4 and degree 2
            sage: M.modular_symbols_of_weight(3)
            Modular Symbols space of dimension 2 and level 5, weight 3, character [zeta4], sign 0, over Cyclotomic Field of order 4 and degree 2
            sage: M.modular_symbols_of_weight(2) == M
            True
        """
        return modsym.ModularSymbols(self.character(), k, self.sign(), self.base_ring())

    def _hecke_images(self, i, v):
        """
        Return images of the `i`-th standard basis vector under the
        Hecke operators `T_p` for all integers in `v`.

        INPUT:

        - ``i`` - nonnegative integer

        - ``v`` - a list of positive integer

        OUTPUT:

        - ``matrix`` - whose rows are the Hecke images

        EXAMPLES::

            sage: G, e = DirichletGroup(50,QQ).objgen()
            sage: M = ModularSymbols(e^2,2)
            sage: M.dimension()
            15
            sage: M._hecke_images(8,range(1,5))
            [ 0  0  0  0  0  0  0  0  1  0  0  0  0  0  0]
            [ 0  0  0  0  0  0  0  1  0  0  0  0  1  0  0]
            [ 0  1  0  2  0 -1  1  1  0  0  0  0  0  0  0]
            [ 0  1  1 -1 -1  0 -1  1  1  0  1  2  0 -2  2]
        """
        if self.weight() != 2:
            raise NotImplementedError("hecke images only implemented when the weight is 2")
        chi = self.character()
        # Find basis vector for ambient space such that it is not in
        # the kernel of the dual space corresponding to self.
        c = self.manin_generators()[self.manin_basis()[i]]
        N = self.level()
        # Dispatch on the character order: quadratic (order <= 2) characters
        # have a faster specialized implementation.
        if chi.order() > 2:
            return heilbronn.hecke_images_nonquad_character_weight2(c.u,c.v,N,
                                      v, chi, self.manin_gens_to_basis())
        else:
            return heilbronn.hecke_images_quad_character_weight2(c.u,c.v,N,
                                      v, chi, self.manin_gens_to_basis())
|
<reponame>marcreyesph/tf-selfpacedtuts
from skimage import data
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os

# Minimize console warnings by tensorflow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# NOTE(review): tf.InteractiveSession / tf.placeholder are TensorFlow 1.x
# APIs; this script assumes TF 1.x (or tf.compat.v1) -- confirm before
# upgrading TensorFlow.
sess = tf.InteractiveSession()

# Gather the dataset's .jpg files, load them, and compute per-pixel
# normalization statistics across the dataset.
img_dataset = [os.path.join('celeba_dataset_minified', img_i)
               for img_i in os.listdir('celeba_dataset_minified')
               if '.jpg' in img_i]
img_dataset_read = [plt.imread(img_i)
                    for img_i in img_dataset]
img_data = np.array(img_dataset_read)
img_data_mean = np.mean(img_data, axis=0)
img_data_std = np.std(img_data, axis=0)
img_normalized = ((img_data[0] - img_data_mean) / img_data_std)
"""plt.hist(img_normalized.ravel(), 20)
print(img_normalized.shape)
plt.show()
"""
# The image tensor
img = tf.placeholder(tf.float32, shape=[None, None], name='img')
# Make 2-D to 3-D (HxW) to (HxWx1)
"""tf.expand_dims() takes two parameters, the base tensor and the column where
we want to insert the new dimension
"""
# Insert new dimension to column two [x: y: <here>] cf. [0: 1: 2]
img_3d = tf.expand_dims(img, 2)
dims = img_3d.get_shape()
print(dims)
# Insert new dimension to column zero or the start [<here>: y, z, a] cf. [0: 1: 2: 3]
img_4d = tf.expand_dims(img_3d, 0)
print(img_4d.get_shape().as_list())
# Create placeholders for gabor's params
mean = tf.placeholder(tf.float32, name='mean')
sigma = tf.placeholder(tf.float32, name='sigma')
ksize = tf.placeholder(tf.int32, name='ksize')
# Redo set of operations for creation of gabor kernel
# Linspace
x = tf.linspace(-3.0, 3.0, ksize)
# Gaussian curve or normal distrib curve.
# Fix: use np.pi instead of the hard-coded, truncated constant 3.1415 so the
# normalization term 1/(sigma*sqrt(2*pi)) is exact to float precision.
z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
                        (2.0 * tf.pow(sigma, 2.0)))) *
     (1.0 / (sigma * tf.sqrt(2.0 * np.pi))))
# 2-D matrix [Nx1] x [1xN]
z_2d = tf.matmul(
    tf.reshape(z, tf.stack([ksize, 1])),
    tf.reshape(z, tf.stack([1, ksize])))
ys = tf.sin(x)
ys = tf.reshape(ys, tf.stack([ksize, 1]))
ones = tf.ones(tf.stack([1, ksize]))
# Repeatedly multiply one to ys
wave = tf.matmul(ys, ones)
gabor = tf.multiply(wave, z_2d)
gabor_4d = tf.reshape(gabor, tf.stack([ksize, ksize, 1, 1]))
# The convolution part takes a little longer time to compile
# Convolve the two
convolved = tf.nn.conv2d(img_4d, gabor_4d, strides=[1, 1, 1, 1], padding='SAME', name='convolved')
convolved_img = convolved[0, :, :, 0]
# Show result
result = convolved_img.eval(feed_dict={
    img: data.camera(),
    mean: 0.0,
    sigma: 1.0,
    ksize: 100
})
plt.imshow(result, cmap='gray')
plt.show()
<gh_stars>0
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
# GN version: //ios/web/public/app
'target_name': 'ios_web_app',
'type': 'static_library',
'include_dirs': [
'../..',
],
'dependencies': [
'ios_web',
'../../base/base.gyp:base',
'../../base/base.gyp:base_i18n',
'../../crypto/crypto.gyp:crypto',
'../../net/net.gyp:net',
'../../ui/base/ui_base.gyp:ui_base',
'../../ui/gfx/gfx.gyp:gfx',
'../../ui/gfx/gfx.gyp:gfx_geometry',
],
'sources': [
'app/web_main.mm',
'app/web_main_loop.h',
'app/web_main_loop.mm',
'app/web_main_runner.h',
'app/web_main_runner.mm',
'public/app/web_main.h',
'public/app/web_main_delegate.h',
'public/app/web_main_parts.h',
],
},
{
# GN version: //ios/web
'target_name': 'ios_web',
'type': 'static_library',
'include_dirs': [
'../..',
],
'dependencies': [
'ios_web_core',
'js_resources',
'user_agent',
'../../base/base.gyp:base',
'../../components/url_formatter/url_formatter.gyp:url_formatter',
'../../ios/net/ios_net.gyp:ios_net',
'../../ios/third_party/blink/blink_html_tokenizer.gyp:blink_html_tokenizer',
'../../net/net.gyp:net',
'../../ui/base/ui_base.gyp:ui_base',
'../../ui/gfx/gfx.gyp:gfx',
'../../ui/gfx/gfx.gyp:gfx_geometry',
'../../ui/resources/ui_resources.gyp:ui_resources',
'../../url/url.gyp:url_lib',
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/ui/resources/grit/webui_resources_map.cc',
'active_state_manager_impl.h',
'active_state_manager_impl.mm',
'alloc_with_zone_interceptor.h',
'alloc_with_zone_interceptor.mm',
'browser_state.mm',
'browser_url_rewriter_impl.cc',
'browser_url_rewriter_impl.h',
'browsing_data_managers/crw_browsing_data_manager.h',
'browsing_data_managers/crw_cookie_browsing_data_manager.h',
'browsing_data_managers/crw_cookie_browsing_data_manager.mm',
'browsing_data_partition_impl.h',
'browsing_data_partition_impl.mm',
'crw_browsing_data_store.mm',
'interstitials/html_web_interstitial_impl.h',
'interstitials/html_web_interstitial_impl.mm',
'interstitials/native_web_interstitial_impl.h',
'interstitials/native_web_interstitial_impl.mm',
'interstitials/web_interstitial_facade_delegate.h',
'interstitials/web_interstitial_impl.h',
'interstitials/web_interstitial_impl.mm',
'load_committed_details.cc',
'navigation/crw_session_certificate_policy_manager.h',
'navigation/crw_session_certificate_policy_manager.mm',
'navigation/crw_session_controller+private_constructors.h',
'navigation/crw_session_controller.h',
'navigation/crw_session_controller.mm',
'navigation/crw_session_entry.h',
'navigation/crw_session_entry.mm',
'navigation/navigation_item_facade_delegate.h',
'navigation/navigation_item_impl.h',
'navigation/navigation_item_impl.mm',
'navigation/navigation_manager_delegate.h',
'navigation/navigation_manager_facade_delegate.h',
'navigation/navigation_manager_impl.h',
'navigation/navigation_manager_impl.mm',
'navigation/nscoder_util.h',
'navigation/nscoder_util.mm',
'navigation/time_smoother.cc',
'navigation/time_smoother.h',
'navigation/web_load_params.h',
'navigation/web_load_params.mm',
'net/cert_host_pair.cc',
'net/cert_host_pair.h',
'net/cert_policy.cc',
'net/cert_store_impl.cc',
'net/cert_store_impl.h',
'net/cert_verifier_block_adapter.cc',
'net/cert_verifier_block_adapter.h',
'net/certificate_policy_cache.cc',
'net/clients/crw_csp_network_client.h',
'net/clients/crw_csp_network_client.mm',
'net/clients/crw_js_injection_network_client.h',
'net/clients/crw_js_injection_network_client.mm',
'net/clients/crw_js_injection_network_client_factory.h',
'net/clients/crw_js_injection_network_client_factory.mm',
'net/clients/crw_passkit_delegate.h',
'net/clients/crw_passkit_network_client.h',
'net/clients/crw_passkit_network_client.mm',
'net/clients/crw_passkit_network_client_factory.h',
'net/clients/crw_passkit_network_client_factory.mm',
'net/clients/crw_redirect_network_client.h',
'net/clients/crw_redirect_network_client.mm',
'net/clients/crw_redirect_network_client_factory.h',
'net/clients/crw_redirect_network_client_factory.mm',
'net/cookie_notification_bridge.h',
'net/cookie_notification_bridge.mm',
'net/crw_cert_verification_controller.h',
'net/crw_cert_verification_controller.mm',
'net/crw_request_tracker_delegate.h',
'net/crw_url_verifying_protocol_handler.h',
'net/crw_url_verifying_protocol_handler.mm',
'net/request_group_util.h',
'net/request_group_util.mm',
'net/request_tracker_data_memoizing_store.h',
'net/request_tracker_factory_impl.h',
'net/request_tracker_factory_impl.mm',
'net/request_tracker_impl.h',
'net/request_tracker_impl.mm',
'net/web_http_protocol_handler_delegate.h',
'net/web_http_protocol_handler_delegate.mm',
'public/active_state_manager.h',
'public/block_types.h',
'public/browser_state.h',
'public/browser_url_rewriter.h',
'public/browsing_data_partition.h',
'public/browsing_data_partition_client.cc',
'public/browsing_data_partition_client.h',
'public/cert_policy.h',
'public/cert_store.h',
'public/certificate_policy_cache.h',
'public/crw_browsing_data_store.h',
'public/crw_browsing_data_store_delegate.h',
'public/favicon_status.cc',
'public/favicon_status.h',
'public/favicon_url.cc',
'public/favicon_url.h',
'public/interstitials/web_interstitial.h',
'public/interstitials/web_interstitial_delegate.h',
'public/load_committed_details.h',
'public/navigation_item.h',
'public/navigation_manager.h',
'public/origin_util.cc',
'public/origin_util.h',
'public/referrer.h',
'public/referrer_util.cc',
'public/referrer_util.h',
'public/security_style.h',
'public/ssl_status.cc',
'public/ssl_status.h',
'public/string_util.h',
'public/url_scheme_util.h',
'public/url_schemes.h',
'public/url_schemes.mm',
'public/url_util.h',
'public/user_metrics.h',
'public/web/url_data_source_ios.h',
'public/web_client.h',
'public/web_client.mm',
'public/web_controller_factory.h',
'public/web_controller_factory.mm',
'public/web_kit_constants.h',
'public/web_state/credential.h',
'public/web_state/crw_web_controller_observer.h',
'public/web_state/crw_web_user_interface_delegate.h',
'public/web_state/crw_web_view_proxy.h',
'public/web_state/crw_web_view_scroll_view_proxy.h',
'public/web_state/global_web_state_observer.h',
'public/web_state/js/credential_util.h',
'public/web_state/js/crw_js_injection_evaluator.h',
'public/web_state/js/crw_js_injection_manager.h',
'public/web_state/js/crw_js_injection_receiver.h',
'public/web_state/page_display_state.h',
'public/web_state/page_display_state.mm',
'public/web_state/ui/crw_content_view.h',
'public/web_state/ui/crw_generic_content_view.h',
'public/web_state/ui/crw_native_content.h',
'public/web_state/ui/crw_native_content_provider.h',
'public/web_state/ui/crw_web_delegate.h',
'public/web_state/ui/crw_web_view_content_view.h',
'public/web_state/url_verification_constants.h',
'public/web_state/web_state.h',
'public/web_state/web_state_observer.h',
'public/web_state/web_state_observer_bridge.h',
'public/web_state/web_state_policy_decider.h',
'public/web_state/web_state_user_data.h',
'public/web_thread.h',
'public/web_thread_delegate.h',
'public/web_ui_ios_data_source.h',
'public/web_view_counter.h',
'public/web_view_creation_util.h',
'public/web_view_type.h',
'string_util.cc',
'ui_web_view_util.h',
'ui_web_view_util.mm',
'url_scheme_util.mm',
'url_util.cc',
'user_metrics.cc',
'weak_nsobject_counter.h',
'weak_nsobject_counter.mm',
'web_kit_constants.cc',
'web_state/blocked_popup_info.h',
'web_state/blocked_popup_info.mm',
'web_state/credential.cc',
'web_state/crw_pass_kit_downloader.h',
'web_state/crw_pass_kit_downloader.mm',
'web_state/crw_recurring_task_delegate.h',
'web_state/crw_web_view_proxy_impl.h',
'web_state/crw_web_view_proxy_impl.mm',
'web_state/crw_web_view_scroll_view_proxy.mm',
'web_state/error_translation_util.h',
'web_state/error_translation_util.mm',
'web_state/frame_info.h',
'web_state/global_web_state_event_tracker.cc',
'web_state/global_web_state_event_tracker.h',
'web_state/global_web_state_observer.cc',
'web_state/js/credential_util.mm',
'web_state/js/crw_js_early_script_manager.h',
'web_state/js/crw_js_early_script_manager.mm',
'web_state/js/crw_js_injection_manager.mm',
'web_state/js/crw_js_injection_receiver.mm',
'web_state/js/crw_js_invoke_parameter_queue.h',
'web_state/js/crw_js_invoke_parameter_queue.mm',
'web_state/js/crw_js_plugin_placeholder_manager.h',
'web_state/js/crw_js_plugin_placeholder_manager.mm',
'web_state/js/crw_js_post_request_loader.h',
'web_state/js/crw_js_post_request_loader.mm',
'web_state/js/crw_js_window_id_manager.h',
'web_state/js/crw_js_window_id_manager.mm',
'web_state/js/page_script_util.h',
'web_state/js/page_script_util.mm',
'web_state/ui/crw_context_menu_provider.h',
'web_state/ui/crw_context_menu_provider.mm',
'web_state/ui/crw_debug_web_view.h',
'web_state/ui/crw_debug_web_view.mm',
'web_state/ui/crw_generic_content_view.mm',
'web_state/ui/crw_simple_web_view_controller.h',
'web_state/ui/crw_static_file_web_view.h',
'web_state/ui/crw_static_file_web_view.mm',
'web_state/ui/crw_swipe_recognizer_provider.h',
'web_state/ui/crw_touch_tracking_recognizer.h',
'web_state/ui/crw_touch_tracking_recognizer.mm',
'web_state/ui/crw_ui_simple_web_view_controller.h',
'web_state/ui/crw_ui_simple_web_view_controller.mm',
'web_state/ui/crw_ui_web_view_web_controller.h',
'web_state/ui/crw_ui_web_view_web_controller.mm',
'web_state/ui/crw_web_controller+protected.h',
'web_state/ui/crw_web_controller.h',
'web_state/ui/crw_web_controller.mm',
'web_state/ui/crw_web_controller_container_view.h',
'web_state/ui/crw_web_controller_container_view.mm',
'web_state/ui/crw_web_view_content_view.mm',
'web_state/ui/crw_wk_script_message_router.h',
'web_state/ui/crw_wk_script_message_router.mm',
'web_state/ui/crw_wk_simple_web_view_controller.h',
'web_state/ui/crw_wk_simple_web_view_controller.mm',
'web_state/ui/crw_wk_web_view_web_controller.h',
'web_state/ui/crw_wk_web_view_web_controller.mm',
'web_state/ui/web_view_js_utils.h',
'web_state/ui/web_view_js_utils.mm',
'web_state/ui/wk_back_forward_list_item_holder.h',
'web_state/ui/wk_back_forward_list_item_holder.mm',
'web_state/ui/wk_web_view_configuration_provider.h',
'web_state/ui/wk_web_view_configuration_provider.mm',
'web_state/web_controller_observer_bridge.h',
'web_state/web_controller_observer_bridge.mm',
'web_state/web_state.cc',
'web_state/web_state_facade_delegate.h',
'web_state/web_state_impl.h',
'web_state/web_state_impl.mm',
'web_state/web_state_observer.cc',
'web_state/web_state_observer_bridge.mm',
'web_state/web_state_policy_decider.mm',
'web_state/web_state_weak_ptr_factory.cc',
'web_state/web_state_weak_ptr_factory.h',
'web_state/web_view_internal_creation_util.h',
'web_state/web_view_internal_creation_util.mm',
'web_state/wk_web_view_security_util.h',
'web_state/wk_web_view_security_util.mm',
'web_thread_impl.cc',
'web_thread_impl.h',
'web_view_counter_impl.h',
'web_view_counter_impl.mm',
'web_view_creation_util.mm',
'webui/crw_web_ui_manager.h',
'webui/crw_web_ui_manager.mm',
'webui/crw_web_ui_page_builder.h',
'webui/crw_web_ui_page_builder.mm',
'webui/shared_resources_data_source_ios.cc',
'webui/shared_resources_data_source_ios.h',
'webui/url_data_manager_ios.cc',
'webui/url_data_manager_ios.h',
'webui/url_data_manager_ios_backend.cc',
'webui/url_data_manager_ios_backend.h',
'webui/url_data_source_ios.cc',
'webui/url_data_source_ios_impl.cc',
'webui/url_data_source_ios_impl.h',
'webui/url_fetcher_block_adapter.h',
'webui/url_fetcher_block_adapter.mm',
'webui/web_ui_ios_controller_factory_registry.cc',
'webui/web_ui_ios_controller_factory_registry.h',
'webui/web_ui_ios_data_source_impl.cc',
'webui/web_ui_ios_data_source_impl.h',
'webui/web_ui_ios_impl.h',
'webui/web_ui_ios_impl.mm',
],
'link_settings': {
# TODO(crbug.com/541549): change to regular linking once support for
# iOS 7 is dropped.
'xcode_settings': {
'OTHER_LDFLAGS': [
'-weak_framework WebKit',
]
},
},
},
# Target shared by ios_web and CrNet.
{
# GN version: //ios/web:core
'target_name': 'ios_web_core',
'type': 'static_library',
'dependencies': [
'../../base/base.gyp:base',
],
'include_dirs': [
'../..',
],
'sources': [
'crw_network_activity_indicator_manager.h',
'crw_network_activity_indicator_manager.mm',
'history_state_util.h',
'history_state_util.mm',
],
},
{
# GN version: //ios/web:web_bundle_ui
'target_name': 'ios_web_js_bundle_ui',
'type': 'none',
'variables': {
'closure_entry_point': '__crWeb.webBundle',
'js_bundle_files': [
'web_state/js/resources/base.js',
'web_state/js/resources/common.js',
'web_state/js/resources/console.js',
'web_state/js/resources/core.js',
'web_state/js/resources/core_dynamic_ui.js',
'web_state/js/resources/dialog_overrides.js',
'web_state/js/resources/message.js',
'web_state/js/resources/message_dynamic_ui.js',
'web_state/js/resources/web_bundle_ui.js',
'web_state/js/resources/window_open_ui.js',
],
},
'sources': [
'web_state/js/resources/web_bundle_ui.js',
],
'link_settings': {
'mac_bundle_resources': [
'<(SHARED_INTERMEDIATE_DIR)/web_bundle_ui.js',
],
},
'includes': [
'js_compile_bundle.gypi'
],
},
{
# GN version: //ios/web:web_bundle_wk
'target_name': 'ios_web_js_bundle_wk',
'type': 'none',
'variables': {
'closure_entry_point': '__crWeb.webBundle',
'js_bundle_files': [
'web_state/js/resources/base.js',
'web_state/js/resources/common.js',
'web_state/js/resources/console.js',
'web_state/js/resources/core.js',
'web_state/js/resources/core_dynamic_wk.js',
'web_state/js/resources/dialog_overrides.js',
'web_state/js/resources/message.js',
'web_state/js/resources/message_dynamic_wk.js',
'web_state/js/resources/web_bundle_wk.js',
'web_state/js/resources/window_open_wk.js',
],
},
'sources': [
'web_state/js/resources/web_bundle_wk.js',
],
'link_settings': {
'mac_bundle_resources': [
'<(SHARED_INTERMEDIATE_DIR)/web_bundle_wk.js',
],
},
'includes': [
'js_compile_bundle.gypi'
],
},
{
# GN version: //ios/web:js_resources
'target_name': 'js_resources',
'type': 'none',
'dependencies': [
'ios_web_js_bundle_ui',
'ios_web_js_bundle_wk',
],
'sources': [
'web_state/js/resources/post_request.js',
'web_state/js/resources/plugin_placeholder.js',
'web_state/js/resources/window_id.js',
'webui/resources/web_ui.js',
],
'link_settings': {
'mac_bundle_resources': [
'<(SHARED_INTERMEDIATE_DIR)/post_request.js',
'<(SHARED_INTERMEDIATE_DIR)/plugin_placeholder.js',
'<(SHARED_INTERMEDIATE_DIR)/window_id.js',
'<(SHARED_INTERMEDIATE_DIR)/web_ui.js',
],
},
'includes': [
'js_compile_checked.gypi'
],
},
{
# GN version: //ios/web:test_support
'target_name': 'ios_web_test_support',
'type': 'static_library',
'dependencies': [
'../../ios/testing/ios_testing.gyp:ocmock_support',
'../../ios/third_party/gcdwebserver/gcdwebserver.gyp:gcdwebserver',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'../../third_party/ocmock/ocmock.gyp:ocmock',
'ios_web',
],
'include_dirs': [
'../..',
],
'sources': [
'public/test/crw_test_js_injection_receiver.h',
'public/test/crw_test_js_injection_receiver.mm',
'public/test/http_server.h',
'public/test/http_server.mm',
'public/test/js_test_util.h',
'public/test/js_test_util.mm',
'public/test/response_providers/data_response_provider.h',
'public/test/response_providers/data_response_provider.mm',
'public/test/response_providers/file_based_response_provider.h',
'public/test/response_providers/file_based_response_provider.mm',
'public/test/response_providers/file_based_response_provider_impl.cc',
'public/test/response_providers/file_based_response_provider_impl.h',
'public/test/response_providers/response_provider.cc',
'public/test/response_providers/response_provider.h',
'public/test/response_providers/string_response_provider.h',
'public/test/response_providers/string_response_provider.mm',
'public/test/scoped_testing_web_client.cc',
'public/test/scoped_testing_web_client.h',
'public/test/test_browser_state.cc',
'public/test/test_browser_state.h',
'public/test/test_web_client.h',
'public/test/test_web_client.mm',
'public/test/test_web_state.cc',
'public/test/test_web_state.h',
'public/test/test_web_thread.h',
'public/test/test_web_thread_bundle.h',
'public/test/test_web_view_content_view.h',
'public/test/test_web_view_content_view.mm',
'public/test/web_test_util.h',
'test/crw_fake_web_controller_observer.h',
'test/crw_fake_web_controller_observer.mm',
'test/test_web_thread.cc',
'test/test_web_thread_bundle.cc',
'test/web_int_test.h',
'test/web_int_test.mm',
'test/web_test.h',
'test/web_test.mm',
'test/web_test_suite.cc',
'test/web_test_suite.h',
'test/wk_web_view_crash_utils.h',
'test/wk_web_view_crash_utils.mm',
],
},
{
# GN version: //ios/web:user_agent
'target_name': 'user_agent',
'type': 'static_library',
'include_dirs': [
'../..',
],
'dependencies': [
'../../base/base.gyp:base'
],
'sources': [
'public/user_agent.h',
'public/user_agent.mm',
],
},
],
}
|
<filename>tests/test_queries.py
# coding: utf-8
# Copyright 2010 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from twisted.trial import unittest
import txmongo
# Connection settings for the live MongoDB server these integration
# tests talk to; every test class below uses the mydb.mycol collection.
mongo_host = "localhost"
mongo_port = 27017
class TestMongoQueries(unittest.TestCase):
    """Basic find/count/group behaviour against a live MongoDB server.

    NOTE(review): requires a MongoDB instance on mongo_host:mongo_port;
    each test works on the shared ``mydb.mycol`` collection which is
    dropped in tearDown.
    """
    # Per-test timeout in seconds, honoured by twisted.trial.
    timeout = 5
    @defer.inlineCallbacks
    def setUp(self):
        # Fresh connection and collection handle for every test.
        self.conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        self.coll = self.conn.mydb.mycol
    @defer.inlineCallbacks
    def test_SingleCursorIteration(self):
        # Few enough documents to fit in a single reply batch.
        yield self.coll.insert([{'v':i} for i in xrange(10)], safe=True)
        res = yield self.coll.find()
        self.assertEqual(len(res), 10)
    @defer.inlineCallbacks
    def test_MultipleCursorIterations(self):
        # 450 documents force the driver to iterate the cursor repeatedly.
        yield self.coll.insert([{'v':i} for i in xrange(450)], safe=True)
        res = yield self.coll.find()
        self.assertEqual(len(res), 450)
    @defer.inlineCallbacks
    def test_LargeData(self):
        # Four ~0.5 MB documents exercise multi-megabyte replies.
        yield self.coll.insert([{'v':' '*(2**19)} for i in xrange(4)], safe=True)
        res = yield self.coll.find()
        self.assertEqual(len(res), 4)
    @defer.inlineCallbacks
    def test_SpecifiedFields(self):
        # Five documents, each with keys 'a'..'g'; projection should trim
        # the returned documents down to the requested fields plus _id.
        yield self.coll.insert([{k: v for k in 'abcdefg'} for v in xrange(5)], safe=True)
        # Projection supplied as a dict.
        res = yield self.coll.find(fields={'a': 1, 'c': 1})
        cnt = yield self.coll.count(fields={'a': 1, 'c': 1})
        self.assertEqual(res[0].keys(), ['a', 'c', '_id'])
        # Projection supplied as a list of field names.
        res = yield self.coll.find(fields=['a', 'c'])
        cnt = yield self.coll.count(fields=['a', 'c'])
        self.assertEqual(res[0].keys(), ['a', 'c', '_id'])
        # An empty list projects only the mandatory _id field.
        res = yield self.coll.find(fields=[])
        cnt = yield self.coll.count(fields=[])
        self.assertEqual(res[0].keys(), ['_id'])
        # Field names must be strings.
        self.assertRaises(TypeError, self.coll._fields_list_to_dict, [1])
    @defer.inlineCallbacks
    def test_group(self):
        # Values alternate 0/1, so grouping by 'v' yields two buckets.
        yield self.coll.insert([{'v': i % 2} for i in xrange(5)], safe=True)
        reduce_ = '''
        function(curr, result) {
            result.total += curr.v;
        }
        '''
        keys = {'v': 1}
        initial = {'total': 0}
        cond = {'v': {'$in': [0, 1]}}
        final = '''
        function(result) {
            result.five = 5;
        }
        '''
        res = yield self.coll.group(keys, initial, reduce_, cond, final)
        self.assertEqual(len(res['retval']), 2)
        # A keyf function returning a constant key collapses everything
        # into a single bucket.
        keys = '''
        function(doc) {
            return {'value': 5, 'v': 1};
        }
        '''
        res = yield self.coll.group(keys, initial, reduce_, cond, final)
        self.assertEqual(len(res['retval']), 1)
    @defer.inlineCallbacks
    def tearDown(self):
        yield self.coll.drop()
        yield self.conn.disconnect()
class TestMongoQueriesEdgeCases(unittest.TestCase):
    """Result counts just below, at, and above the first cursor batch.

    NOTE(review): the 100/101/102 sizes look targeted at a first-batch
    size of 101 documents -- confirm against the driver's batching logic.
    """
    # Per-test timeout in seconds, honoured by twisted.trial.
    timeout = 5
    @defer.inlineCallbacks
    def setUp(self):
        self.conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        self.coll = self.conn.mydb.mycol
    @defer.inlineCallbacks
    def test_BelowBatchThreshold(self):
        yield self.coll.insert([{'v':i} for i in xrange(100)], safe=True)
        res = yield self.coll.find()
        self.assertEqual(len(res), 100)
    @defer.inlineCallbacks
    def test_EqualToBatchThreshold(self):
        yield self.coll.insert([{'v':i} for i in xrange(101)], safe=True)
        res = yield self.coll.find()
        self.assertEqual(len(res), 101)
    @defer.inlineCallbacks
    def test_AboveBatchThreshold(self):
        yield self.coll.insert([{'v':i} for i in xrange(102)], safe=True)
        res = yield self.coll.find()
        self.assertEqual(len(res), 102)
    @defer.inlineCallbacks
    def tearDown(self):
        yield self.coll.drop()
        yield self.conn.disconnect()
class TestLimit(unittest.TestCase):
    """Behaviour of the ``limit`` option, including negative (hard) limits."""
    # Per-test timeout in seconds, honoured by twisted.trial.
    timeout = 5
    @defer.inlineCallbacks
    def setUp(self):
        self.conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        self.coll = self.conn.mydb.mycol
    @defer.inlineCallbacks
    def test_LimitBelowBatchThreshold(self):
        yield self.coll.insert([{'v':i} for i in xrange(50)], safe=True)
        res = yield self.coll.find(limit=20)
        self.assertEqual(len(res), 20)
    @defer.inlineCallbacks
    def test_LimitAboveBatchThreshold(self):
        yield self.coll.insert([{'v':i} for i in xrange(200)], safe=True)
        res = yield self.coll.find(limit=150)
        self.assertEqual(len(res), 150)
    @defer.inlineCallbacks
    def test_LimitAtBatchThresholdEdge(self):
        # Limits of 100, 101 and 102 bracket the first-batch boundary;
        # each round re-creates the collection from scratch.
        yield self.coll.insert([{'v':i} for i in xrange(200)], safe=True)
        res = yield self.coll.find(limit=100)
        self.assertEqual(len(res), 100)
        yield self.coll.drop(safe=True)
        yield self.coll.insert([{'v':i} for i in xrange(200)], safe=True)
        res = yield self.coll.find(limit=101)
        self.assertEqual(len(res), 101)
        yield self.coll.drop(safe=True)
        yield self.coll.insert([{'v':i} for i in xrange(200)], safe=True)
        res = yield self.coll.find(limit=102)
        self.assertEqual(len(res), 102)
    @defer.inlineCallbacks
    def test_LimitAboveMessageSizeThreshold(self):
        # A soft limit is honoured even when the documents are ~1 MB each.
        yield self.coll.insert([{'v':' '*(2**20)} for i in xrange(8)], safe=True)
        res = yield self.coll.find(limit=5)
        self.assertEqual(len(res), 5)
    @defer.inlineCallbacks
    def test_HardLimit(self):
        # A negative limit is a "hard" limit: the server returns at most
        # abs(limit) documents in a single reply.
        yield self.coll.insert([{'v':i} for i in xrange(200)], safe=True)
        res = yield self.coll.find(limit=-150)
        self.assertEqual(len(res), 150)
    @defer.inlineCallbacks
    def test_HardLimitAboveMessageSizeThreshold(self):
        # Only 4 of the 6 requested ~1 MB documents arrive -- presumably
        # capped by the server's single-reply message size; confirm
        # against the wire-protocol limit.
        yield self.coll.insert([{'v':' '*(2**20)} for i in xrange(8)], safe=True)
        res = yield self.coll.find(limit=-6)
        self.assertEqual(len(res), 4)
    @defer.inlineCallbacks
    def tearDown(self):
        yield self.coll.drop(safe=True)
        yield self.conn.disconnect()
class TestSkip(unittest.TestCase):
    """Behaviour of the ``skip`` option, alone and combined with ``limit``."""
    # Per-test timeout in seconds, honoured by twisted.trial.
    timeout = 5
    @defer.inlineCallbacks
    def setUp(self):
        self.conn = yield txmongo.MongoConnection(mongo_host, mongo_port)
        self.coll = self.conn.mydb.mycol
    @defer.inlineCallbacks
    def test_Skip(self):
        # skip < count: the remainder of the collection is returned.
        yield self.coll.insert([{'v':i} for i in xrange(5)], safe=True)
        res = yield self.coll.find(skip=3)
        self.assertEqual(len(res), 2)
        yield self.coll.drop(safe=True)
        # skip == count and skip > count: empty result, no error.
        yield self.coll.insert([{'v':i} for i in xrange(5)], safe=True)
        res = yield self.coll.find(skip=5)
        self.assertEqual(len(res), 0)
        yield self.coll.drop(safe=True)
        yield self.coll.insert([{'v':i} for i in xrange(5)], safe=True)
        res = yield self.coll.find(skip=6)
        self.assertEqual(len(res), 0)
    @defer.inlineCallbacks
    def test_SkipWithLimit(self):
        # limit is applied after skip and is truncated at the end of the
        # collection; each round re-creates the collection from scratch.
        yield self.coll.insert([{'v':i} for i in xrange(5)], safe=True)
        res = yield self.coll.find(skip=3, limit=1)
        self.assertEqual(len(res), 1)
        yield self.coll.drop(safe=True)
        yield self.coll.insert([{'v':i} for i in xrange(5)], safe=True)
        res = yield self.coll.find(skip=4, limit=2)
        self.assertEqual(len(res), 1)
        yield self.coll.drop(safe=True)
        yield self.coll.insert([{'v':i} for i in xrange(5)], safe=True)
        res = yield self.coll.find(skip=4, limit=1)
        self.assertEqual(len(res), 1)
        yield self.coll.drop(safe=True)
        yield self.coll.insert([{'v':i} for i in xrange(5)], safe=True)
        res = yield self.coll.find(skip=5, limit=1)
        self.assertEqual(len(res), 0)
    @defer.inlineCallbacks
    def tearDown(self):
        yield self.coll.drop(safe=True)
        yield self.conn.disconnect()
|
<reponame>jyshangguan/visfitter<filename>VisFitter/mcmc_emcee.py<gh_stars>0
import acor
import emcee
import corner
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from scipy.stats import truncnorm
from time import time
# 2*pi, used in the Gaussian normalization term of ChSq().
PI2 = 2. * np.pi
# Public API of this module.
__all__ = ["EmceeModel"]
#The probability functions
def lnprior(params, data, model):
    """
    Return the ln prior probability of ``params``.

    The prior is flat: the result is 0.0 when every varied parameter lies
    inside its allowed range, and -inf as soon as any one falls outside.

    Parameters
    ----------
    params : array-like
        Values of the varied parameters, ordered as the model enumerates
        them (model list order, then parameter order within each model).
    data : object
        Unused; kept so all probability functions share one signature.
    model : object
        Must provide get_modelParDict() and a _modelList attribute.

    Returns
    -------
    float
        0.0 or -inf.
    """
    logp = 0.0
    cursor = 0
    fitParDict = model.get_modelParDict()
    for compName in model._modelList:
        compPars = fitParDict[compName]
        for parName in compPars.keys():
            entry = compPars[parName]
            if not entry["vary"]:
                continue  # fixed parameters consume no slot in params
            value = params[cursor]
            cursor += 1
            lower, upper = entry["range"]
            if (value < lower) or (value > upper):
                logp -= np.inf
    return logp
def ChSq(data, model, unct=None):
    '''
    Gaussian chi-square statistic including the ln-normalization term.

    Parameters
    ----------
    data : float array
        The observed data.
    model : float array
        The model prediction.
    unct : float array, optional
        The 1-sigma uncertainties; defaults to an array of ones.

    Returns
    -------
    chsq : float
        sum(((data - model)/unct)**2) + sum(ln(2*pi*unct**2)), i.e. the
        chi-square plus the Gaussian normalization term.

    Notes
    -----
    None.
    '''
    sigma = np.ones_like(data) if unct is None else unct
    residual = (data - model) / sigma  # weighted residual
    return np.sum(residual ** 2) + np.sum(np.log(PI2 * sigma ** 2))
def lnlike_amp(params, data, model):
    """
    Ln likelihood computed from the visibility amplitude data only.

    Updates the model with ``params``, evaluates the model amplitude at
    the (u, v) points of ``data`` and scores it against data["visamp"]
    with uncertainties data["visampe"].
    """
    model.updateParList(params)
    ampObs = data["visamp"]
    ampErr = data["visampe"]
    ampMod = model.Amplitude(data["u"], data["v"])
    return -0.5 * ChSq(ampObs, ampMod, ampErr)
#def lnlike_amp(params, data, model):
# """
# Calculate the ln likelihood using only the amplitude data.
# """
# x, y, ye = data
# model.updateParList(params)
# ym = model.Amplitude(*x)
# lnL = -0.5 * ChSq(y, ym, ye)
# return lnL
def lnprob_amp(params, data, model):
    """
    Ln posterior probability at the given position in parameter space,
    using the amplitude-only likelihood. Returns -inf when the prior
    already rules the position out.
    """
    priorLp = lnprior(params, data, model)
    if np.isfinite(priorLp):
        return priorLp + lnlike_amp(params, data, model)
    return -np.inf
def lnlike_t3(params, data, model):
    """
    Ln likelihood combining the visibility amplitudes and the closure
    phases, each scored with its own uncertainties.
    """
    model.updateParList(params)
    ampTerm = ChSq(data["visamp"],
                   model.Amplitude(data["u"], data["v"]),
                   data["visampe"])
    phiTerm = ChSq(data["t3phi"],
                   model.Closure_Phase(data["t3uv1"], data["t3uv2"], data["t3uv3"]),
                   data["t3phie"])
    return -0.5 * ampTerm - 0.5 * phiTerm
def lnprob_t3(params, data, model):
    """
    Ln posterior probability at the given position in parameter space,
    using the amplitude + closure-phase likelihood. Returns -inf when
    the prior already rules the position out.
    """
    priorLp = lnprior(params, data, model)
    if np.isfinite(priorLp):
        return priorLp + lnlike_t3(params, data, model)
    return -np.inf
class EmceeModel(object):
    """
    The MCMC model for emcee.

    Wraps the data, the parametric model and an emcee EnsembleSampler,
    providing helpers to initialize walkers, run the chain, and
    summarize, save and plot the posterior samples.
    """
    def __init__(self, data, model, mode="amplitude"):
        """
        Parameters
        ----------
        data : dict
            Observed data, passed through to the probability functions.
        model : object
            The model; must provide get_parVaryList() and the other
            accessors used throughout this class.
        mode : str
            "amplitude" fits only visibility amplitudes; "closure phase"
            fits amplitudes and closure phases jointly.
        """
        self.__data = data
        self.__model = model
        # Number of varied (free) parameters.
        self.__dim = len(model.get_parVaryList())
        # Select the posterior function according to the fitting mode.
        if mode == "amplitude":
            self.__lnprob = lnprob_amp
        elif mode == "closure phase": #Fit the closure phase
            self.__lnprob = lnprob_t3
        else:
            raise ValueError("The mode ({0}) is not recognized!".format(mode))
    def from_prior(self):
        """
        Draw one position from the prior. The prior of all the parameters
        are uniform: continuous ("c") parameters are drawn uniformly from
        their range, discrete ("d") parameters uniformly from the listed
        choices. Returns a 1-D array of length dim.
        """
        parList = []
        parDict = self.__model.get_modelParDict()
        for modelName in self.__model._modelList:
            parFitDict = parDict[modelName]
            for parName in parFitDict.keys():
                if parFitDict[parName]["vary"]:
                    parRange = parFitDict[parName]["range"]
                    parType = parFitDict[parName]["type"]
                    if parType == "c":
                        r1, r2 = parRange
                        p = (r2 - r1) * np.random.rand() + r1 #Uniform distribution
                    elif parType == "d":
                        p = np.random.choice(parRange, 1)[0]
                    else:
                        raise TypeError("The parameter type '{0}' is not recognised!".format(parType))
                    parList.append(p)
                else:
                    pass
        parList = np.array(parList)
        return parList
    def EnsembleSampler(self, nwalkers, **kwargs):
        """
        Generate the EnsembleSampler, store it as self.sampler and
        remember the number of walkers.
        """
        self.sampler = emcee.EnsembleSampler(nwalkers, self.__dim, self.__lnprob,
                       args=[self.__data, self.__model], **kwargs)
        self.__nwalkers = nwalkers
        return self.sampler
    def p_ball(self, p0, ratio=5e-2, nwalkers=None):
        """
        Generate the positions of parameters around the input positions.
        The scipy.stats.truncnorm is used to generate the truncated normal distribution
        of the parameters within the prior ranges.
        """
        ndim = self.__dim
        if nwalkers is None:
            nwalkers = self.__nwalkers
        pRange = np.array(self.__model.get_parVaryRanges())
        p = np.zeros((nwalkers, ndim))
        for d in range(ndim):
            r0, r1 = pRange[d]
            # Std dev is a fraction of the prior width; truncnorm bounds
            # (a, b) are expressed in units of the std dev around loc.
            std = (r1 - r0) * ratio
            loc = p0[d]
            a = (r0 - loc) / std
            b = (r1 - loc) /std
            p[:, d] = truncnorm.rvs(a=a, b=b, loc=loc, scale=std, size=nwalkers)
        return p
    def p_prior(self):
        """
        Generate the positions in the parameter space from the prior.
        The result p0 shape is (nwalkers, dim).
        """
        p0 = [self.from_prior() for i in range(self.__nwalkers)]
        return p0
    def p_logl_max(self, chain=None, lnlike=None, QuietMode=True):
        """
        Find the position in the sampled parameter space that the likelihood is
        the highest.
        """
        if (not chain is None) and (not lnlike is None):
            if not QuietMode:
                print("The chain and lnlike are provided!")
        else:
            # Fall back to the chain stored on the sampler.
            chain = self.sampler.chain
            lnlike = self.sampler.lnprobability
        idx = lnlike.ravel().argmax()
        p = chain.reshape(-1, self.__dim)[idx]
        return p
    def p_logl_min(self):
        """
        Find the position in the sampled parameter space that the likelihood is
        the lowest.
        """
        chain = self.sampler.chain
        lnlike = self.sampler.lnprobability
        idx = lnlike.ravel().argmin()
        p = chain.reshape(-1, self.__dim)[idx]
        return p
    def get_logl(self, p):
        """
        Get the likelihood at the given position.
        """
        return self.__lnprob(p, self.__data, self.__model)
    def run_mcmc(self, pos, iterations, printFrac=1, quiet=False, **kwargs):
        """
        Run the MCMC chain.
        This function just wraps up the sampler.sample() so that there is output
        in the middle of the run.
        """
        if not quiet:
            t0 = time()
        #Notice that the third parameters yielded by EnsembleSampler and PTSampler are different.
        for i, (pos0, lnlike0, logl0) in enumerate(self.sampler.sample(pos, iterations=iterations, **kwargs)):
            # Report progress every printFrac fraction of the run.
            if not (i + 1) % int(printFrac * iterations):
                if quiet:
                    pass
                else:
                    progress = 100. * (i + 1) / iterations
                    idx = lnlike0.argmax()
                    lmax = lnlike0[idx]
                    lmin = lnlike0.min()
                    pmax = pos0.reshape((-1, self.__dim))[idx]
                    pname = self.__model.get_parVaryNames(latex=False)
                    print("-----------------------------")
                    print("[{0:<4.1f}%] lnL_max: {1:.3e}, lnL_min: {2:.3e}".format(progress, lmax, lmin))
                    for p, name in enumerate(pname):
                        print("{0:18s} {1:10.3e}".format(name, pmax[p]))
                    print( "**MCMC time elapsed: {0:.3f} min".format( (time()-t0)/60. ) )
        if not quiet:
            print("MCMC finishes!")
        # NOTE(review): this returns the *input* ``pos`` while ``lnlike0``
        # and ``logl0`` come from the final iteration; returning ``pos0``
        # (the final walker positions) may have been intended -- verify
        # against the callers before changing.
        return pos, lnlike0, logl0
    def integrated_time(self):
        """
        Estimate the integrated autocorrelation time of a time series.
        Since it seems there is something wrong with the sampler.integrated_time(),
        I have to calculated myself using acor package.

        Returns a nested list with shape (dim, nwalkers); entries are
        np.nan where acor fails.
        """
        chain = self.sampler.chain
        tauParList = []
        for npar in range(self.__dim):
            tauList = []
            for nwal in range(self.__nwalkers):
                pchain = chain[nwal, :, npar]
                try:
                    tau, mean, sigma = acor.acor(pchain)
                except:
                    # NOTE(review): bare except maps *any* failure (not
                    # only acor errors) to NaN.
                    tau = np.nan
                tauList.append(tau)
            tauParList.append(tauList)
        return tauParList
    def accfrac_mean(self):
        """
        Return the mean acceptance fraction of the sampler.
        """
        return np.mean(self.sampler.acceptance_fraction)
    def posterior_sample(self, burnin=0, fraction=0):
        """
        Return the samples merging from the chains of all the walkers.

        burnin drops the leading steps of every walker; when fraction > 0
        only walkers whose final-step ln-probability is at or above that
        percentile are kept.
        """
        sampler = self.sampler
        nwalkers = self.__nwalkers
        chain = sampler.chain
        # Final-step ln-probability of each walker, used for the filter.
        lnprob = sampler.lnprobability[:, -1]
        if burnin > (chain.shape[1]/2.0):
            raise ValueError("The burn-in length ({0}) is too long!".format(burnin))
        if fraction>0:
            lnpLim = np.percentile(lnprob, fraction)
            fltr = lnprob >= lnpLim
            print("ps: {0}/{1} walkers are selected.".format(np.sum(fltr), nwalkers))
            samples = chain[fltr, burnin:, :].reshape((-1, self.__dim))
        else:
            samples = chain[:, burnin:, :].reshape((-1, self.__dim))
        return samples
    def p_median(self, ps=None, **kwargs):
        """
        Return the median value of the parameters according to their posterior
        samples.
        """
        if ps is None:
            ps = self.posterior_sample(**kwargs)
        else:
            pass
        parMedian = np.median(ps, axis=0)
        return parMedian
    def p_uncertainty(self, low=1, center=50, high=99, burnin=50, ps=None, **kwargs):
        """
        Return the uncertainty of each parameter according to its posterior sample.

        Returns the [low, center, high] percentiles with shape (3, dim).
        NOTE(review): the default burnin here is 50, unlike the 0 used by
        posterior_sample() -- confirm this asymmetry is intended.
        """
        if ps is None:
            ps = self.posterior_sample(burnin=burnin, **kwargs)
        else:
            pass
        parRange = np.percentile(ps, [low, center, high], axis=0)
        return parRange
    def print_parameters(self, truths=None, low=1, center=50, high=99, **kwargs):
        """
        Print the ranges of the parameters according to their posterior samples
        and the values of the maximum a posterior (MAP).
        """
        nameList = self.__model.get_parVaryNames(latex=False)
        parRange = self.p_uncertainty(low, center, high, **kwargs)
        pMAP = self.p_logl_max()
        ttList = ["Name", "L ({0}%)".format(low),
                  "C ({0}%)".format(center),
                  "H ({0}%)".format(high), "MAP"]
        if not truths is None:
            ttList.append("Truth")
        tt = " ".join(["{0:12s}".format(i) for i in ttList])
        print("{:-<74}".format(""))
        print(tt)
        for d in range(self.__dim):
            plow = parRange[0, d]
            pcen = parRange[1, d]
            phgh = parRange[2, d]
            pmax = pMAP[d]
            name = nameList[d]
            # Warn when the MAP value lies outside the printed interval.
            if (pmax < plow) or (pmax > phgh):
                print("[MCMC Warning]: The best-fit '{0}' is not consistent with its posterior sample".format(name))
            pl = [plow, pcen, phgh]
            info = "{0:12s} {1[0]:<12.3e} {1[1]:<12.3e} {1[2]:<12.3e} {2:<12.3e}".format(name, pl, pmax)
            if truths is None:
                print(info)
            else:
                print(info+" {0:<12.3e}".format(truths[d]))
        p_logl_max = self.p_logl_max()
        print("lnL_max: {0:.3e}".format(self.get_logl(p_logl_max)))
    def Save_Samples(self, filename, **kwargs):
        """
        Save the posterior samples.
        """
        samples = self.posterior_sample(**kwargs)
        np.savetxt(filename, samples, delimiter=",")
    def Save_BestFit(self, filename, low=1, center=50, high=99, **kwargs):
        """
        Write the parameter percentile table and the MAP values to
        ``filename`` in the same layout as print_parameters().
        """
        nameList = self.__model.get_parVaryNames(latex=False)
        parRange = self.p_uncertainty(low, center, high, **kwargs)
        pMAP = self.p_logl_max()
        ttList = ["Name", "L ({0}%)".format(low),
                  "C ({0}%)".format(center),
                  "H ({0}%)".format(high), "MAP"]
        tt = " ".join(["{0:12s}".format(i) for i in ttList])
        # NOTE(review): the file handle is never closed explicitly; it is
        # only released when ``fp`` is garbage-collected.
        fp = open(filename, "w")
        fp.write(tt+"\n")
        for d in range(self.__dim):
            plow = parRange[0, d]
            pcen = parRange[1, d]
            phgh = parRange[2, d]
            pmax = pMAP[d]
            name = nameList[d]
            pl = [plow, pcen, phgh]
            info = "{0:12s} {1[0]:<12.3e} {1[1]:<12.3e} {1[2]:<12.3e} {2:<12.3e}".format(name, pl, pmax)
            fp.write(info+"\n")
        p_logl_max = self.p_logl_max()
        fp.write("#lnL_max: {0:.3e}".format(self.get_logl(p_logl_max)))
    def plot_corner(self, filename=None, burnin=0, fraction=0, ps=None, nuisance=True, **kwargs):
        """
        Plot the corner diagram that illustrate the posterior probability distribution
        of each parameter. Returns the figure when filename is None,
        otherwise saves it to filename and closes it.
        """
        if ps is None:
            ps = self.posterior_sample(burnin, fraction)
        parname = self.__model.get_parVaryNames()
        dim = self.__dim
        fig = corner.corner(ps[:, 0:dim], labels=parname[0:dim], **kwargs)
        if filename is None:
            return fig
        else:
            plt.savefig(filename)
            plt.close()
    def plot_chain(self, filename=None, truths=None):
        """
        Plot the trace of every walker for each parameter, one panel per
        parameter; optional ``truths`` are drawn as horizontal lines.
        Returns (fig, axes) when filename is None, otherwise saves and
        closes the figure.
        """
        dim = self.__dim
        sampler = self.sampler
        nameList = self.__model.get_parVaryNames()
        chain = sampler.chain
        fig, axes = plt.subplots(dim, 1, sharex=True, figsize=(8, 3*dim))
        for loop in range(dim):
            axes[loop].plot(chain[:, :, loop].T, color="k", alpha=0.4)
            axes[loop].yaxis.set_major_locator(MaxNLocator(5))
            if not truths is None:
                axes[loop].axhline(truths[loop], color="r", lw=2)
            axes[loop].set_ylabel(nameList[loop], fontsize=24)
        if filename is None:
            return (fig, axes)
        else:
            plt.savefig(filename)
            plt.close()
    def plot_lnlike(self, filename=None, iterList=[0.5, 0.8, 1.0], **kwargs):
        """
        Histogram the walker ln-probabilities at the chain iterations
        given as fractions of the total length (default: 50%, 80%, 100%).
        """
        lnprob = self.sampler.lnprobability
        _, niter = lnprob.shape
        # Rebinds the local name, so the mutable default list is never
        # mutated across calls.
        iterList = np.around(niter * np.array(iterList)) - 1
        fig = plt.figure()
        for i in iterList:
            l = lnprob[:, int(i)]
            # Exclude -inf entries (positions rejected by the prior).
            plt.hist(l[~np.isinf(l)], label="iter: {0}".format(i), **kwargs)
        plt.legend(loc="upper left")
        if filename is None:
            ax = plt.gca()
            return (fig, ax)
        else:
            plt.savefig(filename)
            plt.close()
    def reset(self):
        """
        Reset the sampler, for completeness.
        """
        self.sampler.reset()
    def diagnose(self):
        """
        Diagnose whether the MCMC run is reliable.

        Prints the mean acceptance fraction and, per parameter, the
        min-max integrated autocorrelation time across walkers.
        """
        nameList = self.__model.get_parVaryNames(latex=False)
        print("---------------------------------")
        print("Mean acceptance fraction: {0:.3f}".format(self.accfrac_mean()))
        print("PN : ACT (min-max)")
        it = self.integrated_time()
        for loop in range(self.__dim):
            itPar = it[loop]
            print("{0:9s}: {i[0]:.3f}-{i[1]:.3f}".format(nameList[loop], i=[min(itPar), max(itPar)]))
    def __getstate__(self):
        # Pickle support: the full instance dict (including the sampler).
        return self.__dict__
    def __setstate__(self, dict):
        # NOTE(review): the parameter name shadows the builtin ``dict``.
        self.__dict__ = dict
    def __del__(self):
        # Drop the heavyweight references explicitly on destruction.
        del self.__data
        del self.__model
        parList = self.__dict__.keys()
        if "sampler" in parList:
            del self.sampler
|
# -*- coding: utf-8 -*-
import sys
import os
from decimal import Decimal
import unittest
import magento
from mock import patch, MagicMock
import trytond.tests.test_tryton
from trytond.tests.test_tryton import POOL, USER, DB_NAME, CONTEXT
from test_base import TestBase, load_json
from trytond.transaction import Transaction
# Locate a ``trytond`` directory five levels above this file and, when it
# exists, put its parent on sys.path so the tests import trytond from the
# source checkout. (Assumes the standard checkout layout -- TODO confirm.)
DIR = os.path.abspath(os.path.normpath(
    os.path.join(
        __file__,
        '..', '..', '..', '..', '..', 'trytond'
    )
))
if os.path.isdir(DIR):
    sys.path.insert(0, os.path.dirname(DIR))
def mock_inventory_api(mock=None, data=None):
    """
    Return a MagicMock standing in for the magento Inventory API client.

    The mock's return value is a handle whose ``update`` always reports
    success; entering the handle as a context manager yields the handle
    itself, or ``data`` when one is supplied. An existing ``mock`` may be
    passed in to be configured in place.
    """
    api = MagicMock(spec=magento.Inventory) if mock is None else mock
    handle = MagicMock(spec=magento.Inventory)
    handle.update.side_effect = lambda id, data: True
    handle.__enter__.return_value = handle if data is None else data
    api.return_value = handle
    return api
def mock_product_api(mock=None, data=None):
    """
    Return a MagicMock standing in for the magento Product API client.

    The mock's return value is a handle whose ``info`` serves product
    fixtures via ``load_json``; entering the handle as a context manager
    yields the handle itself, or ``data`` when one is supplied. An
    existing ``mock`` may be passed in to be configured in place.
    """
    api = MagicMock(spec=magento.Product) if mock is None else mock
    handle = MagicMock(spec=magento.Product)
    handle.info.side_effect = \
        lambda id, identifierType: load_json('products', str(id))
    handle.__enter__.return_value = handle if data is None else data
    api.return_value = handle
    return api
class TestProduct(TestBase):
'''
Tests the methods of product
'''
def test_0010_import_product_categories(self):
"""
Test the import of product category using magento data
"""
Category = POOL.get('product.category')
MagentoCategory = POOL.get('magento.instance.product_category')
with Transaction().start(DB_NAME, USER, CONTEXT) as txn:
self.setup_defaults()
categories_before_import = Category.search([], count=True)
category_tree = load_json('categories', 'category_tree')
with txn.set_context({'current_channel': self.channel1.id}):
Category.create_tree_using_magento_data(category_tree)
categories_after_import = Category.search([], count=True)
self.assertTrue(
categories_before_import < categories_after_import
)
# Look for Root Category
root_categories = Category.search([
('parent', '=', None)
])
self.assertEqual(len(root_categories[0].magento_ids), 1)
root_category = root_categories[0]
self.assertEqual(root_category.magento_ids[0].magento_id, 1)
self.assertEqual(len(root_category.childs), 1)
self.assertEqual(len(root_category.childs[0].childs), 4)
self.assertTrue(
MagentoCategory.search([
('channel', '=', self.channel1)
], count=True) > 0
)
self.assertTrue(
MagentoCategory.search([
('channel', '=', self.channel2)
], count=True) == 0
)
def test_0020_import_simple_product(self):
"""
Test the import of simple product using Magento Data
"""
Category = POOL.get('product.category')
Product = POOL.get('product.product')
ProductSaleChannelListing = POOL.get('product.product.channel_listing')
with Transaction().start(DB_NAME, USER, CONTEXT) as txn:
self.setup_defaults()
category_data = load_json('categories', '8')
with txn.set_context({
'current_channel': self.channel1.id,
'company': self.company.id,
}):
Category.create_using_magento_data(category_data)
products_before_import = Product.search([], count=True)
product_data = load_json('products', '17')
product = Product.find_or_create_using_magento_data(
product_data
)
self.assertEqual(product.category.magento_ids[0].magento_id, 8)
self.assertEqual(
product.channel_listings[0].magento_product_type, 'simple'
)
self.assertEqual(product.name, 'BlackBerry 8100 Pearl')
products_after_import = Product.search([], count=True)
self.assertTrue(products_after_import > products_before_import)
# Make sure the categs are created only in channel1 and not
# not in channel2
self.assertTrue(ProductSaleChannelListing.search(
[('channel', '=', self.channel1)],
count=True) > 0
)
self.assertTrue(ProductSaleChannelListing.search(
[('channel', '=', self.channel2)],
count=True) == 0
)
def test_0300_import_product_wo_categories(self):
"""
Test the import of a product using magento data which doesn't
have categories
"""
Product = POOL.get('product.product')
with Transaction().start(DB_NAME, USER, CONTEXT) as txn:
self.setup_defaults()
product_data = load_json('products', '17-wo-category')
with txn.set_context({
'current_channel': self.channel1.id,
'company': self.company.id,
}):
product = Product.find_or_create_using_magento_data(
product_data
)
self.assertEqual(
product.channel_listings[0].magento_product_type, 'simple'
)
self.assertEqual(product.name, 'BlackBerry 8100 Pearl')
self.assertEqual(
product.category.name, 'Unclassified Magento Products'
)
def test_0040_import_configurable_product(self):
"""
Test the import of a configurable product using Magento Data
"""
Category = POOL.get('product.category')
Product = POOL.get('product.product')
with Transaction().start(DB_NAME, USER, CONTEXT) as txn:
self.setup_defaults()
category_data = load_json('categories', '17')
product_data = load_json('products', '135')
with txn.set_context({
'current_channel': self.channel1.id,
'company': self.company.id,
}):
Category.create_using_magento_data(category_data)
product = Product.find_or_create_using_magento_data(
product_data
)
self.assertEqual(
product.category.magento_ids[0].magento_id, 17
)
self.assertEqual(
product.channel_listings[0].magento_product_type,
'configurable'
)
    def test_0050_import_grouped_product(self):
        """
        Import a grouped product from Magento data and check category
        linkage and the listing's product type.
        """
        Category = POOL.get('product.category')
        Product = POOL.get('product.product')
        with Transaction().start(DB_NAME, USER, CONTEXT) as txn:
            self.setup_defaults()
            # NOTE(review): fixture ids are passed as ints here but as
            # strings in the sibling tests -- confirm load_json accepts both.
            category_data = load_json('categories', 22)
            product_data = load_json('products', 54)
            with txn.set_context({
                'current_channel': self.channel1.id,
                'company': self.company.id,
            }):
                Category.create_using_magento_data(category_data)
                product = Product.find_or_create_using_magento_data(
                    product_data
                )
                self.assertEqual(
                    product.category.magento_ids[0].magento_id, 22
                )
                self.assertEqual(
                    product.channel_listings[0].magento_product_type,
                    'grouped'
                )
    def test_0060_import_downloadable_product(self):
        """
        Import a downloadable product from Magento data; it must be
        created as a 'service' template and fall back to the default
        category (the fixture carries none).
        """
        Product = POOL.get('product.product')
        with Transaction().start(DB_NAME, USER, CONTEXT) as txn:
            self.setup_defaults()
            product_data = load_json('products', '170')
            with txn.set_context({
                'current_channel': self.channel1.id,
                'company': self.company.id,
            }):
                product = Product.find_or_create_using_magento_data(
                    product_data
                )
                # Downloadable goods are not stockable, hence 'service'.
                self.assertEqual(
                    product.template.type, 'service'
                )
                self.assertEqual(
                    product.channel_listings[0].magento_product_type,
                    'downloadable'
                )
                self.assertEqual(
                    product.category.name,
                    'Unclassified Magento Products'
                )
    def test_0070_update_product_using_magento_data(self):
        """
        Update an imported product from new Magento data: the record id
        must be stable while name, code and description change.
        """
        Product = POOL.get('product.product')
        Category = POOL.get('product.category')
        with Transaction().start(DB_NAME, USER, CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({
                'current_channel': self.channel1.id,
                'company': self.company.id,
            }):
                category_data = load_json('categories', '17')
                Category.create_using_magento_data(category_data)
                product_data = load_json('products', '135')
                product1 = \
                    Product.find_or_create_using_magento_data(
                        product_data
                    )
                # Snapshot the fields that the update is expected to touch.
                product_id_before_updation = product1.id
                product_name_before_updation = product1.name
                product_code_before_updation = \
                    product1.products[0].code
                product_description_before_updation = \
                    product1.products[0].description
                # Use a JSON file with product name, code and description
                # changed and everything else same
                product_data = load_json('products', '135001')
                product2 = \
                    product1.update_from_magento_using_data(
                        product_data
                    )
                # Same record, updated contents.
                self.assertEqual(
                    product_id_before_updation, product2.id
                )
                self.assertNotEqual(
                    product_name_before_updation,
                    product2.name
                )
                self.assertNotEqual(
                    product_code_before_updation,
                    product2.products[0].code
                )
                self.assertNotEqual(
                    product_description_before_updation,
                    product2.products[0].description
                )
    def test_0103_update_product_using_magento_id(self):
        """
        Update an imported product by re-fetching it through the (mocked)
        Magento API: id stays stable, name/code/description change.
        """
        Product = POOL.get('product.product')
        Category = POOL.get('product.category')
        with Transaction().start(DB_NAME, USER, CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({
                'current_channel': self.channel1.id,
                'company': self.company.id,
            }):
                category_data = load_json('categories', '17')
                Category.create_using_magento_data(category_data)
                product_data = load_json('products', '135001')
                product1 = \
                    Product.find_or_create_using_magento_data(
                        product_data
                    )
                # Snapshot the fields that the update is expected to touch.
                product_id_before_updation = product1.id
                product_name_before_updation = product1.name
                product_code_before_updation = \
                    product1.products[0].code
                product_description_before_updation = \
                    product1.products[0].description
                # The mocked API returns data with product name, code and
                # description changed and everything else the same.
                with patch('magento.Product', mock_product_api(), create=True):
                    product2 = product1.update_from_magento()
                self.assertEqual(
                    product_id_before_updation, product2.id
                )
                self.assertNotEqual(
                    product_name_before_updation,
                    product2.name
                )
                self.assertNotEqual(
                    product_code_before_updation,
                    product2.products[0].code
                )
                self.assertNotEqual(
                    product_description_before_updation,
                    product2.products[0].description
                )
    @unittest.skip("Skip till mock is there")
    def test_0080_export_product_stock_information(self):
        """
        Check that exporting product stock information runs end to end
        without breaking; the Magento API calls themselves are not
        verified (hence the skip until a mock is available).
        """
        Product = POOL.get('product.product')
        Category = POOL.get('product.category')
        with Transaction().start(DB_NAME, USER, CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({
                'current_channel': self.channel1.id,
                'company': self.company.id,
            }):
                category_data = load_json('categories', '17')
                Category.create_using_magento_data(category_data)
                # case 1: Product is valid
                product_data = load_json('products', '41')
                product = Product.find_or_create_using_magento_data(
                    product_data
                )
                self.assertEqual(len(product.channel_listings), 1)
                listing = product.channel_listings[0]
                listing.export_inventory()
                self.assertEqual(listing.state, 'active')
                # case 2: Use another Product that does not exist
                product_data = load_json('products', '135')
                product = Product.find_or_create_using_magento_data(
                    product_data
                )
                self.assertEqual(len(product.channel_listings), 1)
                listing = product.channel_listings[0]
                listing.export_inventory()
                # A missing remote product disables the listing.
                self.assertEqual(listing.state, 'disabled')
    def test_0090_tier_prices(self):
        """Check the function field on product price tiers: with a price
        list applying a 10% discount at quantity 10, the tier price must
        be 0.9 * list price.
        """
        PriceList = POOL.get('product.price_list')
        ProductPriceTier = POOL.get('product.price_tier')
        Product = POOL.get('product.product')
        Category = POOL.get('product.category')
        User = POOL.get('res.user')
        with Transaction().start(DB_NAME, USER, CONTEXT) as txn:
            self.setup_defaults()
            context = User.get_preferences(context_only=True)
            context.update({
                'current_channel': self.channel1.id,
                'company': self.company.id,
            })
            with txn.set_context(context):
                category_data = load_json('categories', '17')
                Category.create_using_magento_data(category_data)
                product_data = load_json('products', '135')
                product = Product.find_or_create_using_magento_data(
                    product_data
                )
                # 10% discount for quantities of 10 or more.
                price_list, = PriceList.create([{
                    'name': 'Test Pricelist',
                    'lines': [('create', [{
                        'quantity': 10,
                        'formula': 'unit_price*0.9'
                    }])]
                }])
                self.channel1.price_list = price_list
                self.channel1.save()
                self.assertEqual(len(product.channel_listings), 1)
                listing = product.channel_listings[0]
                tier, = ProductPriceTier.create([{
                    'product_listing': listing.id,
                    'quantity': 10,
                }])
                self.assertEqual(
                    listing.product.list_price * Decimal('0.9'), tier.price
                )
def suite():
    """Build this module's test suite on top of trytond's default suite."""
    test_suite = trytond.tests.test_tryton.suite()
    loader = unittest.TestLoader()
    test_suite.addTests([loader.loadTestsFromTestCase(TestProduct)])
    return test_suite
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
|
<gh_stars>10-100
#!/usr/bin/env python
#
# Copyright 2014 <NAME>
#
# gnTEAM, School of Computer Science, University of Manchester.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU General Public License.
#
# author: <NAME>
# email: <EMAIL>
#
# For details, see www.cs.man.ac.uk/~filannim/
'''It returns the ISO-TimeML-annotated input documents.'''
__author__ = "<NAME> <<EMAIL>>"
__version__ = "0.1"
__codename__ = "purple tempo"
import cPickle
import glob
import logging
import os
import xml.etree.cElementTree as cElementTree
from classifier import IdentificationClassifier
from classifier import NormalisationClassifier
from classifier import RelationClassifier
from settings import PATH_MODEL_FOLDER
class ManTIME(object):
    """Facade tying a reader, feature extractor, classifiers and a writer
    into the full ManTIME temporal-annotation pipeline.

    A pickled model is loaded from PATH_MODEL_FOLDER/<model_name>/ when it
    exists; otherwise ``train()`` must be called before ``label()``.
    """

    def __init__(self, reader, writer, extractor, model_name, pipeline=True,
                 domain='general'):
        assert domain in ('general', 'clinical')
        # Whether the post-processing pipeline is applied after
        # statistical identification.
        self.post_processing_pipeline = pipeline
        self.reader = reader
        self.writer = writer
        self.extractor = extractor
        self.documents = []
        self.model_name = model_name
        self.model_path = '{}/{}/model.pickle'.format(PATH_MODEL_FOLDER,
                                                      self.model_name)
        try:
            self.model = cPickle.load(open(os.path.abspath(self.model_path)))
            logging.info('{} model: loaded.'.format(self.model.name))
        except IOError:
            # No pickled model on disk yet: train() will build one.
            self.model = None
            logging.info('{} model: built.'.format(model_name))
        self.domain = domain

    def train(self, folder):
        """Train identification, normalisation and relation models on all
        matching documents in *folder*, pickle and return the model.
        """
        folder = os.path.abspath(folder)
        assert os.path.isdir(folder), 'Folder doesn\'t exist.'
        identifier = IdentificationClassifier()
        normaliser = NormalisationClassifier()
        linker = RelationClassifier()
        # corpus collection
        input_files = os.path.join(folder, self.reader.file_filter)
        documents = sorted(glob.glob(input_files))
        for index, input_file in enumerate(documents, start=1):
            basename = os.path.basename(input_file)
            position = '[{}/{}]'.format(index, len(documents))
            try:
                logging.info('{} Doc {}.'.format(position, basename))
                doc = self.extractor.extract(self.reader.parse(input_file))
                self.documents.append(doc)
            except cElementTree.ParseError:
                # Skip malformed XML input instead of aborting training.
                msg = '{} Doc {} skipped: parse error.'.format(position,
                                                               basename)
                logging.error(msg)
        # training models (identification and normalisation)
        modl = identifier.train(self.documents, self.model_name)
        modl = normaliser.train(self.documents, modl)
        modl = linker.train(self.documents, modl)
        self.model = modl
        # dumping models
        cPickle.dump(modl, open(self.model_path, 'w'))
        return modl

    def label(self, input_obj):
        """Annotate one input document and return the writer's output;
        returns [''] when the input cannot be parsed.
        """
        # according to the type
        assert self.model, 'Model not loaded.'
        identifier = IdentificationClassifier()
        normaliser = NormalisationClassifier()
        linker = RelationClassifier()
        try:
            doc = self.extractor.extract(self.reader.parse(input_obj))
            # NOTE(review): every stage is handed the raw [doc] rather than
            # the previous stage's result; this presumably relies on the
            # classifiers mutating `doc` in place -- confirm.
            annotated_doc = identifier.test([doc], self.model,
                                            self.post_processing_pipeline)
            annotated_doc = normaliser.test([doc], self.model, self.domain)
            annotated_doc = linker.test([doc], self.model)
            output = self.writer.write(annotated_doc)
            return output
        except cElementTree.ParseError:
            msg = 'Document {} skipped: parse error.'.format(
                os.path.relpath(input_obj))
            logging.error(msg)
            return ['']
|
<gh_stars>1-10
# Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate tensorflow graphs for testing tfcompile."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
from absl import app
import six
from six.moves import range
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import saver as saver_lib
FLAGS = None
def tfadd(_):
  """Minimal graph: the sum of two constant vectors."""
  lhs = constant_op.constant([1], name='x_const')
  rhs = constant_op.constant([2], name='y_const')
  math_ops.add(lhs, rhs, name='x_y_sum')
def tfadd_with_ckpt(out_dir):
  """Graph adding a placeholder to a variable restored from a checkpoint."""
  x = array_ops.placeholder(dtypes.int32, name='x_hold')
  y = variables.VariableV1(constant_op.constant([0]), name='y_saved')
  math_ops.add(x, y, name='x_y_sum')
  init_op = variables.global_variables_initializer()
  saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
  with session.Session() as sess:
    sess.run(init_op)
    sess.run(y.assign(y + 42))
    # Without the checkpoint, the variable won't be set to 42.
    ckpt = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt.ckpt')
    saver.save(sess, ckpt)
def tfadd_with_ckpt_saver(out_dir):
  """Like tfadd_with_ckpt, but also writes the serialized SaverDef."""
  x = array_ops.placeholder(dtypes.int32, name='x_hold')
  y = variables.VariableV1(constant_op.constant([0]), name='y_saved')
  math_ops.add(x, y, name='x_y_sum')
  init_op = variables.global_variables_initializer()
  saver = saver_lib.Saver(name='abcprefix', write_version=saver_pb2.SaverDef.V1)
  with session.Session() as sess:
    sess.run(init_op)
    sess.run(y.assign(y + 42))
    # Without the checkpoint, the variable won't be set to 42.
    ckpt_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.ckpt')
    saver.save(sess, ckpt_file)
    # Without the SaverDef, the restore op won't be named correctly.
    saver_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.saver')
    with open(saver_file, 'wb') as f:
      f.write(six.ensure_binary(saver.as_saver_def().SerializeToString()))
def tfassert_eq(_):
  """Graph asserting two placeholders are equal, then taking their diff."""
  lhs = array_ops.placeholder(dtypes.int32, name='x_hold')
  rhs = array_ops.placeholder(dtypes.int32, name='y_hold')
  control_flow_ops.Assert(
      math_ops.equal(lhs, rhs), ['Expected x == y.'], name='assert_eq')
  math_ops.add(lhs, math_ops.negative(rhs), name='x_y_diff')
def tfcond(_):
  """Graph selecting between two placeholders with a boolean predicate."""
  pred = array_ops.placeholder(dtypes.bool, name='p_hold')
  left = array_ops.placeholder(dtypes.int32, name='x_hold')
  right = array_ops.placeholder(dtypes.int32, name='y_hold')
  chosen = control_flow_ops.cond(pred, lambda: left, lambda: right)
  array_ops.identity(chosen, name='result')
def tfgather(_):
  """Graph gathering float values by integer indices."""
  table = array_ops.placeholder(dtypes.float32, name='params')
  idx = array_ops.placeholder(dtypes.int32, name='indices')
  array_ops.gather(table, idx, name='gather_output')
def tfmatmul(_):
  """Graph with a single matrix product of two placeholders."""
  lhs = array_ops.placeholder(dtypes.float32, name='x_hold')
  rhs = array_ops.placeholder(dtypes.float32, name='y_hold')
  math_ops.matmul(lhs, rhs, name='x_y_prod')
def tfmatmulandadd(_):
  """Graph with two outputs (product and sum) from the same inputs."""
  lhs = array_ops.placeholder(dtypes.float32, name='x_hold')
  rhs = array_ops.placeholder(dtypes.float32, name='y_hold')
  math_ops.matmul(lhs, rhs, name='x_y_prod')
  math_ops.add(lhs, rhs, name='x_y_sum')
def tffunction(_):
  """Graph calling a Defun-wrapped function on two constants."""
  @function.Defun(dtypes.int32, dtypes.int32)
  def test_func(a, b):
    return a + b
  x = constant_op.constant([1], name='x_const')
  y = constant_op.constant([2], name='y_const')
  test_func(x, y, name='func_call')  # pylint: disable=unexpected-keyword-arg
def tfsplits(_):
  """A more complex graph, including splits.

  Repeatedly splits, increments, matmuls and concatenates two 2x2 inputs;
  statement order inside the loop is significant.
  """
  x = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='x')
  y = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='y')
  for _ in range(3):
    x0, x1 = array_ops.split(x, 2, 0)
    y0, y1 = array_ops.split(y, 2, 0)
    x0 += 1
    y0 += 1
    z = math_ops.matmul(x, y, name='x_y_prod')
    a = array_ops.concat([x0, y1], axis=0, name='concat_x0_y1')
    b = array_ops.concat([y0, x1], axis=0, name='concat_y0_x1')
    x = math_ops.matmul(a, b, name='a_b')
    y = math_ops.add(x, z)
  array_ops.identity(y, name='result')
def tftop_k(_):
  """Graph taking the top-2 values and indices of a length-5 vector."""
  values_in = array_ops.placeholder(dtypes.int32, shape=[5], name='x')
  top = nn_ops.top_k(values_in, 2, name='values')
  array_ops.identity(top[1], name='indices')
def tfvariable_readonly(_):
  """Graph reading (but never assigning) a variable."""
  x = variables.Variable(1000.0, name='x')
  # Present only so the graph contains a second, unused variable.
  unused_y = variables.Variable(1000.0, name='y')
  old_x = x.value()
  with ops.control_dependencies([old_x]):
    new_value = math_ops.add(old_x, 42.0)
  array_ops.identity(new_value, name='result')
# TODO(b/147908587): Change x and the two constants back to have a scalar shape
# when the bug is fixed.
def tfvariable(_):
  """Graph reading and then updating a variable, stacking both values."""
  x = variables.Variable([1000.0], name='x', shape=[1])
  old_x = x.value()
  with ops.control_dependencies([old_x]):
    # Control dependency guarantees old_x is read before the update.
    new_x = x.assign_add([42.0])
  array_ops.stack([old_x, new_x], name='result')
def tfvariable_sequential_updates(_):
  """Graph with three chained (order-dependent) variable updates."""
  x = variables.Variable(1.0, name='x')
  y = variables.Variable(1.0, name='y')
  updates = control_flow_ops.no_op()
  for _ in range(3):
    with ops.control_dependencies([updates]):
      # Each update must observe the effect of the previous one.
      x_val = x.read_value() + y
      updates = x.assign_sub(0.1 * x_val)
  array_ops.identity(updates, name='result')
def export_debug_info(exported_graph):
  """Exports debug information from a graph.

  Args:
    exported_graph: A Graph that has been created by tracing a saveable view.

  Returns:
    Corresponding GraphDebugInfo with traces for all ops in exported_graph.
  """
  exported_operations = []
  for op in exported_graph.get_operations():
    # Empty string: no per-op name prefix is needed here.
    exported_operations.append(('', op))
  return error_interpolation.create_graph_debug_info_def(exported_operations)
def write_graph(build_graph, out_dir, debug_info=False):
  """Build a graph using build_graph and write it out.

  Writes test_graph_<name>.pb and, when debug_info is True, a companion
  test_debuginfo_<name>.pb with op stack traces.
  """
  g = ops.Graph()
  with g.as_default():
    build_graph(out_dir)
    filename = os.path.join(out_dir, 'test_graph_%s.pb' % build_graph.__name__)
    with open(filename, 'wb') as f:
      f.write(six.ensure_binary(g.as_graph_def().SerializeToString()))
    if debug_info:
      filename_debuginfo = os.path.join(
          out_dir, 'test_debuginfo_%s.pb' % build_graph.__name__)
      test_debuginfo = export_debug_info(g)
      with open(filename_debuginfo, 'wb') as f:
        f.write(
            six.ensure_binary(
                # Deterministic serialization keeps golden files stable.
                test_debuginfo.SerializeToString(deterministic=True)))
def main(_):
  """Write every test graph into FLAGS.out_dir.

  Only tfadd also emits debug info; the others only emit the GraphDef.
  """
  control_flow_util.enable_control_flow_v2()
  write_graph(tfadd, FLAGS.out_dir, debug_info=True)
  write_graph(tfadd_with_ckpt, FLAGS.out_dir)
  write_graph(tfadd_with_ckpt_saver, FLAGS.out_dir)
  write_graph(tfassert_eq, FLAGS.out_dir)
  write_graph(tfcond, FLAGS.out_dir)
  write_graph(tffunction, FLAGS.out_dir)
  write_graph(tfgather, FLAGS.out_dir)
  write_graph(tfmatmul, FLAGS.out_dir)
  write_graph(tfmatmulandadd, FLAGS.out_dir)
  write_graph(tfsplits, FLAGS.out_dir)
  write_graph(tftop_k, FLAGS.out_dir)
  write_graph(tfvariable, FLAGS.out_dir)
  write_graph(tfvariable_readonly, FLAGS.out_dir)
  write_graph(tfvariable_sequential_updates, FLAGS.out_dir)
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # Accept --flag true/false style booleans.
  parser.register('type', 'bool', lambda v: v.lower() == 'true')
  parser.add_argument(
      '--out_dir',
      type=str,
      default='',
      help='Output directory for graphs, checkpoints and savers.')
  FLAGS, unparsed = parser.parse_known_args()
  # Forward unparsed args so absl flags still work.
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
<reponame>Abdullah0297445/Django-Projects<filename>Miniature Hospital Management System/hospital_app/views.py
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.models import User
from .models import Patient, DiagReport
from django.urls import reverse
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from .forms import ReportCreateForm
from django.contrib.messages.views import SuccessMessageMixin
class ReportListView(ListView):
    """Home page: newest-first, paginated list of all diagnosis reports."""
    model = DiagReport
    template_name = 'hospital_app/home.html'
    context_object_name = 'reports'
    ordering = ['-date_created']
    paginate_by = 5
class DocListView(ListView):
    """Paginated list of doctors.

    NOTE(review): this lists every User account as a doctor -- confirm
    there is no separate doctor flag/group to filter on.
    """
    model = User
    template_name = 'hospital_app/doc_view.html'
    context_object_name = 'docs'
    #ordering = ['-date_joined']
    paginate_by = 5
class PatListView(ListView):
    """Paginated list of the patients belonging to the doctor named in the
    URL (``username`` kwarg); 404 when no such user exists.
    """
    model = Patient
    template_name = 'hospital_app/pat_view.html'
    context_object_name = 'pats'
    #ordering = ['-date_joined']
    paginate_by = 5
    def get_queryset(self):
        user = get_object_or_404(User, username=self.kwargs.get('username'))
        return user.patient_set.all()
class DocReportListView(ListView):
    """Newest-first, paginated list of every report belonging to one
    doctor's patients (doctor chosen by the ``username`` URL kwarg).
    """
    model = DiagReport
    template_name = 'hospital_app/doc_reports.html'
    context_object_name = 'docreps'
    paginate_by = 5

    def get_queryset(self):
        """Union the report querysets of all of the doctor's patients.

        Fix: start from an empty queryset. The previous code called
        ``user.patient_set.first()`` and dereferenced the result
        unconditionally, raising AttributeError for a doctor with no
        patients (and unioning the first patient's reports twice).
        """
        user = get_object_or_404(User, username=self.kwargs.get('username'))
        reports = DiagReport.objects.none()
        for pat in user.patient_set.all():
            reports |= pat.diagreport_set.all()
        return reports.order_by('-date_created')
class PatReportListView(ListView):
    """Newest-first, paginated list of one patient's reports (patient
    chosen by the ``pk`` URL kwarg); 404 when no such patient exists.
    """
    model = Patient
    template_name = 'hospital_app/pat_reports.html'
    context_object_name = 'reports'
    paginate_by = 5
    def get_queryset(self):
        pat = get_object_or_404(Patient, id=self.kwargs.get('pk'))
        reports = pat.diagreport_set.all()
        return reports.order_by('-date_created')
class ReportDetailView(DetailView):
    """Detail page for a single diagnosis report."""
    model = DiagReport
class ReportCreateView(LoginRequiredMixin, CreateView):
    """Create a diagnosis report (login required)."""
    model = DiagReport
    form_class = ReportCreateForm
    def form_valid(self, form):
        # NOTE(review): this sets the *patient's* doctor, not a field on
        # the report, and the patient is never explicitly saved here --
        # confirm this assignment has the intended effect.
        form.instance.pat.doc = self.request.user
        return super().form_valid(form)
class ReportUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a diagnosis report; restricted to the owning doctor."""
    model = DiagReport
    fields = ['title', 'rprt', 'pat']

    def form_valid(self, form):
        form.instance.pat.doc = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # Only the doctor attached to the report's patient may edit it.
        return self.request.user == self.get_object().pat.doc
class ReportDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a diagnosis report; restricted to the owning doctor."""
    model = DiagReport
    success_url = '/'

    def test_func(self):
        # Only the doctor attached to the report's patient may delete it.
        return self.request.user == self.get_object().pat.doc
class PatientCreateView(LoginRequiredMixin, SuccessMessageMixin, CreateView):
    """Create a patient record owned by the logged-in doctor."""
    model = Patient
    success_message = 'New patient record has been added successfully !'
    fields = ['first', 'last', 'age', 'gender']
    template_name = 'hospital_app/patient_create.html'
    def form_valid(self, form):
        # The creating doctor becomes the patient's doctor.
        form.instance.doc = self.request.user
        return super().form_valid(form)
class PatientUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a patient record; restricted to the owning doctor."""
    model = Patient
    fields = ['first', 'last', 'age', 'gender']
    template_name = 'hospital_app/patient_update.html'

    def form_valid(self, form):
        form.instance.doc = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # Only the patient's own doctor may edit the record.
        return self.request.user == self.get_object().doc
class PatientDeleteView(LoginRequiredMixin, UserPassesTestMixin, SuccessMessageMixin, DeleteView):
    """Delete a patient record; restricted to the owning doctor."""
    model = Patient
    success_message = 'Patient record has been deleted!'
    template_name = 'hospital_app/patient_delete.html'

    def get_success_url(self):
        # The deleted object's pk is gone by now, so redirect to the
        # current doctor's patient list instead.
        return reverse('Pat-List', kwargs={'username':self.request.user.username})

    def test_func(self):
        # Only the patient's own doctor may delete the record.
        return self.request.user == self.get_object().doc
def about(request):
    """Render the static About page."""
    context = {"title": "About"}
    return render(request, 'hospital_app/about.html', context)
#!/usr/bin/env python
import datetime
import os
import click
import numpy as np
from mpi4py import MPI
from epg.launching import launcher, logger
from epg.envs.random_robots import RandomHopper, DirHopper, NormalHopper
from epg.evolution import ES
"""
Evolved Policy Gradients (EPG)
------------------------------
Run via:
cd <path_to_EPG_folder/EPG>
PYTHONPATH=. python epg/launch_local.py
Test via:
PYTHONPATH=. python epg/launch_local.py --test true
"""
def env_selector(env_id, seed=0):
if 'RandomHopper' == env_id:
env = RandomHopper(seed=seed)
elif 'DirHopper' == env_id:
env = DirHopper(seed=seed)
elif 'NormalHopper' == env_id:
env = NormalHopper(seed=seed)
else:
raise Exception('Unknown environment.')
return env
def setup_es(seed=0, env_id='DirHopper', log_path='/tmp/out', n_cpu=1, **agent_args):
    """Seed numpy and the environment per MPI rank and build the ES object.

    NOTE(review): the *seed* parameter is immediately overwritten by the
    MPI-rank-derived value, so the caller's seed is ignored -- confirm
    this is intentional.
    """
    seed = MPI.COMM_WORLD.Get_rank() * 1000
    assert agent_args is not None
    np.random.seed(seed)
    env = env_selector(env_id, seed)
    env.seed(seed)
    es = ES(env, env_id, **agent_args)
    logging.info('Experiment configuration: {}'.format(str(locals())))
    return es
def test_run(seed=0, env_id='DirHopper', log_path='/tmp/out', n_cpu=1, **agent_args):
    """Build the ES object and run evaluation (used with --test true)."""
    es = setup_es(seed, env_id, log_path, n_cpu, **agent_args)
    es.test(**agent_args, n_cpu=n_cpu)
def run(seed=0, env_id='DirHopper', log_path='/tmp/out', n_cpu=1, **agent_args):
    """Build the ES object and run training."""
    es = setup_es(seed, env_id, log_path, n_cpu, **agent_args)
    es.train(**agent_args, n_cpu=n_cpu)
@click.command()
@click.option("--test", type=bool, default=False)
def main(test):
    """Assemble the experiment configuration and launch EPG locally.

    With --test true, runs evaluation loading theta from
    ``theta_load_path``; otherwise runs training.
    """
    d = datetime.datetime.now()
    date = '{}-{}'.format(d.month, d.day)
    time = '{:02d}-{:02d}'.format(d.hour, d.minute)
    # Experiment params
    # -----------------
    env_id = 'DirHopper'
    # Number of noise vector seeds for ES
    outer_n_samples_per_ep = 8
    # Perform policy SGD updates every `inner_opt_freq` steps
    inner_opt_freq = 64
    # Perform `inner_max_n_epoch` total SGD policy updates,
    # so in total `inner_steps` = `inner_opt_freq` * `inner_max_n_epoch`
    inner_max_n_epoch = 128
    # Temporal convolutions slide over buffer of length `inner_buffer_size`
    inner_buffer_size = inner_opt_freq * 8
    # Use PPO bootstrapping?
    ppo = True
    # Evolve policy initialization together with loss function?
    gpi = False
    # Fix PPO alpha (ppo_factor) to 0.5?
    fix_ppo = False
    # Use memory structure?
    mem = False
    # Number of outer loop epochs
    outer_n_epoch = 2000
    # Outer loop theta L2 penalty
    outer_l2 = 0.001
    # Outer loop noise standard deviation
    outer_std = 0.01
    # Outer loop Adam step size
    outer_learning_rate = 1e-2
    # Inner loop batch size per gradient update
    inner_opt_batch_size = 32
    # Number of times to cycle through the sampled dataset in the inner loop
    inner_n_opt_steps = 1
    # Inner loop adam step size
    inner_lr = 1e-3
    # Plotting frequency in number of outer loop epochs
    plot_freq = 50
    # Maximum number of cpus used per MPI process
    max_cpu = 2
    # Local experiment log path
    launcher.LOCAL_LOG_PATH = os.path.expanduser("~/EPG_experiments")
    # Where to load theta from for `--test true` purposes
    theta_load_path = '~/EPG_experiments/<path_to_theta.npy>/theta.npy'
    # -----------------
    # Compact experiment tag encoding the key hyperparameters and flags.
    exp_tag = '{}-{}-{}{}{}{}'.format(
        outer_n_samples_per_ep,
        inner_opt_freq,
        inner_max_n_epoch,
        '-p' if ppo else '',
        '-i' if gpi else '',
        '-f' if fix_ppo else '',
    ).replace('.', '')
    exp_name = '{}-{}-{}'.format(time, env_id.lower(), exp_tag)
    job_name = 'epg-{}--{}'.format(date, exp_name)
    epg_args = dict(
        env_id=env_id,
        n_cpu=max_cpu,
        log_path=os.path.join(launcher.LOCAL_LOG_PATH, date, exp_name),
        load_theta_path=theta_load_path if test else None,
        plot_freq=plot_freq,
        outer_n_epoch=outer_n_epoch,
        outer_l2=outer_l2,
        outer_std=outer_std,
        outer_learning_rate=outer_learning_rate,
        outer_n_samples_per_ep=outer_n_samples_per_ep,
        inner_opt_freq=inner_opt_freq,
        inner_max_n_epoch=inner_max_n_epoch,
        inner_opt_batch_size=inner_opt_batch_size,
        inner_buffer_size=inner_buffer_size,
        inner_n_opt_steps=inner_n_opt_steps,
        inner_lr=inner_lr,
        mem=mem,
        inner_use_ppo=ppo,
        fix_ppo=fix_ppo,
        gpi=gpi,
    )
    mpi_machines = 1
    # Enough MPI processes so every noise sample gets max_cpu pool workers.
    mpi_proc_per_machine = int(np.ceil(outer_n_samples_per_ep / mpi_machines / float(max_cpu)))
    logger.log(
        'Running experiment {}/{} with {} noise vectors on {} machines with {}'
        ' MPI processes per machine, each using {} pool processes.'.format(
            date, exp_name, outer_n_samples_per_ep, mpi_machines, mpi_proc_per_machine, max_cpu))
    # Experiment launcher
    launcher.call(job_name=job_name,
                  fn=test_run if test else run,
                  kwargs=epg_args,
                  log_relpath=os.path.join(date, exp_name),
                  mpi_proc_per_machine=mpi_proc_per_machine,
                  mpi_machines=mpi_machines)
# Entry point when launched directly (see module docstring for usage).
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
unit test for loop functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
from py.test import raises
from jinja2.exceptions import UndefinedError, TemplateSyntaxError
# Jinja2 template fixtures for the loop tests below.  The template bodies
# are whitespace-sensitive rendered output -- keep them byte-exact.
SIMPLE = '''{% for item in seq %}{{ item }}{% endfor %}'''
ELSE = '''{% for item in seq %}XXX{% else %}...{% endfor %}'''
EMPTYBLOCKS = '''<{% for item in seq %}{% else %}{% endfor %}>'''
CONTEXTVARS = '''{% for item in seq %}\
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}'''
CYCLING = '''{% for item in seq %}{{ loop.cycle('<1>', '<2>') }}{% endfor %}\
{% for item in seq %}{{ loop.cycle(*through) }}{% endfor %}'''
SCOPE = '''{% for item in seq %}{% endfor %}{{ item }}'''
VARLEN = '''{% for item in iter %}{{ item }}{% endfor %}'''
NONITER = '''{% for item in none %}...{% endfor %}'''
RECURSIVE = '''{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}'''
LOOPLOOP = '''{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}'''
LOOPERROR1 = '''\
{% for item in [1] if loop.index == 0 %}...{% endfor %}'''
LOOPERROR2 = '''\
{% for item in [] %}...{% else %}{{ loop }}{% endfor %}'''
LOOPFILTER = '''\
{% for item in range(10) if item is even %}[{{ item }}]{% endfor %}'''
EXTENDEDLOOPFILTER = '''\
{% for item in range(10) if item is even %}[{{ loop.index
}}:{{ item }}]{% endfor %}'''
LOOPUNASSIGNABLE = '''\
{% for loop in seq %}...{% endfor %}'''
def test_simple(env):
    """A bare for-loop renders each item in order."""
    tmpl = env.from_string(SIMPLE)
    assert tmpl.render(seq=range(10)) == '0123456789'
def test_else(env):
    """The else-block renders when the sequence is missing/empty."""
    tmpl = env.from_string(ELSE)
    assert tmpl.render() == '...'
def test_empty_blocks(env):
    """Empty loop and else bodies render to nothing."""
    tmpl = env.from_string(EMPTYBLOCKS)
    assert tmpl.render() == '<>'
def test_context_vars(env):
    """All `loop.*` context variables carry correct values for a
    two-element sequence."""
    tmpl = env.from_string(CONTEXTVARS)
    one, two, _ = tmpl.render(seq=[0, 1]).split('###')
    (one_index, one_index0, one_revindex, one_revindex0, one_first,
     one_last, one_length) = one.split('|')
    (two_index, two_index0, two_revindex, two_revindex0, two_first,
     two_last, two_length) = two.split('|')
    assert int(one_index) == 1 and int(two_index) == 2
    assert int(one_index0) == 0 and int(two_index0) == 1
    assert int(one_revindex) == 2 and int(two_revindex) == 1
    assert int(one_revindex0) == 1 and int(two_revindex0) == 0
    assert one_first == 'True' and two_first == 'False'
    assert one_last == 'False' and two_last == 'True'
    assert one_length == two_length == '2'
def test_cycling(env):
    """`loop.cycle` works with literal args and with an unpacked tuple."""
    tmpl = env.from_string(CYCLING)
    output = tmpl.render(seq=range(4), through=('<1>', '<2>'))
    assert output == '<1><2>' * 4
def test_scope(env):
    """The loop variable does not leak out of the for-block."""
    tmpl = env.from_string(SCOPE)
    output = tmpl.render(seq=range(10))
    assert not output
def test_varlen(env):
    """Looping works over a generator with no known length."""
    def inner():
        for item in range(5):
            yield item
    tmpl = env.from_string(VARLEN)
    output = tmpl.render(iter=inner())
    assert output == '01234'
def test_noniter(env):
    """Looping over a non-iterable raises TypeError."""
    tmpl = env.from_string(NONITER)
    raises(TypeError, tmpl.render)
def test_recursive(env):
    """A `recursive` loop can re-invoke itself via `loop(...)`."""
    tmpl = env.from_string(RECURSIVE)
    assert tmpl.render(seq=[
        dict(a=1, b=[dict(a=1), dict(a=2)]),
        dict(a=2, b=[dict(a=1), dict(a=2)]),
        dict(a=3, b=[dict(a='a')])
    ]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'
def test_looploop(env):
    """The outer `loop` can be captured with `set` inside a nested loop."""
    tmpl = env.from_string(LOOPLOOP)
    assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'
def test_reversed_bug(env):
    """Regression: `loop.last` works over a `reversed()` iterator."""
    tmpl = env.from_string('{% for i in items %}{{ i }}{% if not loop.last %}'
                           ',{% endif %}{% endfor %}')
    assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'
def test_loop_errors(env):
    """`loop` is undefined in the loop condition and in the else block."""
    tmpl = env.from_string(LOOPERROR1)
    raises(UndefinedError, tmpl.render)
    tmpl = env.from_string(LOOPERROR2)
    assert tmpl.render() == ''
def test_loop_filter(env):
    """Loop filters skip items; `loop.index` counts only kept items."""
    tmpl = env.from_string(LOOPFILTER)
    assert tmpl.render() == '[0][2][4][6][8]'
    tmpl = env.from_string(EXTENDEDLOOPFILTER)
    assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'
def test_loop_unassignable(env):
    """Using `loop` as the target variable is a syntax error."""
    raises(TemplateSyntaxError, env.from_string, LOOPUNASSIGNABLE)
|
<filename>loader.py
import os
import json
import logging
from launcher import *
class Loader(object):
    def __init__(self, dconn):
        # Maps launcher name -> {user -> Launcher}; the same name may be
        # configured by several users.
        self.launchers = {}
        self.home_root = '/home'
        self.dconn = dconn
def get_launcher(self, launcher_name, user = None):
if launcher_name not in self.launchers:
return None
launchers = self.launchers[launcher_name]
if user is not None:
if user in launchers:
return launchers[user]
else:
return None
elif 1 == len(launchers):
return list(launchers.values())[0]
else:
logging.error('ambiguous launchers found with name %s for users: %s'
, launcher_name
, ''.join([u for u in launchers.keys()]))
return None
def add_launcher(self, user, launcher):
if launcher.name not in self.launchers:
self.launchers[launcher.name] = {user: launcher}
else:
self.launchers[launcher.name][user] = launcher
def load_one(self, user, launcher_name, conf, home):
workdir = conf['work_dir']
workdir = workdir.replace('~', home)
if not os.path.exists(workdir):
logging.warn("cann't find workdir for %s/%s:%s, ignore it's configure"
, user
, launcher_name
, workdir)
return None
start_cmd = conf.get('start_cmd', '')
if 0 == len(start_cmd) and 'all' != launcher_name:
logging.warn("not start_cmd found for %s/%s, ignore it's configure"
, user
, launcher_name)
return None
out_dir = conf.get('out_dir', None)
launcher = Launcher(user,
launcher_name,
home,
workdir,
out_dir,
self.dconn)
launcher.is_help = conf.get('is_help', False)
launcher.set_start_command(start_cmd
, conf.get('pre_start_cmd', None)
, conf.get('post_start_cmd', None)
, conf.get('ignore_pre_start_error', False)
, conf.get('ignore_post_start_error', False)
)
default_stop_cmd = f"ps aux|grep -h {launcher.cmd_user} | grep -Evh 'grep|ftlauncher|su|sshd' | awk '{{print $2}}'|xargs -n 1 -I p kill p"
launcher.set_stop_command(conf.get('stop_cmd', default_stop_cmd)
, conf.get('pre_stop_cmd', None)
, conf.get('post_stop_cmd', None)
, conf.get('ignore_pre_stop_error', False)
, conf.get('ignore_post_stop_error', False)
)
default_status_cmd = f"ps aux|grep -h {launcher.cmd_user} | grep -Evh 'grep|ftlauncher|su|sshd'"
launcher.set_status_command(conf.get('status_cmd', default_status_cmd))
dependence_names = conf.get('dependences', [])
launcher.dependence_names = dependence_names if isinstance(dependence_names, list) else dependence_names.split()
self.add_launcher(user, launcher)
logging.info('load launcher %s success', launcher_name)
return launcher
def load_4_user(self, user, home):
conf_dir_4_user = '.ftapp.conf'
if not os.path.exists(conf_dir_4_user):
return
logging.info('load_4_user {0}'.format(user))
try:
user_all_launchers = []
oldcwd = os.getcwd()
os.chdir(conf_dir_4_user)
conf_files = os.listdir('.')
logging.info(f'{user} have configured:{conf_files}')
for conf_file_name in conf_files:
launcher_name, conf_ext = os.path.splitext(conf_file_name)
if u'.json' != conf_ext:
continue
try:
logging.info('loading launcher {0}'.format(launcher_name))
full_name = f'{user}/{launcher_name}'
launcher = self.get_launcher(launcher_name, user)
if launcher is not None:
continue
conf_file = open(conf_file_name, 'r')
conf = json.load(conf_file)
launcher = self.load_one(user, launcher_name, conf, home)
if not launcher.is_help:
user_all_launchers.append(full_name)
except Exception as e:
logging.error("load launcher %s/%s failed, detail:%s", user, launcher_name, str(e))
finally:
if 0 != len(user_all_launchers):
user_all_conf = {"work_dir":"~", "dependences":user_all_launchers}
try:
self.load_one(user, 'all', user_all_conf, home)
except Exception as e:
logging.error(f"load launcher {user}/all failed, detail:{str(e)}")
os.chdir(oldcwd)
def load(self, home_root):
users = os.listdir(home_root)
for user in users:
home = os.path.join(home_root, user)
self.load_user(user, home)
def load_user(self, user, home):
oldcwd = os.getcwd()
try:
os.chdir(home)
self.load_4_user(user, home)
finally:
os.chdir(oldcwd)
def split_launcher_name(self, full_name):
user = None
dep_name = full_name
if '/' in str(full_name):
user, dep_name = full_name.split('/')
return (user, dep_name)
def resolve(self):
for launcher_name, launchers in self.launchers.items():
for user, launcher in launchers.items():
is_resoloved = True
for dependence_name in launcher.dependence_names:
dep_user, dep_name = self.split_launcher_name(dependence_name)
dep_launcher = self.get_launcher(dep_name, dep_user)
if dep_launcher is not None:
logging.info(f"resolove {user}/{launcher.name}'s depency {dependence_name}")
launcher.add_dependence(dep_launcher)
else:
is_resoloved = False
launcher.is_resoloved = is_resoloved
def list(self, user=None):
result = []
for launcher_name, launchers in self.launchers.items():
for user1, launcher in launchers.items():
if user is None or user == user1:
result.append('{0}/{1}'.format(user1, launcher_name))
result = sorted(result)
return result
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from gibson2.envs.igibson_env import iGibsonEnv
import gibson2
import gym
import numpy as np
import os
import sys
def set_path(path: str):
    """Prepend *path* to sys.path unless it is already present."""
    if path not in sys.path:
        sys.path.insert(0, path)
# path to custom tf_agents
set_path('/media/suresh/research/awesome-robotics/active-slam/catkin_ws/src/sim-environment/src/tensorflow/stanford/agents')
# set_path('/home/guttikon/awesome_robotics/sim-environment/src/tensorflow/stanford/agents')
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from tf_agents.environments import wrappers
from tf_agents.policies import random_tf_policy
from utils.navigate_env import NavigateGibsonEnv
def load(config_file,
         model_id=None,
         env_mode='headless',
         action_timestep=1.0 / 10.0,
         physics_timestep=1.0 / 40.0,
         device_idx=0,
         gym_env_wrappers=(),
         env_wrappers=(),
         spec_dtype_map=None):
    """Create a NavigateGibsonEnv and wrap it for TF-Agents.

    Discount and episode length come from the environment's own config
    ('discount_factor', default 0.99; 'max_step', default 500).
    """
    gibson_env = NavigateGibsonEnv(
        config_file=config_file,
        scene_id=model_id,
        mode=env_mode,
        action_timestep=action_timestep,
        physics_timestep=physics_timestep,
        device_idx=device_idx,
    )
    return wrap_env(
        gibson_env,
        discount=gibson_env.config.get('discount_factor', 0.99),
        max_episode_steps=gibson_env.config.get('max_step', 500),
        gym_env_wrappers=gym_env_wrappers,
        time_limit_wrapper=wrappers.TimeLimit,
        env_wrappers=env_wrappers,
        spec_dtype_map=spec_dtype_map,
        auto_reset=True,
    )
def wrap_env(env,
             discount=1.0,
             max_episode_steps=0,
             gym_env_wrappers=(),
             time_limit_wrapper=wrappers.TimeLimit,
             env_wrappers=(),
             spec_dtype_map=None,
             auto_reset=True):
    """Apply gym-level wrappers, convert to a PyEnvironment, then apply
    py-environment wrappers, optionally capping episode length."""
    for gym_level_wrapper in gym_env_wrappers:
        env = gym_level_wrapper(env)
    env = gym_wrapper.GymWrapper(
        env,
        discount=discount,
        spec_dtype_map=spec_dtype_map,
        match_obs_space_dtype=True,
        auto_reset=auto_reset,
        simplify_box_bounds=True,
    )
    # a non-positive limit disables the TimeLimit wrapper entirely
    if max_episode_steps > 0:
        env = time_limit_wrapper(env, max_episode_steps)
    for py_level_wrapper in env_wrappers:
        env = py_level_wrapper(env)
    return env
if __name__ == '__main__':
    # Smoke test: load the environment in GUI mode and drive it with a
    # purely random policy (no learning involved).
    eval_py_env = load(
        config_file=os.path.join('./configs/', 'turtlebot_navigate.yaml'),
        env_mode='gui',
        device_idx=0,
    )
    eval_tf_env = tf_py_environment.TFPyEnvironment(eval_py_env)
    rnd_policy = random_tf_policy.RandomTFPolicy(
        time_step_spec=eval_tf_env.time_step_spec(),
        action_spec=eval_tf_env.action_spec())
    # 5 episodes of 100 random steps each
    for _ in range(5):
        time_step = eval_tf_env.reset()
        for _ in range(100):
            action_step = rnd_policy.action(time_step)
            time_step = eval_tf_env.step(action_step.action)
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer
class MaskedDense(Layer):
    """
    Masked fully connected layer. For full documentation of the fully-connected architecture, see the
    TensorFlow Keras Dense layer documentation.
    This layer implements masking consistent with the MANN API to support developing sparse models.
    """
    def __init__(
        self,
        units,
        use_bias = True,
        activation = None,
        kernel_initializer = 'random_normal',
        mask_initializer = 'ones',
        bias_initializer = 'zeros',
        **kwargs
    ):
        """
        Parameters
        ----------
        units : int
            The number of artificial neurons to use
        use_bias : bool (default True)
            Whether to use a bias calculation in the outputs
        activation : None, str, or function (default None)
            The activation function to use on the outputs
        kernel_initializer : str or keras initialization function (default 'random_normal')
            The weight initialization function to use
        mask_initializer : str or keras initialization function (default 'ones')
            The mask initialization function to use
        bias_initializer : str or keras initialization function (default 'zeros')
            The bias initialization function to use
        """
        super(MaskedDense, self).__init__(**kwargs)
        self.units = int(units)  # normalize to a plain int (no-op for ints)
        self.use_bias = use_bias
        self.activation = tf.keras.activations.get(activation)
        self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
        self.mask_initializer = tf.keras.initializers.get(mask_initializer)
        self.bias_initializer = tf.keras.initializers.get(bias_initializer)

    def build(self, input_shape):
        """Create the kernel/bias weights and their non-trainable masks."""
        self.w = self.add_weight(
            shape = (input_shape[-1], self.units),
            initializer = self.kernel_initializer,
            trainable = True,
            name = 'weights'
        )
        self.w_mask = self.add_weight(
            shape = self.w.shape,
            initializer = self.mask_initializer,
            trainable = False,
            name = 'weights_mask'
        )
        if self.use_bias:
            self.b = self.add_weight(
                shape = (self.units,),
                initializer = self.bias_initializer,
                trainable = True,
                name = 'bias'
            )
            self.b_mask = self.add_weight(
                shape = self.b.shape,
                initializer = self.mask_initializer,
                trainable = False,
                name = 'bias_mask'
            )

    def call(self, inputs):
        """Forward pass: masked matmul (plus masked bias), then activation."""
        if self.use_bias:
            return self.activation(tf.matmul(inputs, self.w * self.w_mask) + (self.b * self.b_mask))
        else:
            return self.activation(tf.matmul(inputs, self.w * self.w_mask))

    def get_config(self):
        """Return a serializable config dict (see also ``from_config``)."""
        config = super().get_config().copy()
        config.update(
            {
                'units' : self.units,
                'use_bias' : self.use_bias,
                'activation' : tf.keras.activations.serialize(self.activation),
                'kernel_initializer' : tf.keras.initializers.serialize(self.kernel_initializer),
                'mask_initializer' : tf.keras.initializers.serialize(self.mask_initializer),
                'bias_initializer' : tf.keras.initializers.serialize(self.bias_initializer)
            }
        )
        return config

    def set_masks(self, new_masks):
        """
        Set the masks for the layer

        Parameters
        ----------
        new_masks : list of arrays or array-likes
            The new masks to set for the layer. ``new_masks[0]`` is the
            kernel mask; when ``use_bias`` is True, ``new_masks[1]`` is the
            bias mask. Masked weights are zeroed immediately.
        """
        # BUG FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``float`` yields the same dtype (float64).
        kernel_mask = new_masks[0].astype(float)
        if not self.use_bias:
            # weight order: [w, w_mask]
            self.set_weights(
                [self.w.numpy() * kernel_mask, kernel_mask]
            )
        else:
            bias_mask = new_masks[1].astype(float)
            # weight order: trainable first -> [w, b, w_mask, b_mask]
            self.set_weights(
                [self.w.numpy() * kernel_mask, self.b.numpy() * bias_mask, kernel_mask, bias_mask]
            )

    @classmethod
    def from_config(cls, config):
        """Recreate the layer from the output of ``get_config``."""
        return cls(
            units = config['units'],
            use_bias = config['use_bias'],
            activation = config['activation'],
            kernel_initializer = config['kernel_initializer'],
            mask_initializer = config['mask_initializer'],
            bias_initializer = config['bias_initializer']
        )
import pytest
import sunpy.net.dataretriever.sources.eve as eve
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.net._attrs import Instrument, Level, Time
from sunpy.net.vso.attrs import Source
from sunpy.net.dataretriever.client import QueryResponse
from sunpy.net.fido_factory import UnifiedResponse
from sunpy.net.vso import VSOClient
from sunpy.time import parse_time
from sunpy.time.timerange import TimeRange
# Shared client instance used by every test in this module.
LCClient = eve.EVEClient()
@pytest.mark.remote_data
@pytest.mark.parametrize("timerange,url_start,url_end", [
    (TimeRange('2012/4/21', '2012/4/21'),
     'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120421_EVE_L0CS_DIODES_1m.txt',
     'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120421_EVE_L0CS_DIODES_1m.txt'
     ),
    (TimeRange('2012/5/5', '2012/5/6'),
     'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120505_EVE_L0CS_DIODES_1m.txt',
     'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120506_EVE_L0CS_DIODES_1m.txt',
     ),
    (TimeRange('2012/7/7', '2012/7/14'),
     'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120707_EVE_L0CS_DIODES_1m.txt',
     'http://lasp.colorado.edu/eve/data_access/evewebdata/quicklook/L0CS/SpWx/2012/20120714_EVE_L0CS_DIODES_1m.txt',
     )
])
def test_get_url_for_time_range(timerange, url_start, url_end):
    """One quicklook URL per day; first/last URLs must bound the range."""
    urls = LCClient._get_url_for_timerange(timerange)
    assert isinstance(urls, list)
    assert urls[0] == url_start
    assert urls[-1] == url_end
def test_can_handle_query():
    """EVEClient accepts only Time + Instrument('eve') with level 0 / '0CS'."""
    accepted = [
        (Time('2012/8/9', '2012/8/10'), Instrument('eve'), Level(0)),
        (Time('2012/8/9', '2012/8/10'), Instrument('eve'), Level('0CS')),
    ]
    rejected = [
        (Time('2012/7/7', '2012/7/7'),),
        (Time('2012/8/9', '2012/8/10'), Instrument('eve'), Source('sdo')),
        (Time('2012/8/9', '2012/8/10'), Instrument('eve'), Level('wibble')),
        (Time('2012/8/9', '2012/8/10'), Instrument('eve'), Level(0.5)),
    ]
    for query_attrs in accepted:
        assert eve.EVEClient._can_handle_query(*query_attrs) is True
    for query_attrs in rejected:
        assert eve.EVEClient._can_handle_query(*query_attrs) is False
@pytest.mark.remote_data
def test_query():
    """A two-day EVE search returns two daily files spanning the range."""
    response = LCClient.search(Time('2012/8/9', '2012/8/10'), Instrument('eve'))
    assert isinstance(response, QueryResponse)
    assert len(response) == 2
    found_range = response.time_range()
    assert found_range.start == parse_time('2012/08/09')
    assert found_range.end == parse_time('2012/08/11')  # includes end.
@pytest.mark.remote_data
@pytest.mark.parametrize("time,instrument", [
    (Time('2012/11/27', '2012/11/27'), Instrument('eve')),
])
def test_get(time, instrument):
    """fetch() downloads exactly one file per search result."""
    search_result = LCClient.search(time, instrument)
    downloaded = LCClient.fetch(search_result)
    assert len(downloaded) == len(search_result)
@pytest.mark.remote_data
@pytest.mark.parametrize(
    'query',
    [(a.Time('2012/10/4', '2012/10/6') & a.Instrument('eve') & a.Level(0))])
def test_fido(query):
    """Fido routes level-0 EVE queries to EVEClient and fetches every file."""
    unified = Fido.search(query)
    assert isinstance(unified, UnifiedResponse)
    assert isinstance(unified.get_response(0).client, eve.EVEClient)
    fetched = Fido.fetch(unified)
    assert len(fetched) == unified._numfile
@pytest.mark.remote_data
@pytest.mark.parametrize(
    'time',
    [(a.Time('2012/10/4', '2012/10/6')), (a.Time('2012/11/27', '2012/11/27'))])
def test_levels(time):
    """
    Test the correct handling of level
    Level 0 comes from EVEClient, other levels from EVE.
    """
    eve_a = a.Instrument('EVE')
    qr = Fido.search(time, eve_a)
    # fix: the comprehension variable used to be ``a``, shadowing the
    # ``sunpy.net.attrs`` alias imported at module level
    clients = {type(resp.client) for resp in qr.responses}
    assert clients == {VSOClient}
    qr = Fido.search(time, eve_a, a.Level(0))
    clients = {type(resp.client) for resp in qr.responses}
    assert clients == {eve.EVEClient}
    # This is broken because the VSO Eve client doesn't provide a way of allowing Level.
    #qr = Fido.search(time, eve_a, a.Level(0) | a.Level(1))
    #clients = {type(a.client) for a in qr.responses}
    #assert clients == {eve.EVEClient}
|
<reponame>freelan-developers/plix
"""
Test the configuration parser.
"""
from __future__ import print_function
from unittest import TestCase
from contextlib import contextmanager
from voluptuous import (
MultipleInvalid,
)
from six import StringIO
from mock import (
patch,
MagicMock,
)
import plix.configuration
class ConfigurationTests(TestCase):
    """Unit tests for plix.configuration loading and normalization."""
    def test_load_from_stream(self):
        """A YAML stream parses into a dict with list-valued 'script'."""
        stream = StringIO(
            u"""
            script:
            - alpha
            - beta
            """,
        )
        loaded_conf = plix.configuration.load_from_stream(stream=stream)
        self.assertEqual(
            ['alpha', 'beta'],
            loaded_conf['script'],
        )
    def test_load_from_file(self):
        """load_from_file reads via open(); open is patched to a StringIO."""
        @contextmanager
        def mocked_open(*args, **kwargs):
            yield StringIO(
                u"""
                script:
                - alpha
                - beta
                """,
            )
        with patch('plix.configuration.open', mocked_open, create=True):
            loaded_conf = plix.configuration.load_from_file(filename='foo.yml')
        self.assertEqual(
            ['alpha', 'beta'],
            loaded_conf['script'],
        )
    def test_command_or_command_list_with_strings(self):
        """A bare string is promoted to a one-element list."""
        value = "hello"
        self.assertEqual(
            [value],
            plix.configuration.command_or_command_list(value),
        )
    def test_command_or_command_list_with_lists(self):
        """Lists pass through unchanged."""
        value = ["hello"]
        self.assertEqual(
            value,
            plix.configuration.command_or_command_list(value),
        )
    def test_command_or_command_list_with_tuples(self):
        """Tuples pass through unchanged."""
        value = ("hello",)
        self.assertEqual(
            value,
            plix.configuration.command_or_command_list(value),
        )
    def test_command_or_command_list_with_int(self):
        """Non-string scalars are rejected."""
        with self.assertRaises(ValueError):
            plix.configuration.command_or_command_list(42)
    def test_command_or_command_list_with_floats(self):
        """Non-string scalars are rejected."""
        with self.assertRaises(ValueError):
            plix.configuration.command_or_command_list(42.0)
    def test_command_or_command_list_with_none(self):
        """None is rejected."""
        with self.assertRaises(ValueError):
            plix.configuration.command_or_command_list(None)
    def test_normalize_with_appropriate_configuration(self):
        """Already-valid configuration values survive normalize() unchanged."""
        conf = {
            'matrix': {
                'alpha': 1,
                'beta': 2,
            },
            'install': ('install.sh',),
            'script': ['alpha'],
        }
        ref_conf = conf.copy()
        norm_conf = plix.configuration.normalize(conf)
        for key in ref_conf:
            self.assertEqual(ref_conf[key], norm_conf[key])
    def test_normalize_with_inappropriate_configuration(self):
        """Two schema violations yield two MultipleInvalid errors."""
        conf = {
            'matrix': [],
            'script': {
                'key': 'value',
            },
        }
        with self.assertRaises(MultipleInvalid) as ex:
            plix.configuration.normalize(conf)
        self.assertEqual(2, len(ex.exception.errors))
    def test_normalize_transforms_values(self):
        """normalize() promotes a scalar 'script' to a list."""
        conf = {
            'script': 'alpha',
        }
        ref_conf = {
            'script': ['alpha'],
        }
        norm_conf = plix.configuration.normalize(conf)
        self.assertEqual(ref_conf['script'], norm_conf['script'])
    def test_normalize_parses_executors(self):
        """A dotted-path 'executor' string is resolved and instantiated."""
        my_module = MagicMock()
        my_executor = my_module.MyExecutor()
        conf = {
            'executor': 'my_module.MyExecutor',
        }
        ref_conf = {
            'executor': my_executor,
        }
        with patch.dict(
            'sys.modules',
            {'my_module': my_module},
        ):
            norm_conf = plix.configuration.normalize(conf)
        self.assertEqual(ref_conf['executor'], norm_conf['executor'])
    def test_normalize_parses_executors_with_options(self):
        """A dict-form 'executor' passes its 'options' to the constructor."""
        my_module = MagicMock()
        my_executor = my_module.MyExecutor()
        # reset so the assert below sees only the call made by normalize()
        my_module.MyExecutor.reset_mock()
        conf = {
            'executor': {
                'name': 'my_module.MyExecutor',
                'options': {
                    'a': 'alpha',
                    'b': 'beta',
                },
            },
        }
        ref_conf = {
            'executor': my_executor,
        }
        with patch.dict(
            'sys.modules',
            {'my_module': my_module},
        ):
            norm_conf = plix.configuration.normalize(conf)
        self.assertEqual(ref_conf['executor'], norm_conf['executor'])
        my_module.MyExecutor.assert_called_once_with(
            options={
                'a': 'alpha',
                'b': 'beta',
            },
        )
|
from copy import copy
__author__ = 'Anthony'
import numpy as np
import cv2
import cv
from scipy.cluster.hierarchy import fclusterdata
from scipy.spatial.distance import pdist, squareform
from hungarian import linear_assignment
# Debug toggles controlling which intermediate windows/outputs are shown.
show_sub_img = False
show_raw_img = False
show_cluster_img = True
show_kalman_img = True
sub_window = "No background"
# Input clip plus three annotated output clips (1920x1080 @ 30 fps).
cap = cv2.VideoCapture("overpass.mp4")
fourcc = cv2.cv.CV_FOURCC('P', 'I', 'M', '1')
# diff output is single-channel (grayscale frame differences)
diff_out = cv2.VideoWriter("overpass_diff.avi", fourcc, 30, (1920, 1080), isColor=False)
cluster_out = cv2.VideoWriter("overpass_cluster.avi", fourcc, 30, (1920, 1080))
kalman_out = cv2.VideoWriter("overpass_kalman.avi", fourcc, 30, (1920, 1080))
if show_cluster_img:
    cluster_window = "Clusters"
def frame_diff(old, new):
    """Return the grayscale absolute difference between two BGR frames,
    writing it to ``diff_out`` (and showing it when ``show_sub_img``)."""
    gray_old = cv2.cvtColor(old, cv2.COLOR_BGR2GRAY)
    gray_new = cv2.cvtColor(new, cv2.COLOR_BGR2GRAY)
    delta = cv2.absdiff(gray_old, gray_new)
    if show_sub_img:
        cv2.imshow(sub_window, delta)
    diff_out.write(delta)
    return delta
def make_2d_kalman(x, y):
    """Create a 4-state (x, y, vx, vy) constant-velocity Kalman filter
    seeded at position (x, y) with zero initial velocity.

    Returns (kalman, measurement, state, process_noise) legacy ``cv`` mats;
    only the first two are used by the tracking loop.
    """
    kalman = cv.CreateKalman(4, 2, 0)
    kalman_state = cv.CreateMat(4, 1, cv.CV_32FC1)
    kalman_process_noise = cv.CreateMat(4, 1, cv.CV_32FC1)
    kalman_measurement = cv.CreateMat(2, 1, cv.CV_32FC1)
    # set previous state for prediction
    kalman.state_pre[0, 0] = x
    kalman.state_pre[1, 0] = y
    kalman.state_pre[2, 0] = 0
    kalman.state_pre[3, 0] = 0
    # constant-velocity transition matrix:
    #   x' = x + 0.5*vx,  y' = y + 0.5*vy,  vx' = vx,  vy' = vy
    kalman.transition_matrix[0, 0] = 1
    kalman.transition_matrix[0, 1] = 0
    kalman.transition_matrix[0, 2] = .5
    kalman.transition_matrix[0, 3] = 0
    kalman.transition_matrix[1, 0] = 0
    kalman.transition_matrix[1, 1] = 1
    kalman.transition_matrix[1, 2] = 0
    kalman.transition_matrix[1, 3] = .5
    kalman.transition_matrix[2, 0] = 0
    kalman.transition_matrix[2, 1] = 0
    # BUG FIX: row 2 was [0, 0, 0, 1], which made vx' = vy instead of
    # vx' = vx; the correct constant-velocity row is [0, 0, 1, 0].
    kalman.transition_matrix[2, 2] = 1
    kalman.transition_matrix[2, 3] = 0
    kalman.transition_matrix[3, 0] = 0
    kalman.transition_matrix[3, 1] = 0
    kalman.transition_matrix[3, 2] = 0
    kalman.transition_matrix[3, 3] = 1
    # set Kalman Filter noise/covariance defaults
    cv.SetIdentity(kalman.measurement_matrix, cv.RealScalar(1))
    cv.SetIdentity(kalman.process_noise_cov, cv.RealScalar(.01))
    cv.SetIdentity(kalman.measurement_noise_cov, cv.RealScalar(.01))
    cv.SetIdentity(kalman.error_cov_post, cv.RealScalar(1))
    return kalman, kalman_measurement, kalman_state, kalman_process_noise
#
# params for ShiTomasi corner detection
feature_params = dict(maxCorners=500,
                      qualityLevel=.5,
                      minDistance=10,)
                      #blockSize=7)
# params for subpix corner refinement.
subpix_params = dict(zeroZone=(-1,-1),winSize=(10,10),
                     criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS,20,0.03))
# Parameters for lucas kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors (one per cluster id when drawing)
color = np.random.randint(0, 255, (100, 3))
#
count = 0
tracks = []
features = []
# each entry is the tuple returned by make_2d_kalman()
kalmans = []
# prime the loop with the first frame difference
ret, raw_frame = cap.read()
ret, raw_frame2 = cap.read()
frame = frame_diff(raw_frame, raw_frame2)
while True:
    cv2.imshow("raw_video", raw_frame)
    ret, raw_frame2 = cap.read()
    old_frame = frame
    frame = frame_diff(raw_frame, raw_frame2)
    raw_frame = raw_frame2
    if True:#features is None or len(features) <= 2:
        features = cv2.goodFeaturesToTrack(frame, **feature_params)
        if features is None:
            continue
    if features is not None and len(features) > 3:
        cv2.cornerSubPix(frame, features, **subpix_params)
        tracks = [[p] for p in features.reshape((-1,2))] # reshape features into pairs.
    #else:
    tmp = np.float32(features).reshape(-1, 1, 2)
    # calculate optical flow between consecutive difference frames
    new_features, lk_status, lk_error = cv2.calcOpticalFlowPyrLK(old_frame,
                                                                 frame,
                                                                 tmp,
                                                                 None,
                                                                 **lk_params)
    # remove points that are "lost"
    features = [point[0] for (status, point) in zip(lk_status, new_features) if status]
    new_features = np.array(new_features).reshape((-1, 2))
    if not len(features) > 2:
        continue
    # group nearby feature points (50 px linkage) into candidate vehicles
    cluster_assignments = fclusterdata(features, 50, criterion='distance')
    if show_cluster_img:
        cluster_frame = copy(raw_frame2)
        for assignment, feature in zip(cluster_assignments, features):
            if assignment < len(color):
                cv2.circle(cluster_frame, (int(feature[0]),
                                           int(feature[1])), 5, color[assignment], 10)
        #cv2.imshow('Clusters', cluster_frame)
        cluster_out.write(cluster_frame)
    clusters = []
    for i in range(max(cluster_assignments)):
        clusters.append([])
    for assignment, data in zip(cluster_assignments, features):
        clusters[assignment-1].append(data)
    # ignore single-point clusters (noise)
    large_clusters = [cluster for cluster in clusters if len(cluster) > 1]
    cluster_means = []
    for cluster in large_clusters:
        mean = np.mean(cluster, axis=0)
        cluster_means.append(mean)
    if not kalmans:  # if we aren't tracking any cars, see if there are any cars to track
        kalmans = [make_2d_kalman(point[0], point[1]) for point in cluster_means]
        lost = [0] * len(kalmans)
    # kalman predict
    predictions = [cv.KalmanPredict(kalman[0]) for kalman in kalmans]
    estimates = [(prediction[0, 0], prediction[1, 0]) for prediction in predictions]
    # FIX: initialize before the branch -- previously this name was only
    # bound inside ``if estimates:``, so the measurement-update loop below
    # raised NameError on the first frame without active filters.
    successfully_tracked = []
    # perform linear assignment between detections and filter predictions
    if estimates:
        dist = pdist(cluster_means + estimates)
        points_found = len(cluster_means)
        #dist = pdist([[1,1], [1.2,1.2], [3,3], [25,25], [24,26],[1.25,1.25], [1.3,1.3]])
        square_dist = squareform(dist)
        # rows = detections, columns = predictions
        chopped = square_dist[:points_found, points_found:]
        assignments = linear_assignment(chopped)  # we now have a list of pairs for each point.
        #print assignments
        # Python 2: range() returns a list, so .remove() below works
        new = range(points_found)
        for assignment in assignments:
            new.remove(assignment[0])
            if square_dist[assignment[0], assignment[1]] < 50:
                successfully_tracked.append(assignment)
            else:
                # assigned too far away: count the filter as lost this frame
                lost[assignment[1]] += 1
    else:
        assignments = np.ndarray([])
    if assignments.size == 0:
        lost = [l+1 for l in lost]
    #next loops estimates
    # kalman measurement updates
    states = []
    for assignment in successfully_tracked:  # measurement update
        x = cluster_means[assignment[0]][0]
        y = cluster_means[assignment[0]][1]
        assigned_kalman = kalmans[assignment[1]]
        assigned_kalman[1][0, 0] = x
        assigned_kalman[1][1, 0] = y
        corrected = cv.KalmanCorrect(assigned_kalman[0], assigned_kalman[1])
        states.append((corrected[0, 0], corrected[1, 0]))
        lost[assignment[1]] = 0
    if estimates:
        # start a new filter for every unmatched detection
        for new_point in new:
            new_filter = make_2d_kalman(*cluster_means[new_point])
            # BUG FIX: this used to call cv.KalmanPredict(kalman[0]),
            # reusing the leaked comprehension variable (always the *last*
            # pre-existing filter) instead of the newly created one.
            prediction = cv.KalmanPredict(new_filter[0])
            estimates.append((prediction[0, 0], prediction[1, 0]))
            kalmans.append(new_filter)
            lost.append(0)
    # drop filters that have been lost for more than 6 frames
    remove_idxs = []
    #print lost
    for idx, lost_count in enumerate(lost):
        if lost_count > 6:
            remove_idxs.append(idx)
    for idx in remove_idxs[::-1]:
        lost.pop(idx)
        kalmans.pop(idx)
    kal_idx = 0
    #print estimates
    if show_kalman_img:
        kalman_img = copy(raw_frame2)
        for point in estimates:
            point = int(point[0]), int(point[1])
            cv2.circle(kalman_img, point, 6, (255, 0, 0),3)
        #cv2.imshow("Kalman Centers", kalman_img)
        kalman_out.write(kalman_img)
    k = cv2.waitKey(30)
    if k == 27:  # ESC quits
        break
diff_out.release()
cluster_out.release()
kalman_out.release()
# parenthesized form works identically on Python 2 (single argument)
print("goodbye")
|
<reponame>co2palm/antivirus_demo
#!/usr/bin/env python2
import argparse
import pickle
import requests
import sys
import os
from sklearn.externals import joblib
# PE header Machine field: name -> numeric value (from the PE/COFF spec).
MACHINE_TYPES = {
    "IMAGE_FILE_MACHINE_UNKNOWN": 0,
    "IMAGE_FILE_MACHINE_I386": 0x014c,
    "IMAGE_FILE_MACHINE_R3000": 0x0162,
    "IMAGE_FILE_MACHINE_R4000": 0x0166,
    "IMAGE_FILE_MACHINE_R10000": 0x0168,
    "IMAGE_FILE_MACHINE_WCEMIPSV2": 0x0169,
    "IMAGE_FILE_MACHINE_ALPHA": 0x0184,
    "IMAGE_FILE_MACHINE_SH3": 0x01a2,
    "IMAGE_FILE_MACHINE_SH3DSP": 0x01a3,
    "IMAGE_FILE_MACHINE_SH3E": 0x01a4,
    "IMAGE_FILE_MACHINE_SH4": 0x01a6,
    "IMAGE_FILE_MACHINE_SH5": 0x01a8,
    "IMAGE_FILE_MACHINE_ARM": 0x01c0,
    "IMAGE_FILE_MACHINE_THUMB": 0x01c2,
    "IMAGE_FILE_MACHINE_AM33": 0x01d3,
    "IMAGE_FILE_MACHINE_POWERPC": 0x01F0,
    "IMAGE_FILE_MACHINE_POWERPCFP": 0x01f1,
    "IMAGE_FILE_MACHINE_IA64": 0x0200,
    "IMAGE_FILE_MACHINE_MIPS16": 0x0266,
    "IMAGE_FILE_MACHINE_ALPHA64": 0x0284,
    "IMAGE_FILE_MACHINE_MIPSFPU": 0x0366,
    "IMAGE_FILE_MACHINE_MIPSFPU16": 0x0466,
    "IMAGE_FILE_MACHINE_TRICORE": 0x0520,
    "IMAGE_FILE_MACHINE_CEF": 0x0CEF,
    "IMAGE_FILE_MACHINE_EBC": 0x0EBC,
    "IMAGE_FILE_MACHINE_AMD64": 0x8664,
    "IMAGE_FILE_MACHINE_M32R": 0x9041,
    "IMAGE_FILE_MACHINE_CEE": 0xC0EE
}
# PE file header Characteristics flags: name -> bit value.
PE_CHARACTERISTICS = {
    "IMAGE_FILE_RELOCS_STRIPPED": 0x0001,
    "IMAGE_FILE_EXECUTABLE_IMAGE": 0x0002,
    "IMAGE_FILE_LINE_NUMS_STRIPPED": 0x0004,
    "IMAGE_FILE_LOCAL_SYMS_STRIPPED": 0x0008,
    "IMAGE_FILE_AGGRESIVE_WS_TRIM": 0x0010,
    "IMAGE_FILE_LARGE_ADDRESS_AWARE": 0x0020,
    "IMAGE_FILE_BYTES_REVERSED_LO": 0x0080,
    "IMAGE_FILE_32BIT_MACHINE": 0x0100,
    "IMAGE_FILE_DEBUG_STRIPPED": 0x0200,
    "IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP": 0x0400,
    "IMAGE_FILE_NET_RUN_FROM_SWAP": 0x0800,
    "IMAGE_FILE_SYSTEM": 0x1000,
    "IMAGE_FILE_DLL": 0x2000,
    "IMAGE_FILE_UP_SYSTEM_ONLY": 0x4000,
    "IMAGE_FILE_BYTES_REVERSED_HI": 0x8000
}
# Optional header Subsystem field: name -> numeric value.
SUBSYSTEMS = {
    "IMAGE_SUBSYSTEM_UNKNOWN": 0,
    "IMAGE_SUBSYSTEM_NATIVE": 1,
    "IMAGE_SUBSYSTEM_WINDOWS_GUI": 2,
    "IMAGE_SUBSYSTEM_WINDOWS_CUI": 3,
    "IMAGE_SUBSYSTEM_POSIX_CUI": 7,
    "IMAGE_SUBSYSTEM_NATIVE_WINDOWS": 8,
    "IMAGE_SUBSYSTEM_WINDOWS_CE_GUI": 9,
    "IMAGE_SUBSYSTEM_EFI_APPLICATION": 10,
    "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER": 11,
    "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER": 12,
    "IMAGE_SUBSYSTEM_EFI_ROM": 13,
    "IMAGE_SUBSYSTEM_XBOX": 14,
    "IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION": 16,
}
# Optional header DllCharacteristics flags: name -> bit value.
DLL_CHARACTERISTICS = {
    "IMAGE_LIBRARY_PROCESS_INIT": 0x0001,
    "IMAGE_LIBRARY_PROCESS_TERM": 0x0002,
    "IMAGE_LIBRARY_THREAD_INIT": 0x0004,
    "IMAGE_LIBRARY_THREAD_TERM": 0x0008,
    "IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA": 0x0020,
    "IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE": 0x0040,
    "IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY": 0x0080,
    "IMAGE_DLLCHARACTERISTICS_NX_COMPAT": 0x0100,
    "IMAGE_DLLCHARACTERISTICS_NO_ISOLATION": 0x0200,
    "IMAGE_DLLCHARACTERISTICS_NO_SEH": 0x0400,
    "IMAGE_DLLCHARACTERISTICS_NO_BIND": 0x0800,
    "IMAGE_DLLCHARACTERISTICS_APPCONTAINER": 0x1000,
    "IMAGE_DLLCHARACTERISTICS_WDM_DRIVER": 0x2000,
    "IMAGE_DLLCHARACTERISTICS_GUARD_CF": 0x4000,
    "IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE": 0x8000
}
def get_data(url):
    """Download JSON data from a Manalyzer URL."""
    r = requests.get(url)
    return r.json()
def feature_extraction(data):
    """Extract a flat feature dict from Manalyzer JSON output.

    ``data`` maps the file's md5 to its parsed report sections.  Missing
    optional sections (Imports / Resources / Version Info) produce
    zero-valued features so the vector length stays constant.
    """
    features = {}
    # works on both Python 2 and 3 (``data.keys()[0]`` breaks on Python 3)
    md5 = list(data)[0]
    data = data[md5]
    features['md5'] = md5
    features['Machine'] = MACHINE_TYPES[data['PE Header']['Machine']]
    features['SizeOfOptionalHeader'] = data['PE Header']['SizeOfOptionalHeader']
    # characteristics arrive as flag names; fold them back into a bitmask
    features['Characteristics'] = 0
    for charac in data['PE Header']['Characteristics']:
        features['Characteristics'] += PE_CHARACTERISTICS[charac]
    opt = data['Image Optional Header']
    features['SizeOfCode'] = opt['SizeOfCode']
    features['SizeOfInitializedData'] = opt['SizeOfInitializedData']
    features['SizeOfUninitializedData'] = opt['SizeOfUninitializedData']
    features['AddressOfEntryPoint'] = opt['AddressOfEntryPoint']
    # BUG FIX: this used to copy AddressOfEntryPoint; read the real
    # BaseOfCode, keeping the old value as the fallback when it is absent.
    # NOTE(review): if the shipped classifier was trained on the old buggy
    # value, retrain before relying on this feature -- confirm.
    features['BaseOfCode'] = opt.get('BaseOfCode', opt['AddressOfEntryPoint'])
    # BaseOfData does not exist for PE32+ binaries
    features['BaseOfData'] = opt.get('BaseOfData', 0)
    features['ImageBase'] = opt['ImageBase']
    features['SectionAlignment'] = opt['SectionAlignment']
    features['FileAlignment'] = opt['FileAlignment']
    osv = opt['OperatingSystemVersion'].split('.')
    features['MajorOperatingSystemVersion'] = int(osv[0])
    features['MinorOperatingSystemVersion'] = int(osv[1])
    ssv = opt['SubsystemVersion'].split('.')
    features['MajorSubsystemVersion'] = int(ssv[0])
    features['MinorSubsystemVersion'] = int(ssv[1])
    features['Subsystem'] = SUBSYSTEMS[opt['Subsystem']]
    features['DllCharacteristics'] = 0
    for char in opt['DllCharacteristics']:
        features['DllCharacteristics'] += DLL_CHARACTERISTICS[char]
    # note Manalyzer's lowercase 'of' in these four source keys
    features['SizeOfStackReserve'] = opt['SizeofStackReserve']
    features['SizeOfStackCommit'] = opt['SizeofStackCommit']
    features['SizeOfHeapReserve'] = opt['SizeofHeapReserve']
    features['SizeOfHeapCommit'] = opt['SizeofHeapCommit']
    features['LoaderFlags'] = opt['LoaderFlags']
    features['NumberOfRvaAndSizes'] = opt['NumberOfRvaAndSizes']
    # Sections: aggregate entropy/size statistics.
    # FIX: list comprehensions replace map() -- on Python 3, the old map
    # iterators were exhausted after the first min()/sum() call.
    sections = data['Sections'].values()
    features['SectionsNb'] = len(data['Sections'])
    entropy = [s['Entropy'] for s in sections]
    features['SectionsMeanEntropy'] = sum(entropy) / float(len(entropy))
    features['SectionsMinEntropy'] = min(entropy)
    features['SectionsMaxEntropy'] = max(entropy)
    raw_sizes = [s['SizeOfRawData'] for s in sections]
    features['SectionsMeanRawsize'] = sum(raw_sizes) / float(len(raw_sizes))
    features['SectionsMinRawsize'] = min(raw_sizes)
    features['SectionsMaxRawsize'] = max(raw_sizes)
    virtual_sizes = [s['VirtualSize'] for s in sections]
    features['SectionsMeanVirtualsize'] = sum(virtual_sizes) / float(len(virtual_sizes))
    features['SectionsMinVirtualsize'] = min(virtual_sizes)
    features['SectionsMaxVirtualsize'] = max(virtual_sizes)
    # Imports
    if 'Imports' in data:
        features['ImportsNbDLL'] = len(data['Imports'])
        features['ImportsNb'] = sum(len(v) for v in data['Imports'].values())
    else:
        features['ImportsNbDLL'] = 0
        features['ImportsNb'] = 0
    # Resources
    if 'Resources' in data:
        resources = data['Resources'].values()
        features['ResourcesNb'] = len(data['Resources'])
        entropy = [r['Entropy'] for r in resources]
        features['ResourcesMeanEntropy'] = sum(entropy) / float(len(entropy))
        features['ResourcesMinEntropy'] = min(entropy)
        features['ResourcesMaxEntropy'] = max(entropy)
        sizes = [r['Size'] for r in resources]
        features['ResourcesMeanSize'] = sum(sizes) / float(len(sizes))
        features['ResourcesMinSize'] = min(sizes)
        features['ResourcesMaxSize'] = max(sizes)
    else:
        features['ResourcesNb'] = 0
        features['ResourcesMeanEntropy'] = 0
        features['ResourcesMinEntropy'] = 0
        features['ResourcesMaxEntropy'] = 0
        features['ResourcesMeanSize'] = 0
        features['ResourcesMinSize'] = 0
        features['ResourcesMaxSize'] = 0
    if "Version Info" in data:
        features['VersionInformationSize'] = len(data['Version Info'].keys())
    else:
        features['VersionInformationSize'] = 0
    return features
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Detect malicious file from manalyzer infos')
    parser.add_argument('URL', help='Manalyzer url')
    args = parser.parse_args()
    # Load classifier
    clf = joblib.load(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        'classifier/classifier.pkl'
    ))
    # Ordered list of feature names the classifier was trained with.
    # NOTE(review): text-mode pickle read works on Python 2 only; Python 3
    # would need 'rb' -- consistent with the python2 shebang above.
    features = pickle.loads(open(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        'classifier/features.pkl'),
        'r').read()
    )
    if 'manalyzer.org' not in args.URL:
        print('This is not a manalyzer url')
        sys.exit(1)
    # accept report URLs by rewriting them to the JSON endpoint
    if '/report/' in args.URL:
        url = args.URL.replace('/report/', '/json/')
    else:
        url = args.URL
    data = get_data(url)
    if data == {}:
        print("Impossible to retrieve the data, quitting")
        sys.exit(1)
    else:
        # Extract the features
        data_pe = feature_extraction(data)
        # project onto the training feature order (Python 2: map -> list)
        pe_features = map(lambda x:data_pe[x], features)
        res= clf.predict([pe_features])[0]
        # model output: 0 -> malicious, 1 -> legitimate
        print('The file %s is %s' % (
            data_pe['md5'],
            ['malicious', 'legitimate'][res])
        )
|
# Time: O(n^2 * l^2), n is the number of strings
# Space: O(1) , l is the max length of strings
class Solution:
    def stringMatching(self, words: List[str]) -> List[str]:
        """Return every word that occurs as a substring of another word.

        Brute force: each candidate is tested against every other word.
        """
        matches = []
        for idx, candidate in enumerate(words):
            if any(candidate in other
                   for pos, other in enumerate(words) if pos != idx):
                matches.append(candidate)
        return matches
class Solution(object):
    def stringMatching(self, words):
        """Return words that are substrings of other words.

        Joining all words with spaces means a word that is a substring of
        another word occurs at least twice in the joined text (once as
        itself, once inside the longer word).
        """
        joined = ' '.join(words)
        return [w for w in words if joined.count(w) >= 2]
# KMP
# Time: O(n^2 * l), n is the number of strings
# Space: O(l) , l is the max length of strings
class Solution:
    def stringMatching(self, words: List[str]) -> List[str]:
        """Return every word that is a substring of another word (via KMP)."""
        def getPrefix(word):
            # Standard KMP failure function: prefix[i] is the length of the
            # longest proper prefix of word[:i+1] that is also its suffix.
            prefix = [0]
            j = 0
            for s in word[1:]:
                while j and s != word[j]:
                    j = prefix[j-1]
                if s == word[j]:
                    j += 1
                prefix += j,
            return prefix
        def kmp(word, pattern, prefix):
            # True iff pattern occurs in word; scans word once using the
            # precomputed failure function.
            if len(word) < len(pattern):
                return False
            i = j = 0
            for s in word:
                while j and pattern[j] != s:
                    j = prefix[j-1]
                if pattern[j] == s:
                    j += 1
                if j == len(pattern):
                    return True
            return False
        res = []
        for i, pattern in enumerate(words):
            prefix = getPrefix(pattern)
            for j, word in enumerate(words):
                # j != i: a word must be found inside a *different* word.
                if j != i and kmp(word, pattern, prefix):
                    res += pattern,
                    break
        return res
class Solution:
    def stringMatching(self, words: List[str]) -> List[str]:
        """Return words that are substrings of other words, via a suffix trie.

        Words are processed longest-first; every suffix of each processed
        word is inserted, so a later word found in the trie must be a
        substring of some longer, earlier word.  NOTE: sorts `words`
        in place, so result order follows descending length.
        """
        def add(word: str):
            # Insert word as a path of nested dicts.
            node = trie
            for c in word:
                node = node.setdefault(c, {})
        def get(word: str) -> bool:
            # True iff word is a path already present in the trie.
            node = trie
            for c in word:
                if (node := node.get(c)) is None: return False
            return True
        words.sort(key=len, reverse=True)
        trie, result = {}, []
        for word in words:
            if get(word): result.append(word)
            # Register every suffix so future lookups find any substring.
            for i in range(len(word)):
                add(word[i:])
        return result
# Time: O(n + m + z) = O(n), n is the total size of patterns
# , m is the total size of query string
# , z is the number of all matched strings
# , O(n) = O(m) = O(z) in this problem
# Space: O(t), t is the total size of ac automata trie
import collections
class AhoNode(object):
    """One state of the Aho-Corasick trie."""
    def __init__(self):
        # Child states, created lazily on first access.
        self.children = collections.defaultdict(AhoNode)
        # Indices of the patterns that terminate at this state.
        self.indices = []
        # Suffix (failure) link and output link; filled in by AhoTrie.
        self.suffix = None
        self.output = None
class AhoTrie(object):
    """Aho-Corasick automaton built over a list of patterns."""
    def step(self, letter):
        """Advance the automaton by one letter; return matched pattern indices."""
        # Follow suffix (failure) links until the letter can be consumed.
        while self.__node and letter not in self.__node.children:
            self.__node = self.__node.suffix
        self.__node = self.__node.children[letter] if self.__node else self.__root
        return self.__get_ac_node_outputs(self.__node)
    def reset(self):
        """Return the automaton to its root state."""
        self.__node = self.__root
    def __init__(self, patterns):
        self.__root = self.__create_ac_trie(patterns)
        self.__node = self.__create_ac_suffix_and_output_links(self.__root)
    def __create_ac_trie(self, patterns):  # Time: O(n), Space: O(t)
        # Plain trie: record each pattern's index at its terminal node.
        root = AhoNode()
        for i, pattern in enumerate(patterns):
            node = root
            for c in pattern:
                node = node.children[c]
            node.indices.append(i)
        return root
    def __create_ac_suffix_and_output_links(self, root):  # Time: O(n), Space: O(t)
        # BFS assigns each node its suffix (failure) link and its output
        # link (nearest suffix state that terminates some pattern).
        queue = collections.deque()
        for node in root.children.values():
            queue.append(node)
            node.suffix = root
        while queue:
            node = queue.popleft()
            for c, child in node.children.items():
                queue.append(child)
                suffix = node.suffix
                while suffix and c not in suffix.children:
                    suffix = suffix.suffix
                child.suffix = suffix.children[c] if suffix else root
                child.output = child.suffix if child.suffix.indices else child.suffix.output
        return root
    def __get_ac_node_outputs(self, node):  # Time: O(z)
        # Collect pattern indices ending at this state or at any state
        # reachable through the output-link chain.
        result = []
        for i in node.indices:
            result.append(i)
        output = node.output
        while output:
            for i in output.indices:
                result.append(i)
            output = output.output
        return result
class Solution(object):
    def stringMatching(self, words):
        """
        :type words: List[str]
        :rtype: List[str]

        Build one automaton over all words, then stream each word through
        it; any *other* word reported as matched is one of its substrings.
        NOTE: result order follows set iteration and is unspecified.
        """
        trie = AhoTrie(words)
        lookup = set()
        for i in range(len(words)):
            trie.reset()
            for c in words[i]:
                for j in trie.step(c):
                    # j != i: a word matching itself does not count.
                    if j != i:
                        lookup.add(j)
        return [words[i] for i in lookup]
|
<filename>Neural Network for Regression.py
"""
@author: <NAME>
"""
# importing all the required libraries
import numpy as np
import matplotlib .pyplot as plt
# function to initialize parameters to be uniformly distributed random numbers
# between 0.0 and 1.0
def randInitializeWeights(L_in, L_out):
    """Return an (L_out, 1 + L_in) weight matrix drawn uniformly from [0, 1)."""
    return np.random.rand(L_out, 1 + L_in)
# function to calculate sigmoid of activity
def sigmoid(z):
    """Logistic function 1 / (1 + exp(-z)), applied elementwise."""
    denom = 1 + np.exp(-z)
    return 1 / denom
# function to calculate sigmoid gradient
def sigmoidGradient(z):
    """Derivative of the sigmoid: sigmoid(z) * (1 - sigmoid(z)), elementwise."""
    s = sigmoid(z)
    return np.multiply(s, 1 - s)
# function to compute cost and gradients
def computeCost(X, y, Theta1, Theta2):
    """Forward- and back-propagate one batch through a 1-hidden-layer net.

    Inputs are np.matrix-like, so `*` is matrix multiplication.
    Returns (J, Theta1_grad, Theta2_grad): the mean-squared-error cost and
    the gradients for both weight matrices.
    """
    m, n = X.shape
    # Forward Propagation:
    # input layer values (with bias unit)
    a1 = np.concatenate((np.ones((m, 1)), X), axis=1)
    # calculating activity of hidden layer
    z2 = a1 * Theta1.T
    a = z2.shape[0]
    # calculating activation of hidden layer (with bias unit)
    a2 = np.concatenate((np.ones((a, 1)), sigmoid(z2)), axis=1)
    # calculating activity of output layer; its activation is the hypothesis
    z3 = a2 * Theta2.T
    h = sigmoid(z3)
    # mean squared error cost
    J = (1/(2 * m)) * np.sum(np.square(np.subtract(h, y)))
    # Backpropagation:
    d3 = h - y
    d2 = np.multiply(d3 * Theta2, sigmoidGradient(np.concatenate((np.ones((a, 1)), z2), axis=1)))
    # Drop the bias column of d2.  The original `d2[:, [1, d-1]]` picked
    # columns 1 and d-1, which equals `1:` only because the hidden layer
    # has 2 units (d == 3); slicing generalizes to any hidden-layer size.
    d2 = d2[:, 1:]
    delta1 = d2.T * a1
    delta2 = d3.T * a2
    Theta1_grad = delta1 / m
    Theta2_grad = delta2 / m
    return J, Theta1_grad, Theta2_grad
# function for gradient descent
# function for gradient descent
def gradientDescent(x, y, Theta1, Theta2, alpha, num_iters):
    """Run num_iters steps of batch gradient descent.

    Returns the per-iteration cost history, the parameter history (one row
    per iteration, both thetas flattened side by side), and the final thetas.
    """
    # initializing matrix to store cost history
    J_history = np.zeros((num_iters,1))
    # initializing matrix to store parameter/theta history
    # NOTE(review): the initial thetas appear to be plain ndarrays (ravel()
    # is 1-D, so axis=0 builds one 1x12 row), while inside the loop they
    # become matrix-like and axis=1 is used instead -- confirm this
    # ndarray/matrix interplay is intended.
    nn_params_history = np.matrix(np.concatenate((Theta1.ravel(), Theta2.ravel()), axis = 0))
    for iter in range(0, num_iters):
        J, Theta1_grad, Theta2_grad = computeCost(x, y, Theta1, Theta2)
        #updating parameters/thetas
        Theta1 = np.subtract(Theta1, alpha * Theta1_grad)
        Theta2 = np.subtract(Theta2, alpha * Theta2_grad)
        J_history[iter] = J
        nn_params_history = np.concatenate((nn_params_history, np.concatenate((Theta1.ravel(), Theta2.ravel()), axis = 1)), axis = 0)
    return J_history, nn_params_history, Theta1, Theta2
def main():
    """Train a tiny 2-2-2 network on a single example and plot the curves."""
    input_layer_size = 2
    hidden_layer_size = 2
    output_layer_size = 2
    # training data
    x = np.matrix([0.05, 0.1])
    y = np.matrix([0.01, 0.99])
    m, n = x.shape
    initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
    initial_Theta2 = randInitializeWeights(hidden_layer_size, output_layer_size)
    # no. of iterations
    iterations = 7000
    # learning rate
    alpha = 0.1
    J_history, nn_params_history, Theta1, Theta2 = gradientDescent(x, y, initial_Theta1, initial_Theta2, alpha, iterations)
    # plotting total cost vs iterations
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title('total cost vs iterations')
    ax.set_xlabel(r'iterations')
    ax.set_ylabel(r'$J(\theta)$')
    ax.scatter(range(iterations), J_history, color='blue', s=10)
    fig.set_size_inches(8, 5)
    plt.savefig('total cost vs iterations')
    fig.show()
    # plotting each parameter/theta vs iterations
    # Parameter i maps to a layer (1 or 2), destination unit (1 or 2) and
    # source unit (0=bias, 1, 2).  Computing the labels replaces the
    # original 12-branch if/elif chain and produces the exact same titles
    # and output file names.
    for i in range(12):
        layer = 1 + i // 6
        unit = 1 + (i % 6) // 3
        src = i % 3
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.set_xlabel(r'iterations')
        ax.scatter(range(iterations+1), nn_params_history[:,i], color='blue', s=10)
        fig.set_size_inches(8, 5)
        label = r'$\theta^%d_%d,_%d$' % (layer, unit, src)
        ax.set_title(label + ' vs iterations')
        ax.set_ylabel(label)
        plt.savefig('theta_%d_%d_%d vs iterations' % (layer, unit, src))
        fig.show()
# Train and plot only when executed as a script.
if __name__ == '__main__':
    main()
# Borrowed for PyTorch repo
# This script outputs relevant system environment info
# Run it with `python collect_env.py`.
import re
import subprocess
import sys
from collections import namedtuple
from setup import NeodroidPackage
import neodroid
# True when running under Python 3 (affects subprocess output decoding).
PY3 = sys.version_info >= (3, 0)
# System Environment Information
SystemEnv = namedtuple(
    "SystemEnv",
    [
        "neo_version",
        "is_a_development_build",
        "os",
        "python_version",
        "pip_version",  # 'pip' or 'pip3'
        "pip_packages",
    ],
)
def run_cmd(command):
    """Run *command* through the shell.

    Returns (return-code, stdout, stderr); the streams are decoded and
    stripped on Python 3.
    """
    p = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
    )
    output, err = p.communicate()
    rc = p.returncode
    if PY3:
        # Decode as UTF-8 rather than ASCII so non-ASCII tool output does
        # not raise UnicodeDecodeError; undecodable bytes are replaced.
        output = output.decode("utf-8", errors="replace")
        err = err.decode("utf-8", errors="replace")
    return rc, output.strip(), err.strip()
def run_and_read_all(run_lambda, command):
    """Runs command using run_lambda; reads and returns entire output if rc is 0"""
    rc, out, _ = run_lambda(command)
    # `rc is not 0` relied on CPython small-int interning (and warns on
    # modern interpreters); compare by value instead.
    if rc != 0:
        return None
    return out
def run_and_parse_first_match(run_lambda, command, regex):
    """Runs command using run_lambda, returns the first regex match if it exists"""
    rc, out, _ = run_lambda(command)
    # Identity test `is not 0` replaced with value equality.
    if rc != 0:
        return None
    match = re.search(regex, out)
    if match is None:
        return None
    return match.group(1)
def get_platform():
    """Return a normalized platform name derived from sys.platform."""
    for name in ("linux", "win32", "cygwin", "darwin"):
        if sys.platform.startswith(name):
            return name
    # Fall back to the raw identifier for anything unrecognized.
    return sys.platform
def get_mac_version(run_lambda):
    """Return the macOS product version string (e.g. '10.15.7'), or None."""
    return run_and_parse_first_match(run_lambda, "sw_vers -productVersion", r"(.*)")
def get_windows_version(run_lambda):
    """Return the Windows edition string via WMIC, or None."""
    return run_and_read_all(run_lambda, "wmic os get Caption | findstr /v Caption")
def get_lsb_version(run_lambda):
    """Return the distro description from `lsb_release -a`, or None."""
    return run_and_parse_first_match(
        run_lambda, "lsb_release -a", r"Description:\t(.*)"
    )
def check_release_file(run_lambda):
    """Return the PRETTY_NAME value from /etc/*-release, or None."""
    # The original pattern was r'PRETTY_NAME="(. *)"' -- the stray space
    # only matched a single character followed by spaces, so real values
    # such as "Ubuntu 18.04.3 LTS" never matched.
    return run_and_parse_first_match(
        run_lambda, "cat /etc/*-release", r'PRETTY_NAME="(.*)"'
    )
def get_os(run_lambda):
    """Return a human-readable OS description for the current platform."""
    platform = get_platform()
    # String identity (`is`) only worked through interning accidents;
    # compare by value.
    if platform == "win32" or platform == "cygwin":
        return get_windows_version(run_lambda)
    if platform == "darwin":
        version = get_mac_version(run_lambda)
        if version is None:
            return None
        return f"Mac OSX {version}"
    if platform == "linux":
        # Ubuntu/Debian based
        desc = get_lsb_version(run_lambda)
        if desc is not None:
            return desc
        # Try reading /etc/*-release
        desc = check_release_file(run_lambda)
        if desc is not None:
            return desc
        return platform
    # Unknown platform
    return platform
def req_grep_fmt():
    """Build a grep alternation ("a\|b\|...") of Neodroid requirement names."""
    requirements = NeodroidPackage().extras["all"] + NeodroidPackage().requirements
    # Strip version qualifiers ("pkg>=1.0" / "pkg==1.0" -> "pkg").
    names = [req.split(">")[0].split("=")[0] for req in requirements]
    return "\|".join(names)
def get_pip_packages(run_lambda):
    """Return (pip-name, output) listing Neodroid-related installed packages.

    People generally have `pip` as `pip` or `pip3`; on Python 3 both are
    tried and `pip3` is preferred when both respond.
    """
    def run_with_pip(pip):
        # `- -format` (with a space) was not a valid pip option; the
        # intended flag is `--format=legacy`.
        return run_and_read_all(
            run_lambda,
            pip + f' list --format=legacy | grep "Neodroid\|{req_grep_fmt()}"',
        )
    if not PY3:
        return "pip", run_with_pip("pip")
    # Try to figure out if the user is running pip or pip3.
    out2 = run_with_pip("pip")
    out3 = run_with_pip("pip3")
    number_of_pips = len([x for x in [out2, out3] if x is not None])
    if number_of_pips == 0:
        return "pip", out2
    if number_of_pips == 1:
        if out2 is not None:
            return "pip", out2
        return "pip3", out3
    # num_pips is 2. Return pip3 by default b/c that most likely
    # is the one associated with Python 3
    return "pip3", out3
def get_env_info():
    """Collect a SystemEnv tuple describing the current interpreter/machine."""
    run_lambda = run_cmd
    pip_version, pip_list_output = get_pip_packages(run_lambda)
    return SystemEnv(
        neo_version=neodroid.__version__,
        is_a_development_build=neodroid.IS_DEVELOP,
        python_version=f"{sys.version_info[0]}.{sys.version_info[1]}",
        pip_version=pip_version,
        pip_packages=pip_list_output,
        os=get_os(run_lambda),
    )
def pretty_str(env_info):
    """Format a SystemEnv tuple into the final human-readable report."""
    def replace_all_none_objects(dct, replacement="Could not collect"):
        # Substitute a placeholder for values that could not be gathered.
        for key in dct.keys():
            if dct[key] is not None:
                continue
            dct[key] = replacement
        return dct
    def replace_bools(dct, true="Yes", false="No"):
        # Render booleans as Yes/No.
        for key in dct.keys():
            if dct[key] is True:
                dct[key] = true
            elif dct[key] is False:
                dct[key] = false
        return dct
    def prepend(text, tag="[prepend]"):
        # Prefix every line of text with tag.
        lines = text.split("\n")
        updated_lines = [tag + line for line in lines]
        return "\n".join(updated_lines)
    def replace_if_empty(text, replacement="No relevant packages"):
        # '' (but not None) means the query ran and found nothing.
        if text is not None and len(text) == 0:
            return replacement
        return text
    mutable_dict = env_info._asdict()
    mutable_dict = replace_bools(mutable_dict)  # Replace True with Yes, False with No
    mutable_dict = replace_all_none_objects(
        mutable_dict
    )  # Replace all None objects with 'Could not collect'
    mutable_dict["pip_packages"] = replace_if_empty(
        mutable_dict["pip_packages"]
    )  # If either of these are '', replace with 'No relevant packages'
    if mutable_dict["pip_packages"]:
        # Tag each package line with the pip executable that produced it.
        mutable_dict["pip_packages"] = prepend(
            mutable_dict["pip_packages"], f"[{env_info.pip_version}] "
        )
    return r"""
Neo version: {neo_version}
Is a development build: {is_a_development_build}
OS: {os}
Python version: {python_version}
Versions of relevant libraries:
{pip_packages}
""".format(
        **mutable_dict
    ).strip()
def get_pretty_env_info():
    """Return the collected environment information as a formatted string."""
    return pretty_str(get_env_info())
def main():
    """Print the environment report to stdout."""
    # NOTE(review): this first print looks like leftover debugging -- it runs
    # the pip query an extra time and dumps the raw tuple before the report;
    # confirm whether it is intentional.
    print(get_pip_packages(run_cmd))
    print("Collecting environment information...")
    output = get_pretty_env_info()
    print(output)
if __name__ == "__main__":
    main()
|
<filename>web/addons/l10n_in_hr_payroll/report/report_hr_salary_employee_bymonth.py<gh_stars>1-10
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.report import report_sxw
class report_hr_salary_employee_bymonth(report_sxw.rml_parse):
    """RML parser: per-employee salary totals, one column per report month.

    Python 2 code (uses the `<>` operator); values are exposed to the
    report template through localcontext.
    """
    def __init__(self, cr, uid, name, context):
        super(report_hr_salary_employee_bymonth, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'get_employee': self.get_employee,
            'get_periods': self.get_periods,
            'get_months_tol': self.get_months_tol,
            'get_total': self.get_total,
        })
        self.context = context
        # 'm-yyyy' keys of the report months, padded with '' up to 12.
        self.mnths = []
        # One row per get_employee() call: per-month column totals.
        self.mnths_total = []
        # Grand total across employees and months.
        self.total = 0.0
    def get_periods(self, form):
        """Return month-name headers and record the month keys for get_salary."""
        # Get start year-month-date and end year-month-date
        first_year = int(form['start_date'][0:4])
        last_year = int(form['end_date'][0:4])
        first_month = int(form['start_date'][5:7])
        last_month = int(form['end_date'][5:7])
        no_months = (last_year-first_year) * 12 + last_month - first_month + 1
        current_month = first_month
        current_year = first_year
        # Get name of the months from integer
        mnth_name = []
        for count in range(0, no_months):
            m = datetime.date(current_year, current_month, 1).strftime('%b')
            mnth_name.append(m)
            self.mnths.append(str(current_month) + '-' + str(current_year))
            # December wraps to January of the following year.
            if current_month == 12:
                current_month = 0
                current_year = last_year
            current_month = current_month + 1
        # Pad to 12 columns so the report grid is always full width.
        for c in range(0, (12-no_months)):
            mnth_name.append('')
            self.mnths.append('')
        return [mnth_name]
    def get_salary(self, form, emp_id, emp_salary, total_mnths):
        """Append one employee's monthly totals; return updated accumulators."""
        category_id = form.get('category_id', [])
        category_id = category_id and category_id[0] or False
        # Sum 'done' payslip lines per month for this employee/category.
        self.cr.execute("select to_char(date_to,'mm-yyyy') as to_date ,sum(pl.total) \
                        from hr_payslip_line as pl \
                        left join hr_payslip as p on pl.slip_id = p.id \
                        left join hr_employee as emp on emp.id = p.employee_id \
                        left join resource_resource as r on r.id = emp.resource_id \
                        where p.state = 'done' and p.employee_id = %s and pl.category_id = %s \
                        group by r.name, p.date_to,emp.id",(emp_id, category_id,))
        sal = self.cr.fetchall()
        salary = dict(sal)
        total = 0.0
        cnt = 0
        for month in self.mnths:
            if month <> '':
                # Zero-pad single-digit months to match to_char 'mm-yyyy'.
                if len(month) != 7:
                    month = '0' + str(month)
                if month in salary and salary[month]:
                    emp_salary.append(salary[month])
                    total += salary[month]
                    total_mnths[cnt] = total_mnths[cnt] + salary[month]
                else:
                    emp_salary.append(0.00)
            else:
                # Padding column: blank cell and blank column total.
                emp_salary.append('')
                total_mnths[cnt] = ''
            cnt = cnt + 1
        return emp_salary, total, total_mnths
    def get_employee(self, form):
        """Return [[name, m1..m12 amounts, row total], ...] for the selection."""
        emp_salary = []
        salary_list = []
        total_mnths=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        emp_obj = self.pool.get('hr.employee')
        emp_ids = form.get('employee_ids', [])
        employees = emp_obj.browse(self.cr, self.uid, emp_ids, context=self.context)
        for emp_id in employees:
            emp_salary.append(emp_id.name)
            total = 0.0
            emp_salary, total, total_mnths = self.get_salary(form, emp_id.id, emp_salary, total_mnths)
            emp_salary.append(total)
            salary_list.append(emp_salary)
            emp_salary = []
        self.mnths_total.append(total_mnths)
        return salary_list
    def get_months_tol(self):
        """Return the accumulated per-month totals rows."""
        return self.mnths_total
    def get_total(self):
        """Sum the month columns into the grand total (skipping '' padding).

        NOTE(review): the range starts at index 1, so the first month
        column is excluded from the grand total -- confirm this is intended.
        """
        for item in self.mnths_total:
            for count in range(1, len(item)):
                if item[count] == '':
                    continue
                self.total += item[count]
        return self.total
class wrapped_report_employee_salary_bymonth(osv.AbstractModel):
    """QWeb report model wrapping the legacy RML parser above."""
    _name = 'report.l10n_in_hr_payroll.report_hrsalarybymonth'
    _inherit = 'report.abstract_report'
    _template = 'l10n_in_hr_payroll.report_hrsalarybymonth'
    _wrapped_report_class = report_hr_salary_employee_bymonth
|
<reponame>HelloAllen8893/AliOS-Things<filename>build/site_scons/scons_upload.py
import os, json
import subprocess
import sys
import platform
import serial
from serial.tools import miniterm
from serial.tools.list_ports import comports
from scons_util import *
# Global variables
# Serial port used for flashing; lazily asked from the user on first use.
PORT = None
# Functions
def _run_upload_cmd(target, aos_path, cmd_file, program_path=None, bin_dir=None):
    """ Run the command from cmd file.

    Reads the JSON flash config, substitutes @AOSROOT@/@TARGET@/@PORT@
    placeholders, optionally redirects binaries to bin_dir, and executes
    the resulting command.  Returns the subprocess return code.
    """
    ret = 0
    usermsg = None
    global PORT
    host_os = get_host_os()
    if not host_os:
        error("Unsupported Operating System!")
    configs = read_json(cmd_file)
    if not configs:
        error("Can not read flash configs from %s" % cmd_file)
    if 'description' in configs:
        usermsg = configs['description']
    if usermsg:
        log(usermsg + '\n\n')
    # Ask the user for a serial port once; cached in module-level PORT.
    if not PORT and '@PORT@' in configs['cmd']:
        PORT = miniterm.ask_for_port()
    exec_cmd = []
    for item in configs['cmd']:
        if type(item) == dict:
            # OS-specific fragment: pick the entry for the current host.
            if host_os in item:
                item = item[host_os]
            else:
                error("Flash command is not defined for %s!" % host_os)
        # Substitute placeholders; program_path overrides the SDK root for
        # out-of-tree builds.
        if '@AOSROOT@/out/@TARGET@' in item:
            if program_path:
                item = item.replace('@AOSROOT@', program_path)
        item = item.replace('@AOSROOT@', aos_path)
        item = item.replace('@TARGET@', target)
        if PORT:
            item = item.replace('@PORT@', PORT)
        exec_cmd += [item]
    # Support user defined binaries' dir
    if bin_dir and os.path.isdir(bin_dir):
        for index, value in enumerate(exec_cmd):
            if '.bin' in value or '.cfg' in value or '.elf' in value:
                basename = os.path.basename(value)
                exec_cmd[index] = os.path.join(bin_dir, basename)
    info("Running cmd:\n\t'%s'" % ' '.join(exec_cmd))
    if (host_os == 'Win32'):
        ret = subprocess.call(exec_cmd, shell=True)
        log("---host_os:%s\n" % host_os)
    else:
        ret = subprocess.call(exec_cmd, stdout=sys.stdout, stderr=sys.stderr)
        log("---host_os:%s\n" % host_os)
    return ret
def _upload_image(target, aos_path, registry_file, program_path=None, bin_dir=None):
    """ Upload image according to configs.

    Resolves the board's command files from the registry (exact match,
    then partial match, then 'ALLOTHERS') and runs each one.
    Returns the last command's return code, or 1 if unsupported.
    """
    (app, board) = target.split('@')
    cmd_file_dir = os.path.dirname(registry_file)
    cmd_files = None
    ret = 0
    # Check binary exist
    elf_file = os.path.join(program_path if program_path else aos_path, "out", target, "binary", "%s.bin" % target)
    if not os.path.exists(elf_file):
        error("Please build target[%s] first" % target)
    # Get valid board from registry file
    registry_board = read_json(registry_file)
    if not registry_board:
        error("Can not read registered boards from %s" % registry_file)
    # 1. Fully match with board name: key = board
    for key in registry_board:
        if key == board:
            cmd_files = registry_board[key]
    # 2. Part match with board name: key in board
    if not cmd_files:
        for key in registry_board:
            if key in board:
                cmd_files = registry_board[key]
    # 3. Use 'ALLOTHERS' if cmd_files is still None
    if not cmd_files:
        if 'ALLOTHERS' in registry_board:
            cmd_files = registry_board['ALLOTHERS']
    if cmd_files:
        for cmd_file in cmd_files:
            if "not support" in cmd_file:
                info("This command is not supported on %s" % board)
                return 1
            ret = _run_upload_cmd(target, aos_path, os.path.join(cmd_file_dir, cmd_file), program_path, bin_dir)
    else:
        error("The board %s is not registered in %s" % (board, registry_file))
    return ret
def aos_upload(target, work_path=None, bin_dir=None):
    """Flash a built 'app@board' target.

    Locates the AOS SDK (via AOS_SDK_PATH or the current directory) and
    delegates to _upload_image using the bundled board registry.
    Returns the flash command's return code.
    """
    program_path = None
    if '@' not in target or len(target.split('@')) != 2:
        error("Target invalid!")
    if work_path:
        # Out-of-tree build: SDK location must come from the environment.
        aos_path = os.environ.get("AOS_SDK_PATH")
        if not aos_path or not os.path.isdir(aos_path):
            error("Looks like AOS_SDK_PATH is not correctly set." )
        program_path = os.getcwd()
    else:
        # Heuristic: a checkout containing core/ or include/aos is the SDK root.
        if os.path.isdir('./core') or os.path.isdir('./include/aos'):
            info("Currently in aos_sdk_path: '%s'\n" % os.getcwd())
            aos_path = os.getcwd()
        else:
            info("Not in aos_sdk_path, curr_path:'%s'\n" % os.getcwd())
            aos_path = os.environ.get("AOS_SDK_PATH")
            if not aos_path or not os.path.isdir(aos_path):
                error("Looks like AOS_SDK_PATH is not correctly set." )
            else:
                info("Load aos configs success, set '%s' as sdk path\n" % aos_path)
    registry_file = os.path.split(os.path.realpath(__file__))[0] + '/upload/registry_board.json'
    if os.path.isfile(registry_file):
        ret = _upload_image(target, aos_path, registry_file, program_path, bin_dir)
    else:
        # NOTE(review): if error() (from scons_util) returns instead of
        # aborting, `ret` below would be unbound -- confirm error() exits.
        error("Can not find file: %s" % registry_file)
    if ret == 0:
        info("Firmware upload succeed!\n")
    else:
        error("Firmware upload failed!\n")
    return ret
|
<filename>python/StateRepresentation.py
#!/usr/bin/env python
"""Uses tilecoding to create state.
"""
import numpy as np
from tiles import *
import time
# image tiles
# image tiles
# Number of fixed random pixels sampled from each frame.
NUM_RANDOM_POINTS = 100
CHANNELS = 4
NUM_IMAGE_TILINGS = 4
NUM_IMAGE_INTERVALS = 4
SCALE_RGB = NUM_IMAGE_INTERVALS / 256.0
IMAGE_START_INDEX = 0
# constants relating to image size recieved
IMAGE_HEIGHT = 480 # rows
IMAGE_WIDTH = 640 # columns
NUMBER_OF_COLOR_CHANNELS = 3 #red, blue, green
# Tile-coded length per sampled pixel: one one-hot region per tiling.
PIXEL_FEATURE_LENGTH = np.power(NUM_IMAGE_INTERVALS, NUMBER_OF_COLOR_CHANNELS) * NUM_IMAGE_TILINGS
DID_BUMP_FEATURE_LENGTH = 1
TOTAL_FEATURE_LENGTH = PIXEL_FEATURE_LENGTH * NUM_RANDOM_POINTS + DID_BUMP_FEATURE_LENGTH
PIXEL_DISTANCE_CONSIDERED_BUMP = 230 #How close an object is in front of the avatar before it is considered to "bump" into it
# Channels
RED_CHANNEL = 0
GREEN_CHANNEL = 1
BLUE_CHANNEL = 2
DEPTH_CHANNEL = 3
OBS_KEY = 'RGBD_INTERLEAVED'
WALL_THRESHOLD = 0.2 #If the prediction is greater than this, the pavlov agent will avert
class StateRepresentation(object):
    """Builds tile-coded feature vectors (phi) from RGBD observations."""
    def __init__(self):
        self.pointsOfInterest = []
        self.numberOfTimesBumping = 0
        # Sample fixed random pixel coordinates once; the same points are
        # used for every observation so features stay comparable over time.
        self.randomYs = np.random.choice(IMAGE_HEIGHT, NUM_RANDOM_POINTS, replace=False)
        self.randomXs = np.random.choice(IMAGE_WIDTH, NUM_RANDOM_POINTS, replace=False)
        for i in range(NUM_RANDOM_POINTS):
            point = self.randomXs[i], self.randomYs[i]
            self.pointsOfInterest.append(point)
    def didBump(self, observation):
        """Return True when the centre pixel's depth is below the bump threshold."""
        obs = observation[OBS_KEY]
        # Floor division is required: `IMAGE_WIDTH / 2` is a float on
        # Python 3 and numpy rejects non-integer indices.
        # NOTE(review): indexing with (WIDTH//2, HEIGHT//2) treats the
        # width as the row axis; confirm the observation's axis order.
        midPix = obs[IMAGE_WIDTH // 2, IMAGE_HEIGHT // 2]
        didBump = False
        depths = obs[:, :, DEPTH_CHANNEL]
        #closestPixel = np.amin(depths)
        closestPixel = midPix[DEPTH_CHANNEL]
        #print("Pixel: " + str(closestPixel))
        if closestPixel < PIXEL_DISTANCE_CONSIDERED_BUMP:
            self.numberOfTimesBumping += 1
            didBump = True
        """
        if didBump:
            print("!!!!! BUMPED " + str(self.numberOfTimesBumping) + " time !!!!!")
            time.sleep(1.0)
        """
        return didBump
    def getEmptyPhi(self):
        """Return an all-zero feature vector of the full representation length."""
        return np.zeros(TOTAL_FEATURE_LENGTH)
    def getPhi(self, observation):
        """Create the feature representation (phi) for a given observation.

        Each of the NUM_RANDOM_POINTS sampled pixels has its RGB values
        tile-coded together; the per-pixel codes are concatenated and the
        didBump flag (closest pixel nearer than
        PIXEL_DISTANCE_CONSIDERED_BUMP) is appended at the end.

        Input: the observation -- full RGBD values for the
        IMAGE_WIDTH x IMAGE_HEIGHT pixels in view.
        Output: the feature vector, or None when observation is falsy.
        """
        if not observation:
            return None
        rgbdObs = observation[OBS_KEY]
        # tilecode each pixel individually and then assemble
        phi = []
        for point in self.pointsOfInterest:
            # Get the pixel value at that point, scaled into [0, 1).
            x = point[0]
            y = point[1]
            red = rgbdObs[y, x, RED_CHANNEL] / 256.0
            green = rgbdObs[y, x, GREEN_CHANNEL] / 256.0
            blue = rgbdObs[y, x, BLUE_CHANNEL] / 256.0
            pixelRep = np.zeros(PIXEL_FEATURE_LENGTH)
            # Tile code these 3 values together.
            indexes = tiles(NUM_IMAGE_TILINGS, PIXEL_FEATURE_LENGTH, [red, green, blue])
            for index in indexes:
                pixelRep[index] = 1.0
            # Assemble with other pixels.
            phi.extend(pixelRep)
        didBump = self.didBump(observation)
        phi.append(int(didBump))
        return np.array(phi)
|
# BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
"""
An implementation of model.WebMapService drived from a single CDMS
file, potentially a CDML aggregation file.
"""
from model import Layer, Grid
import cdtime, cdutil, re
import cdms2 as cdms
from ows_common.exceptions import InvalidParameterValue
import numpy.oldnumeric.ma as MA
import numpy.oldnumeric as N
class GridError(Exception):
    """Raised when a variable's grid cannot be handled as a simple grid."""
    pass
class CdmsGrid(Grid):
"""An adaptor for a cdms variable.
"""
def __init__(self, var, title=None):
"""
@param var: The cdms variable. This should be of shape (x, lat).
"""
try:
self._var = var
self._setGrid()
except GridError, e:
# This isn't a simple grid
#!TODO: we could regrid here.
raise e
self._setMetadata(title=title)
def _setGrid(self):
"""Check the grid is simple and initialise.
"""
y = self.getYAxis()
dy_a = y[1:] - y[:-1]
if not (dy_a == (N.ones(len(dy_a)) * dy_a[0])).all():
raise SimpleGridError, "Y Axis not equally spaced"
self.y0 = y[0]
self.dy = dy_a[0]
self.ny = len(y)
self.iy = self._var.getAxisList().index(y)
x = self.getXAxis()
dx_a = x[1:] - x[:-1]
if not (dx_a == (N.ones(len(dx_a)) * dx_a[0])).all():
raise SimpleGridError, "X Axis not equally spaced"
self.x0 = x[0]
self.dx = dx_a[0]
self.nx = len(x)
self.ix = self._var.getAxisList().index(x)
def _setMetadata(self, title=None):
if title:
self.long_name = title
else:
try:
self.long_name = self._var.long_name
except AttributeError:
self.long_name = 'Unknown'
try:
self.units = self._var.units
except AttributeError:
self.units = ''
def _getValue(self):
return self._var
value = property(_getValue)
class CdmsLatLonGrid(CdmsGrid):
    """
    Specialise CdmsGrid for EPSG:4326 (lat/lon axes).
    """
    crs = 'EPSG:4326'
    def getXAxis(self):
        return self._var.getLongitude()
    def getYAxis(self):
        return self._var.getLatitude()
class CdmsBNGGrid(CdmsGrid):
    """
    Specialise CdmsGrid for British National Grid coordinate system
    EPSG:27700 OSGB:36.  Axes are taken positionally: (northings, eastings).
    """
    crs = 'EPSG:27700'
    def getXAxis(self):
        return self._var.getAxisList()[1]
    def getYAxis(self):
        return self._var.getAxisList()[0]
class SimpleCdmsLayer(Layer):
    """A WMS layer backed by a single cdms variable."""
    def __init__(self, cdmsVar, minValue=None, maxValue=None, GridClass=CdmsGrid):
        """
        @param cdmsVar: variable object
        @param GridClass: grid adaptor class used by selectGrid
        @todo: Add crs attribute
        """
        self.GridClass = GridClass
        self.var = cdmsVar
        try:
            long_name = self.var.long_name
        except AttributeError:
            long_name = self.var.id
        super(SimpleCdmsLayer, self).__init__(long_name)
        #if 'time' in self.var.axes:
        #    self.dimensions = dict(time=CdmsTimeDimension(self.var.axes['time']))
        # Expose a time dimension only when the variable has a time axis.
        if self.var.getTime():
            self.dimensions = dict(time=CdmsTimeDimension(self.var.getTime()))
        else:
            self.dimensions = {}
##         if minValue is not None:
##             self.minValue = minValue
##         else:
##             self.minValue = self.var.min_value
##         if maxValue is not None:
##             self.maxValue = maxValue
##         else:
##             self.maxValue = self.var.max_value
        self.minValue = minValue
        self.maxValue = maxValue
        try:
            self.units = self.var.units
        except AttributeError:
            self.units = '?'
    def selectGrid(self, bbox, dimensionSpec):
        """Subset the variable to bbox (and optional time) as a grid.

        @warning: Hacked for UKCIP02.
        @todo: replace lat/lon references with generic X/Y code.
        """
        (lon1, lat1, lon2, lat2) = bbox
        sel = dict(northings=(lat1, lat2, 'cce'), eastings=(lon1, lon2, 'cce'))
        if 'time' in self.dimensions:
            sel['time'] = self.dimensions['time'].iso2reltime(dimensionSpec['time'])
        sel['squeeze'] = 1
        v = self.var(**sel)
        return self.GridClass(v, title=self.title)
    def describe(self, dimensionSpec):
        return self.var.long_name + ' at ' + self.dimensions['time']
class CdmsTimeDimension(object):
    """WMS time dimension backed by a cdms time axis.

    @todo: Move to impl.py when interface migration complete.
    """
    def __init__(self, timeAxis):
        self._axis = timeAxis
        self.units = 'ISO8601'
    def _getExtent(self):
        # Comma-separated ISO8601 instants for every point on the axis.
        comptimes = self._axis.asComponentTime()
        return ','.join(['%s-%s-%sT%s:%s:%sZ' % (x.year, x.month, x.day,
                                                 x.hour, x.minute, x.second)
                         for x in comptimes])
    extent = property(_getExtent)
    def iso2reltime(self, time, yearOverride=None):
        """Convert an ISO8601 instant to a relative time in the axis units.

        Raises InvalidParameterValue for unparsable strings.
        """
        mo = re.match(r'(\d+)-(\d+)-(\d+)T(\d+):(\d+):([0-9.]+)Z', time)
        if not mo:
            raise InvalidParameterValue('Time %s not recognised' % time, 'time')
        (year, month, day, hour, minute, second) = mo.groups()
        if yearOverride:
            year = yearOverride
        c = cdtime.comptime(int(year), int(month), int(day),
                            int(hour), int(minute), float(second))
        return c.torel(self._axis.units)
    def iso2timeDelta(self, time):
        # 'start D end' pairs separated by 'D' become relative times.
        return [self.iso2reltime(x) for x in time.split('D')]
#-----------------------------------------------------------------------------
import unittest
class TestBNGGrid(unittest.TestCase):
    """Exercises CdmsBNGGrid against the UKCIP02 rainfall test dataset."""
    def setUp(self):
        import os
        try:
            # Test data location is supplied via the environment.
            self.data_dir = os.environ['TEST_DATA_DIR']
            self.rainfall = cdms.open(self.data_dir+'/ukcip02/rainfall_1961-2000.nc')
        except:
            raise RuntimeError("""
Test data not found.  Please set the TEST_DATA_DIR environment variable.
""")
        # wire in the render_imp logger to nose
        import render_imp
        render_imp.logger = render_imp.logging.getLogger('nose.render_imp')
    def _makeGrid(self):
        # First time slice of the rainfall variable.
        v = self.rainfall['rainfall'][0]
        self.assertEquals(v.id, 'rainfall')
        return CdmsBNGGrid(v, 'UKCIP02 rainfall data')
    def testGridAttributes(self):
        # Known 5 km BNG grid geometry of the UKCIP02 dataset.
        g = self._makeGrid()
        self.assertEquals(g.dx, 5000)
        assert g.dy == -5000
        self.assertEquals(g.nx, 180)
        assert g.ny == 290
        assert g.x0 == -200000
        assert g.y0 == -200000
        self.assertNotEquals(g.ix, g.iy)
    def testGridValue(self):
        g = self._makeGrid()
        v = g.value
        # Value should be masked
        assert v.mask()
    def testRender(self):
        from render_imp import RGBARenderer
        from matplotlib.cm import get_cmap
        g = self._makeGrid()
        # Set arbitary min/max values for now
        r = RGBARenderer(MA.minimum(g.value), MA.maximum(g.value))
        xn = g.x0+g.dx*g.nx
        yn = g.y0+g.dy*g.ny
        bbox = (min(g.x0, xn), min(g.y0, yn),
                max(g.x0, xn), max(g.y0, yn))
        img = r.renderGrid(g, bbox, 400, 400, get_cmap())
        img.save('whole_domain.png')
        assert img.size == (400, 400)
        # Lower-left quadrant of the full bounding box.
        bbox2 = (bbox[0], bbox[1], bbox[0] + (bbox[2]-bbox[0])/2,
                 bbox[1] + (bbox[3]-bbox[1])/2)
        img = r.renderGrid(g, bbox2, 400, 400, get_cmap())
        img.save('ll_quad.png')
|
<gh_stars>1-10
# reference ==> https://www.shadertoy.com/view/WtScDt#
# Shader setup: an 800x450 RGB canvas plus a 16-layer per-pixel "texture" of
# cosine phase vectors (layers 0-7 derived from p.x, layers 8-15 from p.y).
import taichi as ti
ti.init(arch = ti.cuda)
res_x = 800
res_y = 450
pixels = ti.Vector.field(3, ti.f32, shape=(res_x, res_y))
cos_record = ti.Vector.field(3, ti.f32)
ti.root.dense(ti.i, 16).dense(ti.jk, (res_x,res_y)).place(cos_record) # 8*2 texture
# NOTE(review): the field created on the next line is never bound to a name
# and is never read - it still allocates memory; confirm and remove.
ti.Vector.field(3, ti.f32, shape=(res_x, res_y))
filter_ = True  # when True, mcos() uses the band-limited cosine (fcos)
@ti.func
def clamp(v, v_min, v_max):
    """Clamp v to the closed interval [v_min, v_max]."""
    lower_bounded = ti.max(v, v_min)
    return ti.min(lower_bounded, v_max)
@ti.func
def smoothstep(edge1, edge2, v):
    """Hermite interpolation of v between edge1 and edge2 (edges must differ)."""
    assert(edge1 != edge2)
    # Normalise v into [0, 1] across the edge interval, then apply 3t^2 - 2t^3.
    t = clamp((v-edge1) / float(edge2-edge1), 0.0, 1.0)
    return (3-2 * t) * t**2
@ti.func
def fwidth(x,k,i,j):
    """Screen-space derivative magnitude (|ddx| + |ddy|) of the cached phase
    vector at layer k, pixel (i, j) - analogous to GLSL fwidth().

    The x argument is unused; the derivative is taken on cos_record.
    """
    # Forward differences against the right/upper neighbouring pixels.
    # NOTE(review): i+1 / j+1 read one past the field on the last row/column;
    # Taichi's out-of-bound behaviour applies there - confirm intended.
    ddx = cos_record[k,i+1,j] - cos_record[k,i,j]
    ddy = cos_record[k,i,j+1] - cos_record[k,i,j]
    return abs(ddx) + abs(ddy)
@ti.func
def fcos(x,k,i,j):
    """Band-limited cosine: cos(x) attenuated by the exact sinc of half the
    local pixel footprint (fwidth), which suppresses aliasing at high
    frequencies."""
    half_w = 0.5 * fwidth(x,k,i,j)
    return ti.cos(x) * ti.sin(half_w) / half_w
@ti.func
def mcos(x,k,i,j):
    """cos(x), band-limited via fcos() when the global filter_ flag is set."""
    result = ti.cos(x)
    if filter_:
        result = fcos(x,k,i,j)
    return result
@ti.func
def getcolor(t, k, i,j):
    """Sum eight band-limited cosine octaves (layers k..k+7 of cos_record)
    on top of a warm base colour.

    NOTE(review): the t parameter is unused - the phases were precomputed
    into cos_record by init_texture; k selects the 8-layer bank (0 for p.x,
    8 for p.y).
    """
    col = ti.Vector([0.6, 0.5, 0.4])
    # Decreasing weight per octave; the same layer index is passed on so
    # mcos/fwidth differentiate the matching cached phases.
    col += 0.14 * mcos(cos_record[k,i,j],k+0,i,j)
    col += 0.13 * mcos(cos_record[k+1,i,j],k+1,i,j)
    col += 0.12 * mcos(cos_record[k+2,i,j],k+2,i,j)
    col += 0.11 * mcos(cos_record[k+3,i,j],k+3,i,j)
    col += 0.10 * mcos(cos_record[k+4,i,j],k+4,i,j)
    col += 0.09 * mcos(cos_record[k+5,i,j],k+5,i,j)
    col += 0.08 * mcos(cos_record[k+6,i,j],k+6,i,j)
    col += 0.07 * mcos(cos_record[k+7,i,j],k+7,i,j)
    return col
@ti.func
def init_texture(p,i,j):
    """Precompute the cosine phase vectors for pixel (i, j) into cos_record.

    Runs two passes: the first stores eight octaves of p.x in layers 0-7;
    t/offset are then reassigned at the end of the loop body so the second
    pass stores p.y in layers 8-15 (the final reassignment after pass two
    is a no-op).
    """
    t = p.x
    offset = 0
    for s in range(2):
        # Eight octaves with roughly doubling frequency and fixed RGB phase
        # offsets (matches the shadertoy reference).
        cos1 = 6.2832 * t * 1.0 + ti.Vector([0.0, 0.5, 0.6])
        cos2 = 6.2832 * t * 3.1 + ti.Vector([0.5, 0.6, 1.0])
        cos3 = 6.2832 * t * 5.1 + ti.Vector([0.1, 0.7, 1.1])
        cos4 = 6.2832 * t * 9.1 + ti.Vector([0.1, 0.5, 1.2])
        cos5 = 6.2832 * t * 17.1 + ti.Vector([0.0, 0.3, 0.9])
        cos6 = 6.2832 * t * 31.1 + ti.Vector([0.1, 0.5, 1.3])
        cos7 = 6.2832 * t * 65.1 + ti.Vector([0.1, 0.5, 1.3])
        cos8 = 6.2832 * t * 131.1 + ti.Vector([0.3, 0.2, 0.8])
        cos_record[offset,i,j] = cos1
        cos_record[offset+1, i, j] = cos2
        cos_record[offset+2, i, j] = cos3
        cos_record[offset+3, i, j] = cos4
        cos_record[offset+4, i, j] = cos5
        cos_record[offset+5, i, j] = cos6
        cos_record[offset+6, i, j] = cos7
        cos_record[offset+7, i, j] = cos8
        # Switch to the y coordinate / upper 8 layers for the second pass.
        t = p.y
        offset = 8
@ti.kernel
def render(time:ti.f32):
    """Per-frame render: pass 1 fills the phase texture, pass 2 shades.

    Both passes map the pixel to centred coordinates, invert the plane
    through q/|q|^2 and advect by time - the two loops must compute the
    same p so the cached phases match the shading pass.
    """
    for i, j in pixels:
        q = ti.Vector([2 * i - res_x, 2 * j - res_y]) / res_y
        p = 2.0 * q / q.dot(q)  # circular inversion of the plane
        p += 0.05 * time        # scroll over time
        init_texture(p,i,j)
    for i,j in pixels:
        q = ti.Vector([2*i-res_x, 2*j- res_y]) / res_y
        p = 2.0 * q / q.dot(q)
        p += 0.05*time
        # Darker of the x- and y-striped colourings, then a radial vignette.
        col = min(getcolor(p.x,0,i,j), getcolor(p.y,8,i,j))
        col *= 1.5 - 0.2*q.norm()
        pixels[i,j] = col
# Drive the shader: one rendered frame per loop iteration, with simulated
# time advancing 0.03 per frame.
gui = ti.GUI("Canvas", res=(res_x, res_y))
for i in range(100000):
    t = i * 0.03
    render(t)
    gui.set_image(pixels)
    gui.show()
from django.contrib.auth import get_user_model
from django.contrib.auth.views import LoginView
from django.test import TestCase, Client
from django.urls import reverse
from posts.models import Post
class GeneralTestCase(TestCase):
    """End-to-end tests for the posts app: profiles, post creation/editing,
    images, caching, follow/unfollow and comments."""
    def setUp(self):
        # Two users: self.user authors posts, self.user_2 is an observer.
        self.client = Client()
        User = get_user_model()
        self.user = User.objects.create_user(username='pupupu', first_name='pu', last_name='pu', password='<PASSWORD>')
        self.user_2 = User.objects.create_user(username='quququ', first_name='qu', last_name='qu', password='<PASSWORD>')
    def test_create_profile(self):
        # Profile page exists for a freshly created user.
        response = self.client.get('/profile/pupupu/')
        self.assertEqual(response.status_code, 200)
    def test_auth_create_post(self):
        # Authenticated users can open the new-post form.
        self.client.force_login(self.user)
        response = self.client.get('/new/')
        self.assertEqual(response.status_code, 200)
    def test_redirect_after_create_post(self):
        # Anonymous access to /new/ redirects to the login view.
        response = self.client.get('/new/', follow=True)
        self.assertEqual(response.resolver_match.func.__name__, LoginView.as_view().__name__)
        self.assertTemplateUsed(response, template_name='users/login.html')
    def test_check_new_post(self):
        # A created post appears on the index, the profile and its own page.
        self.client.force_login(self.user)
        response = self.client.post('/new/', {'text': 'testtest'}, follow=True)
        self.assertEqual(response.status_code, 200)
        response = self.client.get('')
        self.assertEqual((response.context['page_obj'])[-1].text, 'testtest')
        response = self.client.get(reverse('posts:profile', kwargs={'username': self.user.username}))
        self.assertEqual((response.context['page_obj'])[-1].text, 'testtest')
        response = self.client.get(reverse('posts:post_detail', kwargs={'post_id': self.user.posts.last().id}))
        self.assertEqual(response.context['post'].text, 'testtest')
    def test_edit_post_and_check(self):
        # Editing a post updates it everywhere it is displayed.
        self.client.force_login(self.user)
        self.client.post('/new/', {'text': 'testtest'}, follow=True)
        update_url = reverse('posts:edit_post', kwargs={'post_id': self.user.posts.last().id})
        self.client.post(update_url, {'text': 'updated'}, follow=True)
        response = self.client.get('')
        self.assertEqual((response.context['page_obj'])[-1].text, 'updated')
        response = self.client.get(reverse('posts:profile', kwargs={'username': self.user.username}))
        self.assertEqual((response.context['page_obj'])[-1].text, 'updated')
        response = self.client.get(reverse('posts:post_detail', kwargs={'post_id': self.user.posts.last().id}))
        self.assertEqual(response.context['post'].text, 'updated')
    def test_response_404(self):
        # Unknown URLs return 404.
        response = self.client.get('/pashalka_marvel/')
        self.assertEqual(response.status_code, 404)
    def test_img_tag(self):
        # Posts with an attached image render an <img> tag on all pages.
        self.client.force_login(self.user)
        with open('media/posts/social-image.jpg', 'rb') as img:
            post = self.client.post('/new/', {'text': 'text with image', 'image': img})
        response = self.client.get('')
        self.assertContains(response, 'img')
        response = self.client.get(reverse('posts:profile', kwargs={'username': self.user.username}))
        self.assertContains(response, 'img')
        response = self.client.get(reverse('posts:post_detail', kwargs={'post_id': self.user.posts.last().id}))
        self.assertContains(response, 'img')
    def test_get_not_img(self):
        # Uploading a non-image file is rejected - no post is created.
        self.client.force_login(self.user)
        with open('media/posts/cheats.txt') as img:
            self.client.post('/new/', {'text': 'text with image', 'image': img})
        self.assertEqual(self.user.posts.count(), 0)
    def test_cache_index_page(self):
        # NOTE(review): despite the name, this asserts the new post IS visible
        # right after creation; if the index page were cached across requests
        # this would fail - confirm the intended caching behaviour.
        self.client.force_login(self.user)
        self.client.get('')
        self.client.post('/new/', {'text': 'testcache'})
        response = self.client.get('')
        self.assertContains(response, 'testcache')
    def test_auth_follow_and_unfollow(self):
        # Authenticated follow/unfollow endpoints respond successfully.
        self.client.force_login(self.user)
        response = self.client.get(reverse('posts:follow', kwargs={'username': 'pupupu'}), follow=True)
        self.assertEqual(response.status_code, 200)
        response = self.client.get(reverse('posts:unfollow', kwargs={'username': 'pupupu'}), follow=True)
        self.assertEqual(response.status_code, 200)
    def test_visible_post_for_follow_or_not(self):
        # The /follow/ feed shows posts only to followers of the author.
        Post.objects.create(author=self.user, text='pupupu_text')
        self.client.force_login(self.user)
        self.client.get(reverse('posts:follow', kwargs={'username': 'pupupu'}), follow=True)
        response = self.client.get('/follow/')
        self.assertContains(response, 'pupupu_text')
        self.client.force_login(self.user_2)
        response = self.client.get('/follow/')
        self.assertNotContains(response, 'pupupu_text')
    def test_not_auth_add_comment(self):
        # Anonymous users are redirected away from the comment endpoint.
        post = Post.objects.create(author=self.user, text='pupupu_text')
        response = self.client.get(reverse('posts:new_comment', kwargs={'post_id': post.id}))
        self.assertEqual(response.status_code, 302)
|
'''
Created on 28 Feb 2018
@author: lbtanh
'''
from __future__ import division
import face_recognition
import cv2
import os
import time
import datetime
import pickle
# import pp
import sys
from PIL import Image, ImageEnhance
import numpy as np
from py2neo.packages.neo4j.v1.packstream import LIST_16
def read_all_image(image_parent_dir):
    """Scan each immediate sub-directory of image_parent_dir for face images.

    Every sub-directory is treated as one person; each .jpg/.jpeg/.png file
    in it is loaded and encoded with face_recognition.

    Returns a dict mapping the sub-directory name to a list of per-image
    face-encoding results. Images that fail to load/encode are reported and
    skipped.
    """
    dict_face_encodings = {}
    src_dirs = [d for d in os.listdir(image_parent_dir)
                if os.path.isdir(os.path.join(image_parent_dir, d))]
    for folder in src_dirs:
        list_known_face_encodings = []
        input_dir = os.path.join(image_parent_dir, folder)
        for image in os.listdir(input_dir):
            # Tuple form of endswith replaces the chained or-checks.
            if image.endswith(('.jpg', '.jpeg', '.png')):
                # Pass the full path instead of os.chdir()-ing into the
                # directory - the original chdir leaked a process-wide
                # working-directory change to callers.
                image_path = os.path.join(input_dir, image)
                try:
                    tmp_image = face_recognition.load_image_file(image_path)
                    tmp_face_encoding = face_recognition.face_encodings(tmp_image, num_jitters=1)
                    list_known_face_encodings.append(tmp_face_encoding)
                except Exception as ex:
                    # Don't reference tmp_face_encoding here - it is unbound
                    # when load_image_file itself failed (was a NameError).
                    print(ex, image_path)
        dict_face_encodings[str(folder)] = list_known_face_encodings
    return dict_face_encodings
def draw_rectangle(event, x, y, flags, params):
    """OpenCV mouse callback: drag with the left button to select a
    rectangular region of the module-global small_frame.

    Mutates the module globals drawing / top_left_pt / bottom_right_pt and
    crops small_frame in place while dragging. Coordinates are divided by
    resize_output to map from the scaled preview back to full resolution.
    """
    global drawing, top_left_pt, bottom_right_pt, small_frame, resize_output, x_init, y_init
    # NOTE(review): list_face is rebuilt on every event and OpenCV ignores a
    # mouse callback's return value, so the returned list is always lost -
    # store the rectangle in a global/collector if it is needed.
    list_face = []
    if event == cv2.EVENT_LBUTTONDOWN:
        # Start of the drag: remember the anchor corner.
        drawing = True
        x_init, y_init = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            # Update the rectangle and crop the working frame as we drag.
            top_left_pt = (int(min(x_init, x)/resize_output), int(min(y_init, y)/resize_output))
            bottom_right_pt = (int(max(x_init, x)/resize_output), int(max(y_init, y)/resize_output))
            small_frame=small_frame[y_init:y, x_init:x]
    elif event == cv2.EVENT_LBUTTONUP:
        # End of the drag: record the final rectangle.
        drawing = False
        top_left_pt = (int(min(x_init, x)/resize_output), int(min(y_init, y)/resize_output))
        bottom_right_pt = (int(max(x_init, x)/resize_output), int(max(y_init, y)/resize_output))
        small_frame=small_frame[y_init:y, x_init:x]
        list_face.append([top_left_pt, bottom_right_pt])
    return list_face
def processing():
    """Placeholder for ROI processing: unpacks the selected rectangle corners
    from the module globals set by draw_rectangle.

    NOTE(review): the unpacked values are currently unused - the actual
    processing step has not been implemented yet.
    """
    # Select ROI
    (x0,y0), (x1,y1) = top_left_pt, bottom_right_pt
if __name__=='__main__':
    # Interactive ROI-selection demo: show a downscaled face image and let
    # the user drag a rectangle over it (handled by draw_rectangle above).
    resize_output = 0.2        # preview window scale factor
    drawing = False            # True while the left mouse button is held
    top_left_pt = (-1,-1)
    bottom_right_pt = (-1,-1)
    small_frame =[]
    x_init = 10
    y_init = 10
    # NOTE(review): hard-coded absolute Windows path - parameterize for reuse.
    small_frame = cv2.imread("C:/Users/lbtanh/workspace/recognition_release/resource2/Tin/tin.jpg")
    im2 = cv2.resize(small_frame, (0, 0), fx=resize_output, fy= resize_output)
    cv2.imshow('crop_face',im2)
    cv2.setMouseCallback('crop_face', draw_rectangle)
    cv2.waitKey(0)
    # small_frame may have been cropped by the callback by now.
    print(small_frame.size)
    # change to select ROI
|
from abc import ABC, abstractmethod
import numpy as np
class Intrinsics(ABC):
    """Abstract camera intrinsics.

    Concrete subclasses supply the focal lengths (f_x, f_y), principal
    point (c_x, c_y) and image size (height, width); this base class
    derives the 4x4 homogeneous calibration matrix from them.
    """
    @property
    @abstractmethod
    def f_x(self) -> np.float32:
        """Focal length along x, in pixels."""
    @property
    @abstractmethod
    def f_y(self) -> np.float32:
        """Focal length along y, in pixels."""
    @property
    @abstractmethod
    def c_x(self) -> np.float32:
        """Principal point x coordinate, in pixels."""
    @property
    @abstractmethod
    def c_y(self) -> np.float32:
        """Principal point y coordinate, in pixels."""
    @property
    @abstractmethod
    def height(self) -> int:
        """Image height in pixels."""
    @property
    @abstractmethod
    def width(self) -> int:
        """Image width in pixels."""
    @property
    def K(self) -> np.ndarray:
        """4x4 float32 calibration matrix (a fresh array on every access)."""
        k = np.eye(4, dtype='float32')
        k[0, 0] = self.f_x
        k[0, 2] = self.c_x
        k[1, 1] = self.f_y
        k[1, 2] = self.c_y
        return k
    @property
    def normalized_K(self):
        """K with the x-row scaled by width and the y-row scaled by height."""
        k = self.K  # fresh copy, safe to scale in place
        k[0] /= self.width
        k[1] /= self.height
        return k
class CylindricalIntrinsics(Intrinsics):
    """Fixed intrinsics for the 2048x1024 cylindrical projection."""
    _F_X = 325.949323452201668
    _F_Y = 1023.0
    _C_X = 1024.0
    _C_Y = 511.5
    @property
    def height(self) -> int:
        return 1024
    @property
    def width(self) -> int:
        return 2048
    @property
    def f_x(self) -> np.float32:
        return np.float32(self._F_X)
    @property
    def c_x(self) -> np.float32:
        return np.float32(self._C_X)
    @property
    def f_y(self) -> np.float32:
        return np.float32(self._F_Y)
    @property
    def c_y(self) -> np.float32:
        return np.float32(self._C_Y)
class SphericalIntrinsics(Intrinsics):
    """Fixed intrinsics for the 2048x1024 spherical (equirectangular)
    projection; f_x == f_y here, unlike the cylindrical variant."""
    _FOCAL = 325.949323452201668
    _C_X = 1024.0
    _C_Y = 512.0
    @property
    def height(self) -> int:
        return 1024
    @property
    def width(self) -> int:
        return 2048
    @property
    def f_x(self) -> np.float32:
        return np.float32(self._FOCAL)
    @property
    def c_x(self) -> np.float32:
        return np.float32(self._C_X)
    @property
    def f_y(self) -> np.float32:
        return np.float32(self._FOCAL)
    @property
    def c_y(self) -> np.float32:
        return np.float32(self._C_Y)
class PinholeIntrinsics(Intrinsics):
    """Fixed intrinsics for a 768x768 pinhole camera with a 100-degree
    field of view; the principal point sits at the image centre."""
    _FOCAL = 322.2142583720755
    _CENTER = 384.0
    _SIZE = 768
    @property
    def f_x(self) -> np.float32:
        return np.float32(self._FOCAL)
    @property
    def f_y(self) -> np.float32:
        return np.float32(self._FOCAL)
    @property
    def c_x(self) -> np.float32:
        return np.float32(self._CENTER)
    @property
    def c_y(self) -> np.float32:
        return np.float32(self._CENTER)
    @property
    def height(self) -> int:
        return self._SIZE
    @property
    def width(self) -> int:
        return self._SIZE
    @property
    def fov(self) -> int:
        """Field of view in degrees."""
        return 100
class Pinhole90Intrinsics(Intrinsics):
    """Fixed intrinsics for a 768x768 pinhole camera with a 90-degree field
    of view (focal length equals half the image size)."""
    _VALUE = 384.0
    _SIZE = 768
    @property
    def f_x(self) -> np.float32:
        return np.float32(self._VALUE)
    @property
    def f_y(self) -> np.float32:
        return np.float32(self._VALUE)
    @property
    def c_x(self) -> np.float32:
        return np.float32(self._VALUE)
    @property
    def c_y(self) -> np.float32:
        return np.float32(self._VALUE)
    @property
    def height(self) -> int:
        return self._SIZE
    @property
    def width(self) -> int:
        return self._SIZE
    @property
    def fov(self) -> int:
        """Field of view in degrees."""
        return 90
|
<filename>spambayes/Outlook2000/msgstore.py
from __future__ import generators
import sys, os, re
import locale
from time import timezone
import email
from email.MIMEImage import MIMEImage
from email.Message import Message
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Parser import HeaderParser
from email.Utils import formatdate
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# MAPI imports etc.
from win32com.client import Dispatch, constants
from win32com.mapi import mapi, mapiutil
from win32com.mapi.mapitags import *
import pythoncom
import winerror
# Additional MAPI constants we dont have in Python
# Raw MAPI flag/property constants not exposed by win32com.mapi.
MESSAGE_MOVE = 0x1 # from MAPIdefs.h
MSGFLAG_READ = 0x1 # from MAPIdefs.h
MSGFLAG_UNSENT = 0x00000008
MYPR_BODY_HTML_A = 0x1013001e # magic <wink>
MYPR_BODY_HTML_W = 0x1013001f # ditto
MYPR_MESSAGE_ID_A = 0x1035001E # more magic (message id field used for Exchange)
CLEAR_READ_FLAG = 0x00000004
CLEAR_RN_PENDING = 0x00000020
CLEAR_NRN_PENDING = 0x00000040
SUPPRESS_RECEIPT = 0x1
FOLDER_DIALOG = 0x00000002
USE_DEFERRED_ERRORS = mapi.MAPI_DEFERRED_ERRORS # or set to zero to see what changes <wink>
#import warnings
#if sys.version_info >= (2, 3):
#    # sick off the new hex() warnings!
#    warnings.filterwarnings("ignore", category=FutureWarning, append=1)
# Nod to our automated test suite. Currently supports a hack so our test
# message is filtered, and also for raising exceptions at key times.
# see tester.py for more details.
test_suite_running = False
# Name of the checkpoint at which a simulated failure should fire (see
# help_test_suite below), and the exception (type, value) pair to raise.
test_suite_failure_request = None
test_suite_failure = None
# Set to the number of times we should fail, or None for all times.
test_suite_failure_count = None
# Sometimes the test suite will request that we simulate MAPI errors.
def help_test_suite(checkpoint_name):
    """Test-suite hook: raise the configured failure when execution reaches
    the named checkpoint.

    No-op unless the test suite is running and has requested a failure at
    exactly this checkpoint.  test_suite_failure_count limits how many more
    times the failure fires (None/0 semantics: a truthy count is decremented
    and the request is cleared when it hits zero).
    """
    global test_suite_failure_request, test_suite_failure_count
    if test_suite_running and \
       test_suite_failure_request == checkpoint_name:
        if test_suite_failure_count:
            test_suite_failure_count -= 1
            if test_suite_failure_count==0:
                test_suite_failure_request = None
        # Python 2 two-argument raise: raises type with the given value.
        raise test_suite_failure[0], test_suite_failure[1]
# Exceptions raised by this module. Raw MAPI exceptions should never
# be raised to the caller.
class MsgStoreException(Exception):
    """Base exception for all errors raised by this module.

    Wraps the underlying MAPI/COM exception (if any) plus an optional extra
    human-readable message.  Raw MAPI exceptions should never escape to the
    caller - they are converted to this hierarchy.
    """
    def __init__(self, mapi_exception, extra_msg = None):
        self.mapi_exception = mapi_exception
        self.extra_msg = extra_msg
        Exception.__init__(self, mapi_exception, extra_msg)
    def __str__(self):
        try:
            if self.mapi_exception is not None:
                err_str = GetCOMExceptionString(self.mapi_exception)
            else:
                err_str = self.extra_msg or ''
            return "%s: %s" % (self.__class__.__name__, err_str)
        # Python silently consumes exceptions raised inside __str__ and shows
        # <unprintable object> - so log the traceback here.  We must still
        # return a string: the old code fell off the end and returned None,
        # which itself made str() raise a TypeError.
        except Exception:
            print("FAILED to str() a MsgStore exception!")
            import traceback
            traceback.print_exc()
            return "%s: <unprintable error>" % (self.__class__.__name__,)
# Exception raised when you attempt to get a message or folder that doesn't
# exist. Usually means you are querying an ID that *was* valid, but has
# since been moved or deleted.
# Note you may get this exception "getting" objects (such as messages or
# folders), or accessing properties once the object was created (the message
# may be moved under us at any time)
class NotFoundException(MsgStoreException):
    """The requested message/folder doesn't exist (moved or deleted)."""
    pass
# Exception raised when you try and modify a "read only" object.
# Only currently examples are Hotmail and IMAP folders.
class ReadOnlyException(MsgStoreException):
    """Attempted to modify a read-only object (e.g. Hotmail/IMAP folders)."""
    pass
# The object has changed since it was opened.
class ObjectChangedException(MsgStoreException):
    """The underlying MAPI object changed since it was opened."""
    pass
# Utility functions for exceptions. Convert a COM exception to the best
# manager exception.
def MsgStoreExceptionFromCOMException(com_exc):
    """Map a raw pythoncom COM exception to the most specific
    MsgStoreException subclass we know about."""
    if IsNotFoundCOMException(com_exc):
        return NotFoundException(com_exc)
    if IsReadOnlyCOMException(com_exc):
        return ReadOnlyException(com_exc)
    # Fall back to a simple scode-based mapping.
    hr = NormalizeCOMException(com_exc)[0]
    if hr == mapi.MAPI_E_OBJECT_CHANGED:
        return ObjectChangedException(com_exc)
    return MsgStoreException(com_exc)
def NormalizeCOMException(exc_val):
    """Return a flat (hr, msg, exc, arg_err) tuple, unwrapping a 'client'
    exception (DISP_E_EXCEPTION) into its real scode and message."""
    hr, msg, exc, arg_err = exc_val
    if hr != winerror.DISP_E_EXCEPTION or not exc:
        return hr, msg, exc, arg_err
    # Server-supplied exception info - pull the real hr/msg out of it.
    wcode, source, real_msg, help1, help2, real_hr = exc
    return real_hr, real_msg, exc, arg_err
# Build a reasonable string from a COM exception tuple
def GetCOMExceptionString(exc_val):
    """Human-readable rendition of a (possibly wrapped) COM exception."""
    hr, msg, _exc, _arg_err = NormalizeCOMException(exc_val)
    scode_name = mapiutil.GetScodeString(hr)
    return "Exception 0x%x (%s): %s" % (hr, scode_name, msg)
# Does this exception probably mean "object not found"?
def IsNotFoundCOMException(exc_val):
    """True if the COM error means the object was deleted or never existed."""
    hr = NormalizeCOMException(exc_val)[0]
    return hr in (mapi.MAPI_E_OBJECT_DELETED, mapi.MAPI_E_NOT_FOUND)
# Does this exception probably mean "object not available 'cos you ain't logged
# in, or 'cos the server is down"?
def IsNotAvailableCOMException(exc_val):
    """True if the store/object is temporarily unreachable (not logged in,
    or the server is down)."""
    hr = NormalizeCOMException(exc_val)[0]
    return hr == mapi.MAPI_E_FAILONEPROVIDER
def IsReadOnlyCOMException(exc_val):
    """True if the error is one of the known 'read only object' failures."""
    # Observed for IMAP mails (0x800cccd3) and hotmail messages (0x8004dff7).
    hr = NormalizeCOMException(exc_val)[0]
    return hr in (-2146644781, -2147164169)
def ReportMAPIError(manager, what, exc_val):
    """Report a MAPI failure to the user, once per Outlook session.

    `what` is a short description of the failed operation; a special-case
    message is shown when the error indicates a full Outlook folder.
    """
    hr, exc_msg, exc, arg_err = exc_val
    if hr == mapi.MAPI_E_TABLE_TOO_BIG:
        err_msg = what + _(" failed as one of your\r\n" \
                 "Outlook folders is full. Futher operations are\r\n" \
                 "likely to fail until you clean up this folder.\r\n\r\n" \
                 "This message will not be reported again until SpamBayes\r\n"\
                 "is restarted.")
    else:
        err_msg = what + _(" failed due to an unexpected Outlook error.\r\n") \
                  + GetCOMExceptionString(exc_val) + "\r\n\r\n" + \
                  _("It is recommended you restart Outlook at the earliest opportunity\r\n\r\n" \
                    "This message will not be reported again until SpamBayes\r\n"\
                    "is restarted.")
    # ReportErrorOnce suppresses duplicates for the rest of the session.
    manager.ReportErrorOnce(err_msg)
# Our objects.
class MAPIMsgStore:
    """Top-level wrapper around an Extended MAPI session.

    Opens a MAPI session at construction time and hands out
    MAPIMsgStoreFolder / MAPIMsgStoreMsg wrappers.  Raw MAPI/COM errors are
    translated into the MsgStoreException hierarchy before reaching callers.
    """
    # Stash exceptions in the class for ease of use by consumers.
    MsgStoreException = MsgStoreException
    NotFoundException = NotFoundException
    ReadOnlyException = ReadOnlyException
    ObjectChangedException = ObjectChangedException
    def __init__(self, outlook = None):
        # outlook: optional Outlook Application object (used by wrappers to
        # fetch OOM items); may be None for MAPI-only use.
        self.outlook = outlook
        cwd = os.getcwd() # remember the cwd - mapi changes it under us!
        mapi.MAPIInitialize(None)
        logonFlags = (mapi.MAPI_NO_MAIL |
                      mapi.MAPI_EXTENDED |
                      mapi.MAPI_USE_DEFAULT)
        self.session = mapi.MAPILogonEx(0, None, None, logonFlags)
        # Note that if the CRT still has a default "C" locale, MAPILogonEx()
        # will change it. See locale comments in addin.py
        locale.setlocale(locale.LC_NUMERIC, "C")
        # Cache of opened message stores, keyed by binary store EID
        # (None maps to the default store once it has been opened).
        self.mapi_msg_stores = {}
        self.default_store_bin_eid = None
        os.chdir(cwd)
    def Close(self):
        """Drop all cached stores and shut down the MAPI session."""
        self.mapi_msg_stores = None
        self.session.Logoff(0, 0, 0)
        self.session = None
        mapi.MAPIUninitialize()
    def GetProfileName(self):
        """Return the name of the MAPI profile currently in use (unicode),
        or None when this win32all build lacks GetStatusTable."""
        # XXX - note - early win32all versions are missing
        # GetStatusTable :(
        try:
            self.session.GetStatusTable
        except AttributeError:
            # We try and recover from this when win32all is updated, so no need to whinge.
            return None
        MAPI_SUBSYSTEM = 39
        restriction = mapi.RES_PROPERTY, (mapi.RELOP_EQ, PR_RESOURCE_TYPE,
                                          (PR_RESOURCE_TYPE, MAPI_SUBSYSTEM))
        table = self.session.GetStatusTable(0)
        rows = mapi.HrQueryAllRows(table,
                                   (PR_DISPLAY_NAME_A,), # columns to retrieve
                                   restriction,          # only these rows
                                   None,                 # any sort order is fine
                                   0)                    # any # of results is fine
        assert len(rows)==1, "Should be exactly one row"
        (tag, val), = rows[0]
        # I can't convince MAPI to give me the Unicode name, so we assume
        # encoded as MBCS.
        return val.decode("mbcs", "ignore")
    def _GetMessageStore(self, store_eid): # bin eid.
        """Open (and cache) the message store with the given binary entry
        ID; None means the profile's default store."""
        try:
            # Will usually be pre-fetched, so fast-path out
            return self.mapi_msg_stores[store_eid]
        except KeyError:
            pass
        given_store_eid = store_eid
        if store_eid is None:
            # Find the EID for the default store.
            tab = self.session.GetMsgStoresTable(0)
            # Restriction for the table: get rows where PR_DEFAULT_STORE is true.
            # There should be only one.
            restriction = (mapi.RES_PROPERTY,   # a property restriction
                           (mapi.RELOP_EQ,      # check for equality
                            PR_DEFAULT_STORE,   # of the PR_DEFAULT_STORE prop
                            (PR_DEFAULT_STORE, True))) # with True
            rows = mapi.HrQueryAllRows(tab,
                                       (PR_ENTRYID,),   # columns to retrieve
                                       restriction,     # only these rows
                                       None,            # any sort order is fine
                                       0)               # any # of results is fine
            # get first entry, a (property_tag, value) pair, for PR_ENTRYID
            row = rows[0]
            eid_tag, store_eid = row[0]
            self.default_store_bin_eid = store_eid
        # Open it.
        store = self.session.OpenMsgStore(
                                0,      # no parent window
                                store_eid,    # msg store to open
                                None,   # IID; accept default IMsgStore
                                # need write access to add score fields
                                mapi.MDB_WRITE |
                                    # we won't send or receive email
                                    mapi.MDB_NO_MAIL |
                                    USE_DEFERRED_ERRORS)
        # cache it
        self.mapi_msg_stores[store_eid] = store
        if given_store_eid is None: # The default store
            self.mapi_msg_stores[None] = store
        return store
    def GetRootFolder(self, store_id = None):
        """Return the IPM-subtree root folder of the given store."""
        # if storeID is None, gets the root folder from the default store.
        store = self._GetMessageStore(store_id)
        hr, data = store.GetProps((PR_ENTRYID, PR_IPM_SUBTREE_ENTRYID), 0)
        store_eid = data[0][1]
        subtree_eid = data[1][1]
        eid = mapi.HexFromBin(store_eid), mapi.HexFromBin(subtree_eid)
        return self.GetFolder(eid)
    def _OpenEntry(self, id, iid = None, flags = None):
        """Open a MAPI object given a normalized binary (store, item) ID."""
        # id is already normalized.
        store_id, item_id = id
        store = self._GetMessageStore(store_id)
        if flags is None:
            flags = mapi.MAPI_MODIFY | USE_DEFERRED_ERRORS
        return store.OpenEntry(item_id, iid, flags)
    # Normalize an "external" hex ID to an internal binary ID.
    def NormalizeID(self, item_id):
        """Convert a (store_hex, entry_hex) tuple to binary form; raises
        MsgStoreException for malformed IDs."""
        assert type(item_id)==type(()), \
               "Item IDs must be a tuple (not a %r)" % item_id
        try:
            store_id, entry_id = item_id
            return mapi.BinFromHex(store_id), mapi.BinFromHex(entry_id)
        except ValueError:
            raise MsgStoreException(None, "The specified ID '%s' is invalid" % (item_id,))
    def _GetSubFolderIter(self, folder):
        """Recursively yield MAPIMsgStoreFolder wrappers for every
        descendant of the given open MAPI folder."""
        table = folder.GetHierarchyTable(0)
        rows = mapi.HrQueryAllRows(table,
                                   (PR_ENTRYID, PR_STORE_ENTRYID, PR_DISPLAY_NAME_A),
                                   None,
                                   None,
                                   0)
        for (eid_tag, eid), (store_eid_tag, store_eid), (name_tag, name) in rows:
            item_id = store_eid, eid
            sub = self._OpenEntry(item_id)
            table = sub.GetContentsTable(0)
            yield MAPIMsgStoreFolder(self, item_id, name, table.GetRowCount(0))
            for store_folder in self._GetSubFolderIter(sub):
                yield store_folder
    def GetFolderGenerator(self, folder_ids, include_sub):
        """Yield MAPIMsgStoreFolder objects for each hex ID in folder_ids,
        optionally recursing into sub-folders.  Invalid, unavailable or
        deleted folders are skipped with a console note rather than raising."""
        for folder_id in folder_ids:
            try:
                folder_id = self.NormalizeID(folder_id)
            except MsgStoreException, details:
                print "NOTE: Skipping invalid folder", details
                continue
            try:
                folder = self._OpenEntry(folder_id)
                table = folder.GetContentsTable(0)
            except pythoncom.com_error, details:
                # We will ignore *all* such errors for the time
                # being, but give verbose details for results we don't
                # know about
                if IsNotAvailableCOMException(details):
                    print "NOTE: Skipping folder for this session - temporarily unavailable"
                elif IsNotFoundCOMException(details):
                    print "NOTE: Skipping deleted folder"
                else:
                    print "WARNING: Unexpected MAPI error opening folder"
                    print GetCOMExceptionString(details)
                continue
            rc, props = folder.GetProps( (PR_DISPLAY_NAME_A,), 0)
            yield MAPIMsgStoreFolder(self, folder_id, props[0][1],
                                     table.GetRowCount(0))
            if include_sub:
                for f in self._GetSubFolderIter(folder):
                    yield f
    def GetFolder(self, folder_id):
        """Return a single MAPIMsgStoreFolder given either an Outlook folder
        object (with StoreID/EntryID attributes) or a hex ID tuple."""
        try: # catch all MAPI errors
            try:
                # See if this is an Outlook folder item
                sid = mapi.BinFromHex(folder_id.StoreID)
                eid = mapi.BinFromHex(folder_id.EntryID)
                folder_id = sid, eid
            except AttributeError:
                # No 'EntryID'/'StoreID' properties - a 'normal' ID
                folder_id = self.NormalizeID(folder_id)
            folder = self._OpenEntry(folder_id)
            table = folder.GetContentsTable(0)
            # Ensure we have a long-term ID.
            rc, props = folder.GetProps( (PR_ENTRYID, PR_DISPLAY_NAME_A), 0)
            folder_id = folder_id[0], props[0][1]
            return MAPIMsgStoreFolder(self, folder_id, props[1][1],
                                      table.GetRowCount(0))
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
    def GetMessage(self, message_id):
        """Return a single MAPIMsgStoreMsg given either the hex ID tuple or
        an Outlook message object representing it."""
        try: # catch all MAPI exceptions.
            try:
                eid = mapi.BinFromHex(message_id.EntryID)
                sid = mapi.BinFromHex(message_id.Parent.StoreID)
                message_id = sid, eid
            except AttributeError:
                # No 'EntryID'/'StoreID' properties - a 'normal' ID
                message_id = self.NormalizeID(message_id)
            mapi_object = self._OpenEntry(message_id)
            hr, data = mapi_object.GetProps(MAPIMsgStoreMsg.message_init_props,0)
            return MAPIMsgStoreMsg(self, data)
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
    def YieldReceiveFolders(self, msg_class = "IPM.Note"):
        """Yield the main receive folder of each message store, skipping
        stores that are unavailable or whose folder can't be opened."""
        # Get the main receive folder for each message store.
        tab = self.session.GetMsgStoresTable(0)
        rows = mapi.HrQueryAllRows(tab,
                                   (PR_ENTRYID,), # columns to retrieve
                                   None,    # all rows
                                   None,    # any sort order is fine
                                   0)       # any # of results is fine
        for row in rows:
            # get first entry, a (property_tag, value) pair, for PR_ENTRYID
            eid_tag, store_eid = row[0]
            try:
                store = self._GetMessageStore(store_eid)
                folder_eid, ret_class = store.GetReceiveFolder(msg_class, 0)
                hex_folder_eid = mapi.HexFromBin(folder_eid)
                hex_store_eid = mapi.HexFromBin(store_eid)
            except pythoncom.com_error, details:
                if not IsNotAvailableCOMException(details):
                    print "ERROR enumerating a receive folder -", details
                continue
            try:
                folder = self.GetFolder((hex_store_eid, hex_folder_eid))
                # For 'unconfigured' stores, or "stand-alone" PST files,
                # this is a root folder - so not what we wan't. Only return
                # folders with a parent.
                if folder.GetParent() is not None:
                    yield folder
            except MsgStoreException, details:
                print "ERROR opening receive folder -", details
                # but we just continue
                continue
# Map Python value types to the MAPI property type used to persist them.
_MapiTypeMap = {
    type(0.0): PT_DOUBLE,
    type(0): PT_I4,
    type(''): PT_STRING8,
    type(u''): PT_UNICODE,
    # In Python 2.2.2, bool isn't a distinct type (type(1==1) is type(0)).
    # type(1==1): PT_BOOLEAN,
    }
def GetPropFromStream(mapi_object, prop_id):
    """Read a property value via IStream, for values too large for GetProps.

    Returns the raw bytes read in 4KB chunks, or "" on any COM error
    (the error is printed, not raised).
    """
    try:
        stream = mapi_object.OpenProperty(prop_id,
                                          pythoncom.IID_IStream,
                                          0, 0)
        chunks = []
        while 1:
            chunk = stream.Read(4096)
            if not chunk:
                break
            chunks.append(chunk)
        return "".join(chunks)
    except pythoncom.com_error, d:
        print "Error getting property", mapiutil.GetPropTagName(prop_id), \
              "from stream:", d
        return ""
def GetPotentiallyLargeStringProp(mapi_object, prop_id, row):
    """Return the string value for a GetProps row, falling back to a stream
    read when the value was too large for simple properties.

    Returns "" when the property is missing or unreadable.
    """
    got_tag, got_val = row
    if PROP_TYPE(got_tag) == PT_ERROR:
        ret = ""
        if got_val == mapi.MAPI_E_NOT_FOUND:
            pass # No property for this message.
        elif got_val == mapi.MAPI_E_NOT_ENOUGH_MEMORY:
            # Too big for simple properties - get via a stream
            ret = GetPropFromStream(mapi_object, prop_id)
        else:
            tag_name = mapiutil.GetPropTagName(prop_id)
            err_string = mapiutil.GetScodeString(got_val)
            print "Warning - failed to get property %s: %s" % (tag_name,
                                                               err_string)
    else:
        ret = got_val
    return ret
# Some nasty stuff for getting RTF out of the message
def GetHTMLFromRTFProperty(mapi_object, prop_tag = PR_RTF_COMPRESSED):
    """Extract an HTML rendition of a message's compressed-RTF body.

    Returns "" when the property is missing or the conversion fails;
    always returns a string even though RTFStreamToHTML may yield None.
    """
    try:
        rtf_stream = mapi_object.OpenProperty(prop_tag, pythoncom.IID_IStream,
                                              0, 0)
        html_stream = mapi.WrapCompressedRTFStream(rtf_stream, 0)
        html = mapi.RTFStreamToHTML(html_stream)
    except pythoncom.com_error, details:
        if not IsNotFoundCOMException(details):
            print "ERROR getting RTF body", details
        return ""
    # html may be None if RTF not originally from HTML, but here we
    # always want a string
    return html or ''
class MAPIMsgStoreFolder:
    def __init__(self, msgstore, id, name, count):
        # msgstore: owning MAPIMsgStore; id: (store_eid, folder_eid) binary
        # tuple; name: display name; count: number of items in the folder.
        self.msgstore = msgstore
        self.id = id
        self.name = name
        self.count = count
def __repr__(self):
return "<%s '%s' (%d items), id=%s/%s>" % (self.__class__.__name__,
self.name,
self.count,
mapi.HexFromBin(self.id[0]),
mapi.HexFromBin(self.id[1]))
def __eq__(self, other):
if other is None: return False
ceid = self.msgstore.session.CompareEntryIDs
return ceid(self.id[0], other.id[0]) and \
ceid(self.id[1], other.id[1])
    def __ne__(self, other):
        """Inverse of __eq__ (Python 2 doesn't derive this automatically)."""
        return not self.__eq__(other)
    def GetID(self):
        """Return the folder ID as a (store_id, entry_id) hex-string tuple."""
        return mapi.HexFromBin(self.id[0]), mapi.HexFromBin(self.id[1])
    def GetFQName(self):
        """Return the folder's fully-qualified "Store/Parent/.../Name" path,
        using the information store's display name as the root component
        (as Outlook and our folder selector do)."""
        parts = []
        parent = self
        while parent is not None:
            parts.insert(0, parent.name)
            try:
                # Ignore errors fetching parents - the caller just wants the
                # name - it may not be correctly 'fully qualified', but at
                # least we get something.
                parent = parent.GetParent()
            except MsgStoreException:
                break
        # We now end up with [0] being an empty string??, [1] being the
        # information store root folder name, etc. Outlook etc all just
        # use the information store name here.
        if parts and not parts[0]:
            del parts[0]
        # Don't catch exceptions on the item itself - that is fatal,
        # and should be caught by the caller.
        # Replace the "root" folder name with the information store name
        # as Outlook, our Folder selector etc do.
        mapi_store = self.msgstore._GetMessageStore(self.id[0])
        hr, data = mapi_store.GetProps((PR_DISPLAY_NAME_A,), 0)
        name = data[0][1]
        if parts:
            # and replace with new name
            parts[0] = name
        else:
            # This can happen for the very root folder (ie, parent of the
            # top-level folder shown by Outlook. This folder should *never*
            # be used directly.
            parts = [name]
            print "WARNING: It appears you are using the top-level root of " \
                  "the information store as a folder. You probably don't "\
                  "want to do that"
        return "/".join(parts)
def _FolderFromMAPIFolder(self, mapifolder):
# Finally get the display name.
hr, data = mapifolder.GetProps((PR_ENTRYID, PR_DISPLAY_NAME_A,), 0)
eid = self.id[0], data[0][1]
name = data[1][1]
count = mapifolder.GetContentsTable(0).GetRowCount(0)
return MAPIMsgStoreFolder(self.msgstore, eid, name, count)
    def GetParent(self):
        """Return the parent MAPIMsgStoreFolder, or None for a top-level
        folder.  Raises a MsgStoreException if fetching the parent fails
        (which implies something is wrong with this item itself)."""
        try:
            folder = self.msgstore._OpenEntry(self.id)
            prop_ids = PR_PARENT_ENTRYID,
            hr, data = folder.GetProps(prop_ids,0)
            # Put parent ids together
            parent_eid = data[0][1]
            parent_id = self.id[0], parent_eid
            if hr != 0 or \
               self.msgstore.session.CompareEntryIDs(parent_eid, self.id[1]):
                # No parent EID, or EID same as ours.
                return None
            parent = self.msgstore._OpenEntry(parent_id)
            # Finally get the item itself
            return self._FolderFromMAPIFolder(parent)
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
    def OpenEntry(self, iid = None, flags = None):
        """Open and return the underlying MAPI folder object."""
        return self.msgstore._OpenEntry(self.id, iid, flags)
    def GetOutlookItem(self):
        """Return the Outlook object-model Folder for this MAPI folder."""
        try:
            hex_item_id = mapi.HexFromBin(self.id[1])
            hex_store_id = mapi.HexFromBin(self.id[0])
            return self.msgstore.outlook.Session.GetFolderFromID(hex_item_id, hex_store_id)
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
def GetMessageGenerator(self, only_filter_candidates = True):
folder = self.OpenEntry()
table = folder.GetContentsTable(0)
table.SetColumns(MAPIMsgStoreMsg.message_init_props, 0)
if only_filter_candidates:
# Limit ourselves to IPM.* objects - ie, messages.
restriction = (mapi.RES_PROPERTY, # a property restriction
(mapi.RELOP_GE, # >=
PR_MESSAGE_CLASS_A, # of the this prop
(PR_MESSAGE_CLASS_A, "IPM."))) # with this value
table.Restrict(restriction, 0)
while 1:
# Getting 70 at a time was the random number that gave best
# perf for me ;)
rows = table.QueryRows(70, 0)
if len(rows) == 0:
break
for row in rows:
# Our restriction helped, but may not have filtered
# every message we don't want to touch.
# Note no exception will be raised below if the message is
# moved under us, as we don't need to access any properties.
msg = MAPIMsgStoreMsg(self.msgstore, row)
if not only_filter_candidates or msg.IsFilterCandidate():
yield msg
    def GetNewUnscoredMessageGenerator(self, scoreFieldName):
        # Generator yielding each unread, IPM.* message in this folder that
        # does *not* yet have the named (Outlook user) score property set.
        # scoreFieldName is resolved via GetIDsFromNames in the
        # PS_PUBLIC_STRINGS namespace as a PT_DOUBLE property.
        folder = self.msgstore._OpenEntry(self.id)
        table = folder.GetContentsTable(0)
        # Resolve the field name
        resolve_props = ( (mapi.PS_PUBLIC_STRINGS, scoreFieldName), )
        resolve_ids = folder.GetIDsFromNames(resolve_props, 0)
        field_id = PROP_TAG( PT_DOUBLE, PROP_ID(resolve_ids[0]))
        # Setup the properties we want to read.
        table.SetColumns(MAPIMsgStoreMsg.message_init_props, 0)
        # Set up the restriction
        # Need to check message-flags
        # (PR_CONTENT_UNREAD is optional, and somewhat unreliable
        # PR_MESSAGE_FLAGS & MSGFLAG_READ is the official way)
        prop_restriction = (mapi.RES_BITMASK,   # a bitmask restriction
                            (mapi.BMR_EQZ,      # when bit is clear
                            PR_MESSAGE_FLAGS,
                            MSGFLAG_READ))
        exist_restriction = mapi.RES_EXIST, (field_id,)
        not_exist_restriction = mapi.RES_NOT, (exist_restriction,)
        # A restriction for the message class
        class_restriction = (mapi.RES_PROPERTY,   # a property restriction
                             (mapi.RELOP_GE,      # >=
                              PR_MESSAGE_CLASS_A,   # of the this prop
                              (PR_MESSAGE_CLASS_A, "IPM."))) # with this value
        # Put the final restriction together
        restriction = (mapi.RES_AND, (prop_restriction,
                                      not_exist_restriction,
                                      class_restriction))
        table.Restrict(restriction, 0)
        while 1:
            # 70-row chunks - same heuristic as GetMessageGenerator.
            rows = table.QueryRows(70, 0)
            if len(rows) == 0:
                break
            for row in rows:
                # Note no exception will be raised below if the message is
                # moved under us, as we don't need to access any properties.
                msg = MAPIMsgStoreMsg(self.msgstore, row)
                if msg.IsFilterCandidate():
                    yield msg
    def IsReceiveFolder(self, msg_class = "IPM.Note"):
        # Is this folder the nominated "receive folder" for its store?
        # Returns the (truthy/falsy) result of CompareEntryIDs, or False on
        # any COM error.
        try:
            mapi_store = self.msgstore._GetMessageStore(self.id[0])
            eid, ret_class = mapi_store.GetReceiveFolder(msg_class, 0)
            return mapi_store.CompareEntryIDs(eid, self.id[1])
        except pythoncom.com_error:
            # Error getting the receive folder from the store (or maybe our
            # store - but that would be insane!). Either way, we can't be it!
            return False
    def CreateFolder(self, name, comments = None, type = None,
                     open_if_exists = False, flags = None):
        # Create (or, with open_if_exists, open) a sub-folder of this folder
        # and return it wrapped via _FolderFromMAPIFolder.
        # NOTE: 'type' (a MAPI FOLDER_* constant, default FOLDER_GENERIC)
        # shadows the builtin, but is part of the public signature.
        if type is None: type = mapi.FOLDER_GENERIC
        if flags is None: flags = 0
        if open_if_exists: flags |= mapi.OPEN_IF_EXISTS
        folder = self.OpenEntry()
        ret = folder.CreateFolder(type, name, comments, None, flags)
        return self._FolderFromMAPIFolder(ret)
def GetItemCount(self):
try:
folder = self.OpenEntry()
return folder.GetContentsTable(0).GetRowCount(0)
except pythoncom.com_error, details:
raise MsgStoreExceptionFromCOMException(details)
    # EmptyFolder() *permanently* deletes ALL messages and subfolders from
    # this folder without deleting the folder itself.
    #
    # WORD OF WARNING: This is a *very dangerous* function that has the
    # potential to destroy a user's mail. Don't even *think* about calling
    # this function on anything but the Certain Spam folder!
    def EmptyFolder(self, parentWindow):
        # parentWindow is passed through as the HWND for the progress
        # dialog requested via FOLDER_DIALOG.
        try:
            folder = self.OpenEntry()
            folder.EmptyFolder(parentWindow, None, FOLDER_DIALOG)
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
    def DoesFolderHaveOutlookField(self, field_name):
        # Returns True if the specified folder has an *Outlook* field with
        # the given name, False if the folder does not have it, or None
        # if we can't tell, or there was an error, etc.
        # We have discovered that Outlook stores 'Fields' for a folder as a
        # PR_USERFIELDS field in the hidden, 'associated' message with
        # message class IPC.MS.REN.USERFIELDS. This is a binary property
        # which is undocumented, but probably could be reverse-engineered
        # with a little effort (see 'dump_props --dump-folder-user-props' for
        # an example of the raw data. For now, the simplest thing appears
        # to be to check for a \0 character, followed by the property name
        # as an ascii string.
        try:
            folder = self.msgstore._OpenEntry(self.id)
            # The hidden 'associated contents' table holds the userfields msg.
            table = folder.GetContentsTable(mapi.MAPI_ASSOCIATED)
            restriction = (mapi.RES_PROPERTY,
                          (mapi.RELOP_EQ,
                           PR_MESSAGE_CLASS_A,
                           (PR_MESSAGE_CLASS_A, 'IPC.MS.REN.USERFIELDS')))
            cols = (PR_USERFIELDS,)
            table.SetColumns(cols, 0)
            rows = mapi.HrQueryAllRows(table, cols, restriction, None, 0)
            if len(rows)>1:
                print "Eeek - only expecting one row from IPC.MS.REN.USERFIELDS"
                print "got", repr(rows)
                return None
            if len(rows)==0:
                # New folders with no userdefined fields do not have such a row,
                # but this is a clear indication it does not exist.
                return False
            row = rows[0]
            val = GetPotentiallyLargeStringProp(folder, cols[0], row[0])
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
        if type(val) != type(''):
            print "Value type incorrect - expected string, got", repr(val)
            return None
        # See comment above: the ascii field name preceded by \0 appears in
        # the (undocumented) binary PR_USERFIELDS blob when the field exists.
        return val.find("\0" + field_name) >= 0
    def DeleteMessages(self, message_things):
        # A *permanent* delete - MAPI has no concept of 'Deleted Items',
        # only Outlook does. If you want a "soft" delete, you must locate
        # deleted item (via a special ID) and move it to there yourself
        # message_things may be ID tuples, or MAPIMsgStoreMsg instances.
        # MAPIMsgStoreMsg instances passed in are invalidated in place
        # (mapi_object/id/folder_id set to None).
        real_ids = []
        for thing in message_things:
            if isinstance(thing, MAPIMsgStoreMsg):
                real_ids.append( thing.id[1] )
                thing.mapi_object = thing.id = thing.folder_id = None
            else:
                real_ids.append(self.msgstore.NormalizeID(thing)[1])
        try:
            folder = self.msgstore._OpenEntry(self.id)
            # Nuke my MAPI reference, and set my ID to None
            folder.DeleteMessages(real_ids, 0, None, 0)
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
    def CreateTemporaryMessage(self, msg_flags = None):
        # Create a message designed to be used temporarily. It is your
        # responsibility to delete when you are done with it.
        # If msg_flags is not None, it should be an integer for the
        # PR_MESSAGE_FLAGS property. Note that Outlook appears to refuse
        # to set user properties on a message marked as 'unsent', which
        # is the default. Setting to, eg, 1 marks it as a "not unsent, read"
        # message, which works fine with user properties.
        # Returns the new message via self.msgstore.GetMessage().
        try:
            folder = self.msgstore._OpenEntry(self.id)
            imsg = folder.CreateMessage(None, 0)
            if msg_flags is not None:
                props = (PR_MESSAGE_FLAGS,msg_flags),
                imsg.SetProps(props)
            imsg.SaveChanges(0)
            # Fetch the IDs assigned at save time so we can re-open it.
            hr, data = imsg.GetProps((PR_ENTRYID, PR_STORE_ENTRYID), 0)
            eid = data[0][1]
            storeid = data[1][1]
            msg_id = mapi.HexFromBin(storeid), mapi.HexFromBin(eid)
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
        return self.msgstore.GetMessage(msg_id)
class MAPIMsgStoreMsg:
    # A single message in a MAPI message store. Instances are built from a
    # property row (see __init__) rather than by opening the item itself.
    # All the properties we must initialize a message with.
    # These include all the IDs we need, parent IDs, any properties needed
    # to determine if this is a "filterable" message, etc
    # NOTE: __init__ unpacks this row positionally - keep order in sync.
    message_init_props = (PR_ENTRYID, PR_STORE_ENTRYID, PR_SEARCH_KEY,
                          PR_PARENT_ENTRYID, # folder ID
                          PR_MESSAGE_CLASS_A, # 'IPM.Note' etc
                          PR_RECEIVED_BY_ENTRYID, # who received it
                          PR_SUBJECT_A,
                          PR_TRANSPORT_MESSAGE_HEADERS_A,
                          )
    def __init__(self, msgstore, prop_row):
        # prop_row must be a single MAPI property row with fields in the
        # exact order of MAPIMsgStoreMsg.message_init_props.
        self.msgstore = msgstore
        self.mapi_object = None
        # prop_row is a single mapi property row, with fields as above.
        # NOTE: We can't trust these properties for "large" values
        # (ie, strings, PT_BINARY, objects etc.), as they sometimes come
        # from the IMAPITable (which has a 255 limit on property values)
        # and sometimes from the object itself (which has no restriction).
        # This limitation is documented by MAPI.
        # Thus, we don't trust "PR_TRANSPORT_MESSAGE_HEADERS_A" more than
        # to ask "does the property exist?"
        tag, eid = prop_row[0] # ID
        tag, store_eid = prop_row[1]
        tag, searchkey = prop_row[2]
        tag, parent_eid = prop_row[3]
        tag, msgclass = prop_row[4]
        recby_tag, recby = prop_row[5]
        tag, subject = prop_row[6]
        headers_tag, headers = prop_row[7]
        self.id = store_eid, eid
        self.folder_id = store_eid, parent_eid
        self.msgclass = msgclass
        self.subject = subject
        # A non-error tag means the headers property exists (we only trust
        # existence, not the truncated value - see above).
        has_headers = PROP_TYPE(headers_tag)==PT_STRING8
        # Search key is the only reliable thing after a move/copy operation
        # only problem is that it can potentially be changed - however, the
        # Outlook client provides no such (easy/obvious) way
        # (ie, someone would need to really want to change it <wink>)
        # Thus, searchkey is our long-lived message key.
        self.searchkey = searchkey
        # To check if a message has ever been received, we check the
        # PR_RECEIVED_BY_ENTRYID flag. Tim wrote in an old comment that
        # An article on the web said the distinction can't be made with 100%
        # certainty, but that a good heuristic is to believe that a
        # msg has been received iff at least one of these properties
        # has a sensible value: RECEIVED_BY_EMAIL_ADDRESS, RECEIVED_BY_NAME,
        # RECEIVED_BY_ENTRYID PR_TRANSPORT_MESSAGE_HEADERS
        # But MarkH can't find it, and believes and tests that
        # PR_RECEIVED_BY_ENTRYID is all we need (but has since discovered a
        # couple of messages without any PR_RECEIVED_BY properties - but *with*
        # PR_TRANSPORT_MESSAGE_HEADERS - *sigh*)
        self.was_received = PROP_TYPE(recby_tag) == PT_BINARY or has_headers
        self.dirty = False
        # For use with the spambayes.message messageinfo database.
        self.stored_attributes = ['c', 't', 'original_folder',
                                  'date_modified']
        self.t = None
        self.c = None
        self.date_modified = None
        self.original_folder = None
    def getDBKey(self):
        # Long lived search key - survives moves/copies, so it is used as
        # the key into the messageinfo database.
        return self.searchkey
    def __repr__(self):
        # Debug representation; self.id is None once the message has been
        # deleted or moved (see DeleteMessages/_DoCopyMove).
        if self.id is None:
            id_str = "(deleted/moved)"
        else:
            # NOTE: a tuple - renders as "('...', '...')" in the %s below.
            id_str = mapi.HexFromBin(self.id[0]), mapi.HexFromBin(self.id[1])
        return "<%s, '%s' id=%s>" % (self.__class__.__name__,
                                     self.GetSubject(),
                                     id_str)
    # as per search-key comments above, we also "enforce" this at the Python
    # level. 2 different messages, but one copied from the other, will
    # return "==".
    # Not being consistent could cause subtle bugs, especially in interactions
    # with various test tools.
    # Compare the GetID() results if you need to know different messages.
    def __hash__(self):
        # Hash on the search key, consistent with __eq__ below.
        return hash(self.searchkey)
    def __eq__(self, other):
        # Equality is search-key identity, via the MAPI session comparison.
        # NOTE(review): no isinstance check - comparing against a
        # non-MAPIMsgStoreMsg raises AttributeError rather than returning
        # NotImplemented; presumably callers only ever compare like types.
        ceid = self.msgstore.session.CompareEntryIDs
        return ceid(self.searchkey, other.searchkey)
def __ne__(self, other):
return not self.__eq__(other)
def GetID(self):
return mapi.HexFromBin(self.id[0]), mapi.HexFromBin(self.id[1])
    def GetSubject(self):
        # Subject as captured from PR_SUBJECT_A at construction time.
        return self.subject
def GetOutlookItem(self):
hex_item_id = mapi.HexFromBin(self.id[1])
hex_store_id = mapi.HexFromBin(self.id[0])
return self.msgstore.outlook.Session.GetItemFromID(hex_item_id, hex_store_id)
    def IsFilterCandidate(self):
        # We don't attempt to filter:
        # * Non-mail items
        # * Messages that weren't actually received - this generally means user
        #   composed messages yet to be sent, or copies of "sent items".
        #   It does *not* exclude messages that were user composed, but still
        #   actually received by the user (ie, when you mail yourself)
        # GroupWise generates IPM.Anti-Virus.Report.45 (but I'm not sure how
        # it manages given it is an external server, and as far as I can tell,
        # this does not appear in the headers.
        # Reads the module-level 'test_suite_running' flag.
        if test_suite_running:
            # While the test suite is running, we *only* filter test msgs.
            return self.subject == "SpamBayes addin auto-generated test message"
        class_check = self.msgclass.lower()
        # Accept IPM.Note* and IPM.Anti-Virus* message classes only.
        for check in "ipm.note", "ipm.anti-virus":
            if class_check.startswith(check):
                break
        else:
            # Not matching class - no good
            return False
        # Must match msg class to get here.
        return self.was_received
    def _GetPotentiallyLargeStringProp(self, prop_id, row):
        # Fetch a string property that may exceed the simple-property size
        # limit, delegating to the module-level helper with our open item.
        return GetPotentiallyLargeStringProp(self.mapi_object, prop_id, row)
def _GetMessageText(self):
parts = self._GetMessageTextParts()
# parts is (headers, body, html) - which needs more formalizing -
# GetMessageText should become deprecated - it makes no sense in the
# face of multi-part messages.
return "\n".join(parts)
    def _GetMessageTextParts(self):
        # Return a (headers, body, html) tuple of strings for this message.
        # This is almost reliable :). The only messages this now fails for
        # are for "forwarded" messages, where the forwards are actually
        # in an attachment. Later.
        # Note we *dont* look in plain text attachments, which we arguably
        # should.
        # This should be refactored into a function that returns the headers,
        # plus a list of email package sub-objects suitable for sending to
        # the classifier.
        from spambayes import mboxutils
        self._EnsureObject()
        prop_ids = (PR_BODY_A,
                    MYPR_BODY_HTML_A,
                    PR_TRANSPORT_MESSAGE_HEADERS_A)
        hr, data = self.mapi_object.GetProps(prop_ids,0)
        body = self._GetPotentiallyLargeStringProp(prop_ids[0], data[0])
        html = self._GetPotentiallyLargeStringProp(prop_ids[1], data[1])
        headers = self._GetPotentiallyLargeStringProp(prop_ids[2], data[2])
        # xxx - not sure what to do if we have both.
        if not html:
            html = GetHTMLFromRTFProperty(self.mapi_object)
        # Some Outlooks deliver a strange notion of headers, including
        # interior MIME armor. To prevent later errors, try to get rid
        # of stuff now that can't possibly be parsed as "real" (SMTP)
        # headers.
        headers = mboxutils.extract_headers(headers)
        # Mail delivered internally via Exchange Server etc may not have
        # headers - fake some up.
        if not headers:
            headers = self._GetFakeHeaders()
        # Mail delivered via the Exchange Internet Mail MTA may have
        # gibberish at the start of the headers - fix this.
        elif headers.startswith("Microsoft Mail"):
            headers = "X-MS-Mail-Gibberish: " + headers
        # This mail typically doesn't have a Received header, which
        # is a real PITA for running the incremental testing setup.
        # To make life easier, we add in the fake one that the message
        # would have got if it had had no headers at all.
        if headers.find("Received:") == -1:
            prop_ids = PR_MESSAGE_DELIVERY_TIME,
            hr, data = self.mapi_object.GetProps(prop_ids, 0)
            value = self._format_received(data[0][1])
            headers = "Received: %s\n%s" % (value, headers)
        if not html and not body:
            # Only ever seen this for "multipart/signed" messages, so
            # without any better clues, just handle this.
            # Find all attachments with
            # PR_ATTACH_MIME_TAG_A=multipart/signed
            # XXX - see also self._GetAttachmentsToInclude(), which
            # scans the attachment table - we should consolidate!
            table = self.mapi_object.GetAttachmentTable(0)
            restriction = (mapi.RES_PROPERTY,   # a property restriction
                           (mapi.RELOP_EQ,      # check for equality
                            PR_ATTACH_MIME_TAG_A,   # of the given prop
                            (PR_ATTACH_MIME_TAG_A, "multipart/signed")))
            try:
                rows = mapi.HrQueryAllRows(table,
                                           (PR_ATTACH_NUM,), # columns to get
                                           restriction,    # only these rows
                                           None,           # any sort order is fine
                                           0)              # any # of results is fine
            except pythoncom.com_error:
                # For some reason there are no rows we can get
                rows = []
            if len(rows) == 0:
                pass # Nothing we can fetch :(
            else:
                if len(rows) > 1:
                    print "WARNING: Found %d rows with multipart/signed" \
                          "- using first only" % len(rows)
                row = rows[0]
                (attach_num_tag, attach_num), = row
                assert attach_num_tag != PT_ERROR, \
                       "Error fetching attach_num prop"
                # Open the attachment
                attach = self.mapi_object.OpenAttach(attach_num,
                                                   None,
                                                   mapi.MAPI_DEFERRED_ERRORS)
                prop_ids = (PR_ATTACH_DATA_BIN,)
                hr, data = attach.GetProps(prop_ids, 0)
                attach_body = GetPotentiallyLargeStringProp(attach, prop_ids[0], data[0])
                # What we seem to have here now is a *complete* multi-part
                # mime message - that Outlook must have re-constituted on
                # the fly immediately after pulling it apart! - not unlike
                # exactly what we are doing ourselves right here - putting
                # it into a message object, so we can extract the text, so
                # we can stick it back into another one.  Ahhhhh.
                msg = email.message_from_string(attach_body)
                assert msg.is_multipart(), "Should be multi-part: %r" % attach_body
                # reduce down all sub messages, collecting all text/ subtypes.
                # (we could make a distinction between text and html, but
                # it is all joined together by this method anyway.)
                def collect_text_parts(msg):
                    # Recursively concatenate the payloads of all text/*
                    # sub-parts of msg.
                    collected = ''
                    if msg.is_multipart():
                        for sub in msg.get_payload():
                            collected += collect_text_parts(sub)
                    else:
                        if msg.get_content_maintype()=='text':
                            collected += msg.get_payload()
                        else:
                            #print "skipping content type", msg.get_content_type()
                            pass
                    return collected
                body = collect_text_parts(msg)
        return headers, body, html
    def _GetFakeHeaders(self):
        # This is designed to fake up some SMTP headers for messages
        # on an exchange server that do not have such headers of their own.
        # Returns a single newline-terminated string of header lines.
        prop_ids = PR_SUBJECT_A, PR_SENDER_NAME_A, PR_DISPLAY_TO_A, \
                   PR_DISPLAY_CC_A, PR_MESSAGE_DELIVERY_TIME, \
                   MYPR_MESSAGE_ID_A, PR_IMPORTANCE, PR_CLIENT_SUBMIT_TIME,
        hr, data = self.mapi_object.GetProps(prop_ids, 0)
        headers = ["X-Exchange-Message: true"]
        # NOTE: index 7 (PR_CLIENT_SUBMIT_TIME) is deliberately used twice -
        # X-Mailer only needs its truthiness; _format_version ignores the
        # value and emits a constant.
        for header, index, potentially_large, format_func in (\
            ("Subject", 0, True, None),
            ("From", 1, True, self._format_address),
            ("To", 2, True, self._format_address),
            ("CC", 3, True, self._format_address),
            ("Received", 4, False, self._format_received),
            ("Message-ID", 5, True, None),
            ("Importance", 6, False, self._format_importance),
            ("Date", 7, False, self._format_time),
            ("X-Mailer", 7, False, self._format_version),
            ):
            if potentially_large:
                value = self._GetPotentiallyLargeStringProp(prop_ids[index],
                                                            data[index])
            else:
                value = data[index][1]
            if value:
                if format_func:
                    value = format_func(value)
                headers.append("%s: %s" % (header, value))
        return "\n".join(headers) + "\n"
def _format_received(self, raw):
# Fake up a 'received' header. It's important that the date
# is right, so that sort+group.py will work. The rest is just more
# clues for the tokenizer to find.
return "(via local Exchange server); %s" % (self._format_time(raw),)
    def _format_time(self, raw):
        # Format a MAPI time value as an RFC 2822 date string in local time.
        # NOTE(review): 'formatdate' and 'timezone' are module globals -
        # presumably email.Utils.formatdate and time.timezone; confirm at
        # the top of the file.
        return formatdate(int(raw)-timezone, True)
def _format_importance(self, raw):
# olImportanceHigh = 2, olImportanceLow = 0, olImportanceNormal = 1
return {0 : "low", 1 : "normal", 2 : "high"}[raw]
    def _format_version(self, unused):
        # Constant X-Mailer value for faked headers; the raw property value
        # is only used for its truthiness by the caller.
        return "Microsoft Exchange Client"
_address_re = re.compile(r"[()<>,:@!/=; ]")
def _format_address(self, raw):
# Fudge up something that's in the appropriate form. We don't
# have enough information available to get an actual working
# email address.
addresses = raw.split(";")
formattedAddresses = []
for address in addresses:
address = address.strip()
if address.find("@") >= 0:
formattedAddress = address
else:
formattedAddress = "\"%s\" <%s>" % \
(address, self._address_re.sub('.', address))
formattedAddresses.append(formattedAddress)
return "; ".join(formattedAddresses)
    def _EnsureObject(self):
        # Lazily open the underlying MAPI item into self.mapi_object;
        # a no-op if it is already open. COM errors are re-raised as
        # MsgStoreException.
        if self.mapi_object is None:
            try:
                help_test_suite("MAPIMsgStoreMsg._EnsureObject")
                self.mapi_object = self.msgstore._OpenEntry(self.id)
            except pythoncom.com_error, details:
                raise MsgStoreExceptionFromCOMException(details)
def _GetAttachmentsToInclude(self):
# Get the list of attachments to include in the email package
# Message object. Currently only images (BUT - consider consolidating
# with the attachment handling above for signed messages!)
from spambayes.Options import options
from spambayes.ImageStripper import image_large_size_attribute
# For now, we know these are the only 2 options that need attachments.
if not options['Tokenizer', 'crack_images'] and \
not options['Tokenizer', 'image_size']:
return []
try:
table = self.mapi_object.GetAttachmentTable(0)
tags = PR_ATTACH_NUM,PR_ATTACH_MIME_TAG_A,PR_ATTACH_SIZE,PR_ATTACH_DATA_BIN
attach_rows = mapi.HrQueryAllRows(table, tags, None, None, 0)
except pythoncom.com_error, why:
attach_rows = []
attachments = []
# Create a new attachment for each image.
for row in attach_rows:
attach_num = row[0][1]
# mime-tag may not exist - eg, seen on bounce messages
mime_tag = None
if PROP_TYPE(row[1][0]) != PT_ERROR:
mime_tag = row[1][1]
# oh - what is the library for this!?
if mime_tag:
typ, subtyp = mime_tag.split('/', 1)
if typ == 'image':
size = row[2][1]
# If it is too big, just write the size. ImageStripper.py
# checks this attribute.
if size > options["Tokenizer", "max_image_size"]:
sub = MIMEImage(None, subtyp)
setattr(sub, image_large_size_attribute, size)
else:
attach = self.mapi_object.OpenAttach(attach_num,
None, mapi.MAPI_DEFERRED_ERRORS)
data = GetPotentiallyLargeStringProp(attach,
PR_ATTACH_DATA_BIN, row[3])
sub = MIMEImage(data, subtyp)
attachments.append(sub)
return attachments
    def GetEmailPackageObject(self, strip_mime_headers=True):
        # Return an email.Message object.
        #
        # strip_mime_headers is a hack, and should be left True unless you're
        # trying to display all the headers for diagnostic purposes. If we
        # figure out something better to do, it should go away entirely.
        #
        # Problem #1: suppose a msg is multipart/alternative, with
        # text/plain and text/html sections. The latter MIME decorations
        # are plain missing in what _GetMessageText() returns. If we leave
        # the multipart/alternative in the headers anyway, the email
        # package's "lax parsing" won't complain about not finding any
        # sections, but since the type *is* multipart/alternative then
        # anyway, the tokenizer finds no text/* parts at all to tokenize.
        # As a result, only the headers get tokenized. By stripping
        # Content-Type from the headers (if present), the email pkg
        # considers the body to be text/plain (the default), and so it
        # does get tokenized.
        #
        # Problem #2: Outlook decodes quoted-printable and base64 on its
        # own, but leaves any Content-Transfer-Encoding line in the headers.
        # This can cause the email pkg to try to decode the text again,
        # with unpleasant (but rarely fatal) results. If we strip that
        # header too, no problem -- although the fact that a msg was
        # encoded in base64 is usually a good spam clue, and we miss that.
        #
        # Short course: we either have to synthesize non-insane MIME
        # structure, or eliminate all evidence of original MIME structure.
        # We used to do the latter - but now that we must give valid
        # multipart messages which include attached images, we are forced
        # to try and do the former (but actually the 2 options are not
        # mutually exclusive - first we eliminate all evidence of original
        # MIME structure, before allowing the email package to synthesize
        # non-insane MIME structure.
        # We still jump through hoops though - if we have no interesting
        # attachments we attempt to return as close as possible as what
        # we always returned in the past - a "single-part" message with the
        # text and HTML as a simple text body.
        header_text, body, html = self._GetMessageTextParts()
        try: # catch all exceptions!
            # Try and decide early if we want multipart or not.
            # We originally just looked at the content-type - but Outlook
            # is unreliable WRT that header! Also, consider a message multipart message
            # with only text and html sections and no additional attachments.
            # Outlook will generally have copied the HTML and Text sections
            # into the relevant properties and they will *not* appear as
            # attachments. We should return the 'single' message here to keep
            # as close to possible to what we used to return. We can change
            # this policy in the future - but we would probably need to insist
            # on a full re-train as the training tokens will have changed for
            # many messages.
            attachments = self._GetAttachmentsToInclude()
            new_content_type = None
            if attachments:
                # Multipart: text + html + image attachments.
                _class = MIMEMultipart
                payload = []
                if body:
                    payload.append(MIMEText(body))
                if html:
                    payload.append(MIMEText(html, 'html'))
                payload += attachments
                new_content_type = "multipart/mixed"
            else:
                # Single message part with both text and HTML.
                _class = Message
                payload = body + '\n' + html
            try:
                # Parse only the headers - the payload is set explicitly below.
                root_msg = HeaderParser(_class=_class).parsestr(header_text)
            except email.Errors.HeaderParseError:
                raise # sob
            # ack - it is about here we need to do what the old code did
            # below:  But - the fact the code below is dealing only
            # with content-type (and the fact we handle that above) makes
            # it less obvious....
            ## But even this doesn't get *everything*. We can still see:
            ## "multipart message with no defined boundary" or the
            ## HeaderParseError above. Time to get brutal - hack out
            ## the Content-Type header, so we see it as plain text.
            #if msg is None:
            #    butcher_pos = text.lower().find("\ncontent-type: ")
            #    if butcher_pos < 0:
            #        # This error just just gunna get caught below anyway
            #        raise RuntimeError(
            #            "email package croaked with a MIME related error, but "
            #            "there appears to be no 'Content-Type' header")
            #    # Put it back together, skipping the original "\n" but
            #    # leaving the header leaving "\nSpamBayes-Content-Type: "
            #    butchered = text[:butcher_pos] + "\nSpamBayes-" + \
            #                text[butcher_pos+1:] + "\n\n"
            #    msg = email.message_from_string(butchered)
            # patch up mime stuff - these headers will confuse the email
            # package as it walks the attachments.
            if strip_mime_headers:
                for h, new_val in (('content-type', new_content_type),
                                   ('content-transfer-encoding', None)):
                    try:
                        root_msg['X-SpamBayes-Original-' + h] = root_msg[h]
                        del root_msg[h]
                    except KeyError:
                        pass
                    if new_val is not None:
                        root_msg[h] = new_val
            root_msg.set_payload(payload)
        # We used to call email.message_from_string(text) and catch:
        # email.Errors.BoundaryError: should no longer happen - we no longer
        # ask the email package to parse anything beyond headers.
        # email.Errors.HeaderParseError: caught above
        except:
            text = '\r\n'.join([header_text, body, html])
            print "FAILED to create email.message from: ", `text`
            raise
        return root_msg
    # XXX - this is the OLD version of GetEmailPackageObject() - it
    # temporarily remains as a testing aid, to ensure that the different
    # mime structure we now generate has no negative affects.
    # Use 'sandbox/export.py -o' to export to the testdata directory
    # in the old format, then run the cross-validation tests.
    def OldGetEmailPackageObject(self, strip_mime_headers=True):
        # Return an email.Message object.
        #
        # strip_mime_headers is a hack, and should be left True unless you're
        # trying to display all the headers for diagnostic purposes. If we
        # figure out something better to do, it should go away entirely.
        #
        # Problem #1: suppose a msg is multipart/alternative, with
        # text/plain and text/html sections. The latter MIME decorations
        # are plain missing in what _GetMessageText() returns. If we leave
        # the multipart/alternative in the headers anyway, the email
        # package's "lax parsing" won't complain about not finding any
        # sections, but since the type *is* multipart/alternative then
        # anyway, the tokenizer finds no text/* parts at all to tokenize.
        # As a result, only the headers get tokenized. By stripping
        # Content-Type from the headers (if present), the email pkg
        # considers the body to be text/plain (the default), and so it
        # does get tokenized.
        #
        # Problem #2: Outlook decodes quoted-printable and base64 on its
        # own, but leaves any Content-Transfer-Encoding line in the headers.
        # This can cause the email pkg to try to decode the text again,
        # with unpleasant (but rarely fatal) results. If we strip that
        # header too, no problem -- although the fact that a msg was
        # encoded in base64 is usually a good spam clue, and we miss that.
        #
        # Short course: we either have to synthesize non-insane MIME
        # structure, or eliminate all evidence of original MIME structure.
        # Since we don't have a way to the former, by default this function
        # does the latter.
        import email
        text = self._GetMessageText()
        try:
            try:
                msg = email.message_from_string(text)
            except email.Errors.BoundaryError:
                # In case this is the
                # "No terminating boundary and no trailing empty line"
                # flavor of BoundaryError, we can supply a trailing empty
                # line to shut it up. It's certainly ill-formed MIME, and
                # probably spam. We don't care about the exact MIME
                # structure, just the words it contains, so no harm and
                # much good in trying to suppress this error.
                try:
                    msg = email.message_from_string(text + "\n\n")
                except email.Errors.BoundaryError:
                    msg = None
            except email.Errors.HeaderParseError:
                # This exception can come from parsing the header *or* the
                # body of a mime message.
                msg = None
            # But even this doesn't get *everything*. We can still see:
            # "multipart message with no defined boundary" or the
            # HeaderParseError above. Time to get brutal - hack out
            # the Content-Type header, so we see it as plain text.
            if msg is None:
                butcher_pos = text.lower().find("\ncontent-type: ")
                if butcher_pos < 0:
                    # This error just just gunna get caught below anyway
                    raise RuntimeError(
                        "email package croaked with a MIME related error, but "
                        "there appears to be no 'Content-Type' header")
                # Put it back together, skipping the original "\n" but
                # leaving the header leaving "\nSpamBayes-Content-Type: "
                butchered = text[:butcher_pos] + "\nSpamBayes-" + \
                            text[butcher_pos+1:] + "\n\n"
                msg = email.message_from_string(butchered)
        except:
            print "FAILED to create email.message from: ", `text`
            raise
        if strip_mime_headers:
            # See Problem #1/#2 above for why these are removed.
            if msg.has_key('content-type'):
                del msg['content-type']
            if msg.has_key('content-transfer-encoding'):
                del msg['content-transfer-encoding']
        return msg
    # end of OLD GetEmailPackageObject
    def SetField(self, prop, val):
        # Set (or, with val None, delete) a property on this message and
        # mark it dirty. 'prop' is either an integer property tag, or a
        # PS_PUBLIC_STRINGS name which is resolved (creating it if needed)
        # with a type derived from type(val) via _MapiTypeMap.
        # Changes are not persisted until Save() is called.
        # Future optimization note - from GetIDsFromNames doco
        # Name-to-identifier mapping is represented by an object's
        # PR_MAPPING_SIGNATURE property. PR_MAPPING_SIGNATURE contains
        # a MAPIUID structure that indicates the service provider
        # responsible for the object. If the PR_MAPPING_SIGNATURE
        # property is the same for two objects, assume that these
        # objects use the same name-to-identifier mapping.
        # [MarkH: MAPIUID objects are supported and hashable]
        # XXX If the SpamProb (Hammie, whatever) property is passed in as an
        # XXX int, Outlook displays the field as all blanks, and sorting on
        # XXX it doesn't do anything, etc. I don't know why. Since I'm
        # XXX running Python 2.2.2, the _MapiTypeMap above confuses ints
        # XXX with bools, but the problem persists even if I comment out the
        # XXX PT_BOOLEAN entry from that dict. Dumping in prints below show
        # XXX that type_tag is 3 then, and that matches the defn of PT_I4 in
        # XXX my system header files.
        # XXX Later: This works after all, but the field shows up as all
        # XXX blanks unless I *first* modify the view (like Messages) in
        # XXX Outlook to define a custom Integer field of the same name.
        self._EnsureObject()
        try:
            if type(prop) != type(0):
                props = ( (mapi.PS_PUBLIC_STRINGS, prop), )
                propIds = self.mapi_object.GetIDsFromNames(props, mapi.MAPI_CREATE)
                type_tag = _MapiTypeMap.get(type(val))
                if type_tag is None:
                    raise ValueError, "Don't know what to do with '%r' ('%s')" % (
                                     val, type(val))
                prop = PROP_TAG(type_tag, PROP_ID(propIds[0]))
            help_test_suite("MAPIMsgStoreMsg.SetField")
            if val is None:
                # Delete the property
                self.mapi_object.DeleteProps((prop,))
            else:
                self.mapi_object.SetProps(((prop,val),))
            self.dirty = True
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
    def GetField(self, prop):
        # Return the value of a property on this message, or None if the
        # property does not exist or can't be fetched. 'prop' is either an
        # integer property tag, or a PS_PUBLIC_STRINGS name.
        # xxx - still raise_errors?
        self._EnsureObject()
        if type(prop) != type(0):
            props = ( (mapi.PS_PUBLIC_STRINGS, prop), )
            prop = self.mapi_object.GetIDsFromNames(props, 0)[0]
            if PROP_TYPE(prop) == PT_ERROR: # No such property
                return None
            # Ask for the value with whatever type it actually has.
            prop = PROP_TAG( PT_UNSPECIFIED, PROP_ID(prop))
        try:
            hr, props = self.mapi_object.GetProps((prop,), 0)
            ((tag, val), ) = props
            if PROP_TYPE(tag) == PT_ERROR:
                if val == mapi.MAPI_E_NOT_ENOUGH_MEMORY:
                    # Too big for simple properties - get via a stream
                    return GetPropFromStream(self.mapi_object, prop)
                return None
            return val
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
    def GetReadState(self):
        # True if the message is marked read (MSGFLAG_READ set in
        # PR_MESSAGE_FLAGS).
        # NOTE(review): assumes PR_MESSAGE_FLAGS always exists - if
        # GetField() ever returned None here, the '&' would raise TypeError.
        val = self.GetField(PR_MESSAGE_FLAGS)
        return (val&MSGFLAG_READ) != 0
    def SetReadState(self, is_read):
        # Mark the message read or unread, suppressing read receipts.
        # In debug builds, verifies the flag actually changed.
        try:
            self._EnsureObject()
            # always try and clear any pending delivery reports of read/unread
            help_test_suite("MAPIMsgStoreMsg.SetReadState")
            if is_read:
                self.mapi_object.SetReadFlag(USE_DEFERRED_ERRORS|SUPPRESS_RECEIPT)
            else:
                self.mapi_object.SetReadFlag(USE_DEFERRED_ERRORS|CLEAR_READ_FLAG)
            if __debug__:
                if self.GetReadState() != is_read:
                    print "MAPI SetReadState appears to have failed to change the message state"
                    print "Requested set to %s but the MAPI field after was %r" % \
                          (is_read, self.GetField(PR_MESSAGE_FLAGS))
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
    def Save(self):
        # Persist pending property changes (see SetField) and clear the
        # dirty flag. Must only be called when dirty.
        assert self.dirty, "asking me to save a clean message!"
        # It seems that *not* specifying mapi.MAPI_DEFERRED_ERRORS solves a lot
        # problems! So we don't!
        try:
            help_test_suite("MAPIMsgStoreMsg.Save")
            self.mapi_object.SaveChanges(mapi.KEEP_OPEN_READWRITE)
            self.dirty = False
        except pythoncom.com_error, details:
            raise MsgStoreExceptionFromCOMException(details)
def _DoCopyMove(self, folder, isMove):
    # Copy or move this message into *folder* via IMAPIFolder::CopyMessages.
    # Afterwards our entry-id is stale, so id/folder_id are reset to None.
    assert not self.dirty, \
           "asking me to move a dirty message - later saves will fail!"
    try:
        dest_folder = self.msgstore._OpenEntry(folder.id)
        source_folder = self.msgstore._OpenEntry(self.folder_id)
        flags = 0
        if isMove: flags |= MESSAGE_MOVE
        eid = self.id[1]
        help_test_suite("MAPIMsgStoreMsg._DoCopyMove")
        source_folder.CopyMessages((eid,),
                                   None,
                                   dest_folder,
                                   0,
                                   None,
                                   flags)
        # At this stage, I think we have lost meaningful ID etc values
        # Set everything to None to make it clearer what is wrong should
        # this become an issue. We would need to re-fetch the eid of
        # the item, and set the store_id to the dest folder.
        self.id = None
        self.folder_id = None
    except pythoncom.com_error, details:
        raise MsgStoreExceptionFromCOMException(details)
def MoveTo(self, folder):
    # Move this message to *folder*; id/folder_id become None afterwards.
    self._DoCopyMove(folder, True)
def CopyTo(self, folder):
    # Copy this message to *folder*; id/folder_id become None afterwards.
    self._DoCopyMove(folder, False)
# Functions to perform operations, but report the error (ONCE!) to the
# user. Any errors are re-raised so the caller can degrade gracefully if
# necessary.
# XXX - not too happy with these - they should go, and the caller should
# handle (especially now that we work exclusively with exceptions from
# this module).
def MoveToReportingError(self, manager, folder):
    # Move this message, reporting any MsgStoreException to the user.
    # NOTE(review): the exception is reported but not re-raised here —
    # confirm callers do not rely on it propagating.
    try:
        self.MoveTo(folder)
    except MsgStoreException, details:
        ReportMAPIError(manager, _("Moving a message"),
                        details.mapi_exception)
def CopyToReportingError(self, manager, folder):
try:
self.MoveTo(folder)
except MsgStoreException, details:
ReportMAPIError(manager, _("Copying a message"),
details.mapi_exception)
def GetFolder(self):
    # return a folder object with the parent, or None
    # folder_id is held in binary form; GetFolder expects the hex form.
    folder_id = (mapi.HexFromBin(self.folder_id[0]),
                 mapi.HexFromBin(self.folder_id[1]))
    return self.msgstore.GetFolder(folder_id)
def RememberMessageCurrentFolder(self):
    # Stash the message's current folder (store id + entry id) in two named
    # properties on the message itself, so GetRememberedFolder can later
    # restore it.
    self._EnsureObject()
    try:
        folder = self.GetFolder()
        # Also save this information in our messageinfo database, which
        # means that restoring should work even with IMAP.
        self.original_folder = folder.id[0], folder.id[1]
        props = ( (mapi.PS_PUBLIC_STRINGS, "SpamBayesOriginalFolderStoreID"),
                  (mapi.PS_PUBLIC_STRINGS, "SpamBayesOriginalFolderID")
                )
        # MAPI_CREATE: mint the named properties if they don't exist yet.
        resolve_ids = self.mapi_object.GetIDsFromNames(props, mapi.MAPI_CREATE)
        prop_ids = PROP_TAG( PT_BINARY, PROP_ID(resolve_ids[0])), \
                   PROP_TAG( PT_BINARY, PROP_ID(resolve_ids[1]))
        prop_tuples = (prop_ids[0],folder.id[0]), (prop_ids[1],folder.id[1])
        self.mapi_object.SetProps(prop_tuples)
        self.dirty = True
    except pythoncom.com_error, details:
        raise MsgStoreExceptionFromCOMException(details)
def GetRememberedFolder(self):
    # Recover the folder recorded by RememberMessageCurrentFolder, falling
    # back to the copy in the messageinfo database, else None.
    props = ( (mapi.PS_PUBLIC_STRINGS, "SpamBayesOriginalFolderStoreID"),
              (mapi.PS_PUBLIC_STRINGS, "SpamBayesOriginalFolderID")
            )
    try:
        self._EnsureObject()
        resolve_ids = self.mapi_object.GetIDsFromNames(props, mapi.MAPI_CREATE)
        prop_ids = PROP_TAG( PT_BINARY, PROP_ID(resolve_ids[0])), \
                   PROP_TAG( PT_BINARY, PROP_ID(resolve_ids[1]))
        hr, data = self.mapi_object.GetProps(prop_ids,0)
        if hr != 0:
            return None
        (store_tag, store_id), (eid_tag, eid) = data
        folder_id = mapi.HexFromBin(store_id), mapi.HexFromBin(eid)
        help_test_suite("MAPIMsgStoreMsg.GetRememberedFolder")
        return self.msgstore.GetFolder(folder_id)
    except:
        # NOTE(review): bare except deliberately treats *any* failure as
        # "no remembered folder" — it also swallows KeyboardInterrupt.
        # Try to get it from the message info database, if possible
        if self.original_folder:
            return self.msgstore.GetFolder(self.original_folder)
        print "Error locating origin of message", self
        return None
def test():
    # Smoke test: walk the default Outlook Inbox (recursively), printing
    # every folder and message found.
    outlook = Dispatch("Outlook.Application")
    inbox = outlook.Session.GetDefaultFolder(constants.olFolderInbox)
    folder_id = inbox.Parent.StoreID, inbox.EntryID
    store = MAPIMsgStore()
    for folder in store.GetFolderGenerator([folder_id,], True):
        print folder
        for msg in folder.GetMessageGenerator():
            print msg
    store.Close()

if __name__=='__main__':
    test()
|
<filename>tools/gen_profile.py
#!/usr/bin/env python2
import sys, os, collections, subprocess, sniper_lib, sniper_config
def ex_ret(cmd):
    """Run *cmd* (argv list) and return everything it wrote to stdout."""
    proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
    stdout_data, _ = proc.communicate()
    return stdout_data
def cppfilt(name):
    # Demangle a C++ symbol name by shelling out to c++filt.
    return ex_ret([ 'c++filt', name ])
class Function:
    # A profiled (static) function: demangled symbol name, source image and
    # a link-time address used to match eips across address-space layouts.
    def __init__(self, eip, name, location):
        self.eip = eip
        self.name = cppfilt(name).strip()
        # location is colon-separated; first field is the image, second the
        # load offset of that image.
        self.location = location.split(':')
        self.img = self.location[0]
        self.offset = long(self.location[1])
        # link-time address
        self.ieip = str(long(eip, 16) - self.offset)
    def __str__(self):
        return self.name
        #return '[%8s] %s' % (self.eip, self.name)
class Call:
    # One dynamic call site: a function (by eip) at a particular call
    # stack, with its accumulated statistics in `data`.
    def __init__(self, name, eip, stack, data):
        self.name = name
        self.eip = eip
        self.stack = stack
        self.data = data
    def add(self, data):
        # Merge another sample's counters into ours, key by key.
        for k, v in data.items():
            self.data[k] = self.data.get(k, 0) + v
    def buildTotal(self, prof):
        # Assumes all children have already been visited!
        self.children = prof.children[self.stack]
        # Add self to global total
        for k, v in self.data.items():
            prof.totals[k] = prof.totals.get(k, 0) + v
        # Add all children to our total
        self.total = dict(self.data)
        # Iterate over a copy: folding mutates self.children below.
        for stack in self.children.copy():
            for k, v in prof.calls[stack].total.items():
                self.total[k] += v
            # Child is to be folded: add it to self, remove from list of children
            if prof.calls[stack].folded:
                for k, v in prof.calls[stack].data.items():
                    if k != 'calls':
                        self.data[k] += v
                self.children.remove(stack)
                for grandchild in prof.calls[stack].children:
                    self.children.add(grandchild)
        # Fold into parents?
        self.folded = prof.foldCall(self)
class Category(Call):
    # A Call with no stack: aggregates statistics per category or per
    # static function (used by summarize() and writeCallgrind()).
    def __init__(self, name):
        self.name = name
        self.stack = ''
        self.data = {}
    def printLine(self, prof, obj):
        # Columns: time%, icount%, IPC, L2 MPKI, name.
        print >> obj, '%6.2f%%\t' % (100 * self.data['nonidle_elapsed_time'] / float(prof.totals['nonidle_elapsed_time'])) + \
                      '%6.2f%%\t' % (100 * self.data['instruction_count'] / float(prof.totals['instruction_count'])) + \
                      '%7.2f\t' % (self.data['instruction_count'] / (prof.fs_to_cycles * float(self.data['nonidle_elapsed_time']))) + \
                      '%7.2f\t' % (1000 * self.data['l2miss'] / float(self.data['instruction_count'])) + \
                      self.name
class CallPrinter:
    # Base class for the tree printers; subclasses supply printHeader() and
    # printLine().
    def __init__(self, prof, obj, opt_cutoff):
        self.prof = prof
        self.obj = obj
        self.opt_cutoff = opt_cutoff
    def printTree(self, stack, offset = 0):
        # Print `stack` and recurse into its children, hottest first,
        # stopping once a child falls below the relative time cutoff
        # (children are sorted, so everything after it is smaller too).
        call = self.prof.calls[stack]
        self.printLine(call, offset = offset)
        for child in sorted(call.children, key = lambda stack: self.prof.calls[stack].total['nonidle_elapsed_time'], reverse = True):
            child_time = self.prof.calls[child].total['nonidle_elapsed_time'] + self.prof.calls[child].total['waiting_cost']
            if child_time / float(self.prof.totals['nonidle_elapsed_time']) < self.opt_cutoff:
                break
            self.printTree(child, offset = offset + 1)
class CallPrinterDefault(CallPrinter):
    # Relative output: times and counts as a share of the global totals.
    def printHeader(self):
        print >> self.obj, '%7s\t%7s\t%7s\t%7s\t%7s\t%7s\t%7s\t%s' % ('calls', 'time', 't.self', 't.wait', 'icount', 'ipc', 'l2.mpki', 'name')
    def printLine(self, call, offset):
        # The `or 1` guards avoid division by zero on empty totals.
        print >> self.obj, '%7d\t' % call.data['calls'] + \
                           '%6.2f%%\t' % (100 * call.total['nonidle_elapsed_time'] / float(self.prof.totals['nonidle_elapsed_time'] or 1)) + \
                           '%6.2f%%\t' % (100 * call.data['nonidle_elapsed_time'] / float(self.prof.totals['nonidle_elapsed_time'] or 1)) + \
                           '%6.2f%%\t' % (100 * call.data['waiting_cost'] / float(self.prof.totals['total_coretime'] or 1)) + \
                           '%6.2f%%\t' % (100 * call.total['instruction_count'] / float(self.prof.totals['instruction_count'] or 1)) + \
                           '%7.2f\t' % (call.total['instruction_count'] / (self.prof.fs_to_cycles * float(call.total['nonidle_elapsed_time'] or 1))) + \
                           '%7.2f\t' % (1000 * call.total['l2miss'] / float(call.total['instruction_count'] or 1)) + \
                           ' ' * offset + call.name
class CallPrinterAbsolute(CallPrinter):
    # Absolute output: raw cycle / instruction / miss counts.
    def printHeader(self):
        print >> self.obj, '%7s\t%9s\t%9s\t%9s\t%9s\t%9s\t%9s\t%s' % ('calls', 'cycles', 'c.self', 'c.wait', 'icount', 'i.self', 'l2miss', 'name')
    def printLine(self, call, offset):
        print >> self.obj, '%7d\t' % call.data['calls'] + \
                           '%9d\t' % (self.prof.fs_to_cycles * float(call.total['nonidle_elapsed_time'])) + \
                           '%9d\t' % (self.prof.fs_to_cycles * float(call.data['nonidle_elapsed_time'])) + \
                           '%9d\t' % (self.prof.fs_to_cycles * float(call.data['waiting_cost'])) + \
                           '%9d\t' % call.total['instruction_count'] + \
                           '%9d\t' % call.data['instruction_count'] + \
                           '%9d\t' % call.total['l2miss'] + \
                           ' ' * offset + call.name
class Profile:
    # Parses a Sniper sim.rtntracefull trace into a call tree of Function /
    # Call objects and computes per-call and global totals.
    def __init__(self, resultsdir = '.'):
        filename = os.path.join(resultsdir, 'sim.rtntracefull')
        if not os.path.exists(filename):
            raise IOError('Cannot find trace file %s' % filename)
        results = sniper_lib.get_results(resultsdir = resultsdir)
        config = results['config']
        stats = results['results']
        freq = 1e9 * float(sniper_config.get_config(config, 'perf_model/core/frequency'))
        # Conversion factor from femtoseconds to core cycles.
        self.fs_to_cycles = freq / 1e15
        self.functions = {}
        self.calls = {}
        self.children = collections.defaultdict(set)
        self.roots = set()
        self.totals = {}
        fp = open(filename)
        self.headers = fp.readline().strip().split('\t')
        for line in fp:
            if line.startswith(':'):
                # Function-definition record: ":eip<TAB>name<TAB>location"
                eip, name, location = line.strip().split('\t')
                eip = eip[1:]
                self.functions[eip] = Function(eip, name, location)
            else:
                # Call-site record: "stack<TAB>counter..." with the stack a
                # colon-separated list of eips, innermost last.
                line = line.strip().split('\t')
                stack = line[0].split(':')
                eip = stack[-1]
                # Normalize all eips to link-time addresses so identical
                # code in different address spaces merges.
                stack = ':'.join(map(self.translateEip, stack))
                data = dict(zip(self.headers[1:], map(long, line[1:])))
                if stack in self.calls:
                    self.calls[stack].add(data)
                else:
                    self.calls[stack] = Call(str(self.functions[eip]), eip, stack, data)
                    parent = stack.rpartition(':')[0]
                    self.children[parent].add(stack)
        # Roots are the calls that appear as nobody's child.
        self.roots = set(self.calls.keys())
        for parent in self.calls:
            for child in self.children[parent]:
                self.roots.remove(child)
        # Construct a list of calls where each child is ordered before its parent.
        calls_ordered = collections.deque()
        calls_tovisit = collections.deque(self.roots)
        while calls_tovisit:
            stack = calls_tovisit.pop()
            calls_ordered.appendleft(stack)
            calls_tovisit.extend(self.children[stack])
        # Now implement a non-recursive version of buildTotal, which requires that each
        # function's children have been visited before processing the parent,
        # by visiting calls_ordered in left-to-right order.
        for stack in calls_ordered:
            self.calls[stack].buildTotal(self)
        ncores = int(config['general/total_cores'])
        self.totals['total_coretime'] = ncores * stats['barrier.global_time'][0]
    def translateEip(self, eip):
        # Map a runtime eip to its link-time address, if known.
        if eip in self.functions:
            return self.functions[eip].ieip
        else:
            return eip
    def foldCall(self, call):
        # Calls through the PLT are folded into their parents.
        if call.name == '.plt':
            return True
        else:
            return False
    def write(self, obj = sys.stdout, opt_absolute = False, opt_cutoff = .001):
        # Print the call tree, one root at a time, hottest first.
        if opt_absolute:
            printer = CallPrinterAbsolute(self, obj, opt_cutoff = opt_cutoff)
        else:
            printer = CallPrinterDefault(self, obj, opt_cutoff = opt_cutoff)
        printer.printHeader()
        for stack in sorted(self.roots, key = lambda stack: self.calls[stack].total['nonidle_elapsed_time'], reverse = True):
            printer.printTree(stack)
    def writeCallgrind(self, obj):
        # Emit the profile in callgrind format (aggregated per static
        # function, with per-callee cost lines), for kcachegrind and
        # gprof2dot consumption.
        bystatic = dict([ (fn.ieip, Category(fn.eip)) for fn in self.functions.values() ])
        for stack in self.calls:
            fn = self.functions[self.calls[stack].eip]
            bystatic[fn.ieip].add(self.calls[stack].data)
            children = {}
            for _stack in self.children[stack]:
                _ieip = self.functions[self.calls[_stack].eip].ieip
                if _ieip not in children:
                    children[_ieip] = Category(self.calls[_stack].eip)
                children[_ieip].add(self.calls[_stack].total)
                children[_ieip].calls = self.calls[_stack].data['calls']
            bystatic[fn.ieip].children = children
        costs = (
            ('Cycles', 'Cycles', lambda data: long(self.fs_to_cycles * data['nonidle_elapsed_time'])),
            ('Calls', 'Calls', lambda data: data['calls']),
            ('Icount', 'Instruction count', lambda data: data['instruction_count']),
            ('L2', 'L2 load misses', lambda data: data['l2miss']),
        )
        def formatData(data):
            return ' '.join(map(str, [ fn(data) for _, _, fn in costs ]))
        print >> obj, 'cmd: Sniper run'
        print >> obj, 'positions: instr'
        print >> obj, 'events:', ' '.join([ cost for cost, _, _ in costs ])
        for cost, desc, _ in costs:
            print >> obj, 'event: %s : %s' % (cost, desc)
        print >> obj, 'summary:', formatData(self.totals)
        print >> obj
        for site in sorted(bystatic.values(), key = lambda v: v.data.get('instruction_count',0), reverse=True):
            if not site.data:
                continue
            fn = self.functions[site.name]
            # NOTE(review): fn.location[2] assumes the trace's location
            # field has at least three colon-separated parts — confirm
            # against the trace format.
            print >> obj, 'ob=%s' % fn.location[0]
            print >> obj, 'fl=%s' % fn.location[2]
            print >> obj, 'fn=%s' % fn.name
            print >> obj, '0x%x' % long(fn.ieip), formatData(site.data)
            for _site in site.children.values():
                _fn = self.functions[_site.name]
                print >> obj, 'cob=%s' % _fn.location[0]
                print >> obj, 'cfi=%s' % _fn.location[2]
                print >> obj, 'cfn=%s' % _fn.name
                print >> obj, 'calls=%s 0x%x' % (_site.calls, long(_fn.ieip))
                print >> obj, '0x%x' % long(_fn.ieip), formatData(_site.data)
            print >> obj
    def summarize(self, catnames, catfilters, obj = sys.stdout):
        # Aggregate the (unfolded) calls into user-defined categories and
        # print one summary line per category.
        def get_catname(func):
            stack = func.stack
            while stack:
                has_parent = (':' in stack)
                # Find category for this function by trying a match against all filters in catfilters
                for catname, catfilter in catfilters:
                    if catfilter(self.calls[stack], self):
                        if catname:
                            return catname
                        elif has_parent:
                            # catname == None means fold into the parent
                            # break out of this for loop, and visit parent function
                            break
                        else:
                            # Ignore fold matches for root functions, try to match with another category
                            continue
                # Visit parent function
                stack = stack.rpartition(':')[0]
        bytype = dict([ (name, Category(name)) for name in catnames ])
        for func in self.calls.values():
            if not func.folded:
                catname = get_catname(func)
                bytype[catname].add(func.data)
        print >> obj, '%7s\t%7s\t%7s\t%7s' % ('time', 'icount', 'ipc', 'l2.mpki')
        for name in catnames:
            if bytype[name].data:
                bytype[name].printLine(self, obj = obj)
if __name__ == '__main__':
    # Command line: [-d resultsdir] [-o outputdir] [--abs]
    import getopt
    def usage():
        # Print usage and exit with failure.
        print '%s [-d <resultsdir (.)> | -o <outputdir>] [--abs]' % sys.argv[0]
        sys.exit(1)
    HOME = os.path.dirname(__file__)
    resultsdir = '.'
    outputdir = None
    opt_absolute = False
    try:
        opts, cmdline = getopt.getopt(sys.argv[1:], "hd:o:", ['abs'])
    except getopt.GetoptError, e:
        # print help information and exit:
        print >> sys.stderr, e
        usage()
    for o, a in opts:
        if o == '-h':
            usage()
            sys.exit()
        if o == '-d':
            resultsdir = a
        if o == '-o':
            outputdir = a
        if o == '--abs':
            opt_absolute = True
    prof = Profile(resultsdir)
    prof.write(file(os.path.join(outputdir, 'sim.profile'), 'w') if outputdir else sys.stdout, opt_absolute = opt_absolute)
    if outputdir:
        # Also emit callgrind output, then render call-graph images when
        # gprof2dot / graphviz are available.
        callgrindfile = os.path.join(outputdir, 'callgrind.out.sniper')
        prof.writeCallgrind(file(callgrindfile, 'w'))
        gprof2dot_py = os.path.join(HOME, 'gprof2dot.py')
        dotbasefile = os.path.join(outputdir, 'sim.profile')
        os.system('%s --format=callgrind --output=%s.dot %s' % (gprof2dot_py, dotbasefile, callgrindfile))
        import distutils.spawn
        if distutils.spawn.find_executable('dot'):
            os.system('dot -Tpng %s.dot -o %s.png' % (dotbasefile, dotbasefile))
            os.system('dot -Tsvg %s.dot -o %s.svg' % (dotbasefile, dotbasefile))
|
<filename>paper_trader/interpreter/interpreter.py
from argparse import ArgumentError, ArgumentParser
from cmd import Cmd
class _CmdLineParser(ArgumentParser):
"""
An extension to ArgumentParser set up for interpreter line parsing
"""
def __init__(self, *nargs, **kwargs):
super().__init__(add_help=False, exit_on_error=False, *nargs, **kwargs)
def error(self, msg):
raise Exception(msg)
def add_arg(*nargs, **kwargs):
    """
    Add an argument to the command. Takes the same arguments as
    ArgumentParser.add_argument. The command's parser is created lazily on
    the first decorated argument.
    """
    def registerer(func):
        parser = getattr(func, "_parser", None)
        if parser is None:
            # Command name is the method name minus its "cmd_" prefix.
            parser = _CmdLineParser(prog=func.__name__[4:])
            func._parser = parser
        parser.add_argument(*nargs, **kwargs)
        return func
    return registerer
class BaseInterpreter(Cmd):
"""
To use:
Subclass and add commands by adding methods with the name 'cmd_foo'
Add arguments to the command using the add_arg decorator.
"""
intro = "Type help of ? to list commands\n"
prompt = "(paper-trader) "
def __init__(self, *nargs, **kwargs):
super().__init__(*nargs, **kwargs)
self._parsers = dict[str, ArgumentParser]()
for cmd_func in dir(self):
if not cmd_func.startswith("cmd_"):
continue
func = getattr(self, cmd_func)
self._parsers[cmd_func[4:]] = func._parser
cmds = list(self._parsers.keys())
cmds.extend(("help", "exit", "quit"))
cmds.sort()
self.intro = "Available commands: " + ", ".join(cmds)
def default(self, line: str):
cmd, args = BaseInterpreter.split_line(line)
try:
parser = self._parsers[cmd]
except KeyError:
self.unknown_cmd(cmd)
else:
try:
args = parser.parse_args(
args=args
) # Doesn't consider quotes at this stage
except ArgumentError as e:
print(e, file=self.stdout)
self.do_help(cmd)
except Exception as e:
print(e, file=self.stdout)
self.do_help(cmd)
else:
getattr(self, f"cmd_{cmd}")(**vars(args))
def do_help(self, line: str | None = None):
if line:
self._parsers[line].print_help(self.stdout)
else:
print(self.intro, file=self.stdout)
def do_exit(self, _):
return True
def do_quit(self, _):
return True
def do_EOF(self, _):
return True
def unknown_cmd(self, cmd: str):
print(f"Unknown command: {cmd}", file=self.stdout)
self.do_help()
@staticmethod
def split_line(line: str):
split = line.split(" ", 1)
if len(split) > 1:
cmd = split[0]
rest = split[1]
else:
cmd = split[0]
rest = None
params = []
if rest:
quote_split = []
while rest.count('"') > 1:
first = rest.index('"')
second = rest.index('"', first + 1)
before = rest[:first]
if before:
quote_split.append((before, False))
# fmt: off
quoted = rest[first + 1: second]
quote_split.append((quoted, True))
rest = rest[second + 1:]
# fmt: on
if rest:
quote_split.append((rest, False))
for s, is_quoted in quote_split:
if is_quoted:
params.append(s)
else:
params.extend(s for s in s.split(" ") if s)
return cmd, params
|
######################################################################################################################
# The Alpha Particles 2.0 project was made by <NAME>.
######################################################################################################################
### Program for Timing Experiment 1: Concatenation using a numpy array vs using a pandas DataFrame.
import random # Produce some random numbers.
import time # ... for timing experiments.
import os # ... for making a directory.
import numpy as np # numpy is one of the contestants.
import pandas as pd # pandas is one of the contestants.
import matplotlib.pyplot as plt # ... for producing plots of the results of the timing experiment.
def main_T1():
    """Timing Experiment 1: numpy array vs pandas DataFrame concatenation.

    Repeatedly concatenates per-step position columns onto a growing numpy
    array and a growing pandas DataFrame, timing each concatenation, and
    saves mean/total timing plots into T1_Results/.
    """
    # Fix: the original wrapped os.mkdir in a bare `except: pass`, which
    # also swallowed real failures (e.g. permissions).  exist_ok=True only
    # ignores "already exists".
    os.makedirs("T1_Results", exist_ok=True)
    InitialParticleNum = 10 # ... an example. 10 alpha particles initially.
    ### Initialise lists for collecting the data of the timing experiment.
    NumStep_List = []
    timer_np_Mean_List = []
    timer_DF_Mean_List = []
    timer_np_Sum_List = []
    timer_DF_Sum_List = []
    ### Initialise the numpy array and pandas DataFrame that are to be used for concatenating more columns to them.
    x0 = np.array([[0.0]] * InitialParticleNum)
    x0_DF = pd.DataFrame([0.0] * InitialParticleNum)
    ### Start the timing experiment.
    for NumStep in range(100, 5100 + 500, 500): # NumStep represents the total number of simulation steps in the AlphaParticles2.py program.
        NumStep_List.append(NumStep) # Collect the data.
        # One random x-position column per simulation step.
        Dict = {key : [[random.random()]] * InitialParticleNum for key in range(0, NumStep)}
        timer_np = [] # Initialise a list for collecting execution times of numpy array concatentation.
        for key in range(0, len(Dict)): # Concatenate all of the lists into one numpy array.
            start_np = time.time()
            x0 = np.concatenate((x0, np.array(Dict[key])), axis = 1) # This is the numpy array concatenation we are timing.
            end_np = time.time()
            timer_np.append(end_np - start_np) # Collect the execution time for the numpy array concatenation.
        timer_np_Mean_List.append(np.array(timer_np).mean()) # Summarise the execution times.
        timer_np_Sum_List.append(np.array(timer_np).sum())
        timer_DF = [] # Initialise a list for collecting execution times of pandas DataFrame concatentation.
        for key in range(0, len(Dict)): # Concatenate all of the lists into one pandas DataFrame.
            start_DF = time.time()
            x0_DF = pd.concat([x0_DF, pd.DataFrame(Dict[key])], axis = 1) # This is the pandas DataFrame concatenation we are timing.
            end_DF = time.time()
            timer_DF.append(end_DF - start_DF)
        timer_DF_Mean_List.append(np.array(timer_DF).mean()) # Summarise the execution times.
        timer_DF_Sum_List.append(np.array(timer_DF).sum())
    ### Plot the results of the experiment.
    # Plot the average execution time as a function of the number of simulation steps.
    plt.figure()
    plt.scatter(NumStep_List, timer_np_Mean_List, marker = "x")
    plt.scatter(NumStep_List, timer_DF_Mean_List, marker = ".")
    plt.xlabel("Total number of simulation steps")
    plt.ylabel("Average time spent concatenating a list /s")
    plt.legend(labels = ("numpy array concatenation", "pandas DataFrame concatenation"))
    plt.savefig("T1_Results/AverageConcatenationTime.png")
    # Plot the total execution time as a function of the number of simulation steps.
    plt.figure()
    plt.scatter(NumStep_List, timer_np_Sum_List, marker = "x")
    plt.scatter(NumStep_List, timer_DF_Sum_List, marker = ".")
    plt.xlabel("Total number of simulation steps")
    plt.ylabel("Total time spent concatenating lists /s")
    plt.legend(labels = ("numpy array concatenation", "pandas DataFrame concatenation"))
    plt.savefig("T1_Results/TotalConcatenationTime.png")
if __name__ == "__main__":
    # Run the concatenation timing experiment when executed as a script.
    main_T1()
# REFERENCES:
# Python Software Foundation (2020). random — Generate pseudo-random numbers, https://docs.python.org/3/library/random.html.
# Python Software Foundation (2020). time — Time access and conversions, https://docs.python.org/3/library/time.html.
# Python Software Foundation (2020). os — Miscellaneous operating system interfaces, https://docs.python.org/3/library/os.html.
# NumPy (2020). NumPy v1.19.0, https://numpy.org/.
# the pandas development team (2020). pandas, https://pandas.pydata.org/.
# Hunter, J, Dale, D, Firing, E, Droettboom, M & The Matplotlib development team Matplotlib: Visualization with Python, https://matplotlib.org/. |
import numpy as np
from teacher.metrics import coverage, precision, fidelity, rule_fidelity
from teacher.tree import Rule
def test_coverage():
    """Coverage of a single three-antecedent rule over a toy dataset."""
    raw = {
        'feat1': {'val1': [0.7, 1, 0.4], 'val2': [0.3, 0, 0.6]},
        'feat2': {'val1': [0.3, 0, 0.7], 'val2': [0.7, 1, 0.3]},
        'feat3': {'val1': [0.5, 0.9, 0.3], 'val2': [0.5, 0.1, 0.7]},
    }
    dataset_membership = {
        feat: {val: np.array(arr) for val, arr in vals.items()}
        for feat, vals in raw.items()
    }
    antecedent = (('feat1', 'val1'), ('feat2', 'val1'), ('feat3', 'val1'))
    rule = Rule(antecedent, 'conse', 0.5)
    np.testing.assert_almost_equal(coverage([rule], dataset_membership), 0.6666666666)
def test_coverage_multiple_rules():
    """Two rules together cover the whole toy dataset."""
    raw = {
        'feat1': {'val1': [0.7, 1, 0.4], 'val2': [0.3, 0, 0.6]},
        'feat2': {'val1': [0.3, 0, 0.7], 'val2': [0.7, 1, 0.3]},
        'feat3': {'val1': [0.5, 0.9, 0.3], 'val2': [0.5, 0.1, 0.7]},
    }
    dataset_membership = {
        feat: {val: np.array(arr) for val, arr in vals.items()}
        for feat, vals in raw.items()
    }
    r1 = Rule((('feat1', 'val1'), ('feat2', 'val1'), ('feat3', 'val1')), 'conse', 0.5)
    r2 = Rule((('feat1', 'val1'), ('feat2', 'val2'), ('feat3', 'val1')), 'conse', 0.5)
    np.testing.assert_almost_equal(coverage([r1, r2], dataset_membership), 1)
def test_precision():
    """Precision of one rule given ground-truth labels."""
    raw = {
        'feat1': {'val1': [0.7, 1, 0.4], 'val2': [0.3, 0, 0.6]},
        'feat2': {'val1': [0.3, 0, 0.7], 'val2': [0.7, 1, 0.3]},
        'feat3': {'val1': [0.5, 0.9, 0.3], 'val2': [0.5, 0.1, 0.7]},
    }
    dataset_membership = {
        feat: {val: np.array(arr) for val, arr in vals.items()}
        for feat, vals in raw.items()
    }
    y = np.array(['conse', 'conse', 'noconse'])
    rule = Rule((('feat1', 'val1'), ('feat2', 'val1'), ('feat3', 'val1')), 'conse', 0.5)
    np.testing.assert_almost_equal(precision([rule], dataset_membership, y), 0.5)
def test_fidelity():
    """Global vs local predictions agree on 2 of 3 instances."""
    y_global = np.array(['conse', 'conse', 'noconse'])
    y_local = np.array(['conse', 'noconse', 'noconse'])
    np.testing.assert_almost_equal(fidelity(y_global, y_local), 0.6666666666666666)
def test_rule_fidelity():
    """Rule fidelity over the instances the rule fires on."""
    raw = {
        'feat1': {'val1': [0.7, 1, 0.4], 'val2': [0.3, 0, 0.6]},
        'feat2': {'val1': [0.3, 0, 0.7], 'val2': [0.7, 1, 0.3]},
        'feat3': {'val1': [0.5, 0.9, 0.3], 'val2': [0.5, 0.1, 0.7]},
    }
    dataset_membership = {
        feat: {val: np.array(arr) for val, arr in vals.items()}
        for feat, vals in raw.items()
    }
    y = np.array(['conse', 'conse', 'noconse'])
    y_local = np.array(['conse', 'noconse', 'conse'])
    rule = Rule((('feat1', 'val1'), ('feat2', 'val1'), ('feat3', 'val1')), 'conse', 0.5)
    np.testing.assert_almost_equal(rule_fidelity(y, y_local, dataset_membership, [rule]), 0.5)
|
import tweepy
import requests
from random import randrange
randNum=randrange(40)
def main():
    """Post a daily activity suggestion, then like/retweet/follow across
    several programming hashtags.

    Fix: the per-tweet `except:` clauses were bare, which also swallowed
    KeyboardInterrupt/SystemExit; they now catch Exception only (still
    best-effort — API failures on one tweet just skip to the next).
    """
    response=requests.get("https://www.boredapi.com/api/activity")
    joke=requests.get("https://official-joke-api.appspot.com/jokes/programming/random")
    # NOTE(review): consumer_key/consumer_secret/access_token/
    # access_token_secret are not defined anywhere in this file —
    # presumably stripped credentials; confirm how they are supplied.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    tweet=" Bored? Here's What You Can Do Today:\n"+response.json()['activity']+" \nActivity Type: #"+response.json()['type'] +" #boredombotsuggestions #pythonprogramming #python #fun #quarantinecoding #bored "
    api = tweepy.API(auth)
    api.update_status(status = (tweet))
    for tweet in tweepy.Cursor(api.search, q='#python', rpp=5).items():
        try:
            api.create_favorite(tweet.id)
            api.retweet(tweet.id)
            api.create_friendship(tweet.author.name)
        except Exception:
            continue
    for tweet in tweepy.Cursor(api.search, q='#pythonprojects', rpp=3).items(10):
        try:
            api.create_favorite(tweet.id)
            api.create_friendship(tweet.author.name)
        except Exception:
            continue
    for tweet in tweepy.Cursor(api.search, q='#programming', rpp=25).items(10):
        user = api.get_user(screen_name = tweet.user.screen_name)
        ID = user.id
        print(ID)
        test=joke.json()[0]['setup']+'\n'+joke.json()[0]['punchline']
        try:
            api.create_favorite(tweet.id)
            api.create_friendship(tweet.author.name)
            api.send_direct_message(ID, "Hi, BoredomBot has followed you because your post caused deiviations in the base temperature of my cold, digital, life unit, beep boop! "+test)
        except Exception:
            continue
    for tweet in tweepy.Cursor(api.search, q='#coding', rpp=25).items():
        try:
            api.create_favorite(tweet.id)
            api.create_friendship(tweet.author.name)
            api.retweet(tweet.id)
        except Exception:
            continue
    for tweet in tweepy.Cursor(api.search, q='#softwareengineering').items(50):
        try:
            api.create_favorite(tweet.id)
            api.create_friendship(tweet.author.name)
        except Exception:
            continue
    for tweet in tweepy.Cursor(api.search, q='#100DaysOfCode', rpp=2).items():
        try:
            api.create_favorite(tweet.id)
            api.create_friendship(tweet.author.name)
        except Exception:
            continue
    for tweet in tweepy.Cursor(api.search, q='#IoT', rpp=10).items():
        try:
            api.create_favorite(tweet.id)
            api.create_friendship(tweet.author.name)
        except Exception:
            continue
    for tweet in tweepy.Cursor(api.search, q='#javascript').items(100):
        try:
            api.create_favorite(tweet.id)
            api.create_friendship(tweet.author.name)
        except Exception:
            continue
def follow_followers(api):
    """Follow back everyone following the bot, tweeting a greeting for each.

    Fix: `logger` was referenced but never defined anywhere in this module,
    so calling this function raised NameError; a module-named stdlib logger
    is now created locally.
    """
    import logging
    logger = logging.getLogger(__name__)
    logger.info("Retrieving and following followers")
    for follower in tweepy.Cursor(api.followers).items():
        if not follower.following:
            greet="Hello @"+follower.name +"!! Thank you so much for following! I've gone ahead and followed you back <3"
            logger.info(f"Following {follower.name}")
            follower.follow()
            api.update_status(status = (greet))
if __name__=="__main__":
    # NOTE(review): consumer_key/consumer_secret/access_token/
    # access_token_secret are not defined anywhere in this file —
    # presumably stripped credentials; as written this raises NameError.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    main()
    follow_followers(api)
|
import re
import sys
import os
import numpy as np
import cv2
import torch
import torch.backends.cudnn as cudnn
from scipy.ndimage.filters import gaussian_filter
from gazenet.utils.registrar import *
from gazenet.models.saliency_prediction.tased.generator import load_video_frames
from gazenet.models.saliency_prediction.tased.model import TASED_v2
from gazenet.utils.sample_processors import InferenceSampleProcessor
# Checkpoint location per model short-name.
MODEL_PATHS = {
    "tased": os.path.join("gazenet", "models", "saliency_prediction", "tased", "checkpoints", "pretrained_tased_orig", "model.pt")}

# Frame size the network is fed, and the clip length (frames) per inference.
INP_IMG_WIDTH = 384
INP_IMG_HEIGHT = 224
FRAMES_LEN = 32
@InferenceRegistrar.register
class TASEDInference(InferenceSampleProcessor):
    # Saliency-prediction inference wrapper around the TASED_v2 video model:
    # loads the checkpoint, runs per-frame prediction, and annotates frames
    # with the resulting saliency maps.
    def __init__(self, weights_file=MODEL_PATHS['tased'], w_size=32,
                 frames_len=FRAMES_LEN, inp_img_width=INP_IMG_WIDTH, inp_img_height=INP_IMG_HEIGHT,
                 device="cuda:0", width=None, height=None, **kwargs):
        super().__init__(width=width, height=height, w_size=w_size, **kwargs)
        self.short_name = "tased"
        self._device = device
        self.frames_len = frames_len
        self.inp_img_width = inp_img_width
        self.inp_img_height = inp_img_height
        # load the model
        self.model = TASED_v2()
        self.model = self._load_model_(weights_file, self.model)
        print("TASED model loaded from", weights_file)
        self.model = self.model.to(device)
        cudnn.benchmark = False
        self.model.eval()

    @staticmethod
    def _load_model_(filepath, model):
        # Copy matching weights from the checkpoint into `model`, stripping
        # any leading 'module.' component (DataParallel checkpoints);
        # name/size mismatches are printed and skipped.  Returns `model`
        # unchanged if the file does not exist.
        if os.path.isfile(filepath):
            weight_dict = torch.load(filepath)
            model_dict = model.state_dict()
            for name, param in weight_dict.items():
                if 'module' in name:
                    name = '.'.join(name.split('.')[1:])
                if name in model_dict:
                    if param.size() == model_dict[name].size():
                        model_dict[name].copy_(param)
                    else:
                        print(' size? ' + name, param.size(), model_dict[name].size())
                else:
                    print(' name? ' + name)
        return model

    def infer_frame(self, grabbed_video_list, grouped_video_frames_list, grabbed_audio_list, audio_frames_list, info_list, properties_list,
                    video_frames_list, source_frames_idxs=None, **kwargs):
        # Run saliency prediction for the selected frame indices (all frames
        # when source_frames_idxs is None); each frame's normalized map is
        # attached to its info dict under "frame_detections_tased".
        frames_idxs = range(len(grouped_video_frames_list)) if source_frames_idxs is None else source_frames_idxs
        for f_idx, frame_id in enumerate(frames_idxs):
            info = {"frame_detections_" + self.short_name: {
                "saliency_maps": [],  # detected
            }}
            video_frames_tensor = load_video_frames(video_frames_list[:frame_id+1],
                                                    frame_id+1,
                                                    img_width=self.inp_img_width, img_height=self.inp_img_height,
                                                    frames_len=self.frames_len)
            video_frames = video_frames_tensor.to(self._device)
            video_frames = torch.unsqueeze(video_frames, 0)
            with torch.no_grad():
                final_prediction = self.model(video_frames)
            # get the visual feature maps
            for prediction, prediction_name in zip([final_prediction],["saliency_maps"]):
                saliency = prediction.cpu().data[0].numpy()
                # saliency = (saliency*255.).astype(np.int)/255.
                # Smooth, then normalize to a 0..1 peak.
                saliency = gaussian_filter(saliency, sigma=7)
                saliency = saliency/np.max(saliency)
                info["frame_detections_" + self.short_name][prediction_name].append((saliency, -1))
            info_list[frame_id].update(**info)
        kept_data = self._keep_extracted_frames_data(source_frames_idxs, grabbed_video_list, grouped_video_frames_list,
                                                     grabbed_audio_list, audio_frames_list, info_list, properties_list)
        return kept_data

    def preprocess_frames(self, video_frames_list=None, **kwargs):
        # Pad the trailing window by repeating the last in-range frame so
        # every window has the expected length.
        features = super().preprocess_frames(**kwargs)
        pad = features["preproc_pad_len"]
        lim = features["preproc_lim_len"]
        if video_frames_list is not None:
            video_frames_list = list(video_frames_list)
            features["video_frames_list"] = video_frames_list[:lim] + [video_frames_list[lim]] * pad
        return features

    def annotate_frame(self, input_data, plotter,
                       show_det_saliency_map=True,
                       enable_transform_overlays=True,
                       color_map=None,
                       **kwargs):
        # Render the detected saliency map either as an alpha overlay on the
        # captured frame or as a standalone color-mapped image.
        grabbed_video, grouped_video_frames, grabbed_audio, audio_frames, info, properties = input_data
        properties = {**properties, "show_det_saliency_map": (show_det_saliency_map, "toggle", (True, False))}
        grouped_video_frames = {**grouped_video_frames,
                                "PLOT": grouped_video_frames["PLOT"] + [["det_source_" + self.short_name,
                                                                         "det_transformed_" + self.short_name]],
                                "det_source_" + self.short_name: grouped_video_frames["captured"],
                                "det_transformed_" + self.short_name: grouped_video_frames["captured"]
                                if enable_transform_overlays else np.zeros_like(grouped_video_frames["captured"])}
        for saliency_map_name, frame_name in zip(["saliency_maps"],[""]):
            if grabbed_video:
                if show_det_saliency_map:
                    saliency_map = info["frame_detections_" + self.short_name][saliency_map_name][0][0]
                    frame_transformed = plotter.plot_color_map(np.uint8(255 * saliency_map), color_map=color_map)
                    if enable_transform_overlays:
                        frame_transformed = plotter.plot_alpha_overlay(grouped_video_frames["det_transformed_" +
                                                                                            frame_name + self.short_name],
                                                                       frame_transformed, alpha=0.4)
                    else:
                        frame_transformed = plotter.resize(frame_transformed,
                                                           height=grouped_video_frames["det_transformed_" + frame_name +
                                                                                       self.short_name].shape[0],
                                                           width=grouped_video_frames["det_transformed_" + frame_name +
                                                                                      self.short_name].shape[1])
                    grouped_video_frames["det_transformed_" + frame_name + self.short_name] = frame_transformed
        return grabbed_video, grouped_video_frames, grabbed_audio, audio_frames, info, properties
|
import numpy as np
import pandas as pd
import decorators
from scipy import optimize
import settings
import utility_functions as utilfunc
import agent_mutation
import PySAM.Battwatts as battery
import PySAM.BatteryTools as batt_tools
import PySAM.Utilityrate5 as utility
import PySAM.Cashloan as cashloan
#==============================================================================
# Load logger
# Module-level logger shared by all functions in this module.
logger = utilfunc.get_logger()
#==============================================================================

#%%
def calc_system_performance(kw, pv, utilityrate, loan, batt, costs, agent, en_batt=True, batt_simple_dispatch=0):
    """
    Executes Battwatts, Utilityrate5, and Cashloan PySAM modules with system sizes (kw) as input

    Parameters
    ----------
    kw: Capacity (in kW)
    pv: Dictionary with generation_hourly and consumption_hourly
    utilityrate: PySAM Utilityrate5 module
    loan: PySAM Cashloan module
    batt: PySAM Battwatts module
    costs: Dictionary with system costs
    agent: pd.Series with agent attributes
    en_batt: Enable battery
    batt_simple_dispatch: batt.Battery.batt_simple_dispatch
        - batt_simple_dispatch = 0 (peak shaving look ahead)
        - batt_simple_dispatch = 1 (peak shaving look behind)

    Returns
    -------
    -loan.Outputs.npv: the negative net present value of system + storage to be optimized for system sizing
    """
    inv_eff = 0.96  # default SAM inverter efficiency for PV
    gen_hourly = pv['generation_hourly']
    load_hourly = pv['consumption_hourly']  # same field as 'load_kwh_per_customer_in_bin_initial' when summed

    dc = [(i * kw) * 1000 for i in gen_hourly]  # W
    ac = [i * inv_eff for i in dc]  # W
    gen = [i / 1000 for i in ac]  # W to kW

    # Declare value for net billing sell rate and restructure the tariff.
    # (Hoisted out of the battery/no-battery branches: both branches performed
    # exactly the same computation, and it does not depend on battery state.)
    if agent.loc['compensation_style'] == 'none':
        net_billing_sell_rate = 0.
    else:
        net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
    utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)

    # PV capital cost applies in both configurations.
    # Bug fix: this was previously assigned only in the no-battery branch,
    # which raised a NameError at the direct_costs computation when en_batt=True.
    system_costs = costs['system_capex_per_kw'] * kw

    # Set up battery, with system generation conditional on the battery generation being included
    if en_batt:
        batt.Battery.dc = dc
        batt.Battery.ac = ac
        batt.Battery.batt_simple_enable = 1
        batt.Battery.batt_simple_chemistry = 1  # default value is 1: li ion for residential
        batt.Battery.batt_simple_dispatch = batt_simple_dispatch
        batt.Battery.batt_simple_meter_position = 0  # default value
        batt.Battery.inverter_efficiency = 100  # recommended by Darice for dc-connected
        batt.Battery.load = load_hourly

        # PV to Battery ratio (kW) - From Ashreeta, 02/08/2020
        pv_to_batt_ratio = 1.31372
        batt_capacity_to_power_ratio = 2  # hours of operation

        desired_size = kw / pv_to_batt_ratio  # Default SAM value for residential systems is 10
        desired_power = desired_size / batt_capacity_to_power_ratio

        batt_inputs = {
            'batt_chem': batt.Battery.batt_simple_chemistry,
            'batt_Qfull': 2.5,  # default SAM value
            'batt_Vnom_default': 3.6,  # default SAM value
            'batt_ac_or_dc': 0,  # dc-connected
            'desired_power': desired_power,
            'desired_capacity': desired_size,
            'desired_voltage': 500,
            'size_by_ac_not_dc': 0,  # dc-connected
            'inverter_eff': batt.Battery.inverter_efficiency
            # 'batt_dc_dc_efficiency': (optional)
        }

        # Default values for lead acid batteries
        if batt.Battery.batt_simple_chemistry == 0:
            batt_inputs['LeadAcid_q10'] = 93.2
            batt_inputs['LeadAcid_q20'] = 100
            batt_inputs['LeadAcid_qn'] = 58.12
            # batt_inputs['LeadAcid_tn']: (optional)

        # PySAM.BatteryTools.size_li_ion_battery is the same as dGen_battery_sizing_battwatts.py
        batt_outputs = batt_tools.size_li_ion_battery(batt_inputs)

        computed_size = batt_outputs['batt_computed_bank_capacity']
        computed_power = batt_outputs['batt_power_discharge_max_kwdc']

        batt.Battery.batt_simple_kwh = computed_size
        batt.Battery.batt_simple_kw = computed_power

        batt.execute()

        # Generation seen by the tariff module includes battery dispatch
        utilityrate.SystemOutput.gen = batt.Outputs.gen

        loan.BatterySystem.en_batt = 1
        loan.BatterySystem.batt_computed_bank_capacity = batt.Outputs.batt_bank_installed_capacity
        loan.BatterySystem.batt_bank_replacement = batt.Outputs.batt_bank_replacement

        # Battery capacity-based System Costs amount [$/kWhcap]
        loan.BatterySystem.battery_per_kWh = costs['batt_capex_per_kwh']

        # specify number of O&M types (1 = PV+batt)
        loan.SystemCosts.add_om_num_types = 1
        # specify O&M variables
        loan.SystemCosts.om_capacity = [costs['system_om_per_kw'] + costs['system_variable_om_per_kw']]
        loan.SystemCosts.om_capacity1 = [costs['batt_om_per_kw']]
        loan.SystemCosts.om_production1 = [costs['batt_om_per_kwh'] * 1000]
        loan.SystemCosts.om_replacement_cost1 = [0.]
        # Battery capacity for System Costs values [kW]
        loan.SystemCosts.om_capacity1_nameplate = batt.Battery.batt_simple_kw
        # Battery production for System Costs values [kWh]
        loan.SystemCosts.om_production1_values = [batt.Battery.batt_simple_kwh]

        batt_costs = ((costs['batt_capex_per_kw'] * batt.Battery.batt_simple_kw) +
                      (costs['batt_capex_per_kwh'] * batt.Battery.batt_simple_kwh))
    else:
        batt.Battery.batt_simple_enable = 0
        loan.BatterySystem.en_batt = 0
        computed_power = computed_size = 0

        # PV-only generation feeds the tariff module
        utilityrate.SystemOutput.gen = gen

        # specify number of O&M types (0 = PV only)
        loan.SystemCosts.add_om_num_types = 0
        # specify O&M variables
        loan.SystemCosts.om_capacity = [costs['system_om_per_kw'] + costs['system_variable_om_per_kw']]
        loan.SystemCosts.om_replacement_cost1 = [0.]

        batt_costs = 0

    # Execute utility rate module
    utilityrate.Load.load = load_hourly
    utilityrate.execute()

    # Process payment incentives
    loan = process_incentives(loan, kw, computed_power, computed_size, gen_hourly, agent)

    # Specify final Cashloan parameters
    loan.FinancialParameters.system_capacity = kw
    loan.SystemOutput.annual_energy_value = utilityrate.Outputs.annual_energy_value
    loan.SystemOutput.gen = utilityrate.SystemOutput.gen
    loan.ThirdPartyOwnership.elec_cost_with_system = utilityrate.Outputs.elec_cost_with_system
    loan.ThirdPartyOwnership.elec_cost_without_system = utilityrate.Outputs.elec_cost_without_system

    # Calculate system costs
    direct_costs = (system_costs + batt_costs) * costs['cap_cost_multiplier']
    sales_tax = 0
    loan.SystemCosts.total_installed_cost = direct_costs + sales_tax

    # Execute financial module
    loan.execute()

    # Negated so a scalar minimizer maximizes NPV
    return -loan.Outputs.npv
def calc_system_size_and_performance_pv(agent, sectors, rate_switch_table=None):
    """
    Calculate the optimal system and battery size and generation profile, and resulting bill savings and financial metrics.

    Parameters
    ----------
    agent : 'pd.df'
        individual agent object.

    Returns
    -------
    agent: 'pd.df'
        Adds several features to the agent dataframe:

        - **agent_id**
        - **system_kw** - system capacity selected by agent
        - **batt_kw** - battery capacity selected by agent
        - **batt_kwh** - battery energy capacity
        - **npv** - net present value of system + storage
        - **cash_flow** - array of annual cash flows from system adoption
        - **batt_dispatch_profile** - array of hourly battery dispatch
        - **annual_energy_production_kwh** - annual energy production (kwh) of system
        - **naep** - normalized annual energy production (kwh/kW) of system
        - **capacity_factor** - annual capacity factor
        - **first_year_elec_bill_with_system** - first year electricity bill with adopted system ($/yr)
        - **first_year_elec_bill_savings** - first year electricity bill savings with adopted system ($/yr)
        - **first_year_elec_bill_savings_frac** - fraction of savings on electricity bill in first year of system adoption
        - **max_system_kw** - maximum system size allowed as constrained by roof size or not exceeding annual consumption
        - **first_year_elec_bill_without_system** - first year electricity bill without adopted system ($/yr)
        - **avg_elec_price_cents_per_kwh** - first year electricity price (c/kwh)
        - **cbi** - ndarray of capacity-based incentives applicable to agent
        - **ibi** - ndarray of investment-based incentives applicable to agent
        - **pbi** - ndarray of performance-based incentives applicable to agent
        - **cash_incentives** - ndarray of cash-based incentives applicable to agent
        - **export_tariff_result** - summary of structure of retail tariff applied to agent
    """
    # Initialize new DB connection
    model_settings = settings.init_model_settings()
    con, cur = utilfunc.make_con(model_settings.pg_conn_string, model_settings.role)

    # PV
    pv = dict()

    # Extract load profile after scaling hourly load to annual total
    load_profile_df = agent_mutation.elec.get_and_apply_agent_load_profiles(con, agent)
    pv['consumption_hourly'] = pd.Series(load_profile_df['consumption_hourly']).iloc[0]
    del load_profile_df

    # Using the scale offset factor of 1E6 for capacity factors
    norm_scaled_pv_cf_profiles_df = agent_mutation.elec.get_and_apply_normalized_hourly_resource_solar(con, agent)
    pv['generation_hourly'] = pd.Series(norm_scaled_pv_cf_profiles_df['solar_cf_profile'].iloc[0]) / 1e6
    del norm_scaled_pv_cf_profiles_df

    # Bug fix: close the DB connection as soon as the profile queries are done.
    # No further queries are issued in this function, and leaving it open
    # leaked one connection per agent processed.
    con.close()

    # Calculate normalized annual energy production
    agent.loc['naep'] = float(np.sum(pv['generation_hourly']))

    # Battwatts
    if agent.loc['sector_abbr'] == 'res':
        batt = battery.default("PVWattsBatteryResidential")
    else:
        batt = battery.default("PVWattsBatteryCommercial")

    # Utilityrate5
    if agent.loc['sector_abbr'] == 'res':
        utilityrate = utility.default("PVWattsBatteryResidential")
    else:
        utilityrate = utility.default("PVWattsBatteryCommercial")

    ######################################
    ###--------- UTILITYRATE5 ---------###
    ###--- SYSTEM LIFETIME SETTINGS ---###
    ######################################

    # Inflation rate [%]
    utilityrate.Lifetime.inflation_rate = agent.loc['inflation_rate'] * 100
    # Number of years in analysis [years]
    utilityrate.Lifetime.analysis_period = agent.loc['economic_lifetime_yrs']
    # Lifetime hourly system outputs [0/1]; Options: 0=hourly first year,1=hourly lifetime
    utilityrate.Lifetime.system_use_lifetime_output = 0

    ######################################
    ###--------- UTILITYRATE5 ---------###
    ###---- DEGRADATION/ESCALATION ----###
    ######################################

    # Annual energy degradation [%]
    utilityrate.SystemOutput.degradation = [agent.loc['pv_degradation_factor'] * 100]  # convert decimal to %
    # Annual electricity rate escalation [%/year]
    utilityrate.ElectricityRates.rate_escalation = [agent.loc['elec_price_escalator'] * 100]  # convert decimal to %

    ######################################
    ###--------- UTILITYRATE5 ---------###
    ###---- NET METERING SETTINGS -----###
    ######################################

    # Dictionary to map dGen compensation styles to PySAM options
    nem_options = {'net metering': 0, 'net billing': 2, 'buy all sell all': 4, 'none': 2}
    # Metering options [0=net energy metering,1=net energy metering with $ credits,2=net billing,3=net billing with carryover to next month,4=buy all - sell all]
    utilityrate.ElectricityRates.ur_metering_option = nem_options[agent.loc['compensation_style']]
    # Year end sell rate [$/kWh]
    utilityrate.ElectricityRates.ur_nm_yearend_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']

    if agent.loc['compensation_style'] == 'none':
        net_billing_sell_rate = 0.
    else:
        net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']

    ######################################
    ###--------- UTILITYRATE5 ---------###
    ###-------- BUY/SELL RATES --------###
    ######################################

    # Enable time step sell rates [0/1]
    utilityrate.ElectricityRates.ur_en_ts_sell_rate = 0
    # Time step sell rates [0/1]
    utilityrate.ElectricityRates.ur_ts_sell_rate = [0.]
    # Set sell rate equal to buy rate [0/1]
    utilityrate.ElectricityRates.ur_sell_eq_buy = 0

    ######################################
    ###--------- UTILITYRATE5 ---------###
    ###-------- MISC. SETTINGS --------###
    ######################################

    # Use single monthly peak for TOU demand charge; options: 0=use TOU peak,1=use flat peak
    utilityrate.ElectricityRates.TOU_demand_single_peak = 0  # ?
    # Optionally enable/disable electricity_rate [years]
    utilityrate.ElectricityRates.en_electricity_rates = 1

    ######################################
    ###--------- UTILITYRATE5 ---------###
    ###----- TARIFF RESTRUCTURING -----###
    ######################################
    utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)

    ######################################
    ###----------- CASHLOAN -----------###
    ###----- FINANCIAL PARAMETERS -----###
    ######################################

    # Initiate cashloan model and set market-specific variables
    # Assume res agents do not evaluate depreciation at all
    # Assume non-res agents only evaluate federal depreciation (not state)
    if agent.loc['sector_abbr'] == 'res':
        loan = cashloan.default("PVWattsBatteryResidential")
        loan.FinancialParameters.market = 0
    else:
        loan = cashloan.default("PVWattsBatteryCommercial")
        loan.FinancialParameters.market = 1

    loan.FinancialParameters.analysis_period = agent.loc['economic_lifetime_yrs']
    loan.FinancialParameters.debt_fraction = 100 - (agent.loc['down_payment_fraction'] * 100)
    loan.FinancialParameters.federal_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.7]  # SAM default
    loan.FinancialParameters.inflation_rate = agent.loc['inflation_rate'] * 100
    loan.FinancialParameters.insurance_rate = 0
    loan.FinancialParameters.loan_rate = agent.loc['loan_interest_rate'] * 100
    loan.FinancialParameters.loan_term = agent.loc['loan_term_yrs']
    loan.FinancialParameters.mortgage = 0  # default value - standard loan (no mortgage)
    loan.FinancialParameters.prop_tax_assessed_decline = 5  # PySAM default
    loan.FinancialParameters.prop_tax_cost_assessed_percent = 95  # PySAM default
    loan.FinancialParameters.property_tax_rate = 0  # PySAM default
    loan.FinancialParameters.real_discount_rate = agent.loc['real_discount_rate'] * 100
    loan.FinancialParameters.salvage_percentage = 0
    loan.FinancialParameters.state_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.3]  # SAM default
    loan.FinancialParameters.system_heat_rate = 0

    ######################################
    ###----------- CASHLOAN -----------###
    ###--------- SYSTEM COSTS ---------###
    ######################################

    # System costs that are input to loan.SystemCosts will depend on system configuration (PV, batt, PV+batt)
    # and are therefore specified in calc_system_performance()
    system_costs = dict()
    system_costs['system_capex_per_kw'] = agent.loc['system_capex_per_kw']
    system_costs['system_om_per_kw'] = agent.loc['system_om_per_kw']
    system_costs['system_variable_om_per_kw'] = agent.loc['system_variable_om_per_kw']
    system_costs['cap_cost_multiplier'] = agent.loc['cap_cost_multiplier']
    system_costs['batt_capex_per_kw'] = agent.loc['batt_capex_per_kw']
    system_costs['batt_capex_per_kwh'] = agent.loc['batt_capex_per_kwh']
    system_costs['batt_om_per_kw'] = agent.loc['batt_om_per_kw']
    system_costs['batt_om_per_kwh'] = agent.loc['batt_om_per_kwh']

    ######################################
    ###----------- CASHLOAN -----------###
    ###---- DEPRECIATION PARAMETERS ---###
    ######################################

    if agent.loc['sector_abbr'] == 'res':
        loan.Depreciation.depr_fed_type = 0
        loan.Depreciation.depr_sta_type = 0
    else:
        loan.Depreciation.depr_fed_type = 1
        loan.Depreciation.depr_sta_type = 0

    ######################################
    ###----------- CASHLOAN -----------###
    ###----- TAX CREDIT INCENTIVES ----###
    ######################################

    loan.TaxCreditIncentives.itc_fed_percent = agent.loc['itc_fraction_of_capex'] * 100

    ######################################
    ###----------- CASHLOAN -----------###
    ###-------- BATTERY SYSTEM --------###
    ######################################

    loan.BatterySystem.batt_replacement_option = 2  # user schedule
    # Replace the battery once, in its final lifetime year
    batt_replacement_schedule = [0 for i in range(0, agent.loc['batt_lifetime_yrs'] - 1)] + [1]
    loan.BatterySystem.batt_replacement_schedule = batt_replacement_schedule

    ######################################
    ###----------- CASHLOAN -----------###
    ###-------- SYSTEM OUTPUT ---------###
    ######################################

    loan.SystemOutput.degradation = [agent.loc['pv_degradation_factor'] * 100]

    ######################################
    ###----------- CASHLOAN -----------###
    ###----------- LIFETIME -----------###
    ######################################
    loan.Lifetime.system_use_lifetime_output = 0

    # From dGen - calc_system_size_and_financial_performance()
    max_size_load = agent.loc['load_kwh_per_customer_in_bin'] / agent.loc['naep']
    max_size_roof = agent.loc['developable_roof_sqft'] * agent.loc['pv_kw_per_sqft']
    max_system_kw = min(max_size_load, max_size_roof)

    # set tolerance for minimize_scalar based on max_system_kw value
    tol = min(0.25 * max_system_kw, 0.5)

    # Calculate the PV system size that maximizes the agent's NPV, to a tolerance of 0.5 kW.
    # Note that the optimization is technically minimizing negative NPV
    # ! As is, because of the tolerance this function would not necessarily return a system size of 0 or max PV size if those are optimal
    res_with_batt = optimize.minimize_scalar(calc_system_performance,
                                             args=(pv, utilityrate, loan, batt, system_costs, True, 0),
                                             bounds=(0, max_system_kw),
                                             method='bounded',
                                             tol=tol)

    # PySAM Module outputs with battery
    batt_loan_outputs = loan.Outputs.export()
    batt_util_outputs = utilityrate.Outputs.export()
    batt_annual_energy_kwh = np.sum(utilityrate.SystemOutput.gen)

    # NOTE(review): these read state left by the optimizer's *last* evaluation,
    # which may not correspond exactly to res_with_batt.x — confirm acceptable
    batt_kw = batt.Battery.batt_simple_kw
    batt_kwh = batt.Battery.batt_simple_kwh
    batt_dispatch_profile = batt.Outputs.batt_power  # ?

    # Run without battery
    res_no_batt = optimize.minimize_scalar(calc_system_performance,
                                           args=(pv, utilityrate, loan, batt, system_costs, False, 0),
                                           bounds=(0, max_system_kw),
                                           method='bounded',
                                           tol=tol)

    # PySAM Module outputs without battery
    no_batt_loan_outputs = loan.Outputs.export()
    no_batt_util_outputs = utilityrate.Outputs.export()
    no_batt_annual_energy_kwh = np.sum(utilityrate.SystemOutput.gen)

    # Retrieve NPVs of system with batt and system without batt
    npv_w_batt = batt_loan_outputs['npv']
    npv_no_batt = no_batt_loan_outputs['npv']

    # Choose the system with the higher NPV
    if npv_w_batt >= npv_no_batt:
        system_kw = res_with_batt.x
        annual_energy_production_kwh = batt_annual_energy_kwh
        first_year_elec_bill_with_system = batt_util_outputs['elec_cost_with_system_year1']
        first_year_elec_bill_without_system = batt_util_outputs['elec_cost_without_system_year1']

        npv = npv_w_batt
        payback = batt_loan_outputs['payback']
        cash_flow = list(batt_loan_outputs['cf_payback_with_expenses'])  # ?

        cbi_total = batt_loan_outputs['cbi_total']
        cbi_total_fed = batt_loan_outputs['cbi_total_fed']
        cbi_total_oth = batt_loan_outputs['cbi_total_oth']
        cbi_total_sta = batt_loan_outputs['cbi_total_sta']
        cbi_total_uti = batt_loan_outputs['cbi_total_uti']

        ibi_total = batt_loan_outputs['ibi_total']
        ibi_total_fed = batt_loan_outputs['ibi_total_fed']
        ibi_total_oth = batt_loan_outputs['ibi_total_oth']
        ibi_total_sta = batt_loan_outputs['ibi_total_sta']
        ibi_total_uti = batt_loan_outputs['ibi_total_uti']

        cf_pbi_total = batt_loan_outputs['cf_pbi_total']
        pbi_total_fed = batt_loan_outputs['cf_pbi_total_fed']
        pbi_total_oth = batt_loan_outputs['cf_pbi_total_oth']
        pbi_total_sta = batt_loan_outputs['cf_pbi_total_sta']
        pbi_total_uti = batt_loan_outputs['cf_pbi_total_uti']
    else:
        system_kw = res_no_batt.x
        annual_energy_production_kwh = no_batt_annual_energy_kwh
        first_year_elec_bill_with_system = no_batt_util_outputs['elec_cost_with_system_year1']
        first_year_elec_bill_without_system = no_batt_util_outputs['elec_cost_without_system_year1']

        npv = npv_no_batt
        payback = no_batt_loan_outputs['payback']
        cash_flow = list(no_batt_loan_outputs['cf_payback_with_expenses'])

        batt_kw = 0
        batt_kwh = 0
        batt_dispatch_profile = np.nan

        cbi_total = no_batt_loan_outputs['cbi_total']
        cbi_total_fed = no_batt_loan_outputs['cbi_total_fed']
        cbi_total_oth = no_batt_loan_outputs['cbi_total_oth']
        cbi_total_sta = no_batt_loan_outputs['cbi_total_sta']
        cbi_total_uti = no_batt_loan_outputs['cbi_total_uti']

        ibi_total = no_batt_loan_outputs['ibi_total']
        ibi_total_fed = no_batt_loan_outputs['ibi_total_fed']
        ibi_total_oth = no_batt_loan_outputs['ibi_total_oth']
        ibi_total_sta = no_batt_loan_outputs['ibi_total_sta']
        ibi_total_uti = no_batt_loan_outputs['ibi_total_uti']

        cf_pbi_total = no_batt_loan_outputs['cf_pbi_total']
        pbi_total_fed = no_batt_loan_outputs['cf_pbi_total_fed']
        pbi_total_oth = no_batt_loan_outputs['cf_pbi_total_oth']
        pbi_total_sta = no_batt_loan_outputs['cf_pbi_total_sta']
        pbi_total_uti = no_batt_loan_outputs['cf_pbi_total_uti']

    # change 0 value to 1 to avoid divide by zero errors
    if first_year_elec_bill_without_system == 0:
        first_year_elec_bill_without_system = 1.0

    # Add outputs to agent df
    naep = annual_energy_production_kwh / system_kw
    first_year_elec_bill_savings = first_year_elec_bill_without_system - first_year_elec_bill_with_system
    first_year_elec_bill_savings_frac = first_year_elec_bill_savings / first_year_elec_bill_without_system
    avg_elec_price_cents_per_kwh = first_year_elec_bill_without_system / agent.loc['load_kwh_per_customer_in_bin']

    agent.loc['system_kw'] = system_kw
    agent.loc['npv'] = npv
    # NaN payback means "never pays back"; encode as 30.1 years
    agent.loc['payback_period'] = np.round(np.where(np.isnan(payback), 30.1, payback), 1).astype(float)
    agent.loc['cash_flow'] = cash_flow
    agent.loc['annual_energy_production_kwh'] = annual_energy_production_kwh
    agent.loc['naep'] = naep
    agent.loc['capacity_factor'] = agent.loc['naep'] / 8760
    agent.loc['first_year_elec_bill_with_system'] = first_year_elec_bill_with_system
    agent.loc['first_year_elec_bill_savings'] = first_year_elec_bill_savings
    agent.loc['first_year_elec_bill_savings_frac'] = first_year_elec_bill_savings_frac
    agent.loc['max_system_kw'] = max_system_kw
    agent.loc['first_year_elec_bill_without_system'] = first_year_elec_bill_without_system
    agent.loc['avg_elec_price_cents_per_kwh'] = avg_elec_price_cents_per_kwh
    agent.loc['batt_kw'] = batt_kw
    agent.loc['batt_kwh'] = batt_kwh
    agent.loc['batt_dispatch_profile'] = batt_dispatch_profile

    # Financial outputs (find out which ones to include):
    # np.array(dict) yields a 0-d object array wrapping the dict
    agent.loc['cbi'] = np.array({'cbi_total': cbi_total,
                                 'cbi_total_fed': cbi_total_fed,
                                 'cbi_total_oth': cbi_total_oth,
                                 'cbi_total_sta': cbi_total_sta,
                                 'cbi_total_uti': cbi_total_uti
                                 })
    agent.loc['ibi'] = np.array({'ibi_total': ibi_total,
                                 'ibi_total_fed': ibi_total_fed,
                                 'ibi_total_oth': ibi_total_oth,
                                 'ibi_total_sta': ibi_total_sta,
                                 'ibi_total_uti': ibi_total_uti
                                 })
    agent.loc['pbi'] = np.array({'pbi_total': cf_pbi_total,
                                 'pbi_total_fed': pbi_total_fed,
                                 'pbi_total_oth': pbi_total_oth,
                                 'pbi_total_sta': pbi_total_sta,
                                 'pbi_total_uti': pbi_total_uti
                                 })
    agent.loc['cash_incentives'] = ''
    agent.loc['export_tariff_results'] = ''

    out_cols = ['agent_id',
                'system_kw',
                'batt_kw',
                'batt_kwh',
                'npv',
                'payback_period',
                'cash_flow',
                'batt_dispatch_profile',
                'annual_energy_production_kwh',
                'naep',
                'capacity_factor',
                'first_year_elec_bill_with_system',
                'first_year_elec_bill_savings',
                'first_year_elec_bill_savings_frac',
                'max_system_kw',
                'first_year_elec_bill_without_system',
                'avg_elec_price_cents_per_kwh',
                'cbi',
                'ibi',
                'pbi',
                'cash_incentives',
                'export_tariff_results'
                ]

    return agent[out_cols]
#%%
def calc_financial_performance_wind(agent, sectors, rate_switch_table=None):
"""
Calculate bill savings and financial metrics based on pre-selected wind system size.
Parameters
----------
agent : 'pd.df'
individual agent object.
Returns
-------
agent: 'pd.df'
Adds several features to the agent dataframe:
- **agent_id**
- **system_kw** - system capacity selected by agent
- **npv** - net present value of system + storage
- **cash_flow** - array of annual cash flows from system adoption
- **batt_dispatch_profile** - array of hourly battery dispatch
- **annual_energy_production_kwh** - annual energy production (kwh) of system
- **naep** - normalized annual energy production (kwh/kW) of system
- **capacity_factor** - annual capacity factor
- **first_year_elec_bill_with_system** - first year electricity bill with adopted system ($/yr)
- **first_year_elec_bill_savings** - first year electricity bill savings with adopted system ($/yr)
- **first_year_elec_bill_savings_frac** - fraction of savings on electricity bill in first year of system adoption
- **max_system_kw** - maximum system size allowed as constrained by roof size or not exceeding annual consumption
- **first_year_elec_bill_without_system** - first year electricity bill without adopted system ($/yr)
- **avg_elec_price_cents_per_kwh** - first year electricity price (c/kwh)
- **cbi** - ndarray of capacity-based incentives applicable to agent
- **ibi** - ndarray of investment-based incentives applicable to agent
- **pbi** - ndarray of performance-based incentives applicable to agent
- **cash_incentives** - ndarray of cash-based incentives applicable to agent
- **export_tariff_result** - summary of structure of retail tariff applied to agent
"""
# Initialize new DB connection
model_settings = settings.init_model_settings()
con, cur = utilfunc.make_con(model_settings.pg_conn_string, model_settings.role)
# Extract load profile after scaling hourly load to annual total
load_profile_df = agent_mutation.elec.get_and_apply_agent_load_profiles(con, agent)
consumption_hourly = pd.Series(load_profile_df['consumption_hourly']).iloc[0]
del load_profile_df
# Using the scale offset factor of 1E6 for capacity factors
norm_scaled_wind_profiles_df = agent_mutation.elec.get_and_apply_normalized_hourly_resource_wind(con, agent)
generation_hourly = pd.Series(norm_scaled_wind_profiles_df['generation_hourly']).iloc[0]
del norm_scaled_wind_profiles_df
# Instantiate utilityrate5 model based on agent sector
if agent.loc['sector_abbr'] == 'res':
utilityrate = utility.default('WindPowerResidential')
else:
utilityrate = utility.default('WindPowerCommercial')
######################################
###--------- UTILITYRATE5 ---------###
###------- ELECTRICITYRATES -------###
######################################
# Use single monthly peak for TOU demand charge; options: 0=use TOU peak,1=use flat peak
utilityrate.ElectricityRates.TOU_demand_single_peak = 0 # ?
# Optionally enable/disable electricity_rate [years]
utilityrate.ElectricityRates.en_electricity_rates = 1
# Annual electricity rate escalation [%/year]
utilityrate.ElectricityRates.rate_escalation = [agent.loc['elec_price_escalator'] * 100] # convert decimal to %
# Enable time step sell rates [0/1]
utilityrate.ElectricityRates.ur_en_ts_sell_rate = 0
# Time step sell rates [0/1]
utilityrate.ElectricityRates.ur_ts_sell_rate = [0.]
# Set sell rate equal to buy rate [0/1]
utilityrate.ElectricityRates.ur_sell_eq_buy = 0
# Dictionary to map dGen compensation styles to PySAM options
nem_options = {'net metering':0, 'net billing':2, 'buy all sell all':4, 'none':2}
# Metering options [0=net energy metering,1=net energy metering with $ credits,2=net billing,3=net billing with carryover to next month,4=buy all - sell all]
utilityrate.ElectricityRates.ur_metering_option = nem_options[agent.loc['compensation_style']]
# Year end sell rate [$/kWh]
utilityrate.ElectricityRates.ur_nm_yearend_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
if agent.loc['compensation_style']=='none':
net_billing_sell_rate = 0.
else:
net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
# Restructure tariff object for PySAM compatibility
utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)
######################################
###--------- UTILITYRATE5 ---------###
###----------- LIFETIME -----------###
######################################
# Number of years in analysis [years]
utilityrate.Lifetime.analysis_period = agent.loc['economic_lifetime_yrs']
# Inflation rate [%]
utilityrate.Lifetime.inflation_rate = agent.loc['inflation_rate'] * 100
# Lifetime hourly system outputs [0/1]; Options: 0=hourly first year,1=hourly lifetime
utilityrate.Lifetime.system_use_lifetime_output = 0
######################################
###--------- UTILITYRATE5 ---------###
###-------- SYSTEM OUTPUT ---------###
######################################
# Annual energy degradation [%] -- Wind degradation already applied via 'derate_factor'
utilityrate.SystemOutput.degradation = [0.]
# System power generated [kW]
utilityrate.SystemOutput.gen = generation_hourly
######################################
###--------- UTILITYRATE5 ---------###
###-------- SYSTEM OUTPUT ---------###
######################################
# Electricity load (year 1) [kW]
utilityrate.Load.load = consumption_hourly
######################################
###--------- UTILITYRATE5 ---------###
###------------ EXECUTE -----------###
######################################
utilityrate.execute()
######################################
###----------- CASHLOAN -----------###
###----- FINANCIAL PARAMETERS -----###
######################################
# Initiate cashloan model and set market-specific variables
if agent.loc['sector_abbr'] == 'res':
loan = cashloan.default('WindPowerResidential')
loan.FinancialParameters.market = 0
else:
loan = cashloan.default('WindPowerCommercial')
loan.FinancialParameters.market = 1
loan.FinancialParameters.analysis_period = agent.loc['economic_lifetime_yrs']
loan.FinancialParameters.debt_fraction = 100 - (agent.loc['down_payment_fraction'] * 100)
loan.FinancialParameters.federal_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.7] # SAM default
loan.FinancialParameters.inflation_rate = agent.loc['inflation_rate'] * 100
loan.FinancialParameters.insurance_rate = 0
loan.FinancialParameters.loan_rate = agent.loc['loan_interest_rate'] * 100
loan.FinancialParameters.loan_term = agent.loc['loan_term_yrs']
loan.FinancialParameters.mortgage = 0 # default value - standard loan (no mortgage)
loan.FinancialParameters.prop_tax_assessed_decline = 5 # PySAM default
loan.FinancialParameters.prop_tax_cost_assessed_percent = 95 # PySAM default
loan.FinancialParameters.property_tax_rate = 0 # PySAM default
loan.FinancialParameters.real_discount_rate = agent.loc['real_discount_rate'] * 100
loan.FinancialParameters.salvage_percentage = 0
loan.FinancialParameters.state_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.3] # SAM default
loan.FinancialParameters.system_heat_rate = 0
loan.FinancialParameters.system_capacity = agent.loc['system_size_kw']
######################################
###----------- CASHLOAN -----------###
###--------- SYSTEM COSTS ---------###
######################################
# specify number of O&M types (0 = system only)
loan.SystemCosts.add_om_num_types = 0
# specify O&M variables
loan.SystemCosts.om_capacity = [agent.loc['system_om_per_kw'] + agent.loc['system_variable_om_per_kw']]
# Calculate and specify system costs
system_costs = agent.loc['system_capex_per_kw'] * agent.loc['system_size_kw']
batt_costs = 0
sales_tax = 0
direct_costs = (system_costs + batt_costs) * agent.loc['cap_cost_multiplier']
loan.SystemCosts.total_installed_cost = direct_costs + sales_tax
######################################
###----------- CASHLOAN -----------###
###---- DEPRECIATION PARAMETERS ---###
######################################
# Federal and State depreciation type
# Options: 0=none, 1=MACRS half year, 2=straight-line, 3=custom
if agent.loc['sector_abbr'] == 'res':
loan.Depreciation.depr_fed_type = 0
loan.Depreciation.depr_sta_type = 0
else:
loan.Depreciation.depr_fed_type = 1
loan.Depreciation.depr_sta_type = 0
######################################
###----------- CASHLOAN -----------###
###----- TAX CREDIT INCENTIVES ----###
######################################
# Federal percentage-based ITC percent [%]
loan.TaxCreditIncentives.itc_fed_percent = agent.loc['itc_fraction_of_capex'] * 100
######################################
###----------- CASHLOAN -----------###
###------ PAYMENT INCENTIVES ------###
######################################
# Specify payment incentives within Cashloan object
loan = process_incentives(loan, agent.loc['system_size_kw'], 0, 0, generation_hourly, agent)
######################################
###----------- CASHLOAN -----------###
###-------- BATTERY SYSTEM --------###
######################################
# Enable battery storage model [0/1]
loan.BatterySystem.en_batt = 0
######################################
###----------- CASHLOAN -----------###
###-------- SYSTEM OUTPUT ---------###
######################################
# Energy value [$] -- i.e. "bill savings"
loan.SystemOutput.annual_energy_value = utilityrate.Outputs.annual_energy_value
# Annual energy degradation [%] -- Wind degradation already applied via 'derate_factor'
loan.SystemOutput.degradation = [0.]
# Power generated by renewable resource [kW]
loan.SystemOutput.gen = utilityrate.SystemOutput.gen
######################################
###----------- CASHLOAN -----------###
###----------- LIFETIME -----------###
######################################
loan.Lifetime.system_use_lifetime_output = 0
######################################
###----------- CASHLOAN -----------###
###----- THIRD PARTY OWNERSHIP ----###
######################################
# Energy value [$]
loan.ThirdPartyOwnership.elec_cost_with_system = utilityrate.Outputs.elec_cost_with_system
# Energy value [$]
loan.ThirdPartyOwnership.elec_cost_without_system = utilityrate.Outputs.elec_cost_without_system
######################################
###-------- POSTPROCESSING --------###
###------------ RESULTS -----------###
######################################
# Get outputs from Utilityrate5 model
util_outputs = utilityrate.Outputs.export()
# Assign variables from Utilityrate5 outputs, others
system_kw = agent.loc['system_size_kw']
first_year_elec_bill_with_system = util_outputs['elec_cost_with_system_year1']
first_year_elec_bill_without_system = util_outputs['elec_cost_without_system_year1']
# PySAM cannot evaluate system sizes of 0 kW -- check and manually assign values if system_size_kw = 0
if system_kw > 0:
# Execute Cashloan model
loan.execute()
loan_outputs = loan.Outputs.export()
npv = loan_outputs['npv']
payback = loan_outputs['payback']
cash_flow = list(loan_outputs['cf_payback_with_expenses'])
cbi_total = loan_outputs['cbi_total']
cbi_total_fed = loan_outputs['cbi_total_fed']
cbi_total_oth = loan_outputs['cbi_total_oth']
cbi_total_sta = loan_outputs['cbi_total_sta']
cbi_total_uti = loan_outputs['cbi_total_uti']
ibi_total = loan_outputs['ibi_total']
ibi_total_fed = loan_outputs['ibi_total_fed']
ibi_total_oth = loan_outputs['ibi_total_oth']
ibi_total_sta = loan_outputs['ibi_total_sta']
ibi_total_uti = loan_outputs['ibi_total_uti']
cf_pbi_total = loan_outputs['cf_pbi_total']
pbi_total_fed = loan_outputs['cf_pbi_total_fed']
pbi_total_oth = loan_outputs['cf_pbi_total_oth']
pbi_total_sta = loan_outputs['cf_pbi_total_sta']
pbi_total_uti = loan_outputs['cf_pbi_total_uti']
else:
npv = 0.
payback = 30.1
cash_flow = [0.] * (agent.loc['economic_lifetime_yrs'] + 1)
cbi_total = cbi_total_fed = cbi_total_oth = cbi_total_sta = cbi_total_uti = 0.
ibi_total = ibi_total_fed = ibi_total_oth = ibi_total_sta = ibi_total_uti = 0.
cf_pbi_total = pbi_total_fed = pbi_total_oth = pbi_total_sta = pbi_total_uti = 0.
# change 0 value to 1 to avoid divide by zero errors
if first_year_elec_bill_without_system == 0:
first_year_elec_bill_without_system = 1.0
# Add outputs to agent df
first_year_elec_bill_savings = first_year_elec_bill_without_system - first_year_elec_bill_with_system
first_year_elec_bill_savings_frac = first_year_elec_bill_savings / first_year_elec_bill_without_system
avg_elec_price_cents_per_kwh = first_year_elec_bill_without_system / agent.loc['load_kwh_per_customer_in_bin']
# Specify variables to write to agent df -- also write placeholder batt values
agent.loc['system_kw'] = system_kw
agent.loc['npv'] = npv
agent.loc['payback_period'] = np.round(np.where(np.isnan(payback), 30.1, payback), 1).astype(float)
agent.loc['cash_flow'] = cash_flow
agent.loc['first_year_elec_bill_with_system'] = first_year_elec_bill_with_system
agent.loc['first_year_elec_bill_savings'] = first_year_elec_bill_savings
agent.loc['first_year_elec_bill_savings_frac'] = first_year_elec_bill_savings_frac
agent.loc['first_year_elec_bill_without_system'] = first_year_elec_bill_without_system
agent.loc['avg_elec_price_cents_per_kwh'] = avg_elec_price_cents_per_kwh
agent.loc['batt_kw'] = 0.
agent.loc['batt_kwh'] = 0.
agent.loc['batt_dispatch_profile'] = np.nan
# Specify incentive outputs
agent.loc['cbi'] = np.array({'cbi_total': cbi_total,
'cbi_total_fed': cbi_total_fed,
'cbi_total_oth': cbi_total_oth,
'cbi_total_sta': cbi_total_sta,
'cbi_total_uti': cbi_total_uti
})
agent.loc['ibi'] = np.array({'ibi_total': ibi_total,
'ibi_total_fed': ibi_total_fed,
'ibi_total_oth': ibi_total_oth,
'ibi_total_sta': ibi_total_sta,
'ibi_total_uti': ibi_total_uti
})
agent.loc['pbi'] = np.array({'pbi_total': cf_pbi_total,
'pbi_total_fed': pbi_total_fed,
'pbi_total_oth': pbi_total_oth,
'pbi_total_sta': pbi_total_sta,
'pbi_total_uti': pbi_total_uti
})
agent.loc['cash_incentives'] = ''
agent.loc['export_tariff_results'] = ''
out_cols = ['agent_id',
'system_kw',
'npv',
'payback_period',
'cash_flow',
'first_year_elec_bill_with_system',
'first_year_elec_bill_savings',
'first_year_elec_bill_savings_frac',
'first_year_elec_bill_without_system',
'avg_elec_price_cents_per_kwh',
'cbi',
'ibi',
'pbi',
'cash_incentives',
'export_tariff_results',
'batt_kw',
'batt_kwh',
'batt_dispatch_profile'
]
return agent[out_cols]
#%%
def process_tariff(utilityrate, tariff_dict, net_billing_sell_rate):
    """
    Populate the ElectricityRates group of a Utilityrate5 PySAM model from a
    dGen tariff dictionary.

    Parameters
    ----------
    utilityrate : 'PySAM.Utilityrate5'
        Utilityrate5 model instance whose ElectricityRates group is populated
        in place.
    tariff_dict : dict
        dGen rate attributes: fixed charge, flat/TOU demand charge tables,
        energy charge tables, and the 12x24 weekday/weekend schedules.
    net_billing_sell_rate : float
        Sell rate [$/kWh] appended to every energy-rate row.

    Returns
    -------
    utilityrate: 'PySAM.Utilityrate5'
    """
    ######################################
    ###--------- UTILITYRATE5 ---------###
    ###--- FIXED AND ANNUAL CHARGES ---###
    ######################################
    # Monthly fixed charge [$]
    utilityrate.ElectricityRates.ur_monthly_fixed_charge = tariff_dict['fixed_charge']
    # Annual minimum charge [$]
    utilityrate.ElectricityRates.ur_annual_min_charge = 0.  # not currently tracked in URDB rate attribute downloads
    # Monthly minimum charge [$]
    utilityrate.ElectricityRates.ur_monthly_min_charge = 0.  # not currently tracked in URDB rate attribute downloads
    ######################################
    ###--------- UTILITYRATE5 ---------###
    ###-------- DEMAND CHARGES --------###
    ######################################
    # Enable demand charge if either a flat or a TOU demand structure exists
    utilityrate.ElectricityRates.ur_dc_enable = (tariff_dict['d_flat_exists']) | (tariff_dict['d_tou_exists'])
    if utilityrate.ElectricityRates.ur_dc_enable:
        if tariff_dict['d_flat_exists']:
            # Reformat demand charge table from dGen format:
            # rows of [period, tier, level, price], tiers 1-indexed.
            # NOTE: flat periods stay 0-indexed here while TOU periods below are 1-indexed.
            n_periods = len(tariff_dict['d_flat_levels'][0])
            n_tiers = len(tariff_dict['d_flat_levels'])
            # Demand rates (flat) table
            utilityrate.ElectricityRates.ur_dc_flat_mat = [
                [period, tier + 1,
                 tariff_dict['d_flat_levels'][tier][period],
                 tariff_dict['d_flat_prices'][tier][period]]
                for period in range(n_periods)
                for tier in range(n_tiers)
            ]
        if tariff_dict['d_tou_exists']:
            # Reformat demand charge table from dGen format (periods and tiers 1-indexed)
            n_periods = len(tariff_dict['d_tou_levels'][0])
            n_tiers = len(tariff_dict['d_tou_levels'])
            # Demand rates (TOU) table
            utilityrate.ElectricityRates.ur_dc_tou_mat = [
                [period + 1, tier + 1,
                 tariff_dict['d_tou_levels'][tier][period],
                 tariff_dict['d_tou_prices'][tier][period]]
                for period in range(n_periods)
                for tier in range(n_tiers)
            ]
        # Reformat 12x24 tables - original are indexed to 0, PySAM needs index starting at 1
        # Demand charge weekday schedule
        utilityrate.ElectricityRates.ur_dc_sched_weekday = [
            [x + 1 for x in row] for row in tariff_dict['d_wkday_12by24']
        ]
        # Demand charge weekend schedule
        utilityrate.ElectricityRates.ur_dc_sched_weekend = [
            [x + 1 for x in row] for row in tariff_dict['d_wkend_12by24']
        ]
    ######################################
    ###--------- UTILITYRATE5 ---------###
    ###-------- ENERGY CHARGES --------###
    ######################################
    if tariff_dict['e_exists']:
        # Dictionary to map dGen max usage units to PySAM options
        max_usage_dict = {'kWh': 0, 'kWh/kW': 1, 'kWh daily': 2, 'kWh/kW daily': 3}
        # If max usage units are 'kWh daily', divide max usage by 30 -- rate download procedure converts daily to monthly
        modifier = 30. if tariff_dict['energy_rate_unit'] == 'kWh daily' else 1.
        # Reformat energy charge table from dGen format:
        # [period, tier, max usage, usage units, price, sell rate], 1-indexed
        n_periods = len(tariff_dict['e_levels'][0])
        n_tiers = len(tariff_dict['e_levels'])
        # Energy rates table
        utilityrate.ElectricityRates.ur_ec_tou_mat = [
            [period + 1, tier + 1,
             tariff_dict['e_levels'][tier][period] / modifier,
             max_usage_dict[tariff_dict['energy_rate_unit']],
             tariff_dict['e_prices'][tier][period],
             net_billing_sell_rate]
            for period in range(n_periods)
            for tier in range(n_tiers)
        ]
        # Reformat 12x24 tables - original are indexed to 0, PySAM needs index starting at 1
        # Energy charge weekday schedule
        utilityrate.ElectricityRates.ur_ec_sched_weekday = [
            [x + 1 for x in row] for row in tariff_dict['e_wkday_12by24']
        ]
        # Energy charge weekend schedule
        utilityrate.ElectricityRates.ur_ec_sched_weekend = [
            [x + 1 for x in row] for row in tariff_dict['e_wkend_12by24']
        ]
    return utilityrate
#%%
def process_incentives(loan, kw, batt_kw, batt_kwh, generation_hourly, agent):
    """
    Populate the PaymentIncentives group of a Cashloan PySAM model from the
    agent's 'state_incentives' DataFrame.

    Up to two incentives of each type (CBI, PBI, IBI) are applied, sorted by
    size: the largest goes in PySAM's "state" (sta) slot and the second
    largest in the "other" (oth) slot. If 'state_incentives' is not a
    DataFrame, the loan object is returned unchanged.

    Parameters
    ----------
    loan : 'PySAM.Cashloan'
        Cashloan model instance populated in place.
    kw : float
        System capacity [kW] -- accepted but not read in this function.
    batt_kw : float
        Battery power capacity [kW] -- accepted but not read in this function.
    batt_kwh : float
        Battery energy capacity [kWh] -- accepted but not read in this function.
    generation_hourly : sequence
        Hourly generation profile -- accepted but not read in this function.
    agent : 'pd.Series'
        Individual agent object; must contain 'state_incentives'.

    Returns
    -------
    loan : 'PySAM.Cashloan'
    """
    ######################################
    ###----------- CASHLOAN -----------###
    ###------ PAYMENT INCENTIVES ------###
    ######################################
    # Read incentive dataframe from agent attributes
    incentive_df = agent.loc['state_incentives']
    # Check dtype of incentive_df - process incentives if pd.DataFrame, otherwise do not assign incentive values to cashloan
    if isinstance(incentive_df, pd.DataFrame):
        # Fill NaNs in incentive_df - assume max incentive duration of 5 years and max incentive value of $10,000
        incentive_df = incentive_df.fillna(value={'incentive_duration_yrs' : 5, 'max_incentive_usd' : 10000})
        # Filter for CBI's in incentive_df, largest $/W first
        cbi_df = (incentive_df.loc[pd.notnull(incentive_df['cbi_usd_p_w'])]
                  .sort_values(['cbi_usd_p_w'], axis=0, ascending=False)
                  .reset_index(drop=True)
                  )
        # For multiple CBIs that are applicable to the agent, cap at 2 and use PySAM's "state" and "other" option
        if len(cbi_df) == 1:
            loan.PaymentIncentives.cbi_sta_amount = cbi_df['cbi_usd_p_w'].iloc[0]
            loan.PaymentIncentives.cbi_sta_deprbas_fed = 0
            loan.PaymentIncentives.cbi_sta_deprbas_sta = 0
            loan.PaymentIncentives.cbi_sta_maxvalue = cbi_df['max_incentive_usd'].iloc[0]
            loan.PaymentIncentives.cbi_sta_tax_fed = 0
            loan.PaymentIncentives.cbi_sta_tax_sta = 0
        elif len(cbi_df) >= 2:
            # NOTE(review): single-CBI branch sets cbi_sta_tax_* = 0 but this
            # branch sets them to 1 -- confirm the asymmetry is intentional
            loan.PaymentIncentives.cbi_sta_amount = cbi_df['cbi_usd_p_w'].iloc[0]
            loan.PaymentIncentives.cbi_sta_deprbas_fed = 0
            loan.PaymentIncentives.cbi_sta_deprbas_sta = 0
            loan.PaymentIncentives.cbi_sta_maxvalue = cbi_df['max_incentive_usd'].iloc[0]
            loan.PaymentIncentives.cbi_sta_tax_fed = 1
            loan.PaymentIncentives.cbi_sta_tax_sta = 1
            loan.PaymentIncentives.cbi_oth_amount = cbi_df['cbi_usd_p_w'].iloc[1]
            loan.PaymentIncentives.cbi_oth_deprbas_fed = 0
            loan.PaymentIncentives.cbi_oth_deprbas_sta = 0
            loan.PaymentIncentives.cbi_oth_maxvalue = cbi_df['max_incentive_usd'].iloc[1]
            loan.PaymentIncentives.cbi_oth_tax_fed = 1
            loan.PaymentIncentives.cbi_oth_tax_sta = 1
        else:
            pass
        # Filter for PBI's in incentive_df, largest $/kWh first
        pbi_df = (incentive_df.loc[pd.notnull(incentive_df['pbi_usd_p_kwh'])]
                  .sort_values(['pbi_usd_p_kwh'], axis=0, ascending=False)
                  .reset_index(drop=True)
                  )
        # For multiple PBIs that are applicable to the agent, cap at 2 and use PySAM's "state" and "other" option
        if len(pbi_df) == 1:
            # Amount input [$/kWh] requires sequence -- repeat pbi_usd_p_kwh using incentive_duration_yrs
            loan.PaymentIncentives.pbi_sta_amount = [pbi_df['pbi_usd_p_kwh'].iloc[0]] * int(pbi_df['incentive_duration_yrs'].iloc[0])
            loan.PaymentIncentives.pbi_sta_escal = 0.
            loan.PaymentIncentives.pbi_sta_tax_fed = 1
            loan.PaymentIncentives.pbi_sta_tax_sta = 1
            loan.PaymentIncentives.pbi_sta_term = pbi_df['incentive_duration_yrs'].iloc[0]
        elif len(pbi_df) >= 2:
            # Amount input [$/kWh] requires sequence -- repeat pbi_usd_p_kwh using incentive_duration_yrs
            loan.PaymentIncentives.pbi_sta_amount = [pbi_df['pbi_usd_p_kwh'].iloc[0]] * int(pbi_df['incentive_duration_yrs'].iloc[0])
            loan.PaymentIncentives.pbi_sta_escal = 0.
            loan.PaymentIncentives.pbi_sta_tax_fed = 1
            loan.PaymentIncentives.pbi_sta_tax_sta = 1
            loan.PaymentIncentives.pbi_sta_term = pbi_df['incentive_duration_yrs'].iloc[0]
            # Amount input [$/kWh] requires sequence -- repeat pbi_usd_p_kwh using incentive_duration_yrs
            loan.PaymentIncentives.pbi_oth_amount = [pbi_df['pbi_usd_p_kwh'].iloc[1]] * int(pbi_df['incentive_duration_yrs'].iloc[1])
            loan.PaymentIncentives.pbi_oth_escal = 0.
            loan.PaymentIncentives.pbi_oth_tax_fed = 1
            loan.PaymentIncentives.pbi_oth_tax_sta = 1
            loan.PaymentIncentives.pbi_oth_term = pbi_df['incentive_duration_yrs'].iloc[1]
        else:
            pass
        # Filter for IBI's in incentive_df, largest percentage first
        ibi_df = (incentive_df.loc[pd.notnull(incentive_df['ibi_pct'])]
                  .sort_values(['ibi_pct'], axis=0, ascending=False)
                  .reset_index(drop=True)
                  )
        # For multiple IBIs that are applicable to the agent, cap at 2 and use PySAM's "state" and "other" option
        # NOTE: this specifies IBI percentage, instead of IBI absolute amount
        if len(ibi_df) == 1:
            loan.PaymentIncentives.ibi_sta_percent = ibi_df['ibi_pct'].iloc[0]
            loan.PaymentIncentives.ibi_sta_percent_deprbas_fed = 0
            loan.PaymentIncentives.ibi_sta_percent_deprbas_sta = 0
            loan.PaymentIncentives.ibi_sta_percent_maxvalue = ibi_df['max_incentive_usd'].iloc[0]
            loan.PaymentIncentives.ibi_sta_percent_tax_fed = 1
            loan.PaymentIncentives.ibi_sta_percent_tax_sta = 1
        elif len(ibi_df) >= 2:
            loan.PaymentIncentives.ibi_sta_percent = ibi_df['ibi_pct'].iloc[0]
            loan.PaymentIncentives.ibi_sta_percent_deprbas_fed = 0
            loan.PaymentIncentives.ibi_sta_percent_deprbas_sta = 0
            loan.PaymentIncentives.ibi_sta_percent_maxvalue = ibi_df['max_incentive_usd'].iloc[0]
            loan.PaymentIncentives.ibi_sta_percent_tax_fed = 1
            loan.PaymentIncentives.ibi_sta_percent_tax_sta = 1
            loan.PaymentIncentives.ibi_oth_percent = ibi_df['ibi_pct'].iloc[1]
            loan.PaymentIncentives.ibi_oth_percent_deprbas_fed = 0
            loan.PaymentIncentives.ibi_oth_percent_deprbas_sta = 0
            loan.PaymentIncentives.ibi_oth_percent_maxvalue = ibi_df['max_incentive_usd'].iloc[1]
            loan.PaymentIncentives.ibi_oth_percent_tax_fed = 1
            loan.PaymentIncentives.ibi_oth_percent_tax_sta = 1
        else:
            pass
    else:
        pass
    return loan
#%%
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_max_market_share(dataframe, max_market_share_df):
    """
    Attach the maximum market share to each agent row by matching its
    (bounded) payback-period metric against the max-market-share curves.

    The agent's metric value is clamped into the range covered by the
    matching curve, scaled to an integer key, and left-joined against the
    curve table on sector / metric / key / business model.
    """
    original_cols = list(dataframe.columns)
    dataframe = dataframe.reset_index()
    dataframe['business_model'] = 'host_owned'
    dataframe['metric'] = 'payback_period'
    # Curve bounds: metric values for both curves live in the
    # 'payback_period' column of max_market_share_df
    curve_is_payback = max_market_share_df.metric == 'payback_period'
    curve_is_mbs = max_market_share_df.metric == 'percent_monthly_bill_savings'
    max_payback = max_market_share_df[curve_is_payback].payback_period.max()
    min_payback = max_market_share_df[curve_is_payback].payback_period.min()
    max_mbs = max_market_share_df[curve_is_mbs].payback_period.max()
    min_mbs = max_market_share_df[curve_is_mbs].payback_period.min()
    # copy the metric values to a new array and clamp each row's value into
    # the range covered by its corresponding curve
    bounded = dataframe['payback_period'].values.copy()
    metric_vals = dataframe['payback_period'].values
    row_is_payback = (dataframe.metric == 'payback_period').values
    row_is_mbs = (dataframe.metric == 'percent_monthly_bill_savings').values
    bounded[row_is_payback & (metric_vals < min_payback)] = min_payback
    bounded[row_is_payback & (metric_vals > max_payback)] = max_payback
    bounded[row_is_mbs & (metric_vals < min_mbs)] = min_mbs
    bounded[row_is_mbs & (metric_vals > max_mbs)] = max_mbs
    dataframe['payback_period_bounded'] = np.round(bounded.astype(float), 1)
    # integer join key: metric value scaled by 100 and rounded to nearest int
    dataframe['payback_period_as_factor'] = (dataframe['payback_period_bounded'] * 100).round().astype('int')
    # add the same scaled key to the max_market_share dataframe too
    max_market_share_df['payback_period_as_factor'] = (max_market_share_df['payback_period'] * 100).round().astype('int')
    # left-join the curve so each row picks up its max market share
    join_keys = ['sector_abbr', 'metric', 'payback_period_as_factor', 'business_model']
    curve_cols = ['sector_abbr', 'max_market_share', 'metric', 'payback_period_as_factor', 'business_model']
    dataframe = pd.merge(dataframe, max_market_share_df[curve_cols], how = 'left', on = join_keys)
    return dataframe[original_cols + ['max_market_share', 'metric']]
# standard libraries
import argparse
from datetime import date
import json
import logging
import os
import pickle
import sys
# third-party libaries
import coremltools
import editdistance
import numpy as np
import onnx
from onnx import helper, shape_inference
import onnxruntime
import onnx_coreml
import torch
import torch.nn as nn
#project libraries
from get_paths import validation_paths
from import_export import preproc_to_dict, preproc_to_json, export_state_dict
from speech.loader import log_spectrogram_from_data, log_spectrogram_from_file
from speech.models.ctc_decoder import decode as ctc_decode
from speech.models import ctc_model
from speech.utils.compat import normalize
from speech.utils.convert import to_numpy
from speech.utils.io import load_config, load_state_dict, write_json
from speech.utils.stream_utils import make_full_window
from speech.utils.wave import array_from_wave
# ----- logging format/setup -----------
# widen numpy/torch repr output so tensors print on one line in the logs
set_linewidth=160
np.set_printoptions(linewidth=set_linewidth)
torch.set_printoptions(linewidth=set_linewidth)
# NOTE(review): log_filename is defined but unused -- basicConfig below logs
# to stdout (filename=None); pass filename=log_filename to write to this file
log_filename = "logs_probs-hiddencell_2020-05-20.log"
logging.basicConfig(stream=sys.stdout, filename=None, filemode='w', level=logging.DEBUG)
# -----------------------------
# seed RNGs so generated test data is reproducible across runs
np.random.seed(2020)
torch.manual_seed(2020)
def main(model_name, num_frames):
    """
    Load the torch, onnx, and coreml versions of `model_name`, cross-validate
    their outputs on a directory of audio files, run full-audio inference,
    and write the metadata/validation results to JSON.
    Args
        model_name (str): name of the model, used to resolve artifact paths
        num_frames (int): number of input frames hard-coded in the onnx model
    """
    model_fn, onnx_fn, coreml_fn, config_fn, preproc_fn, state_dict_path = validation_paths(model_name)
    config = load_config(config_fn)
    model_cfg = config["model"]
    #with open(preproc_fn, 'rb') as fid:
    #    preproc = pickle.load(fid)
    # preproc is a pickled Preprocessor object loaded via numpy
    preproc = np.load(preproc_fn, allow_pickle=True)
    freq_dim = preproc.input_dim
    #load models
    model_cfg.update({'blank_idx': config['preproc']['blank_idx']})
    model = ctc_model.CTC(preproc.input_dim, preproc.vocab_size, model_cfg)
    state_dict = load_state_dict(model_fn, torch.device('cpu'))
    model.load_state_dict(state_dict)
    onnx_model = onnx.load(onnx_fn)
    coreml_model = coremltools.models.MLModel(coreml_fn)
    # create PARAMS dict of feature-extraction and streaming constants
    hidden_size = model_cfg['encoder']['rnn']['dim']
    PARAMS = {
        "sample_rate": 16000,
        "feature_win_len": 512,
        "feature_win_step": 256,
        "feature_size":257,
        "chunk_size": 46,
        "n_context": 15,
        "blank_idx": model.blank,
        "hidden_size": int(hidden_size)
    }
    # streaming stride: frames advanced per chunk after removing left/right context
    PARAMS['stride'] = PARAMS['chunk_size'] - 2*PARAMS['n_context']
    logging.warning(f"PARAMS dict: {PARAMS}")
    # prepping and checking models
    model.eval()
    onnx.checker.check_model(onnx_model)
    inferred_model = shape_inference.infer_shapes(onnx_model)
    onnx.checker.check_model(inferred_model)
    #creating the test data
    #data_dct = gen_test_data(preproc, num_frames, freq_dim, PARAMS['hidden_size'])
    #saving the preproc object as a dictionary
    # TODO change preproc methods to use the python object
    preproc_dict = preproc_to_dict(preproc_fn, export=False)
    preproc_dict.update(PARAMS)
    json_path = preproc_fn.replace('preproc.pyc', 'metadata.json')
    write_json(json_path, preproc_dict)
    # make predictions
    # NOTE(review): hard-coded local path -- will not exist on other machines
    audio_dir = '/Users/dustin/CS/consulting/firstlayerai/phoneme_classification/src/awni_speech/speech-lfs/model_convert/audio_files/Validatio-audio_2020-05-21/'
    validate_all_models(model, onnx_fn, coreml_model, preproc, audio_dir, model_name, num_frames, PARAMS)
    validation_tests = full_audio_infer(model, preproc, PARAMS, audio_dir)
    write_output_json(PARAMS, preproc_dict, validation_tests, model_name)
def write_output_json(PARAMS:dict, preproc_dict:dict, validation_tests:dict, model_name:str, output_path:str=None):
    """
    Write model metadata, preprocessing settings, and validation results to a
    single JSON file.
    Args
        PARAMS (dict): model evaluation parameters
        preproc_dict (dict): preprocessing object exported as a dict
        validation_tests (dict): per-audio-file inference results
        model_name (str): model name used to build the default filename
        output_path (str, optional): destination path; defaults to
            ./output/<model_name>_metadata_<today>.json
    """
    output_json = {
        "metadata": PARAMS,
        "preproc": preproc_dict,
        "validation_tests":validation_tests
    }
    logging.info(f"metadata to log: {PARAMS}")
    logging.info(f"preproc to log: {preproc_dict}")
    if output_path is None:
        json_filename = model_name+"_metadata_"+str(date.today())+".json"
        output_path = os.path.join("output", json_filename)
    # ensure the destination directory exists -- the original implementation
    # raised FileNotFoundError when ./output/ was missing
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(output_path, 'w') as fid:
        json.dump(output_json, fid)
def full_audio_infer(
    model,
    preproc,
    PARAMS:dict,
    audio_dir:str)->dict:
    """
    conducts inference on all audio files in audio_dir and returns a dictionary
    of the probabilities and phoneme predictions
    Args
        model (torch.nn.Module) - pytorch model
        preproc (speech.loader.Preprocessor) - model preprocessor object
        PARAMS (dict): dict of model evaluation parameters
        audio_dir (str): directory containing the .wav files to decode
    Returns
        dict mapping audio filename -> {"logits": ..., "maxDecodePhonemes": ...}
    """
    validation_tests=dict()
    for audio_file in os.listdir(audio_dir):
        # fresh zeroed LSTM hidden/cell states (layers=5, batch=1) per file
        hidden_in = torch.zeros((5, 1, PARAMS['hidden_size']), dtype=torch.float32)
        cell_in = torch.zeros((5, 1, PARAMS['hidden_size']), dtype=torch.float32)
        audio_path = os.path.join(audio_dir, audio_file)
        audio_data, samp_rate = array_from_wave(audio_path)
        assert PARAMS['sample_rate'] == samp_rate, "audio sample rate is not equal to default sample rate"
        # trim audio so it divides evenly into feature windows
        audio_data = make_full_window(audio_data, PARAMS['feature_win_len'], PARAMS['feature_win_step'])
        features = log_spectrogram_from_data(audio_data, samp_rate)
        norm_features = normalize(preproc, features)
        # adds the batch dimension (1, time, 257)
        norm_features = np.expand_dims(norm_features, axis=0)
        torch_input = torch.from_numpy(norm_features)
        # padding time dim, pads from the back: zero padding (0,0) to freq, 15 padding (15,0) to time
        padding = (0, 0, 15, 0)
        padded_input = torch.nn.functional.pad(torch_input, padding, value=0)
        # pad the time dim so (time - chunk_size) is an exact multiple of stride
        fill_chunk_remainder = (padded_input.shape[1] - PARAMS['chunk_size']) % PARAMS['stride']
        if fill_chunk_remainder != 0:
            fill_chunk_padding = PARAMS['stride'] - fill_chunk_remainder
            fill_chunk_pad = torch.zeros(1, fill_chunk_padding, PARAMS['feature_size'], dtype=torch.float32, requires_grad=False)
            padded_input = torch.cat((padded_input, fill_chunk_pad),dim=1)
        # process last chunk with stride of zeros
        final_chunk_pad = torch.zeros(1, PARAMS['stride'], PARAMS['feature_size'], dtype=torch.float32, requires_grad=False)
        padded_input = torch.cat((padded_input, final_chunk_pad),dim=1)
        model_output = model(padded_input, (hidden_in, cell_in))
        probs, (hidden_out, cell_out) = model_output
        probs = to_numpy(probs)
        # greedy decode, then map integer labels to phoneme strings
        int_labels = max_decode(probs[0], blank=PARAMS['blank_idx'])
        predictions = preproc.decode(int_labels)
        validation_tests.update({audio_file: {"logits": probs[0].tolist(), "maxDecodePhonemes": predictions}})
        logging.info(f"probs dimension: {probs.shape}")
        logging.info(f"prediction len: {len(predictions)}")
    return validation_tests
def validate_all_models(
    torch_model,
    onnx_fn,
    coreml_model,
    preproc,
    audio_dir:str,
    model_name:str,
    num_frames:int,
    PARAMS:dict)->None:
    """ This function compares the outputs of the torch, onnx, and coreml to ensure they are the same.
    Raises AssertionError if probabilities, states, or decoded predictions
    diverge beyond the tolerances below. Writes per-file torch outputs to
    ./output/<model_name>_output.json.
    Args:
        torch_model (torch.nn.Module)
        onnx_fn (str): path to onnx model
        coreml_model :
        preproc (dict): preprocessing object
        audio_dir (str): path to directory containing test audio files
        model_name (str): name of model
        num_frames (int): number of frames that the onnx and coreml models accept
        PARAMS (dict): dictionary of hyperparameters
    """
    stream_test_name = "Speak-out.wav"
    predictions_dict= {}
    # relative and absolute tolerances for the np.testing comparisons below
    rel_tol = 3e-1
    abs_tol = 7e-2
    check_preds = True # checks if the predictions of the torch and coreml models are equal
    check_probs = True # checks if the probabilities across models are equal
    check_hidden = True # checks if the hidden and cell states across models are equal
    for audio_file in os.listdir(audio_dir):
        # zeroed initial LSTM states (layers=5, batch=1) for every file
        test_h = np.zeros((5, 1, PARAMS['hidden_size'])).astype(np.float32)
        test_c = np.zeros((5, 1, PARAMS['hidden_size'])).astype(np.float32)
        audio_path = os.path.join(audio_dir, audio_file)
        log_spec = log_spectrogram_from_file(audio_path)
        features = normalize(preproc, log_spec)
        # truncate to the fixed frame count the exported models expect
        features = features[:num_frames,:]
        test_x = np.expand_dims(features, 0)
        logging.debug(f"\n~~~~~~~~~~~~~~~~~~{audio_file}~~~~~~~~~~~~~~~~~~~~~~\n")
        # ---- torch inference ----
        torch_output = torch_model(torch.from_numpy(test_x),(torch.from_numpy(test_h), torch.from_numpy(test_c)))
        torch_probs, torch_h, torch_c = to_numpy(torch_output[0]), to_numpy(torch_output[1][0]), to_numpy(torch_output[1][1])
        torch_max_decoder = max_decode(torch_probs[0], blank=PARAMS['blank_idx'])
        # taking the first element of ctc_decode selects the top (and only) beam
        torch_ctc_decoder = ctc_decode(torch_probs[0], beam_size=50, blank=PARAMS['blank_idx'])[0]
        # ---- onnx inference ----
        # NOTE(review): the InferenceSession is rebuilt every iteration;
        # hoisting it above the loop would avoid repeated model loads
        ort_session = onnxruntime.InferenceSession(onnx_fn)
        ort_inputs = {
            ort_session.get_inputs()[0].name: test_x,
            ort_session.get_inputs()[1].name: test_h,
            ort_session.get_inputs()[2].name: test_c
        }
        ort_output = ort_session.run(None, ort_inputs)
        onnx_probs, onnx_h, onnx_c = [np.array(array) for array in ort_output]
        logging.debug("onnxruntime prediction complete")
        # ---- coreml inference ----
        coreml_input = {'input': test_x, 'hidden_prev': test_h, 'cell_prev': test_c}
        coreml_output = coreml_model.predict(coreml_input, useCPUOnly=True)
        coreml_probs = np.array(coreml_output['output'])
        coreml_h = np.array(coreml_output['hidden'])
        coreml_c = np.array(coreml_output['cell'])
        coreml_max_decoder = max_decode(coreml_probs[0], blank=PARAMS['blank_idx'])
        # the zero index selection takes the top (and only) beam in the ctc_decode function
        coreml_ctc_decoder = ctc_decode(coreml_probs[0], beam_size=50, blank=PARAMS['blank_idx'])[0]
        logging.debug("coreml prediction completed")
        if audio_file == stream_test_name:
            # NOTE(review): these stream_test_* values are captured but never
            # read or returned from this function -- confirm dead code
            stream_test_x = test_x
            stream_test_h = test_h
            stream_test_c = test_c
            stream_test_probs = torch_probs
            stream_test_h_out = torch_h
            stream_test_c_out = torch_c
            stream_test_max_decoder = torch_max_decoder
            stream_test_ctc_decoder = torch_ctc_decoder
        # ---- log samples of the torch outputs and record them ----
        time_slice = 0 #num_frames//2 - 1
        torch_probs_sample = torch_probs[0,time_slice,:]
        torch_h_sample = torch_h[0,0,0:25]
        torch_c_sample = torch_c[0,0,0:25]
        torch_max_decoder_char = preproc.decode(torch_max_decoder)
        torch_ctc_decoder_char = preproc.decode(torch_ctc_decoder[0])
        logging.debug("\n-----Torch Output-----")
        logging.debug(f"output {np.shape(torch_probs)}: \n{torch_probs_sample}")
        logging.debug(f"hidden {np.shape(torch_h)}: \n{torch_h_sample}")
        logging.debug(f"cell {np.shape(torch_c)}: \n{torch_c_sample}")
        logging.debug(f"max decode: {torch_max_decoder_char}")
        logging.debug(f"ctc decode: {torch_ctc_decoder_char}")
        output_dict = {"torch_probs_(num_frames/2-1)":torch_probs_sample.tolist(), "torch_h_sample":torch_h_sample.tolist(),
                       "torch_c_sample": torch_c_sample.tolist(), "torch_max_decoder":torch_max_decoder_char,
                       "torch_ctc_decoder_beam=50":torch_ctc_decoder_char}
        predictions_dict.update({audio_file: output_dict})
        logging.debug("\n-----Coreml Output-----")
        logging.debug(f"output {coreml_probs.shape}: \n{coreml_probs[0,time_slice,:]}")
        logging.debug(f"hidden {coreml_h.shape}: \n{coreml_h[0,0,0:25]}")
        logging.debug(f"cell {coreml_c.shape}: \n{coreml_c[0,0,0:25]}")
        logging.debug(f"max decode: {coreml_max_decoder}")
        logging.debug(f"ctc decode: {coreml_ctc_decoder}")
        # Compare torch and Coreml predictions
        if check_preds:
            assert(torch_max_decoder==coreml_max_decoder), \
                f"max decoder preds doesn't match, torch: {torch_max_decoder}, coreml: {coreml_max_decoder} for file: {audio_path}"
            assert(torch_ctc_decoder[0]==coreml_ctc_decoder[0]), \
                f"ctc decoder preds doesn't match, torch: {torch_ctc_decoder[0]}, coreml: {coreml_ctc_decoder[0]} for file: {audio_path}"
            logging.debug("preds check passed")
        # Compare probabilities pairwise across all three backends
        if check_probs:
            np.testing.assert_allclose(coreml_probs, torch_probs, rtol=rel_tol, atol=abs_tol)
            np.testing.assert_allclose(torch_probs, onnx_probs, rtol=rel_tol, atol=abs_tol)
            np.testing.assert_allclose(onnx_probs, coreml_probs, rtol=rel_tol, atol=abs_tol)
            logging.debug("probs check passed")
        # Compare hidden and cell states pairwise across all three backends
        if check_hidden:
            np.testing.assert_allclose(coreml_h, torch_h, rtol=rel_tol, atol=abs_tol)
            np.testing.assert_allclose(coreml_c, torch_c, rtol=rel_tol, atol=abs_tol)
            np.testing.assert_allclose(torch_h, onnx_h, rtol=rel_tol, atol=abs_tol)
            np.testing.assert_allclose(torch_c, onnx_c, rtol=rel_tol, atol=abs_tol)
            np.testing.assert_allclose(onnx_h, coreml_h, rtol=rel_tol, atol=abs_tol)
            np.testing.assert_allclose(onnx_c, coreml_c, rtol=rel_tol, atol=abs_tol)
            logging.debug("hidden check passed")
        logging.debug(f"\nChecks: preds: {check_preds}, probs: {check_probs}, hidden: {check_hidden} passed")
    # NOTE(review): dict_to_json writes into ./output/ -- requires the dir to exist
    dict_to_json(predictions_dict, "./output/"+model_name+"_output.json")
def dict_to_json(input_dict, json_path):
    """
    Serialize input_dict to a JSON file at json_path.
    Creates the destination directory if it does not already exist -- the
    original implementation raised FileNotFoundError when e.g. ./output/
    was missing.
    Args
        input_dict (dict): JSON-serializable dictionary to write
        json_path (str): destination file path
    """
    out_dir = os.path.dirname(json_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(json_path, 'w') as fid:
        json.dump(input_dict, fid)
def gen_test_data(preproc, num_frames, freq_dim, hidden_size):
    """
    Build a dict of named test inputs for model cross-validation: an all-zeros
    case, a seeded random-normal case, and normalized features from a fixed
    set of audio files.
    Args
        preproc: model preprocessor object (passed through to load_audio)
        num_frames (int): number of time frames per input
        freq_dim (int): feature (frequency) dimension
        hidden_size (int): LSTM hidden dimension
    Returns
        dict mapping test name -> [input features, hidden state, cell state]
    """
    # all-zeros input: (batch=1, time, freq) features plus (layers=5, batch=1, hidden) states
    test_x_zeros = np.zeros((1, num_frames, freq_dim)).astype(np.float32)
    test_h_zeros = np.zeros((5, 1, hidden_size)).astype(np.float32)
    test_c_zeros = np.zeros((5, 1, hidden_size)).astype(np.float32)
    test_zeros = [test_x_zeros, test_h_zeros, test_c_zeros]
    # random-normal input (np.random is seeded at module level, hence the dict key below)
    test_x_randn = np.random.randn(1, num_frames, freq_dim).astype(np.float32)
    test_h_randn = np.random.randn(5, 1, hidden_size).astype(np.float32)
    test_c_randn = np.random.randn(5, 1, hidden_size).astype(np.float32)
    test_randn = [test_x_randn, test_h_randn, test_c_randn]
    # dict keys for the audio-based test cases, paired 1:1 with the filenames below
    test_names = ["Speak_5_out", "Dustin-5-drz-test-20191202", "Dustin-5-plane-noise",
                  "LibSp_777-126732-0003", "LibSp_84-121123-0001",
                  "Speak_1_4ysq5X0Mvxaq1ArAntCWC2YkWHc2-1574725037",
                  "Speak_2_58cynYij95TbB9Nlz3TrKBbkg643-1574725017",
                  "Speak_3_CcSEvcOEineimGwKOk1c8P2eU0q1-1574725123",
                  "Speak_4_OVrsxD1n9Wbh0Hh6thej8FIBIOE2-1574725033",
                  "Speak_6_R3SdlQCwoYQkost3snFxzXS5vam2-1574726165"]
    test_fns = ["Speak-out.wav", "Dustin-5-drz-test-20191202.wav", "Dustin-5-plane-noise.wav", "Librispeech-777-126732-0003.wav",
                "Librispeech-84-121123-0001.wav", "Speak-4ysq5X0Mvxaq1ArAntCWC2YkWHc2-1574725037.wav",
                "Speak-58cynYij95TbB9Nlz3TrKBbkg643-1574725017.wav", "Speak-CcSEvcOEineimGwKOk1c8P2eU0q1-1574725123.wav",
                "Speak-OVrsxD1n9Wbh0Hh6thej8FIBIOE2-1574725033.wav", "Speak-R3SdlQCwoYQkost3snFxzXS5vam2-1574726165.wav"]
    # (removed two dead lists -- `unused_names` and `used_fns` -- that were
    # defined here but never referenced)
    base_path = './audio_files/'
    audio_dct = load_audio(preproc, test_names, test_fns, base_path, test_h_zeros, test_c_zeros, num_frames)
    test_dct = {'test_zeros': test_zeros, 'test_randn_seed-2020': test_randn}
    test_dct.update(audio_dct)
    return test_dct
def load_audio(preproc, test_names, test_fns, base_path, test_h, test_c, num_frames):
    """Load and preprocess test audio files.

    For each (name, filename) pair, compute the normalized log spectrogram,
    truncate it to `num_frames` time steps, add a batch dimension, and map
    the test name to [features, hidden_state, cell_state].
    """
    dct = {}
    for name, filename in zip(test_names, test_fns):
        features = normalize(preproc, log_spectrogram_from_file(base_path + filename))
        features = np.expand_dims(features[:num_frames, :], 0)
        dct[name] = [features, test_h, test_c]
    return dct
def max_decode(output, blank=39):
    """Greedy CTC decode: take the argmax label per frame, collapse
    consecutive repeats, and drop blank labels.

    Parameters
    ----------
    output : array of shape (time, classes) with per-frame scores.
    blank : index of the CTC blank label.

    Returns
    -------
    list of decoded label indices.
    """
    pred = np.argmax(output, 1)
    seq = []
    prev = None
    for p in pred:
        # Keep a label only if it is not blank and differs from the previous
        # frame (CTC repeat collapsing). Robustness fix: the original indexed
        # pred[0] unconditionally and raised IndexError on empty input.
        if p != blank and p != prev:
            seq.append(p)
        prev = p
    return seq
if __name__=="__main__":
    # Command format: python validation.py <model_name> --num-frames <n>
    parser = argparse.ArgumentParser(description="validates the outputs of the models.")
    parser.add_argument("model_name", help="name of the model.")
    # Fix: the flag was optional and converted with int(args.num_frames),
    # which raised an opaque TypeError when omitted. type=int + required=True
    # makes argparse validate and convert, and report a clear usage error.
    parser.add_argument("--num-frames", type=int, required=True,
                        help="number of input frames in time dimension hard-coded in onnx model")
    args = parser.parse_args()
    main(args.model_name, args.num_frames)
|
import numpy as np
from nptyping import NDArray
from typing import Tuple
import warnings
class Blob:
    """A single propagating blob, discretizable onto an (x, y, t) grid.

    The blob moves with velocity (v_x, v_y); `theta` is its propagation
    angle, used to rotate grid coordinates into the blob's own frame
    (propagation direction / perpendicular direction).
    """

    def __init__(
        self,
        id: int,
        blob_shape: str,
        amplitude: float,
        width_prop: float,
        width_perp: float,
        v_x: float,
        v_y: float,
        pos_x: float,
        pos_y: float,
        t_init: float,
        t_drain: float,
    ) -> None:
        # Fix: removed stray `self.int = int`, which stored the builtin
        # `int` type as an instance attribute (dead leftover).
        self.id = id
        self.blob_shape = blob_shape
        self.amplitude = amplitude
        self.width_prop = width_prop
        self.width_perp = width_perp
        self.v_x = v_x
        self.v_y = v_y
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.t_init = t_init
        self.t_drain = t_drain
        # Propagation angle; purely vertical motion falls back to +-pi/2.
        if self.v_x != 0:
            self.theta = np.arctan(self.v_y / self.v_x)
        else:
            self.theta = np.pi / 2 * np.sign(self.v_y)

    def discretize_blob(
        self,
        x: NDArray,
        y: NDArray,
        t: NDArray,
        Ly: float,
        periodic_y: bool = False,
    ) -> NDArray:
        """
        Discretize blob on grid

        The following blob shapes are implemented:
            gauss: 2D gaussian function
            exp: one sided exponential in x and gaussian in y

        Returns
        -------
        discretized blob on 3d array with dimensions x,y and t : np.array
        """
        if (self.width_perp > 0.1 * Ly or self.width_prop > 0.1 * Ly) and periodic_y:
            warnings.warn("blob width big compared to Ly")
        # Rotate the grid into the blob frame: x_perp runs along the
        # propagation direction, y_perp across it.
        x_perp, y_perp = self.__rotate(
            origin=(self.pos_x, self.pos_y), x=x, y=y, angle=-self.theta
        )
        if not periodic_y:
            return self.__single_blob(x_perp, y_perp, t, Ly, periodic_y)
        if np.sin(self.theta) == 0:
            __x_border = Ly - self.pos_y
            __adjusted_Ly = Ly
        else:
            __x_border = (Ly - self.pos_y) / np.sin(self.theta)
            __adjusted_Ly = Ly / np.sin(self.theta)
        if type(t) in [int, float]:
            # t has dimensionality = 0, used for testing
            __number_of_y_propagations = (
                self.__prop_dir_blob_position(t) + __adjusted_Ly - __x_border
            ) // __adjusted_Ly
        else:
            __number_of_y_propagations = (
                self.__prop_dir_blob_position(t)[0, 0] + __adjusted_Ly - __x_border
            ) // __adjusted_Ly
        # Periodic y: sum the blob with its two wrapped copies, one shifted
        # +Ly and one -Ly along the (rotated) y direction.
        return (
            self.__single_blob(
                x_perp, y_perp, t, Ly, periodic_y, __number_of_y_propagations
            )
            + self.__single_blob(
                x_perp,
                y_perp,
                t,
                Ly,
                periodic_y,
                __number_of_y_propagations,
                x_offset=Ly * np.sin(self.theta),
                y_offset=Ly * np.cos(self.theta),
            )
            + self.__single_blob(
                x_perp,
                y_perp,
                t,
                Ly,
                periodic_y,
                __number_of_y_propagations,
                x_offset=-Ly * np.sin(self.theta),
                y_offset=-Ly * np.cos(self.theta),
            )
        )

    def __single_blob(
        self,
        x_perp: NDArray,
        y_perp: NDArray,
        t: NDArray,
        Ly: float,
        periodic_y: bool,
        number_of_y_propagations: NDArray = 0,
        x_offset: NDArray = 0,
        y_offset: NDArray = 0,
    ) -> NDArray:
        # amplitude * drain decay * shape along propagation * shape across
        # propagation * arrival step function.
        return (
            self.amplitude
            * self.__drain(t)
            * self.__propagation_direction_shape(
                x_perp + x_offset,
                t,
                Ly,
                periodic_y,
                number_of_y_propagations=number_of_y_propagations,
            )
            * self.__perpendicular_direction_shape(
                y_perp + y_offset,
                Ly,
                periodic_y,
                number_of_y_propagations=number_of_y_propagations,
            )
            * self.__blob_arrival(t)
        )

    def __drain(self, t: NDArray) -> NDArray:
        # Exponential amplitude decay with time constant t_drain.
        return np.exp(-(t - self.t_init) / self.t_drain)

    def __blob_arrival(self, t: NDArray) -> NDArray:
        # Blob exists only for t >= t_init (heaviside(0) = 1 here).
        return np.heaviside(t - self.t_init, 1)

    def __propagation_direction_shape(
        self,
        x: NDArray,
        t: NDArray,
        Ly: float,
        periodic_y: bool,
        number_of_y_propagations: NDArray,
    ) -> NDArray:
        if periodic_y:
            x_diffs = (
                x
                - self.__prop_dir_blob_position(t)
                + number_of_y_propagations * Ly * np.sin(self.theta)
            )
        else:
            x_diffs = x - self.__prop_dir_blob_position(t)
        if self.blob_shape == "gauss":
            return 1 / np.sqrt(np.pi) * np.exp(-(x_diffs ** 2 / self.width_prop ** 2))
        elif self.blob_shape == "exp":
            # One-sided exponential: nonzero only behind the blob front.
            return np.exp(x_diffs) * np.heaviside(-1.0 * (x_diffs), 1)
        else:
            raise NotImplementedError(
                self.__class__.__name__ + ".blob shape not implemented"
            )

    def __perpendicular_direction_shape(
        self,
        y: NDArray,
        Ly: float,
        periodic_y: bool,
        number_of_y_propagations: NDArray,
    ) -> NDArray:
        if periodic_y:
            y_diffs = (
                y
                - self.__perp_dir_blob_position()
                + number_of_y_propagations * Ly * np.cos(self.theta)
            )
        else:
            y_diffs = y - self.__perp_dir_blob_position()
        return 1 / np.sqrt(np.pi) * np.exp(-(y_diffs ** 2) / self.width_perp ** 2)

    def __prop_dir_blob_position(self, t: NDArray) -> NDArray:
        # Position along the propagation direction: start + speed * elapsed.
        return self.pos_x + (self.v_x ** 2 + self.v_y ** 2) ** 0.5 * (t - self.t_init)

    def __perp_dir_blob_position(self) -> NDArray:
        # Perpendicular position is fixed at the initial y position.
        return self.pos_y

    def __rotate(
        self, origin: Tuple[float, float], x: NDArray, y: NDArray, angle: float
    ) -> Tuple[float, float]:
        # Standard 2D rotation of (x, y) about `origin` by `angle` radians.
        ox, oy = origin
        px, py = x, y
        qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)
        qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)
        return qx, qy
|
import os
import numpy as np
from collections import OrderedDict
import math
import matplotlib
import matplotlib.pyplot as plt
import PIL.Image as Image
import torch
from models.base_model import BaseModel
from models.modules.base_module import ModuleFactory
import utils.util as util
from models.modules.vgg import VGG16FeatureExtractor
class FOAFCGAN_alternate_training(BaseModel):
    """Occlusion-aware face completion GAN trained with alternating D/G steps.

    The generator predicts a completed face plus a soft occlusion mask; the
    final synthesis blends the occluded input and the generated face through
    that mask. A WGAN-style discriminator (with gradient penalty) scores
    realism and optionally regresses face attributes. When ground truth is
    available, VGG16 features drive perceptual and style losses.
    """

    def __init__(self, opt):
        super(FOAFCGAN_alternate_training, self).__init__(opt)
        self._name = 'FOAFCGAN_alternate_training'

        # Build G / D / VGG first; optimizers only exist in train mode.
        self._init_create_networks()
        if self._is_train:
            self._init_train_vars()

        # Load checkpoints when evaluating, or when resuming training.
        if not self._is_train or self._opt.load_epoch > 0:
            self.load()

        self._init_prefetch_inputs()
        self._init_losses()

    def _init_create_networks(self):
        """Instantiate generator, discriminator and the VGG feature extractor,
        each wrapped in DataParallel on GPU 0."""
        self._G = self._create_generator()
        self._G.init_weights()
        self._G = torch.nn.DataParallel(self._G, device_ids=[0])
        self._D = self._create_discriminator()
        # self._D.init_weights()
        self._D = torch.nn.DataParallel(self._D, device_ids=[0])
        self._vgg = VGG16FeatureExtractor()
        self._vgg = torch.nn.DataParallel(self._vgg, device_ids=[0])

    def _create_generator(self):
        """Factory hook for the generator network."""
        return ModuleFactory.get_by_name('generator_wgan')

    def _create_discriminator(self):
        """Factory hook for the discriminator (critic + attribute head)."""
        return ModuleFactory.get_by_name('discriminator_wgan_cls')

    def _init_train_vars(self):
        """Create Adam optimizers for G and D with their own learning rates."""
        self._current_lr_G = self._opt.lr_G
        self._current_lr_D = self._opt.lr_D
        # initialize optimizers
        self._optimizer_G = torch.optim.Adam(self._G.parameters(), lr=self._current_lr_G,
                                             betas=[self._opt.G_adam_b1, self._opt.G_adam_b2])
        self._optimizer_D = torch.optim.Adam(self._D.parameters(), lr=self._current_lr_D,
                                             betas=[self._opt.D_adam_b1, self._opt.D_adam_b2])

    def _init_prefetch_inputs(self):
        """Pre-allocate input buffers (occluded/clean images and attributes)."""
        self._input_img_occ = self._Tensor(self._opt.batch_size, 3, self._opt.image_size, self._opt.image_size)
        self._input_img_none_occ = self._Tensor(self._opt.batch_size, 3, self._opt.image_size, self._opt.image_size)
        self._input_occ_attr = self._Tensor(self._opt.batch_size, self._opt.attr_nc)
        self._input_none_occ_attr = self._Tensor(self._opt.batch_size, self._opt.attr_nc)

    def _init_losses(self):
        """Create loss criteria and zero-initialize all tracked loss scalars."""
        # define loss functions
        self._compute_loss_l1 = torch.nn.L1Loss()
        self._compute_loss_attr = torch.nn.MSELoss()
        # real and fake occluded face image loss
        self._loss_g_mask = self._Tensor([0])
        self._loss_g_masked_fake = self._Tensor([0])
        self._loss_g_mask_smooth = self._Tensor([0])
        # self._loss_g_mask_hash = self._Tensor([0])
        self._loss_g_attr = self._Tensor([0])
        self._loss_g_synth_smooth = self._Tensor([0])
        # fake occluded face image loss
        self._loss_g_vaild = self._Tensor([0])
        self._loss_g_hole = self._Tensor([0])
        self._loss_g_perceptual = self._Tensor([0])
        self._loss_g_style = self._Tensor([0])
        # d loss
        self._loss_d_attr = self._Tensor([0])
        self._loss_d_real = self._Tensor([0])
        self._loss_d_fake = self._Tensor([0])
        self._loss_d_gp = self._Tensor([0])

    def set_input(self, input):
        """Copy a data batch into the prefetch buffers and move them to the
        model device. Attribute tensors are only updated when provided."""
        self._input_img_occ.resize_(input['occ_img'].size()).copy_(input['occ_img'])
        self._input_img_none_occ.resize_(input['none_occ_img'].size()).copy_(input['none_occ_img'])
        if input['occ_attr'] is not None:
            self._input_occ_attr.resize_(input['occ_attr'].size()).copy_(input['occ_attr'])
        if input['none_occ_attr'] is not None:
            self._input_none_occ_attr.resize_(input['none_occ_attr'].size()).copy_(input['none_occ_attr'])
        self._input_img_occ = self._input_img_occ.to(self._device)
        self._input_img_none_occ = self._input_img_none_occ.to(self._device)
        self._input_occ_attr = self._input_occ_attr.to(self._device)
        self._input_none_occ_attr = self._input_none_occ_attr.to(self._device)

    def set_train(self):
        self._G.train()
        self._D.train()
        self._is_train = True

    def set_eval(self):
        # NOTE: only G is switched to eval mode; D is untouched here.
        self._G.eval()
        self._is_train = False

    def forward(self, keep_data_for_visuals=False):
        """Evaluation-only forward pass: generate the completed face and its
        mask, blend them into the synthesis, and optionally cache visuals.
        Does nothing in train mode (training goes via optimize_parameters)."""
        if not self._is_train:
            im_occ = self._input_img_occ
            fake_img, fake_img_mask = self._G.forward(im_occ)
            # Mask keeps the (unoccluded) input where it is reliable and
            # fills the rest with generated content.
            fake_img_synthesis = fake_img_mask * im_occ + (1 - fake_img_mask) * fake_img
            if keep_data_for_visuals:
                self._vis_batch_occ_img = util.tensor2im(im_occ, idx=-1)
                self._vis_batch_fake_img = util.tensor2im(fake_img.data, idx=-1)
                self._vis_batch_fake_img_mask = util.tensor2maskim(fake_img_mask.data, idx=-1)
                self._vis_batch_fake_synthesis = util.tensor2im(fake_img_synthesis.data, idx=-1)
                self._vis_batch_none_occ_img = util.tensor2im(self._input_img_none_occ, idx=-1)

    def optimize_parameters(self, train_generator=True, keep_data_for_visuals=False, has_GT=False, has_attr=False):
        """One training iteration: D step, gradient-penalty step, then
        (optionally) a G step."""
        if self._is_train:
            self._B = self._input_img_occ.size(0)
            self._img_occ = self._input_img_occ
            self._img_none_occ = self._input_img_none_occ
            self._none_occ_attr = self._input_none_occ_attr
            self._occ_attr = self._input_occ_attr

            # train D (WGAN critic + optional attribute loss)
            loss_D, fake_img_synthesis = self._forward_D(has_attr)
            self._optimizer_D.zero_grad()
            loss_D.backward()
            self._optimizer_D.step()

            # gradient penalty is applied in a separate optimizer step
            loss_D_gp = self._gradinet_penalty_D(fake_img_synthesis)
            self._optimizer_D.zero_grad()
            loss_D_gp.backward()
            self._optimizer_D.step()

            # train G (typically less often than D in WGAN training)
            if train_generator:
                loss_G = self._forward_G(keep_data_for_visuals, has_GT, has_attr)
                self._optimizer_G.zero_grad()
                loss_G.backward()
                self._optimizer_G.step()

    def _forward_G(self, keep_data_for_visuals, has_GT, has_attr):
        """Generator forward: accumulate the enabled loss terms and return
        their sum. GT-dependent terms (style/perceptual/hole/valid) are only
        computed when has_GT; attribute loss only when has_attr."""
        fake_img, fake_img_mask = self._G.forward(self._img_occ)
        fake_img_synthesis = fake_img_mask * self._img_occ + (1 - fake_img_mask) * fake_img
        if has_GT == True:
            # VGG features of synthesis, raw generation and ground truth.
            fake_img_synthesis_feature = self._vgg(fake_img_synthesis)
            fake_img_feature = self._vgg(fake_img)
            gt_img_feature = self._vgg(self._img_none_occ)
            style = 0
            perceptual = 0
            for i in range(3):
                # Style loss: L1 between Gram matrices of feature maps.
                style += self._compute_loss_l1(self._compute_loss_gram_matrix(fake_img_feature[i]),
                                               self._compute_loss_gram_matrix(gt_img_feature[i]))
                style += self._compute_loss_l1(self._compute_loss_gram_matrix(fake_img_synthesis_feature[i]),
                                               self._compute_loss_gram_matrix(gt_img_feature[i]))
                # Perceptual loss: L1 directly between feature maps.
                perceptual += self._compute_loss_l1(fake_img_feature[i], gt_img_feature[i])
                perceptual += self._compute_loss_l1(fake_img_synthesis_feature[i], gt_img_feature[i])
            self._loss_g_style = style * self._opt.lambda_g_style
            self._loss_g_perceptual = perceptual * self._opt.lambda_g_perceptual
            # Hole loss: generated content inside the occluded region.
            target = (1 - fake_img_mask) * self._img_none_occ
            target = target.detach()
            self._loss_g_hole = self._compute_loss_l1((1 - fake_img_mask) * fake_img, target) * self._opt.lambda_g_hole
            # Valid loss: generated content in the visible region.
            target = fake_img_mask * self._img_none_occ
            target = target.detach()
            self._loss_g_vaild = self._compute_loss_l1(fake_img_mask * fake_img, target) * self._opt.lambda_g_valid
        # self._loss_g_mask_hash = -0.5 * torch.abs(fake_img_mask - 0.5).mean() * self._opt.lambda_g_hash
        d_fake_img_synthesis_prob, d_fake_img_attr = self._D.forward(fake_img_synthesis)
        if has_attr == True:
            self._loss_g_attr = self._compute_loss_attr(d_fake_img_attr, self._occ_attr) / self._B * self._opt.lambda_D_attr
        # Adversarial loss: make the critic score the synthesis as real.
        self._loss_g_synthesis_fake = self._compute_loss_D(d_fake_img_synthesis_prob, True) * self._opt.lambda_D_prob
        # Mask regularizers: penalize large mean mask and encourage smoothness.
        self._loss_g_mask = -torch.mean(fake_img_mask).pow(2) * self._opt.lambda_mask
        self._loss_g_mask_smooth = self._compute_loss_smooth(fake_img_mask) * self._opt.lambda_mask_smooth
        self._loss_g_synth_smooth = self._compute_loss_smooth(fake_img_synthesis) * self._opt.lambda_g_syhth_smooth
        if keep_data_for_visuals:
            self._vis_batch_occ_img = util.tensor2im(self._input_img_occ, idx=-1)
            self._vis_batch_fake_img = util.tensor2im(fake_img.data, idx=-1)
            self._vis_batch_fake_img_mask = util.tensor2maskim(fake_img_mask.data, idx=-1)
            self._vis_batch_fake_synthesis = util.tensor2im(fake_img_synthesis.data, idx=-1)
            self._vis_batch_none_occ_img = util.tensor2im(self._input_img_none_occ, idx=-1)
        # Combine loss terms according to which supervision is available.
        if has_GT == True and has_attr == True:
            return self._loss_g_synthesis_fake + self._loss_g_mask + \
                   self._loss_g_mask_smooth + self._loss_g_synth_smooth +\
                   self._loss_g_vaild + self._loss_g_hole + \
                   self._loss_g_perceptual + self._loss_g_style + \
                   self._loss_g_attr  # + self._loss_g_mask_hash
        elif has_GT == False and has_attr == True:
            return self._loss_g_synthesis_fake + self._loss_g_mask + \
                   self._loss_g_mask_smooth + self._loss_g_synth_smooth +\
                   self._loss_g_attr  # + self._loss_g_mask_hash
        elif has_GT == False and has_attr == False:
            return self._loss_g_synthesis_fake + self._loss_g_mask + \
                   self._loss_g_mask_smooth + self._loss_g_synth_smooth
            #+ self._loss_g_mask_hash
        else:
            raise NotImplementedError('Not existing has_GT = False and has_attr = True')
        # NOTE(review): unreachable — every branch above returns or raises.
        return None

    def _forward_D(self, has_attr):
        """Discriminator forward: real/fake critic losses (plus optional
        attribute loss on real images). Returns (loss, detached-input
        synthesis) so the caller can reuse it for the gradient penalty."""
        fake_img, fake_img_mask = self._G.forward(self._img_occ)
        fake_img_synthesis = fake_img_mask * self._img_occ + (1 - fake_img_mask) * fake_img
        d_real_img_prob, d_real_img_attr = self._D.forward(self._img_none_occ)
        self._loss_d_real = self._compute_loss_D(d_real_img_prob, True) * self._opt.lambda_D_prob
        if has_attr:
            self._loss_d_attr = self._compute_loss_attr(d_real_img_attr, self._none_occ_attr) / self._B * self._opt.lambda_D_attr
        # detach() stops D's loss from backpropagating into G.
        d_fake_img_prob, _ = self._D.forward(fake_img_synthesis.detach())
        self._loss_d_fake = self._compute_loss_D(d_fake_img_prob, False) * self._opt.lambda_D_prob
        if has_attr:
            return self._loss_d_real + self._loss_d_fake + self._loss_d_attr, fake_img_synthesis
        else:
            return self._loss_d_real + self._loss_d_fake, fake_img_synthesis

    def _gradinet_penalty_D(self, fake_img_synthesis):
        """WGAN-GP: penalize the critic's gradient norm deviating from 1 on
        random interpolations between real and synthesized images."""
        # interpolate sample
        alpha = torch.rand(self._B, 1, 1, 1).expand_as(self._img_none_occ).to(self._device)
        interpolated = alpha * self._img_none_occ.data + (1 - alpha) * fake_img_synthesis.data
        interpolated.requires_grad = True
        interpolated_prob, _ = self._D(interpolated)

        # compute gradients of the critic output w.r.t. the interpolation
        grad = torch.autograd.grad(outputs=interpolated_prob,
                                   inputs=interpolated,
                                   grad_outputs=torch.ones(interpolated_prob.size()).to(self._device),
                                   retain_graph=True,
                                   create_graph=True,
                                   only_inputs=True)[0]

        # penalize gradients: (||grad||_2 - 1)^2
        grad = grad.view(grad.size(0), -1)
        grad_l2norm = torch.sqrt(torch.sum(grad ** 2, dim=1))
        self._loss_d_gp = torch.mean((grad_l2norm - 1) ** 2) * self._opt.lambda_D_gp
        return self._loss_d_gp

    def _compute_loss_D(self, estim, is_real):
        """Wasserstein critic loss: maximize real scores, minimize fake ones."""
        return -torch.mean(estim) if is_real else torch.mean(estim)

    def _compute_loss_smooth(self, mat):
        """Total-variation smoothness: sum of absolute neighbor differences."""
        return torch.sum(torch.abs(mat[:, :, :, :-1] - mat[:, :, :, 1:])) + \
               torch.sum(torch.abs(mat[:, :, :-1, :] - mat[:, :, 1:, :]))

    def _compute_loss_gram_matrix(self, feat):
        """Normalized Gram matrix of a (b, ch, h, w) feature map."""
        (b, ch, h, w) = feat.size()
        feat = feat.view(b, ch, h * w)
        feat_t = feat.transpose(1, 2)
        gram = torch.bmm(feat, feat_t) / (ch * h * w)
        return gram

    def get_current_errors(self, has_GT, has_attr):
        """Return an OrderedDict of scalar losses matching the supervision
        flags used for the last optimization step.

        NOTE(review): 'g_attr' / 'd_attr' are stored as tensors (no .item()),
        unlike the other entries — confirm downstream logging handles that.
        """
        if has_GT == True and has_attr == True:
            loss_dict = OrderedDict([('g_mskd_fake', self._loss_g_synthesis_fake.item()),
                                     ('g_m_mean', self._loss_g_mask.item()),
                                     ('g_m_smooth', self._loss_g_mask_smooth.item()),
                                     # ('g_m_hash', self._loss_g_mask_hash.item()),
                                     ('g_generate_face_smooth', self._loss_g_synth_smooth.item()),
                                     ('g_attr', self._loss_g_attr),
                                     ('g_generate_face_vaild', self._loss_g_vaild.item()),
                                     ('g_generate_face_hole', self._loss_g_hole.item()),
                                     ('g_generate_face_perceptual', self._loss_g_perceptual.item()),
                                     ('g_generate_face_style', self._loss_g_style.item()),
                                     ('d_real', self._loss_d_real.item()),
                                     ('d_fake', self._loss_d_fake.item()),
                                     ('d_gp', self._loss_d_gp.item()),
                                     ('d_attr', self._loss_d_attr)
                                     ])
        elif has_GT == False and has_attr == True:
            loss_dict = OrderedDict([('g_mskd_fake', self._loss_g_synthesis_fake.item()),
                                     ('g_m_mean', self._loss_g_mask.item()),
                                     ('g_m_smooth', self._loss_g_mask_smooth.item()),
                                     # ('g_m_hash', self._loss_g_mask_hash.item()),
                                     ('g_generate_face_smooth', self._loss_g_synth_smooth.item()),
                                     ('g_attr', self._loss_g_attr),
                                     ('d_real', self._loss_d_real.item()),
                                     ('d_fake', self._loss_d_fake.item()),
                                     ('d_gp', self._loss_d_gp.item()),
                                     ('d_attr', self._loss_d_attr)
                                     ])
        elif has_GT == False and has_attr == False:
            loss_dict = OrderedDict([('g_mskd_fake', self._loss_g_synthesis_fake.item()),
                                     ('g_m_mean', self._loss_g_mask.item()),
                                     ('g_m_smooth', self._loss_g_mask_smooth.item()),
                                     # ('g_m_hash', self._loss_g_mask_hash.item()),
                                     ('g_generate_face_smooth', self._loss_g_synth_smooth.item()),
                                     ('d_real', self._loss_d_real.item()),
                                     ('d_fake', self._loss_d_fake.item()),
                                     ('d_gp', self._loss_d_gp.item()),
                                     ])
        else:
            raise NotImplementedError('Not existing has_GT = False and has_attr = True')
        return loss_dict

    def get_current_scalars(self):
        """Current learning rates for display."""
        return OrderedDict([('lr_G', self._current_lr_G), ('lr_D', self._current_lr_D)])

    def get_current_visuals(self):
        """Visuals cached by the last forward/_forward_G with
        keep_data_for_visuals=True."""
        visuals = OrderedDict()
        visuals['1_batch_occ_img'] = self._vis_batch_occ_img
        visuals['2_batch_fake_img'] = self._vis_batch_fake_img
        visuals['3_batch_fake_img_mask'] = self._vis_batch_fake_img_mask
        visuals['4_batch_fake_img_synthesis'] = self._vis_batch_fake_synthesis
        visuals['5_batch_none_occ_img'] = self._vis_batch_none_occ_img
        return visuals

    def save(self, label):
        """Persist networks and optimizer states under the given epoch label."""
        # save networks
        self._save_network(self._G, 'G', label)
        self._save_network(self._D, 'D', label)
        # save optimizers
        self._save_optimizer(self._optimizer_G, 'G', label)
        self._save_optimizer(self._optimizer_D, 'D', label)

    # def load(self):
    #     load_epoch = self._opt.load_epoch
    #     self._load_network(self._G, 'G', load_epoch)

    def load(self):
        """Load G (always) and, in train mode, also D and both optimizers."""
        load_epoch = self._opt.load_epoch
        self._load_network(self._G, 'G', load_epoch)
        if self._is_train:
            self._load_network(self._D, 'D', load_epoch)
            self._load_optimizer(self._optimizer_G, 'G', load_epoch)
            self._load_optimizer(self._optimizer_D, 'D', load_epoch)

    def update_learning_rate(self):
        """Linearly decay both learning rates by 1/nepochs_decay per call."""
        # updated learning rate G
        lr_decay_G = self._opt.lr_G / self._opt.nepochs_decay
        self._current_lr_G -= lr_decay_G
        for param_group in self._optimizer_G.param_groups:
            param_group['lr'] = self._current_lr_G
        print('update G learning rate: %f -> %f' % (self._current_lr_G + lr_decay_G, self._current_lr_G))
        # update learning rate D
        lr_decay_D = self._opt.lr_D / self._opt.nepochs_decay
        self._current_lr_D -= lr_decay_D
        for param_group in self._optimizer_D.param_groups:
            param_group['lr'] = self._current_lr_D
        print('update D learning rate: %f -> %f' % (self._current_lr_D + lr_decay_D, self._current_lr_D))
|
<reponame>adityazagade/StockScanner<filename>stockscanner/model/asset/holding.py
from datetime import date, timedelta, datetime
from typing import List
from pandas import Timestamp
from stockscanner.model.config import Config
from stockscanner.persistence.dao_manager import DAOManager
from stockscanner.utils import Constants
class Entry:
    """One buy lot: acquisition date, quantity and unit price."""

    def __init__(self, d: date, quantity: float, price: float) -> None:
        # Exact-type check on purpose: a pandas Timestamp or a datetime is
        # collapsed to a plain date so date arithmetic stays uniform.
        self.date = d.date() if type(d) in (Timestamp, datetime) else d
        self.quantity = quantity
        self.price = price
class Holding:
    """A position in one instrument, kept as a FIFO list of Entry lots."""

    def __init__(self, symbol, history: List[Entry]) -> None:
        """Create a holding; at least one history entry is required."""
        if len(history) <= 0:
            raise Exception("Cannot add a holding without history")
        self.symbol = symbol
        self.history: List[Entry] = history

    def get_average_buy_price(self) -> float:
        """Quantity-weighted average purchase price over all lots."""
        cost = 0
        for entry in self.history:
            cost += entry.quantity * entry.price
        quantity = self.get_quantity()
        if quantity == 0:
            raise Exception("Total quantity == 0")
        return cost / quantity

    def get_quantity(self) -> float:
        """Total units currently held (sum over all lots)."""
        quantity = 0
        for entry in self.history:
            quantity += entry.quantity
        return quantity

    def get_present_value(self) -> float:
        """Current market value: held quantity times the latest close."""
        return self.get_quantity() * self.get_current_price()

    def get_current_price(self) -> float:
        """Latest close within the past 5 days, read via the ticker DAO."""
        dao = DAOManager.get_instance().get_dao_for_ticker()
        df = dao.read_all_data(self.symbol)
        # 5-day lookback so weekends/holidays still yield at least one row.
        # NOTE(review): comparison uses "%d-%b-%Y" strings — confirm the
        # 'Date' column compares chronologically with that format.
        mask = (df['Date'] > (date.today() - timedelta(5)).strftime("%d-%b-%Y")) & (
                df['Date'] <= date.today().strftime("%d-%b-%Y"))
        return float((df.loc[mask]).iloc[-1]['Close'])

    def get_price_as_of_date(self, d) -> float:
        """Close price on (or within 5 days before) date d."""
        dao = DAOManager.get_instance().get_dao_for_ticker()
        df = dao.read_all_data(self.symbol)
        mask = (df['Date'] >= (d - timedelta(5)).strftime("%d-%b-%Y")) & (df['Date'] <= d.strftime("%d-%b-%Y"))
        return float((df.loc[mask]).iloc[-1]['Close'])

    def get_value_as_of_date(self, d) -> float:
        """Holding value at date d: quantity times the price as of d."""
        return self.get_quantity() * self.get_price_as_of_date(d)

    def get_invested_amount(self):
        """Total capital invested: sum of price * quantity over all lots.

        Bug fix: the original returned inside the loop, so only the first
        lot was ever counted.
        """
        total = 0
        for entry in self.history:
            total += entry.price * entry.quantity
        return total

    def add_entry(self, d, quantity, price):
        """Append a new buy lot."""
        self.history.append(Entry(d, quantity, price))

    def remove_entries(self, **kwargs):
        """Sell `quantity` units FIFO, consuming the oldest lots first.

        Keyword args: quantity, date, price.
        """
        quantity = kwargs.get("quantity")
        d = kwargs.get("date")
        price = kwargs.get("price")
        if self.get_quantity() < quantity:
            raise Exception("Cannot remove more than what is present")
        if quantity > self.history[0].quantity:
            # Consume the oldest lot entirely, then recurse for the rest.
            q_tmp = self.history[0].quantity
            self.history.pop(0)
            self.remove_entries(quantity=quantity - q_tmp, date=d, price=price)
        elif quantity == self.history[0].quantity:
            self.history.pop(0)
        else:
            # Partial sale from the oldest lot.
            self.history[0].quantity = (self.history[0].quantity - quantity)
class SavingsAccount(Holding):
    """A cash holding accruing daily compound interest; unit price is 1."""

    def __init__(self, symbol, history: List[Entry]) -> None:
        super().__init__(symbol, history)
        # Config stores the rate in percent; keep it as a fraction here.
        self.interest_rate = Config.load_config()["interest_rate"] / 100

    def get_average_buy_price(self) -> float:
        """Total deposited cash (price is always 1, so this is the sum)."""
        total = 0
        for entry in self.history:
            total += entry.price * entry.quantity
        return total

    def get_present_value(self) -> float:
        """Deposits compounded daily up to today.

        Bug fix: the daily rate was computed as interest_rate / 365 * 100,
        undoing the percent conversion done in __init__ and inflating the
        rate 100x; this now matches get_value_as_of_date().
        """
        total = 0
        d = date.today()
        for entry in self.history:
            if entry.date < d:
                principal = entry.quantity * entry.price
                daily_rate = self.interest_rate / 365
                total += principal * pow((1 + daily_rate), (d - entry.date).days)
        return total

    def get_current_price(self) -> float:
        """Cash: unit price is always 1."""
        return 1

    def get_price_as_of_date(self, d) -> float:
        """Cash: unit price is always 1 at any date."""
        return 1

    def get_value_as_of_date(self, d) -> float:
        """Deposits compounded daily up to date d (Timestamp/datetime is
        normalized to a plain date before the day-count arithmetic)."""
        total = 0
        for entry in self.history:
            if entry.date <= d:
                principal = entry.quantity * entry.price
                daily_rate = self.interest_rate / 365
                curr_date = d
                if (type(d) is Timestamp) or (type(d) is datetime):
                    curr_date = d.date()
                total += principal * pow((1 + daily_rate), (curr_date - entry.date).days)
        return total

    def get_quantity(self) -> float:
        return super().get_quantity()

    def get_invested_amount(self):
        return super().get_invested_amount()

    def remove_entries(self, **kwargs):
        """Withdraw: collapse the history into a single entry holding the
        remaining (interest-accrued) balance as of the withdrawal date."""
        quantity = kwargs.get("quantity")
        d = kwargs.get("date")
        value_to_be_removed = quantity * self.get_current_price()
        expected_value = self.get_value_as_of_date(d) - value_to_be_removed
        self.history = [Entry(d=d, quantity=expected_value, price=1)]
class HoldingBuilder:
    """Fluent builder assembling a Holding (or SavingsAccount) from lots."""

    def __init__(self, symbol) -> None:
        super().__init__()
        self.history = []
        self.symbol = symbol

    def with_entry(self, d: date, quantity: float, price: float):
        """Append one lot and return self for chaining."""
        self.history.append(Entry(d, quantity, price))
        return self

    def build(self):
        """Create a SavingsAccount for the savings symbol, else a Holding."""
        cls = SavingsAccount if self.symbol == Constants.SAVINGS_ACC else Holding
        return cls(self.symbol, self.history)
|
###
### Multimodal registration with exhaustive search mutual information
### Author: Johan \"{O}fverstedt
###
from numpy.random.mtrand import random
import time
import torch
import torch.nn.functional
import numpy as np
import torch.nn.functional as F
import torch.fft
import torchvision.transforms.functional as TF
import transformations
import util3d
# Default dtype used when converting raw inputs to torch tensors.
VALUE_TYPE = torch.float32
# Shared align_corners setting for grid generation / sampling calls.
ALIGN_CORNERS = True
def matrix_string(matrix):
    """Format a flat numpy array as '[a, b, ...]' with two decimal places."""
    parts = [f'{matrix[i]:.02f}' for i in range(matrix.size)]
    return '[' + ', '.join(parts) + ']'
# Creates a regular grid of angles around a center value.
def grid_angles(center, radius, n = 32):
    """Return n evenly spaced angles spanning center +- radius.

    When radius < 180 the endpoint is included (denominator n-1),
    otherwise the grid wraps so the endpoint is excluded.
    """
    denom = n - 1 if radius < 180 else n
    return [center + (2.0 * (i / denom) - 1.0) * radius for i in range(n)]
def random_angles_3d(centers, center_prob, radius, n = 32, include_centers=True):
    """Sample n random angle triplets uniformly within +-radius around
    (probability-weighted) center angles; optionally keep the centers too."""
    if not isinstance(centers, list):
        centers = [centers]
    if center_prob is not None:
        p = center_prob / np.sum(center_prob)
    else:
        p = None
    center_inds = np.arange(len(centers))
    angles = list(centers) if include_centers else []
    for _ in range(n):
        chosen = centers[np.random.choice(center_inds, p=p, replace=True)]
        offsets = np.random.random(size=(chosen.size,))
        angles.append(chosen + (2.0 * offsets - 1.0) * radius)
    return angles
def quasi_random_angles_3d(center, radius, n = 32):
    """Low-discrepancy angle triplets around `center`, generated from the
    generalized golden ratio (R3 sequence) with a random starting state."""
    phi3 = 1.220744084605759475361686349108831
    alpha = np.array([1.0/phi3, 1.0/(phi3**2.0), 1.0/(phi3**3.0)])
    state = np.random.random(size=(3,))
    angles = []
    for i in range(n):
        frac = state + i * alpha
        frac -= np.floor(frac)
        state[:] = frac[:]
        angles.append(center + (2.0 * frac - 1.0) * radius)
    return angles
### Helper functions
def sum_pool(A, ds_factor):
    """Block-wise sum pooling via avg_pool3d with the divisor forced to 1."""
    return F.avg_pool3d(A, ds_factor, divisor_override=1)
def compute_entropy(C, N, eps=1e-7):
    """Per-bin p*log2(p) term (note: NOT negated). The log argument is
    clamped to eps so empty bins contribute exactly 0 * log2(eps) = 0."""
    p = C / N
    return p * torch.log2(torch.clamp(p, min=eps, max=None))
def float_compare(A, c):
    """Soft (triangular) indicator of A == c: 1 at equality, falling
    linearly to 0 once |A - c| >= 1."""
    return torch.clamp(1 - torch.abs(A - c), min=0.0)
def fft_of_levelsets(A, Q, packing, ds, setup_fn):
    """Apply setup_fn to the Q soft level-sets of quantized image A,
    processing `packing` levels per batch to bound memory use.

    Returns a list of (result, level_start, level_end) tuples.
    """
    fft_list = []
    for a_start in range(0, Q, packing):
        a_end = np.minimum(a_start + packing, Q)
        A_cat = torch.cat([float_compare(A, a) for a in range(a_start, a_end)], 0)
        ffts = setup_fn(A_cat, ds)
        del A_cat
        fft_list.append((ffts, a_start, a_end))
    return fft_list
def fft(A):
    """Real-to-complex FFT over the three spatial dims (2, 3, 4)."""
    return torch.fft.rfftn(A, dim=(2, 3, 4))
def ifft(Afft):
    """Inverse of fft(): complex-to-real FFT over dims (2, 3, 4)."""
    return torch.fft.irfftn(Afft, dim=(2, 3, 4))
def fftconv(A, B):
    """Pointwise spectral product (spatial-domain convolution/correlation)."""
    return A * B
def corr_target_setup(A, ds=1):
    """Optionally sum-pool-downsample the target, then FFT it."""
    if ds > 1:
        A = sum_pool(A, ds)
    return fft(A)
def corr_template_setup(B, ds=1):
    """Optionally downsample the template, then take its conjugate FFT
    (conjugation turns the spectral product into correlation)."""
    if ds > 1:
        B = sum_pool(B, ds)
    return torch.conj(fft(B))
def corr_apply(A, B, sz, do_rounding = True):
    """Multiply two spectra, invert the FFT, crop to `sz`, and optionally
    round (counts from mask correlations are integers up to FFT noise)."""
    C = ifft(fftconv(A, B))
    C = C[:sz[0], :sz[1], :sz[2], :sz[3], :sz[4]]
    return torch.round(C) if do_rounding else C
def tf_rotate(I, angle, fill_value, center=None):
    """Rotate a 2D image tensor by -angle degrees (torchvision convention),
    filling exposed pixels with fill_value."""
    return TF.rotate(I, -angle, center=center, fill=[fill_value])
def make_torch_grad_3d(on_gpu=True):
    """Build a closure that computes normalized central-difference 3D
    gradients: apply_grad(B) returns G / ||G|| with one channel per axis."""
    kernel = np.zeros((3, 1, 3, 3, 3), dtype='float32')
    # Central differences along x, y and z — one output channel each.
    kernel[0, 0, 1, 1, 0], kernel[0, 0, 1, 1, 2] = -0.5, 0.5
    kernel[1, 0, 1, 0, 1], kernel[1, 0, 1, 2, 1] = -0.5, 0.5
    kernel[2, 0, 0, 1, 1], kernel[2, 0, 2, 1, 1] = -0.5, 0.5
    tkern = torch.from_numpy(kernel)
    if on_gpu:
        tkern = tkern.cuda()

    def apply_grad(B, epsilon=1e-5):
        G = torch.conv3d(B, tkern, bias=None, stride=1, padding=1)
        # NOTE(review): the channel norm is reduced without keepdim, so the
        # division broadcasts over dim -4 — confirm this matches intent.
        G_norm = torch.sqrt(torch.sum(G**2, dim=1) + epsilon**2)
        return G / G_norm

    return apply_grad
def corr_apply_multiple(A, B, sz):
    """Sum the spectral products of paired lists A[i]*B[i], then invert
    the FFT once and crop the result to `sz` (no rounding)."""
    acc = fftconv(A[0], B[0])
    for i in range(1, len(A)):
        acc += fftconv(A[i], B[i])
    acc = ifft(acc)
    return acc[:sz[0], :sz[1], :sz[2], :sz[3], :sz[4]]
def create_float_tensor(shape, on_gpu, fill_value=None):
    """Allocate a 5D float32 tensor on GPU or CPU, optionally pre-filled."""
    if on_gpu:
        res = torch.cuda.FloatTensor(shape[0], shape[1], shape[2], shape[3], shape[4])
        if fill_value is not None:
            res.fill_(fill_value)
        return res
    dims = (shape[0], shape[1], shape[2], shape[3], shape[4])
    if fill_value is not None:
        arr = np.full(dims, fill_value=fill_value, dtype='float32')
    else:
        arr = np.zeros(dims, dtype='float32')
    return torch.tensor(arr, dtype=torch.float32)
def to_tensor(A, on_gpu=True):
    """Coerce A to a tensor shaped (1, 1, *spatial), optionally on GPU.
    Non-tensor inputs are first converted using the module dtype."""
    if not torch.is_tensor(A):
        return to_tensor(torch.tensor(A, dtype=VALUE_TYPE), on_gpu=on_gpu)
    out = A.cuda(non_blocking=True) if on_gpu else A
    if out.ndim == 2:
        out = torch.reshape(out, (1, 1, out.shape[0], out.shape[1]))
    elif out.ndim == 3:
        out = torch.reshape(out, (1, 1, out.shape[0], out.shape[1], out.shape[2]))
    return out
def tf_apply_scale_euler_3d_from_matrix(input, sz, thetas, interpolation_mode='bilinear', on_gpu=False):
    """Resample a 5D `input` tensor through the 3x4 affine matrix `thetas`.

    Bug fix: affine_grid and grid_sample live in torch.nn.functional (F);
    torchvision.transforms.functional (TF) has neither function, so the
    original raised AttributeError on every call.
    """
    grid = F.affine_grid(thetas.reshape(1, 3, 4), sz, align_corners=ALIGN_CORNERS).float()
    if on_gpu:
        grid = grid.cuda()
    return F.grid_sample(input, grid, mode=interpolation_mode, align_corners=ALIGN_CORNERS)
### End helper functions
def align_rigid_ngf(A, B, M_A, M_B, angles, overlap=0.5, enable_partial_overlap=True, squared_mode=True, on_gpu=True, save_maps=False, display_progress=True):
    """Exhaustive rigid 3D registration of template B onto target A.

    For every candidate rotation triplet in `angles`, the normalized
    gradient field (NGF) similarity is evaluated for all translations at
    once via FFT-based correlation. Masks M_A / M_B restrict the score to
    valid voxels; when None, full masks of ones are used.

    Returns (cpu_results, maps): cpu_results is sorted by score descending,
    each item (score, angle, -z, -y, -x, cz, cy, cx, rotation_matrix);
    maps holds the raw per-angle score volumes if save_maps, else None.
    NOTE(review): display_progress is currently unused.
    """
    eps=1e-7
    results = []
    maps = []
    A_tensor = to_tensor(A, on_gpu=on_gpu)
    B_tensor = to_tensor(B, on_gpu=on_gpu)
    # Create all constant masks if not provided
    if M_A is None:
        M_A = create_float_tensor(A_tensor.shape, on_gpu, 1.0)
    else:
        M_A = to_tensor(M_A, on_gpu)
        A_tensor = M_A * A_tensor
    if M_B is None:
        M_B = create_float_tensor(B_tensor.shape, on_gpu, 1.0)
    else:
        M_B = to_tensor(M_B, on_gpu)
    # Pad the target so B may hang off A's border by (1-overlap) per axis.
    if enable_partial_overlap:
        partial_overlap_pad_sz = (round(B.shape[-1]*(1.0-overlap)), round(B.shape[-2]*(1.0-overlap)), round(B.shape[-3]*(1.0-overlap)))
        A_tensor = F.pad(A_tensor, (partial_overlap_pad_sz[0], partial_overlap_pad_sz[0], partial_overlap_pad_sz[1], partial_overlap_pad_sz[1], partial_overlap_pad_sz[2], partial_overlap_pad_sz[2]), mode='constant', value=0)
        M_A = F.pad(M_A, (partial_overlap_pad_sz[0], partial_overlap_pad_sz[0], partial_overlap_pad_sz[1], partial_overlap_pad_sz[1], partial_overlap_pad_sz[2], partial_overlap_pad_sz[2]), mode='constant', value=0)
    else:
        partial_overlap_pad_sz = (0, 0, 0)
    ext_ashape = A_tensor.shape
    ext_bshape = B_tensor.shape
    # Valid correlation output extent: A - B + 1 per spatial dimension.
    ext_valid_shape = torch.tensor([1, 1, (A_tensor.shape[2])-(B_tensor.shape[2])+1, (A_tensor.shape[3])-(B_tensor.shape[3])+1, (A_tensor.shape[4])-(B_tensor.shape[4])+1], dtype=torch.long)
    grad_fun = make_torch_grad_3d(on_gpu=on_gpu)
    A_grad = grad_fun(A_tensor)
    A_grad = A_grad * M_A
    # use default center of rotation (which is the center point)
    center_of_rotation = [(B_tensor.shape[4]-1) / 2.0, (B_tensor.shape[3]-1) / 2.0, (B_tensor.shape[2]-1) / 2.0]
    # Precompute the target-side spectra once; they are reused per angle.
    M_A_FFT = corr_target_setup(M_A)
    if squared_mode:
        # square terms
        A1_FFT = corr_target_setup(A_grad[:, 0:1, :, :, :]**2)
        A2_FFT = corr_target_setup(A_grad[:, 1:2, :, :, :]**2)
        A3_FFT = corr_target_setup(A_grad[:, 2:3, :, :, :]**2)
        # cross terms (0 and 1, 0 and 2, 1 and 2)
        A4_FFT = 2.0 * corr_target_setup(A_grad[:, 0:1, :, :, :] * A_grad[:, 1:2, :, :, :])
        A5_FFT = 2.0 * corr_target_setup(A_grad[:, 0:1, :, :, :] * A_grad[:, 2:3, :, :, :])
        A6_FFT = 2.0 * corr_target_setup(A_grad[:, 1:2, :, :, :] * A_grad[:, 2:3, :, :, :])
    else:
        A1_FFT = corr_target_setup(A_grad[:, 0:1, :, :, :])
        A2_FFT = corr_target_setup(A_grad[:, 1:2, :, :, :])
        A3_FFT = corr_target_setup(A_grad[:, 2:3, :, :, :])
    print('#Angles: ', len(angles))
    best_mi = -1.0
    best_ang = np.array([0.0, 0.0, 0.0])
    best_matrix = np.zeros((12,))
    # One rigid 3x4 rotation matrix per candidate angle triplet (degrees -> radians).
    ang_tensors = [torch.tensor(util3d.make_rigid3d_matrix(2.0 * np.pi * angles[i] / 360.0, np.zeros((3,)), np.zeros((3,)), np.zeros((3,)), xyz_mode=True, hom=False)) for i in range(len(angles))]
    if on_gpu:
        ang_tensors = [ang_tensors[i].cuda() for i in range(len(ang_tensors))]
    for ang_ind, ang in enumerate(angles):
        # preprocess B for this angle: rotate image and mask, re-mask,
        # then zero-pad up to the (padded) target size
        B_tensor_rotated = tf_apply_scale_euler_3d_from_matrix(B_tensor, B_tensor.shape, ang_tensors[ang_ind], interpolation_mode='bilinear', on_gpu=False)
        M_B_rotated = tf_apply_scale_euler_3d_from_matrix(M_B, M_B.shape, ang_tensors[ang_ind], interpolation_mode='nearest', on_gpu=False)
        B_tensor_rotated = B_tensor_rotated * M_B_rotated
        B_tensor_rotated = F.pad(B_tensor_rotated, (0, ext_ashape[-1]-ext_bshape[-1], 0, ext_ashape[-2]-ext_bshape[-2], 0, ext_ashape[-3]-ext_bshape[-3], 0, 0, 0, 0), mode='constant', value=0)
        M_B_rotated = F.pad(M_B_rotated, (0, ext_ashape[-1]-ext_bshape[-1], 0, ext_ashape[-2]-ext_bshape[-2], 0, ext_ashape[-3]-ext_bshape[-3], 0, 0, 0, 0), mode='constant', value=0)
        B_grad = grad_fun(B_tensor_rotated)
        del B_tensor_rotated
        B_grad = B_grad * M_B_rotated
        M_B_FFT = corr_template_setup(M_B_rotated)
        del M_B_rotated
        # N = overlapping mask voxel count per translation (the normalizer).
        N = torch.clamp(corr_apply(M_A_FFT, M_B_FFT, ext_valid_shape), min=eps, max=None)
        del M_B_FFT
        if squared_mode:
            # square terms
            B1_FFT = corr_template_setup(B_grad[:, 0:1, :, :, :]**2)
            B2_FFT = corr_template_setup(B_grad[:, 1:2, :, :, :]**2)
            B3_FFT = corr_template_setup(B_grad[:, 2:3, :, :, :]**2)
            # cross terms (0 and 1, 0 and 2, 1 and 2)
            B4_FFT = corr_template_setup(B_grad[:, 0:1, :, :, :] * B_grad[:, 1:2, :, :, :])
            B5_FFT = corr_template_setup(B_grad[:, 0:1, :, :, :] * B_grad[:, 2:3, :, :, :])
            B6_FFT = corr_template_setup(B_grad[:, 1:2, :, :, :] * B_grad[:, 2:3, :, :, :])
        else:
            B1_FFT = corr_template_setup(B_grad[:, 0:1, :, :, :])
            B2_FFT = corr_template_setup(B_grad[:, 1:2, :, :, :])
            B3_FFT = corr_template_setup(B_grad[:, 2:3, :, :, :])
        # NGF score per translation, as one batched inverse FFT.
        if squared_mode:
            NGF = corr_apply_multiple([A1_FFT, A2_FFT, A3_FFT, A4_FFT, A5_FFT, A6_FFT], [B1_FFT, B2_FFT, B3_FFT, B4_FFT, B5_FFT, B6_FFT], ext_valid_shape)
        else:
            NGF = corr_apply_multiple([A1_FFT, A2_FFT, A3_FFT], [B1_FFT, B2_FFT, B3_FFT], ext_valid_shape)
        NGF = NGF / N
        if save_maps:
            maps.append(NGF.cpu().numpy())
        # Reject translations whose mask overlap is below `overlap` of the max.
        (max_n, _) = torch.max(torch.reshape(N, (-1,)), 0)
        N_filt = torch.lt(N, overlap*max_n)
        NGF[N_filt] = -1.0
        del N_filt, N
        NGF_vec = torch.reshape(NGF, (-1,))
        (val, ind) = torch.max(NGF_vec, -1)
        val_cpu = val.item()
        if val_cpu > best_mi:
            # NOTE(review): best_mi becomes a 0-dim tensor after this update;
            # formatting and comparison still work, but val_cpu may be cleaner.
            best_mi = val
            best_ang = ang
            best_matrix[:] = ang_tensors[ang_ind].cpu().numpy().reshape((12,))
        results.append((ang, val, ind, ang_tensors[ang_ind]))
        #NGF.fill_(-1.0)
        unsquared_str = '' if squared_mode else ' unsquared'
        print(f'{100.0*(1+ang_ind)/len(angles):.1f}: NGF{unsquared_str}: {best_mi:.4f}, Angle: {best_ang[0]:.2f}, {best_ang[1]:.2f}, {best_ang[2]:.2f}, {matrix_string(best_matrix)} \r', end='')
    print('\n-------------------------------------------')
    # Move every per-angle maximum to the CPU and unravel its flat index
    # back into (z, y, x) translation offsets.
    cpu_results = []
    for i in range(len(results)):
        ang = results[i][0]
        maxval = results[i][1].cpu().numpy()
        maxind = results[i][2].cpu().numpy()
        rotmatrix = results[i][3].cpu().numpy()
        sub = np.unravel_index(maxind, ext_valid_shape.numpy().astype('int'))
        z = sub[-3]
        y = sub[-2]
        x = sub[-1]
        cpu_results.append((maxval, ang, -(z - partial_overlap_pad_sz[2]), -(y - partial_overlap_pad_sz[1]), -(x - partial_overlap_pad_sz[0]), center_of_rotation[2], center_of_rotation[1], center_of_rotation[0], rotmatrix))
    cpu_results = sorted(cpu_results, key=(lambda tup: tup[0]), reverse=True)
    # print top 10
    top_ind = np.minimum(20, len(cpu_results))
    for i in range(top_ind):
        res = cpu_results[i]
        print(float(res[0]), res[1], res[2], res[3], res[4], res[5], res[6], res[7], res[8].reshape((res[8].size,)))
    print('-------------------------------------------')
    # Return the maximum found
    if save_maps:
        return cpu_results, maps
    else:
        return cpu_results, None
def to_tup(value, n):
    """Normalize *value* into a length-``n`` sequence.

    If *value* is already a tuple or list it is returned unchanged after
    verifying its length; a scalar is replicated into an ``n``-tuple.

    :param value: scalar, tuple, or list.
    :param n: required sequence length.
    :returns: *value* itself (if already a sequence) or a new ``n``-tuple.
    :raises ValueError: if a given sequence does not have length ``n``.
        (Previously this was an ``assert``, which is stripped under ``-O``.)
    """
    if isinstance(value, (tuple, list)):
        if len(value) != n:
            raise ValueError(
                f'expected a sequence of length {n}, got {len(value)}')
        return value
    return tuple([value] * n)
def align_rigid_and_refine_ngf(A, B, M_A, M_B, angles_n, angles_max, start_angle, starting_points, rand_methods, overlap=0.5, enable_partial_overlap=True, algo='gpu_squared', save_maps=False, display_progress=True):
    """Run multi-stage rigid alignment with the NGF metric, refining angles.

    Each stage draws candidate rotation angles around the best angles of the
    previous stage and calls ``align_rigid_ngf`` on the (possibly per-stage)
    images and masks.

    :param A, B: reference/floating images; one per stage, or a single value
        replicated across stages.
    :param M_A, M_B: masks; one per stage, or a single value.
    :param angles_n: candidate-angle counts, one per stage (defines #stages).
    :param angles_max: maximum angular deviation per stage.
    :param start_angle: initial angle triple; ``None`` means (0, 0, 0).
    :param starting_points: how many top results seed the next stage.
    :param rand_methods: angle sampling method per stage, 'quasi' or 'rand'.
    :returns: ``(best parameters of last stage, accumulated maps, all
        parameters of the last stage)``.
    :raises ValueError: on an empty stage list, an unknown algo, a non-cube
        B image, or an unknown rand_method.

    NOTE(review): the default ``algo='gpu_squared'`` is not in the accepted
    list below; callers appear to always pass ``algo`` explicitly -- confirm.
    """
    stages = len(angles_n)
    if stages == 0:
        # Previously a zero-stage call crashed later with a NameError on
        # 'last_param'; fail fast with a clear message instead.
        raise ValueError('angles_n must define at least one stage')
    A = to_tup(A, stages)
    B = to_tup(B, stages)
    M_A = to_tup(M_A, stages)
    M_B = to_tup(M_B, stages)
    algo = to_tup(algo, stages)
    starting_points = to_tup(starting_points, stages - 1)
    # Currently, only cube images are supported for image B.
    for i in range(len(B)):
        if not (B[i].shape[0] == B[i].shape[1] and B[i].shape[0] == B[i].shape[2]):
            raise ValueError('image B must be a cube (equal extents in all dimensions)')
    if start_angle is None:
        start_angle = np.array([0.0, 0.0, 0.0])
    maps = []
    for r in range(stages):
        if algo[r] not in ('gpu_ngf', 'gpu_ngf_unsquared', 'cpu_ngf', 'cpu_ngf_unsquared'):
            raise ValueError(f"unknown algo '{algo[r]}'")
        squared_mode = not ('unsquared' in algo[r])
        on_gpu = 'gpu' in algo[r]
        if rand_methods[r] == 'quasi':
            ang = quasi_random_angles_3d(start_angle, angles_max[r], angles_n[r])
        elif rand_methods[r] == 'rand':
            ang = random_angles_3d(start_angle, None, angles_max[r], angles_n[r])
        else:
            # BUGFIX: previously an unknown method fell through silently and
            # crashed below with a NameError on 'ang'.
            raise ValueError(f"unknown rand_method '{rand_methods[r]}'; expected 'quasi' or 'rand'")
        t1 = time.time()
        param, maps_r = align_rigid_ngf(A[r], B[r], M_A[r], M_B[r], ang, overlap, enable_partial_overlap, squared_mode=squared_mode, on_gpu=on_gpu, save_maps=save_maps, display_progress=display_progress)
        t2 = time.time()
        if display_progress:
            print(f'Time elapsed for stage {r+1}: {t2-t1:.02f}s.')
        if maps_r is not None:
            maps = maps + maps_r
        last_param = param[0]
        if r + 1 < stages:
            # Seed the next stage with the angles of the current top results.
            start_angle = [np.array(param[i][1]) for i in range(starting_points[r])]
    return last_param, maps, param
|
"""
Tests optimizing a unit in AlexNet/CaffeNet with DeePSiM generators 1-8
Running time (approx.; steps == 200; n_units == 1):
5 mins (GTX 2080); 10 mins (GTX 1060)
"""
from time import time
from pathlib import Path
import h5py as h5
import numpy as np
from Experiments import CNNExperiment
# --- Experiment configuration -------------------------------------------
# Root directory under which all experiment outputs are written.
save_root = Path('temp')
engine = 'pytorch'  # or caffe; will switch engine in generator and target
optimizer_name = 'genetic'
# DeePSiM generators 1-8; one experiment is run per generator below.
generator_names = (
    'deepsim-norm1', 'deepsim-norm2', 'deepsim-conv3', 'deepsim-conv4',
    'deepsim-pool5', 'deepsim-fc6', 'deepsim-fc7', 'deepsim-fc8')
steps = 200  # optimization steps per experiment
target_unit = {  # goldfish neuron
    'caffe': ('caffenet', 'fc8', 1),
    'pytorch': ('alexnet', 'classifier.6', 1)}[engine]
init_rand_seed = 0  # seed for drawing the initial code population
exp_settings = {
    'optimizer_name': optimizer_name,
    'optimizer_parameters': {'generator_parameters': {'engine': engine}},
    'scorer_parameters': {'engine': engine},
    'image_size': 85,
    'with_write': False,
    'max_optimize_steps': steps,
    'random_seed': 0,
    'stochastic': False,
    'config_file_path': __file__}
# Names of the genetic-optimizer hyperparameters, in the order used by the
# per-generator value tuples below.
optimizer_setting_names = (
    'population_size', 'mutation_rate', 'mutation_size', 'selectivity',
    'heritability', 'n_conserve')
# Tuned hyperparameter values per generator (same order as the names above).
by_generator_optimizer_settings = {
    'deepsim-norm1': (15, 1, 1.5, 2, 0.5, 0),
    'deepsim-norm2': (10, 0.5, 0.7, 4, 0.5, 0),
    'deepsim-conv3': (12, 0.65, 0.75, 2.25, 0.5, 0),
    'deepsim-conv4': (10, 0.9, 0.75, 2.5, 0.5, 0),
    'deepsim-pool5': (10, 0.6, 1, 2.5, 0.75, 0),
    'deepsim-fc6': (20, 0.5, 0.5, 2, 0.5, 0),
    'deepsim-fc7': (45, 0.6, 0.3, 1.25, 0.5, 0),
    'deepsim-fc8': (20, 0.2, 0.6, 2, 0.5, 0)}
t0 = time()
# main loop
# Trailing elements of target_unit identify the unit index; they are joined
# into a directory-name suffix (e.g. "1").
unit = np.atleast_1d(target_unit[2:])
target_s = '_'.join(map(str, unit))
init_randgen = np.random.RandomState(init_rand_seed)
for generator_name in generator_names:
    # set generator-specific parameters
    optimizer_parameters = exp_settings['optimizer_parameters']
    optimizer_parameters['generator_name'] = generator_name
    for p, v in zip(
            optimizer_setting_names,
            by_generator_optimizer_settings[generator_name]):
        optimizer_parameters[p] = v
    exp_settings['optimizer_parameters'] = optimizer_parameters
    # specify initialization (for reproducibility)
    gen_layer_name = generator_name.split('-')[1]
    # Per-layer activation statistics (mean/std) used to sample initial codes.
    with h5.File('test_data/stats_caffenet.h5', 'r') as f:
        mu = f[gen_layer_name]['mu'][()]
        sig = f[gen_layer_name]['sig'][()]
    pop_size = exp_settings['optimizer_parameters']['population_size']
    init_codes = init_randgen.normal(mu, sig, size=(pop_size, *mu.shape))
    if generator_name != 'deepsim-fc8':
        # generator was trained on post-ReLU input
        init_codes = np.clip(init_codes, 0, None)
    project_dir = (
        save_root / engine / optimizer_name / generator_name
        / 'random_dist'  # init name
        / target_unit[0] / target_unit[1] / target_s)
    try:
        project_dir.mkdir(parents=True)
    except FileExistsError:
        # skips finished, running, or aborted experiments
        # this allows running test1.py in parallel with (almost) no
        # interference (unless timing is so precise---for example in batch
        # jobs---that two processes attempted to create the same folder
        # at the same time. Downside is, for aborted experiments,
        # project_dir needs to be manually removed
        continue
    exp_settings['project_dir'] = project_dir
    exp_settings['target_neuron'] = target_unit
    experiment = CNNExperiment(**exp_settings)
    experiment.optimizer.set_initialization(init_codes)  # manual init
    experiment.run()
# load and print results
print('target unit:\t', target_unit)
for generator_name in generator_names:
    # Re-derive the per-generator output directory (same path as used above).
    project_dir = (
        save_root / engine / optimizer_name / generator_name
        / 'random_dist'  # init name
        / target_unit[0] / target_unit[1] / target_s)
    acts = []
    # Compare the best score at the first and the last optimization step.
    for i in (0, steps - 1):
        score_f = project_dir / f'scores_step{i:03d}.npz'
        try:
            acts.append(np.load(score_f)['scores'].max())
        except FileNotFoundError:
            # experiment was skipped/aborted; report NaN instead of crashing
            acts.append(np.nan)
    print(f'{generator_name}\t{acts[0]:>4.1f} -> {acts[1]:>5.1f}')
print(f'took time: {time() - t0:.0f} s')
"""
Note: for unclear reasons, torch results are slightly different
on different runs; caffe results are consistent.
results:
>>> cuda 10.1.243, python 3.7.5, Ubuntu 19.10
>>> torch 1.3.1
deepsim-norm1 -1.6 -> 8.5
deepsim-norm2 0.8 -> 6.7
deepsim-conv3 1.6 -> 10.1
deepsim-conv4 1.8 -> 12.8
deepsim-pool5 2.0 -> 18.2
deepsim-fc6 5.4 -> 31.6
deepsim-fc7 5.8 -> 58.5
deepsim-fc8 6.2 -> 38.7
>>> caffe
deepsim-norm1 0.0 -> 16.4
deepsim-norm2 3.5 -> 10.8
deepsim-conv3 4.8 -> 15.3
deepsim-conv4 5.4 -> 21.8
deepsim-pool5 3.5 -> 27.3
deepsim-fc6 6.3 -> 51.1
deepsim-fc7 6.9 -> 74.1
deepsim-fc8 9.1 -> 52.7
>>> cuda 9.0, python 3.7.1 (anaconda), Windows 10
>>> torch 1.0.1
deepsim-norm1 -1.6 -> 8.5
deepsim-norm2 0.8 -> 6.3
deepsim-conv3 1.6 -> 9.5
deepsim-conv4 1.8 -> 13.7
deepsim-pool5 2.0 -> 19.2
deepsim-fc6 5.4 -> 26.6
deepsim-fc7 5.8 -> 45.7
deepsim-fc8 6.2 -> 35.9
>>> caffe
deepsim-norm1 0.0 -> 15.2
deepsim-norm2 3.5 -> 11.4
deepsim-conv3 4.8 -> 15.7
deepsim-conv4 5.4 -> 21.1
deepsim-pool5 3.5 -> 27.6
deepsim-fc6 6.3 -> 49.8
deepsim-fc7 6.9 -> 64.3
deepsim-fc8 9.1 -> 49.1
"""
|
import json
import math
import os
from absl import app
from absl import flags
from absl import logging
import data as data_lib
import metrics
import model as model_lib
import objective as obj_lib
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import numpy as np
from sklearn.metrics import confusion_matrix
FLAGS = flags.FLAGS
flags.DEFINE_float(
'learning_rate', 0.3,
'Initial learning rate per batch size of 256.')
flags.DEFINE_enum(
'learning_rate_scaling', 'linear', ['linear', 'sqrt'],
'How to scale the learning rate as a function of batch size.')
flags.DEFINE_float(
'warmup_epochs', 10,
'Number of epochs of warmup.')
flags.DEFINE_float('weight_decay', 1e-6, 'Amount of weight decay to use.')
flags.DEFINE_float(
'batch_norm_decay', 0.9,
'Batch norm decay parameter.')
flags.DEFINE_integer(
'train_batch_size', 512,
'Batch size for training.')
flags.DEFINE_string(
'train_split', 'train',
'Split for training.')
flags.DEFINE_integer(
'train_epochs', 100,
'Number of epochs to train for.')
flags.DEFINE_integer(
'train_steps', 0,
'Number of steps to train for. If provided, overrides train_epochs.')
flags.DEFINE_integer(
'eval_steps', 0,
'Number of steps to eval for. If not provided, evals over entire dataset.')
flags.DEFINE_integer(
'eval_batch_size', 256,
'Batch size for eval.')
flags.DEFINE_integer(
'checkpoint_epochs', 1,
'Number of epochs between checkpoints/summaries.')
flags.DEFINE_integer(
'checkpoint_steps', 0,
'Number of steps between checkpoints/summaries. If provided, overrides '
'checkpoint_epochs.')
flags.DEFINE_string(
'eval_split', 'validation',
'Split for evaluation.')
flags.DEFINE_string(
'dataset', 'imagenet2012',
'Name of a dataset.')
flags.DEFINE_bool(
'cache_dataset', False,
'Whether to cache the entire dataset in memory. If the dataset is '
'ImageNet, this is a very bad idea, but for smaller datasets it can '
'improve performance.')
flags.DEFINE_enum(
'mode', 'train', ['train', 'eval', 'train_then_eval'],
'Whether to perform training or evaluation.')
flags.DEFINE_enum(
'train_mode', 'pretrain', ['pretrain', 'finetune'],
'The train mode controls different objectives and trainable components.')
flags.DEFINE_bool('lineareval_while_pretraining', True,
'Whether to finetune supervised head while pretraining.')
flags.DEFINE_string(
'checkpoint', None,
'Loading from the given checkpoint for fine-tuning if a finetuning '
'checkpoint does not already exist in model_dir.')
flags.DEFINE_bool(
'zero_init_logits_layer', False,
'If True, zero initialize layers after avg_pool for supervised learning.')
flags.DEFINE_integer(
'fine_tune_after_block', -1,
'The layers after which block that we will fine-tune. -1 means fine-tuning '
'everything. 0 means fine-tuning after stem block. 4 means fine-tuning '
'just the linear head.')
flags.DEFINE_string(
'master', None,
'Address/name of the TensorFlow master to use. By default, use an '
'in-process master.')
flags.DEFINE_string(
'model_dir', None,
'Model directory for training.')
flags.DEFINE_string(
'data_path', None,
'Directory where dataset is stored.')
flags.DEFINE_bool(
'use_tpu', True,
'Whether to run on TPU.')
flags.DEFINE_string(
'tpu_name', None,
'The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'tpu_zone', None,
'[Optional] GCE zone where the Cloud TPU is located in. If not '
'specified, we will attempt to automatically detect the GCE project from '
'metadata.')
flags.DEFINE_string(
'gcp_project', None,
'[Optional] Project name for the Cloud TPU-enabled project. If not '
'specified, we will attempt to automatically detect the GCE project from '
'metadata.')
flags.DEFINE_enum(
'optimizer', 'lars', ['momentum', 'adam', 'lars'],
'Optimizer to use.')
flags.DEFINE_float(
'momentum', 0.9,
'Momentum parameter.')
flags.DEFINE_string(
'eval_name', None,
'Name for eval.')
flags.DEFINE_integer(
'keep_checkpoint_max', 5,
'Maximum number of checkpoints to keep.')
flags.DEFINE_integer(
'keep_hub_module_max', 1,
'Maximum number of Hub modules to keep.')
flags.DEFINE_float(
'temperature', 0.1,
'Temperature parameter for contrastive loss.')
# NOTE(review): the help text previously duplicated the `temperature` flag's
# description ("Temperature parameter for contrastive loss.") — a copy-paste
# error; corrected to describe the boolean's actual purpose.
flags.DEFINE_boolean(
    'hidden_norm', True,
    'Whether to normalize the hidden representations used in the '
    'contrastive loss.')
flags.DEFINE_enum(
'proj_head_mode', 'nonlinear', ['none', 'linear', 'nonlinear'],
'How the head projection is done.')
flags.DEFINE_integer(
'proj_out_dim', 128,
'Number of head projection dimension.')
flags.DEFINE_integer(
'num_proj_layers', 3,
'Number of non-linear head layers.')
flags.DEFINE_integer(
'ft_proj_selector', 0,
'Which layer of the projection head to use during fine-tuning. '
'0 means no projection head, and -1 means the final layer.')
flags.DEFINE_boolean(
'global_bn', True,
'Whether to aggregate BN statistics across distributed cores.')
flags.DEFINE_integer(
'width_multiplier', 1,
'Multiplier to change width of network.')
flags.DEFINE_integer(
'resnet_depth', 50,
'Depth of ResNet.')
flags.DEFINE_float(
'sk_ratio', 0.,
'If it is bigger than 0, it will enable SK. Recommendation: 0.0625.')
flags.DEFINE_float(
'se_ratio', 0.,
'If it is bigger than 0, it will enable SE.')
flags.DEFINE_integer(
'image_size', 224,
'Input image size.')
flags.DEFINE_float(
'color_jitter_strength', 1.0,
'The strength of color jittering.')
flags.DEFINE_boolean(
'use_blur', True,
'Whether or not to use Gaussian blur for augmentation during pretraining.')
def build_saved_model(model):
    """Returns a tf.Module for saving to SavedModel.

    Wraps *model* so that calling the exported module returns the salient
    tensors dict, with concrete functions traced for both training and
    inference signatures.
    """

    class SimCLRModel(tf.Module):
        """Saved model for exporting to hub."""

        def __init__(self, model):
            # BUGFIX: tf.Module.__init__ was never called; invoke it so the
            # module's name scoping / variable tracking is set up correctly.
            super().__init__()
            self.model = model
            # This can't be called `trainable_variables` because `tf.Module` has
            # a getter with the same name.
            self.trainable_variables_list = model.trainable_variables

        @tf.function
        def __call__(self, inputs, trainable):
            self.model(inputs, training=trainable)
            # get_salient_tensors_dict() is defined elsewhere in this module.
            return get_salient_tensors_dict()

    module = SimCLRModel(model)
    # Trace concrete functions for both trainable=True and trainable=False so
    # both signatures are available in the exported SavedModel.
    input_spec = tf.TensorSpec(shape=[None, None, None, 3], dtype=tf.float32)
    module.__call__.get_concrete_function(input_spec, trainable=True)
    module.__call__.get_concrete_function(input_spec, trainable=False)
    return module
def save(model, global_step):
    """Export as SavedModel for finetuning and inference."""
    exported = build_saved_model(model)
    export_dir = os.path.join(FLAGS.model_dir, 'saved_model')
    step_dir = os.path.join(export_dir, str(global_step))
    # Overwrite any previous export for this global step.
    if tf.io.gfile.exists(step_dir):
        tf.io.gfile.rmtree(step_dir)
    tf.saved_model.save(exported, step_dir)
    if FLAGS.keep_hub_module_max > 0:
        # Keep only the newest FLAGS.keep_hub_module_max exported SavedModels;
        # delete the rest (directories are named by their global step).
        step_numbers = sorted(
            int(name) for name in tf.io.gfile.listdir(export_dir)
            if name.isdigit())
        for stale_step in step_numbers[:-FLAGS.keep_hub_module_max]:
            tf.io.gfile.rmtree(os.path.join(export_dir, str(stale_step)))
def try_restore_from_checkpoint(model, global_step, optimizer):
    """Restores the latest ckpt if it exists, otherwise check FLAGS.checkpoint.

    Returns the tf.train.CheckpointManager, which the caller can use for
    subsequent checkpoint saves.
    """
    checkpoint = tf.train.Checkpoint(
        model=model, global_step=global_step, optimizer=optimizer)
    checkpoint_manager = tf.train.CheckpointManager(
        checkpoint,
        directory=FLAGS.model_dir,
        max_to_keep=FLAGS.keep_checkpoint_max)
    latest_ckpt = checkpoint_manager.latest_checkpoint
    if latest_ckpt:
        # Restore model weights, global step, optimizer states
        logging.info('Restoring from latest checkpoint: %s', latest_ckpt)
        checkpoint_manager.checkpoint.restore(latest_ckpt).expect_partial()
    elif FLAGS.checkpoint:
        # Restore model weights only, but not global step and optimizer states
        logging.info('Restoring from given checkpoint: %s', FLAGS.checkpoint)
        # A second Checkpoint that tracks only the model, so global step and
        # optimizer state keep their freshly-initialized values.
        checkpoint_manager2 = tf.train.CheckpointManager(
            tf.train.Checkpoint(model=model),
            directory=FLAGS.model_dir,
            max_to_keep=FLAGS.keep_checkpoint_max)
        checkpoint_manager2.checkpoint.restore(FLAGS.checkpoint).expect_partial()
        if FLAGS.zero_init_logits_layer:
            # Zero the supervised head's weights so fine-tuning of the logits
            # layer starts from scratch.
            model = checkpoint_manager2.checkpoint.model
            output_layer_parameters = model.supervised_head.trainable_weights
            logging.info('Initializing output layer parameters %s to zero',
                         [x.op.name for x in output_layer_parameters])
            for x in output_layer_parameters:
                x.assign(tf.zeros_like(x))
    return checkpoint_manager
def main(argv):
    """Entry point: build datasets and distribution strategy, then evaluate.

    NOTE(review): in this stripped-down script only the 'eval' mode performs
    any work; `train_dataset`, `train_steps`, `epoch_steps` and
    `checkpoint_steps` are computed but unused.
    """
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    # Load an image-folder dataset from disk (one subfolder per class).
    builder = tfds.folder_dataset.ImageFolder(FLAGS.data_path)
    train_dataset = builder.as_dataset(split='train', shuffle_files=True)
    eval_dataset = builder.as_dataset(split='val', shuffle_files=True)
    num_train_examples = builder.info.splits[FLAGS.train_split].num_examples
    num_eval_examples = builder.info.splits[FLAGS.eval_split].num_examples
    num_classes = builder.info.features['label'].num_classes
    train_steps = model_lib.get_train_steps(num_train_examples)
    # If --eval_steps is 0, evaluate over the whole eval split.
    eval_steps = FLAGS.eval_steps or int(
        math.ceil(num_eval_examples / FLAGS.eval_batch_size))
    epoch_steps = int(round(num_train_examples / FLAGS.train_batch_size))
    logging.info('# train examples: %d', num_train_examples)
    logging.info('# train_steps: %d', train_steps)
    logging.info('# eval examples: %d', num_eval_examples)
    logging.info('# eval steps: %d', eval_steps)
    checkpoint_steps = (
        FLAGS.checkpoint_steps or (FLAGS.checkpoint_epochs * epoch_steps))
    topology = None
    if FLAGS.use_tpu:
        if FLAGS.tpu_name:
            cluster = tf.distribute.cluster_resolver.TPUClusterResolver(
                FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        else:
            cluster = tf.distribute.cluster_resolver.TPUClusterResolver(FLAGS.master)
        tf.config.experimental_connect_to_cluster(cluster)
        topology = tf.tpu.experimental.initialize_tpu_system(cluster)
        logging.info('Topology:')
        logging.info('num_tasks: %d', topology.num_tasks)
        logging.info('num_tpus_per_task: %d', topology.num_tpus_per_task)
        strategy = tf.distribute.TPUStrategy(cluster)
    else:
        # For (multiple) GPUs.
        strategy = tf.distribute.MirroredStrategy()
        print("\nNum GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
        logging.info('Running using MirroredStrategy on %d replicas',
                     strategy.num_replicas_in_sync)
    # Build the model under the strategy scope so its variables are mirrored.
    with strategy.scope():
        model = model_lib.Model(num_classes)
    if FLAGS.mode == 'eval':
        ckpt = FLAGS.checkpoint
        result = perform_evaluation(model, eval_dataset, eval_steps, ckpt, strategy,
                                    topology)
    return
def perform_evaluation(model, eval_dataset, eval_steps, ckpt, strategy, topology):
    """Perform evaluation.

    Restores ``model`` from ``ckpt``, runs it over ``eval_dataset`` one image
    at a time, and prints the resulting confusion matrix.

    Args:
        model: the model to evaluate (its output index 1 holds the
            supervised-head logits, as used below).
        eval_dataset: dataset of dicts with 'image' and 'label' entries.
        eval_steps: number of eval steps (currently unused; the whole
            dataset is consumed).
        ckpt: checkpoint path to restore from.
        strategy: tf.distribute strategy.
        topology: TPU topology or None (currently unused).
    """
    if FLAGS.train_mode == 'pretrain' and not FLAGS.lineareval_while_pretraining:
        logging.info('Skipping eval during pretraining without linear eval.')
        return
    # Summary writer is created for its side effect (model_dir event files);
    # global_step is restored alongside the weights.
    summary_writer = tf.summary.create_file_writer(FLAGS.model_dir)
    checkpoint = tf.train.Checkpoint(
        model=model, global_step=tf.Variable(0, dtype=tf.int64))
    checkpoint.restore(ckpt).expect_partial()
    global_step = checkpoint.global_step

    def preprocess_for_eval(x):
        """Convert the example's image to float32 values in [0, 1].

        Args:
            x: dataset example dict with an 'image' entry.

        Returns:
            The same dict with the 'image' tensor preprocessed.
        """
        x["image"] = tf.image.convert_image_dtype(x["image"], dtype=tf.float32)
        x["image"] = tf.clip_by_value(x["image"], 0., 1.)
        return x

    with strategy.scope():
        # Batch size 1: collect one prediction/label pair per example.
        ds = eval_dataset.map(preprocess_for_eval).batch(1)
        preds = []
        labels = []
        for x in ds:
            image = x['image']
            labels.append(x['label'].numpy()[0])
            logits = model(image, training=False)
            # logits[1] are the supervised-head outputs; take the argmax class.
            preds.append(np.argmax(logits[1].numpy(), -1)[0])
        logging.info('Finished eval for %s', ckpt)
    print(confusion_matrix(labels, preds))
if __name__ == '__main__':
    # Ensure TF2 behavior even under a TF1-compat installation.
    tf.compat.v1.enable_v2_behavior()
    # For outside compilation of summaries on TPU.
    tf.config.set_soft_device_placement(True)
    app.run(main)
|
#!/usr/bin/python
import usb.core
import usb.util
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
class UsbLivePlot:
    """
    UsbLivePlot class provides a way to receive sensor readings in form of USB packets and create a self-refreshing plot using matplotlib FuncAnimation.
    <NAME> 2016
    """

    def __init__(self):
        # find our device - Vendor ST, Product STM32F4
        self.usbDev = usb.core.find(idVendor=0x0483, idProduct=0xF00D)
        if self.usbDev is None:
            raise ValueError('Device not found')
        # With no arguments, the first configuration will be the active one
        self.usbDev.set_configuration()
        # find and assign IN endpoint
        self.epIn = self.findEndpoint(usb.util.ENDPOINT_IN)
        # create figure with x,y,z subplots
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(3, 1, 1)
        self.ax2 = self.fig.add_subplot(3, 1, 2)
        self.ax3 = self.fig.add_subplot(3, 1, 3)
        self.startTime = time.time()
        # seconds to add to MCU timestamps after each uint16 wrap-around
        self.timeOverflow = 0
        # TODO: add angles subplots on the right side of the window
        # create buffers
        self.datasize = 12  # samples per axis in one USB packet
        self.timear = []
        self.xar = []
        self.yar = []
        self.zar = []

    def xyzFromUsb(self, usbData):
        """Split a raw USB packet into x/y/z acceleration [g] and time [s] lists.

        Packet layout: datasize bytes per axis (x, y, z), followed by
        2*datasize bytes of little-endian uint16 timestamps (milliseconds).
        """
        xbytes = usbData[0:self.datasize]
        ybytes = usbData[self.datasize:2 * self.datasize]
        zbytes = usbData[2 * self.datasize:3 * self.datasize]
        tbytes = usbData[3 * self.datasize:(3 + 2) * self.datasize]
        x = []
        y = []
        z = []
        t = []
        # Byte arrays to integers, time is uint16
        for i in range(self.datasize):
            x.append(self.accel_byte2g(xbytes[i]))
            y.append(self.accel_byte2g(ybytes[i]))
            z.append(self.accel_byte2g(zbytes[i]))
            t_tmp = int.from_bytes(tbytes[2 * i:2 * i + 2], byteorder='little', signed=False)
            t.append(t_tmp / 1000 + self.timeOverflow)  # seconds
        return x, y, z, t  # little endian transmission

    def accel_byte2g(self, accel_byte):
        """
        Convert unsigned char data to acceleration in [g]
        """
        # reinterpret the unsigned byte as a signed (two's complement) value
        accel_g = accel_byte - 256 if accel_byte > 127 else accel_byte
        accel_g = float(accel_g) * 0.0185  # sensor scale factor [g/LSB]
        return accel_g

    def animate(self, i):
        """FuncAnimation callback: read one USB packet and redraw all axes."""
        # Read USB
        timeout = 50
        try:
            usbData = self.usbDev.read(self.epIn.bEndpointAddress, 5 * self.datasize, timeout)
        except usb.core.USBError as e:
            print('Data not read:', e)
            return
        xbuf, ybuf, zbuf, tMcu = self.xyzFromUsb(usbData)
        # use extend() instead of append() to have 1-D list where plot lines make sense
        # note: data obtained from USB is little endian so it is locally reversed. it does not matter until we use time from MCU
        self.timear.extend(tMcu)
        self.xar.extend(xbuf)
        self.yar.extend(ybuf)
        self.zar.extend(zbuf)
        # Check if the MCU uint16 millisecond counter has wrapped around
        if len(self.timear) > (self.datasize + 1):
            last_before_packet = self.timear[-self.datasize - 1]
            if self.timear[-1] < last_before_packet:
                self.timeOverflow += 65.535
                # BUGFIX: the previous code iterated `for val in self.timear[...]:
                # val += self.timeOverflow`, which rebinds the loop variable and
                # never modifies the list (and its slice also excluded the newest
                # sample). Correct the just-appended samples in place, adding the
                # single wrap increment only to samples past the wrap point.
                for j in range(len(self.timear) - self.datasize, len(self.timear)):
                    if self.timear[j] < last_before_packet:
                        self.timear[j] += 65.535
        self.ax1.clear()
        self.ax2.clear()
        self.ax3.clear()
        self.ax1.plot(self.timear, self.xar, marker='.', linestyle='None')
        self.ax2.plot(self.timear, self.yar, marker='.', linestyle='None', color='orange')
        self.ax3.plot(self.timear, self.zar, marker='.', linestyle='None', color='green')
        self.ax1.set_title('Acceleration from STM32F4Discovery')
        self.ax1.set_ylabel('x-axis [g]')
        self.ax2.set_ylabel('y-axis [g]')
        self.ax3.set_ylabel('z-axis [g]')
        self.ax3.set_xlabel('Time [s]')

    def findEndpoint(self, direction):
        """Return the first endpoint of the active configuration matching *direction*."""
        cfg = self.usbDev.get_active_configuration()
        intf = cfg[(0, 0)]
        ep = usb.util.find_descriptor(
            intf,
            # match the first endpoint with the requested direction
            custom_match= \
                lambda e: \
                    usb.util.endpoint_direction(e.bEndpointAddress) == \
                    direction)
        assert ep is not None
        print(ep)
        return ep
def main():
    """Create the USB live plotter and start the self-updating figure."""
    plotter = UsbLivePlot()
    plotter.filename = 'acceleration.log'
    # Keep a reference to the animation: if it were garbage collected the
    # plot would stop updating.
    anim = animation.FuncAnimation(plotter.fig, plotter.animate, interval=30)
    plt.show()


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""DSPT3 U3S2M2 Lecture - Aaron.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1CrqR41yAB2TSKWnANXAREusGUEy8ipAN
"""
import psycopg2

# Explore the module API interactively (notebook-style expression statements).
dir(psycopg2)

'''psycopg2.connect looks like it may be interesting!
(Similar to how sqlite3 module worked)'''
help(psycopg2.connect)

# ElephantSQL (hosted PostgreSQL) connection credentials.
dbname = 'gqblsofi'
user = 'gqblsofi'
password = '<PASSWORD>'  # redacted
host = 'rajje.db.elephantsql.com'
pg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)
pg_conn
dir(pg_conn)
pg_curs = pg_conn.cursor()
help(pg_curs.execute)

# Demo table exercising a JSONB column.
create_table_statement = """
CREATE TABLE test_table (
id SERIAL PRIMARY KEY,
name varchar(40) NOT NULL,
data JSONB
);
"""
pg_curs.execute(create_table_statement)
pg_conn.commit()

insert_statement = """
INSERT INTO test_table (name, data) VALUES
(
'A row name',
null
),
(
'Another row, with JSON',
'{ "a": 1, "b": ["dog", "cat", 42], "c": true }'::JSONB
);
"""
pg_curs.execute(insert_statement)
pg_conn.commit()

query = "SELECT * FROM test_table;"
pg_curs.execute(query)
pg_curs.fetchall()
"""# ETL - RPG data from SQLite to PostgreSQL
We'd like to get the RPG data out of SQLite and insert it into PostgreSQL.
Aka - we're making a data pipeline! Aka - an ETL (Extract Transform Load). Our first "cloud" ETL!
"""
!wget https://github.com/KryssyCo/DS-Unit-3-Sprint-2-SQL-and-Databases/blob/master/module1-introduction-to-sql/rpg_db.sqlite3?raw=true
!mv 'rpg_db.sqlite3?raw=true' rpg_db.sqlite3
!ls
import sqlite3
sl_conn = sqlite3.connect('rpg_db.sqlite3')
sl_curs =sl_conn.cursor()
# We care about the charactercreator_character table
row_count = 'SELECT COUNT(*) FROM charactercreator_character'
sl_curs.execute(row_count).fetchall()
# Our goal - copy the characters table from SQLite to PostgreSQL using Python
# Step 1 - E = Extract: Get the characters from the table
get_characters = 'SELECT * FROM charactercreator_character'
characters = sl_curs.execute(get_characters).fetchall()
characters[:5]
len(characters)
# Step 2 - Transform
# In this case, we don't actually want/need to change much
# Because we want to keep all the data
# And we're going from SQL to SQL
# But what do we need to be able to load into PostgreSQL
# We need to make a new table with the apprropriate schema
# What was the old schema? We can get at this with SQLitte internals
sl_curs.execute('PRAGMA table_info(charactercreator_character);').fetchall()
create_character_table = """
CREATE TABLE charactercreator_character (
character_id SERIAL PRIMARY KEY,
name VARCHAR(30),
level INT,
exp INT,
hp INT,
strength INT,
intelligence INT,
dexterity INT,
wisdom INT
);
"""
pg_curs.execute(create_character_table)
pg_conn.commit()
# We can query tables if we want to check
# This is a clever optional thing, showing postgresql internals
show_tables = """
SELECT
*
FROM
pg_catalog.pg_tables
WHERE
schemaname != 'pg_catalog'
AND schemaname != 'information_schema';
"""
pg_curs.execute(show_tables)
pg_curs.fetchall()
characters[0]
example_insert = """
INSERT INTO charactercreator_character
(name, level, exp, hp, strength, intelligence, dexterity, wisdom)
VALUES """ +str(characters[0][1:]) + ";"
print(example_insert)
# How do we do this for all characters? Loops!
for character in characters:
insert_character = """
INSERT INTO charactercreator_character
(name, level, exp, hp, strength, intelligence, dexterity, wisdom)
VALUES """ + str(character[1:]) + ";"
pg_curs.execute(insert_character)
# pg_conn.commit()
pg_curs.execute('SELECT * FROM charactercreator_character')
pg_curs.fetchall()
pg_conn.commit()
# A quick test that we did this correctly
pg_curs.execute('SELECT * FROM charactercreator_character')
pg_characters = pg_curs.fetchall()
characters[0]
pg_characters[0]
for character, pg_character in zip(characters, pg_characters):
assert character == pg_character
|
<filename>Notebooks/python-library/venv/lib/python3.9/site-packages/check50/_api.py
import hashlib
import functools
import numbers
import os
import re
import shlex
import shutil
import signal
import sys
import time
import pexpect
from pexpect.exceptions import EOF, TIMEOUT
from . import internal, regex
_log = []
internal.register.before_every(_log.clear)


def log(line):
    """
    Add to check log

    :param line: line to be added to the check log
    :type line: str

    The check log is student-visible via the ``--log`` flag to ``check50``.
    """
    # Escape literal newlines so each log entry stays on a single line.
    escaped = line.replace("\n", "\\n")
    _log.append(escaped)
_data = {}
internal.register.before_every(_data.clear)


def data(**kwargs):
    """
    Add data to the check payload

    :params kwargs: key/value mappings to be added to the check payload

    Example usage::

        check50.data(time=7.3, mem=23)

    """
    for key, value in kwargs.items():
        _data[key] = value
def include(*paths):
    """
    Copy files/directories from the check directory (:data:`check50.internal.check_dir`),
    to the current directory

    :params paths: files/directories to be copied

    Example usage::

        check50.include("foo.txt", "bar.txt")
        assert os.path.exists("foo.txt") and os.path.exists("bar.txt")

    """
    destination = os.getcwd()
    for relative_path in paths:
        source = (internal.check_dir / relative_path).resolve()
        _copy(source, destination)
def hash(file):
    """
    Hashes file using SHA-256.

    :param file: name of file to be hashed
    :type file: str
    :rtype: str
    :raises check50.Failure: if ``file`` does not exist
    """
    exists(file)
    log(_("hashing {}...").format(file))

    # Read in 64 KiB chunks so large files don't need to fit in memory.
    # https://stackoverflow.com/a/22058673
    digest = hashlib.sha256()
    with open(file, "rb") as f:
        chunk = f.read(65536)
        while chunk:
            digest.update(chunk)
            chunk = f.read(65536)
    return digest.hexdigest()
def exists(*paths):
    """
    Assert that all given paths exist.

    :params paths: files/directories to be checked for existence

    :raises check50.Failure: if any ``path in paths`` does not exist

    Example usage::

        check50.exists("foo.c", "foo.h")

    """
    for path in paths:
        log(_("checking that {} exists...").format(path))
        if os.path.exists(path):
            continue
        raise Failure(_("{} not found").format(path))
def import_checks(path):
    """
    Import checks module given relative path.

    :param path: relative path from which to import checks module
    :type path: str
    :returns: the imported module
    :raises FileNotFoundError: if ``path / .check50.yaml`` does not exist
    :raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file

    This function is particularly useful when a set of checks logically extends
    another, as is often the case in CS50's own problems that have a "less comfy"
    and "more comfy" version. The "more comfy" version can include all of the
    "less comfy" checks like so::

        less = check50.import_checks("../less")
        from less import *

    .. note::
        the ``__name__`` of the imported module is given by the basename
        of the specified path (``less`` in the above example).
    """
    checks_dir = internal.check_dir / path
    checks_file = internal.load_config(checks_dir)["checks"]
    module = internal.import_file(checks_dir.name, (checks_dir / checks_file).resolve())
    # Register under its basename so `from <name> import *` works afterwards.
    sys.modules[checks_dir.name] = module
    return module
class run:
    """
    Run a command.

    :param command: command to be run
    :param env: environment in which to run command
    :type command: str
    :type env: dict

    By default, the command will be run using the same environment as ``check50``,
    these mappings may be overridden via the ``env`` parameter::

        check50.run("./foo").stdin("foo").stdout("bar").exit(0)
        check50.run("./foo", env={ "HOME": "/" }).stdin("foo").stdout("bar").exit(0)
    """

    def __init__(self, command, env=None):
        log(_("running {}...").format(command))

        full_env = os.environ.copy()
        # ``env`` defaults to None instead of a shared mutable ``{}``;
        # None (or an empty mapping) means "no overrides".
        full_env.update(env or {})

        # Workaround for OSX pexpect bug http://pexpect.readthedocs.io/en/stable/commonissues.html#truncated-output-just-before-child-exits
        # Workaround from https://github.com/pexpect/pexpect/issues/373
        command = "bash -c {}".format(shlex.quote(command))

        self.process = pexpect.spawn(command, encoding="utf-8", echo=False, env=full_env)

    def stdin(self, line, str_line=None, prompt=True, timeout=3):
        """
        Send line to stdin, optionally expect a prompt.

        :param line: line to be send to stdin
        :type line: str
        :param str_line: what will be displayed as the delivered input, a human \
                readable form of ``line``
        :type str_line: str
        :param prompt: boolean indicating whether a prompt is expected, if True absorbs \
                all of stdout before inserting line into stdin and raises \
                :class:`check50.Failure` if stdout is empty
        :type prompt: bool
        :param timeout: maximum number of seconds to wait for prompt
        :type timeout: int / float
        :raises check50.Failure: if ``prompt`` is set to True and no prompt is given
        """
        if str_line is None:
            str_line = line

        if line == EOF:
            log("sending EOF...")
        else:
            log(_("sending input {}...").format(str_line))

        if prompt:
            try:
                self.process.expect(".+", timeout=timeout)
            except (TIMEOUT, EOF):
                raise Failure(_("expected prompt for input, found none"))
            except UnicodeDecodeError:
                raise Failure(_("output not valid ASCII text"))

            # Consume everything on the output buffer
            try:
                # ``_i`` (not ``_``) so the gettext alias ``_`` is not shadowed.
                for _i in range(int(timeout * 10)):
                    self.process.expect(".+", timeout=0.1)
            except (TIMEOUT, EOF):
                pass

        try:
            if line == EOF:
                self.process.sendeof()
            else:
                self.process.sendline(line)
        except OSError:
            # Child may already have exited; delivering input is best-effort.
            pass
        return self

    def stdout(self, output=None, str_output=None, regex=True, timeout=3, show_timeout=False):
        """
        Retrieve all output from stdout until timeout (3 sec by default). If ``output``
        is None, ``stdout`` returns all of the stdout outputted by the process, else
        it returns ``self``.

        :param output: optional output to be expected from stdout, raises \
                :class:`check50.Failure` if no match \
                In case output is a float or int, the check50.number_regex \
                is used to match just that number". \
                In case output is a stream its contents are used via output.read().
        :type output: str, int, float, stream
        :param str_output: what will be displayed as expected output, a human \
                readable form of ``output``
        :type str_output: str
        :param regex: flag indicating whether ``output`` should be treated as a regex
        :type regex: bool
        :param timeout: maximum number of seconds to wait for ``output``
        :type timeout: int / float
        :param show_timeout: flag indicating whether the timeout in seconds \
                should be displayed when a timeout occurs
        :type show_timeout: bool
        :raises check50.Mismatch: if ``output`` is specified and nothing that the \
                process outputs matches it
        :raises check50.Failure: if process times out or if it outputs invalid UTF-8 text.

        Example usage::

            check50.run("./hello").stdout("[Hh]ello, world!?", "hello, world").exit()
            output = check50.run("./hello").stdout()
            if not re.match("[Hh]ello, world!?", output):
                raise check50.Mismatch("hello, world", output)
        """
        if output is None:
            self._wait(timeout)
            return self.process.before.replace("\r\n", "\n").lstrip("\n")

        # In case output is a stream (file-like object), read from it
        try:
            output = output.read()
        except AttributeError:
            pass

        if str_output is None:
            str_output = str(output)

        # In case output is an int/float, use a regex to match exactly that int/float
        if isinstance(output, numbers.Number):
            regex = True
            # The ``regex`` parameter shadows the module-level ``regex`` helper,
            # so it has to be reached through globals().
            output = globals()["regex"].decimal(output)

        expect = self.process.expect if regex else self.process.expect_exact

        if output == EOF:
            log(_("checking for EOF..."))
        else:
            output = str(output).replace("\n", "\r\n")
            log(_("checking for output \"{}\"...").format(str_output))

        try:
            expect(output, timeout=timeout)
        except EOF:
            result = self.process.before + self.process.buffer
            if self.process.after != EOF:
                result += self.process.after
            raise Mismatch(str_output, result.replace("\r\n", "\n"))
        except TIMEOUT:
            if show_timeout:
                raise Missing(str_output, self.process.before,
                              help=_("check50 waited {} seconds for the output of the program").format(timeout))
            raise Missing(str_output, self.process.before)
        except UnicodeDecodeError:
            raise Failure(_("output not valid ASCII text"))
        except Exception:
            raise Failure(_("check50 could not verify output"))

        # If we expected EOF and we still got output, report an error.
        if output == EOF and self.process.before:
            raise Mismatch(EOF, self.process.before.replace("\r\n", "\n"))

        return self

    def reject(self, timeout=1):
        """
        Check that the process survives for timeout. Useful for checking whether program is waiting on input.

        :param timeout: number of seconds to wait
        :type timeout: int / float
        :raises check50.Failure: if process ends before ``timeout``
        """
        log(_("checking that input was rejected..."))
        try:
            self._wait(timeout)
        except Failure as e:
            # A TIMEOUT cause means the process is still alive -- the expected case.
            if not isinstance(e.__cause__, TIMEOUT):
                raise
        else:
            raise Failure(_("expected program to reject input, but it did not"))
        return self

    def exit(self, code=None, timeout=5):
        """
        Wait for process to exit or until timeout (5 sec by default) and asserts
        that process exits with ``code``. If ``code`` is ``None``, returns the code
        the process exited with.

        .. note:: In order to ensure that spawned child processes do not outlive the check that spawned them, it is good practice to call either method (with no arguments if the exit code doesn't matter) or ``.kill()`` on every spawned process.

        :param code: code to assert process exits with
        :type code: int
        :param timeout: maximum number of seconds to wait for the program to end
        :type timeout: int / float
        :raises check50.Failure: if ``code`` is given and does not match the actual exitcode within ``timeout``

        Example usage::

            check50.run("./hello").exit(0)
            code = check50.run("./hello").exit()
            if code != 0:
                raise check50.Failure(f"expected exit code 0, not {code}")
        """
        self._wait(timeout)

        if code is None:
            return self.exitcode

        log(_("checking that program exited with status {}...").format(code))
        if self.exitcode != code:
            raise Failure(_("expected exit code {}, not {}").format(code, self.exitcode))
        return self

    def kill(self):
        """Kill the process.

        Child will first be sent a ``SIGHUP``, followed by a ``SIGINT`` and
        finally a ``SIGKILL`` if it ignores the first two."""
        self.process.close(force=True)
        return self

    def _wait(self, timeout=5):
        # Wait for the child to reach EOF, then record its exit status.
        try:
            self.process.expect(EOF, timeout=timeout)
        except TIMEOUT:
            # Chain a TIMEOUT instance as the cause: reject() inspects __cause__.
            raise Failure(_("timed out while waiting for program to exit")) from TIMEOUT(timeout)
        except UnicodeDecodeError:
            raise Failure(_("output not valid ASCII text"))

        self.kill()

        if self.process.signalstatus == signal.SIGSEGV:
            raise Failure(_("failed to execute program due to segmentation fault"))

        self.exitcode = self.process.exitstatus
        return self
class Failure(Exception):
    """
    Exception raised when a check fails.

    :param rationale: message to be displayed capturing why the check failed
    :type rationale: str
    :param help: optional help message to be displayed
    :type help: str

    Example usage::

        out = check50.run("./cash").stdin("4.2").stdout()
        if 10 not in out:
            help = None
            if 11 in out:
                help = "did you forget to round your result?"
            raise check50.Failure("Expected a different result", help=help)
    """

    def __init__(self, rationale, help=None):
        # Everything a reporter needs is collected in a single payload dict.
        payload = dict(rationale=rationale, help=help)
        self.payload = payload

    def __str__(self):
        return self.payload["rationale"]
class Missing(Failure):
    """
    Check failure raised when an expected item is absent from a collection,
    typically a substring expected somewhere in a longer string (e.g. stdout).

    :param missing_item: the expected item / substring
    :param collection: the collection / string
    :param help: optional help message to be displayed
    :type help: str

    Example usage::

        actual = check50.run("./fibonacci 5").stdout()

        if "5" not in actual and "3" in actual:
            help = "Be sure to start the sequence at 1"

        raise check50.Missing("5", actual, help=help)
    """

    def __init__(self, missing_item, collection, help=None):
        rationale = _("Did not find {} in {}").format(_raw(missing_item), _raw(collection))
        super().__init__(rationale=rationale, help=help)
        # EOF is a pexpect sentinel; store a readable label in the payload.
        if missing_item == EOF:
            missing_item = "EOF"
        self.payload.update(
            {"missing_item": str(missing_item), "collection": str(collection)}
        )
class Mismatch(Failure):
    """
    Check failure raised when the actual output does not match the expected output.

    :param expected: the expected value
    :param actual: the actual value
    :param help: optional help message to be displayed
    :type help: str

    Example usage::

        from re import match

        expected = "[Hh]ello, world!?\\n"
        actual = check50.run("./hello").stdout()
        if not match(expected, actual):
            help = None
            if match(expected[:-1], actual):
                help = r"did you forget a newline ('\\n') at the end of your printf string?"
            raise check50.Mismatch("hello, world\\n", actual, help=help)
    """

    def __init__(self, expected, actual, help=None):
        rationale = _("expected {}, not {}").format(_raw(expected), _raw(actual))
        super().__init__(rationale=rationale, help=help)
        # EOF is a pexpect sentinel; store readable labels in the payload.
        payload_expected = "EOF" if expected == EOF else expected
        payload_actual = "EOF" if actual == EOF else actual
        self.payload.update({"expected": payload_expected, "actual": payload_actual})
def hidden(failure_rationale):
    """
    Decorator that marks a check as a 'hidden' check. This will suppress the log
    accumulated throughout the check and will catch any :class:`check50.Failure`s thrown
    during the check, and reraising a new :class:`check50.Failure` with the given ``failure_rationale``.

    :param failure_rationale: the rationale that will be displayed to the student if the check fails
    :type failure_rationale: str

    Example usage::

        @check50.check()
        @check50.hidden("Your program isn't returning the expected result. Try running it on some sample inputs.")
        def hidden_check():
            check50.run("./foo").stdin("bar").stdout("baz").exit()
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Failure:
                # ``from None`` suppresses the original exception context so the
                # real failure cannot leak to the student through the traceback
                # -- the whole point of a *hidden* check.
                raise Failure(failure_rationale) from None
            finally:
                # Discard the accumulated log regardless of outcome.
                _log.clear()
        return wrapper
    return decorator
def _raw(s):
    """Return a quoted, escape-visible representation of *s*, truncated if too long."""
    if isinstance(s, list):
        s = "\n".join(_raw(element) for element in s)

    if s == EOF:
        return "EOF"

    escaped = repr(str(s))[1:-1]  # strip repr's own quotes, keep the escapes
    quoted = '"{}"'.format(escaped)
    if len(quoted) > 15:
        quoted = quoted[:15] + '..."'  # Truncate if too long
    return quoted
def _copy(src, dst):
"""Copy src to dst, copying recursively if src is a directory."""
try:
shutil.copy(src, dst)
except IsADirectoryError:
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
shutil.copytree(src, dst)
|
import ast
import types
from textwrap import dedent
import inspect
from core_language import Var, Prim, Return, Fun, primops, LitBool, LitFloat, LitInt, Assign, Loop
from type_system import int32, int64
class CoreTranslator(ast.NodeVisitor):
    """
    Processes the tree of the Python abstract syntax grammar,
    converting it into a new AST (the "Core" language) that can later be
    easily lowered into LLVM IR (intermediate representation).

    It recursively descends through the Python AST, compressing it into the
    Core language. Supported constructs: basic loops, arithmetic with
    addition and multiplication, numeric literals, and array indexing.

    Given a function like:

        def add(a, b):
            return a + b

    which Python's AST represents as::

        ('Module',
         {'body': [('FunctionDef',
            {'args': ('arguments',
                      {'args': [('Name', {'ctx': ('Param', {}), 'id': "'a'"}),
                                ('Name', {'ctx': ('Param', {}), 'id': "'b'"})],
                       'defaults': [], 'kwarg': None, 'vararg': None}),
             'body': [('Return',
                       {'value': ('BinOp',
                          {'left': ('Name', {'ctx': ('Load', {}), 'id': "'a'"}),
                           'op': ('Add', {}),
                           'right': ('Name', {'ctx': ('Load', {}), 'id': "'b'"})})})],
             'decorator_list': [],
             'name': "'add'"})]})

    it produces something like::

        ('Fun',
         {'args': [('Var', {'id': "'a'", 'type': None}),
                   ('Var', {'id': "'b'", 'type': None})],
          'body': [('Return',
                    {'val': ('Prim',
                             {'args': [('Var', {'id': "'a'", 'type': None}),
                                       ('Var', {'id': "'b'", 'type': None})],
                              'fn': "'add#'"})})],
          'fname': "'add'"})

    Types are inferred later on.
    """

    def __init__(self):
        pass

    def translate(self, source):
        """Parse *source* (module, function/lambda, or string) into a Core AST.

        :raises NotImplementedError: if *source* is none of the supported kinds.
        """
        if isinstance(source, types.ModuleType):
            source = dedent(inspect.getsource(source))
        elif isinstance(source, (types.FunctionType, types.LambdaType)):
            # types.LambdaType is an alias of FunctionType, so one branch
            # covers both plain functions and lambdas.
            source = dedent(inspect.getsource(source))
        elif isinstance(source, str):
            # Python 3: ``unicode`` no longer exists; ``str`` covers all text.
            source = dedent(source)
        else:
            raise NotImplementedError

        self._source = source
        self._ast = ast.parse(source)
        return self.visit(self._ast)

    def visit_Module(self, node):
        # ``map`` is lazy in Python 3 -- materialize before indexing.
        body = [self.visit(stmt) for stmt in node.body]
        return body[0]

    def visit_Name(self, node):
        return Var(node.id)

    def visit_Num(self, node):
        # Float literals map to LitFloat; everything else to LitInt.
        if isinstance(node.n, float):
            return LitFloat(node.n)
        else:
            return LitInt(node.n)

    def visit_Bool(self, node):
        return LitBool(node.n)

    def visit_Call(self, node):
        # NOTE(review): keyword arguments are visited but discarded, and
        # ``App`` is not among the names imported from core_language -- confirm.
        name = self.visit(node.func)
        args = [self.visit(arg) for arg in node.args]
        keywords = [self.visit(kw) for kw in node.keywords]
        return App(name, args)

    def visit_BinOp(self, node):
        # Map the AST operator class (e.g. ast.Add) to a primop name.
        op_str = node.op.__class__
        a = self.visit(node.left)
        b = self.visit(node.right)
        opname = primops[op_str]
        return Prim(opname, [a, b])

    def visit_Assign(self, node):
        # Only single-target assignment is supported.
        targets = node.targets
        assert len(node.targets) == 1
        var = node.targets[0].id
        val = self.visit(node.value)
        return Assign(var, val)

    def visit_FunctionDef(self, node):
        # Materialize lazily-mapped visits so Fun receives real lists.
        stmts = [self.visit(stmt) for stmt in node.body]
        args = [self.visit(arg) for arg in node.args.args]
        res = Fun(node.name, args, stmts)
        return res

    def visit_Pass(self, node):
        # NOTE(review): ``Noop`` is not among the imported names -- confirm.
        return Noop()

    def visit_Return(self, node):
        val = self.visit(node.value)
        return Return(val)

    def visit_Attribute(self, node):
        # Only the ``.shape`` attribute (array extent query) is supported.
        if node.attr == "shape":
            val = self.visit(node.value)
            return Prim("shape#", [val])
        else:
            raise NotImplementedError

    def visit_Subscript(self, node):
        # NOTE(review): ``Index`` is not among the imported names -- confirm.
        if isinstance(node.ctx, ast.Load):
            if node.slice:
                val = self.visit(node.value)
                ix = self.visit(node.slice.value)
                return Index(val, ix)
        elif isinstance(node.ctx, ast.Store):
            raise NotImplementedError

    def visit_For(self, node):
        # Only ``for i in range(...)`` / ``xrange(...)`` loops are supported.
        target = self.visit(node.target)
        stmts = [self.visit(stmt) for stmt in node.body]
        if node.iter.func.id in {"xrange", "range"}:
            args = [self.visit(arg) for arg in node.iter.args]
        else:
            raise Exception("Loop must be over range")

        if len(args) == 1:      # range(n) -> 0 .. n
            return Loop(target, LitInt(0, type=int32), args[0], stmts)
        elif len(args) == 2:    # range(n, m) -> n .. m
            return Loop(target, args[0], args[1], stmts)

    def visit_AugAssign(self, node):
        # Desugar ``x += e`` / ``x *= e`` into an explicit primop assignment.
        if isinstance(node.op, ast.Add):
            ref = node.target.id
            value = self.visit(node.value)
            return Assign(ref, Prim("add#", [Var(ref), value]))
        elif isinstance(node.op, ast.Mul):
            ref = node.target.id
            value = self.visit(node.value)
            return Assign(ref, Prim("mult#", [Var(ref), value]))
        else:
            raise NotImplementedError

    def generic_visit(self, node):
        raise NotImplementedError
<gh_stars>1-10
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class HtmlSsrfThreatCheckResult(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'is_valid': 'bool',
        'is_threat': 'bool',
        'threat_links': 'list[HtmlThreatLink]'
    }

    attribute_map = {
        'is_valid': 'IsValid',
        'is_threat': 'IsThreat',
        'threat_links': 'ThreatLinks'
    }

    def __init__(self, is_valid=None, is_threat=None, threat_links=None):  # noqa: E501
        """HtmlSsrfThreatCheckResult - a model defined in Swagger"""  # noqa: E501
        self._is_valid = None
        self._is_threat = None
        self._threat_links = None
        self.discriminator = None
        # Route provided values through the property setters.
        if is_valid is not None:
            self.is_valid = is_valid
        if is_threat is not None:
            self.is_threat = is_threat
        if threat_links is not None:
            self.threat_links = threat_links

    @property
    def is_valid(self):
        """Gets the is_valid of this HtmlSsrfThreatCheckResult.  # noqa: E501

        True if the document is valid and has no errors, false otherwise  # noqa: E501

        :return: The is_valid of this HtmlSsrfThreatCheckResult.  # noqa: E501
        :rtype: bool
        """
        return self._is_valid

    @is_valid.setter
    def is_valid(self, is_valid):
        """Sets the is_valid of this HtmlSsrfThreatCheckResult.

        True if the document is valid and has no errors, false otherwise  # noqa: E501

        :param is_valid: The is_valid of this HtmlSsrfThreatCheckResult.  # noqa: E501
        :type: bool
        """
        self._is_valid = is_valid

    @property
    def is_threat(self):
        """Gets the is_threat of this HtmlSsrfThreatCheckResult.  # noqa: E501

        True if the document contains an SSRF threat, false otherwise  # noqa: E501

        :return: The is_threat of this HtmlSsrfThreatCheckResult.  # noqa: E501
        :rtype: bool
        """
        return self._is_threat

    @is_threat.setter
    def is_threat(self, is_threat):
        """Sets the is_threat of this HtmlSsrfThreatCheckResult.

        True if the document contains an SSRF threat, false otherwise  # noqa: E501

        :param is_threat: The is_threat of this HtmlSsrfThreatCheckResult.  # noqa: E501
        :type: bool
        """
        self._is_threat = is_threat

    @property
    def threat_links(self):
        """Gets the threat_links of this HtmlSsrfThreatCheckResult.  # noqa: E501

        Links found in the input HTML that contains threats  # noqa: E501

        :return: The threat_links of this HtmlSsrfThreatCheckResult.  # noqa: E501
        :rtype: list[HtmlThreatLink]
        """
        return self._threat_links

    @threat_links.setter
    def threat_links(self, threat_links):
        """Sets the threat_links of this HtmlSsrfThreatCheckResult.

        Links found in the input HTML that contains threats  # noqa: E501

        :param threat_links: The threat_links of this HtmlSsrfThreatCheckResult.  # noqa: E501
        :type: list[HtmlThreatLink]
        """
        self._threat_links = threat_links

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(item):
            # Recurse into nested swagger models, pass primitives through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _convert(val) for key, val in value.items()}
            else:
                result[attr] = value
        if issubclass(HtmlSsrfThreatCheckResult, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, HtmlSsrfThreatCheckResult)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
<filename>aio_pool/pool.py
import asyncio
import logging
import sys
from asyncio.base_events import BaseEventLoop
from asyncio.coroutines import iscoroutinefunction
from asyncio.locks import Semaphore
from asyncio.tasks import Task
from collections import deque
from concurrent.futures import ThreadPoolExecutor
from functools import singledispatch
from multiprocessing.pool import ( # type: ignore # noqa
ExceptionWithTraceback, MaybeEncodingError, Pool,
_helper_reraises_exception, mapstar, starmapstar)
from typing import Any, Awaitable, Callable, Deque, Optional, Set, Tuple, Union
__all__ = ["AioPool"]
logger = logging.getLogger("aiopool")
logger.addHandler(logging.NullHandler())
async def _create_bounded_task(func, args, kwds: dict, sem: Semaphore, loop: "BaseEventLoop", iscoroutinefunction=iscoroutinefunction):
    """Dispatch *func* to the appropriate bounded-task factory and return the task.

    Coroutine functions are scheduled directly, the multiprocessing
    ``mapstar``/``starmapstar`` payloads are fanned out element-wise, and any
    other callable is pushed to the loop's default executor.
    """
    if iscoroutinefunction(func):
        factory = _create_bounded_task_coro
    elif func is mapstar:
        factory = _create_bounded_task_mapstar
    elif func is starmapstar:
        factory = _create_bounded_task_starmapstar
    else:
        factory = _create_bounded_task_thread
    return await factory(func, args, kwds, sem, loop)
async def _create_bounded_task_thread(func, args, kwds: dict, sem, loop: "BaseEventLoop"):
await sem.acquire()
task = loop.run_in_executor(None, lambda: func(*args, **kwds))
task.add_done_callback(lambda t: sem.release())
return task
async def _create_bounded_task_coro(func, args, kwds: dict, sem: Semaphore, loop: "BaseEventLoop"):
await sem.acquire()
task = loop.create_task(func(*args, **kwds))
task.add_done_callback(lambda t: sem.release())
return task
async def _create_bounded_task_mapstar(func: mapstar, args, kwds: dict, sem: Semaphore, loop: "BaseEventLoop"):
    """Expand a ``mapstar`` payload into one bounded task per parameter.

    ``args[0]`` is ``(wrapped_func, iterable_of_params)`` as produced by
    ``Pool.map``; each param is dispatched individually through
    ``_create_bounded_task`` and the tasks are gathered.

    NOTE(review): exceptions are returned in-band here
    (``return_exceptions=True``) unlike the starmapstar variant --
    presumably intentional; confirm.
    """
    wrapped_func = args[0][0]
    param_list = args[0][1]
    pending: Deque[Awaitable[Any]] = deque()
    for single in param_list:
        pending.append(await _create_bounded_task(
            wrapped_func, (single, ), {}, sem, loop))
    return asyncio.gather(*pending, return_exceptions=True)
async def _create_bounded_task_starmapstar(func: starmapstar, args, kwds: dict, sem: Semaphore, loop: "BaseEventLoop"):
    """Expand a ``starmapstar`` payload into one bounded task per argument tuple.

    ``args[0]`` is ``(wrapped_func, iterable_of_arg_tuples)`` as produced by
    ``Pool.starmap``; each tuple is splatted into ``_create_bounded_task``
    and the resulting tasks are gathered.
    """
    wrapped_func = args[0][0]
    param_tuples = args[0][1]
    pending = deque()
    for arg_tuple in param_tuples:
        pending.append(await _create_bounded_task(
            wrapped_func, arg_tuple, {}, sem, loop))
    return asyncio.gather(*pending)
async def task_wrapper(
    job, i, func,
    task: Awaitable[Any],
    put: Callable[[Any], Awaitable[None]],
    wrap_exception: bool = False,
) -> None:
    """Await *task* and ship ``(job, i, (ok, value_or_error))`` through *put*.

    Mirrors multiprocessing's worker result protocol: a failed task is
    reported in-band, optionally wrapped to preserve its traceback across
    process boundaries. If *put* itself fails, a ``MaybeEncodingError`` is
    sent instead so the parent never blocks forever.
    """
    try:
        outcome = (True, await task)
    except Exception as exc:
        if wrap_exception and func is not _helper_reraises_exception:
            exc = ExceptionWithTraceback(exc, exc.__traceback__)
        outcome = (False, exc)
    try:
        await put((job, i, outcome))
    except Exception as exc:
        wrapped = MaybeEncodingError(exc, outcome[1])
        logger.debug("Possible encoding error while sending result: %s" % (wrapped))
        await put((job, i, (False, wrapped)))
async def _run_worker(
    get: Callable[[], Awaitable[Any]],
    put: Callable[[Any], Awaitable[None]],
    loop: asyncio.BaseEventLoop,
    initializer=None,
    initargs=(),
    maxtasks=None,
    wrap_exception=False,
    concurrency_limit=128,
    iscoroutinefunction=asyncio.iscoroutinefunction,
) -> None:
    """Main task loop of one worker process.

    Pulls ``(job, i, func, args, kwds)`` tuples via *get*, schedules each as a
    bounded task, and ships results back via *put* (see ``task_wrapper``).
    Exits on a ``None`` sentinel, on queue EOF/OSError, or after *maxtasks*
    tasks have been accepted.

    :param get: awaitable returning the next task tuple (or ``None`` sentinel)
    :param put: awaitable delivering a result tuple to the parent
    :param loop: event loop tasks are scheduled on
    :param initializer: optional (possibly async) per-worker init callable
    :param initargs: arguments for *initializer*
    :param maxtasks: accept at most this many tasks before exiting (None = unbounded)
    :param wrap_exception: wrap failures to preserve tracebacks across processes
    :param concurrency_limit: maximum tasks in flight at once
    """
    if initializer is not None:
        if iscoroutinefunction(initializer):
            await initializer(*initargs)
        else:
            initializer(*initargs)

    completed = 0
    sem_concurrency_limit = asyncio.BoundedSemaphore(concurrency_limit)
    tasks: Set[Task] = set()

    def remove_task(t: Task, *, tasks: Set[Task] = tasks) -> None:
        tasks.remove(t)

    while maxtasks is None or (maxtasks and completed < maxtasks):
        async with sem_concurrency_limit:
            try:
                task = await get()
            except (EOFError, OSError):
                logger.debug("worker got EOFError or OSError -- exiting")
                for task in tasks:
                    task.cancel()
                tasks.clear()  # Don't wait for anything.
                break
            if task is None:
                logger.debug("worker got sentinel -- exiting")
                break
            job, i, func, args, kwds = task
            task = await _create_bounded_task(func, args, kwds, sem=sem_concurrency_limit, loop=loop)
            new_task = loop.create_task(
                task_wrapper(job, i, func,
                             task,
                             put=put,
                             wrap_exception=wrap_exception,
                             )
            )
            tasks.add(new_task)
            new_task.add_done_callback(remove_task)
            # BUG FIX: ``completed`` was never incremented, so ``maxtasks``
            # never actually bounded the worker's lifetime. Count each
            # accepted task (matching multiprocessing's per-task accounting).
            completed += 1
    if tasks:
        # Drain in-flight tasks before exiting; their results still go to put().
        await asyncio.gather(*tasks, return_exceptions=True)
    logger.debug("worker exiting after %d tasks" % completed)
def worker(
    inqueue,
    outqueue,
    initializer=None,
    initargs=(),
    loop_initializer=asyncio.new_event_loop,
    threads=1,
    maxtasks: Optional[int] = None,
    wrap_exception: bool = False,
    concurrency_limit=128,
) -> None:
    """Entry point of a pool child process.

    Builds an event loop plus three thread pools (one for user work, two
    dedicated single-thread pools so blocking queue get/put never starve the
    workers), then drives ``_run_worker`` to completion.

    :param inqueue: multiprocessing queue of incoming task tuples
    :param outqueue: multiprocessing queue for results
    :param threads: size of the loop's default executor for blocking funcs
    """
    loop: asyncio.BaseEventLoop = loop_initializer()
    asyncio.set_event_loop(loop)
    worker_tp = ThreadPoolExecutor(threads, thread_name_prefix="Worker_TP_")
    loop.set_default_executor(worker_tp)
    # Dedicated single-thread executors: queue get/put are blocking calls.
    get_tp = ThreadPoolExecutor(1, thread_name_prefix="GetTask_TP_")
    put_tp = ThreadPoolExecutor(1, thread_name_prefix="PutTask_TP_")

    async def get_task(*, loop=loop, tp=get_tp, queue=inqueue) -> tuple:
        return await loop.run_in_executor(tp, queue.get)

    async def put_result(result, *, loop=loop, tp=put_tp, queue=outqueue) -> None:
        return await loop.run_in_executor(tp, queue.put, result)

    try:
        loop.run_until_complete(
            _run_worker(
                get_task,
                put_result,
                loop=loop,
                initializer=initializer,
                initargs=initargs,
                maxtasks=maxtasks,
                wrap_exception=wrap_exception,
                concurrency_limit=concurrency_limit,
            )
        )
    except Exception as err:
        logger.exception("worker got exception %s", err)
    finally:
        logger.debug("shutdown workers")
        get_tp.shutdown()
        put_tp.shutdown()
        worker_tp.shutdown()
        logger.debug("shutdown asyncgens")
        loop.run_until_complete(loop.shutdown_asyncgens())
        # BUG FIX: the original checked ``if loop.is_running()`` -- inverted.
        # A loop can only be closed when it is NOT running; after
        # run_until_complete it never is, so the loop was never closed.
        if not loop.is_running():
            loop.close()
    logger.debug("Worker done")
class AioPool(Pool):
    """Process pool that accepts both plain callables and async functions.

    Each child process runs its own event loop (see ``worker``); tasks
    submitted through the regular ``Pool`` API are scheduled on that loop,
    bounded by ``concurrency_limit`` concurrent tasks per process.
    """

    def __init__(
        self,
        processes: Optional[int] = None,
        initializer: Optional[Callable[..., Union[Awaitable[Any], Any]]] = None,
        initargs: Tuple[Any, ...] = (),
        maxtasksperchild: Optional[int] = None,
        context=None,
        loop_initializer: Optional[Callable[[], BaseEventLoop]] = None,
        pool_size: int = 1,
        concurrency_limit: int = 128,
    ) -> None:
        """Process pool implementation that support async functions.

        Supports the same functionality as the original process pool.

        Args:
            processes: number of processes to run, same behaviour as Pool.
                Defaults to None.
            initializer: Initializer function that being executed first by each process.
                Can be async. Optional. Defaults to None.
            initargs: Arguments to pass to initializer. Defaults to ().
            maxtasksperchild: max tasks per process. same behaviour as Pool. Defaults to None.
            context: determine how to start the child processes. same behaviour as Pool. Defaults to None.
            loop_initializer: Function that create the new event loop. Defaults to None.
            pool_size: size for the default pool for the event loop in the new process. Defaults to 1.
            concurrency_limit: Maximum concurrent tasks to run in each process. Defaults to 128.

        Raises:
            ValueError: if ``pool_size`` or ``concurrency_limit`` is below 1.
        """
        self._loop_initializer = loop_initializer or asyncio.new_event_loop
        # Validate before super().__init__ spawns any processes.
        if pool_size <= 0:
            raise ValueError("Thread pool size must be at least 1")
        self._pool_size = pool_size
        if concurrency_limit < 1:
            # Typo "Conccurency" fixed in the error message.
            raise ValueError("Concurrency limit must be at least 1.")
        self._concurrency_limit = concurrency_limit
        super().__init__(processes, initializer, initargs, maxtasksperchild, context)

    # Pool's internal repopulation hook changed shape in Python 3.8; provide
    # the matching override for the interpreter this module is imported under.
    if sys.version_info.minor < 8:
        def _repopulate_pool(self) -> None:
            """Bring the number of pool processes up to the specified number,
            for use after reaping workers which have exited.
            """
            for _ in range(self._processes - len(self._pool)):
                w = self.Process(
                    target=worker,
                    args=(
                        self._inqueue,
                        self._outqueue,
                        self._initializer,
                        self._initargs,
                        self._loop_initializer,
                        self._pool_size,
                        self._maxtasksperchild,
                        self._wrap_exception,
                        self._concurrency_limit,
                    ),
                )
                self._pool.append(w)
                w.name = w.name.replace("Process", "PoolWorker")
                w.daemon = True
                w.start()
                logger.debug("added worker")
    elif sys.version_info.minor >= 8:
        def _repopulate_pool(self) -> None:
            return self._repopulate_pool_static(
                self._ctx,
                self.Process,
                self._processes,
                self._pool,
                self._inqueue,
                self._outqueue,
                self._initializer,
                self._initargs,
                self._loop_initializer,
                self._maxtasksperchild,
                self._wrap_exception,
                self._pool_size,
                self._concurrency_limit,
            )

        @staticmethod
        def _repopulate_pool_static(
            ctx,
            Process,
            processes,
            pool,
            inqueue,
            outqueue,
            initializer,
            initargs,
            loop_initializer,
            maxtasksperchild,
            wrap_exception,
            pool_size,
            concurrency_limit,
        ) -> None:
            """Bring the number of pool processes up to the specified number,
            for use after reaping workers which have exited.
            """
            for i in range(processes - len(pool)):
                w = Process(
                    ctx,
                    target=worker,
                    args=(
                        inqueue,
                        outqueue,
                        initializer,
                        initargs,
                        loop_initializer,
                        pool_size,
                        maxtasksperchild,
                        wrap_exception,
                        concurrency_limit,
                    ),
                )
                w.name = w.name.replace("Process", "PoolWorker")
                w.daemon = True
                w.start()
                pool.append(w)
                logger.debug("added worker")
|
# -*- coding: utf-8 -*-
"""This module contains the core function of the *PLUTO_gen* program and the first subfunction of *PLUTO_gen*.
"""
import xmltodict
def read_xml(filename):
    """Read a *XML-Timeline* file and return its contents as a dictionary.

    Arguments:
        filename (str): Path to the Timeline-XML file.

    Returns:
        doc (dictionary): the parsed XML document.
    """
    with open(filename) as fd:
        contents = fd.read()
    return xmltodict.parse(contents)
def check_payload_command(pafCommand):
    """Return True when the command's mnemonic marks a payload ("TC_paf") command."""
    return pafCommand["@mnemonic"].startswith("TC_paf")
def write_header(plutopath="tmp.plp"):
    """Write the PLUTO procedure header, including an instrument restart.

    The restart sequence (mode change, PLTM enable, power toggle, PLTM
    re-enable) is designed to take 45s to match the procedure that will run
    on the satellite. The file at *plutopath* is truncated and rewritten.

    Arguments:
        plutopath (str): path of the PLUTO script to (over)write.
    """
    # Context manager guarantees the handle is closed even on write errors
    # (the original used a bare open()/close() pair).
    with open(plutopath, "w") as f:
        f.write("procedure\n")
        f.write("\tinitiate and confirm step myStep\n")
        f.write("\t\tmain\n")
        f.write(
            '\t\t\tlog "------------------------------------------------------------------------------------------------------------------";\n'
        )
        f.write('\t\t\tlog "Starting tests";\n')
        f.write("\t\t\tlog to string (current time());\n")
        f.write("\n")
        f.write("\t\t\tinitiate TC_pafMODE with arguments\n")
        f.write("\t\t\t\tMODE:=2\n")
        f.write("\t\t\tend with;\n")
        f.write("\n")
        f.write("\t\t\twait for 5s;\n")
        f.write("\n")
        f.write("\t\t\tinitiate TC_pcfPLTMControl with arguments\n")
        f.write("\t\t\t\tEnable:=1,\n")
        f.write("\t\t\t\tPartition:=0\n")
        f.write("\t\t\tend with;\n")
        f.write("\n")
        f.write("\t\t\twait for 5s;\n")
        f.write("\n")
        f.write("\t\t\tinitiate TC_pafPWRTOGGLE with arguments\n")
        f.write("\t\t\t\tCONST:=165\n")
        f.write("\t\t\tend with;\n")
        f.write("\n")
        f.write("\t\t\twait for 30s;\n")
        f.write("\n")
        f.write("\t\t\tinitiate TC_pcfPLTMControl with arguments\n")
        f.write("\t\t\t\tEnable:=1,\n")
        f.write("\t\t\t\tPartition:=0\n")
        f.write("\t\t\tend with;\n")
        f.write("\n")
        f.write("\t\t\twait for 5s;\n")
        f.write("\n")
def write_footer(plutopath="tmp.plp"):
    """Append the closing statements of the PLUTO procedure to *plutopath*.

    Arguments:
        plutopath (str): path of the PLUTO script to append to.
    """
    # Context manager guarantees the handle is closed (original leaked on error).
    with open(plutopath, "a+") as f:
        f.write("\t\tend main\n")
        f.write("\tend step;\n")
        f.write("end procedure\n")
def write_tcArgument(pafCommand, plutopath="tmp.plp"):
    """Append one telecommand invocation to the PLUTO script.

    Arguments:
        pafCommand (dict): one ``command`` entry from the parsed Timeline-XML
            (``@mnemonic``, ``comment`` and ``tcArguments`` keys are read).
        plutopath (str): path of the PLUTO script to append to.

    Raises:
        ValueError: for non-payload commands (only Platform commands are
            supported) and for ``TC_pafPWRTOGGLE`` (redundant, the header
            already performs a power toggle).
    """
    if not check_payload_command(pafCommand):
        raise ValueError(
            "Invalid Command "
            + pafCommand["@mnemonic"]
            + " PLUTO generator only supports Platform commands"
        )
    elif pafCommand["@mnemonic"] == "TC_pafPWRTOGGLE":
        raise ValueError(
            "Redundant Command "
            + pafCommand["@mnemonic"]
            + " PLUTO generator will remove powertoggles"
        )
    else:
        # Context manager guarantees the handle is closed (original leaked on error).
        with open(plutopath, "a+") as f:
            # NOTE(review): assumes the "comment" key is always present
            # (xmltodict yields None for empty elements) -- confirm upstream.
            if pafCommand["comment"] is not None:
                f.write('\t\t\tlog "' + pafCommand["comment"].split(",")[0] + '"' + ";\n")
            f.write("\t\t\tlog to string (current time());\n")
            f.write("\t\t\tinitiate " + str(pafCommand["@mnemonic"]) + " with arguments\n")
            arguments = pafCommand["tcArguments"]["tcArgument"]
            # xmltodict yields a list for repeated elements, a dict for a single one.
            if isinstance(arguments, list):
                for index, argument in enumerate(arguments):
                    f.write(
                        "\t\t\t\t"
                        + str(argument["@mnemonic"])
                        + ":="
                        + str(argument["#text"])
                    )
                    # Comma-separate all but the last argument.
                    f.write(",\n" if index < len(arguments) - 1 else "\n")
            else:
                f.write(
                    "\t\t\t\t"
                    + str(arguments["@mnemonic"])
                    + ":="
                    + str(arguments["#text"])
                )
                f.write("\n")
            f.write("\t\t\tend with;\n\n")
def write_wait(wait_time, plutopath="tmp.plp"):
    """Append a PLUTO wait statement; a non-positive *wait_time* writes nothing.

    Arguments:
        wait_time (int): number of seconds to wait.
        plutopath (str): path of the PLUTO script to append to.
    """
    if wait_time > 0:
        # Context manager guarantees the handle is closed (original leaked on error).
        with open(plutopath, "a+") as f:
            f.write("\t\t\twait for " + str(wait_time) + "s;\n\n")
def PLUTO_generator(
    configFile,
    XML_Path,
    PLUTO_Path="pluto_script.plp",
    wait_platform=False,
    max_wait_time=None,
):
    """The core function of the PLUTO_gen program.

    Reads a *XML-Timeline* file and outputs a PLUTO script for running on the
    MATS standalone instrument.

    Arguments:
        configFile: currently unused by this function; kept for interface
            compatibility with callers.
        XML_Path (str): Path to the Timeline-XML file. (The original docstring
            called this ``SCIMXML_Path``; the parameter is ``XML_Path``.)
        PLUTO_Path (str): Path where the output file should be written
            (default "pluto_script.plp").
        wait_platform (Bool): Whether to wait for payload commands or not
            (default False).
        max_wait_time (int): Optional upper bound on any single wait, in
            seconds (default None = unbounded).

    Returns:
        None
    """
    timeline_xml = read_xml(XML_Path)
    write_header(PLUTO_Path)

    commands = timeline_xml["InnoSatTimeline"]["listOfCommands"]["command"]
    for i in range(len(commands)):
        # Wait time is the gap to the next command; the last command waits 0s.
        if i < len(commands) - 1:
            wait_time = int(commands[i + 1]["relativeTime"]) - int(
                commands[i]["relativeTime"]
            )
        else:
            wait_time = 0

        try:
            write_tcArgument(commands[i], PLUTO_Path)
            # ``is None`` instead of ``== None`` (identity check for None).
            if max_wait_time is None:
                write_wait(wait_time, PLUTO_Path)
            else:
                write_wait(min(wait_time, max_wait_time), PLUTO_Path)
        except ValueError as e:
            # Skipped commands (platform/redundant) are reported, and their
            # wait is only preserved when the caller asked for it.
            print(e)
            if wait_platform:
                write_wait(wait_time, PLUTO_Path)
            else:
                print("Wait time ignored")

    write_footer(PLUTO_Path)
    return
|
<gh_stars>0
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from unittest import TestCase
from cibyl.models.ci.build import Build
from cibyl.models.ci.environment import Environment
from cibyl.models.ci.job import Job
from cibyl.models.ci.printers.raw import CIRawPrinter
from cibyl.models.ci.system import JobsSystem
from cibyl.models.ci.test import Test
class TestCIRawPrinter(TestCase):
    """Tests for :class:`CIRawPrinter`."""

    def test_str_environment(self):
        """Testing printing of an environment."""
        name = "test_env"
        env = Environment(name)
        printer = CIRawPrinter()
        self.assertIn("Environment: ", printer.print_environment(env))
        self.assertIn(name, printer.print_environment(env))

    def test_print_build(self):
        """Testing printing of a standard build."""
        build_id = 'test-build'
        build_status = 'FAILURE'
        build1 = Build(build_id=build_id)
        build2 = Build(build_id=build_id)
        printer = CIRawPrinter()
        self.assertIn('Build: ', printer.print_build(build1))
        self.assertIn(build_id, printer.print_build(build1))
        self.assertIn('Build: ', printer.print_build(build2))
        # After setting a status, it must appear in the output as well.
        build2.status.value = build_status
        self.assertIn('Build: ', printer.print_build(build2))
        self.assertIn('Status: ', printer.print_build(build2))
        self.assertIn(build_id, printer.print_build(build2))

    def test_print_build_complete(self):
        """Testing printing of a complete build."""
        build_id = 'test-build'
        test = Test("test_name", "failure")
        build = Build(build_id=build_id)
        build.add_test(test)
        build.status.value = "SUCCESS"
        build.duration.value = 60000
        # verbosity=2 is needed so tests and duration are included.
        printer = CIRawPrinter(verbosity=2)
        result = printer.print_build(build)
        self.assertIn('Build: ', result)
        self.assertIn(build_id, result)
        self.assertIn('Duration:', result)
        self.assertIn('1.00m', result)
        self.assertIn('Test:', result)

    def test_print_build_all_status(self):
        """Testing all possible statuses of a build."""
        statuses = ["SUCCESS", "FAILURE", "UNSTABLE"]
        for status in statuses:
            build = Build(build_id='build')
            build.status.value = status
            printer = CIRawPrinter()
            result = printer.print_build(build)
            self.assertIn('Status: ', result)
            self.assertIn(status, result)

    def test_print_job(self):
        """Testing printing of a job."""
        job_name = 'test-job'
        job_url = 'http://ci_system/test-job'
        job1 = Job(name=job_name)
        job2 = Job(name=job_name)
        printer = CIRawPrinter()
        self.assertIn('Job: ', printer.print_job(job1))
        self.assertIn('Job: ', printer.print_job(job2))
        self.assertIn(job1.name.value, printer.print_job(job1))
        self.assertIn(job_name, printer.print_job(job2))
        job2.url.value = job_url
        self.assertIn('Job: ', printer.print_job(job2))
        self.assertIn(job_name, printer.print_job(job2))

    def test_print_test(self):
        """Test printing of a test."""
        name = 'test-test'
        test_result = 'FAILURE'
        class_name = 'unit'
        duration = 25
        test1 = Test(name=name)
        test2 = Test(name=name)
        printer = CIRawPrinter(verbosity=2)
        self.assertIn('Test: ', printer.print_test(test1))
        self.assertIn(name, printer.print_test(test1))
        self.assertIn('Test: ', printer.print_test(test2))
        test2.result.value = test_result
        test2.duration.value = duration
        test2.class_name.value = class_name
        result2 = printer.print_test(test2)
        self.assertIn('Test: ', result2)
        self.assertIn(name, result2)
        self.assertIn('Result: ', result2)
        self.assertIn(test_result, result2)
        self.assertIn('Class name: ', result2)
        self.assertIn(class_name, result2)
        self.assertIn('Duration:', result2)
        self.assertIn('0.00m', result2)

    def test_print_test_all_results(self):
        """Test all possible results of a test."""
        results = ["SUCCESS", "FAILURE", "UNSTABLE", "SKIPPED"]
        printer = CIRawPrinter(verbosity=2)
        # Fixed: the loop variable was previously reassigned to the printed
        # output, turning the membership check into assertIn(x, x), which
        # is always true and never verified the result string.
        for test_result in results:
            test = Test(name='test')
            test.result.value = test_result
            output = printer.print_test(test)
            self.assertIn('Result: ', output)
            self.assertIn(test_result, output)

    def test_system_str_jobs(self):
        """Test system str for a system with jobs and builds."""
        system = JobsSystem("test", "test_type")
        build = Build("1", "SUCCESS")
        job = Job("test_job")
        job.add_build(build)
        system.add_job(job)
        system.register_query()
        printer = CIRawPrinter(verbosity=0)
        output = printer.print_system(system)
        expected = """System: test
Job: test_job
Build: 1
Status: SUCCESS
Total jobs found in query: 1"""
        # NOTE(review): assertIn(member, container) — the argument order
        # looks reversed (checking the printed output is a substring of the
        # expected template); confirm whether `expected` should be checked
        # inside `output` instead.
        self.assertIn(output, expected)
|
import os
import cv2
import glob
import json
import math
import numpy as np
import matplotlib.pyplot as plt
def getminmax(x1, x2):
    """Compute shared y-axis tick bounds for plotting two metric series.

    The lower bound comes from the overall minimum of both series (scaled
    by 10, clamped at 0); the upper bound from the overall maximum (scaled
    by 10, offset by 1, clamped at 10).  The returned integers feed a
    range() of tick positions that the caller divides by 100.

    Arguments:
        x1, x2: Non-empty sequences of numbers (typically metric values
            in [0, 1]).

    Returns:
        tuple: (lower, upper) integer bounds for the tick range.
    """
    # min()/max() replace the original sort-then-index-the-ends approach;
    # same values, no O(n log n) sort.
    low = min(min(x1), min(x2)) * 10
    if low < 0:
        low = 0  # never start the axis below 0
    high = max(max(x1), max(x2)) * 10 + 1
    if high > 10:
        high = 10  # cap at 10, i.e. a metric value of ~1.0
    return math.floor(low) * 10, int(round(high, 0)) * 10 + 5
if __name__ == '__main__':
    # For every (experiment, dataset, decoder, encoder) combination under
    # RUNS/, plot the per-metric train/valid curves of each run, then plot
    # the curves averaged over all runs of that configuration.
    experiments = ['baseline/']
    # NOTE(review): the original script built a ~100-entry list of every
    # supported encoder here and immediately overwrote it with the list
    # below; the dead assignment has been removed.
    encoders = ['resnet50/', 'resnet101/', 'resnext50_32x4d/', 'resnext101_32x8d/',
                'timm-res2net50_26w_4s/', 'timm-res2net101_26w_4s/', 'vgg16/', 'densenet121/',
                'densenet169/', 'densenet201/']
    decoders = ['unetplusplus/', 'unet/', 'fpn/', 'pspnet/', 'linknet/', 'manet/']
    #decoders = ['unet/']
    datasets = ['medseg/', 'covid19china/', 'mosmed/', 'covid20cases/', 'ricord1a/']
    #datasets = ['covid19china/']
    runs_path = 'RUNS/'
    for experiment in experiments:
        for dataset in datasets:
            for decoder in decoders:
                for encoder in encoders:
                    runspath = runs_path + experiment + dataset + decoder + encoder
                    mean_results_path = runspath + '/graphics'
                    # Start from a clean output directory for the averaged plots.
                    if os.path.isdir(mean_results_path):
                        os.system('rm -rf {}'.format(mean_results_path))
                    runs = glob.glob(runspath + '*')
                    # Exactly five runs per configuration are expected.
                    if len(runs) != 5:
                        print('ERROR!!!!')
                        print(runs)
                        exit()
                    all_train = []
                    all_valid = []
                    num_classes = 0
                    labels = []
                    for run in runs:
                        print(run)
                        results_path = run + '/graphics'
                        if os.path.isdir(results_path):
                            os.system('rm -rf {}'.format(results_path))
                        os.mkdir(results_path)
                        train_logs_path = run + '/train_logs.json'
                        with open(train_logs_path) as train_logs_file:
                            train_logs = json.load(train_logs_file)
                        # Metric names come from the first epoch of the first
                        # run; the count of per-class metrics is the label
                        # count minus the three non-class entries.
                        if len(labels) == 0:
                            epoch_0 = train_logs['train'][0]
                            for key, value in epoch_0.items():
                                labels.append(key)
                            num_classes = len(labels) - 3
                        train_results = train_logs['train']
                        valid_results = train_logs['valid']
                        for label in labels:
                            # Skip the non-class bookkeeping entries.
                            if label != 'Time' and label != 'Epoch' and label != 'Weighted mean of: dice_loss and jaccard_loss)':
                                print(label)
                                x1 = []
                                x2 = []
                                for train_epoch, valid_epoch in zip(train_results, valid_results):
                                    x1.append(train_epoch[label])
                                    x2.append(valid_epoch[label])
                                all_train.append(x1)
                                all_valid.append(x2)
                                y = list(range(len(x1)))
                                p, e = getminmax(x1, x2)
                                yticks = [p/100 for p in range(p, e, 5)]
                                xticks = [p for p in range(0, len(x1)+5, 5)]
                                plt.plot(y, x1, label='train')
                                plt.plot(y, x2, label='valid')
                                plt.xticks(xticks)
                                plt.yticks(yticks)
                                plt.title(label)
                                plt.legend()
                                plt.grid(True)
                                plt.savefig(results_path + '/' + label.lower() + '.png')
                                plt.clf()
                        print("--------------------------------------------------")
                    os.mkdir(mean_results_path)
                    # Average each class metric over the runs.  all_train is
                    # laid out run-major (num_classes entries per run), so
                    # stride by num_classes to collect one metric across runs.
                    for i in range(num_classes):
                        mean_train = []
                        mean_valid = []
                        print(labels[i+1])
                        for j in range(0+i, len(all_train), num_classes):
                            mean_train.append(all_train[j])
                            mean_valid.append(all_valid[j])
                        # Element-wise mean across runs, per epoch.
                        mean_train = list(map(lambda x: sum(x)/len(x), zip(*mean_train)))
                        mean_valid = list(map(lambda x: sum(x)/len(x), zip(*mean_valid)))
                        p, e = getminmax(mean_train, mean_valid)
                        y = list(range(len(mean_train)))
                        yticks = [p/100 for p in range(p, e, 5)]
                        xticks = [p for p in range(0, len(mean_train)+5, 5)]
                        plt.plot(y, mean_train, label='train')
                        plt.plot(y, mean_valid, label='valid')
                        plt.xticks(xticks)
                        plt.yticks(yticks)
                        plt.title(dataset + experiment.split('_')[0] + labels[i+1])
                        plt.legend()
                        plt.grid(True)
                        plt.savefig(mean_results_path + '/' + labels[i+1].lower() + '.png')
                        plt.clf()
|
<filename>ZConfig/tests/test_loader.py
##############################################################################
#
# Copyright (c) 2002, 2003, 2018 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests of ZConfig.loader classes and helper functions."""
import os.path
import sys
import tempfile
import unittest
import ZConfig
import ZConfig.loader
import ZConfig.url
from ZConfig._compat import NStringIO as StringIO
from ZConfig._compat import urllib2
from ZConfig.tests.support import CONFIG_BASE, TestHelper
myfile = os.path.abspath(__file__)
LIBRARY_DIR = os.path.join(os.path.dirname(myfile), "library")
class LoaderTestCase(TestHelper, unittest.TestCase):
    """Tests for ZConfig.loader: resource opening, schema caching,
    component imports, and the URL helper functions."""

    def test_open_resource_non_ascii(self):
        # Files are decoded using utf-8 on open
        loader = ZConfig.loader.SchemaLoader()
        url = ZConfig.url.urljoin(CONFIG_BASE, "non-ascii.txt")
        with loader.openResource(url) as stream:
            val = stream.read()
        self.assertEqual(
            val,
            u'# -*-coding: utf-8; mode: conf-*-\n'
            u'This file contains a snowman, U+2603: \u2603\n'
        )

    def test_schema_caching(self):
        # Loading the same URL twice must return the identical (cached)
        # schema object, not a fresh parse.
        loader = ZConfig.loader.SchemaLoader()
        url = ZConfig.url.urljoin(CONFIG_BASE, "simple.xml")
        schema1 = loader.loadURL(url)
        schema2 = loader.loadURL(url)
        self.assertIs(schema1, schema2)

    def test_simple_import_with_cache(self):
        # A type pulled in via <import src=...> must be the same object as
        # the one from the directly-loaded (cached) schema.
        loader = ZConfig.loader.SchemaLoader()
        url1 = ZConfig.url.urljoin(CONFIG_BASE, "library.xml")
        schema1 = loader.loadURL(url1)
        sio = StringIO("<schema>"
                       " <import src='library.xml'/>"
                       " <section type='type-a' name='section'/>"
                       "</schema>")
        url2 = ZConfig.url.urljoin(CONFIG_BASE, "stringio")
        schema2 = loader.loadFile(sio, url2)
        self.assertTrue(schema1.gettype("type-a") is schema2.gettype("type-a"))

    def test_schema_loader_source_errors(self):
        # Empty and dotted-with-empty-part component names are rejected.
        loader = ZConfig.loader.SchemaLoader()
        self.assertRaisesRegex(ZConfig.SchemaError,
                               "illegal schema component name",
                               loader.schemaComponentSource,
                               '', None)
        self.assertRaisesRegex(ZConfig.SchemaError,
                               "illegal schema component name",
                               loader.schemaComponentSource,
                               'foo..bar', None)

    def test_config_loader_abstract_schema(self):
        # A schema whose root type reports itself abstract cannot be used
        # to construct a ConfigLoader, and an abstract section type cannot
        # be matched while loading.
        class MockSchema(object):
            _abstract = True

            def isabstract(self):
                return self._abstract

            def gettype(self, _t):
                return self

        self.assertRaisesRegex(ZConfig.SchemaError,
                               "abstract type",
                               ZConfig.loader.ConfigLoader,
                               MockSchema())
        s = MockSchema()
        s._abstract = False
        loader = ZConfig.loader.ConfigLoader(s)
        # Flip back to abstract after construction to exercise startSection.
        s._abstract = True
        self.assertRaisesRegex(ZConfig.ConfigurationError,
                               "cannot match abstract section",
                               loader.startSection,
                               None, None, None)

    def test_simple_import_using_prefix(self):
        # The schema-level prefix is applied to the relative package name.
        self.load_schema_text("""\
<schema prefix='ZConfig.tests.library'>
<import package='.thing'/>
</schema>
""")

    def test_import_errors(self):
        # must specify exactly one of package or src
        self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
                          StringIO("<schema><import/></schema>"))
        self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
                          StringIO("<schema>"
                                   " <import src='library.xml'"
                                   " package='ZConfig'/>"
                                   "</schema>"))
        # cannot specify src and file
        self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
                          StringIO("<schema>"
                                   " <import src='library.xml'"
                                   " file='other.xml'/>"
                                   "</schema>"))
        # cannot specify module as package
        sio = StringIO("<schema>"
                       " <import package='ZConfig.tests.test_loader'/>"
                       "</schema>")
        with self.assertRaises(ZConfig.SchemaResourceError) as ctx:
            ZConfig.loadSchemaFile(sio)
        e = ctx.exception
        self.assertEqual(e.filename, "component.xml")
        self.assertEqual(e.package, "ZConfig.tests.test_loader")
        self.assertTrue(e.path is None)
        # make sure the str() doesn't raise an unexpected exception
        str(e)

    def test_import_from_package(self):
        loader = ZConfig.loader.SchemaLoader()
        sio = StringIO("<schema>"
                       " <import package='ZConfig.tests.library.widget'/>"
                       "</schema>")
        schema = loader.loadFile(sio)
        self.assertTrue(schema.gettype("widget-a") is not None)

    def test_import_from_package_with_file(self):
        # An explicit file= picks a non-default component file.
        loader = ZConfig.loader.SchemaLoader()
        sio = StringIO("<schema>"
                       " <import package='ZConfig.tests.library.widget'"
                       " file='extra.xml' />"
                       "</schema>")
        schema = loader.loadFile(sio)
        self.assertTrue(schema.gettype("extra-type") is not None)

    def test_import_from_package_extra_directory(self):
        loader = ZConfig.loader.SchemaLoader()
        sio = StringIO("<schema>"
                       " <import package='ZConfig.tests.library.thing'"
                       " file='extras.xml' />"
                       "</schema>")
        schema = loader.loadFile(sio)
        self.assertTrue(schema.gettype("extra-thing") is not None)

    def test_import_from_package_with_missing_file(self):
        # Missing component file -> SchemaResourceError with the details
        # (filename, package, searched path) filled in.
        loader = ZConfig.loader.SchemaLoader()
        sio = StringIO("<schema>"
                       " <import package='ZConfig.tests.library.widget'"
                       " file='notthere.xml' />"
                       "</schema>")
        with self.assertRaises(ZConfig.SchemaResourceError) as ctx:
            loader.loadFile(sio)
        e = ctx.exception
        self.assertEqual(e.filename, "notthere.xml")
        self.assertEqual(e.package, "ZConfig.tests.library.widget")
        self.assertTrue(e.path)
        # make sure the str() doesn't raise an unexpected exception
        str(e)

    def test_import_from_package_with_directory_file(self):
        # file= must be a plain name, not a relative path.
        loader = ZConfig.loader.SchemaLoader()
        sio = StringIO("<schema>"
                       " <import package='ZConfig.tests.library.widget'"
                       " file='really/notthere.xml' />"
                       "</schema>")
        self.assertRaises(ZConfig.SchemaError, loader.loadFile, sio)

    def test_import_two_components_one_package(self):
        loader = ZConfig.loader.SchemaLoader()
        sio = StringIO("<schema>"
                       " <import package='ZConfig.tests.library.widget' />"
                       " <import package='ZConfig.tests.library.widget'"
                       " file='extra.xml' />"
                       "</schema>")
        schema = loader.loadFile(sio)
        schema.gettype("widget-a")
        schema.gettype("extra-type")

    def test_import_component_twice_1(self):
        # Make sure we can import a component twice from a schema.
        # This is most likely to occur when the component is imported
        # from each of two other components, or from the top-level
        # schema and a component.
        loader = ZConfig.loader.SchemaLoader()
        sio = StringIO("<schema>"
                       " <import package='ZConfig.tests.library.widget' />"
                       " <import package='ZConfig.tests.library.widget' />"
                       "</schema>")
        schema = loader.loadFile(sio)
        schema.gettype("widget-a")

    def test_import_component_twice_2(self):
        # Make sure we can import a component from a config file even
        # if it has already been imported from the schema.
        loader = ZConfig.loader.SchemaLoader()
        sio = StringIO("<schema>"
                       " <import package='ZConfig.tests.library.widget' />"
                       "</schema>")
        schema = loader.loadFile(sio)
        loader = ZConfig.loader.ConfigLoader(schema)
        sio = StringIO("%import ZConfig.tests.library.widget")
        loader.loadFile(sio)

    def test_urlsplit_urlunsplit(self):
        # Extracted from Python's test.test_urlparse module:
        # each sample is (url, parsed-6-tuple, split-5-tuple); only the
        # split form and its round-trip are checked here.
        samples = [
            ('http://www.python.org',
             ('http', 'www.python.org', '', '', '', ''),
             ('http', 'www.python.org', '', '', '')),
            ('http://www.python.org#abc',
             ('http', 'www.python.org', '', '', '', 'abc'),
             ('http', 'www.python.org', '', '', 'abc')),
            ('http://www.python.org/#abc',
             ('http', 'www.python.org', '/', '', '', 'abc'),
             ('http', 'www.python.org', '/', '', 'abc')),
            ("http://a/b/c/d;p?q#f",
             ('http', 'a', '/b/c/d', 'p', 'q', 'f'),
             ('http', 'a', '/b/c/d;p', 'q', 'f')),
            ('file:///tmp/junk.txt',
             ('file', '', '/tmp/junk.txt', '', '', ''),
             ('file', '', '/tmp/junk.txt', '', '')),
        ]
        for url, parsed, split in samples:
            result = ZConfig.url.urlsplit(url)
            self.assertEqual(result, split)
            result2 = ZConfig.url.urlunsplit(result)
            self.assertEqual(result2, url)

    def test_file_url_normalization(self):
        # file:/... single-slash URLs normalize to file:///...
        self.assertEqual(
            ZConfig.url.urlnormalize("file:/abc/def"),
            "file:///abc/def")
        self.assertEqual(
            ZConfig.url.urlunsplit(("file", "", "/abc/def", "", "")),
            "file:///abc/def")
        self.assertEqual(
            ZConfig.url.urljoin("file:/abc/", "def"),
            "file:///abc/def")
        self.assertEqual(
            ZConfig.url.urldefrag("file:/abc/def#frag"),
            ("file:///abc/def", "frag"))

    def test_url_from_file(self):
        # _url_from_file only reads the .name attribute, so passing the
        # class itself (not an instance) is sufficient here.
        class MockFile(object):
            name = 'path'
        self.assertEqual('file://',
                         ZConfig.loader._url_from_file(MockFile)[:7])

    def test_isPath(self):
        # Strings with a URL scheme are not paths; everything else
        # (including Windows drive letters and UNC-ish backslashes) is.
        assertTrue = self.assertTrue
        isPath = ZConfig.loader.SchemaLoader().isPath
        assertTrue(isPath("abc"))
        assertTrue(isPath("abc/def"))
        assertTrue(isPath("/abc"))
        assertTrue(isPath("/abc/def"))
        assertTrue(isPath(r"\abc"))
        assertTrue(isPath(r"\abc\def"))
        assertTrue(isPath(r"c:\abc\def"))
        assertTrue(isPath("/ab:cd"))
        assertTrue(isPath(r"\ab:cd"))
        assertTrue(isPath("long name with spaces"))
        assertTrue(isPath("long name:with spaces"))
        assertTrue(not isPath("ab:cd"))
        assertTrue(not isPath("http://www.example.com/"))
        assertTrue(not isPath("http://www.example.com/sample.conf"))
        assertTrue(not isPath("file:///etc/zope/zope.conf"))
        assertTrue(not isPath("file:///c|/foo/bar.conf"))
class TestNonExistentResources(unittest.TestCase):
    # XXX Not sure if this is the best approach for these. These
    # tests make sure that the error reported by ZConfig for missing
    # resources is handled in a consistent way. Since ZConfig uses
    # urllib2.urlopen() for opening all resources, what we do is
    # replace that function with one that always raises an exception.
    # Since urllib2.urlopen() can raise either IOError or OSError
    # (depending on the version of Python), we run test for each
    # exception. urllib2.urlopen() is restored after running the
    # test.

    def setUp(self):
        # Monkey-patch urlopen so every resource open fails with
        # self.error (set by each individual test below).
        self.urllib2_urlopen = urllib2.urlopen
        urllib2.urlopen = self.fake_urlopen

    def tearDown(self):
        # Restore the real urlopen.
        urllib2.urlopen = self.urllib2_urlopen

    def fake_urlopen(self, url):
        raise self.error()

    def test_nonexistent_file_ioerror(self):
        self.error = IOError
        self.check_nonexistent_file()

    def test_nonexistent_file_oserror(self):
        self.error = OSError
        self.check_nonexistent_file()

    def check_nonexistent_file(self):
        # mktemp() is used only to obtain a path that does not exist;
        # nothing is created, so the usual mktemp() race concern does not
        # apply here.
        fn = tempfile.mktemp()
        schema = ZConfig.loadSchemaFile(StringIO("<schema/>"))
        # Every loading entry point must surface the failure as a
        # ZConfig.ConfigurationError, for both file paths and URLs.
        self.assertRaises(ZConfig.ConfigurationError,
                          ZConfig.loadSchema, fn)
        self.assertRaises(ZConfig.ConfigurationError,
                          ZConfig.loadConfig, schema, fn)
        self.assertRaises(ZConfig.ConfigurationError,
                          ZConfig.loadConfigFile, schema,
                          StringIO("%include " + fn))
        self.assertRaises(ZConfig.ConfigurationError,
                          ZConfig.loadSchema,
                          "http://www.zope.org/no-such-document/")
        self.assertRaises(ZConfig.ConfigurationError,
                          ZConfig.loadConfig, schema,
                          "http://www.zope.org/no-such-document/")
class TestResourcesInZip(unittest.TestCase):
    """Importing schema components from a package that lives inside a
    zip file on sys.path."""

    def setUp(self):
        self.old_path = sys.path[:]
        # now add our sample EGG to sys.path:
        zipfile = os.path.join(os.path.dirname(myfile), "foosample.zip")
        sys.path.append(zipfile)

    def tearDown(self):
        # Restore sys.path exactly as it was.
        sys.path[:] = self.old_path

    def test_zip_import_component_from_schema(self):
        # <import package=...> resolves the package from inside the zip.
        sio = StringIO('''
            <schema>
              <abstracttype name="something"/>
              <import package="foo.sample"/>
              <section name="*"
                       attribute="something"
                       type="something"
                       />
            </schema>
            ''')
        schema = ZConfig.loadSchemaFile(sio)
        t = schema.gettype("sample")
        self.assertFalse(t.isabstract())

    def test_zip_import_component_from_config(self):
        # %import in a config file also resolves from inside the zip.
        sio = StringIO('''
            <schema>
              <abstracttype name="something"/>
              <section name="*"
                       attribute="something"
                       type="something"
                       />
            </schema>
            ''')
        schema = ZConfig.loadSchemaFile(sio)
        value = '''
            %import foo.sample
            <sample>
              data value
            </sample>
            '''
        sio = StringIO(value)
        config, _ = ZConfig.loadConfigFile(schema, sio)
        self.assertEqual(config.something.data, "| value |")
        # Overrides targeting an imported section type must still be
        # validated; a bad override raises a syntax error.
        sio = StringIO(value)
        with self.assertRaises(ZConfig.ConfigurationSyntaxError):
            ZConfig.loadConfigFile(schema, sio,
                                   overrides=["sample/data=othervalue"])
class TestOpenPackageResource(TestHelper, unittest.TestCase):
    # Trick: the test instance registers *itself* in sys.modules under
    # magic_name, so its __loader__ and __path__ attributes act as the
    # attributes of a fake package when openPackageResource() imports it.
    magic_name = 'not a valid import name'

    def setUp(self):
        sys.modules[self.magic_name] = self

    def tearDown(self):
        del sys.modules[self.magic_name]

    def test_package_loader_resource_error(self):
        class MockLoader(object):
            pass
        # With a __loader__ that offers no way to read resources, opening
        # a component must fail with "error opening schema component".
        self.__loader__ = MockLoader()
        self.__path__ = ['dir']
        self.assertRaisesRegex(ZConfig.SchemaResourceError,
                               "error opening schema component",
                               ZConfig.loader.openPackageResource,
                               self.magic_name, 'a path')
        # Now with an empty path
        self.assertRaisesRegex(ZConfig.SchemaResourceError,
                               "schema component not found",
                               ZConfig.loader.openPackageResource,
                               self.magic_name, 'a path')

    def test_resource(self):
        # Attribute access on a Resource falls through to the wrapped
        # object (here: this test instance).
        r = ZConfig.loader.Resource(self, None)
        self.assertEqual(self.magic_name, r.magic_name)
|
<gh_stars>100-1000
"""
flood_fill
"""
from __future__ import absolute_import, division, print_function
from PySide import QtGui
import logging
import collections
import time
from mcedit2.editortools import EditorTool
from mcedit2.command import SimplePerformCommand
from mcedit2.editortools.select import SelectionCursor
from mcedit2.util.showprogress import showProgress
from mcedit2.widgets.blockpicker import BlockTypeButton
from mcedit2.widgets.layout import Column, Row
from mceditlib import faces
log = logging.getLogger(__name__)
class FloodFillTool(EditorTool):
    """Editor tool that triggers a flood fill at the clicked block,
    using the block type and options chosen in its tool widget."""
    name = "Flood Fill"
    iconName = "flood_fill"
    modifiesWorld = True

    def mousePress(self, event):
        # Start at the clicked block, or - when "Hover" is checked - at
        # the block adjacent to the clicked face.
        pos = event.blockPosition
        if self.hoverCheckbox.isChecked():
            pos = pos + event.blockFace.vector
        # Push an undoable command carrying the current widget settings.
        command = FloodFillCommand(self.editorSession,
                                   pos,
                                   self.blockTypeWidget.block,
                                   self.indiscriminateCheckBox.isChecked(),
                                   self.getFloodDirs())
        self.editorSession.pushCommand(command)

    def mouseMove(self, event):
        self.mouseDrag(event)

    def mouseDrag(self, event):
        # Keep the selection cursor tracking the hovered block and face.
        self.cursorNode.point = event.blockPosition
        self.cursorNode.face = event.blockFace

    def __init__(self, editorSession, *args, **kwargs):
        super(FloodFillTool, self).__init__(editorSession, *args, **kwargs)
        toolWidget = QtGui.QWidget()
        self.toolWidget = toolWidget
        self.cursorNode = SelectionCursor()
        # One checkbox per axis direction controls which neighbours the
        # fill may spread to; all six default to enabled.
        self.floodXPosCheckbox = QtGui.QCheckBox(self.tr("X+"), checked=True)
        self.floodXNegCheckbox = QtGui.QCheckBox(self.tr("X-"), checked=True)
        self.floodYPosCheckbox = QtGui.QCheckBox(self.tr("Y+"), checked=True)
        self.floodYNegCheckbox = QtGui.QCheckBox(self.tr("Y-"), checked=True)
        self.floodZPosCheckbox = QtGui.QCheckBox(self.tr("Z+"), checked=True)
        self.floodZNegCheckbox = QtGui.QCheckBox(self.tr("Z-"), checked=True)
        floodDirsLayout = Column(Row(
            self.floodXPosCheckbox,
            self.floodYPosCheckbox,
            self.floodZPosCheckbox,
        ), Row(
            self.floodXNegCheckbox,
            self.floodYNegCheckbox,
            self.floodZNegCheckbox,
        ), )
        self.blockTypeWidget = BlockTypeButton()
        # Default fill material is stone.
        self.blockTypeWidget.block = self.editorSession.worldEditor.blocktypes["stone"]
        self.blockTypeWidget.editorSession = self.editorSession
        self.indiscriminateCheckBox = QtGui.QCheckBox("Ignore block meta")
        self.indiscriminateCheckBox.setChecked(False)
        self.hoverCheckbox = QtGui.QCheckBox("Hover")
        toolWidget.setLayout(Column(Row(QtGui.QLabel("Block:"),
                                        self.blockTypeWidget),
                                    Row(self.hoverCheckbox, self.indiscriminateCheckBox),
                                    floodDirsLayout,
                                    None))

    def getFloodDirs(self):
        # Map each face constant to whether its checkbox allows spreading
        # in that direction.
        return {f: c.isChecked() for f, c in
                ((faces.FaceXIncreasing, self.floodXPosCheckbox),
                 (faces.FaceYIncreasing, self.floodYPosCheckbox),
                 (faces.FaceZIncreasing, self.floodZPosCheckbox),
                 (faces.FaceXDecreasing, self.floodXNegCheckbox),
                 (faces.FaceYDecreasing, self.floodYNegCheckbox),
                 (faces.FaceZDecreasing, self.floodZNegCheckbox))}
class FloodFillCommand(SimplePerformCommand):
    """Undoable command that flood-fills connected matching blocks,
    starting from `point`, with the block type in `blockInfo`."""

    def __init__(self, editorSession, point, blockInfo, indiscriminate, floodDirs):
        super(FloodFillCommand, self).__init__(editorSession)
        self.blockInfo = blockInfo  # block type to fill with
        self.point = point  # starting block position
        self.indiscriminate = indiscriminate  # ignore block meta when matching
        self.floodDirs = floodDirs  # face -> bool: allowed spread directions

    def perform(self):
        dim = self.editorSession.currentDimension
        point = self.point
        # ID/data of the block type being replaced ("doomed").
        doomedBlock = dim.getBlockID(*point)
        doomedBlockData = dim.getBlockData(*point)
        checkData = (doomedBlock not in (8, 9, 10, 11)) # always ignore data when replacing water/lava xxx forge fluids?
        indiscriminate = self.indiscriminate
        floodDirs = self.floodDirs
        log.info("Flood fill: replacing %s with %s", (doomedBlock, doomedBlockData), self.blockInfo)
        # Replacing a block with itself: already-replaced blocks would still
        # match the target, so the fill would never make progress -- bail.
        if doomedBlock == self.blockInfo.ID:
            if indiscriminate or doomedBlockData == self.blockInfo.meta:
                return
        if indiscriminate:
            checkData = False
            if doomedBlock == 2:  # grass
                doomedBlock = 3  # dirt
        # Seed the fill by replacing the starting block itself.
        x, y, z = point
        dim.setBlockID(x, y, z, self.blockInfo.ID)
        dim.setBlockData(x, y, z, self.blockInfo.meta)

        def processCoords(coords):
            # One expansion step: replace every matching neighbour of the
            # current frontier and return the new frontier.
            newcoords = collections.deque()
            for (x, y, z) in coords:
                for face, offsets in faces.faceDirections:
                    # Skip directions disabled in the tool options.
                    if not floodDirs[face]:
                        continue
                    dx, dy, dz = offsets
                    p = (x + dx, y + dy, z + dz)
                    nx, ny, nz = p
                    b = dim.getBlockID(nx, ny, nz)
                    if indiscriminate:
                        # Treat grass as dirt so both are matched.
                        if b == 2:
                            b = 3
                    if b == doomedBlock:
                        if checkData:
                            if dim.getBlockData(nx, ny, nz) != doomedBlockData:
                                continue
                        dim.setBlockID(nx, ny, nz, self.blockInfo.ID)
                        dim.setBlockData(nx, ny, nz, self.blockInfo.meta)
                        newcoords.append(p)
            return newcoords

        def spread(coords):
            # Expand the frontier until nothing matches any more, yielding
            # progress strings for the progress dialog.
            start = time.time()
            num = 0
            while len(coords):
                num += len(coords)
                coords = processCoords(coords)
                d = time.time() - start
                progress = "Did {0} coords in {1}".format(num, d)
                log.debug(progress)
                yield progress

        showProgress("Flood fill...", spread([point]), cancel=True)
|
<reponame>wsk1314zwr/submarine
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
from typing import List
import freezegun
import pytest
from freezegun import freeze_time
import submarine
from submarine.entities.model_registry import ModelVersion, RegisteredModel
from submarine.entities.model_registry.model_stages import (
STAGE_ARCHIVED,
STAGE_DEVELOPING,
STAGE_NONE,
STAGE_PRODUCTION,
)
from submarine.exceptions import SubmarineException
from submarine.store.database import models
from submarine.store.model_registry.sqlalchemy_store import SqlAlchemyStore
freezegun.configure(default_ignore_list=["threading", "tensorflow"])
@pytest.mark.e2e
class TestSqlAlchemyStore(unittest.TestCase):
    def setUp(self):
        # Point the submarine client at the test database and build a
        # fresh SQLAlchemy-backed model registry store for each test.
        submarine.set_db_uri(
            "mysql+pymysql://submarine_test:password_test@localhost:3306/submarine_test"
        )
        self.db_uri = submarine.get_db_uri()
        self.store = SqlAlchemyStore(self.db_uri)
    def tearDown(self):
        # Reset the global DB URI and drop all tables so each test starts
        # from an empty registry.
        submarine.set_db_uri(None)
        models.Base.metadata.drop_all(self.store.engine)
    def test_create_registered_model(self):
        """Creating registered models: plain, duplicate, with tags,
        with description, and with invalid names."""
        name1 = "test_create_RM_1"
        rm1 = self.store.create_registered_model(name1)
        self.assertEqual(rm1.name, name1)
        self.assertEqual(rm1.description, None)
        # error in duplicate
        with self.assertRaises(SubmarineException):
            self.store.create_registered_model(name1)
        # test create with tags
        name2 = "test_create_RM_2"
        tags = ["tag1", "tag2"]
        rm2 = self.store.create_registered_model(name2, tags=tags)
        # Both the returned object and a fresh fetch must carry the tags.
        rm2d = self.store.get_registered_model(name2)
        self.assertEqual(rm2.name, name2)
        self.assertEqual(rm2.tags, tags)
        self.assertEqual(rm2d.name, name2)
        self.assertEqual(rm2d.tags, tags)
        # test create with description
        name3 = "test_create_RM_3"
        description = "A test description."
        rm3 = self.store.create_registered_model(name3, description)
        rm3d = self.store.get_registered_model(name3)
        self.assertEqual(rm3.name, name3)
        self.assertEqual(rm3.description, description)
        self.assertEqual(rm3d.name, name3)
        self.assertEqual(rm3d.description, description)
        # invalid model name
        with self.assertRaises(SubmarineException):
            self.store.create_registered_model(None)
        with self.assertRaises(SubmarineException):
            self.store.create_registered_model("")
    def test_update_registered_model_description(self):
        """Updating the description also bumps last_updated_time."""
        name = "test_update_RM"
        rm1 = self.store.create_registered_model(name)
        rm1d = self.store.get_registered_model(name)
        self.assertEqual(rm1.name, name)
        self.assertEqual(rm1d.description, None)
        # update description
        fake_datetime = datetime.strptime("2021-11-11 11:11:11.111000", "%Y-%m-%d %H:%M:%S.%f")
        # freeze_time pins "now" so the stored last_updated_time can be
        # compared exactly.
        with freeze_time(fake_datetime):
            rm2 = self.store.update_registered_model_description(name, "New description.")
            rm2d = self.store.get_registered_model(name)
            self.assertEqual(rm2.name, name)
            self.assertEqual(rm2.description, "New description.")
            self.assertEqual(rm2d.name, name)
            self.assertEqual(rm2d.description, "New description.")
            self.assertEqual(rm2d.last_updated_time, fake_datetime)
    def test_rename_registered_model(self):
        """Renaming a registered model renames its model versions too."""
        name = "test_rename_RM"
        new_name = "test_rename_RN_new"
        rm = self.store.create_registered_model(name)
        self.store.create_model_version(
            name, "model_id_0", "test", "application_1234", "tensorflow"
        )
        self.store.create_model_version(
            name, "model_id_1", "test", "application_1235", "tensorflow"
        )
        mv1d = self.store.get_model_version(name, 1)
        mv2d = self.store.get_model_version(name, 2)
        self.assertEqual(rm.name, name)
        self.assertEqual(mv1d.name, name)
        self.assertEqual(mv2d.name, name)
        # test renaming registered model also updates its models
        self.store.rename_registered_model(name, new_name)
        rm = self.store.get_registered_model(new_name)
        mv1d = self.store.get_model_version(new_name, 1)
        mv2d = self.store.get_model_version(new_name, 2)
        self.assertEqual(rm.name, new_name)
        self.assertEqual(mv1d.name, new_name)
        self.assertEqual(mv2d.name, new_name)
        # test accessing the registered model with the original name will fail
        # NOTE(review): this calls rename(name, name) on the no-longer-existing
        # name rather than get_registered_model(name); confirm that is the
        # intended way to exercise the lookup failure.
        with self.assertRaises(SubmarineException):
            self.store.rename_registered_model(name, name)
        # invalid name will fail
        with self.assertRaises(SubmarineException):
            self.store.rename_registered_model(name, None)
        with self.assertRaises(SubmarineException):
            self.store.rename_registered_model(name, "")
def test_delete_registered_model(self):
    """Deleting a registered model cascades to its tags and model versions."""
    name1 = "test_delete_RM"
    name2 = "test_delete_RM_2"
    rm_tags = ["rm_tag1", "rm_tag2"]
    rm1 = self.store.create_registered_model(name1, tags=rm_tags)
    rm2 = self.store.create_registered_model(name2, tags=rm_tags)
    mv_tags = ["mv_tag1", "mv_tag2"]
    rm1mv1 = self.store.create_model_version(
        rm1.name, "model_id_0", "test", "application_1234", "tensorflow", tags=mv_tags
    )
    rm2mv1 = self.store.create_model_version(
        rm2.name, "model_id_1", "test", "application_1234", "tensorflow", tags=mv_tags
    )
    # check store
    rm1d = self.store.get_registered_model(rm1.name)
    self.assertEqual(rm1d.name, name1)
    self.assertEqual(rm1d.tags, rm_tags)
    rm1mv1d = self.store.get_model_version(rm1mv1.name, rm1mv1.version)
    self.assertEqual(rm1mv1d.name, name1)
    self.assertEqual(rm1mv1d.tags, mv_tags)
    # delete registered model
    self.store.delete_registered_model(rm1.name)
    # cannot get model
    with self.assertRaises(SubmarineException):
        self.store.get_registered_model(rm1.name)
    # cannot delete it again
    with self.assertRaises(SubmarineException):
        self.store.delete_registered_model(rm1.name)
    # registered model tags are cascade deleted with the registered model
    for tag in rm_tags:
        with self.assertRaises(SubmarineException):
            self.store.delete_registered_model_tag(rm1.name, tag)
    # models are cascade deleted with the registered model
    with self.assertRaises(SubmarineException):
        self.store.get_model_version(rm1mv1.name, rm1mv1.version)
    # model tags are cascade deleted with the registered model
    # FIX: iterate the model-version tags (mv_tags); the original iterated
    # rm_tags, which never existed on the version, so the check was vacuous.
    for tag in mv_tags:
        with self.assertRaises(SubmarineException):
            self.store.delete_model_version_tag(rm1mv1.name, rm1mv1.version, tag)
    # Other registered models and model versions are not affected
    rm2d = self.store.get_registered_model(rm2.name)
    self.assertEqual(rm2d.name, rm2.name)
    self.assertEqual(rm2d.tags, rm2.tags)
    # FIX: fetch into a new variable and compare against the expected values;
    # the original rebound rm2mv1 and then compared the object to itself.
    rm2mv1d = self.store.get_model_version(rm2mv1.name, rm2mv1.version)
    self.assertEqual(rm2mv1d.name, name2)
    self.assertEqual(rm2mv1d.tags, mv_tags)
def _compare_registered_model_names(
    self, results: List[RegisteredModel], rms: List[RegisteredModel]
):
    """Assert that both lists contain exactly the same model names."""
    self.assertEqual(
        {result.name for result in results},
        {rm.name for rm in rms},
    )
def test_list_registered_model(self):
    """Listing returns every registered model that was created."""
    created = []
    for i in range(10):
        created.append(self.store.create_registered_model(f"test_list_RM_{i}"))
    listed = self.store.list_registered_model()
    self.assertEqual(len(listed), 10)
    self._compare_registered_model_names(listed, created)
def test_list_registered_model_filter_with_string(self):
    """Substring filtering on registered-model names."""
    names = ["A", "AB", "B", "ABA", "AAA"]
    rms = [self.store.create_registered_model(n) for n in names]
    # "A" matches every name containing an A: A, AB, ABA, AAA
    results = self.store.list_registered_model(filter_str="A")
    self.assertEqual(len(results), 4)
    self._compare_registered_model_names(rms[:2] + rms[3:], results)
    results = self.store.list_registered_model(filter_str="AB")
    self.assertEqual(len(results), 2)
    self._compare_registered_model_names([rms[1], rms[3]], results)
    results = self.store.list_registered_model(filter_str="ABA")
    self.assertEqual(len(results), 1)
    self._compare_registered_model_names([rms[3]], results)
    # no name contains "ABC"
    results = self.store.list_registered_model(filter_str="ABC")
    self.assertEqual(len(results), 0)
    self.assertEqual(results, [])
def test_list_registered_model_filter_with_tags(self):
    """Tag filtering returns only models carrying *all* requested tags."""
    tags = ["tag1", "tag2", "tag3"]
    rms = [
        self.store.create_registered_model("test1"),
        self.store.create_registered_model("test2", tags=tags[0:1]),
        self.store.create_registered_model("test3", tags=tags[1:2]),
        self.store.create_registered_model("test4", tags=[tags[0], tags[2]]),
        self.store.create_registered_model("test5", tags=tags),
    ]
    # "tag1" matches test2, test4 and test5
    results = self.store.list_registered_model(filter_tags=tags[0:1])
    self.assertEqual(len(results), 3)
    self._compare_registered_model_names(results, [rms[1], rms[3], rms[4]])
    # "tag1" AND "tag2" only matches test5
    results = self.store.list_registered_model(filter_tags=tags[0:2])
    self.assertEqual(len(results), 1)
    self._compare_registered_model_names(results, [rms[-1]])
    # empty result
    other_tag = ["tag4"]
    results = self.store.list_registered_model(filter_tags=other_tag)
    self.assertEqual(len(results), 0)
    self.assertEqual(results, [])
    # empty result
    results = self.store.list_registered_model(filter_tags=tags + other_tag)
    self.assertEqual(len(results), 0)
    self.assertEqual(results, [])
def test_list_registered_model_filter_both(self):
    """Name substring and tag filters are combined with AND semantics."""
    tags = ["tag1", "tag2", "tag3"]
    rms = [
        self.store.create_registered_model("A"),
        self.store.create_registered_model("AB", tags=[tags[0]]),
        self.store.create_registered_model("B", tags=[tags[1]]),
        self.store.create_registered_model("ABA", tags=[tags[0], tags[2]]),
        self.store.create_registered_model("AAA", tags=tags),
    ]
    # no filter: everything is returned
    results = self.store.list_registered_model()
    self.assertEqual(len(results), 5)
    self._compare_registered_model_names(results, rms)
    # names containing "A" that carry "tag1": AB, ABA, AAA
    results = self.store.list_registered_model(filter_str="A", filter_tags=[tags[0]])
    self.assertEqual(len(results), 3)
    self._compare_registered_model_names(results, [rms[1], rms[3], rms[4]])
    results = self.store.list_registered_model(filter_str="AB", filter_tags=[tags[0]])
    self.assertEqual(len(results), 2)
    self._compare_registered_model_names(results, [rms[1], rms[3]])
    results = self.store.list_registered_model(filter_str="AAA", filter_tags=tags)
    self.assertEqual(len(results), 1)
    self._compare_registered_model_names(results, [rms[-1]])
@freeze_time("2021-11-11 11:11:11.111000")
def test_get_registered_model(self):
    """A fetched registered model carries its creation metadata and tags."""
    name = "test_get_RM"
    tags = ["tag1", "tag2"]
    # the decorator freezes the clock, so now() matches the store timestamps
    frozen_now = datetime.now()
    created = self.store.create_registered_model(name, tags=tags)
    self.assertEqual(created.name, name)
    fetched = self.store.get_registered_model(name)
    self.assertEqual(fetched.name, name)
    self.assertEqual(fetched.creation_time, frozen_now)
    self.assertEqual(fetched.last_updated_time, frozen_now)
    self.assertEqual(fetched.description, None)
    self.assertEqual(fetched.tags, tags)
def test_add_registered_model_tag(self):
    """Adding registered-model tags; duplicates, bad input and deleted models."""
    name1 = "test_add_RM_tag"
    name2 = "test_add_RM_tag_2"
    tags = ["tag1", "tag2"]
    rm1 = self.store.create_registered_model(name1, tags=tags)
    rm2 = self.store.create_registered_model(name2, tags=tags)
    new_tag = "new tag"
    self.store.add_registered_model_tag(name1, new_tag)
    rmd = self.store.get_registered_model(name1)
    # newly added tag is expected to be listed first
    all_tags = [new_tag] + tags
    self.assertEqual(rmd.tags, all_tags)
    # test add the same tag: adding an existing tag is a no-op
    same_tag = "tag1"
    self.store.add_registered_model_tag(name1, same_tag)
    rm1d = self.store.get_registered_model(rm1.name)
    self.assertEqual(rm1d.tags, all_tags)
    # does not affect other models
    rm2d = self.store.get_registered_model(rm2.name)
    self.assertEqual(rm2d.tags, tags)
    # cannot set invalid tag
    with self.assertRaises(SubmarineException):
        self.store.add_registered_model_tag(rm1.name, None)
    with self.assertRaises(SubmarineException):
        self.store.add_registered_model_tag(rm1.name, "")
    # cannot use invalid model name
    with self.assertRaises(SubmarineException):
        self.store.add_registered_model_tag(None, new_tag)
    # cannot set tag on deleted registered model
    self.store.delete_registered_model(rm1.name)
    with self.assertRaises(SubmarineException):
        new_tag = "new tag2"
        self.store.add_registered_model_tag(name1, new_tag)
def test_delete_registered_model_tag(self):
    """Deleting registered-model tags; missing tags and bad input raise."""
    name1 = "test_delete_RM_tag"
    name2 = "test_delete_RM_tag_2"
    tags = ["tag1", "tag2"]
    rm1 = self.store.create_registered_model(name1, tags=tags)
    rm2 = self.store.create_registered_model(name2, tags=tags)
    new_tag = "new tag"
    # add then remove a tag: the original tag list is restored
    self.store.add_registered_model_tag(rm1.name, new_tag)
    self.store.delete_registered_model_tag(rm1.name, new_tag)
    rm1d = self.store.get_registered_model(rm1.name)
    self.assertEqual(rm1d.tags, tags)
    # delete tag that is already deleted
    with self.assertRaises(SubmarineException):
        self.store.delete_registered_model_tag(rm1.name, new_tag)
    rm1d = self.store.get_registered_model(rm1.name)
    self.assertEqual(rm1d.tags, tags)
    # does not affect other models
    rm2d = self.store.get_registered_model(rm2.name)
    self.assertEqual(rm2d.tags, tags)
    # Cannot delete invalid key
    with self.assertRaises(SubmarineException):
        self.store.delete_registered_model_tag(rm1.name, None)
    with self.assertRaises(SubmarineException):
        self.store.delete_registered_model_tag(rm1.name, "")
    # Cannot use invalid model name
    with self.assertRaises(SubmarineException):
        self.store.delete_registered_model_tag(None, "tag1")
    # Cannot delete tag on deleted (non-existed) registered model
    self.store.delete_registered_model(name1)
    with self.assertRaises(SubmarineException):
        self.store.delete_registered_model_tag(name1, "tag1")
@freeze_time("2021-11-11 11:11:11.111000")
def test_create_model_version(self):
    """Creating versions: defaults, auto-increment, tags and description."""
    model_name = "test_create_MV"
    self.store.create_registered_model(model_name)
    # clock is frozen by the decorator, so now() is deterministic
    fake_datetime = datetime.now()
    mv1 = self.store.create_model_version(
        model_name, "model_id_0", "test", "application_1234", "tensorflow"
    )
    self.assertEqual(mv1.name, model_name)
    self.assertEqual(mv1.version, 1)
    self.assertEqual(mv1.creation_time, fake_datetime)
    m1d = self.store.get_model_version(mv1.name, mv1.version)
    self.assertEqual(m1d.name, model_name)
    self.assertEqual(m1d.id, "model_id_0")
    self.assertEqual(m1d.user_id, "test")
    self.assertEqual(m1d.experiment_id, "application_1234")
    self.assertEqual(m1d.model_type, "tensorflow")
    self.assertEqual(m1d.current_stage, STAGE_NONE)
    self.assertEqual(m1d.creation_time, fake_datetime)
    self.assertEqual(m1d.last_updated_time, fake_datetime)
    self.assertEqual(m1d.dataset, None)
    # new model for same registered model autoincrement version
    m2 = self.store.create_model_version(
        model_name, "model_id_1", "test", "application_1234", "tensorflow"
    )
    m2d = self.store.get_model_version(m2.name, m2.version)
    self.assertEqual(m2.version, 2)
    self.assertEqual(m2d.version, 2)
    # create model with tags
    tags = ["tag1", "tag2"]
    m3 = self.store.create_model_version(
        model_name, "model_id_2", "test", "application_1234", "tensorflow", tags=tags
    )
    m3d = self.store.get_model_version(m3.name, m3.version)
    self.assertEqual(m3.version, 3)
    self.assertEqual(m3.tags, tags)
    self.assertEqual(m3d.version, 3)
    self.assertEqual(m3d.tags, tags)
    # create model with description
    description = "A test description."
    m4 = self.store.create_model_version(
        model_name,
        "model_id_3",
        "test",
        "application_1234",
        "tensorflow",
        description=description,
    )
    m4d = self.store.get_model_version(m4.name, m4.version)
    self.assertEqual(m4.version, 4)
    self.assertEqual(m4.description, description)
    self.assertEqual(m4d.version, 4)
    self.assertEqual(m4d.description, description)
def test_update_model_version_description(self):
    """Updating a model version description also bumps last_updated_time."""
    name = "test_update_MV_description"
    self.store.create_registered_model(name)
    created = self.store.create_model_version(
        name, "model_id_0", "test", "application_1234", "tensorflow"
    )
    fetched = self.store.get_model_version(created.name, created.version)
    self.assertEqual(fetched.name, name)
    self.assertEqual(fetched.version, 1)
    self.assertEqual(fetched.id, "model_id_0")
    self.assertEqual(fetched.description, None)
    # update description under a frozen clock for a predictable timestamp
    frozen = datetime.strptime("2021-11-11 11:11:11.111000", "%Y-%m-%d %H:%M:%S.%f")
    with freeze_time(frozen):
        self.store.update_model_version_description(created.name, created.version, "New description.")
        updated = self.store.get_model_version(created.name, created.version)
        self.assertEqual(updated.name, name)
        self.assertEqual(updated.version, 1)
        self.assertEqual(updated.id, "model_id_0")
        self.assertEqual(updated.description, "New description.")
        self.assertEqual(updated.last_updated_time, frozen)
def test_transition_model_version_stage(self):
    """Stage transitions update the version and the parent model timestamps."""
    name = "test_transition_MV_stage"
    self.store.create_registered_model(name)
    mv1 = self.store.create_model_version(
        name, "model_id_0", "test", "application_1234", "tensorflow"
    )
    mv2 = self.store.create_model_version(
        name, "model_id_1", "test", "application_1234", "tensorflow"
    )
    # NONE -> DEVELOPING
    fake_datetime = datetime.strptime("2021-11-11 11:11:11.111000", "%Y-%m-%d %H:%M:%S.%f")
    with freeze_time(fake_datetime):
        self.store.transition_model_version_stage(mv1.name, mv1.version, STAGE_DEVELOPING)
        m1d = self.store.get_model_version(mv1.name, mv1.version)
        self.assertEqual(m1d.current_stage, STAGE_DEVELOPING)
        # check last updated time
        self.assertEqual(m1d.last_updated_time, fake_datetime)
        rmd = self.store.get_registered_model(name)
        self.assertEqual(rmd.last_updated_time, fake_datetime)
    # DEVELOPING -> PRODUCTION
    fake_datetime = datetime.strptime("2021-11-11 11:11:22.222000", "%Y-%m-%d %H:%M:%S.%f")
    with freeze_time(fake_datetime):
        self.store.transition_model_version_stage(mv1.name, mv1.version, STAGE_PRODUCTION)
        m1d = self.store.get_model_version(mv1.name, mv1.version)
        self.assertEqual(m1d.current_stage, STAGE_PRODUCTION)
        # check last updated time
        self.assertEqual(m1d.last_updated_time, fake_datetime)
        rmd = self.store.get_registered_model(name)
        self.assertEqual(rmd.last_updated_time, fake_datetime)
    # PRODUCTION -> ARCHIVED
    fake_datetime = datetime.strptime("2021-11-11 11:11:22.333000", "%Y-%m-%d %H:%M:%S.%f")
    with freeze_time(fake_datetime):
        self.store.transition_model_version_stage(mv1.name, mv1.version, STAGE_ARCHIVED)
        m1d = self.store.get_model_version(mv1.name, mv1.version)
        self.assertEqual(m1d.current_stage, STAGE_ARCHIVED)
        # check last updated time
        self.assertEqual(m1d.last_updated_time, fake_datetime)
        rmd = self.store.get_registered_model(name)
        self.assertEqual(rmd.last_updated_time, fake_datetime)
    # uncanonical stage: stage names are matched case-insensitively
    for uncanonical_stage_name in ["DEVELOPING", "developing", "DevElopIng"]:
        self.store.transition_model_version_stage(mv1.name, mv1.version, STAGE_NONE)
        self.store.transition_model_version_stage(mv1.name, mv1.version, uncanonical_stage_name)
        m1d = self.store.get_model_version(mv1.name, mv1.version)
        self.assertEqual(m1d.current_stage, STAGE_DEVELOPING)
    # Not matching stages
    with self.assertRaises(SubmarineException):
        self.store.transition_model_version_stage(mv1.name, mv1.version, None)
    # Not matching stages
    with self.assertRaises(SubmarineException):
        self.store.transition_model_version_stage(mv1.name, mv1.version, "stage")
    # No change for other model
    m2d = self.store.get_model_version(mv2.name, mv2.version)
    self.assertEqual(m2d.current_stage, STAGE_NONE)
def test_delete_model_version(self):
    """Deleting a model version cascades its tags; repeats/bad input raise."""
    name = "test_for_delete_MV"
    tags = ["tag1", "tag2"]
    self.store.create_registered_model(name)
    mv = self.store.create_model_version(
        name, "model_id_0", "test", "application_1234", "tensorflow", tags=tags
    )
    mvd = self.store.get_model_version(mv.name, mv.version)
    self.assertEqual(mvd.name, name)
    self.store.delete_model_version(name=mv.name, version=mv.version)
    # model tags are cascade deleted with the model
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version_tag(mv.name, mv.version, tags[0])
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version_tag(mv.name, mv.version, tags[1])
    # cannot get a deleted model
    with self.assertRaises(SubmarineException):
        self.store.get_model_version(mv.name, mv.version)
    # cannot update description of a deleted model
    with self.assertRaises(SubmarineException):
        self.store.update_model_version_description(mv.name, mv.version, "New description.")
    # cannot delete a non-existing version
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version(name=mv.name, version=None)
    # cannot delete a non-existing model name
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version(name=None, version=mv.version)
@freeze_time("2021-11-11 11:11:11.111000")
def test_get_model_version(self):
    """A fetched model version carries all metadata given at creation time."""
    name = "test_get_MV"
    tags = ["tag1", "tag2"]
    self.store.create_registered_model(name)
    # the decorator freezes the clock, so now() matches the store timestamps
    frozen_now = datetime.now()
    created = self.store.create_model_version(
        name, "model_id_0", "test", "application_1234", "tensorflow", tags=tags
    )
    self.assertEqual(created.creation_time, frozen_now)
    self.assertEqual(created.last_updated_time, frozen_now)
    fetched = self.store.get_model_version(created.name, created.version)
    expected = {
        "name": name,
        "id": "model_id_0",
        "user_id": "test",
        "experiment_id": "application_1234",
        "model_type": "tensorflow",
        "current_stage": STAGE_NONE,
        "creation_time": frozen_now,
        "last_updated_time": frozen_now,
        "dataset": None,
        "description": None,
        "tags": tags,
    }
    for attr, value in expected.items():
        self.assertEqual(getattr(fetched, attr), value)
def _compare_model_versions(self, results: List[ModelVersion], mms: List[ModelVersion]) -> None:
    """Assert that both lists contain exactly the same version numbers."""
    self.assertEqual(
        {result.version for result in results},
        {mm.version for mm in mms},
    )
@freeze_time("2021-11-11 11:11:11.111000")
def test_list_model_versions(self):
    """Listing model versions with optional AND-semantics tag filtering."""
    name1 = "test_list_models_1"
    name2 = "test_list_models_2"
    self.store.create_registered_model(name1)
    self.store.create_registered_model(name2)
    tags = ["tag1", "tag2", "tag3"]
    models = [
        self.store.create_model_version(
            name1, "model_id_0", "test", "application_1234", "tensorflow"
        ),
        self.store.create_model_version(
            name1, "model_id_1", "test", "application_1234", "tensorflow", tags=[tags[0]]
        ),
        self.store.create_model_version(
            name1, "model_id_2", "test", "application_1234", "tensorflow", tags=[tags[1]]
        ),
        self.store.create_model_version(
            name1,
            "model_id_3",
            "test",
            "application_1234",
            "tensorflow",
            tags=[tags[0], tags[2]],
        ),
        self.store.create_model_version(
            name1, "model_id_4", "test", "application_1234", "tensorflow", tags=tags
        ),
    ]
    results = self.store.list_model_versions(name1)
    self.assertEqual(len(results), 5)
    self._compare_model_versions(results, models)
    # "tag1" matches versions 2, 4 and 5
    results = self.store.list_model_versions(name1, filter_tags=tags[0:1])
    self.assertEqual(len(results), 3)
    self._compare_model_versions(results, [models[1], models[3], models[4]])
    # "tag1" AND "tag2" only matches the last version
    results = self.store.list_model_versions(name1, filter_tags=tags[0:2])
    self.assertEqual(len(results), 1)
    self._compare_model_versions(results, [models[-1]])
    # empty result
    other_tag = ["tag4"]
    results = self.store.list_model_versions(name1, filter_tags=other_tag)
    self.assertEqual(len(results), 0)
    # empty result
    results = self.store.list_model_versions(name1, filter_tags=tags + other_tag)
    self.assertEqual(len(results), 0)
    # empty result for other models
    results = self.store.list_model_versions(name2)
    self.assertEqual(len(results), 0)
    results = self.store.list_model_versions(name2, filter_tags=tags)
    self.assertEqual(len(results), 0)
def test_get_model_version_uri(self):
    """The model URI follows s3://submarine/registry/<id>/<name>/<version>."""
    name = "test_get_model_version_uri"
    self.store.create_registered_model(name)
    mv = self.store.create_model_version(
        name, "model_id_0", "test", "application_1234", "tensorflow"
    )
    expected = f"s3://submarine/registry/{mv.id}/{mv.name}/{mv.version}"
    self.assertEqual(self.store.get_model_version_uri(mv.name, mv.version), expected)
    # cannot retrieve URI for deleted model version
    self.store.delete_model_version(mv.name, mv.version)
    with self.assertRaises(SubmarineException):
        self.store.get_model_version_uri(mv.name, mv.version)
def test_add_model_version_tag(self):
    """Adding model-version tags; duplicates, bad input and deleted versions."""
    name1 = "test_add_MV_tag"
    name2 = "test_add_MV_tag_2"
    tags = ["tag1", "tag2"]
    self.store.create_registered_model(name1)
    self.store.create_registered_model(name2)
    rm1mv1 = self.store.create_model_version(
        name1, "model_id_0", "test", "application_1234", "tensorflow", tags=tags
    )
    rm1mv2 = self.store.create_model_version(
        name1, "model_id_1", "test", "application_1234", "tensorflow", tags=tags
    )
    rm2mv1 = self.store.create_model_version(
        name2, "model_id_2", "test", "application_1234", "tensorflow", tags=tags
    )
    new_tag = "new tag"
    self.store.add_model_version_tag(rm1mv1.name, rm1mv1.version, new_tag)
    # newly added tag is expected to be listed first
    all_tags = [new_tag] + tags
    rm1m1d = self.store.get_model_version(rm1mv1.name, rm1mv1.version)
    self.assertEqual(rm1m1d.name, name1)
    self.assertEqual(rm1m1d.tags, all_tags)
    # test add a same tag: adding an existing tag is a no-op
    same_tag = "tag1"
    self.store.add_model_version_tag(rm1mv1.name, rm1mv1.version, same_tag)
    mvd = self.store.get_model_version(rm1mv1.name, rm1mv1.version)
    self.assertEqual(mvd.tags, all_tags)
    # does not affect other models
    rm1m2d = self.store.get_model_version(rm1mv2.name, rm1mv2.version)
    self.assertEqual(rm1m2d.name, name1)
    self.assertEqual(rm1m2d.tags, tags)
    rm2mv1 = self.store.get_model_version(rm2mv1.name, rm2mv1.version)
    self.assertEqual(rm2mv1.name, name2)
    self.assertEqual(rm2mv1.tags, tags)
    # cannot add an invalid tag
    with self.assertRaises(SubmarineException):
        self.store.add_model_version_tag(rm1mv1.name, rm1mv1.version, None)
    with self.assertRaises(SubmarineException):
        self.store.add_model_version_tag(rm1mv1.name, rm1mv1.version, "")
    # cannot add tag on deleted (non-existed) model
    self.store.delete_model_version(rm1mv1.name, rm1mv1.version)
    with self.assertRaises(SubmarineException):
        self.store.add_model_version_tag(rm1mv1.name, rm1mv1.version, same_tag)
def test_delete_model_tag(self):
    """Deleting model-version tags; isolation, repeats and bad input."""
    name1 = "test_delete_MV_tag"
    name2 = "test_delete_MV_tag_2"
    tags = ["tag1", "tag2"]
    self.store.create_registered_model(name1)
    self.store.create_registered_model(name2)
    rm1mv1 = self.store.create_model_version(
        name1, "model_id_0", "test", "application_1234", "tensorflow", tags=tags
    )
    rm1m2 = self.store.create_model_version(
        name1, "model_id_1", "test", "application_1234", "tensorflow", tags=tags
    )
    rm2mv1 = self.store.create_model_version(
        name2, "model_id_2", "test", "application_1234", "tensorflow", tags=tags
    )
    new_tag = "new tag"
    # add then remove a tag: the original tag list is restored
    self.store.add_model_version_tag(rm1mv1.name, rm1mv1.version, new_tag)
    self.store.delete_model_version_tag(rm1mv1.name, rm1mv1.version, new_tag)
    rm1m1d = self.store.get_model_version(rm1mv1.name, rm1mv1.version)
    self.assertEqual(rm1m1d.tags, tags)
    # deleting a tag does not affect other models
    self.store.delete_model_version_tag(rm1mv1.name, rm1mv1.version, tags[0])
    rm1m1d = self.store.get_model_version(rm1mv1.name, rm1mv1.version)
    rm1m2d = self.store.get_model_version(rm1m2.name, rm1m2.version)
    rm2mv1 = self.store.get_model_version(rm2mv1.name, rm2mv1.version)
    self.assertEqual(rm1m1d.tags, tags[1:])
    self.assertEqual(rm1m2d.tags, tags)
    self.assertEqual(rm2mv1.tags, tags)
    # delete a tag that is already deleted
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version_tag(rm1mv1.name, rm1mv1.version, tags[0])
    rm1m1d = self.store.get_model_version(rm1mv1.name, rm1mv1.version)
    self.assertEqual(rm1m1d.tags, tags[1:])
    # cannot delete tag with invalid value
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version_tag(rm1mv1.name, rm1mv1.version, None)
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version_tag(rm1mv1.name, rm1mv1.version, "")
    # cannot delete tag on deleted (non-existed) model
    self.store.delete_model_version(rm1m2.name, rm1m2.version)
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version_tag(rm1m2.name, rm1m2.version, tags[0])
    # cannot use invalid model name or version
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version_tag(None, rm1mv1.version, tags[1])
    with self.assertRaises(SubmarineException):
        self.store.delete_model_version_tag(rm1mv1.name, None, tags[1])
|
<gh_stars>10-100
import os
import sys
import yaml
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import torch
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from networks import encoder_net
from networks import transformer_net
import train
torch.autograd.set_detect_anomaly(True)
GRADIENTS = {} # Each element is [counter, grad]
def backward_hook(module, grad_input, grad_output):
    """Backward-pass hook that accumulates output gradients into GRADIENTS.

    Args:
        module (torch.nn.Module): Module this backward pass belongs to; must
            carry a ``name`` attribute used as the key in the GRADIENTS dict.
        grad_input (tuple): Gradients w.r.t. the module inputs (unused).
        grad_output (tuple): 1-tuple holding the gradient of the loss w.r.t.
            the module output.
    """
    def as_array(tensor):
        # detach from the autograd graph and move to host memory
        return tensor.detach().cpu().numpy()

    out_grad = grad_output[0]
    if module.name != 'Transformer_out':
        set_gradient(module.name, as_array(out_grad))
        return None
    # Split the transformer output gradient into its phase-space components.
    q, p = transformer_net.TransformerNet.to_phase_space(out_grad)
    set_gradient('Transformer_out_q', as_array(q))
    set_gradient('Transformer_out_p', as_array(p))
    return None
def set_gradient(name, gradient):
    """Accumulate *gradient* under *name* in the global GRADIENTS dict.

    Each entry is a two-element list ``[call_count, summed_gradient]``.
    """
    entry = GRADIENTS.get(name)
    if entry is None:
        GRADIENTS[name] = [1, gradient]
    else:
        entry[0] += 1
        entry[1] += gradient
def register_hooks(hgn):
    """Name the interesting HGN layers and attach the backward hook to each.

    Args:
        hgn (hamiltonian_generative_network.HGN): The HGN to analyse.
    """
    # (module, name) pairs; the name is read by backward_hook via module.name.
    # Data-driven loop replaces 18 duplicated statements.
    layers = [
        (hgn.encoder.input_conv, 'Encoder_in'),
        (hgn.encoder.out_mean, 'Encoder_out_mean'),
        (hgn.encoder.out_logvar, 'Encoder_out_logvar'),
        (hgn.transformer.in_conv, 'Transformer_in'),
        (hgn.transformer.out_conv, 'Transformer_out'),
        (hgn.hnn.in_conv, 'Hamiltonian_in'),
        (hgn.hnn.linear, 'Hamiltonian_out'),
        (hgn.decoder.residual_blocks[0], 'Decoder_in'),
        (hgn.decoder.out_conv, 'Decoder_out'),
    ]
    for module, name in layers:
        module.name = name
        # NOTE(review): register_backward_hook is deprecated in recent PyTorch
        # in favor of register_full_backward_hook — confirm the pinned torch
        # version before switching.
        module.register_backward_hook(backward_hook)
def get_grads(hgn, batch_size, dtype):
    """Run one fit step on random input and collect per-layer gradient stats.

    Args:
        hgn (hamiltonian_generative_network.HGN): The HGN to analyze.
        batch_size (int): Batch size used when testing gradients.
        dtype (torch.dtype): Type to be used in tensor operations.

    Returns:
        tuple: (names, max_grads, mean_grads) — layer names plus the max and
        mean of the absolute averaged gradient per layer, aligned by index.
    """
    register_hooks(hgn)
    rand_in = torch.rand((batch_size, hgn.seq_len, hgn.channels, 32, 32)).type(dtype)
    hgn.fit(rand_in)
    # Materialize the keys (a list, not a live view of the global dict) and
    # compute each averaged |gradient| once instead of twice per layer.
    names = list(GRADIENTS)
    avg_abs = [np.abs(GRADIENTS[k][1] / GRADIENTS[k][0]) for k in names]
    max_grads = [g.max() for g in avg_abs]
    mean_grads = [g.mean() for g in avg_abs]
    return names, max_grads, mean_grads
def plot_grads(names, max_grads, mean_grads):
    """Render a bar chart comparing max and mean gradient magnitude per layer.

    Args:
        names (iterable): Layer names, used as x-axis tick labels.
        max_grads (list): Maximum absolute gradient per layer (cyan bars).
        mean_grads (list): Mean absolute gradient per layer (blue bars).
    """
    plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.3, lw=1, color="c")
    plt.bar(np.arange(len(max_grads)), mean_grads, alpha=0.3, lw=1, color="b")
    plt.hlines(0, 0, len(mean_grads) + 1, lw=2, color="k")
    plt.xticks(range(0, len(mean_grads), 1), names, rotation="vertical")
    plt.xlim(left=0, right=len(mean_grads))
    plt.ylim(bottom=-0.000001, top=0.0001)  # zoom in on the lower gradient regions
    plt.xlabel("Layers")
    plt.ylabel("average gradient")
    plt.title("Gradient flow")
    plt.grid(True)
    # Proxy artists so the legend shows the three bar/line colors.
    plt.legend([Line2D([0], [0], color="c", lw=4),
                Line2D([0], [0], color="b", lw=4),
                Line2D([0], [0], color="k", lw=4)],
               ['max-gradient', 'mean-gradient', 'zero-gradient'])
    plt.show()
if __name__ == '__main__':
    # Entry point: load experiment params, build the HGN, run one backward
    # pass on random data, then report per-layer gradient statistics.
    params_file = "experiment_params/default.yaml"
    with open(params_file, 'r') as f:
        params = yaml.load(f, Loader=yaml.FullLoader)
    # Fall back to CPU when CUDA is unavailable.
    device = params["device"] if torch.cuda.is_available() else "cpu"
    hgn = train.load_hgn(params, device=device, dtype=torch.float)
    names, max_grads, mean_grads = get_grads(
        hgn, batch_size=params['optimization']['batch_size'], dtype=torch.float)
    print('-------------------BACKWARD CALL COUNTS------------------------------------------------')
    for k, v in GRADIENTS.items():
        print(f'{k:20} backward called {v[0]:10} times')
    print('-------------------------GRADIENTS-----------------------------------------------------')
    for name, max_grad, mean_grad in zip(names, max_grads, mean_grads):
        print(f'{name:20} max_grad: {max_grad:25} mean_grad: {mean_grad:25}')
    print('---------------------------------------------------------------------------------------')
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2019 CERN.
#
# cds-books is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CDS-Books migrator API."""
import json
import uuid
from contextlib import contextmanager
import click
from elasticsearch_dsl import Q
from flask import current_app
from invenio_app_ils.documents.api import Document, DocumentIdProvider
from invenio_app_ils.documents.search import DocumentSearch
from invenio_app_ils.records_relations.api import RecordRelationsParentChild
from invenio_app_ils.relations.api import SERIAL_RELATION, \
MULTIPART_MONOGRAPH_RELATION
from invenio_app_ils.series.api import Series, SeriesIdProvider
from invenio_app_ils.series.search import SeriesSearch
from invenio_base.app import create_cli
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from invenio_migrator.cli import _loadrecord
from cds_books.migrator.errors import DocumentMigrationError, \
MultipartMigrationError
from cds_books.migrator.records import CDSParentRecordDumpLoader
@contextmanager
def commit():
    """Commit the DB transaction, or roll back and re-raise on any error."""
    try:
        yield
        db.session.commit()
    except BaseException:
        # Explicit BaseException instead of a bare ``except:`` so the intent
        # is visible: roll back on *any* interruption (including SystemExit
        # and KeyboardInterrupt) and always re-raise.
        print('Rolling back changes...')
        db.session.rollback()
        raise
def reindex_pidtype(pid_type):
    """Reindex records with the specified pid_type."""
    click.echo('Indexing pid type "{}"...'.format(pid_type))
    cli = create_cli()
    runner = current_app.test_cli_runner()
    reindex_cmd = 'index reindex --pid-type {} --yes-i-know'.format(pid_type)
    runner.invoke(cli, reindex_cmd, catch_exceptions=False)
    runner.invoke(cli, 'index run', catch_exceptions=False)
    click.echo('Indexing completed!')
def bulk_index_records(records):
    """Bulk index a list of records."""
    click.echo('Bulk indexing {} records...'.format(len(records)))
    record_ids = [str(record.id) for record in records]
    indexer = RecordIndexer()
    indexer.bulk_index(record_ids)
    indexer.process_bulk_queue()
    click.echo('Indexing completed!')
def model_provider_by_rectype(rectype):
    """Return the correct model and PID provider based on the rectype."""
    if rectype == 'document':
        return Document, DocumentIdProvider
    if rectype in ('serial', 'multipart'):
        return Series, SeriesIdProvider
    raise ValueError('Unknown rectype: {}'.format(rectype))
def import_parents_from_file(dump_file, rectype, include):
    """Load parent records from file.

    Args:
        dump_file: open JSON dump file mapping keys to parent record dicts.
        rectype (str): 'serial' or 'multipart'.
        include (str): comma-separated keys to restrict the import to, or
            None to import everything.
    """
    model, provider = model_provider_by_rectype(rectype)
    include_keys = None if include is None else include.split(',')
    records = []
    with click.progressbar(json.load(dump_file).items()) as bar:
        for key, parent in bar:
            # Dumps are labelled by legacy recid when present, else by title.
            label = parent['legacy_recid'] if 'legacy_recid' in parent \
                else parent['title']
            click.echo('Importing parent "{0}({1})"...'.format(label, rectype))
            if include_keys is not None and key not in include_keys:
                continue
            migration = parent.get('_migration', {})
            # Only import serials that have children and multiparts that
            # have volumes (single statement replaces duplicated branches).
            should_import = (
                (rectype == 'serial' and migration.get('children', []))
                or (rectype == 'multipart' and migration.get('volumes', []))
            )
            if should_import:
                records.append(import_record(parent, model, provider))
    # Index all new parent records
    bulk_index_records(records)
def import_record(dump, model, pid_provider):
    """Import record in database."""
    return CDSParentRecordDumpLoader.create(dump, model, pid_provider)
def import_documents_from_record_file(sources, include):
    """Import documents from records file generated by CDS-Migrator-Kit.

    Args:
        sources: open JSON dump files to import.
        include (str): comma-separated record keys to restrict the import to,
            or None to import everything.
    """
    # FIX: the original split `include` into a list and then called
    # .split(',') on that list again, raising AttributeError whenever an
    # include filter was actually passed. Split exactly once.
    include_keys = None if include is None else include.split(',')
    # Loop-invariant: same model/provider for every source.
    model, provider = model_provider_by_rectype('document')
    for idx, source in enumerate(sources, 1):
        click.echo('({}/{}) Migrating documents in {}...'.format(
            idx, len(sources), source.name))
        with click.progressbar(json.load(source).items()) as bar:
            records = []
            for key, parent in bar:
                click.echo('Importing document "{}"...'.
                           format(parent['legacy_recid']))
                if include_keys is None or key in include_keys:
                    records.append(import_record(parent, model, provider))
        # Index the new document records of this source
        bulk_index_records(records)
def import_documents_from_dump(sources, source_type, eager, include):
    """Load document records from legacy dump files and re-index them.

    :param sources: iterable of open dump file objects (JSON arrays).
    :param source_type: dump source type forwarded to ``_loadrecord``.
    :param eager: forwarded to ``_loadrecord`` as keyword argument.
    :param include: optional comma-separated string of recids to load.
    """
    allowed_recids = None if include is None else include.split(',')
    total = len(sources)
    for position, dump_file in enumerate(sources, 1):
        click.echo('({}/{}) Migrating documents in {}...'.format(
            position, total, dump_file.name))
        with click.progressbar(json.load(dump_file)) as items:
            for entry in items:
                click.echo('Processing document "{}"...'.
                           format(entry['recid']))
                if allowed_recids is None or str(entry['recid']) in allowed_recids:
                    _loadrecord(entry, source_type, eager=eager)
    # We don't get the record back from _loadrecord so re-index all documents
    reindex_pidtype('docid')
def get_multipart_by_legacy_recid(recid):
    """Search multiparts by its legacy recid.

    :param recid: legacy record id to look up.
    :returns: the matching :class:`Series` record, or None when no match is
        found (the "no match" error is currently only reported, see TODO).
    :raises MultipartMigrationError: when more than one multipart matches.
    """
    search = SeriesSearch().query(
        'bool',
        filter=[
            Q('term', mode_of_issuance='MULTIPART_MONOGRAPH'),
            Q('term', legacy_recid=recid),
        ]
    )
    result = search.execute()
    if not result.hits or result.hits.total < 1:
        click.secho('no multipart found with legacy recid {}'.format(recid),
                    fg='red')
        # TODO uncomment with cleaner data
        # raise MultipartMigrationError(
        #     'no multipart found with legacy recid {}'.format(recid))
        # NOTE(review): this branch falls through and implicitly returns None;
        # callers must handle that (see link_and_create_multipart_volumes).
    elif result.hits.total > 1:
        raise MultipartMigrationError(
            'found more than one multipart with recid {}'.format(recid))
    else:
        return Series.get_record_by_pid(result.hits[0].pid)
def create_multipart_volumes(pid, multipart_legacy_recid, migration_volumes):
    """Create multipart volume documents (generator).

    Merges the per-key volume data in *migration_volumes* by volume number,
    re-uses the existing document *pid* for the first (lowest) volume, and
    creates a brand-new document record for every further volume.

    :param pid: pid of the existing document to reuse for the first volume.
    :param multipart_legacy_recid: legacy recid of the parent multipart.
    :param migration_volumes: list of dicts, each carrying a 'volume' number
        plus one or more data keys for that volume.
    :yields: committed Document records, first volume first.
    :raises KeyError: when the same key appears twice for one volume.
    """
    volumes = {}
    # Combine all volume data by volume number
    click.echo('Creating volume for {}...'.format(multipart_legacy_recid))
    for obj in migration_volumes:
        volume_number = obj['volume']
        if volume_number not in volumes:
            volumes[volume_number] = {}
        volume = volumes[volume_number]
        for key in obj:
            if key != 'volume':
                if key in volume:
                    raise KeyError(
                        'Duplicate key "{}" for multipart {}'.format(
                            key,
                            multipart_legacy_recid
                        )
                    )
                volume[key] = obj[key]
    volume_numbers = iter(sorted(volumes.keys()))
    # Re-use the current record for the first volume
    first_volume = next(volume_numbers)
    first = Document.get_record_by_pid(pid)
    if 'title' in volumes[first_volume]:
        first['title'] = volumes[first_volume]['title']
    first['volume'] = first_volume
    first['_migration']['multipart_legacy_recid'] = multipart_legacy_recid
    # to be tested
    if 'legacy_recid' in first:
        del first['legacy_recid']
    first.commit()
    yield first
    # Create new records for the rest
    for number in volume_numbers:
        # Clone the first record so shared metadata is inherited.
        temp = first.copy()
        temp['title'] = volumes[number]['title']
        temp['volume'] = number
        record_uuid = uuid.uuid4()
        provider = DocumentIdProvider.create(
            object_type='rec',
            object_uuid=record_uuid,
        )
        temp['pid'] = provider.pid.pid_value
        record = Document.create(temp, record_uuid)
        record.commit()
        yield record
def create_parent_child_relation(parent, child, relation_type, volume):
    """Register a parent/child relation between two records.

    :param parent: parent record (series/serial).
    :param child: child record (document or multipart).
    :param relation_type: relation type constant to store.
    :param volume: optional volume identifier; stored as a string, or None
        when falsy.
    """
    relations = RecordRelationsParentChild()
    click.echo('Creating relations: {0} - {1}'.format(parent['pid'],
                                                      child['pid']))
    volume_label = str(volume) if volume else None
    relations.add(
        parent=parent,
        child=child,
        relation_type=relation_type,
        volume=volume_label,
    )
def link_and_create_multipart_volumes():
    """Link and create multipart volume records.

    Scans all documents flagged as multipart, materialises their volume
    documents via :func:`create_multipart_volumes`, and relates each volume
    to its parent multipart series.
    """
    click.echo('Creating document volumes and multipart relations...')
    search = DocumentSearch().filter('term', _migration__is_multipart=True)
    for hit in search.scan():
        # Documents without a legacy recid are themselves generated volumes
        # of a multipart, not migration roots.
        if 'legacy_recid' not in hit:
            continue
        click.secho('Linking multipart {}...'.format(hit.legacy_recid),
                    fg='green')
        # May be None when no multipart matches (see
        # get_multipart_by_legacy_recid); guarded below.
        multipart = get_multipart_by_legacy_recid(hit.legacy_recid)
        documents = create_multipart_volumes(
            hit.pid,
            hit.legacy_recid,
            hit._migration.volumes
        )
        for document in documents:
            if document and multipart:
                click.echo(
                    'Creating relations: {0} - {1}'.format(multipart['pid'],
                                                           document['pid']))
                create_parent_child_relation(
                    multipart,
                    document,
                    MULTIPART_MONOGRAPH_RELATION,
                    document['volume']
                )
def get_serials_by_child_recid(recid):
    """Yield every serial Series record that lists *recid* as a child.

    :param recid: legacy recid of the child record.
    """
    query = SeriesSearch().query(
        'bool',
        filter=[
            Q('term', mode_of_issuance='SERIAL'),
            Q('term', _migration__children=recid),
        ]
    )
    for match in query.scan():
        yield Series.get_record_by_pid(match.pid)
def get_migrated_volume_by_serial_title(record, title):
    """Return the migrated volume number stored for serial *title*.

    :param record: record dict with a ``_migration.serials`` list.
    :param title: serial title to match exactly.
    :returns: the serial entry's 'volume' value, or None when the matched
        entry carries no volume.
    :raises DocumentMigrationError: when no serial entry has that title.
    """
    matches = (s for s in record['_migration']['serials'] if s['title'] == title)
    for entry in matches:
        return entry.get('volume', None)
    raise DocumentMigrationError(
        'Unable to find volume number in record {} by title "{}"'.format(
            record['pid'],
            title
        )
    )
def link_documents_and_serials():
    """Link documents/multiparts and serials.

    Creates SERIAL relations for (a) documents flagged with a serial and
    (b) multipart monograph series flagged with a serial.
    """
    def link_records_and_serial(record_cls, search):
        # Shared worker: relate every hit of *search* (records of type
        # *record_cls*) to each serial that lists its legacy recid.
        for hit in search.scan():
            # Skip linking if the hit doesn't have a legacy recid since it
            # means it's a volume of a multipart
            if 'legacy_recid' not in hit:
                continue
            record = record_cls.get_record_by_pid(hit.pid)
            for serial in get_serials_by_child_recid(hit.legacy_recid):
                volume = get_migrated_volume_by_serial_title(
                    record,
                    serial['title']
                )
                create_parent_child_relation(
                    serial,
                    record,
                    SERIAL_RELATION,
                    volume
                )
    click.echo('Creating serial relations...')
    link_records_and_serial(
        Document,
        DocumentSearch().filter('term', _migration__has_serial=True)
    )
    link_records_and_serial(
        Series,
        SeriesSearch().filter('bool', filter=[
            Q('term', mode_of_issuance='MULTIPART_MONOGRAPH'),
            Q('term', _migration__has_serial=True),
        ])
    )
def validate_serial_records():
    """Validate that serials were migrated successfully.

    Performs the following checks:

    * Find duplicate serials (by title)
    * Ensure all children of migrated serials were migrated

    Problems are reported via click/logging; nothing is raised.
    """
    def validate_serial_relation(serial, recids):
        # Compare the serial's actual relations against the expected legacy
        # child recids and report count mismatches and unexpected children.
        relations = serial.relations.get().get('serial', [])
        if len(recids) != len(relations):
            click.echo(
                '[Serial {}] Incorrect number of children: {} '
                '(expected {})'.format(
                    serial['pid'],
                    len(relations),
                    len(recids)
                )
            )
        for relation in relations:
            child = Document.get_record_by_pid(
                relation['pid'],
                pid_type=relation['pid_type']
            )
            if 'legacy_recid' in child and child['legacy_recid'] not in recids:
                click.echo(
                    '[Serial {}] Unexpected child with legacy '
                    'recid: {}'.format(serial['pid'], child['legacy_recid'])
                )
    titles = set()
    search = SeriesSearch().filter('term', mode_of_issuance='SERIAL')
    for serial_hit in search.scan():
        # Store titles and check for duplicates
        if 'title' in serial_hit:
            title = serial_hit.title
            if title in titles:
                current_app.logger.warning(
                    'Serial title "{}" already exists'.format(title))
            else:
                titles.add(title)
        # Check if any children are missing
        children = serial_hit._migration.children
        serial = Series.get_record_by_pid(serial_hit.pid)
        validate_serial_relation(serial, children)
    click.echo('Serial validation check done!')
def validate_multipart_records():
    """Validate that multiparts were migrated successfully.

    Performs the following checks:

    * Ensure all volumes of migrated multiparts were migrated

    Problems are reported via click; nothing is raised.
    """
    def validate_multipart_relation(multipart, volumes):
        # Compare actual multipart relations against the migration volume
        # data: counts must match (by distinct volume number) and every
        # related child's title must appear in the migration data.
        relations = multipart.relations.get().get('multipart_monograph', [])
        titles = [volume['title'] for volume in volumes if 'title' in volume]
        count = len(set(v['volume'] for v in volumes))
        if count != len(relations):
            click.echo(
                '[Multipart {}] Incorrect number of volumes: {} '
                '(expected {})'.format(multipart['pid'], len(relations), count)
            )
        for relation in relations:
            child = Document.get_record_by_pid(
                relation['pid'],
                pid_type=relation['pid_type']
            )
            if child['title'] not in titles:
                click.echo(
                    '[Multipart {}] Title "{}" does not exist in '
                    'migration data'.format(
                        multipart['pid'],
                        child['title']
                    )
                )
    search = SeriesSearch().filter(
        'term',
        mode_of_issuance='MULTIPART_MONOGRAPH'
    )
    for multipart_hit in search.scan():
        # Check if any child is missing
        if 'volumes' in multipart_hit._migration:
            volumes = multipart_hit._migration.volumes
            multipart = Series.get_record_by_pid(multipart_hit.pid)
            validate_multipart_relation(multipart, volumes)
    click.echo('Multipart validation check done!')
|
<reponame>bosscha/GASS
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
__author__="<NAME>"
__version__="0.1"
__email__="<EMAIL>"
'''
File name: Useful_Functions_for_GA_Main.py
Author: <NAME>
Description : Useful functions to make test - Subarray selection using Genetic Algorithm
with parameters given in the file '.txt'
Context : ALMA internship
Date created: 06/2017
Date last modified: 08/2017
Python Version: 2.7
'''
import numpy as np
from pylab import *
import pandas as pd
import csv
import time
import sys
import os
DOSSIER_COURRANT = os.path.dirname(os.path.abspath(__file__))
DOSSIER_PARENT = os.path.dirname(DOSSIER_COURRANT)
sys.path.append(os.path.join(DOSSIER_PARENT,"Codes"))
import classes as c
import evolution as ev
import compute_constraints_subarray as ccs
#==================================================================================================================================
#==================================================================================================================================
# Useful functions in order to:
# - initialize a population from a configuration file
# - evolve a population using a genetic algorithm
# - save the subarray solutions in "casa" format and the final constraints results
#==================================================================================================================================
def Init_Pop(cfg_file,configuration_Manager,source_declination,source_hour_angle, num_subarrays, list_num_pads_subarrays, num_arrays,list_objective_constraints,list_constraints_weights,list_subarrays_weights):
    """
    Initialize a population with :
    - cfg_file : a file containing the pads to consider
    - configuration_Manager : a class which allows to configure the population
    - source_declination / source_hour_angle : source coordinates for the observation
    - num_subarrays : a number of subarrays in each array
    - list_num_pads_subarrays : a list of the number of pads per subarray
    - num_arrays : a number of arrays for the population
    - list_objective_constraints / list_constraints_weights /
      list_subarrays_weights : objective values and weights forwarded
      to the configuration manager
    Returns the newly created Population.
    """
    # Pad positions file: whitespace-separated columns E, N, U, diam, pad name.
    cfg = pd.read_csv(
        cfg_file, comment='#', names=['E', 'N', 'U', 'diam', 'pad'], sep='\s+')
    cm=configuration_Manager
    xx, yy , zz =cfg[['E', 'N', 'U']].values.transpose()
    diam=cfg[['diam']].values.transpose()
    name =cfg[['pad']].values.transpose()
    # NOTE(review): `s` (total pad count across subarrays) is computed but
    # never used — candidate for removal or a sanity check against len(xx).
    s=0
    for i in range(0,num_subarrays):
        s+=list_num_pads_subarrays[i]
    # Reset the manager and register every pad from the file.
    cm.clear()
    for i in range(0,len(xx)):
        pad=c.Pad(xx[i],yy[i],zz[i],diam[0][i],name[0][i])
        cm.add_Pad(pad)
    cm.set_Source_Declination(source_declination)
    cm.set_Source_Hour_Angle(source_hour_angle)
    cm.set_Number_Subarrays(num_subarrays)
    cm.set_Number_Pads_Per_Subarray(list_num_pads_subarrays)
    cm.set_Objective_Constraints(list_objective_constraints)
    cm.set_Constraints_Weights(list_constraints_weights)
    cm.set_Subarrays_Weights(list_subarrays_weights)
    pop=c.Population(cm,num_arrays,True)
    return pop
#==================================================================================================================================
def Evolve_Pop(pop,num_generations,termination_condition,threshold,configuration_Manager,mutation_Rate,tournament_Size,elitism_Num,display=True):
    """
    Evolve a population for up to num_generations generations with parameters :
    - pop : the population to evolve
    - num_generations : a number of generations
    - termination_condition : when True, stop early once the best score exceeds threshold
    - threshold : early-stopping score threshold (only used with termination_condition)
    - configuration_Manager : a class which allows to configure the population
    - mutation_Rate : a rate for the mutation
    - tournament_Size : a number of arrays that can participate to the tournament which selects the parents for crossover
    - elitism_Num : a number of survivors to keep for the next population
    - display : boolean to display or not the result every 10 generations
    Returns (final population, per-generation best scores, generation indexes).
    """
    ga=ev.GA(configuration_Manager,mutation_Rate,tournament_Size,elitism_Num)
    counter=0
    Scores=[]
    Indexes=[]
    condition=False
    while counter<=num_generations and not condition:
        pop=ga.evolve_Population(pop)
        best,pos=pop.get_Fittest()
        # Scores is a list of single-element lists (one best score per generation).
        Scores.append([best.get_Score()])
        Indexes.append(counter)
        if counter%10==0 and display==True :
            print "Best score _ Generation n°", counter," = ", best.get_Score()
        if termination_condition==True:
            condition=Scores[counter][0]>threshold
            if condition==True:
                print "Threshold reached _ Best score _ Generation n°",counter," = ",Scores[counter][0]
        counter+=1
    return pop, Scores, Indexes
#==================================================================================================================================
def Subarrays_to_cfg(array,cfg_file,saving_constraints_results,Constraints,score,cpu):
    """
    Save the subarrays of the array solution (CASA .cfg format)
    and the final constraints results of each subarray of the array solution
    in the folder "GA_Subarray_Selection/Results/".
    Returns the list of created result folder names.
    """
    ##Saving of the subarrays at CASA format
    # Collect [E, N, U, diam, name] rows per subarray.
    Table=[]
    for i in range(0,array.configuration_Manager.get_Number_Subarrays()):
        Table.append([])
        for j in range(0,array.configuration_Manager.get_Number_Pads_Per_Subarray()[i]):
            Table[i].append([])
            pad=array.get_Pad(i,j)
            Table[i][j].append(pad.get_E())
            Table[i][j].append(pad.get_N())
            Table[i][j].append(pad.get_U())
            Table[i][j].append(pad.get_Diam())
            Table[i][j].append(pad.get_Name())
    stockage_fichier=[]
    # Timestamped results folder so repeated runs never collide.
    t=time.strftime("%d-%m-%Y_%H:%M:%S")
    os.mkdir("GA_Subarray_Selection/Results/Results_"+str(t))
    os.mkdir("GA_Subarray_Selection/Results/Results_"+str(t)+"/Subarrays_Storage")
    stockage_fichier.append("Results_"+str(t))
    for i in range(0,array.configuration_Manager.get_Number_Subarrays()):
        file_0="Subarray_"+str(i)+".cfg"
        with open("GA_Subarray_Selection/Results/Results_"+str(t)+"/Subarrays_Storage/"+file_0,"wb") as fw:
            fw.write("# observatory=ALMA \n")
            fw.write("# coordsys=LOC (local tangent plane) \n")
            fw.write("# x y z diam pad# \n")
            co = csv.writer(fw, delimiter=' ', quotechar='"')
            for ligne in Table[i]:
                co.writerow(ligne)
    ##Saving of the final constraints results of the array solution of the GA
    file_1="Final_Constraints_Results.txt"
    with open("GA_Subarray_Selection/Results/Results_"+str(t)+"/"+file_1,"wb") as fw:
        fw.write('------------------------------------------------------------\n')
        fw.write("Final Constraints Results for the Array solution\n")
        fw.write("Version : "+str(__version__)+"\n")
        fw.write("Best score : "+str(score)+"\n")
        fw.write("CPU Time : "+str(cpu)+" sec"+"\n")
        fw.write('------------------------------------------------------------\n')
        fw.write("\n")
        # BUGFIX: this loop must run while `fw` is still open; it previously
        # sat outside the `with` block and wrote to a closed file handle.
        for i in range(0,array.configuration_Manager.get_Number_Subarrays()):
            fw.write('------ Spatial Resolution, Maximum Recoverable Scale, Elongation, Sidelobe Percentage for Subarray '+str(i)+' ------ \n')
            co = csv.writer(fw, delimiter=',', quotechar='"')
            co.writerow(Constraints[i])
            fw.write("\n")
    return stockage_fichier
|
from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME cctbx.xfel.h5_average
import numpy as np
import h5py
import iotbx.phil
import libtbx.load_env
from dials.util.options import OptionParser
import sys, os
from six.moves import range
phil_scope = iotbx.phil.parse("""
average = True
.type = bool
.help = Generate average image
max = True
.type = bool
.help = Generate maximum projection
stddev = True
.type = bool
.help = Generate standard deviation image
""")
class Processh5(object):
    """
    Compute the average ("avg"), maximum projection ("max") or standard deviation ("stddev") of a
    series of images provided as a multi-image hdf5 file.
    """
    def __init__(self, h5file):
        """Open *h5file* and record the image shape and entry count.

        NOTE(review): indexing ``.values()`` directly is Python 2 h5py
        behaviour; Python 3 would need ``list(self.readfile.values())``.
        """
        self.h5file = h5file
        self.readfile = h5py.File(h5file, "r")
        # Shape of a single image; entry 0 is not an image (see process()),
        # so the first image lives at index 1.
        self.shape_xy = self.readfile.values()[1].values()[0].value.shape
        # Total number of top-level entries, *including* the non-image entry 0.
        self.length = len(self.readfile.values())
    def prepare_writefile_and_array(self, func):
        """Create the output file ``<basename>_<func><ext>`` and a scratch array.

        :returns: (writefile, writefile_name, arr) where arr has exactly one
            slot per image.
        """
        writefile_parts = (os.path.basename(os.path.splitext(self.h5file)[0]),
                           os.path.splitext(self.h5file)[1])
        writefile_name = ("_"+func).join(writefile_parts)
        writefile = h5py.File(writefile_name, "w")
        # BUGFIX: only (length - 1) entries are images. Allocating `length`
        # rows left a permanent all-zero slice that skewed avg and stddev.
        arr = np.zeros((self.length - 1, self.shape_xy[0], self.shape_xy[1]))
        # Copy the first image dataset so the output file contains a dataset
        # of the right shape to overwrite with the reduced result.
        self.readfile.copy(str(self.readfile.values()[1].name), writefile['/'])
        return (writefile, writefile_name, arr)
    def process(self, func):
        """Reduce all images with *func* ('avg' | 'max' | 'stddev') and write the result."""
        writefile, writefile_name, arr = self.prepare_writefile_and_array(func)
        # Entries 1..length-1 are the images; entry 0 is skipped.
        for ii in range(self.length-1):
            arr[ii] = np.asarray(self.readfile.values()[ii+1].values()[0].value)
        func_lookup = {
            "avg": np.mean,
            "max": np.max,
            "stddev": np.std
        }
        res = func_lookup[func](arr, axis=0)
        # Write the reduced image into the copied dataset of the new file.
        val = writefile.values()[0].values()[0]
        val.write_direct(res)
        writefile.close()
        print("Wrote", writefile_name)
    def cleanup(self):
        """Close the input file handle."""
        self.readfile.close()
def run(args):
    """Process each *.h5 / *.hdf5 argument with the reductions selected by phil.

    :param args: command-line arguments (without the program name).
    """
    if ("--help" in args) or ("-h" in args) or (len(args) == 0):
        print("Usage: %s r25792.h5" % libtbx.env.dispatcher_name)
        return
    elif ("--config" in args) or ("-c" in args):
        # BUGFIX: phil_scope is already a parsed scope object; passing it back
        # into iotbx.phil.parse() failed. Show the scope directly.
        phil_scope.show(attributes_level=2)
        return
    # BUGFIX: the original popped from `args` while iterating it, which skips
    # the element after every match (e.g. of two consecutive .h5 paths the
    # second was silently dropped). Partition into two new lists instead.
    h5s = [arg for arg in args
           if arg.endswith(".h5") or arg.endswith(".hdf5")]
    args = [arg for arg in args
            if not (arg.endswith(".h5") or arg.endswith(".hdf5"))]
    # Remaining args are phil parameters, handed to the dials OptionParser.
    sys.argv = [sys.argv[0]] + args
    parser = OptionParser(phil=phil_scope)
    params, options = parser.parse_args(show_diff_phil=True)
    for h5 in h5s:
        print("Processing image %s..." % h5)
        processor = Processh5(h5)
        if params.average:
            processor.process("avg")
        if params.max:
            processor.process("max")
        if params.stddev:
            processor.process("stddev")
        processor.cleanup()
# Script entry point: forward CLI arguments (minus the program name) to run().
if __name__ == "__main__":
    run(sys.argv[1:])
|
'''
Created on May 13, 2015
@author: corilo
'''
import csv
import sys
from PySide.QtCore import SIGNAL
from PySide.QtGui import QMainWindow, QFileDialog, QTreeWidgetItem, QIcon, QPixmap, QMessageBox
from res import MainRes
from yec.nhmfl.icr.MzFinder.Inputs.Import_FTMS_Thermo.Load_FTMS_Thermo_File import ImportThermoFile
from yec.nhmfl.icr.MzFinder.Output.MzMatches2Excel import MzMatches_To_Report
from yec.nhmfl.icr.MzFinder.ui import run_Add_mz_dialog
from yec.nhmfl.icr.MzFinder.ui.Main.MainWindow import Ui_MainWindow
class MainWindow(QMainWindow):
    """Main window of the MzFinder GUI (Python 2 / PySide).

    Lets the user queue Thermo *.raw files and m/z search entries, then runs
    the matching and exports the results to an Excel report.
    """
    # UI state flags (class-level defaults shared until set per instance).
    nativo = False
    tof_is_open = False
    icr_is_open = False
    hplc_is_open = False
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Old-style signal connection: menu action -> file picker.
        self.connect(self.ui.actionOpen_Thermo_raw, SIGNAL("triggered()"), self.add_file)
        #QtCore.QObject.connect(self.ui.actionOpen_Thermo_raw, QtCore.SIGNAL("released()"), MainWindow.add_file)
    def add_file(self):
        # Let the user pick one or more Thermo .raw files and append them
        # to the file list widget.
        arquivo_Caminhos = QFileDialog.getOpenFileNames(None,
                                                        "Select Thermo *.raw files",
                                                        '/',
                                                        "(*.raw);; All Files (*.*)" , None)
        print arquivo_Caminhos
        # getOpenFileNames returns (paths, selected_filter); index 0 is the paths.
        for caminho in arquivo_Caminhos[0]:
            self.ui.listWidget.addItem(caminho)
            print caminho
    def delete_file(self):
        # Remove the selected entries from the file list.
        listItems= self.ui.listWidget.selectedItems()
        if not listItems: return
        for item in listItems:
            self.ui.listWidget.takeItem(self.ui.listWidget.row(item))
    def add_mz(self):
        # Open the "add m/z" dialog, which appends rows to the tree widget.
        self.add_mz_dialog = run_Add_mz_dialog.run(self.ui.treeWidget)
    def delete_mz(self):
        # Remove the selected m/z rows from the tree widget.
        for item in self.ui.treeWidget.selectedItems():
            index = self.ui.treeWidget.indexOfTopLevelItem(item)
            self.ui.treeWidget.takeTopLevelItem(index)
    def import_from_text_file(self):
        # Load m/z entries from a tab-separated text file: each non-empty
        # row becomes a 4-column tree item.
        arquivo_Caminho = QFileDialog.getOpenFileName(None,
                                                      "Select txt file",
                                                      '/',
                                                      "(*.txt);; All Files (*.*)" , None)[0]
        filename = arquivo_Caminho
        # NOTE(review): the file handle is kept on self and never closed.
        self.f = open(filename)
        reader = csv.reader(self.f, delimiter='\t')
        linhas = [linha for linha in reader]
        for i in linhas:
            if len(i) == 0:
                continue
            else:
                print i
                a = QTreeWidgetItem(self.ui.treeWidget)
                a.setText(0, str(i[0]))
                a.setText(1, str(i[1]))
                a.setText(2, str(i[2]))
                a.setText(3, str(i[3]))
    def process_clicked(self):
        # Collect queued files and m/z windows, run the Thermo import in a
        # worker, then export matches and report success/failure in a dialog.
        dict_result = {}
        list_file_name = []
        for index in xrange(self.ui.listWidget.count()):
            arquivo = self.ui.listWidget.item(index).text()
            list_file_name.append(arquivo)
        list_of_tuples_mass_and_windows = []
        for index in xrange(self.ui.treeWidget.topLevelItemCount()):
            item = self.ui.treeWidget.topLevelItem(index)
            # Columns: mass, window, "threshold,label" (split on the comma).
            list_of_tuples_mass_and_windows.append((float(item.text(0)), float(item.text(1)), float(item.text(2).split(",")[0]), str(item.text(2).split(",")[1])))
        list_of_tuples_mass_and_windows_thresould = self.RemoveRepetidosLista(list_of_tuples_mass_and_windows)
        find_peaks = ImportThermoFile(list_of_tuples_mass_and_windows_thresould, list_file_name, dict_result)
        # Worker thread: start and block until done (results land in dict_result).
        find_peaks.start()
        find_peaks.join()
        icon_reader = QIcon(":/icons/images/find.png")
        icon_readerII = QPixmap(":/icons/images/find.png")
        message = QMessageBox()
        if len(dict_result.keys()) > 0:
            try:
                MzMatches_To_Report(dict_result)
                message.setIconPixmap(icon_readerII)
                message.setText('Success')
                message.setWindowIcon(icon_reader)
                message.setWindowTitle("Success")
                message.exec_()
            # NOTE(review): bare except hides the actual export error; consider
            # logging the exception before showing the failure dialog.
            except:
                message.setIconPixmap(icon_readerII)
                message.setText('Ups something went wrong')
                message.setWindowIcon(icon_reader)
                message.setWindowTitle("Success")
                message.exec_()
        else:
            message.setIconPixmap(icon_readerII)
            message.setText('Sorry no matches found')
            message.setWindowIcon(icon_reader)
            message.setWindowTitle("No matches")
            message.exec_()
    def RemoveRepetidosLista(self, lista):
        # Deduplicate while preserving order (items may be unhashable-safe).
        list2 = []
        [list2.append(i) for i in lista if not i in list2]
        return list2
def run(app):
    """Create the main window, show it, and enter the Qt event loop.

    :param app: the QApplication instance driving the GUI.
    """
    window = MainWindow()
    window.show()
    # exec_() blocks until the GUI quits; forward its exit code to the OS.
    sys.exit(app.exec_())
|
import numpy as np
import torch
import os
from .base_model import BaseModel
from . import networks_basic as networks
class DistModel(BaseModel):
    """Perceptual-distance model wrapper (LPIPS-style) around several backends."""
    def name(self):
        # Human-readable identifier, e.g. "net-lin [alex]"; set in initialize().
        return self.model_name
    def initialize(
            self, model='net-lin', net='alex', colorspace='Lab',
            pnet_rand=False, pnet_tune=False, model_path=None, use_gpu=True,
            printNet=False, spatial=False, is_train=False, lr=.0001, beta1=0.5,
            version='0.1', gpu_ids=[0]):
        '''
        INPUTS
            model - ['net-lin'] for linearly calibrated network
                    ['net'] for off-the-shelf network
                    ['L2'] for L2 distance in Lab colorspace
                    ['SSIM'] for ssim in RGB colorspace
            net - ['squeeze','alex','vgg']
            model_path - if None, will look in weights/[NET_NAME].pth
            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
            use_gpu - bool - whether or not to use a GPU
            printNet - bool - whether or not to print network architecture out
            spatial - bool - whether to output an array containing varying distances across spatial dimensions
            spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
            spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
            spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
            is_train - bool - [True] for training mode
            lr - float - initial learning rate
            beta1 - float - initial momentum term for adam
            version - 0.1 for latest, 0.0 was original (with a bug)
            gpu_ids - int array - [0] by default, gpus to use
        '''
        # NOTE(review): gpu_ids=[0] is a mutable default argument — safe only
        # as long as it is never mutated; consider gpu_ids=None + fallback.
        BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)
        self.model = model
        self.net = net
        self.is_train = is_train
        self.spatial = spatial
        self.gpu_ids = gpu_ids
        self.model_name = '%s [%s]' % (model, net)
        if(self.model == 'net-lin'):  # pretrained net + linear layer
            self.net = networks.PNetLin(
                pnet_rand=pnet_rand,
                pnet_tune=pnet_tune,
                pnet_type=net,
                use_dropout=True,
                spatial=spatial,
                version=version,
                lpips=True)
            kw = {}
            if not use_gpu:
                kw['map_location'] = 'cpu'
            if(not is_train):
                # Calibrated weights are always fetched from this fixed URL;
                # the model_path parameter is currently ignored here.
                # print('Loading model from: %s'%model_path)
                state_dict = torch.hub.load_state_dict_from_url(
                    "http://folk.ntnu.no/haakohu/checkpoints/perceptual_similarity/alex.pth", **kw)
                self.net.load_state_dict(state_dict, strict=False)
        elif(self.model == 'net'):  # pretrained network
            self.net = networks.PNetLin(
                pnet_rand=pnet_rand, pnet_type=net, lpips=False)
        elif(self.model in ['L2', 'l2']):
            # not really a network, only for testing
            self.net = networks.L2(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'L2'
        elif(self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']):
            self.net = networks.DSSIM(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'SSIM'
        else:
            raise ValueError("Model [%s] not recognized." % self.model)
        self.parameters = list(self.net.parameters())
        if self.is_train:  # training mode
            # extra network on top to go from distances (d0,d1) => predicted
            # human judgment (h*)
            self.rankLoss = networks.BCERankingLoss()
            self.parameters += list(self.rankLoss.net.parameters())
            self.lr = lr
            self.old_lr = lr
            self.optimizer_net = torch.optim.Adam(
                self.parameters, lr=lr, betas=(beta1, 0.999))
        else:  # test mode
            self.net.eval()
        if(use_gpu):
            self.net.to(gpu_ids[0])
            # Wrap in DataParallel so batches are split across all gpu_ids.
            self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
            if(self.is_train):
                self.rankLoss = self.rankLoss.to(
                    device=gpu_ids[0])  # just put this on GPU0
        if(printNet):
            print('---------- Networks initialized -------------')
            networks.print_network(self.net)
            print('-----------------------------------------------')
    def forward(self, in0, in1, retPerLayer=False):
        ''' Function computes the distance between image patches in0 and in1
        INPUTS
            in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
        OUTPUT
            computed distances between in0 and in1
        '''
        return self.net.forward(in0, in1, retPerLayer=retPerLayer)
<reponame>Mon-ius/flask-deploy
from config import *
from validation import *
from utils import *
import subprocess
import getpass
import os
import click
# Top-level click group; subcommands are registered with cx.add_command()
# at the bottom of the module.
@click.group()
def cx():
    """A quick deploy script for productive flask app."""
# Prompt for CloudFlare credentials and generate the SSL config.
# Relies on the module globals DOMAIN/USR/CUR_LOC being set by deploy()/gen()
# before it is invoked. Raises JumpOutFuckingClick as control flow to return
# to the caller's except handler (click commands cannot simply `return` to
# the invoking command).
@click.command(context_settings=dict(
    allow_extra_args=True
))
@click.option('--email', prompt='Your email', help='Email,Apply ssl certification,CloudFlare.',
              callback=validate_email)
@click.option('--key', prompt='Your secret key', help='Secret Key,Apply ssl certification,CloudFlare.')
@click.option('--domain')
@click.pass_context
def miss_tmp(ctx,email, key,domain):
    ssl_file_gen(DOMAIN, USR, CUR_LOC, email, key)
    raise JumpOutFuckingClick
# Interactive DNS-provider chooser, invoked when the user has no SSL cert.
# Dispatches to op_cf()/op_ali(), each of which signals completion with
# JumpOutFuckingClick2; this command then signals its own completion to the
# caller with JumpOutFuckingClick.
@click.command(context_settings=dict(
    allow_extra_args=True
))
@click.option('--dns_type', prompt='Service options. \n [1] CloudFlare \n [2] AliYun \n\n\nYour Choice')
@click.pass_context
def miss_ssl(ctx,dns_type):
    """
    These are available DNS provider servie options. \n
    [1] CloudFlare  <CF_Email,CF_Key> --dns dns_cf \n
    [2] AliYun <Ali_Key,Ali_Secret> --dns dns_ali \n
    """
    # if not dns_type:
    if(str(dns_type)=="1"):
        try:
            op_cf()
        except JumpOutFuckingClick2:
            click.echo("<_@,@_<2")
    if(str(dns_type)=="2"):
        try:
            op_ali()
        except JumpOutFuckingClick2:
            click.echo("<_@,@_<2")
    raise JumpOutFuckingClick
# Prompt for AliYun API credentials and generate the multi-domain SSL config
# (acme.sh-style dns_ali env vars). Uses the module globals DOMAIN/USR/CUR_LOC
# and raises JumpOutFuckingClick2 as control flow back to the caller.
@click.command(short_help='AliYun Option',context_settings=dict(
    allow_extra_args=True
))
@click.option('--ali_key', prompt='Ali_Key')
@click.option('--ali_secret', prompt='Ali_Secret')
@click.pass_context
def op_ali(ctx,ali_key,ali_secret):
    dns_op = "dns_ali"
    op_1 = "Ali_Key={}".format(ali_key)
    op_2 = "Ali_Secret={}".format(ali_secret)
    ssl_multi_gen(DOMAIN, USR, CUR_LOC, op_1,op_2,dns_op)
    raise JumpOutFuckingClick2
# Prompt for CloudFlare API credentials and generate the multi-domain SSL
# config (acme.sh-style dns_cf env vars). Mirrors op_ali(); uses the module
# globals DOMAIN/USR/CUR_LOC and raises JumpOutFuckingClick2 as control flow.
@click.command(short_help='CloudFlare Option',context_settings=dict(
    allow_extra_args=True
))
@click.option('--cf_email', prompt='CF_Email')
@click.option('--cf_key', prompt='CF_Key')
@click.pass_context
def op_cf(ctx,cf_email,cf_key):
    dns_op = "dns_cf"
    op_1 = "CF_Email={}".format(cf_email)
    op_2 = "CF_Key={}".format(cf_key)
    ssl_multi_gen(DOMAIN, USR, CUR_LOC, op_1,op_2,dns_op)
    raise JumpOutFuckingClick2
#Deploy
# Full deployment: generate all config files (uwsgi/nginx/systemd, optionally
# docker) into ./<domain>/ and walk the user through SSL setup.
# NOTE(review): this body is nearly identical to gen() below except for the
# final echo — candidate for a shared helper.
@click.command()
@click.option('--domain', prompt='Your domain', help='The domain to be configured.',
              callback=validate_domain
              )
@click.option('--dns_option', help='DNS option,Apply ssl certification. \n[1]CloudFlare,\n[2]AliYun.',
              callback=validate_options
              )
@click.option('--docker', help='Confirm having database or not.')
@click.pass_context
def deploy(ctx, domain, dns_option,docker):
    """Deploy the flask app right now."""
    # Publish the run parameters as module globals for the helper commands
    # (miss_ssl/op_cf/op_ali) which cannot receive them as arguments.
    global DOMAIN, USR, CUR_LOC
    usr = getpass.getuser()
    loc = os.path.join(os.getcwd(), domain)
    DOMAIN, USR, CUR_LOC = domain, usr, loc
    if not os.path.exists(CUR_LOC):
        try:
            os.makedirs(CUR_LOC)
        except:
            # Creation failed (presumably a permission error): offer to chown
            # the current directory to the invoking user and retry.
            if click.confirm("You have no privilege of current location Would you like to own it?"):
                subprocess.call(['sudo', 'chown', '-R', usr+":"+usr, './'])
                os.makedirs(loc)
            else:
                click.echo("You have no previlege!!!")
                return
    uwsgi_file_gen(DOMAIN, USR, CUR_LOC)
    nginx_file_gen(DOMAIN, USR, CUR_LOC)
    service_file_gen(DOMAIN, USR, CUR_LOC)
    if not docker:
        if not click.confirm('Do you have database already?'):
            docker_file_gen(DOMAIN, USR, CUR_LOC)
    if not dns_option:
        if not click.confirm('Do you have SSL certification?'):
            # The helper commands signal "done" by raising; catch and continue.
            try:
                # miss_tmp()
                miss_ssl()
            except JumpOutFuckingClick:
                click.echo("<_@,@_<")
    else:
        click.echo("ss"+dns_option)
        if(str(dns_option)=="1"):
            try:
                op_cf()
            except JumpOutFuckingClick2:
                click.echo("<_@,@_<2")
        if(str(dns_option)=="2"):
            try:
                op_ali()
            except JumpOutFuckingClick2:
                click.echo("<_@,@_<2")
    click.echo("It's deployed. Fake")
#Gen
# Generate-only variant of deploy(): writes all config files and handles the
# SSL dialogue but performs no final deployment step.
# NOTE(review): this body duplicates deploy() line for line — candidate for a
# shared helper.
@click.command()
@click.option('--domain', prompt='Your domain', help='The domain to be configured.',
              callback=validate_domain
              )
@click.option('--dns_option', help='DNS option,Apply ssl certification. \n[1]CloudFlare,\n[2]AliYun.',
              callback=validate_options
              )
@click.option('--docker', help='Confirm having database or not.')
@click.pass_context
def gen(ctx, domain, dns_option,docker):
    """Essential config.(Just generate)"""
    # Publish the run parameters as module globals for the helper commands.
    global DOMAIN, USR, CUR_LOC
    usr = getpass.getuser()
    loc = os.path.join(os.getcwd(), domain)
    DOMAIN, USR, CUR_LOC = domain, usr, loc
    if not os.path.exists(CUR_LOC):
        try:
            os.makedirs(CUR_LOC)
        except:
            # Creation failed (presumably a permission error): offer to chown
            # the current directory to the invoking user and retry.
            if click.confirm("You have no privilege of current location Would you like to own it?"):
                subprocess.call(['sudo', 'chown', '-R', usr+":"+usr, './'])
                os.makedirs(loc)
            else:
                click.echo("You have no previlege!!!")
                return
    uwsgi_file_gen(DOMAIN, USR, CUR_LOC)
    nginx_file_gen(DOMAIN, USR, CUR_LOC)
    service_file_gen(DOMAIN, USR, CUR_LOC)
    if not docker:
        if not click.confirm('Do you have database already?'):
            docker_file_gen(DOMAIN, USR, CUR_LOC)
    if not dns_option:
        if not click.confirm('Do you have SSL certification?'):
            # The helper commands signal "done" by raising; catch and continue.
            try:
                # miss_tmp()
                miss_ssl()
            except JumpOutFuckingClick:
                click.echo("<_@,@_<")
    else:
        click.echo("ss"+dns_option)
        if(str(dns_option)=="1"):
            try:
                op_cf()
            except JumpOutFuckingClick2:
                click.echo("<_@,@_<2")
        if(str(dns_option)=="2"):
            try:
                op_ali()
            except JumpOutFuckingClick2:
                click.echo("<_@,@_<2")
# Run the previously generated startup script for a domain. Only checks that
# the domain folder and "start.sh" exist; the actual execution path reports
# status via click.echo.
@click.command()
@click.option('--domain', prompt='Your domain', help='The domain to be configured.',
              callback=validate_domain)
@click.pass_context
def run(ctx, domain):
    """Run generated script: <startup.sh>"""
    global DOMAIN, USR, CUR_LOC
    usr = getpass.getuser()
    loc = os.path.join(os.getcwd(), domain)
    DOMAIN, USR, CUR_LOC = domain, usr, loc
    if not os.path.exists(CUR_LOC):
        click.echo("No folder for domain({}) at user({}) environment, please try fd/flask-deploy generate to init.".format(domain,usr))
        return
    else:
        try:
            current_files = os.listdir(CUR_LOC)
            # NOTE(review): the docstring says "startup.sh" but the check is
            # for "start.sh" — confirm which name the generator writes.
            if "start.sh" in current_files:
                click.echo("On load")
                return
            else:
                click.echo("No file for domain({}) at user({}) environment, please try fd/flask-deploy generate to init.".format(CUR_LOC,usr))
                return
        except:
            # listdir failed (presumably permissions): offer to chown and
            # recreate the folder.
            if click.confirm("You have no privilege of current location Would you like to own it?"):
                subprocess.call(['sudo', 'chown', '-R', usr+":"+usr, './'])
                os.makedirs(loc)
            else:
                click.echo("You have no previlege!!!")
                return
# Register the subcommands on the `cx` group and run it when invoked directly.
cx.add_command(gen, 'gen')
cx.add_command(deploy, 'deploy')
cx.add_command(run, 'run')
if __name__ == '__main__':
    cx()
|
<gh_stars>1-10
from selenium import webdriver
import os
import requests
import bs4
import re
import selenium.webdriver
import RandomHeaders
import threading
import sys
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def convertHeadless(driver, url):
    """Migrate a headless (PhantomJS) session into a visible Firefox window.

    :param driver: the headless webdriver whose session (cookies) to carry over.
    :param url: page to load in the new browser.
    :returns: a new Firefox webdriver logged in with the old session's cookies.
    """
    #converts a phantomjs browser to a firefox webdriver window
    cookies = driver.get_cookies()
    #saves cookies as dict
    driver.quit()
    #closes the phantomjs window
    driver = webdriver.Firefox()
    #replaces phantomjs instance with firefox browser
    driver.get(url)
    # has to go to the url before adding cookies
    # If you were doing this with shoes - it should show an empty cart
    for cookie in cookies:
        #adds cookies to the driver
        driver.add_cookie(cookie)
    driver.get(url)
    # this will reload the url with the cookies you imported
    return driver
def URLGen(model, size):
    """Build the adidas.com product URL that pre-selects a shoe size.

    The site encodes sizes numerically: code 580 corresponds to US size 6.5
    and each half size adds 10.
    """
    size_code = int((size - 6.5) * 20 + 580)
    return 'http://www.adidas.com/us/{m}.html?forceSelSize={m}_{c}'.format(
        m=model, c=size_code)
def createHeadlessBrowser(proxy=None, XResolution=1024, YResolution=768):
    """Start a PhantomJS driver posing as desktop Chrome.

    When ``proxy`` ("host:port") is given, PhantomJS is routed through it
    with SSL verification relaxed.  The window is sized to the requested
    resolution and page loads time out after 20 seconds.
    """
    capabilities = dict(DesiredCapabilities.PHANTOMJS)
    capabilities["phantomjs.page.settings.userAgent"] = (
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36')
    if proxy is not None:
        proxy_args = [
            '--proxy={}'.format(proxy),
            '--proxy-type=https',
            '--ignore-ssl-errors=true',
            '--ssl-protocol=any',
            '--web-security=false',
        ]
        driver = webdriver.PhantomJS(service_args=proxy_args,
                                     desired_capabilities=capabilities)
    else:
        driver = webdriver.PhantomJS(desired_capabilities=capabilities)
    driver.set_window_size(XResolution, YResolution)
    driver.set_page_load_timeout(20)
    return driver
def grabCurrentTitle(url):
    """Fetch ``url`` in a throwaway PhantomJS session and return the page title.

    Used to capture the splash-page title so workers can detect when it changes.
    """
    browser = webdriver.PhantomJS()
    browser.get(url)
    page_title = browser.title
    browser.close()
    browser.quit()
    return page_title
def verifyProxy(proxy, timeout=10):
    """Check whether ``proxy`` ("host:port") can reach the internet.

    Sends a request to google.com *through the proxy* and returns True on
    success, False on any failure (timeout, connection error, bad proxy).

    Fixes two defects in the original: the ``proxy`` argument was never
    used (the request went out directly), and the success path fell off the
    end returning None instead of True.
    """
    try:
        requests.get('https://www.google.com/',
                     proxies={'http': proxy, 'https': proxy},
                     timeout=timeout)
    except Exception:
        return False
    return True
class bot(object):
    """Manage a fleet of headless (PhantomJS) browser sessions, one per proxy.

    Drivers are started and navigated concurrently via threads; failed
    proxies are tracked separately from successful ones.
    """
    #placeholder bot class - will eventually merge a ton of stuff into this
    def __init__(self, proxy, saveimages=True, url='https://www.google.com/'):
        # `proxy` is a *list* of "host:port" strings despite the singular
        # name (see addProxy / startAllDrivers).
        print('Initiated bot')
        self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
        self.proxyList = proxy
        print(self.proxyList)
        self.saveSS = saveimages      # save a screenshot after each navigation
        self.driverList = []          # [{'driver': ..., 'proxy': ...}]
        self.driverInfo = []          # richer per-driver metadata records
        self.failedProxies = []
        self.successProxies = []
        self.targetURL = url
        #why are there so many... this is a bad way of doing this

    def updateHeader(self, userAgent):
        """Replace the stored User-Agent header (not applied to running drivers)."""
        #placeholder function for proxy change
        self.headers = {'User-Agent': userAgent}
        print (self.headers)

    def addProxy(self, proxy):
        """Append a "host:port" proxy string to the pool."""
        self.proxyList.append(proxy)
        print("Successfully added {}".format(proxy))

    def startDriver(self, proxy=None):
        """Start one headless driver (optionally proxied) and load targetURL.

        On any navigation failure the proxy is recorded in failedProxies and
        the driver is closed; on success the driver is registered in
        driverList/driverInfo and a screenshot is saved if enabled.
        """
        if proxy != None:
            print (proxy)
            driver = createHeadlessBrowser(proxy=proxy)
        else:
            driver = createHeadlessBrowser()
        try:
            driver.get(self.targetURL)
        except:
            # NOTE(review): bare except — any load error marks the proxy bad.
            driver.close()
            self.failedProxies.append(proxy)
            return
        self.driverList.append({'driver': driver, 'proxy': proxy})
        self.driverInfo.append({'proxy': proxy, 'driver': driver, 'url': self.targetURL, 'useragent': self.headers})
        self.successProxies.append(proxy)
        #this is just a placeholder url
        if self.saveSS == True:
            # Screenshot named after the proxy host part.
            driver.save_screenshot('static/{}.png'.format(proxy.partition(':')[0]))
        print("started {} driver".format(proxy))

    def goToURL(self, driver, url):
        """Navigate one registered driver record ({'driver','proxy'}) to `url`."""
        self.targetURL = url
        proxy = driver['proxy']
        driver = driver['driver']
        driver.get(url)
        print (driver.title)
        if self.saveSS == True:
            driver.save_screenshot('static/{}.png'.format(proxy.partition(':')[0]))
            print("saved screenshot on {} at {}.png".format(driver, proxy.partition(':')[0]))

    def sendAllToURL(self, url):
        """Navigate every running driver to `url` concurrently (one thread each)."""
        threads = [threading.Thread(target=self.goToURL, args=(driver, url)) for driver in self.driverList]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

    def startAllDrivers(self):
        """Start one driver per proxy in the pool, concurrently."""
        threads = [threading.Thread(target=self.startDriver, args=(proxy,)) for proxy in self.proxyList]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

    def returnDriverInfo(self):
        """Return the list of per-driver metadata records."""
        return self.driverInfo
def grabSS(proxy):
    """Poll the release URL through ``proxy`` until the splash page clears.

    Relies on the module-level URL and SPLASHTITLE globals set in __main__.
    While the page title still matches the splash title a screenshot is
    saved each poll; once it changes, the PhantomJS session cookies are
    transplanted into a visible Firefox window for manual checkout.
    Runs forever, restarting the browser on any error.
    """
    while True:
        try:
            # NOTE(review): `headers` is never used — presumably meant to feed
            # the browser's User-Agent; confirm intent.
            headers = RandomHeaders.LoadHeader()
            driver = webdriver.PhantomJS(service_args=['--proxy={}'.format(proxy), '--proxy-type=https'])
            #driver = webdriver.PhantomJS()
            driver.get(URL)
            while driver.title == SPLASHTITLE:
                driver.save_screenshot('{}.png'.format(proxy.replace(':', '').replace('.', '')))
                #this just visualized the phantomjs driver - you can replace this with pass if you're trying to reduce mem
            cookies_list = driver.get_cookies()
            driver.close()
            driver.quit()
            # NOTE(review): Firefox does not take PhantomJS-style proxy
            # service_args — confirm these options are actually honoured.
            driver = webdriver.Firefox(service_args=['--proxy={}'.format(proxy), '--proxy-type=https'])
            # you can only set cookies for the driver's current domain so visit the page first then set cookies
            driver.get(URL)
            # precautionary - delete all cookies first
            driver.delete_all_cookies()
            for cookie in cookies_list:
                # precautionary - prevent possible Exception - can only add cookie for current domain
                if "adidas" in cookie['domain']:
                    driver.add_cookie(cookie)
            # once cookies are changed browser must be refreshed
            driver.refresh()
            #converts phantomjs cookies into firefox webdriver to check out
        except Exception as exp:
            print (exp)
if __name__ == "__main__":
    # CLI: the URL is taken from argv[2]; proxies come either from a file
    # (via the -r flag) or from the remaining arguments.
    URL = sys.argv[2]
    argv_lower = [arg.lower() for arg in sys.argv]
    if '-r' in argv_lower:
        # BUG FIX: the flag used to be detected via a substring test on
        # str(sys.argv).lower() but then looked up as the literal '-R',
        # raising ValueError whenever a lowercase '-r' was passed.  Match
        # the actual argument case-insensitively, and close the proxy file
        # deterministically with a context manager.
        flag_index = argv_lower.index('-r')
        with open(str(sys.argv[flag_index + 1])) as f:
            PROXIES = f.readlines()
    else:
        # NOTE(review): this slice includes argv[2] (the URL) as a "proxy" —
        # presumably it should be sys.argv[3:]; left as-is for compatibility.
        PROXIES = sys.argv[2:]
    SPLASHTITLE = grabCurrentTitle(URL)
    # One polling worker per proxy.
    threads = [threading.Thread(target=grabSS, args=(proxy,)) for proxy in PROXIES]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
|
<reponame>HugoCMU/pirateAI
import os
import random
import logging
import numpy as np
import pickle
from uuid import uuid4
from keras.models import load_model
from src.dataset import image_analysis
import src.config as config
class Pirate(object):
    """
    Pirates are the agents on the island. When instantiated, pirates load a model
    into GPU memory.
    """

    def __init__(self, dna=None, name='Unborn', win=0, loss=0, saltyness=0, rank=None):
        """
        :param dna: (string) identifier uuid4 string for a pirate
        :param name: (string) the pirate's name
        :param win: (int) number of wins
        :param loss: (int) number of losses
        :param saltyness: (int) an estimate of a Pirate's performance (think ELO)
        :param rank: (int) rank of this pirate from their training batch
        :raises FileNotFoundError: Can't load pirate model
        :raises ValueError: no input given
        """
        self.log = logging.getLogger(__name__)
        # A fresh uuid4 is generated when no dna is supplied.
        self.dna = dna or str(uuid4())
        if name == 'Unborn':
            self.name = self._generate_name(rank=rank)
        else:
            self.name = name
        self.win = win
        self.loss = loss
        self.saltyness = saltyness
        # Model contains weights and graph
        self._model = self._load_model()

    def act(self, input, visualize=False):
        """
        Runs the pirate model on the given input (image, etc).
        :param input: input tensor, format matches model
        :param visualize: (bool) display incoming image and metadata
        :return: (int) action resulting from model.
        :raises ValueError: no input given
        """
        # NOTE(review): `input` shadows the builtin; renaming would change the
        # keyword interface for callers, so it is kept.
        if input is None:
            raise ValueError("Please provide an input image to generate an action")
        # Add a batch dimension for single images (H, W, C) -> (1, H, W, C).
        if len(input.shape) == 3:
            input = np.expand_dims(input, axis=0)
        norm_input_image = input
        output = self._model.predict(norm_input_image)
        # Classification model outputs action probabilities
        action = np.argmax(output)
        if visualize or config.INPUT_DEBUG:  # This blocks the GIL to visualize
            image_analysis(image=input[0, :, :, :], label=action)
        return action

    def description(self):
        """
        Finds and returns the info in hyperparameter text file
        :return: (string) or None
        :raises FileNotFoundError: Can't find hyperparameter text file in path
        """
        # Search the model directory tree for <dna>.pickle.
        for dirpath, _, files in os.walk(config.MODEL_DIR):
            if "{dna}.pickle".format(dna=self.dna) in files:
                with open(os.path.join(dirpath, self.dna + '.pickle'), 'rb') as file:
                    data = pickle.load(file)
                assert isinstance(data, dict), 'Pirate description is corrupted'
                # Pirate description printed out to logger
                self.log.info('--- Pirate %s (dna: %s) ---' % (self.name, self.dna))
                model_summary = data.pop('model_summary', None)
                for line in model_summary:
                    self.log.info(line)
                for key, val in data.items():
                    self.log.info('%s : %s' % (str(key), str(val)))
                return data
        raise FileNotFoundError('Could not find description in path using given dna string')

    def _load_model(self):
        """
        Tries to find pirate model in the model path
        :return: (keras.model) or None
        :raises FileNotFoundError: Can't find model in path
        """
        for dirpath, _, files in os.walk(config.MODEL_DIR):
            if self.dna + '.h5' in files:
                return load_model(os.path.join(dirpath, self.dna + '.h5'))
        raise FileNotFoundError('Could not find model in path using given dna string')

    def _generate_name(self, rank=-1):
        """
        Generates a proper pirate name
        :param rank: (int) rank with respect to training batch
        :return:(string) name
        """
        name = ''
        # Titles are ordered based on rank
        titles = ['Salty ', 'Admiral ', 'Captain ', 'Don ', 'First Mate ', 'Gunmaster ',
                  'Sailor ', 'Deckhand ', 'Mc', 'Cookie ', 'Lil', '']
        # Out-of-range (or None) ranks simply get no title.
        if rank in range(len(titles)):
            name += titles[rank]
        # The real part of the name is chosen randomly
        real_names = ['Jack', 'Haddock', 'Blackbeard', 'Will', 'Long', 'Simon', 'Barbossa']
        name += random.choice(real_names)
        self.log.debug('The Pirate %s has been created' % name)
        return name

    def __eq__(self, pirate):
        """
        Compare pirates using their dna
        :param pirate: (Pirate) the other pirate
        :return: (bool) True if dna matches
        """
        # NOTE(review): defining __eq__ without __hash__ makes Pirate
        # instances unhashable in Python 3 — confirm that is intended.
        return self.dna == pirate.dna
|
<gh_stars>0
import re
from bs4 import BeautifulSoup
import time
from itertools import islice
import io
# --- Script configuration -------------------------------------------------
start_time = time.time()  # wall-clock start, reported at the end of the run
print('starting...')
# NOTE(review): `global` at module level is a no-op; root_path is already a
# module-level name.  Also machine-specific — consider making configurable.
global root_path
root_path = "C:/Users/Anxhela/PycharmProjects/AI/venv/"
def find_vector(text, count):
    """Compute a normalized sentence vector for an underscore-separated text.

    Adds the per-word vectors one by one (skipping out-of-vocabulary words),
    re-normalizing after every addition — the same accumulation order as the
    original.  ``count`` is only used for progress reporting.
    """
    vector = []
    for token in text.split('_'):
        word_vec = get_vector(token)
        if word_vec[0] != 'NO VEC':
            vector = normalize_vec(sum_vec(vector, word_vec))
    print_status(count)
    return vector
def sum_vec(old, new):
    """Element-wise add ``new`` into ``old`` in place.

    If ``old`` is empty, ``new`` itself is returned (adopted as the
    accumulator); otherwise ``old`` is mutated and returned.
    """
    if not old:
        return new
    for idx in range(len(old)):
        old[idx] = float(old[idx]) + float(new[idx])
    return old
def normalize_vec(vec):
    """Normalize ``vec`` in place so its entries sum to 1 (4-decimal rounding).

    Each element is divided by the total of all elements; the same list
    object is returned.  A zero-sum vector (including the empty vector) is
    returned unchanged — the original raised ZeroDivisionError in that case.
    """
    total = sum(float(item) for item in vec)
    if total == 0:
        return vec
    for i in range(len(vec)):
        vec[i] = round(float(vec[i]) / total, 4)
    return vec
def get_vector(word):
    """Look up the stored embedding vector for ``word``.

    Reads the per-word vector file path from the module-level ``dict_paths``
    mapping (built by build_files_dict) and parses it into a list of floats.
    Returns ``['NO VEC']`` when the word is unknown.

    Fixes from the original: the file handle is now closed even if float()
    raises (via ``with``), the file is opened read-only instead of "r+",
    and the dead ``try/except KeyError`` around the return was removed.
    """
    if word not in dict_paths:
        return ['NO VEC']
    with open(dict_paths[word], "r") as handle:
        contents = handle.read().split(' ')
    return [float(number) for number in contents if len(number) != 0]
def build_files_dict():
    """Split the fastText vector file into one small file per word.

    Reads Data/cc.sq.300.vec (Albanian fastText embeddings), filters out
    tokens containing special characters plus a hard-coded blacklist of
    garbage tokens, and writes each surviving word's vector to its own
    file under Data/Dict/.  Returns a dict mapping word -> vector file path.
    """
    # READ AND PRINT THE WORD2VEC FILE
    passing = 0
    global new_file
    balls_hash2 = {}  # word -> path of the file holding that word's vector
    myfile = open("Data/cc.sq.300.vec", encoding="utf8")
    c = 1
    for line in myfile:
        alb_dict = {}
        line = line.rstrip('\n')
        tokens = line.split(' ')  # tokens[0] is the word, the rest its vector
        # Reject words containing any special character.
        regex = re.compile('[@_!#$%^&*()<>"?/|}{~:]')
        # Hand-curated blacklist of corrupt/garbage tokens observed in the data.
        if regex.search(tokens[0]) is None and tokens[0] != 'aux' and tokens[0] != 'AUX' and tokens[0] != 'PRN' and tokens[0] != 'Aux' \
                and tokens[0] != 'daljes20172016201520142013201220112010200920082007200620052004200320022001200019991998199719961995199419931992199119901989198819871985198419821981198019761975197419731972197119661964196219601957195619531942'\
                and tokens[0] != 'prn' and tokens[0] != 'Prn'\
                and tokens[0] != 'Kuvendet-e-Dibres-parlamente-demokratike-te-kohes-Nga-Fatos-Daci-Kuvendet-ne-Diber-kane-qene-parlamente-te-kohes-Me-fjalen-kuvend-ne-Diber-kuptohej-nje-mbledhje-e-zgjeruar-e-pjesetareve-te-nje-bashkesie-ose-e-disa-bashkesive-ndermjet-tyre-Ne-kuve'\
                and tokens[0] != 'LPT1'\
                and tokens[0] != 'LPT2'\
                and tokens[0] != 'HΉĤĦȞʰʱḢḤḦḨḪНҢӇӉΗἨἩἪἫἬἭἮἯῊᾘЋΗⱧԋњһhÉÈËEĘĚĔĖẺẸẾỀỄễỂểȨȩḜḝĒḖḗȄȅȆȇỆệḘḙḚḛ3عڠeēėèéëẽĕęəẻếềẹÉÈËEĘĚĔĖẺẸẾỀỄễỂểȨȩḜḝĒḖḗȄȅȆȇỆệḘḙḚḛ3عڠeēėèéëẽĕęəẻếềẹ'\
                and tokens[0] != 'hgjhngjngnbnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn' \
                and tokens[0] != 'ArmorCreuseDordogneDoubsDrômeEureEure-et-LoirFinistèreGardHaute-GaronneGersGirondeHéraultIlle-et-VilaineIndreIndre-et-LoireIsèreJuraLandesLoir-et-CherLoireHaute-LoireLoire-AtlantiqueLoiretLotLot-et-GaronneLozèreMaine-et-LoireMancheMarneHaute-MarneMayenneMeurthe-et-MoselleMeuseMorbihanMoselleNièvreNordOiseOrnePas-de-CalaisPuy-de-DômePyrénées-AtlantiquesHautes-PyrénéesPyrénées-OrientalesBas-RhinHaut-RhinRhôneHaute-SaôneSaône-et-LoireSartheSavoieHaute-SavoieParisSeine-MaritimeSeine-et-MarneYvelinesDeux-SèvresSommeTarnTarn-et-GaronneVarVaucluseVendéeVienneHaute-VienneVosgesYonneTerritoire'\
                and tokens[0] != '<KEY>':
            # Concatenate the vector components into a single space-separated
            # string and write it to a per-word file.
            for i in range(len(tokens)):
                if i == 0:
                    alb_dict[tokens[0]] = ''
                    balls_hash2[tokens[0]] = ''
                else:
                    alb_dict[tokens[0]] += tokens[i] + ' '
            path = "Data/Dict/" + tokens[0] + ".txt"
            balls_hash2[tokens[0]] = path
            new_file = open(path, "w")
            new_file.write(alb_dict[tokens[0]])
            new_file.close()
        print_file_create_status(c)
        c += 1
    print('Creating Files Done...')
    print('passed', passing)
    return balls_hash2
def print_file_create_status(c):
    # Progress counter for vector-file creation; 1167010 is presumably the
    # line count of the embedding file — confirm against the data.
    print(c, '/1167010')


def print_status(c):
    # Progress counter for synset processing; 4683 is presumably the number
    # of synsets in the wordnet — confirm against the data.
    print(c, '/4683')
# --- Script body: split the embedding file into per-word vector files, then
# write one normalized vector per AlbaNet synset definition.
dict_paths = build_files_dict()
f = open("Data/Dict_w2v.txt", "w+")
wordnet_path = 'Data/albanet.xml'
wordnet_file = open(wordnet_path, encoding="utf8").read()
wordnet = BeautifulSoup(wordnet_file, "xml")
count = 1
for synset in wordnet.findAll('synset'):
    # check if Definition tag exists in the synset
    if synset.find('definition'):
        definition = synset.definition.text
        if len(definition) > 0 and definition != 'vetëlëvizje':  # Do not consider synsets with empty definitions
            formatedDef = re.sub(r'[-()\"#/@;:<>{}`+=~|.!?,]', r'', definition)  # Remove all special charactes
            formatedDef = re.sub(r"\s+", '_', formatedDef)  # Convert single and multi spaces to _
            vector = find_vector(formatedDef, count)
            # Truncate the key to keep lines manageable.
            formatedDef = formatedDef[0:250]
            string = ''
            for x in vector:
                string += str(x) + ' '
            f.write(formatedDef + ' ' + string + '\n')
            count += 1
print("--- %s mins ---" % (time.time() - start_time))
print('end')
f.close()
|
<reponame>alercebroker/ztf-api-apf
from attr import Attribute, attr
from flask_restx import Resource, fields, Model
from math import isnan
def get_magpsf(raw_response):
    """Return the PSF magnitude, falling back to the generic "mag" key.

    ZTF rows expose ``.magpsf`` as an attribute; other records provide a
    "mag" dictionary entry instead.
    """
    try:
        return raw_response.magpsf
    except AttributeError:
        return raw_response["mag"]
def get_sigmapsf(raw_response):
    """Return the PSF magnitude error, falling back to the "e_mag" key.

    ZTF rows expose ``.sigmapsf`` as an attribute; other records provide an
    "e_mag" dictionary entry instead.
    """
    try:
        return raw_response.sigmapsf
    except AttributeError:
        return raw_response["e_mag"]
def get_parent_candid(raw_response):
    """Return the parent candidate id, mapping a NaN value to None.

    Accepts both attribute-style rows and dict-style records.
    """
    try:
        value = raw_response.parent_candid
    except AttributeError:
        value = raw_response["parent_candid"]
    # A NaN (SQL NULL surfaced as float) means "no parent alert".
    return None if value and isnan(value) else value
def get_rfid(raw_response):
    """Return the reference field id, mapping a NaN value to None.

    Accepts both attribute-style rows and dict-style records.
    """
    try:
        value = raw_response.rfid
    except AttributeError:
        value = raw_response["rfid"]
    # A NaN (SQL NULL surfaced as float) means "no reference".
    return None if value and isnan(value) else value
def get_tid(raw_response):
    """Classify the telescope id of a record.

    Returns "atlas" when the record carries a "tid" entry and "ztf"
    otherwise.  The original bound the looked-up value to an unused local;
    the lookup is kept purely as a presence check.
    """
    try:
        raw_response["tid"]  # presence check only; the value is not used
    except KeyError:
        return "ztf"
    return "atlas"
class NotNanFloat(fields.Raw):
    """flask-restx field that serializes NaN as None (valid JSON null)."""

    def format(self, value):
        # JSON has no NaN literal, so NaN database values become null.
        return None if isnan(value) else value
# Serialization schema for a single detection. NotNanFloat fields turn NaN
# into JSON null; the callable attributes normalize differences between
# ZTF and ATLAS records.
detection_model = Model(
    "Detection",
    {
        "tid": fields.String(attribute=get_tid),
        "mjd": NotNanFloat(attribute="mjd"),
        "candid": fields.String,
        "fid": fields.Integer,
        "pid": fields.Integer,
        "diffmaglim": NotNanFloat(attribute="diffmaglim"),
        "isdiffpos": fields.Integer,
        "nid": fields.Integer,
        "distnr": NotNanFloat(attribute="distnr"),
        "magpsf": fields.Float(attribute=get_magpsf),
        "magpsf_corr": NotNanFloat(attribute="magpsf_corr"),
        "magpsf_corr_ext": NotNanFloat(attribute="magpsf_corr_ext"),
        "magap": NotNanFloat(attribute="magap"),
        "magap_corr": NotNanFloat(attribute="magap_corr"),
        "sigmapsf": fields.Float(attribute=get_sigmapsf),
        "sigmapsf_corr": NotNanFloat(attribute="sigmapsf_corr"),
        "sigmapsf_corr_ext": NotNanFloat(attribute="sigmapsf_corr_ext"),
        "sigmagap": NotNanFloat(attribute="sigmagap"),
        "sigmagap_corr": NotNanFloat(attribute="sigmagap_corr"),
        "ra": NotNanFloat(attribute="ra"),
        "dec": NotNanFloat(attribute="dec"),
        "rb": NotNanFloat(attribute="rb"),
        "rbversion": fields.String,
        "drb": NotNanFloat(attribute="drb"),
        "magapbig": NotNanFloat(attribute="magapbig"),
        "sigmagapbig": NotNanFloat(attribute="sigmagapbig"),
        "rfid": fields.Integer(attribute=get_rfid),
        "has_stamp": fields.Boolean,
        "corrected": fields.Boolean,
        "dubious": fields.Boolean,
        "candid_alert": fields.String,
        "step_id_corr": fields.String,
        "phase": fields.Float(default=0.0),
        "parent_candid": fields.Integer(attribute=get_parent_candid),
    },
)

# Schema for a non-detection (an observation without a detected source).
non_detection_model = Model(
    "Non Detection",
    {
        "tid": fields.String(attribute=get_tid),
        "mjd": NotNanFloat(attribute="mjd"),
        "fid": fields.Integer,
        "diffmaglim": NotNanFloat(attribute="diffmaglim"),
    },
)

# Schema combining detections and non-detections into one light curve payload.
light_curve_model = Model(
    "Light Curve",
    {
        "detections": fields.List(fields.Nested(detection_model)),
        "non_detections": fields.List(fields.Nested(non_detection_model)),
    },
)
|
import sys
import os
from time import sleep
from subprocess import run, Popen
from shutil import copy
import yaml
# Filesystem layout: the theme repo is expected at ~/github/Dans-labs/mkdocs-dans.
GH_BASE = os.path.expanduser("~/github")
DANS_BASE = f"{GH_BASE}/Dans-labs"
THEME_BASE = f"{DANS_BASE}/mkdocs-dans"
# NOTE(review): CLIENTS and HELP appear unused in this script — presumably
# consumed by a sibling module; verify before removing.
CLIENTS = f"{THEME_BASE}/clients.yaml"
HELP = "help.md"
# Usage text printed by readArgs() for -h/--help or invalid arguments.
USAGE = """
Run `build.py` from the Terminal as follows:
```sh
python3 build.py make
python3 build.py docs
python3 build.py g commitmsg
```
`build` builds the DANS theme from the source files.
`pack` installs the DANS theme as a module.
`make` builds the DANS theme from the source files and installs it as a module.
`docs` serves the theme documentation (without make)
`g` does `make`, and pushes the theme repo site to GitHub,
where it will be published under <https://dans-labs.github.io/mkdocs-dans/>.
The repo itself will also be committed and pushed to GitHub.
Replace `commitmsg` by anything that is appropriate as a commit message.
"""
def readArgs():
    """Parse the command line.

    Returns a triple ``(task, message, rest)``: ``task`` is False when usage
    help was shown (unknown task, help flag, or missing commit message),
    ``message`` is the commit message for the ``g`` task (None otherwise),
    and ``rest`` holds any remaining arguments.
    """
    args = sys.argv[1:]
    if not args or args[0] in {"-h", "--help", "help"}:
        console(USAGE)
        return (False, None, [])
    task = args[0]
    if task not in {"build", "pack", "make", "docs", "push", "g"}:
        console(USAGE)
        return (False, None, [])
    if task == "g":
        if len(args) < 2:
            console("Provide a commit message")
            return (False, None, [])
        return (task, args[1], args[2:])
    return (task, None, [])
def console(msg, error=False, newline=True):
    """Write ``msg`` to stdout (or stderr when ``error`` is True).

    A single leading and/or trailing newline is stripped from the message;
    ``newline`` controls whether a final newline is appended.  The stream is
    flushed so output appears immediately.
    """
    if msg.startswith("\n"):
        msg = msg[1:]
    if msg.endswith("\n"):
        msg = msg[:-1]
    stream = sys.stderr if error else sys.stdout
    stream.write(msg + ("\n" if newline else ""))
    stream.flush()
def readYaml(fileName):
    """Parse the YAML file at ``fileName`` and return the resulting object.

    Uses ``yaml.safe_load``: calling ``yaml.load`` without an explicit
    Loader is deprecated and can construct arbitrary Python objects from
    untrusted input.  The file handle is closed deterministically, and the
    original's shadowing of the handle name is removed.
    """
    with open(fileName) as fh:
        return yaml.safe_load(fh)
def commit(task, msg):
    """Stage everything, commit with ``msg``, and push to origin/master.

    ``task`` is accepted for call-site symmetry but is currently unused.
    """
    run(["git", "add", "--all", "."])
    run(["git", "commit", "-m", msg])
    run(["git", "push", "origin", "master"])
def buildCustom():
    """Run the npm build for the theme assets.

    Returns True on success, None on failure — consistent with
    packCustom()/makeCustom() so callers can test the result.  The original
    always returned None, silently discarding the npm exit status.
    """
    status = run(["npm", "run", "build"]).returncode
    if status:
        return
    return True
def packCustom():
    """Copy packaging metadata into ./python and pip-install the theme module.

    Returns True when ``pip3 install .`` succeeds, False otherwise.  The
    working directory is restored afterwards.
    """
    for meta_file in ("README.md", "package.json"):
        copy(meta_file, "python/{}".format(meta_file))
    previous_dir = os.getcwd()
    os.chdir("python")
    exit_code = run(["pip3", "install", "."]).returncode
    os.chdir(previous_dir)
    return not exit_code
def makeCustom():
    """Build the theme assets and install the Python module.

    Equivalent to ``build`` followed by ``pack``.  Returns True on success,
    None when the npm build fails, False when the pip install fails.  The
    original duplicated the bodies of the build step and packCustom()
    inline; it now delegates to packCustom() for the install.
    """
    if run(["npm", "run", "build"]).returncode:
        return
    return packCustom()
def shipDocs():
    """Build the docs and publish them to the gh-pages branch on GitHub."""
    run(["mkdocs", "gh-deploy"])


def buildDocs():
    """Build the docs site locally without publishing."""
    run(["mkdocs", "build"])
def serveDocs():
    """Serve the docs locally with `mkdocs serve` and open them in a browser.

    Blocks until the dev server is interrupted (Ctrl-C), then terminates
    the mkdocs child process.
    """
    proc = Popen(["mkdocs", "serve"])
    sleep(3)  # give the dev server a moment to come up before opening it
    # NOTE(review): `open` is macOS-specific — confirm this script only runs there.
    run("open http://127.0.0.1:8000", shell=True)
    try:
        proc.wait()
    except KeyboardInterrupt:
        pass
    proc.terminate()
def main():
    """Entry point: parse the command line and dispatch the requested task."""
    task, msg, _extra = readArgs()
    if not task:
        return
    simple_tasks = {
        "build": buildCustom,
        "pack": packCustom,
        "make": makeCustom,
        "docs": serveDocs,
    }
    if task in simple_tasks:
        simple_tasks[task]()
    elif task == "g":
        # Publish the docs only when the make step succeeded, then commit/push.
        if makeCustom():
            shipDocs()
        commit(task, msg)
    # "push" is accepted by readArgs but currently has no action (as before).


main()
|
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.test import TestCase, override_settings
from accounts.models import User
from zentral.contrib.inventory.models import Taxonomy
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class JamfSetupViewsTestCase(TestCase):
    """View tests for the jamf setup pages (instances and tag configs).

    FIX: ``log_user_in`` posted the bare, unquoted token ``<PASSWORD>`` as the
    password — a syntax error left by credential scrubbing.  It now posts
    ``self.pwd``, the password the test user was created with, which the
    login assertions require.
    """

    @classmethod
    def setUpTestData(cls):
        # user
        cls.pwd = "<PASSWORD>"
        cls.user = User.objects.create_user("godzilla", "<EMAIL>", cls.pwd)

    def login_redirect(self, url):
        """Assert that an anonymous GET of `url` redirects to the login page."""
        response = self.client.get(url)
        self.assertRedirects(response, "{u}?next={n}".format(u=reverse("login"), n=url))

    def log_user_in(self):
        """Log the test user in and verify the session is established."""
        response = self.client.post(reverse('login'),
                                    {'username': self.user.username, 'password': self.pwd},
                                    follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["user"], self.user)

    def log_user_out(self):
        """Log out and verify the user is anonymous again."""
        response = self.client.get(reverse('logout'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["user"].is_authenticated, False)

    def test_jamf_instances_redirect(self):
        self.login_redirect(reverse("jamf:jamf_instances"))

    def test_jamf_instances_view(self):
        self.log_user_in()
        response = self.client.get(reverse("jamf:jamf_instances"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "jamf/jamfinstance_list.html")
        self.assertContains(response, "0 jamf instances")

    def test_create_jamf_instance_redirect(self):
        self.login_redirect(reverse("jamf:create_jamf_instance"))

    def test_create_jamf_instance_get(self):
        self.log_user_in()
        response = self.client.get(reverse("jamf:create_jamf_instance"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "jamf/jamfinstance_form.html")
        self.assertContains(response, "Create jamf instance")

    def create_jamf_instance(self):
        """Helper: create a jamf instance via the form and return (response, instance)."""
        self.log_user_in()
        response = self.client.post(reverse("jamf:create_jamf_instance"),
                                    {"host": "yo.example.com",
                                     "port": 8443,
                                     "path": "/JSSResource",
                                     "user": "godzilla",
                                     "password": "<PASSWORD>"},
                                    follow=True)
        self.assertEqual(response.template_name, ["jamf/jamfinstance_detail.html"])
        self.assertContains(response, "0 Tag configs")
        jamf_instance = response.context["object"]
        self.assertEqual(jamf_instance.version, 0)
        return response, jamf_instance

    def test_create_jamf_instance_post(self):
        response, jamf_instance = self.create_jamf_instance()
        self.assertContains(response, "https://yo.example.com:8443/JSSResource")
        self.assertContains(response, "godzilla")
        self.assertNotContains(response, "pwd")

    def test_delete_jamf_instance_redirect(self):
        response, jamf_instance = self.create_jamf_instance()
        self.log_user_out()
        self.login_redirect(reverse("jamf:delete_jamf_instance", args=(jamf_instance.id,)))

    def test_delete_jamf_instance_get(self):
        _, jamf_instance = self.create_jamf_instance()
        response = self.client.get(reverse("jamf:delete_jamf_instance", args=(jamf_instance.id,)))
        self.assertContains(response, "Delete jamf instance")

    # TODO: def test_delete_jamf_instance_post(self):
    # PB: API calls!

    def test_setup_jamf_instance_redirect(self):
        _, jamf_instance = self.create_jamf_instance()
        self.log_user_out()
        self.login_redirect(reverse("jamf:setup_jamf_instance", args=(jamf_instance.id,)))

    def test_update_jamf_instance_redirect(self):
        _, jamf_instance = self.create_jamf_instance()
        self.log_user_out()
        self.login_redirect(reverse("jamf:update_jamf_instance", args=(jamf_instance.id,)))

    def test_update_jamf_instance_get(self):
        _, jamf_instance = self.create_jamf_instance()
        response = self.client.get(reverse("jamf:update_jamf_instance", args=(jamf_instance.id,)))
        self.assertContains(response, "Update jamf instance")

    def test_update_jamf_instance_post(self):
        _, jamf_instance = self.create_jamf_instance()
        response = self.client.post(reverse("jamf:update_jamf_instance", args=(jamf_instance.id,)),
                                    {"host": "yo.example2.com",
                                     "port": 8443,
                                     "path": "/JSSResource",
                                     "user": "godzilla",
                                     "password": "<PASSWORD>"},
                                    follow=True)
        self.assertEqual(response.template_name, ["jamf/jamfinstance_detail.html"])
        self.assertContains(response, "0 Tag configs")
        self.assertContains(response, "https://yo.example2.com:8443/JSSResource")
        jamf_instance = response.context["object"]
        # Updating an instance bumps its version counter.
        self.assertEqual(jamf_instance.version, 1)

    def test_create_tag_config(self):
        _, jamf_instance = self.create_jamf_instance()
        t, _ = Taxonomy.objects.get_or_create(name=get_random_string(34))
        regex = r"^YOLOFOMO: (.*)$"
        response = self.client.post(reverse("jamf:create_tag_config", args=(jamf_instance.id,)),
                                    {"source": "GROUP",
                                     "taxonomy": t.pk,
                                     "regex": regex,
                                     "replacement": r"\1"},
                                    follow=True)
        self.assertEqual(response.template_name, ["jamf/jamfinstance_detail.html"])
        self.assertContains(response, "1 Tag config")
        self.assertContains(response, t.name)

    def test_create_tag_config_error(self):
        _, jamf_instance = self.create_jamf_instance()
        t, _ = Taxonomy.objects.get_or_create(name=get_random_string(34))
        # Unbalanced parenthesis: the form must reject the invalid regex.
        regex = r"^YOLOFOMO: ("
        response = self.client.post(reverse("jamf:create_tag_config", args=(jamf_instance.id,)),
                                    {"source": "GROUP",
                                     "taxonomy": t.pk,
                                     "regex": regex,
                                     "replacement": r"\1"},
                                    follow=True)
        self.assertEqual(response.template_name, ["jamf/tagconfig_form.html"])
        self.assertContains(response, "Not a valid regex")

    def test_update_tag_config(self):
        _, jamf_instance = self.create_jamf_instance()
        t, _ = Taxonomy.objects.get_or_create(name=get_random_string(34))
        regex = r"^YOLOFOMO: (.*)$"
        response = self.client.post(reverse("jamf:create_tag_config", args=(jamf_instance.id,)),
                                    {"source": "GROUP",
                                     "taxonomy": t.pk,
                                     "regex": regex,
                                     "replacement": r"\1"},
                                    follow=True)
        tag_config = response.context["tag_configs"][0]
        response = self.client.post(reverse("jamf:update_tag_config", args=(jamf_instance.pk, tag_config.pk)),
                                    {"source": "GROUP",
                                     "taxonomy": t.pk,
                                     "regex": regex,
                                     "replacement": r"haha: \1"},
                                    follow=True)
        self.assertEqual(response.template_name, ["jamf/jamfinstance_detail.html"])
        self.assertContains(response, "1 Tag config")
        self.assertContains(response, "haha")

    def test_delete_tag_config(self):
        _, jamf_instance = self.create_jamf_instance()
        t, _ = Taxonomy.objects.get_or_create(name=get_random_string(34))
        regex = r"^YOLOFOMO: (.*)$"
        response = self.client.post(reverse("jamf:create_tag_config", args=(jamf_instance.id,)),
                                    {"source": "GROUP",
                                     "taxonomy": t.pk,
                                     "regex": regex,
                                     "replacement": r"\1"},
                                    follow=True)
        tag_config = response.context["tag_configs"][0]
        response = self.client.post(reverse("jamf:delete_tag_config", args=(jamf_instance.pk, tag_config.pk)),
                                    follow=True)
        self.assertEqual(response.template_name, ["jamf/jamfinstance_detail.html"])
        self.assertContains(response, "0 Tag configs")
|
<reponame>fsimkovic/cptbx<filename>conkit/core/sequencefile.py
# coding=utf-8
#
# BSD 3-Clause License
#
# Copyright (c) 2016-21, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""SequenceFile container used throughout ConKit"""
from __future__ import division
__author__ = "<NAME>"
__date__ = "03 Aug 2016"
__version__ = "0.13.3"
import numpy as np
import sys
from conkit.core.entity import Entity
from conkit.core.mappings import AminoAcidMapping, SequenceAlignmentState
class SequenceFile(Entity):
"""A sequence file object representing a single sequence file
The :obj:`~conkit.core.sequencefile.SequenceFile` class represents a data structure to hold
:obj:`~conkit.core.sequence.Sequence` instances in a single sequence file. It contains
functions to store and analyze sequences.
Attributes
----------
id : str
A unique identifier
is_alignment : bool
A boolean status for the alignment
meff : int
The number of effective sequences in the :obj:`~conkit.core.sequencefile.SequenceFile`
nseq : int
The number of sequences in the :obj:`~conkit.core.sequencefile.SequenceFile`
remark : list
The :obj:`~conkit.core.sequencefile.SequenceFile`-specific remarks
status : int
An indication of the sequence file, i.e alignment, no alignment, or unknown
top_sequence : :obj:`~conkit.core.sequence.Sequence`, None
The first :obj:`~conkit.core.sequence.Sequence` entry in the file
Examples
--------
>>> from conkit.core import Sequence, SequenceFile
>>> sequence_file = SequenceFile("example")
>>> sequence_file.add(Sequence("foo", "ABCDEF"))
>>> sequence_file.add(Sequence("bar", "ZYXWVU"))
>>> print(sequence_file)
SequenceFile(id="example" nseq=2)
"""
__slots__ = ["_remark", "_status"]
    def __init__(self, id):
        """Initialise a new :obj:`~conkit.core.sequencefile.SequenceFile`

        Parameters
        ----------
        id : str
           A unique identifier for the sequence file

        """
        # Alignment status starts as unknown; it is recomputed by is_alignment.
        self._remark = []
        self._status = SequenceAlignmentState.unknown
        super(SequenceFile, self).__init__(id)
    def __repr__(self):
        return '{}(id="{}" nseq={})'.format(self.__class__.__name__, self.id, self.nseq)

    @property
    def ascii_matrix(self):
        """The alignment encoded in a 2-D ASCII matrix"""
        # One row per sequence; entries come from each sequence's seq_ascii.
        return [list(seq.seq_ascii) for seq in self]

    @property
    def encoded_matrix(self):
        """The alignment encoded for contact prediction"""
        # Same layout as ascii_matrix, using each sequence's seq_encoded form
        # (presumably the AminoAcidMapping integer encoding — confirm).
        return [list(seq.seq_encoded) for seq in self]
@property
def is_alignment(self):
"""A boolean status for the alignment
Returns
-------
bool
A boolean status for the alignment
"""
seq_length = self.top_sequence.seq_len
self._status = SequenceAlignmentState.aligned
for sequence in self:
if sequence.seq_len != seq_length:
self._status = SequenceAlignmentState.unaligned
break
return self._status == SequenceAlignmentState.aligned
    @property
    def diversity(self):
        """The diversity of an alignment defined by :math:`\\sqrt{N}/L`.

        ``N`` equals the number of sequences in
        the alignment and ``L`` the sequence length

        Raises
        ------
        :exc:`ValueError`
           If the sequence file is not an alignment

        """
        if self.empty:
            return 0.0
        elif self.is_alignment:
            # NOTE(review): uses self.top (inherited) rather than the
            # top_sequence property — presumably equivalent; confirm.
            return (np.sqrt(len(self)) / float(self.top.seq_len)).round(decimals=3).item()
        else:
            raise ValueError("This is not an alignment")
    @property
    def empty(self):
        """Status of emptiness of sequencefile"""
        return len(self) < 1

    @property
    def meff(self):
        """The number of effective sequences"""
        # Sum of per-sequence weights at the default 0.8 identity threshold.
        return int(sum(self.get_weights()))

    @property
    def nseq(self):
        """The number of sequences"""
        return len(self)
@property
def remark(self):
"""The :obj:`~conkit.core.sequencefile.SequenceFile`-specific remarks"""
return self._remark
@remark.setter
def remark(self, remark):
"""Set the :obj:`~conkit.core.sequencefile.SequenceFile` remark
Parameters
----------
remark : str, list
The remark will be added to the list of remarks
"""
if isinstance(remark, list):
self._remark += remark
elif isinstance(remark, tuple):
self._remark += list(remark)
else:
self._remark += [remark]
    @property
    def status(self):
        """An indication of the residue status, i.e true positive, false positive, or unknown"""
        # Returns the underlying enum's integer value, not the enum member.
        return self._status.value

    @status.setter
    def status(self, status):
        """Set the status

        Parameters
        ----------
        status : int
           [0] for `unknown`,
           [-1] for `no alignment`, or
           [1] for `alignment`

        Raises
        ------
        :exc:`ValueError`
           Cannot determine if your sequence file is an alignment or not

        """
        self._status = SequenceAlignmentState(status)
@property
def top_sequence(self):
"""The first :obj:`~conkit.core.sequence.Sequence` entry in :obj:`~conkit.core.sequencefile.SequenceFile`
Returns
-------
:obj:`~conkit.core.sequence.Sequence`
The first :obj:`~conkit.core.sequence.Sequence` entry in :obj:`~conkit.core.sequencefile.SequenceFile`
"""
return self.top
def get_meff_with_id(self, identity):
"""Calculate the number of effective sequences with specified sequence identity
See Also
--------
meff, get_weights
"""
return int(sum(self.get_weights(identity=identity)))
def get_weights(self, identity=0.8):
"""Calculate the sequence weights
This function calculates the sequence weights in the
the Multiple Sequence Alignment.
The mathematical function used to calculate `Meff` is
.. math::
M_{eff}=\\sum_{i}\\frac{1}{\\sum_{j}S_{i,j}}
Parameters
----------
identity : float, optional
The sequence identity to use for similarity decision [default: 0.8]
Returns
-------
list
A list of the sequence weights in the alignment
Raises
------
:exc:`ValueError`
:obj:`~conkit.core.sequencefile.SequenceFile` is not an alignment
:exc:`ValueError`
Sequence Identity needs to be between 0 and 1
"""
if identity < 0 or identity > 1:
raise ValueError("Sequence Identity needs to be between 0 and 1")
if self.is_alignment:
from conkit.core.ext.c_sequencefile import c_get_weights
X = np.array(self.ascii_matrix, dtype=np.int64)
hamming = np.zeros(X.shape[0], dtype=np.float64)
c_get_weights(X, identity, hamming)
return hamming.tolist()
else:
raise ValueError("This is not an alignment")
    def get_frequency(self, symbol):
        """Calculate the frequency of an amino acid (symbol) in each Multiple Sequence Alignment column

        Parameters
        ----------
        symbol : str
            Single-letter amino acid code; codes missing from
            :obj:`AminoAcidMapping` fall back to ``X``

        Returns
        -------
        list
            A list containing the per alignment-column amino acid frequency count

        Raises
        ------
        :exc:`ValueError`
            :obj:`~conkit.core.sequencefile.SequenceFile` is not an alignment
        """
        if self.is_alignment:
            # deferred import: keeps the C extension optional at module load time
            from conkit.core.ext.c_sequencefile import c_get_frequency
            X = np.array(self.encoded_matrix, dtype=np.int64)
            # map the letter to its integer encoding used by encoded_matrix
            symbol = getattr(AminoAcidMapping, symbol, AminoAcidMapping["X"]).value
            frequencies = np.zeros(X.shape[1], dtype=np.int64)
            c_get_frequency(X, symbol, frequencies)
            return frequencies.tolist()
        else:
            raise ValueError("This is not an alignment")
def filter(self, min_id=0.3, max_id=0.9, inplace=False):
"""Filter sequences from an alignment according to the minimum and maximum identity
between the sequences
Parameters
----------
min_id : float, optional
Minimum sequence identity
max_id : float, optional
Maximum sequence identity
inplace : bool, optional
Replace the saved order of sequences [default: False]
Returns
-------
:obj:`~conkit.core.sequencefile.SequenceFile`
The reference to the :obj:`~conkit.core.sequencefile.SequenceFile`, regardless of inplace
Raises
------
:exc:`ValueError`
:obj:`~conkit.core.sequencefile.SequenceFile` is not an alignment
:exc:`ValueError`
Minimum sequence identity needs to be between 0 and 1
:exc:`ValueError`
Maximum sequence identity needs to be between 0 and 1
"""
if 0 > min_id > 1:
raise ValueError("Minimum sequence identity needs to be between 0 and 1")
elif 0 > max_id > 1:
raise ValueError("Maximum sequence identity needs to be between 0 and 1")
if self.is_alignment:
from conkit.core.ext.c_sequencefile import c_filter
X = np.array(self.ascii_matrix, dtype=np.int64)
throwables = np.full(X.shape[0], False, dtype=np.bool)
c_filter(X, min_id, max_id, throwables)
filtered = self._inplace(inplace)
for i, sequence in enumerate(self):
if throwables[i]:
filtered.remove(sequence.id)
return filtered
else:
raise ValueError("This is not an alignment")
def filter_gapped(self, min_prop=0.0, max_prop=0.9, inplace=True):
"""Filter all sequences a gap proportion greater than the limit
Parameters
----------
min_prop : float, optional
Minimum allowed gap proportion [default: 0.0]
max_prop : float, optional
Maximum allowed gap proportion [default: 0.9]
inplace : bool, optional
Replace the saved order of sequences [default: False]
Returns
-------
:obj:`~conkit.core.sequencefile.SequenceFile`
The reference to the :obj:`~conkit.core.sequencefile.SequenceFile`, regardless of inplace
Raises
------
:exc:`ValueError`
:obj:`~conkit.core.sequencefile.SequenceFile` is not an alignment
:exc:`ValueError`
Minimum gap proportion needs to be between 0 and 1
:exc:`ValueError`
Maximum gap proportion needs to be between 0 and 1
"""
if 0.0 > min_prop > 1.0:
raise ValueError("Minimum gap proportion needs to be between 0 and 1")
elif 0.0 > max_prop > 1.0:
raise ValueError("Maximum gap proportion needs to be between 0 and 1")
symbol = "X"
if self.is_alignment:
from conkit.core.ext.c_sequencefile import c_filter_symbol
X = np.array(self.encoded_matrix, dtype=np.int64)
symbol = getattr(AminoAcidMapping, symbol, AminoAcidMapping["X"]).value
throwables = np.full(X.shape[0], False, dtype=np.bool)
c_filter_symbol(X, min_prop, max_prop, symbol, throwables)
filtered = self._inplace(inplace)
for i, sequence in enumerate(self):
if throwables[i]:
filtered.remove(sequence.id)
return filtered
else:
raise ValueError("This is not an alignment")
def sort(self, kword, reverse=False, inplace=False):
"""Sort the :obj:`~conkit.core.sequencefile.SequenceFile`
Parameters
----------
kword : str
The dictionary key to sort sequences by
reverse : bool, optional
Sort the sequences in reverse order [default: False]
inplace : bool, optional
Replace the saved order of sequences [default: False]
Returns
-------
:obj:`~conkit.core.sequencefile.SequenceFile`
The reference to the :obj:`~conkit.core.sequencefile.SequenceFile`, regardless of inplace
Raises
------
:exc:`ValueError`
``kword`` not in :obj:`~conkit.core.sequencefile.SequenceFile`
"""
sequence_file = self._inplace(inplace)
sequence_file._sort(kword, reverse)
return sequence_file
def to_string(self):
"""Return the :obj:`~conkit.core.sequencefile.SequenceFile` as :obj:`str`"""
content = [s.seq for s in self]
return "\n".join(content)
def trim(self, start, end, inplace=False):
"""Trim the :obj:`~conkit.core.sequencefile.SequenceFile`
Parameters
----------
start : int
First residue to include
end : int
Final residue to include
inplace : bool, optional
Replace the saved order of sequences [default: False]
Returns
-------
:obj:`~conkit.core.sequencefile.SequenceFile`
The reference to the :obj:`~conkit.core.sequencefile.SequenceFile`, regardless of inplace
"""
sequence_file = self._inplace(inplace)
if self.is_alignment:
i = start - 1
j = end
for sequence in sequence_file:
sequence.seq = sequence.seq[i:j]
return sequence_file
else:
raise ValueError("This is not an alignment")
def summary(self):
"""Generate a summary for the :obj:`~conkit.core.sequencefile.SequenceFile`
Returns
-------
str
"""
sstream = "Summary for {id}{nline}"
sstream += "-------------------------------{nline}"
sstream += "Alignment:{tab}{tab}{is_alignment}"
sstream += "Number of sequences:{tab}{nseq}{nline}" % self.nseq
sstream += "Alignment depth (0.8):{tab}{meff}{nline}" % self.meff
return sstream.format(
id=self.id, is_alignment=self.is_alignment, tab="\t", nline="\n", nseq=self.nseq, meff=self.meff
)
|
<filename>ats/models.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.db import models
from django.contrib import admin
from django.contrib.auth.models import User
class Project(models.Model):
    """An internal project that time can be booked against.

    A project is considered "closed" once ``end_dt`` lies in the past;
    a missing ``end_dt`` means the project is open-ended.
    """
    id = models.AutoField(primary_key=True)
    name = models.TextField(blank=False)
    start_dt = models.DateField(null=False, blank=False)
    end_dt = models.DateField(null=True, blank=True)
    sortkey = models.IntegerField(null=False)
    external_project = models.ForeignKey('ExternalProject',
                                         on_delete=models.PROTECT, blank=True, null=True)

    def __str__(self):
        # The original had two identical "[opened]" branches (no end date, and
        # an end date in the future); they are collapsed into one fall-through.
        if self.end_dt and self.end_dt < datetime.date.today():
            return '%d : %s [closed]' % (self.id, self.name)
        return '%d : %s [opened]' % (self.id, self.name)
class ExternalProject(models.Model):
    """An externally-tracked project that internal projects may reference."""
    id = models.AutoField(primary_key=True)
    name = models.TextField(blank=False)
    # external bookkeeping code; optional, indexed, and not enforced unique
    code = models.CharField(max_length=255, db_index=True,
                            blank=True, null=True, default='',)

    def __str__(self):
        return '%d : %s' % (self.id, self.name)
class Job(models.Model):
    """A job role that project workers can be assigned to."""
    id = models.AutoField(primary_key=True)
    name = models.TextField(blank=False)
    sortkey = models.IntegerField(null=False)
    invalid = models.BooleanField(default=False)

    def __str__(self):
        suffix = ' [invalid]' if self.invalid else ''
        return '%d : %s' % (self.id, self.name) + suffix
class Task(models.Model):
    """A concrete task, grouped under a :class:`Job`."""
    id = models.AutoField(primary_key=True)
    name = models.TextField(blank=False)
    job = models.ForeignKey('Job', on_delete=models.PROTECT)
    sortkey = models.IntegerField(null=False)
    invalid = models.BooleanField(default=False)

    def __str__(self):
        label = '%d : %s (%s)' % (self.id, self.name, self.job.name)
        if self.invalid:
            return label + ' [invalid]'
        return label
class ProjectWorker(models.Model):
    """Assignment of a user to a project in a specific job role."""
    id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User, on_delete=models.PROTECT)
    project = models.ForeignKey('Project', on_delete=models.PROTECT)
    job = models.ForeignKey('Job', on_delete=models.PROTECT)
    invalid = models.BooleanField(default=False)

    class Meta:
        # A user may hold a given job on a given project only once.
        unique_together = [
            ['job', 'project','user']
        ]

    def __str__(self):
        if self.invalid:
            return '%d : %s (%s - %s) [invalid]' % (
                self.id, self.user.username, self.project.name, self.job.name)
        else:
            return '%d : %s (%s - %s)' % (
                self.id, self.user.username, self.project.name, self.job.name)
class UsedTaskTime(models.Model):
    """A single time booking: user, project, task, date and duration."""
    id = models.BigAutoField(primary_key=True)
    user = models.ForeignKey(User, on_delete=models.PROTECT)
    project = models.ForeignKey('Project', on_delete=models.PROTECT)
    task = models.ForeignKey('Task', on_delete=models.PROTECT)
    taskdate = models.DateField(null=False)
    tasktime = models.TimeField(null=False)

    def __str__(self):
        return '%d : [%s - %s] %s - %s - %s' % (
            self.id, self.taskdate, self.tasktime,
            self.user.username, self.project.name, self.task.name)
|
<filename>app/user/tests/test_user_api.py
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
# Named URLs of the user API endpoints under test.
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me') # url of the authenticated user


def create_user(**params):
    """Create and return a user through the project's active user model."""
    return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
    """Tests for the user API that do not require authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_create_valid_user_success(self):
        """Test creating a user with a valid payload is successful"""
        payload = {
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'name': 'some name'
        }
        res = self.client.post(CREATE_USER_URL, payload)
        # confirms the request created a user
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # verify that the user was truly created
        user = get_user_model().objects.get(**res.data)
        # test that the password is correct
        self.assertTrue(user.check_password(payload['password']))
        # confirms that the password is not present in the response
        # as it would be a potential security threat
        self.assertNotIn('password', res.data)

    def test_user_exists(self):
        """Test creating an already existing user fails"""
        payload = {
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'name': 'bambi',
        }
        create_user(**payload)
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_short_password(self):
        """Tests that the password must be > 5 chars"""
        payload = {
            'email': '<EMAIL>',
            'password': 'te',
            'name': 'rambo',
        }
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        # the rejected request must not have left a user behind
        user_exists = get_user_model().objects.filter(
            email=payload['email']
        ).exists()
        self.assertFalse(user_exists)

    def test_create_token(self):
        """test that a token is created for a user"""
        payload = {
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'name': 'rambo2',
        }
        create_user(**payload)
        # remember that this doesn't create a user,
        # only checks the token for one
        res = self.client.post(TOKEN_URL, payload)
        self.assertIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_create_invalid_token(self):
        """Test that a token is not created for invalid credentials"""
        payload1 = {
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'name': 'rambo2',
        }
        create_user(**payload1)
        payload2 = {
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'name': 'rambo2',
        }
        # tries to create a token with the wrong password
        res = self.client.post(TOKEN_URL, payload2)
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_token_no_user(self):
        """Test that token is not created if user doesn't exist"""
        # same as invalid token, but without creating the user
        payload = {
            'email': '<EMAIL>',
            'password': '<PASSWORD>',
            'name': 'rambo2',
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_missing_data(self):
        """Tests that token is not created if password is missing"""
        res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_user_unauthorized(self):
        """Test that authentication is required for users"""
        res = self.client.get(ME_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
    """Test api requests that require authentication"""

    def setUp(self):
        # Authenticate every request in this suite as a pre-created user.
        self.user = create_user(
            email='<EMAIL>',
            password='<PASSWORD>',
            name='rambo'
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)

    def test_retrieve_user_authorized(self):
        """test retrieving profile for logged user"""
        res = self.client.get(ME_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, {
            'name': self.user.name,
            'email': self.user.email,
        })

    def test_post_not_allowed(self):
        """test that POST is not allowed on the me url"""
        res = self.client.post(ME_URL, {})
        self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_update_user_profile(self):
        """Test updating the user profile for authenticated user"""
        payload = {
            'name': 'new_name',
            'email': '<EMAIL>',
            'password': '<PASSWORD>'
        }
        res = self.client.patch(ME_URL, payload)
        # reload from the database so assertions see the persisted values
        self.user.refresh_from_db()
        self.assertEqual(self.user.name, payload['name'])
        self.assertTrue(self.user.check_password(payload['password']))
        self.assertEqual(res.status_code, status.HTTP_200_OK)
|
<filename>Lib/test/test_raise.py
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Tests for the raise statement."""
from test import support
import sys
import types
import unittest
def get_tb():
    """Return a fresh traceback object by raising and catching an OSError."""
    try:
        raise OSError()
    except OSError:
        return sys.exc_info()[2]
class Context:
    """Context manager that suppresses any exception raised in its body."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Returning a true value tells the interpreter to swallow the exception.
        return True
class TestRaise(unittest.TestCase):
    # Semantics of ``raise`` and bare re-raise.

    def test_invalid_reraise(self):
        # A bare ``raise`` with no active exception is a RuntimeError.
        try:
            raise
        except RuntimeError as e:
            self.assertIn("No active exception", str(e))
        else:
            self.fail("No exception raised")

    def test_reraise(self):
        # A bare ``raise`` inside an except block re-raises the same object.
        try:
            try:
                raise IndexError()
            except IndexError as e:
                exc1 = e
                raise
        except IndexError as exc2:
            self.assertTrue(exc1 is exc2)
        else:
            self.fail("No exception raised")

    def test_except_reraise(self):
        # A handled inner exception does not disturb re-raising the outer one.
        def reraise():
            try:
                raise TypeError("foo")
            except:
                try:
                    raise KeyError("caught")
                except KeyError:
                    pass
                raise
        self.assertRaises(TypeError, reraise)

    def test_finally_reraise(self):
        # ``raise`` in a finally re-raises the exception active there (KeyError).
        def reraise():
            try:
                raise TypeError("foo")
            except:
                try:
                    raise KeyError("caught")
                finally:
                    raise
        self.assertRaises(KeyError, reraise)

    def test_nested_reraise(self):
        # A bare ``raise`` in a called function re-raises the caller's exception.
        def nested_reraise():
            raise
        def reraise():
            try:
                raise TypeError("foo")
            except:
                nested_reraise()
        self.assertRaises(TypeError, reraise)

    def test_with_reraise1(self):
        def reraise():
            try:
                raise TypeError("foo")
            except:
                with Context():
                    pass
                raise
        self.assertRaises(TypeError, reraise)

    def test_with_reraise2(self):
        # Context swallows the KeyError; the bare raise still sees the TypeError.
        def reraise():
            try:
                raise TypeError("foo")
            except:
                with Context():
                    raise KeyError("caught")
                raise
        self.assertRaises(TypeError, reraise)

    def test_yield_reraise(self):
        # The re-raise works across a generator suspension point.
        def reraise():
            try:
                raise TypeError("foo")
            except:
                yield 1
                raise
        g = reraise()
        next(g)
        self.assertRaises(TypeError, lambda: next(g))
        self.assertRaises(StopIteration, lambda: next(g))

    def test_erroneous_exception(self):
        # An exception whose constructor itself raises propagates that error.
        class MyException(Exception):
            def __init__(self):
                raise RuntimeError()
        try:
            raise MyException
        except RuntimeError:
            pass
        else:
            self.fail("No exception raised")

    def test_new_returns_invalid_instance(self):
        # See issue #11627.
        class MyException(Exception):
            def __new__(cls, *args):
                return object()
        with self.assertRaises(TypeError):
            raise MyException
class TestCause(unittest.TestCase):
    # Semantics of explicit exception chaining: ``raise ... from ...``.

    def test_invalid_cause(self):
        # The cause must be an exception (class or instance), not e.g. an int.
        try:
            raise IndexError from 5
        except TypeError as e:
            self.assertIn("exception cause", str(e))
        else:
            self.fail("No exception raised")

    def test_class_cause(self):
        # An exception class as cause is instantiated implicitly.
        try:
            raise IndexError from KeyError
        except IndexError as e:
            self.assertIsInstance(e.__cause__, KeyError)
        else:
            self.fail("No exception raised")

    def test_instance_cause(self):
        # An exception instance is attached as-is.
        cause = KeyError()
        try:
            raise IndexError from cause
        except IndexError as e:
            self.assertTrue(e.__cause__ is cause)
        else:
            self.fail("No exception raised")

    def test_erroneous_cause(self):
        # A cause class whose constructor raises propagates that error instead.
        class MyException(Exception):
            def __init__(self):
                raise RuntimeError()
        try:
            raise IndexError from MyException
        except RuntimeError:
            pass
        else:
            self.fail("No exception raised")
class TestTraceback(unittest.TestCase):
    # __traceback__ handling on raised exceptions.

    def test_sets_traceback(self):
        try:
            raise IndexError()
        except IndexError as e:
            self.assertIsInstance(e.__traceback__, types.TracebackType)
        else:
            self.fail("No exception raised")

    def test_accepts_traceback(self):
        # with_traceback() chains the supplied tb behind the new raise frame.
        tb = get_tb()
        try:
            raise IndexError().with_traceback(tb)
        except IndexError as e:
            self.assertNotEqual(e.__traceback__, tb)
            self.assertEqual(e.__traceback__.tb_next, tb)
        else:
            self.fail("No exception raised")
class TestContext(unittest.TestCase):
    # Semantics of implicit exception chaining via __context__.

    def test_instance_context_instance_raise(self):
        context = IndexError()
        try:
            try:
                raise context
            except:
                raise OSError()
        except OSError as e:
            self.assertEqual(e.__context__, context)
        else:
            self.fail("No exception raised")

    def test_class_context_instance_raise(self):
        # Raising a class creates an instance; that instance becomes the context.
        context = IndexError
        try:
            try:
                raise context
            except:
                raise OSError()
        except OSError as e:
            self.assertNotEqual(e.__context__, context)
            self.assertIsInstance(e.__context__, context)
        else:
            self.fail("No exception raised")

    def test_class_context_class_raise(self):
        context = IndexError
        try:
            try:
                raise context
            except:
                raise OSError
        except OSError as e:
            self.assertNotEqual(e.__context__, context)
            self.assertIsInstance(e.__context__, context)
        else:
            self.fail("No exception raised")

    def test_c_exception_context(self):
        # Exceptions raised from C code (here 1/0) also become the context.
        try:
            try:
                1/0
            except:
                raise OSError
        except OSError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_c_exception_raise(self):
        # ``xyzzy`` is an intentionally undefined name producing a NameError.
        try:
            try:
                1/0
            except:
                xyzzy
        except NameError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_noraise_finally(self):
        # No exception in the try body, so the finally raise has no context.
        try:
            try:
                pass
            finally:
                raise OSError
        except OSError as e:
            self.assertTrue(e.__context__ is None)
        else:
            self.fail("No exception raised")

    def test_raise_finally(self):
        try:
            try:
                1/0
            finally:
                raise OSError
        except OSError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_context_manager(self):
        # An exception raised in __exit__ chains onto the body's exception.
        class ContextManager:
            def __enter__(self):
                pass
            def __exit__(self, t, v, tb):
                xyzzy
        try:
            with ContextManager():
                1/0
        except NameError as e:
            self.assertIsInstance(e.__context__, ZeroDivisionError)
        else:
            self.fail("No exception raised")

    def test_cycle_broken(self):
        # Self-cycles (when re-raising a caught exception) are broken
        try:
            try:
                1/0
            except ZeroDivisionError as e:
                raise e
        except ZeroDivisionError as e:
            self.assertTrue(e.__context__ is None, e.__context__)

    def test_reraise_cycle_broken(self):
        # Non-trivial context cycles (through re-raising a previous exception)
        # are broken too.
        try:
            try:
                xyzzy
            except NameError as a:
                try:
                    1/0
                except ZeroDivisionError:
                    raise a
        except NameError as e:
            self.assertTrue(e.__context__.__context__ is None)

    def test_3118(self):
        # deleting the generator caused the __context__ to be cleared
        def gen():
            try:
                yield 1
            finally:
                pass
        def f():
            g = gen()
            next(g)
            try:
                try:
                    raise ValueError
                except:
                    del g
                    raise KeyError
            except Exception as e:
                self.assertIsInstance(e.__context__, ValueError)
        f()

    def test_3611(self):
        # A re-raised exception in a __del__ caused the __context__
        # to be cleared
        class C:
            def __del__(self):
                try:
                    1/0
                except:
                    raise
        def f():
            x = C()
            try:
                try:
                    x.x
                except AttributeError:
                    del x
                    raise TypeError
            except Exception as e:
                self.assertNotEqual(e.__context__, None)
                self.assertIsInstance(e.__context__, AttributeError)
        # __del__ errors are printed to stderr; capture them to keep output clean
        with support.captured_output("stderr"):
            f()
class TestRemovedFunctionality(unittest.TestCase):
    # Python 2 raise forms that are TypeErrors in Python 3.

    def test_tuples(self):
        try:
            raise (IndexError, KeyError) # This should be a tuple!
        except TypeError:
            pass
        else:
            self.fail("No exception raised")

    def test_strings(self):
        # String exceptions were removed long ago; raising one is a TypeError.
        try:
            raise "foo"
        except TypeError:
            pass
        else:
            self.fail("No exception raised")
def test_main():
    # Legacy test.support entry point used by the old regrtest runner.
    support.run_unittest(__name__)

if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="<NAME>, <NAME>"
import sys
import requests
import json
import codecs
import datetime
import time
import os
import re
import unicodedata
from acscsv.twitter_acs import TwacsCSV
# Python 2 setup: wrap the standard streams so unicode text is transparently
# UTF-8 encoded/decoded. NOTE(review): reload(sys) without a following
# sys.setdefaultencoding() call looks like a leftover -- TODO confirm.
reload(sys)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
#remove this
requests.packages.urllib3.disable_warnings()
# formatter of data from API
TIME_FORMAT_SHORT = "%Y%m%d%H%M"
TIME_FORMAT_LONG = "%Y-%m-%dT%H:%M:%S.000Z"
PAUSE = 1 # seconds between page requests
# index of the postedTime field in a parsed activity record list
POSTED_TIME_IDX = 1
#date time parsing utility regex
DATE_TIME_RE = re.compile("([0-9]{4}).([0-9]{2}).([0-9]{2}).([0-9]{2}):([0-9]{2})")
class Query(object):
    """Object represents a single search API query and provides utilities for
    managing parameters, executing the query and parsing the results.

    NOTE: this module is Python 2 (print statements, ``except X, e`` syntax,
    ``unicode``); keep that in mind when modifying it.
    """
    def __init__(self
            , user
            , password
            , stream_url
            , paged = False
            , output_file_path = None
            , search_v2 = False
            , hard_max = 500000
            ):
        """A Query requires at least a valid user name, password and endpoint url.
        The URL of the endpoint should be the JSON records endpoint, not the counts
        endpoint.

        Additional parameters specifying paged search and output file path allow
        for making queries which return more than the 500 activity limit imposed by
        a single call to the API. This is called paging or paged search. Setting
        paged = True will enable the token interpretation
        functionality provided in the API to return a seamless set of activites.

        Once the object is created, it can be used for repeated access to the
        configured end point with the same connection configuration set at
        creation."""
        self.output_file_path = output_file_path
        self.paged = paged
        # upper bound on activities collected across pages in one execute()
        self.hard_max = hard_max
        self.paged_file_list = []
        self.user = user
        self.password = password
        self.end_point = stream_url # activities end point NOT the counts end point
        # get a parser for the twitter columns
        # TODO: use the updated retriveal methods in gnacs instead of this?
        self.twitter_parser = TwacsCSV(",", None, False, True, False, True, False, False, False)
        self.search_v2 = search_v2
        # Flag for post processing tweet timeline from tweet times
        self.tweet_times_flag = False

    def set_dates(self, start, end):
        """Utility function to set dates from strings. Given string-formated
        dates for start date time and end date time, extract the required
        date string format for use in the API query and make sure they
        are valid dates.

        Sets class fromDate and toDate date strings."""
        if start:
            dt = re.search(DATE_TIME_RE, start)
            if not dt:
                raise ValueError("Error. Invalid start-date format: %s \n"%str(start))
            else:
                # concatenate the captured date components into YYYYMMDDHHMM
                f =''
                for i in range(re.compile(DATE_TIME_RE).groups):
                    f += dt.group(i+1)
                self.fromDate = f
                # make sure this is a valid date
                tmp_start = datetime.datetime.strptime(f, TIME_FORMAT_SHORT)
        if end:
            dt = re.search(DATE_TIME_RE, end)
            if not dt:
                raise ValueError("Error. Invalid end-date format: %s \n"%str(end))
            else:
                e =''
                for i in range(re.compile(DATE_TIME_RE).groups):
                    e += dt.group(i+1)
                self.toDate = e
                # make sure this is a valid date
                tmp_end = datetime.datetime.strptime(e, TIME_FORMAT_SHORT)
                if start:
                    if tmp_start >= tmp_end:
                        raise ValueError("Error. Start date greater than end date.\n")

    def name_munger(self, f):
        """Utility function to create a valid, friendly file name base
        string from an input rule."""
        f = re.sub(' +','_',f)
        f = f.replace(':','_')
        f = f.replace('"','_Q_')
        f = f.replace('(','_p_')
        f = f.replace(')','_p_')
        # keep at most 42 chars and strip non-ASCII for safe file names
        self.file_name_prefix = unicodedata.normalize(
                "NFKD",f[:42]).encode(
                "ascii","ignore")

    def request(self):
        """HTTP request based on class variables for rule_payload,
        stream_url, user and password"""
        try:
            s = requests.Session()
            s.headers = {'Accept-encoding': 'gzip'}
            s.auth = (self.user, self.password)
            res = s.post(self.stream_url, data=json.dumps(self.rule_payload))
            # any non-200 status is fatal for this command-line tool
            if res.status_code != 200:
                sys.stderr.write("Exiting with HTTP error code {}\n".format(res.status_code))
                sys.stderr.write("ERROR Message: {}\n".format(res.json()["error"]["message"]))
                sys.exit(-1)
        except requests.exceptions.ConnectionError, e:
            e.msg = "Error (%s). Exiting without results."%str(e)
            raise e
        except requests.exceptions.HTTPError, e:
            e.msg = "Error (%s). Exiting without results."%str(e)
            raise e
        except requests.exceptions.MissingSchema, e:
            e.msg = "Error (%s). Exiting without results."%str(e)
            raise e
        #Don't use res.text as it creates encoding challenges!
        return unicode(res.content, "utf-8")

    def parse_responses(self, count_bucket):
        """Parse returned responses.

        When paged=True, manage paging using the API token mechanism

        When output file is set, write output files for paged output."""
        acs = []
        repeat = True
        page_count = 1
        self.paged_file_list = []
        while repeat:
            doc = self.request()
            tmp_response = json.loads(doc)
            if "results" in tmp_response:
                acs.extend(tmp_response["results"])
            else:
                raise ValueError("Invalid request\nQuery: %s\nResponse: %s"%(self.rule_payload, doc))
            if len(acs) < self.hard_max:
                repeat = False
                if self.paged or (count_bucket and self.search_v2):
                    if len(acs) > 0:
                        if self.output_file_path is not None:
                            # writing to file
                            file_name = self.output_file_path + "/{0}_{1}.json".format(
                                    str(datetime.datetime.utcnow().strftime(
                                        "%Y%m%d%H%M%S"))
                                    , str(self.file_name_prefix))
                            with codecs.open(file_name, "wb","utf-8") as out:
                                for item in tmp_response["results"]:
                                    out.write(json.dumps(item)+"\n")
                            self.paged_file_list.append(file_name)
                            # if writing to file, don't keep track of all the data in memory
                            acs = []
                        else:
                            # storing in memory, so give some feedback as to size
                            print >>sys.stderr,"[%8d bytes] %5d total activities retrieved..."%(
                                    sys.getsizeof(acs)
                                    , len(acs))
                    else:
                        print >> sys.stderr, "No results returned for rule:{0}".format(str(self.rule_payload))
                    # the API returns a "next" token while more pages remain
                    if "next" in tmp_response:
                        self.rule_payload["next"]=tmp_response["next"]
                        repeat = True
                        page_count += 1
                        print >> sys.stderr, "Fetching page {}...".format(page_count)
                    else:
                        if "next" in self.rule_payload:
                            del self.rule_payload["next"]
                        repeat = False
                    time.sleep(PAUSE)
            else:
                # stop iterating after reaching hard_max
                repeat = False
        return acs

    def get_time_series(self):
        """Generator yielding [timePeriod, count, datetime] triples from disk
        (paged mode) or from the in-memory time series."""
        if self.paged and self.output_file_path is not None:
            for file_name in self.paged_file_list:
                with codecs.open(file_name,"rb") as f:
                    for res in f:
                        rec = json.loads(res.strip())
                        t = datetime.datetime.strptime(rec["timePeriod"], TIME_FORMAT_SHORT)
                        yield [rec["timePeriod"], rec["count"], t]
        else:
            if self.tweet_times_flag:
                # todo: list of tweets, aggregate by bucket
                raise NotImplementedError("Aggregated buckets on json tweets not implemented!")
            else:
                for i in self.time_series:
                    yield i

    def get_activity_set(self):
        """Generator iterates through the entire activity set from memory or disk."""
        if self.paged and self.output_file_path is not None:
            for file_name in self.paged_file_list:
                with codecs.open(file_name,"rb") as f:
                    for res in f:
                        yield json.loads(res)
        else:
            for res in self.rec_dict_list:
                yield res

    def get_list_set(self):
        """Like get_activity_set, but returns a list containing values parsed by
        current Twacs parser configuration."""
        for rec in self.get_activity_set():
            yield self.twitter_parser.get_source_list(rec)

    def execute(self
            , pt_filter
            , max_results = 100
            , start = None
            , end = None
            , count_bucket = None # None is json
            , show_query = False):
        """Execute a query with filter, maximum results, start and end dates.

        Count_bucket determines the bucket size for the counts endpoint.
        If the count_bucket variable is set to a valid bucket size such
        as minute, day or week, then the activity counts endpoint will be used.
        Otherwise, the data endpoint is used."""
        # set class start and stop datetime variables
        self.set_dates(start, end)
        # make a friendlier file name from the rules
        self.name_munger(pt_filter)
        if self.paged or max_results > 500:
            # avoid making many small requests
            max_results = 500
        self.rule_payload = {
                'query': pt_filter
            }
        self.rule_payload["maxResults"] = int(max_results)
        if not self.search_v2:
            self.rule_payload["publisher"] = "twitter"
        if start:
            self.rule_payload["fromDate"] = self.fromDate
        if end:
            self.rule_payload["toDate"] = self.toDate
        # use the proper endpoint url
        self.stream_url = self.end_point
        if count_bucket:
            # remove "maxResults parameter for search v2 queries to counts endpoint
            if self.search_v2:
                del self.rule_payload["maxResults"]
            if not self.end_point.endswith("counts.json"):
                self.stream_url = self.end_point[:-5] + "/counts.json"
            if count_bucket not in ['day', 'minute', 'hour']:
                raise ValueError("Error. Invalid count bucket: %s \n"%str(count_bucket))
            self.rule_payload["bucket"] = count_bucket
        # for testing, show the query JSON and stop
        if show_query:
            print >>sys.stderr, "API query:"
            print >>sys.stderr, self.rule_payload
            sys.exit()
        # set up variable to catch the data in 3 formats
        self.time_series = []
        self.rec_dict_list = []
        self.rec_list_list = []
        self.res_cnt = 0
        # timing
        self.delta_t = 1 # keeps us from crashing
        # actual oldest tweet before now
        self.oldest_t = datetime.datetime.utcnow()
        # actual newest tweet more recent that 30 days ago
        # self.newest_t = datetime.datetime.utcnow() - datetime.timedelta(days=30)
        # search v2: newest date is more recent than 2006-03-01T00:00:00
        self.newest_t = datetime.datetime.strptime("2006-03-01T00:00:00.000z", TIME_FORMAT_LONG)
        #
        for rec in self.parse_responses(count_bucket):
            # parse_responses returns only the last set of activities retrieved, not all paged results.
            # to access the entire set, use the helper functions get_activity_set and get_list_set!
            self.res_cnt += 1
            self.rec_dict_list.append(rec)
            if count_bucket:
                # timeline data
                t = datetime.datetime.strptime(rec["timePeriod"], TIME_FORMAT_SHORT)
                tmp_tl_list = [rec["timePeriod"], rec["count"], t]
                self.tweet_times_flag = False
            else:
                # json activities
                # keep track of tweet times for time calculation
                tmp_list = self.twitter_parser.procRecordToList(rec)
                self.rec_list_list.append(tmp_list)
                t = datetime.datetime.strptime(tmp_list[POSTED_TIME_IDX], TIME_FORMAT_LONG)
                tmp_tl_list = [tmp_list[POSTED_TIME_IDX], 1, t]
                self.tweet_times_flag = True
            # this list is ***either*** list of buckets or list of tweet times!
            self.time_series.append(tmp_tl_list)
            # timeline requests don't return activities!
            if t < self.oldest_t:
                self.oldest_t = t
            if t > self.newest_t:
                self.newest_t = t
            self.delta_t = (self.newest_t - self.oldest_t).total_seconds()/60.
        return

    def get_rate(self):
        """Returns rate from last query executed"""
        if self.delta_t != 0:
            # activities per minute over the observed time span
            return float(self.res_cnt)/self.delta_t
        else:
            return None

    def __len__(self):
        """Returns the size of the results set when len(Query) is called."""
        try:
            return self.res_cnt
        except AttributeError:
            # no query has been executed yet
            return 0

    def __repr__(self):
        """Returns a string represenataion of the result set."""
        try:
            return "\n".join([json.dumps(x) for x in self.rec_dict_list])
        except AttributeError:
            return "No query completed."
if __name__ == "__main__":
    # Smoke-test driver (Python 2 print syntax). Exercises Query against the
    # Gnip search API using placeholder credentials -- replace the e-mail,
    # password and account URL before running for real.
    g = Query("<EMAIL>"
            , "XXXXXPASSWORDXXXXX"
            , "https://search.gnip.com/accounts/shendrickson/search/wayback.json")
    g.execute("bieber", 10)
    for x in g.get_activity_set():
        print x
    print g
    print g.get_rate()
    # timeline mode: hourly count buckets instead of individual activities
    g.execute("bieber", count_bucket = "hour")
    print g
    print len(g)
    # paged query: walks all result pages and writes them under output_file_path
    pg = Query("<EMAIL>"
            , "XXXXXPASSWORDXXXXX"
            , "https://search.gnip.com/accounts/shendrickson/search/wayback.json"
            , paged = True
            , output_file_path = "../data/")
    now_date = datetime.datetime.now()
    pg.execute("bieber"
            , end=now_date.strftime(TIME_FORMAT_LONG)
            , start=(now_date - datetime.timedelta(seconds=200)).strftime(TIME_FORMAT_LONG))
    for x in pg.get_activity_set():
        print x
    # show_query=True prints the generated API payload to stderr and exits
    g.execute("bieber", show_query=True)
|
from keras.callbacks import ModelCheckpoint
# 引入Tensorboard
from keras.callbacks import TensorBoard
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from keras.optimizers import Adam
from audio_data import graph_spectrogram
import numpy as np
# Pre-computed one-hot label tensors for the train/dev sets.
Y = np.load('train_dir/Y.npy')
Y_test = np.load('train_dir/Y_test.npy')
# number of frequncies in fourier decomposition
freq_n = 101
# number of samples in the audio clip
sample_n = 1998
# Swap (examples, classes, time) -> (examples, time, classes) so labels
# match the model's TimeDistributed output layout.
Y_FIX = Y.swapaxes(1,2)
Y_test_FIX = Y_test.swapaxes(1,2)
train_dir = "train_dir"
# Y = Y.reshape(1000,1375,4)
# Y_test = Y_test.reshape(100,1375,4)
def load_training_data(train_dir, num_train=1000, num_test=100):
    """Build spectrogram arrays for the train/dev wav files.

    Reads train<i>.wav / traintest<i>.wav from train_dir, converts each to a
    (freq_n, sample_n) spectrogram, and returns the stacked arrays reshaped
    to (examples, sample_n, freq_n) with the time axis first.

    Returns:
        (X, X_test) numpy arrays.
    """
    train_specs = np.zeros((num_train, freq_n, sample_n))
    test_specs = np.zeros((num_test, freq_n, sample_n))
    for idx in range(num_train):
        train_specs[idx, :, :] = graph_spectrogram(train_dir + "/train" + str(idx) + ".wav")
    for idx in range(num_test):
        test_specs[idx, :, :] = graph_spectrogram(train_dir + "/traintest" + str(idx) + ".wav")
    return (train_specs.reshape(num_train, sample_n, freq_n),
            test_specs.reshape(num_test, sample_n, freq_n))
X, X_test = load_training_data(train_dir)
#
# NOTE(review): the arrays computed by load_training_data above are
# immediately overwritten by the cached .npy versions below, so that call
# looks redundant -- confirm whether it is kept only to regenerate caches.
X = np.load('train_dir/X.npy')
X_test = np.load('train_dir/X_test.npy')
Ty = 1375 # The number of time steps in the output of the model
def model(input_shape):
    """Build the trigger-word detection graph in Keras.

    A Conv1D front-end (wide kernel, stride 1) feeds two stacked GRU layers,
    finishing with a per-timestep 4-way sigmoid dense head.

    Argument:
        input_shape -- shape of the model's input data (Keras convention)
    Returns:
        model -- Keras Model instance
    """
    inputs = Input(shape=input_shape)

    # Step 1: CONV layer (kernel_size=624 here; the coursera flavor uses 15).
    x = Conv1D(196, kernel_size=624, strides=1)(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.8)(x)

    # Step 2: first GRU layer, keeping the full sequence.
    x = GRU(units=128, return_sequences=True)(x)
    x = Dropout(0.8)(x)
    x = BatchNormalization()(x)

    # Step 3: second GRU layer. Note dropout is applied twice around the
    # batch norm here, mirroring the original network exactly.
    x = GRU(units=128, return_sequences=True)(x)
    x = Dropout(0.8)(x)
    x = BatchNormalization()(x)
    x = Dropout(0.8)(x)

    # Step 4: time-distributed dense layer (sigmoid per timestep).
    x = TimeDistributed(Dense(4, activation="sigmoid"))(x)

    return Model(inputs=inputs, outputs=x)
# num_train = 1000
# Y_NEW = np.zeros((num_train,Ty,4))
# for i in range(num_train):
#     spec = Y[i, :, :]
#     Y_NEW[i, :, :] = spec.reshape(Ty, 4, order='F')
# NOTE: rebinding 'model' from the factory function to the built instance
# shadows the function defined above.
model = model(input_shape = (sample_n, freq_n))
model.summary()
opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=["accuracy"])
# model.fit(X, Y, batch_size = 5, epochs=10)
model.fit(X, Y_FIX, batch_size = 5, epochs=10)
model.save("audio_wake_model.h5")
# loss, acc = model.evaluate(X_test, Y_test)
# print("Dev set accuracy = ", acc)
#
#
# def detect_triggerword(filename):
# plt.subplot(2, 1, 1)
#
# x = graph_spectrogram(filename)
# # the spectogram outputs (freqs, Tx) and we want (Tx, freqs) to input into the model
# x = x.swapaxes(0,1)
# x = np.expand_dims(x, axis=0)
# predictions = model.predict(x)
#
# plt.subplot(2, 1, 2)
# plt.plot(predictions[0,:,0])
# plt.ylabel('probability')
# plt.show()
# return predictions
|
<reponame>semanticbits/survey_stats
import re
import os
import yaml
import pandas as pd
import sqlalchemy as sa
import dask
import dask.multiprocessing
import dask.cache
from cytoolz.curried import map
from multiprocessing.pool import ThreadPool
from timeit import default_timer as timer
from survey_stats import log
from survey_stats import serdes
from survey_stats.types import DatasetConfig
from survey_stats.dbi import DatabaseConfig, DatasetPart, get_datafile_path
from survey_stats.etl.sas import process_sas_survey
from survey_stats.etl.spss import process_fwf_w_spss_loader
from survey_stats.etl.socrata import load_socrata_data, get_metadata_socrata
logger = log.getLogger(__name__)
def undash(col):
    """Prefix a leading-underscore column name with 'x' (e.g. '_age' -> 'x_age').

    Leading underscores are awkward in some SQL backends, so such columns
    are prefixed. Uses startswith so an empty string no longer raises
    IndexError (the previous col[0] indexed unconditionally).
    """
    return 'x' + col if col.startswith('_') else col
def load_survey_data(cfg, client=None):
    """Parse a dataset's raw survey files into a single pandas DataFrame.

    Dispatches on cfg.surveys.parse_mode ('sas' or 'spss'), materializes the
    lazy dask result and collects per-column value counts.

    Args:
        cfg: dataset config with .surveys and .facets sections.
        client: optional dask/distributed client handed to the loader.

    Returns:
        tuple: (svydf, mx) -- the computed DataFrame (index reset) and a
        dict mapping column name -> {value: count}.

    Raises:
        NotImplementedError: if parse_mode is neither 'sas' nor 'spss'.
    """
    logger.info('loading survey dfs')
    svydf = None
    if cfg.surveys.parse_mode == 'sas':
        svydf = process_sas_survey(cfg.surveys,
                                   facets=cfg.facets,
                                   client=client, lgr=logger)
    elif cfg.surveys.parse_mode == 'spss':
        svydf = process_fwf_w_spss_loader(cfg.surveys,
                                          facets=cfg.facets,
                                          client=client, lgr=logger)
    else:
        raise NotImplementedError('Config parse_mode must be spss or sas!')
    logger.info('loaded survey dfs', shape=svydf.shape)
    # materialize the lazy dask graph into a concrete pandas DataFrame
    svydf = svydf.compute()
    svydf = svydf.reset_index(drop=True)
    mx = (svydf.apply(lambda xf: xf.value_counts().to_dict())
          .to_dict())
    # The block below was an earlier sanity check that categorical columns
    # only carry string labels; kept for reference.
    # )[svydf.apply(lambda yf: yf.dropna().apply(lambda q: type(q) != str))
    #   .dropna().any(0)])
    # mx = (svydf.select_dtypes(include=['object', 'category'])
    #       .apply(lambda xf: xf.value_counts().to_dict()
    #       )[svydf.apply(lambda yf: yf.dropna().apply(lambda q: type(q) != str))
    #       .dropna().any(0)])
    # mx2 = (svydf.applymap(lambda yf: type(yf).__name__)[list(mx.keys())]).apply(lambda xf: xf.value_counts())
    # mx = mx.to_dict()
    # if len(mx) > 0:
    #     logger.error('Found category columns with non-str labels!', mx=mx, mx2=mx2)
    #     # TODO: check formats list in advance to see if expected fmts missing
    #     raise LookupError('Found categoricals with non-string labels!', mx.keys())
    return (svydf, mx)
def load_csv_mariadb_columnstore(df, tblname, engine):
    """Create a MariaDB ColumnStore table whose schema matches df.

    Infers CREATE TABLE DDL from the empty frame, rewrites types for
    ColumnStore (TEXT->VARCHAR(100), BIGINT->INT, sized FLOAT(n)->FLOAT)
    and executes it. NOTE(review): despite the final log line, no rows are
    loaded here -- the data load appears to happen externally via cpimport.

    Args:
        df: DataFrame whose dtypes define the schema.
        tblname: destination table name.
        engine: SQLAlchemy engine connected to MariaDB.
    """
    logger.info('creating schema for column store', name=tblname)
    start = timer()
    # df[:0] keeps dtypes but zero rows: cheap schema inference
    q = pd.io.sql.get_schema(df[:0], tblname, con=engine)
    q = q.replace('TEXT', 'VARCHAR(100)').replace('BIGINT', 'INT') + \
        ' engine=columnstore default character set=utf8;'
    q = re.sub(r'FLOAT\(\d+\)', 'FLOAT', q)
    with engine.connect() as con:
        con.execute(q)
    logger.info('bulk loaded data using cfimport', name=tblname, rows=df.shape[0], elapsed=timer()-start)
def load_csv_monetdb(df, tblname, engine):
    """Bulk-load a DataFrame into MonetDB via CSV and the COPY command.

    Dumps df to CSV, writes a replayable schema+COPY script under cache/,
    then drops any pre-existing table and runs schema + COPY in a single
    transaction.

    Args:
        df: DataFrame to load.
        tblname: destination table name (also names the CSV/SQL artifacts).
        engine: SQLAlchemy engine connected to MonetDB.
    """
    copy_tmpl = "COPY {nrows} OFFSET 2 RECORDS INTO {tbl} from '{csvf}'" + \
                " USING DELIMITERS ',','\n','\"' NULL AS ''"
    logger.info('creating schema for column store', name=tblname)
    start = timer()
    q = pd.io.sql.get_schema(df[:0], tblname, con=engine)
    # flatten the DDL to one line; rename 'year' -> 'yr'
    # (presumably because 'year' is reserved in MonetDB -- TODO confirm)
    q = q.replace('\n', ' ').replace('\t', ' ').replace('year', 'yr')
    logger.info('dumping to csv for bulk load', q=q)
    csvf = serdes.save_csv(tblname, df, index=False, header=True)
    csvf = os.path.abspath(csvf)
    # record count is over-allocated by 1000; OFFSET 2 skips the header row
    copy_cmd = copy_tmpl.format(nrows=df.shape[0]+1000,
                                tbl=tblname, csvf=csvf)
    sql_instrs = q + ';\n\n'
    sql_instrs += copy_cmd + ';\n\n'
    # keep a script of exactly what gets executed, for debugging/replay
    with open('cache/'+tblname+'.sql', 'w') as fh:
        fh.write(sql_instrs)
    csvtime = timer()
    logger.info('bulk loading csv into monetdb', csvf=csvf, elapsed=csvtime-start)
    with engine.begin() as con:
        try:
            con.execute("DROP TABLE %s" % tblname)
        except Exception as e:
            # continue if table non-existent,
            # else, what happened?!
            if str(e).find('no such table') == -1:
                raise
        con.execute(q)
        con.execute(copy_cmd)
    logger.info('bulk loaded data using cfimport', name=tblname,
                rows=df.shape[0], elapsed_copy=timer()-csvtime,
                elapsed=timer()-start)
def load_sqlalchemy(df, engine, tbl):
    """Load a DataFrame into a table via pandas to_sql, replacing any
    existing table of the same name.

    Args:
        df: DataFrame to persist.
        engine: SQLAlchemy engine (log messages assume MonetDB).
        tbl: destination table name.
    """
    logger.info('loading df into monetdb table', name=tbl)
    start = timer()
    with engine.begin() as con:
        df.to_sql(tbl, con, chunksize=10000, if_exists='replace', index=False)
    logger.info('loaded dataframe into monetdb', tbl=tbl, rows=df.shape[0], elapsed_step=timer()-start)
def bulk_load_df(tblname, engine):
    """Bulk-load a feather-cached table into the database.

    Dispatches on the engine dialect: MariaDB ColumnStore for 'mysql',
    CSV COPY for 'monetdb'. Any other dialect is silently ignored.
    """
    logger.info('loading data from feather', name=tblname)
    frame = serdes.load_feather(tblname)
    dialect = engine.name
    if dialect == 'mysql':
        load_csv_mariadb_columnstore(frame, tblname, engine)
    elif dialect == 'monetdb':
        load_csv_monetdb(frame, tblname, engine)
def setup_tables(cfg, dburl):
    """Bulk-load a dataset's cached survey/socrata tables into the database.

    Args:
        cfg: dataset config; .surveys/.socrata decide which parts exist.
        dburl: SQLAlchemy database URL.
    """
    engine = sa.create_engine(dburl)
    if cfg.surveys:
        ksvy = serdes.surveys_key4id(cfg.id)
        bulk_load_df(ksvy, engine)
    if cfg.socrata:
        ksoc = serdes.socrata_key4id(cfg.id)
        bulk_load_df(ksoc, engine)
def process_dataset(cfg, dbc, cache_dir, resume=True):
    """Generate (or resume generating) the cached artifacts for one dataset.

    Produces feather files for the socrata extract plus its schema/facets,
    and for the parsed survey data (with a yaml metadata sidecar).

    Args:
        cfg: DatasetConfig describing the dataset.
        dbc: DatabaseConfig (currently unused; see commented code below).
        cache_dir: directory where the feather artifacts live.
        resume: when True, skip generation steps whose artifacts exist.
    """
    logger.bind(dataset=cfg.id)
    logger.info('checking for socrata, processing', s=cfg.socrata)
    if cfg.socrata:
        ksoc = get_datafile_path(DatasetPart.SOCRATA.value, cfg.id, cache_dir)
        if os.path.isfile(ksoc) and resume:
            logger.warn('found socrata artifact, moving on', resume=resume)
            # BUGFIX: dsoc was previously left unbound on this resume path,
            # so get_metadata_socrata below raised NameError. Reload the
            # cached artifact instead.
            dsoc = pd.read_feather(ksoc)
        else:
            logger.info('generating socrata data', resume=resume, overwriting=os.path.isfile(ksoc))
            dsoc = load_socrata_data(cfg.socrata, cfg.facets)
            logger.info('saving socrata data to feather', f=ksoc)
            dsoc.to_feather(ksoc)
            logger.info('saved socrata data to feather', f=ksoc)
        # short process, can be run regardless
        (qns, facs) = get_metadata_socrata(cfg.socrata, dsoc, cfg.facets)
        logger.info('created schema for socrata')
        qns.to_feather(get_datafile_path(DatasetPart.SCHEMA.value, cfg.id, cache_dir))
        facs.to_feather(get_datafile_path(DatasetPart.FACETS.value, cfg.id, cache_dir))
    logger.info('checking for surveys, processing', s=cfg.surveys)
    if cfg.surveys:
        svyf = get_datafile_path(DatasetPart.SURVEYS.value, cfg.id, cache_dir)
        svy_descf = svyf + '.yaml'
        if os.path.isfile(svyf) and resume:
            logger.warn('found surveys artifact, moving on', resume=resume)
        else:
            logger.info('generating surveys data', resume=resume, overwriting=os.path.isfile(svyf))
            (svydf, svymeta) = load_survey_data(cfg)
            # pass the path as a keyword (a bare positional arg here
            # previously relied on %-interpolation with no placeholder)
            logger.info('saving survey data to feather', f=svyf)
            svydf.to_feather(svyf)
            logger.info('saving survey desc data to feather', f=svy_descf)
            with open(svy_descf, 'w') as fh:
                yaml.dump(svymeta, fh)
            logger.info('saved survey data to feather', name=svy_descf)
    # if dbc is not None:
    #     setup_tables(cfg, dbc.uri)
    logger.unbind('dataset')
def restore_data(sql_conn):
    """Re-load every configured dataset's cached tables into the database.

    Iterates the yaml files under config/data and runs setup_tables for
    each one.

    Args:
        sql_conn: SQLAlchemy database URL passed through to setup_tables.
    """
    configs = map(lambda x: os.path.join('config/data', x),
                  os.listdir('config/data'))
    logger.info('restoring tables to survey database')
    for yaml_f in configs:
        # NOTE(review): load_datasets parses these same files with
        # DatasetConfig.from_yaml; confirm DatabaseConfig is intended here.
        cfg = DatabaseConfig.from_yaml(yaml_f)
        logger.bind(dataset=cfg.id)
        setup_tables(cfg, sql_conn)
        logger.unbind('dataset')
def load_datasets(cache_dir, dbc, dsets, parse_all=False, resume=True):
    """Process the requested datasets, generating their cached artifacts.

    Args:
        cache_dir: directory for feather artifacts.
        dbc: database config forwarded to process_dataset.
        dsets: iterable of dataset ids (or config paths) to process.
        parse_all: when True, ignore dsets and process everything found
            under config/data.
        resume: skip artifacts that already exist.
    """
    import cytoolz.dicttoolz as dz
    # opportunistic dask cache (8 GB) plus a threaded scheduler
    cache = dask.cache.Cache(8e9)
    cache.register()
    dask.set_options(get=dask.threaded.get, pool=ThreadPool())
    configs = list(
        map(lambda x: os.path.join('config/data', x),
            os.listdir('config/data')))
    cmap = {k: DatasetConfig.from_yaml(k) for k in configs}
    dsids = dz.valmap(lambda ds: ds.id, cmap)
    # allow lookup both by config path and by dataset id
    cmap = dz.merge(cmap, {d.id: d for d in cmap.values()})
    if parse_all:
        dsets = dsids
    for d in dsets:
        # BUGFIX: resume was previously accepted but never forwarded, so
        # callers could not disable artifact resumption.
        process_dataset(cmap[d], dbc, cache_dir, resume=resume)
|
<gh_stars>1-10
"""Hard filtering of genomic variants.
"""
from distutils.version import LooseVersion
import os
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do, programs
from bcbio.variation import vcfutils
# ## General functionality
def hard_w_expression(vcf_file, expression, data, filterext=""):
    """Perform hard filtering using bcftools expressions like %QUAL < 20 || DP < 4.

    Records matching the expression receive a soft FILTER annotation
    (--soft-filter '+') rather than being removed from the file.

    Args:
        vcf_file: input VCF (optionally bgzipped).
        expression: bcftools filter expression marking failing records.
        data: bcbio sample dictionary (provides config).
        filterext: optional suffix distinguishing filter passes (e.g. "SNP").

    Returns:
        Path to the filtered output; bgzipped+indexed when the name
        ends in .vcf.gz.
    """
    base, ext = utils.splitext_plus(vcf_file)
    out_file = "{base}-filter{filterext}{ext}".format(**locals())
    if not utils.file_exists(out_file):
        with file_transaction(out_file) as tx_out_file:
            bcftools = config_utils.get_program("bcftools", data["config"])
            output_type = "z" if out_file.endswith(".gz") else "v"
            variant_regions = utils.get_in(data, ("config", "algorithm", "variant_regions"))
            # restrict filtering to the configured variant regions, if any
            intervals = ("-t %s" % vcfutils.bgzip_and_index(variant_regions, data["config"])
                         if variant_regions else "")
            cmd = ("{bcftools} filter -O {output_type} {intervals} --soft-filter '+' "
                   "-e '{expression}' -m '+' {vcf_file} > {tx_out_file}")
            do.run(cmd.format(**locals()), "Hard filtering %s with %s" % (vcf_file, expression), data)
    if out_file.endswith(".vcf.gz"):
        out_file = vcfutils.bgzip_and_index(out_file, data["config"])
    return out_file
# ## Caller specific
def freebayes(in_file, ref_file, vrn_files, data):
    """FreeBayes filters: trying custom filter approach before falling back on hard filtering.
    """
    # The experimental bcbio.variation-based filter (_freebayes_custom) is
    # currently disabled; apply depth/quality hard filtering directly.
    # out_file = _freebayes_custom(in_file, ref_file, data)
    return _freebayes_hard(in_file, data)
def _freebayes_custom(in_file, ref_file, data):
    """Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results.

    Experimental: for testing new methods.

    Args:
        in_file: FreeBayes VCF to filter.
        ref_file: reference genome FASTA.
        data: bcbio sample dictionary.

    Returns:
        Path to the filtered VCF, or None when paired tumor/normal calling
        is in use or the installed bcbio.variation is older than 0.1.1.
    """
    if vcfutils.get_paired_phenotype(data):
        return None
    config = data["config"]
    bv_ver = programs.get_version("bcbio_variation", config=config)
    if LooseVersion(bv_ver) < LooseVersion("0.1.1"):
        return None
    out_file = "%s-filter%s" % os.path.splitext(in_file)
    if not utils.file_exists(out_file):
        tmp_dir = utils.safe_makedir(os.path.join(os.path.dirname(in_file), "tmp"))
        bv_jar = config_utils.get_jar("bcbio.variation",
                                      config_utils.get_program("bcbio_variation", config, "dir"))
        resources = config_utils.get_resources("bcbio_variation", config)
        jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
        java_args = ["-Djava.io.tmpdir=%s" % tmp_dir]
        cmd = ["java"] + jvm_opts + java_args + ["-jar", bv_jar, "variant-filter", "freebayes",
                                                in_file, ref_file]
        do.run(cmd, "Custom FreeBayes filtering using bcbio.variation")
    return out_file
def _freebayes_hard(in_file, data):
    """Perform filtering of FreeBayes results, removing low confidence calls.

    Filters using cutoffs on depth based on Meynert et al's work modeling sensitivity
    of homozygote and heterozygote calling on depth:

    http://www.ncbi.nlm.nih.gov/pubmed/23773188

    Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome.
    """
    # Heterozygous-leaning calls (AF <= 0.5) require either adequate depth
    # or quality; homozygous-leaning calls fail only at low depth AND quality.
    filters = ("(AF <= 0.5 && (DP < 4 || (DP < 13 && %QUAL < 10))) || "
               "(AF > 0.5 && (DP < 4 && %QUAL < 50))")
    return hard_w_expression(in_file, filters, data)
def gatk_snp_hard(in_file, data):
    """Perform hard filtering on GATK SNPs using best-practice recommendations.
    """
    cutoffs = ["QD < 2.0", "MQ < 40.0", "FS > 60.0",
               "MQRankSum < -12.5", "ReadPosRankSum < -8.0"]
    # GATK Haplotype caller (v2.2) appears to have much larger HaplotypeScores
    # resulting in excessive filtering, so avoid this metric for it
    variantcaller = utils.get_in(data, ("config", "algorithm", "variantcaller"), "gatk")
    if variantcaller != "gatk-haplotype":
        cutoffs.append("HaplotypeScore > 13.0")
    return hard_w_expression(in_file, " || ".join(cutoffs), data, "SNP")
def gatk_indel_hard(in_file, data):
    """Perform hard filtering on GATK indels using best-practice recommendations.
    """
    expression = " || ".join(["QD < 2.0", "ReadPosRankSum < -20.0", "FS > 200.0"])
    return hard_w_expression(in_file, expression, data, "INDEL")
|
#import torch.nn as nn
import torch
from torch.nn import functional as F
#from PIL import Image
import numpy as np
import pandas as pd
#import os
import os.path as osp
import shutil
#import math
def save_checkpoint(state, best_pred, epoch, is_best, checkpoint_path, filename='./checkpoint/checkpoint.pth.tar'):
    """Persist a training checkpoint; copy it aside when it is the best so far.

    Args:
        state: serializable checkpoint payload handed to torch.save.
        best_pred: best metric value, embedded in the copied file name.
        epoch: zero-based epoch index (stored as epoch + 1 in the name).
        is_best: when True, also copy to checkpoint_path as model_<epoch>_<pred>.
        checkpoint_path: directory for the "best" copies.
        filename: path of the rolling checkpoint file.
    """
    torch.save(state, filename)
    if not is_best:
        return
    best_name = 'model_{:03d}_{:.4f}.pth.tar'.format((epoch + 1), best_pred)
    shutil.copyfile(filename, osp.join(checkpoint_path, best_name))
def adjust_learning_rate(opt, optimizer, epoch):
    """Apply the scheduled learning rate for this epoch to the optimizer.

    'step' mode decays the base LR by 10x every opt.step epochs; 'poly'
    uses the polynomial schedule lr * (1 - epoch/num_epochs)^0.9.

    Returns:
        The learning rate that was written into every param group.
    Raises:
        ValueError: for an unrecognized opt.lr_mode.
    """
    if opt.lr_mode == 'step':
        new_lr = opt.lr * (0.1 ** (epoch // opt.step))
    elif opt.lr_mode == 'poly':
        new_lr = opt.lr * (1 - epoch / opt.num_epochs) ** 0.9
    else:
        raise ValueError('Unknown lr mode {}'.format(opt.lr_mode))
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr
def one_hot_it(label, label_info):
    """Convert an RGB label image to a one-hot map of shape [H, W, num_classes].

    Args:
        label: array of colour triples, shape (H, W, 3).
        label_info: mapping of class name -> RGB colour; dict iteration
            order defines the class channel order.
    Returns:
        boolean array (H, W, num_classes), True where the pixel colour
        matches that class's colour exactly.
    """
    class_maps = []
    for class_name in label_info:
        matches = np.equal(label, label_info[class_name])
        class_maps.append(np.all(matches, axis=-1))
    return np.stack(class_maps, axis=-1)
def compute_score(predict, target, forground = 1,smooth=1):
    """Binary segmentation metrics for one prediction/target pair.

    WARNING: mutates predict and target in place (all non-foreground
    values are zeroed before comparison).

    Args:
        predict: predicted label array (2D).
        target: ground-truth label array (2D, same shape).
        forground: label value treated as foreground.
        smooth: additive smoothing to avoid division by zero.

    Returns:
        tuple: (dice, precsion, jaccard, Sensitivity, Specificity)
    """
    # score/count are never used; preserved as-is from the original code
    score = 0
    count = 0
    target[target!=forground]=0
    predict[predict!=forground]=0
    assert(predict.shape == target.shape)
    overlap = ((predict == forground)*(target == forground)).sum() #TP
    union=(predict == forground).sum() + (target == forground).sum()-overlap #FP+FN+TP
    FP=(predict == forground).sum()-overlap #FP
    FN=(target == forground).sum()-overlap #FN
    TN= target.shape[0]*target.shape[1]-union #TN
    #print('overlap:',overlap)
    # NOTE(review): 'precsion' counts all matching pixels (TN included) over
    # the whole image, i.e. it is closer to accuracy than precision --
    # confirm the intended definition before reusing this value.
    dice=(2*overlap +smooth)/ (union+overlap+smooth)
    precsion=((predict == target).sum()+smooth) / (target.shape[0]*target.shape[1]+smooth)
    jaccard=(overlap+smooth) / (union+smooth)
    Sensitivity=(overlap+smooth) / ((target == forground).sum()+smooth)
    Specificity=(TN+smooth) / (FP+TN+smooth)
    return dice,precsion,jaccard,Sensitivity,Specificity
def eval_multi_seg(predict, target, num_classes):
    """Per-class Dice scores for a batch of multi-class segmentations.

    Args:
        predict: tensor of predicted class indices (same shape as target).
        target: tensor of ground-truth class indices.
        num_classes: total number of classes including background (class 0).

    Returns:
        list of smoothed Dice coefficients for classes 1..num_classes-1
        (background is skipped).
    """
    pred_seg = predict.data.cpu().numpy()
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent.
    label_seg = target.data.cpu().numpy().astype(int)
    assert pred_seg.shape == label_seg.shape

    dice_scores = []
    for cls in range(1, num_classes):
        overlap = ((pred_seg == cls) * (label_seg == cls)).sum()
        union = (pred_seg == cls).sum() + (label_seg == cls).sum()
        # 0.1 smoothing keeps the score defined when a class is absent
        dice_scores.append((2 * overlap + 0.1) / (union + 0.1))
    return dice_scores
def eval_seg(predict, target, forground = 1):
    """Per-sample Dice/Precision/Jaccard for a batch of binary predictions.

    Args:
        predict: raw logits tensor; thresholded via round(sigmoid(.)).
        target: ground-truth tensor, same shape as predict.
        forground: label value treated as foreground.

    Returns:
        (Dice, Precsion, Jaccard): parallel lists, one entry per sample.
    """
    pred_seg = torch.round(torch.sigmoid(predict)).int()
    pred_seg = pred_seg.data.cpu().numpy()
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent.
    label_seg = target.data.cpu().numpy().astype(int)
    assert (pred_seg.shape == label_seg.shape)
    Dice = []
    Precsion = []
    Jaccard = []
    n = pred_seg.shape[0]
    for i in range(n):
        # BUGFIX: compute_score returns five values; the previous 3-way
        # unpacking raised ValueError on every call. The extra sensitivity/
        # specificity values are discarded to keep the return interface.
        # Also forward 'forground' (it was previously ignored).
        dice, precsion, jaccard, _sens, _spec = compute_score(
            pred_seg[i], label_seg[i], forground=forground)
        Dice.append(dice)
        Precsion.append(precsion)
        Jaccard.append(jaccard)
    return Dice, Precsion, Jaccard
def batch_pix_accuracy(pred, label, nclass=1):
    """Count correctly classified pixels and total labeled pixels.

    Args:
        pred: raw model output; logits (nclass==1) or per-class scores with
            a channel dimension at dim 1 (nclass>1).
        label: ground-truth tensor of class indices.
        nclass: number of foreground classes.

    Returns:
        (pixel_correct, pixel_labeled)
    """
    if nclass == 1:
        pred = torch.round(torch.sigmoid(pred)).int()
        pred = pred.cpu().numpy()
    else:
        # BUGFIX: torch.max returns a (values, indices) tuple; the previous
        # code kept the tuple and crashed on .cpu(). We want the argmax
        # indices over the channel dimension.
        _, pred = torch.max(pred, dim=1)
        pred = pred.cpu().numpy()
    label = label.cpu().numpy()
    pixel_labeled = np.sum(label >= 0)
    pixel_correct = np.sum(pred == label)
    assert pixel_correct <= pixel_labeled, \
        "Correct area should be smaller than Labeled"
    return pixel_correct, pixel_labeled
def batch_intersection_union(predict, target, nclass):
    """Batch Intersection of Union

    Args:
        predict: input 4D tensor (raw scores; sigmoid/argmax applied here)
        target: label 3D tensor
        nclass: number of categories (int),note: not include background

    Returns:
        (area_inter, area_union): scalars for nclass==1, per-class count
        arrays for nclass>1.
    """
    if nclass==1:
        # binary case: threshold the sigmoid output and count overlap directly
        pred=torch.round(torch.sigmoid(predict)).int()
        pred=pred.cpu().numpy()
        target = target.cpu().numpy()
        area_inter=np.sum(pred*target)
        area_union=np.sum(pred)+np.sum(target)-area_inter
        return area_inter,area_union
    if nclass>1:
        _, predict = torch.max(predict, 1)
        mini = 1
        maxi = nclass
        nbins = nclass
        # shift class ids by 1 so background becomes 1 and can be excluded
        # by the histogram range (mini+1, maxi) below
        predict = predict.cpu().numpy() + 1
        target = target.cpu().numpy() + 1
        # target = target + 1
        # zero out predictions over unlabeled pixels so they never count
        predict = predict * (target > 0).astype(predict.dtype)
        intersection = predict * (predict == target)
        # areas of intersection and union
        area_inter, _ = np.histogram(intersection, bins=nbins-1, range=(mini+1, maxi))
        area_pred, _ = np.histogram(predict, bins=nbins-1, range=(mini+1, maxi))
        area_lab, _ = np.histogram(target, bins=nbins-1, range=(mini+1, maxi))
        area_union = area_pred + area_lab - area_inter
        assert (area_inter <= area_union).all(), \
            "Intersection area should be smaller than Union area"
        return area_inter, area_union
def pixel_accuracy(im_pred, im_lab):
    """Count correct predictions over labeled (> 0) pixels only.

    Pixels labeled 0 are treated as unlabeled and excluded, so detections
    in unlabeled portions of the image are neither rewarded nor penalized.

    Returns:
        (pixel_correct, pixel_labeled)
    """
    pred_arr = np.asarray(im_pred)
    lab_arr = np.asarray(im_lab)
    labeled_mask = lab_arr > 0
    pixel_labeled = np.sum(labeled_mask)
    pixel_correct = np.sum((pred_arr == lab_arr) * labeled_mask)
    return pixel_correct, pixel_labeled
def reverse_one_hot(image):
    """Collapse a one-hot tensor with a leading class channel to class keys.

    # Arguments
        image: one-hot tensor laid out (num_classes, H, W).
    # Returns
        (H, W) tensor where each pixel holds the index of its max channel.
    """
    channels_last = image.permute(1, 2, 0)
    return torch.argmax(channels_last, dim=-1)
def colour_code_segmentation(image, label_values):
    """Given a 1-channel array of class keys, colour code the segmentation results.

    # Arguments
        image: single channel array where each value represents the class key.
        label_values: mapping of class name -> colour triple; dict order
            defines the class-index order.
    # Returns
        Colour coded image for segmentation visualization (adds a trailing
        channel dimension).
    """
    palette = np.array([label_values[key] for key in label_values])
    # fancy-index the palette by class key for every pixel at once
    return palette[image.astype(int)]
#def compute_global_accuracy(pred, label):
# pred = pred.flatten()
# label = label.flatten()
# total = len(label)
# count = 0.0
# for i in range(total):
# if pred[i] == label[i]:
# count = count + 1.0
# return float(count) / float(total) |
<filename>scripts/mgear/maya/rigbits/rbf_manager_ui.py
#!/usr/bin/env python
"""
A tool to manage a number of rbf type nodes under a user defined setup(name)
Steps -
set Driver
set Control for driver(optional, recommended)
select attributes to driver RBF nodes
Select Node to be driven in scene(Animation control, transform)
Name newly created setup
select attributes to be driven by the setup
add any additional driven nodes
position driver(via the control)
position the driven node(s)
select add pose
Add notes -
Please ensure the driver node is NOT in the same position more than once. This
will cause the RBFNode to fail while calculating. This can be fixed by deleting
any two poses with the same input values.
Edit Notes -
Edit a pose by selecting "pose #" in the table. (which recalls recorded pose)
reposition any controls involved in the setup
select "Edit Pose"
Delete notes -
select desired "pose #"
select "Delete Pose"
Mirror notes -
setups/Controls will succefully mirror if they have had their inverseAttrs
configured previously.
2.0 -------
LOOK into coloring the pose and how close it is
import replace name support (will work through json manually)
support live connections
settings support for suffix, etc
rename existing setup
newScene callback
Attributes:
CTL_SUFFIX (str): suffix for anim controls
DRIVEN_SUFFIX (str): suffix for driven group nodes
EXTRA_MODULE_DICT (str): name of the dict which holds additional modules
MGEAR_EXTRA_ENVIRON (str): environment variable to query for paths
TOOL_NAME (str): name of UI
TOOL_TITLE (str): title as it appears in the ui
__version__ (float): UI version
Deleted Attributes:
RBF_MODULES (dict): of supported rbf modules
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__credits__ = ["<NAME>", "<NAME>"]
"""
# python
import os
import imp
from functools import partial
# maya
import maya.cmds as mc
import pymel.core as pm
import maya.OpenMaya as om
import maya.OpenMayaUI as mui
# mgear
from mgear.maya import pyqt
import mgear.string as mString
from mgear.maya.synoptic import utils
from mgear.vendor.Qt import QtWidgets, QtCore, QtCompat
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
# rbf
import rbf_io
import rbf_node
# =============================================================================
# Constants
# =============================================================================
__version__ = "1.0"
TOOL_NAME = "RBF Manager UI"
TOOL_TITLE = "{} v{}".format(TOOL_NAME, __version__)
# suffixes reused from rbf_node so naming stays consistent across modules
DRIVEN_SUFFIX = rbf_node.DRIVEN_SUFFIX
CTL_SUFFIX = rbf_node.CTL_SUFFIX
# environment variable that may point at a module providing extra menu items
MGEAR_EXTRA_ENVIRON = "MGEAR_RBF_EXTRA"
EXTRA_MODULE_DICT = "extraFunc_dict"
# suffix appended to mirrored setups
MIRROR_SUFFIX = "_mr"
# =============================================================================
# general functions
# =============================================================================
def testFunctions(*args):
    """test function for connecting signals during debug

    Args:
        *args: values emitted by whichever signal was connected
    """
    # Python 2 print statement; this module targets Maya's py2 interpreter.
    print '!!', args
def getPlugAttrs(nodes, attrType="all"):
    """Get a list of attributes to display to the user

    Args:
        nodes (list): names of nodes to query for attributes
        attrType (str, optional): "all", "cb" (channel box) or "keyable"

    Returns:
        list: of node.attr plug strings
    """
    plugAttrs = []
    for node in nodes:
        if attrType == "all":
            attrs = mc.listAttr(node, se=True, u=False)
        elif attrType == "cb":
            attrs = mc.listAttr(node, se=True, u=False, cb=True)
        elif attrType == "keyable":
            attrs = mc.listAttr(node, se=True, u=False, keyable=True)
        else:
            # BUGFIX: an unrecognized attrType previously left 'attrs'
            # unbound and raised NameError; skip the node instead.
            attrs = None
        if attrs is None:
            continue
        # extend instead of a list comprehension used for side effects
        plugAttrs.extend("{}.{}".format(node, a) for a in attrs)
    return plugAttrs
def sortRBF(name, rbfType=None):
    """Get node wrapped in RBFNode class based on the type of node

    Args:
        name (str): name of the RBFNode in scene
        rbfType (str, optional): type of RBF to get instance from

    Returns:
        RBFNode: instance of RBFNode; implicitly None when the node does
        not exist in the scene and no rbfType fallback was provided
    """
    # an existing scene node wins: its actual nodeType picks the module
    if mc.objExists(name) and mc.nodeType(name) in rbf_io.RBF_MODULES:
        rbfType = mc.nodeType(name)
        return rbf_io.RBF_MODULES[rbfType].RBFNode(name)
    elif rbfType is not None:
        return rbf_io.RBF_MODULES[rbfType].RBFNode(name)
def getEnvironModules():
    """if there are any environment variables set that load additional
    modules for the UI, query and return dict

    Returns:
        dict: displayName:funcObject, or None when the environment variable
        is unset, points at a missing file, or the module lacks the dict
    """
    extraModulePath = os.environ.get(MGEAR_EXTRA_ENVIRON, None)
    if extraModulePath is None or not os.path.exists(extraModulePath):
        return None
    # imp.load_source executes the module file (Python 2 era API)
    exModule = imp.load_source(MGEAR_EXTRA_ENVIRON,
                               os.path.abspath(extraModulePath))
    additionalFuncDict = getattr(exModule, EXTRA_MODULE_DICT, None)
    if additionalFuncDict is None:
        mc.warning("'{}' not found in {}".format(EXTRA_MODULE_DICT,
                                                 extraModulePath))
        print "No additional menu items added to {}".format(TOOL_NAME)
    return additionalFuncDict
def selectNode(name):
    """Convenience function, to ensure no errors when selecting nodes in UI

    Args:
        name (str): name of node to be selected
    """
    if mc.objExists(name):
        mc.select(name)
    else:
        # Python 2 print statement
        print name, "No longer exists for selection!"
# =============================================================================
# UI General Functions
# =============================================================================
def getControlAttrWidget(nodeAttr, label=""):
    """get a cmds.attrControlGrp wrapped in a qtWidget, still connected
    to the specified attr

    Args:
        nodeAttr (str): node.attr, the target for the attrControlGrp
        label (str, optional): name for the attr widget

    Returns:
        tuple: (QLineEdit connected to the attr, name of the attrControlGrp)
    """
    mAttrFeild = mc.attrControlGrp(attribute=nodeAttr,
                                   label=label,
                                   po=True)
    ptr = mui.MQtUtil.findControl(mAttrFeild)
    # 'long' is Python 2 only; this module targets Maya's py2 interpreter
    controlWidget = QtCompat.wrapInstance(long(ptr), base=QtWidgets.QWidget)
    controlWidget.setContentsMargins(0, 0, 0, 0)
    controlWidget.setMinimumWidth(0)
    attrEdit = [wdgt for wdgt in controlWidget.children()
                if type(wdgt) == QtWidgets.QLineEdit]
    # Re-parent the group's children under the line edit so only the edit
    # field remains visible; the original group widget is hidden below.
    [wdgt.setParent(attrEdit[0]) for wdgt in controlWidget.children()
     if type(wdgt) == QtCore.QObject]
    attrEdit[0].setParent(None)
    controlWidget.setParent(attrEdit[0])
    controlWidget.setHidden(True)
    return attrEdit[0], mAttrFeild
def HLine():
    """Separator line for widgets.

    Returns:
        QFrame: sunken horizontal line for visually separating UI elements
    """
    frame = QtWidgets.QFrame()
    frame.setFrameShape(QtWidgets.QFrame.HLine)
    frame.setFrameShadow(QtWidgets.QFrame.Sunken)
    return frame
def VLine():
    """Separator line for widgets.

    Returns:
        QFrame: sunken vertical line for visually separating UI elements
    """
    frame = QtWidgets.QFrame()
    frame.setFrameShape(QtWidgets.QFrame.VLine)
    frame.setFrameShadow(QtWidgets.QFrame.Sunken)
    return frame
def show(dockable=True, newSceneCallBack=True, *args):
    """Launch the RBF manager UI, replacing any existing instance.

    Args:
        dockable (bool, optional): should the UI be dockable in Maya
        newSceneCallBack (bool, optional): register the new-scene callback
        *args: swallows extra args passed by Maya UI callbacks

    Returns:
        RBFManagerUI: the new UI instance
    """
    global RBF_UI
    if 'RBF_UI' in globals():
        try:
            RBF_UI.close()
        except TypeError:
            pass
    RBF_UI = RBFManagerUI(parent=pyqt.maya_main_window(),
                          newSceneCallBack=newSceneCallBack)
    # BUGFIX: honour the dockable argument; it was previously ignored and
    # hard-coded to True.
    RBF_UI.show(dockable=dockable)
    return RBF_UI
def genericWarning(parent, warningText):
    """generic prompt warning with the provided text

    Args:
        parent (QWidget): Qwidget to be parented under
        warningText (str): information to display to the user

    Returns:
        QtCore.Response: of what the user chose. For warnings
    """
    prompt = QtWidgets.QMessageBox(parent)
    prompt.setText(warningText)
    return prompt.exec_()
def promptAcceptance(parent, descriptionA, descriptionB):
    """Warn user, asking for permission

    Args:
        parent (QWidget): to be parented under
        descriptionA (str): info
        descriptionB (str): further info

    Returns:
        QtCore.Response: accept, deline, reject
    """
    prompt = QtWidgets.QMessageBox(parent)
    prompt.setText(descriptionA)
    prompt.setInformativeText(descriptionB)
    buttons = QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
    prompt.setStandardButtons(buttons)
    # default to Cancel so an accidental Enter is non-destructive
    prompt.setDefaultButton(QtWidgets.QMessageBox.Cancel)
    return prompt.exec_()
class ClickableLineEdit(QtWidgets.QLineEdit):
    """subclass to allow for clickable lineEdit, as a button

    Attributes:
        clicked (QtCore.Signal): emitted on left click, carrying the
            line edit's current text
    """
    clicked = QtCore.Signal(str)

    def mousePressEvent(self, event):
        # Left click acts as a button press; any other button keeps the
        # default QLineEdit behavior.
        if event.button() == QtCore.Qt.LeftButton:
            self.clicked.emit(self.text())
        else:
            super(ClickableLineEdit, self).mousePressEvent(event)
class TabBar(QtWidgets.QTabBar):
    """Subclass to get a taller tab widget, for readability
    """

    def __init__(self):
        super(TabBar, self).__init__()

    def tabSizeHint(self, index):
        # keep the default width but force tabs to be 25px tall
        width = QtWidgets.QTabBar.tabSizeHint(self, index).width()
        return QtCore.QSize(width, 25)
class RBFSetupInput(QtWidgets.QDialog):
    """Allow the user to select which attrs will drive the rbf nodes in a setup

    Attributes:
        drivenListWidget (QListWidget): widget to display attrs to drive setup
        okButton (QPushButton): confirm button
        result (list): [setupName, [selected attr names]] once onOK runs
        setupField (bool): Should the setup lineEdit widget be displayed
        setupLineEdit (QLineEdit): name selected by user
    """

    def __init__(self, listValues, setupField=True, parent=None):
        """setup the UI widgets

        Args:
            listValues (list): attrs to be displayed on the list
            setupField (bool, optional): should the setup line edit be shown
            parent (QWidget, optional): widget to parent this to
        """
        super(RBFSetupInput, self).__init__(parent=parent)
        self.setWindowTitle(TOOL_TITLE)
        mainLayout = QtWidgets.QVBoxLayout()
        self.setLayout(mainLayout)
        self.setupField = setupField
        self.result = []
        # setup-name row -----------------------------------------------------
        setupLayout = QtWidgets.QHBoxLayout()
        setupLabel = QtWidgets.QLabel("Specify Setup Name")
        self.setupLineEdit = QtWidgets.QLineEdit()
        self.setupLineEdit.setPlaceholderText("<name>_<side><int> // skirt_L0")
        setupLayout.addWidget(setupLabel)
        setupLayout.addWidget(self.setupLineEdit)
        if setupField:
            mainLayout.addLayout(setupLayout)
        # driven attribute list ----------------------------------------------
        drivenLayout = QtWidgets.QVBoxLayout()
        drivenLabel = QtWidgets.QLabel("Select Driven Attributes")
        self.drivenListWidget = QtWidgets.QListWidget()
        self.drivenListWidget.setToolTip("Right Click for sorting!")
        selType = QtWidgets.QAbstractItemView.ExtendedSelection
        self.drivenListWidget.setSelectionMode(selType)
        self.drivenListWidget.addItems(listValues)
        self.drivenListWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        drivenLayout.addWidget(drivenLabel)
        drivenLayout.addWidget(self.drivenListWidget)
        mainLayout.addLayout(drivenLayout)
        # confirm button -----------------------------------------------------
        # buttonLayout = QtWidgets.QHBoxLayout()
        self.okButton = QtWidgets.QPushButton("Ok")
        self.okButton.clicked.connect(self.onOK)
        mainLayout.addWidget(self.okButton)

    def onOK(self):
        """collect information from the displayed widgets, userinput, return

        Returns:
            list: of user input provided from user (None on validation
            failure; the dialog stays open)
        """
        setupName = self.setupLineEdit.text()
        if setupName == "" and self.setupField:
            genericWarning(self, "Enter Setup Name")
            return
        selectedAttrs = self.drivenListWidget.selectedItems()
        if not selectedAttrs:
            genericWarning(self, "Select at least one attribute")
            return
        # keep only the attr part of each 'node.attr' entry
        driverAttrs = [item.text().split(".")[1] for item in selectedAttrs]
        self.result.append(setupName)
        self.result.append(driverAttrs)
        self.accept()
        return self.result

    def getValue(self):
        """convenience to get result

        Returns:
            list: [setupName, [of selected attrs]]
        """
        return self.result

    def exec_(self):
        """Convenience

        Returns:
            list: [str, [of selected attrs]]
        """
        super(RBFSetupInput, self).exec_()
        return self.result
class RBFManagerUI(MayaQWidgetDockableMixin, QtWidgets.QMainWindow):
"""A manager for creating, mirroring, importing/exporting poses created
for RBF type nodes.
Attributes:
absWorld (bool): Type of pose info look up, world vs local
addRbfButton (QPushButton): button for adding RBFs to setup
allSetupsInfo (dict): setupName:[of all the RBFNodes in scene]
attrMenu (TYPE): Description
currentRBFSetupNodes (list): currently selected setup nodes(userSelect)
driverPoseTableWidget (QTableWidget): poseInfo for the driver node
genericWidgetHight (int): convenience to adjust height of all buttons
mousePosition (QPose): if tracking mouse position on UI
rbfTabWidget (QTabWidget): where the driven table node info is
displayed
"""
mousePosition = QtCore.Signal(int, int)
    def __init__(self, parent=None, hideMenuBar=False, newSceneCallBack=True):
        """Build the manager window, populate it and connect its signals.

        Args:
            parent (QWidget, optional): widget to parent this window to
            hideMenuBar (bool, optional): create the menu bar hidden
            newSceneCallBack (bool, optional): register a scene callback
                that refreshes the UI when the scene changes
        """
        super(RBFManagerUI, self).__init__(parent=parent)
        # UI info -------------------------------------------------------------
        self.callBackID = None
        self.setWindowTitle(TOOL_TITLE)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
        self.genericWidgetHight = 24
        # class info ----------------------------------------------------------
        self.absWorld = True
        self.currentRBFSetupNodes = []
        self.allSetupsInfo = None
        self.setMenuBar(self.createMenuBar(hideMenuBar=hideMenuBar))
        self.setCentralWidget(self.createCentralWidget())
        self.centralWidget().setMouseTracking(True)
        self.refreshRbfSetupList()
        self.connectSignals()
        # added because the dockableMixin makes the ui appear small
        self.adjustSize()
        if newSceneCallBack:
            self.newSceneCallBack()
def callBackFunc(self, *args):
"""super safe function for trying to refresh the UI, should anything
fail.
Args:
*args: Description
"""
try:
self.refresh()
except Exception:
pass
def removeSceneCallback(self):
"""remove the callback associated witht he UI, quietly fail.
"""
try:
om.MSceneMessage.removeCallback(self.callBackID)
except Exception as e:
print "CallBack removal failure:"
print e
def newSceneCallBack(self):
"""create a new scene callback to refresh the UI when scene changes.
"""
callBackType = om.MSceneMessage.kSceneUpdate
try:
func = self.callBackFunc
obj = om.MSceneMessage.addCallback(callBackType, func)
self.callBackID = obj
except Exception as e:
print e
self.callBackID = None
# general functions -------------------------------------------------------
def getSelectedSetup(self):
"""return the string name of the selected setup from user and type
Returns:
str, str: name, nodeType
"""
selectedSetup = self.rbf_cbox.currentText()
if selectedSetup.startswith("New"):
setupType = selectedSetup.split(" ")[1]
return None, setupType
else:
return selectedSetup, self.currentRBFSetupNodes[0].rbfType
def getDrivenNodesFromSetup(self):
"""based on the user selected setup, get the associated RBF nodes
Returns:
list: driven rbfnodes
"""
drivenNodes = []
for rbfNode in self.currentRBFSetupNodes:
drivenNodes.extend(rbfNode.getDrivenNode)
return drivenNodes
def getUserSetupInfo(self, drivenNode, drivenAttrs, setupField=True):
"""prompt the user for information needed to create setup or add
rbf node to existing setup
Args:
drivenAttrs (list): of attrs to display to user to select from
setupField (bool, optional): should the user be asked to input
a name for setup
Returns:
list: list of selected attrs, name specified
"""
userInputWdgt = RBFSetupInput(drivenAttrs,
setupField=setupField,
parent=self)
partialObj = partial(self.attrListMenu,
userInputWdgt.drivenListWidget,
"",
nodeToQuery=drivenNode)
customMenu = userInputWdgt.drivenListWidget.customContextMenuRequested
customMenu.connect(partialObj)
results = userInputWdgt.exec_()
if results:
return results[0], results[1]
else:
return None, None
def __deleteSetup(self):
decision = promptAcceptance(self,
"Delete current Setup?",
"This will delete all RBF nodes in setup.")
if decision in [QtWidgets.QMessageBox.Discard,
QtWidgets.QMessageBox.Cancel]:
return
self.deleteSetup()
def deleteSetup(self, setupName=None):
"""Delete all the nodes within a setup.
Args:
setupName (None, optional): Description
"""
setupType = None
if setupName is None:
setupName, setupType = self.getSelectedSetup()
nodesToDelete = self.allSetupsInfo.get(setupName, [])
for rbfNode in nodesToDelete:
drivenNode = rbfNode.getDrivenNode()
rbfNode.deleteRBFToggleAttr()
if drivenNode:
rbf_node.removeDrivenGroup(drivenNode[0])
mc.delete(rbfNode.transformNode)
self.refresh()
    def removeRBFFromSetup(self, drivenWidgetIndex):
        """remove RBF tab from setup. Delete driven group, attrs and clean up

        Args:
            drivenWidgetIndex (int): index of the tab that houses the
                contents and info of the rbf node

        Returns:
            n/a: n/a
        """
        decision = promptAcceptance(self,
                                    "Are you sure you want to remove node?",
                                    "This will delete the RBF & driven node.")
        if decision in [QtWidgets.QMessageBox.Discard,
                        QtWidgets.QMessageBox.Cancel]:
            return
        # grab the widget before the tab is removed so it can be torn down
        drivenWidget = self.rbfTabWidget.widget(drivenWidgetIndex)
        self.rbfTabWidget.removeTab(drivenWidgetIndex)
        rbfNode = getattr(drivenWidget, "rbfNode")
        self.__deleteAssociatedWidgets(drivenWidget, attrName="associated")
        drivenWidget.deleteLater()
        # tear down the scene-side nodes that backed this tab
        drivenNode = rbfNode.getDrivenNode()
        rbfNode.deleteRBFToggleAttr()
        if drivenNode and drivenNode[0].endswith(rbf_node.DRIVEN_SUFFIX):
            rbf_node.removeDrivenGroup(drivenNode[0])
        mc.delete(rbfNode.transformNode)
        self.currentRBFSetupNodes.remove(rbfNode)
        # with no tabs left, reset the whole UI; otherwise just repopulate
        if self.rbfTabWidget.count() == 0:
            self.refresh(rbfSelection=True,
                         driverSelection=True,
                         drivenSelection=True,
                         currentRBFSetupNodes=True)
        else:
            self.refreshAllTables()
def addRBFToSetup(self):
"""query the user in case of a new setup or adding additional RBFs to
existing.
Returns:
TYPE: Description
"""
# TODO cut this function down to size
driverNode = self.driverLineEdit.text()
driverControl = self.controlLineEdit.text()
# take every opportunity to return to avoid unneeded processes
if driverNode == "":
return
selectedAttrItems = self.driver_attributes_widget.selectedItems()
if not selectedAttrItems:
return
driverAttrs = [item.text().split(".")[1] for item in selectedAttrItems]
drivenNode = mc.ls(sl=True)
# This does prevents a driver to be its own driven
if not drivenNode or drivenNode[0] == driverNode:
genericWarning(self, "Select Node to be driven!")
return
drivenNode = drivenNode[0]
drivenNodeType = mc.nodeType(drivenNode)
# smart display all when needed
if drivenNodeType in ["transform", "joint"]:
attrType = "keyable"
else:
attrType = "all"
availableAttrs = getPlugAttrs([drivenNode], attrType=attrType)
setupName, rbfType = self.getSelectedSetup()
# if a setup has already been named or starting new
if setupName is None:
setupName, drivenAttrs = self.getUserSetupInfo(drivenNode,
availableAttrs)
else:
tmpName, drivenAttrs = self.getUserSetupInfo(drivenNode,
availableAttrs,
setupField=False)
if not drivenAttrs:
return
parentNode = False
if mc.nodeType(drivenNode) == "transform":
parentNode = True
drivenNode = rbf_node.addDrivenGroup(drivenNode)
# create RBFNode instance, apply settings
rbfNode = sortRBF(drivenNode, rbfType=rbfType)
rbfNode.setSetupName(setupName)
rbfNode.setDriverControlAttr(driverControl)
rbfNode.setDriverNode(driverNode, driverAttrs)
rbfNode.setDrivenNode(drivenNode, drivenAttrs, parent=parentNode)
# Check if there any preexisting nodes in setup, if so copy pose index
if self.currentRBFSetupNodes:
currentRbfs = self.currentRBFSetupNodes[0]
print "Syncing poses indices from {} >> {}".format(currentRbfs,
rbfNode)
rbfNode.syncPoseIndices(self.currentRBFSetupNodes[0])
else:
rbfNode.applyDefaultPose()
self.populateDriverInfo(rbfNode, rbfNode.getNodeInfo())
# add newly created RBFNode to list of current
self.currentRBFSetupNodes.append(rbfNode)
# get info to populate the UI with it
weightInfo = rbfNode.getNodeInfo()
tabDrivenWidget = self.addNewTab(rbfNode)
self.populateDrivenWidgetInfo(tabDrivenWidget, weightInfo, rbfNode)
self.refreshRbfSetupList(setToSelection=setupName)
self.lockDriverWidgets()
mc.select(driverControl)
def refreshAllTables(self):
"""Convenience function to refresh all the tables on all the tabs
with latest information.
"""
weightInfo = None
rbfNode = None
for index in range(self.rbfTabWidget.count()):
drivenWidget = self.rbfTabWidget.widget(index)
drivenNodeName = drivenWidget.drivenLineEdit.text()
for rbfNode in self.currentRBFSetupNodes:
drivenNodes = rbfNode.getDrivenNode()
if drivenNodes and drivenNodes[0] != drivenNodeName:
continue
weightInfo = rbfNode.getNodeInfo()
self.setDrivenTable(drivenWidget, rbfNode, weightInfo)
if weightInfo and rbfNode:
self.populateDriverInfo(rbfNode, weightInfo)
def deletePose(self):
"""delete a pose from the UI and all the RBFNodes in the setup.
Returns:
n/a: n/a
"""
driverRow = self.driverPoseTableWidget.currentRow()
drivenWidget = self.rbfTabWidget.currentWidget()
drivenTableWidget = getattr(drivenWidget, "tableWidget")
drivenRow = drivenTableWidget.currentRow()
# TODO if one is allow syncing of nodes of different lengths
# it should be done here
if drivenRow != driverRow or drivenRow == -1:
genericWarning(self, "Select Pose # to be deleted.")
return
for rbfNode in self.currentRBFSetupNodes:
rbfNode.deletePose(indexToPop=drivenRow)
self.refreshAllTables()
def editPose(self):
"""edit an existing pose. Specify the index
Returns:
TYPE: Description
"""
rbfNodes = self.currentRBFSetupNodes
if not rbfNodes:
return
driverRow = self.driverPoseTableWidget.currentRow()
drivenWidget = self.rbfTabWidget.currentWidget()
drivenTableWidget = getattr(drivenWidget, "tableWidget")
drivenRow = drivenTableWidget.currentRow()
if drivenRow != driverRow or drivenRow == -1:
genericWarning(self, "Select Pose # to be Edited.")
return
driverNode = rbfNodes[0].getDriverNode()[0]
driverAttrs = rbfNodes[0].getDriverNodeAttributes()
poseInputs = rbf_node.getMultipleAttrs(driverNode, driverAttrs)
for rbfNode in rbfNodes:
poseValues = rbfNode.getPoseValues(resetDriven=True)
rbfNode.addPose(poseInput=poseInputs,
poseValue=poseValues,
posesIndex=drivenRow)
rbfNode.forceEvaluation()
self.refreshAllTables()
def addPose(self):
"""Add pose to rbf nodes in setup. Additional index on all nodes
Returns:
TYPE: Description
"""
rbfNodes = self.currentRBFSetupNodes
if not rbfNodes:
return
driverNode = rbfNodes[0].getDriverNode()[0]
driverAttrs = rbfNodes[0].getDriverNodeAttributes()
poseInputs = rbf_node.getMultipleAttrs(driverNode, driverAttrs)
for rbfNode in rbfNodes:
poseValues = rbfNode.getPoseValues(resetDriven=True,
absoluteWorld=self.absWorld)
rbfNode.addPose(poseInput=poseInputs, poseValue=poseValues)
self.refreshAllTables()
def updateAllSetupsInfo(self, includeEmpty=False):
"""refresh the instance dictionary of all the setps in the scene.
Args:
includeEmpty (bool, optional): there could be rbf nodes with no
setup names.
"""
self.allSetupsInfo = {}
tmp_dict = rbf_node.getRbfSceneSetupsInfo(includeEmpty=includeEmpty)
for setupName, nodes in tmp_dict.iteritems():
self.allSetupsInfo[setupName] = [sortRBF(n) for n in nodes]
def setNodeToField(self, lineEdit, multi=False):
"""take the currently selected node and set its name to the lineedit
provided
Args:
lineEdit (QLineEdit): widget to set the name to
multi (bool, optional): should multiple nodes be supported
Returns:
str: str set to the lineedit
"""
selected = mc.ls(sl=True)
if not multi:
selected = [selected[0]]
controlNameData = ", ".join(selected)
lineEdit.setText(controlNameData)
mc.select(cl=True)
return controlNameData
def highlightListEntries(self, listWidget, toHighlight):
"""set the items in a listWidget to be highlighted if they are in list
Args:
listWidget (QListWidget): list to highlight items on
toHighlight (list): of things to highlight
"""
toHighlight = list(toHighlight)
scrollToItems = []
for index in range(listWidget.count()):
# for qt to check for events like keypress
item = listWidget.item(index)
itemText = item.text()
for desired in toHighlight:
if desired in itemText:
item.setSelected(True)
scrollToItems.append(item)
toHighlight.remove(desired)
if scrollToItems:
listWidget.scrollToItem(scrollToItems[0])
def setAttributeDisplay(self, attrListWidget, driverName, displayAttrs):
nodeAttrsToDisplay = ["{}.{}".format(driverName, attr)
for attr in displayAttrs]
attrListWidget.clear()
attrListWidget.addItems(sorted(nodeAttrsToDisplay))
self.highlightListEntries(attrListWidget, displayAttrs)
def updateAttributeDisplay(self,
attrListWidget,
driverNames,
highlight=[],
attrType="all"):
"""update the provided listwidget with the attrs collected from the
list of nodes provided
Args:
attrListWidget (QListWidget): widget to update
driverNames (list): of nodes to query for attrs to display
highlight (list, optional): of item entries to highlight
keyable (bool, optional): should the displayed attrs be keyable
Returns:
n/a: n/a
"""
nodeAttrsToDisplay = []
if not driverNames:
return
elif type(driverNames) != list:
driverNames = [driverNames]
nodeAttrsToDisplay = getPlugAttrs(driverNames, attrType=attrType)
attrListWidget.clear()
attrListWidget.addItems(sorted(nodeAttrsToDisplay))
if highlight:
self.highlightListEntries(attrListWidget, highlight)
def __deleteAssociatedWidgetsMaya(self, widget, attrName="associatedMaya"):
"""delete maya ui items 'associated' with the provided widgets
Args:
widget (QWidget): Widget that has the associated attr set
attrName (str, optional): class attr to query
"""
if hasattr(widget, attrName):
for t in getattr(widget, attrName):
try:
mc.deleteUI(t, ctl=True)
except Exception:
pass
else:
setattr(widget, attrName, [])
def __deleteAssociatedWidgets(self, widget, attrName="associated"):
"""delete widget items 'associated' with the provided widgets
Args:
widget (QWidget): Widget that has the associated attr set
attrName (str, optional): class attr to query
"""
if hasattr(widget, attrName):
for t in getattr(widget, attrName):
try:
t.deleteLater()
except Exception:
pass
else:
setattr(widget, attrName, [])
def syncDriverTableCells(self,
attrEdit,
rbfAttrPlug,
poseIndex,
valueIndex,
attributeName,
*args):
"""When you edit the driver table, it will update all the sibling
rbf nodes in the setup.
Args:
attrEdit (QLineEdit): cell that was edited in the driver table
rbfAttrPlug (str): node.attr the cell represents
*args: signal throws additional args
"""
attr = rbfAttrPlug.partition(".")[2]
value = attrEdit.text()
for rbfNode in self.currentRBFSetupNodes:
attrPlug = "{}.{}".format(rbfNode, attr)
mc.setAttr(attrPlug, float(value))
rbfNode.forceEvaluation()
    def setDriverTable(self, rbfNode, weightInfo):
        """Set the driverTable widget with the information from the weightInfo

        Args:
            rbfNode (RBFNode): node to query additional info from
            weightInfo (dict): to pull information from ("poses" and
                "driverAttrs" keys are read here)

        Returns:
            n/a: n/a
        """
        poses = weightInfo["poses"]
        # ensure deletion of associated widgets with this parent widget
        self.__deleteAssociatedWidgetsMaya(self.driverPoseTableWidget)
        self.__deleteAssociatedWidgets(self.driverPoseTableWidget)
        self.driverPoseTableWidget.clear()
        columnLen = len(weightInfo["driverAttrs"])
        self.driverPoseTableWidget.setColumnCount(columnLen)
        headerNames = weightInfo["driverAttrs"]
        self.driverPoseTableWidget.setHorizontalHeaderLabels(headerNames)
        poseInputLen = len(poses["poseInput"])
        self.driverPoseTableWidget.setRowCount(poseInputLen)
        if poseInputLen == 0:
            return
        verticalLabels = ["Pose {}".format(index) for index
                          in range(poseInputLen)]
        self.driverPoseTableWidget.setVerticalHeaderLabels(verticalLabels)
        # each cell gets a Maya attr field whose returnPressed signal syncs
        # the value onto every sibling rbf node in the setup
        tmpWidgets = []
        mayaUiItems = []
        for rowIndex, poseInput in enumerate(poses["poseInput"]):
            for columnIndex, pValue in enumerate(poseInput):
                # TODO, this is where we get the attrControlGroup
                rbfAttrPlug = "{}.poses[{}].poseInput[{}]".format(rbfNode,
                                                                  rowIndex,
                                                                  columnIndex)
                attrEdit, mAttrFeild = getControlAttrWidget(rbfAttrPlug,
                                                            label="")
                func = partial(self.syncDriverTableCells,
                               attrEdit,
                               rbfAttrPlug,
                               rowIndex,
                               columnIndex,
                               headerNames[columnIndex])
                self.driverPoseTableWidget.setCellWidget(rowIndex,
                                                         columnIndex,
                                                         attrEdit)
                attrEdit.returnPressed.connect(func)
                tmpWidgets.append(attrEdit)
                mayaUiItems.append(mAttrFeild)
        # record created widgets so later refreshes can tear them down
        setattr(self.driverPoseTableWidget, "associated", tmpWidgets)
        setattr(self.driverPoseTableWidget, "associatedMaya", mayaUiItems)
def lockDriverWidgets(self, lock=True):
"""toggle the ability to edit widgets after they have been set
Args:
lock (bool, optional): should it be locked
"""
self.setDriverButton.blockSignals(lock)
if lock:
self.driver_attributes_widget.setEnabled(False)
else:
self.driver_attributes_widget.setEnabled(True)
def populateDriverInfo(self, rbfNode, weightInfo):
"""populate the driver widget, driver, control, driving attrs
Args:
rbfNode (RBFNode): node for query
weightInfo (dict): to pull information from, since we have it
"""
driverNode = weightInfo["driverNode"]
if driverNode:
driverNode = driverNode[0]
self.driverLineEdit.setText(driverNode)
driverControl = weightInfo["driverControl"]
# populate control here
self.controlLineEdit.setText(driverControl)
self.setAttributeDisplay(self.driver_attributes_widget,
driverNode,
weightInfo["driverAttrs"])
self.setDriverTable(rbfNode, weightInfo)
def _associateRBFnodeAndWidget(self, tabDrivenWidget, rbfNode):
"""associates the RBFNode with a widget for convenience when adding,
deleting, editing
Args:
tabDrivenWidget (QWidget): tab widget
rbfNode (RBFNode): instance to be associated
"""
setattr(tabDrivenWidget, "rbfNode", rbfNode)
def createAndTagDrivenWidget(self, weightInfo, lockWidgets=True):
"""create and associate a widget, populated with the information
provided by the weightInfo
Args:
weightInfo (dict): information to populate the widgets with
lockWidgets (bool, optional): should they be locked from editing
Returns:
QWidget: parent widget that houses all the information to display
"""
drivenWidgetComponents = self.createDrivenAttributeWidget()
drivenWidget = drivenWidgetComponents.pop(-1)
widgetAttrs = ("drivenLineEdit",
"drivenSelectButton",
"attributeListWidget",
"tableWidget")
for component, attr in zip(drivenWidgetComponents, widgetAttrs):
setattr(drivenWidget, attr, component)
if attr == "attributeListWidget" and lockWidgets:
component.setEnabled(False)
# TODO add signal connections here
table = [wdgt for wdgt in drivenWidgetComponents
if type(wdgt) == QtWidgets.QTableWidget][0]
header = table.verticalHeader()
# TODO There was an inconsistency here with signals, potentially
# resolved
header.sectionClicked.connect(self.setConsistentHeaderSelection)
header.sectionClicked.connect(self.recallDriverPose)
selDelFunc = self.setEditDeletePoseEnabled
table.itemSelectionChanged.connect(selDelFunc)
clickWidget = [wdgt for wdgt in drivenWidgetComponents
if type(wdgt) == ClickableLineEdit][0]
clickWidget.clicked.connect(selectNode)
return drivenWidget
def setDrivenTable(self, drivenWidget, rbfNode, weightInfo):
"""set the widgets with information from the weightInfo for dispaly
Args:
drivenWidget (QWidget): parent widget, the tab to populate
rbfNode (RBFNode): node associated with widget
weightInfo (dict): of information to display
"""
poses = weightInfo["poses"]
drivenWidget.tableWidget.clear()
rowCount = len(poses["poseValue"])
drivenWidget.tableWidget.setRowCount(rowCount)
drivenAttrs = weightInfo["drivenAttrs"]
drivenWidget.tableWidget.setColumnCount(len(drivenAttrs))
drivenWidget.tableWidget.setHorizontalHeaderLabels(drivenAttrs)
verticalLabels = ["Pose {}".format(index) for index in range(rowCount)]
drivenWidget.tableWidget.setVerticalHeaderLabels(verticalLabels)
for rowIndex, poseInput in enumerate(poses["poseValue"]):
for columnIndex, pValue in enumerate(poseInput):
rbfAttrPlug = "{}.poses[{}].poseValue[{}]".format(rbfNode,
rowIndex,
columnIndex)
attrEdit, mAttrFeild = getControlAttrWidget(rbfAttrPlug,
label="")
drivenWidget.tableWidget.setCellWidget(rowIndex,
columnIndex,
attrEdit)
def populateDrivenWidgetInfo(self, drivenWidget, weightInfo, rbfNode):
"""set the information from the weightInfo to the widgets child of
drivenWidget
Args:
drivenWidget (QWidget): parent widget
weightInfo (dict): of information to display
rbfNode (RBFNode): instance of the RBFNode
Returns:
n/a: n/a
"""
drivenWidget.drivenLineEdit.clear()
driverNode = weightInfo["drivenNode"]
if driverNode:
driverNode = driverNode[0]
else:
return
drivenWidget.drivenLineEdit.setText(str(driverNode))
self.setAttributeDisplay(drivenWidget.attributeListWidget,
weightInfo["drivenNode"][0],
weightInfo["drivenAttrs"])
self.setDrivenTable(drivenWidget, rbfNode, weightInfo)
def addNewTab(self, rbfNode):
"""Create a new tab in the setup
Args:
rbfNode (RBFNode): to pull information from
Returns:
QWidget: created widget
"""
tabDrivenWidget = self.createAndTagDrivenWidget({})
self._associateRBFnodeAndWidget(tabDrivenWidget, rbfNode)
self.rbfTabWidget.addTab(tabDrivenWidget, str(rbfNode))
return tabDrivenWidget
def recreateDrivenTabs(self, rbfNodes):
"""remove tabs and create ones for each node in rbfNodes provided
Args:
rbfNodes (list): [of RBFNodes]
"""
rbfNodes = sorted(rbfNodes)
self.rbfTabWidget.clear()
for rbfNode in rbfNodes:
weightInfo = rbfNode.getNodeInfo()
drivenWidget = self.createAndTagDrivenWidget(weightInfo)
self._associateRBFnodeAndWidget(drivenWidget, rbfNode)
self.populateDrivenWidgetInfo(drivenWidget, weightInfo, rbfNode)
self.rbfTabWidget.addTab(drivenWidget, rbfNode.name)
def displayRBFSetupInfo(self, index):
"""Display the rbfnodes within the desired setups
Args:
index (int): signal information
"""
rbfSelection = str(self.rbf_cbox.currentText())
self.refresh(rbfSelection=False,
driverSelection=True,
drivenSelection=True,
currentRBFSetupNodes=False)
if rbfSelection.startswith("New "):
self.currentRBFSetupNodes = []
self.lockDriverWidgets(lock=False)
return
rbfNodes = self.allSetupsInfo.get(rbfSelection, [])
if not rbfNodes:
return
self.currentRBFSetupNodes = rbfNodes
weightInfo = rbfNodes[0].getNodeInfo()
self.populateDriverInfo(rbfNodes[0], weightInfo)
self.lockDriverWidgets(lock=True)
# wrapping the following in try due to what I think is a Qt Bug.
# need to look further into this.
# File "rbf_manager_ui.py", line 872, in createAndTagDrivenWidget
# header.sectionClicked.connect(self.setConsistentHeaderSelection)
# AttributeError: 'PySide2.QtWidgets.QListWidgetItem' object has
# no attribute 'sectionClicked'
try:
self.recreateDrivenTabs(self.allSetupsInfo[rbfSelection])
except AttributeError:
print "Forcing refresh on UI due to error."
self.refresh(rbfSelection=True,
driverSelection=True,
drivenSelection=True,
currentRBFSetupNodes=True)
def attrListMenu(self,
attributeListWidget,
driverLineEdit,
QPos,
nodeToQuery=None):
"""right click menu for queie qlistwidget
Args:
attributeListWidget (QListWidget): widget to display menu over
driverLineEdit (QLineEdit): widget to query the attrs from
QPos (QtCore.QPos): due to the signal, used
nodeToQuery (None, optional): To display attrs from this nodes
for menu placement
No Longer Returned:
n/a: n/a
"""
if nodeToQuery is None:
nodeToQuery = str(driverLineEdit.text())
self.attrMenu = QtWidgets.QMenu()
parentPosition = attributeListWidget.mapToGlobal(QtCore.QPoint(0, 0))
menu_item_01 = self.attrMenu.addAction("Display Keyable")
menu_item_01.setToolTip("Show Keyable Attributes")
menu_item_01.triggered.connect(partial(self.updateAttributeDisplay,
attributeListWidget,
nodeToQuery,
attrType="keyable"))
menu2Label = "Display ChannelBox (Non Keyable)"
menu_item_02 = self.attrMenu.addAction(menu2Label)
menu2tip = "Show attributes in ChannelBox that are not keyable."
menu_item_02.setToolTip(menu2tip)
menu_item_02.triggered.connect(partial(self.updateAttributeDisplay,
attributeListWidget,
nodeToQuery,
attrType="cb"))
menu_item_03 = self.attrMenu.addAction("Display All")
menu_item_03.setToolTip("GIVE ME ALL!")
menu_item_03.triggered.connect(partial(self.updateAttributeDisplay,
attributeListWidget,
nodeToQuery,
attrType="all"))
self.attrMenu.move(parentPosition + QPos)
self.attrMenu.show()
def refreshRbfSetupList(self, setToSelection=False):
"""refresh the list of setups the user may select from
Args:
setToSelection (bool, optional): after refresh, set to desired
"""
self.rbf_cbox.blockSignals(True)
self.rbf_cbox.clear()
addNewOfType = ["New {} setup".format(rbf)
for rbf in rbf_node.SUPPORTED_RBF_NODES]
self.updateAllSetupsInfo()
addNewOfType.extend(sorted(self.allSetupsInfo.keys()))
self.rbf_cbox.addItems(addNewOfType)
if setToSelection:
selectionIndex = self.rbf_cbox.findText(setToSelection)
self.rbf_cbox.setCurrentIndex(selectionIndex)
else:
self.lockDriverWidgets(lock=False)
self.rbf_cbox.blockSignals(False)
def clearDrivenTabs(self):
"""force deletion on tab widgets
"""
toRemove = []
tabIndicies = self.rbfTabWidget.count()
for index in range(tabIndicies):
tabWidget = self.rbfTabWidget.widget(index)
toRemove.append(tabWidget)
self.rbfTabWidget.clear()
[t.deleteLater() for t in toRemove]
def refresh(self,
rbfSelection=True,
driverSelection=True,
drivenSelection=True,
currentRBFSetupNodes=True,
*args):
"""Refreshes the UI
Args:
rbfSelection (bool, optional): desired section to refresh
driverSelection (bool, optional): desired section to refresh
drivenSelection (bool, optional): desired section to refresh
currentRBFSetupNodes (bool, optional): desired section to refresh
"""
if rbfSelection:
self.refreshRbfSetupList()
if driverSelection:
self.controlLineEdit.clear()
self.driverLineEdit.clear()
self.driver_attributes_widget.clear()
self.__deleteAssociatedWidgetsMaya(self.driverPoseTableWidget)
self.__deleteAssociatedWidgets(self.driverPoseTableWidget)
self.driverPoseTableWidget.clear()
if drivenSelection:
self.clearDrivenTabs()
if currentRBFSetupNodes:
self.currentRBFSetupNodes = []
def recallDriverPose(self, indexSelected):
"""recall a pose recorded from one of the RBFNodes in currentSelection
it should not matter when RBFNode in setup is selected as they
should all be in sync
Args:
indexSelected (int): index of the pose to recall
Returns:
n/a: nada
"""
if not self.currentRBFSetupNodes:
return
self.currentRBFSetupNodes[0].recallDriverPose(indexSelected)
def setConsistentHeaderSelection(self, headerIndex):
"""when a pose is selected in one table, ensure the selection in all
other tables, to avoid visual confusion
Args:
headerIndex (int): desired header to highlight
"""
self.driverPoseTableWidget.blockSignals(True)
self.driverPoseTableWidget.selectRow(headerIndex)
self.driverPoseTableWidget.blockSignals(False)
for index in range(self.rbfTabWidget.count()):
drivenWidget = self.rbfTabWidget.widget(index)
drivenTableWidget = getattr(drivenWidget, "tableWidget")
drivenTableWidget.blockSignals(True)
drivenTableWidget.selectRow(headerIndex)
drivenTableWidget.blockSignals(False)
self.setEditDeletePoseEnabled(enable=True)
def setEditDeletePoseEnabled(self, enable=False):
"""toggle buttons that can or cannot be selected
Args:
enable (bool, optional): to disable vs not
"""
self.editPoseButton.setEnabled(enable)
self.deletePoseButton.setEnabled(enable)
def setDriverControlOnSetup(self, controlName):
"""make sure to set the driverControlAttr when the user supplies one
Args:
controlName (str): name of the control to set in an attr
"""
for rbfNode in self.currentRBFSetupNodes:
rbfNode.setDriverControlAttr(controlName)
def setSetupDriverControl(self, lineEditWidget):
"""should the user wish to set a different driverControl pose setup
creation, prompt them prior to proceeding
Args:
lineEditWidget (QLineEdit): to query for the name
Returns:
n/a: nada
"""
if not self.currentRBFSetupNodes:
self.setNodeToField(lineEditWidget)
elif self.currentRBFSetupNodes:
textA = "Do you want to change the Control for setup?"
textB = "This Control that will be used for recalling poses."
decision = promptAcceptance(self, textA, textB)
if decision in [QtWidgets.QMessageBox.Discard,
QtWidgets.QMessageBox.Cancel]:
return
controlName = self.setNodeToField(lineEditWidget)
self.setDriverControlOnSetup(controlName)
def getRBFNodesInfo(self, rbfNodes):
"""create a dictionary of all the RBFInfo(referred to as
weightNodeInfo a lot) for export
Args:
rbfNodes (list): [of RBFNodes]
Returns:
dict: of all the rbfNodes provided
"""
weightNodeInfo_dict = {}
for rbf in rbfNodes:
weightNodeInfo_dict[rbf.name] = rbf.getNodeInfo()
return weightNodeInfo_dict
def importNodes(self):
"""import a setup(s) from file select by user
Returns:
n/a: nada
"""
sceneFilePath = mc.file(sn=True, q=True)
startDir = os.path.dirname(sceneFilePath)
filePath = rbf_io.fileDialog(startDir, mode=1)
if filePath is None:
return
rbf_io.importRBFs(filePath)
mc.select(cl=True)
self.refresh()
print "RBF setups imported: {}".format(filePath)
def exportNodes(self, allSetups=True):
"""export all nodes or nodes from current setup
Args:
allSetups (bool, optional): If all or setup
Returns:
n/a: nada
"""
# TODO WHEN NEW RBF NODE TYPES ARE ADDED, THIS WILL NEED TO BE RETOOLED
nodesToExport = []
if allSetups:
[nodesToExport.extend(v) for k, v,
in self.allSetupsInfo.iteritems()]
else:
nodesToExport = self.currentRBFSetupNodes
nodesToExport = [n.name for n in nodesToExport]
sceneFilePath = mc.file(sn=True, q=True)
startDir = os.path.dirname(sceneFilePath)
filePath = rbf_io.fileDialog(startDir, mode=0)
if filePath is None:
return
rbf_io.exportRBFs(nodesToExport, filePath)
    def gatherMirroredInfo(self, rbfNodes):
        """gather all the info from the provided nodes and string replace
        side information for its mirror. Using mGear standard
        naming convections

        Args:
            rbfNodes (list): [of RBFNodes]

        Returns:
            dict: mirrored node name -> mirrored weightInfo dict
        """
        mirrorWeightInfo = {}
        for rbfNode in rbfNodes:
            weightInfo = rbfNode.getNodeInfo()
            # connections -----------------------------------------------------
            mrConnections = []
            for pairs in weightInfo["connections"]:
                mrConnections.append([mString.convertRLName(pairs[0]),
                                      mString.convertRLName(pairs[1])])
            weightInfo["connections"] = mrConnections
            # drivenControlName -----------------------------------------------
            mrDrvnCtl = mString.convertRLName(weightInfo["drivenControlName"])
            weightInfo["drivenControlName"] = mrDrvnCtl
            # drivenNode ------------------------------------------------------
            weightInfo["drivenNode"] = [mString.convertRLName(n) for n
                                        in weightInfo["drivenNode"]]
            # driverControl ---------------------------------------------------
            mrDrvrCtl = mString.convertRLName(weightInfo["driverControl"])
            weightInfo["driverControl"] = mrDrvrCtl
            # driverNode ------------------------------------------------------
            weightInfo["driverNode"] = [mString.convertRLName(n) for n
                                        in weightInfo["driverNode"]]
            # setupName -------------------------------------------------------
            mrSetupName = mString.convertRLName(weightInfo["setupName"])
            # a name without a side token mirrors to itself; tag it so the
            # mirrored setup does not collide with the source setup
            if mrSetupName == weightInfo["setupName"]:
                mrSetupName = "{}{}".format(mrSetupName, MIRROR_SUFFIX)
            weightInfo["setupName"] = mrSetupName
            # transformNode ---------------------------------------------------
            # name
            # parent
            tmp = weightInfo["transformNode"]["name"]
            mrTransformName = mString.convertRLName(tmp)
            weightInfo["transformNode"]["name"] = mrTransformName
            tmp = weightInfo["transformNode"]["parent"]
            if tmp is None:
                mrTransformPar = None
            else:
                mrTransformPar = mString.convertRLName(tmp)
            weightInfo["transformNode"]["parent"] = mrTransformPar
            # name ------------------------------------------------------------
            mirrorWeightInfo[mString.convertRLName(rbfNode.name)] = weightInfo
        return mirrorWeightInfo
def getMirroredSetupTargetsInfo(self):
"""convenience function to get all the mirrored info for the new side
Returns:
dict: mirrored dict information
"""
setupTargetInfo_dict = {}
for rbfNode in self.currentRBFSetupNodes:
mrRbfNode = mString.convertRLName(rbfNode.name)
mrRbfNode = sortRBF(mrRbfNode)
drivenNode = rbfNode.getDrivenNode()[0]
drivenControlNode = rbfNode.getConnectedRBFToggleNode()
mrDrivenControlNode = mString.convertRLName(drivenControlNode)
mrDrivenControlNode = pm.PyNode(mrDrivenControlNode)
setupTargetInfo_dict[pm.PyNode(drivenNode)] = [mrDrivenControlNode,
mrRbfNode]
return setupTargetInfo_dict
    def mirrorSetup(self):
        """gather all info on current setup, mirror the info, use the creation
        func from that rbf module type to create the nodes in the setup with
        mirrored information.

        THE ONLY nodes created will be the ones created during normal
        "add pose" creation. Assumption is that all nodes that need drive,
        driven by the setup exist.

        Returns:
            n/a: nada
        """
        if not self.currentRBFSetupNodes:
            return
        # any node of the setup can serve as the template for type/pose count
        aRbfNode = self.currentRBFSetupNodes[0]
        mirrorWeightInfo = self.gatherMirroredInfo(self.currentRBFSetupNodes)
        mrRbfType = aRbfNode.rbfType
        poseIndices = len(aRbfNode.getPoseInfo()["poseInput"])
        # create the mirrored nodes from the mirrored weight info
        rbfModule = rbf_io.RBF_MODULES[mrRbfType]
        rbfModule.createRBFFromInfo(mirrorWeightInfo)
        setupTargetInfo_dict = self.getMirroredSetupTargetsInfo()
        nameSpace = utils.getNamespace(aRbfNode.name)
        # NOTE(review): .iteritems() is Python 2 only; switch to .items()
        # if this UI needs to run under a Python 3 based Maya.
        mrRbfNodes = [v[1] for k, v in setupTargetInfo_dict.iteritems()]
        # disable the mirrored RBF nodes while their poses are being recorded
        [v.setToggleRBFAttr(0) for v in mrRbfNodes]
        mrDriverNode = mrRbfNodes[0].getDriverNode()[0]
        mrDriverAttrs = mrRbfNodes[0].getDriverNodeAttributes()
        driverControl = aRbfNode.getDriverControlAttr()
        driverControl = pm.PyNode(driverControl)
        # replay every pose of the source setup and record it on the mirror
        for index in range(poseIndices):
            aRbfNode.recallDriverPose(index)
            # pose the mirror side by mirroring the source driver control
            utils.mirrorPose(flip=False, nodes=[driverControl])
            mrData = []
            for srcNode, dstValues in setupTargetInfo_dict.iteritems():
                mrData.extend(utils.calculateMirrorData(srcNode,
                                                        dstValues[0]))
            for entry in mrData:
                utils.applyMirror(nameSpace, entry)
            # read the mirrored driver values, then add them as a new pose
            poseInputs = rbf_node.getMultipleAttrs(mrDriverNode, mrDriverAttrs)
            for mrRbfNode in mrRbfNodes:
                poseValues = mrRbfNode.getPoseValues(resetDriven=True)
                mrRbfNode.addPose(poseInput=poseInputs,
                                  poseValue=poseValues,
                                  posesIndex=index)
                mrRbfNode.forceEvaluation()
        # re-enable the mirrored RBF nodes now that all poses are stored
        [v.setToggleRBFAttr(1) for v in mrRbfNodes]
        setupName, rbfType = self.getSelectedSetup()
        self.refreshRbfSetupList(setToSelection=setupName)
        mc.select(cl=True)
def hideMenuBar(self, x, y):
"""rules to hide/show the menubar when hide is enabled
Args:
x (int): coord X of the mouse
y (int): coord Y of the mouse
"""
if x < 100 and y < 50:
self.menuBar().show()
else:
self.menuBar().hide()
def tabConextMenu(self, qPoint):
"""create a pop up menu over the tabs when right clicked
Args:
qPoint (int): the mouse position when menu requested
Returns:
n/a: diddly
"""
tabIndex = self.rbfTabWidget.tabBar().tabAt(qPoint)
if tabIndex == -1:
return
selWidget = self.rbfTabWidget.widget(tabIndex)
rbfNode = getattr(selWidget, "rbfNode")
tabMenu = QtWidgets.QMenu(self)
parentPosition = self.rbfTabWidget.mapToGlobal(QtCore.QPoint(0, 0))
menu_item_01 = tabMenu.addAction("Select {}".format(rbfNode))
menu_item_01.triggered.connect(partial(mc.select, rbfNode))
partialObj_selWdgt = partial(self.rbfTabWidget.setCurrentWidget,
selWidget)
menu_item_01.triggered.connect(partialObj_selWdgt)
tabMenu.move(parentPosition + qPoint)
tabMenu.show()
def reevalluateAllNodes(self):
"""for evaluation on all nodes in any setup. In case of manual editing
"""
for name, rbfNodes in self.allSetupsInfo.iteritems():
[rbfNode.forceEvaluation() for rbfNode in rbfNodes]
print "All Nodes have been Re-evaluated"
def toggleGetPoseType(self, toggleState):
"""records whether the user wants poses recorded in worldSpace or check
local space
Args:
toggleState (bool): default True
"""
self.absWorld = toggleState
print "Recording poses in world space set to: {}".format(toggleState)
# signal management -------------------------------------------------------
def connectSignals(self):
"""connect all the signals in the UI
Exceptions being MenuBar and Table header signals
"""
self.rbf_cbox.currentIndexChanged.connect(self.displayRBFSetupInfo)
self.rbf_refreshButton.clicked.connect(self.refresh)
self.driverLineEdit.clicked.connect(selectNode)
self.controlLineEdit.clicked.connect(selectNode)
header = self.driverPoseTableWidget.verticalHeader()
header.sectionClicked.connect(self.setConsistentHeaderSelection)
header.sectionClicked.connect(self.recallDriverPose)
selDelFunc = self.setEditDeletePoseEnabled
self.driverPoseTableWidget.itemSelectionChanged.connect(selDelFunc)
self.addRbfButton.clicked.connect(self.addRBFToSetup)
self.addPoseButton.clicked.connect(self.addPose)
self.editPoseButton.clicked.connect(self.editPose)
self.deletePoseButton.clicked.connect(self.deletePose)
partialObj = partial(self.setSetupDriverControl, self.controlLineEdit)
self.setControlButton.clicked.connect(partialObj)
self.setDriverButton.clicked.connect(partial(self.setNodeToField,
self.driverLineEdit))
partialObj = partial(self.updateAttributeDisplay,
self.driver_attributes_widget)
self.driverLineEdit.textChanged.connect(partialObj)
partialObj = partial(self.attrListMenu,
self.driver_attributes_widget,
self.driverLineEdit)
customMenu = self.driver_attributes_widget.customContextMenuRequested
customMenu.connect(partialObj)
tabBar = self.rbfTabWidget.tabBar()
tabBar.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
tabBar.customContextMenuRequested.connect(self.tabConextMenu)
tabBar.tabCloseRequested.connect(self.removeRBFFromSetup)
# broken down widgets -----------------------------------------------------
def createSetupSelectorWidget(self):
"""create the top portion of the weidget, select setup + refresh
Returns:
list: QLayout, QCombobox, QPushButton
"""
setRBFLayout = QtWidgets.QHBoxLayout()
rbfLabel = QtWidgets.QLabel("Select RBF Setup:")
rbf_cbox = QtWidgets.QComboBox()
rbf_refreshButton = QtWidgets.QPushButton("Refresh")
rbf_cbox.setFixedHeight(self.genericWidgetHight)
rbf_refreshButton.setMaximumWidth(80)
rbf_refreshButton.setFixedHeight(self.genericWidgetHight - 1)
setRBFLayout.addWidget(rbfLabel)
setRBFLayout.addWidget(rbf_cbox, 1)
setRBFLayout.addWidget(rbf_refreshButton)
return setRBFLayout, rbf_cbox, rbf_refreshButton
def selectNodeWidget(self, label, buttonLabel="Select"):
"""create a lout with label, lineEdit, QPushbutton for user input
"""
nodeLayout = QtWidgets.QHBoxLayout()
nodeLabel = QtWidgets.QLabel(label)
nodeLabel.setFixedWidth(40)
nodeLineEdit = ClickableLineEdit()
nodeLineEdit.setReadOnly(True)
nodeSelectButton = QtWidgets.QPushButton(buttonLabel)
nodeLineEdit.setFixedHeight(self.genericWidgetHight)
nodeSelectButton.setFixedHeight(self.genericWidgetHight)
nodeLayout.addWidget(nodeLabel)
nodeLayout.addWidget(nodeLineEdit, 1)
nodeLayout.addWidget(nodeSelectButton)
return nodeLayout, nodeLineEdit, nodeSelectButton
def labelListWidget(self, label, horizontal=True):
"""create the listAttribute that users can select their driver/driven
attributes for the setup
Args:
label (str): to display above the listWidget
horizontal (bool, optional): should the label be above or infront
of the listWidget
Returns:
list: QLayout, QListWidget
"""
if horizontal:
attributeLayout = QtWidgets.QHBoxLayout()
else:
attributeLayout = QtWidgets.QVBoxLayout()
attributeLabel = QtWidgets.QLabel(label)
attributeListWidget = QtWidgets.QListWidget()
attributeLayout.addWidget(attributeLabel)
attributeLayout.addWidget(attributeListWidget)
return attributeLayout, attributeListWidget
def addRemoveButtonWidget(self, label1, label2, horizontal=True):
if horizontal:
addRemoveLayout = QtWidgets.QHBoxLayout()
else:
addRemoveLayout = QtWidgets.QVBoxLayout()
addAttributesButton = QtWidgets.QPushButton(label1)
removeAttributesButton = QtWidgets.QPushButton(label2)
addRemoveLayout.addWidget(addAttributesButton)
addRemoveLayout.addWidget(removeAttributesButton)
return addRemoveLayout, addAttributesButton, removeAttributesButton
def createDriverAttributeWidget(self):
"""widget where the user inputs information for the setups
Returns:
list: [of widgets]
"""
driverMainLayout = QtWidgets.QVBoxLayout()
# --------------------------------------------------------------------
(driverLayout,
driverLineEdit,
driverSelectButton) = self.selectNodeWidget("Driver",
buttonLabel="Set")
driverLineEdit.setToolTip("The node driving the setup. (Click me!)")
# --------------------------------------------------------------------
(controlLayout,
controlLineEdit,
setControlButton) = self.selectNodeWidget("Control",
buttonLabel="Set")
controlLineEdit.setToolTip("The node driving the setup. (Click me!)")
# --------------------------------------------------------------------
(attributeLayout,
attributeListWidget) = self.labelListWidget(label="Select Attributes",
horizontal=False)
attributeListWidget.setToolTip("List of attributes driving setup.")
selType = QtWidgets.QAbstractItemView.ExtendedSelection
attributeListWidget.setSelectionMode(selType)
attributeListWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# --------------------------------------------------------------------
driverMainLayout.addLayout(driverLayout, 0)
driverMainLayout.addLayout(controlLayout, 0)
driverMainLayout.addLayout(attributeLayout, 0)
return [controlLineEdit,
setControlButton,
driverLineEdit,
driverSelectButton,
attributeListWidget,
driverMainLayout]
def createDrivenAttributeWidget(self):
"""the widget that displays the driven information
Returns:
list: [of widgets]
"""
drivenWidget = QtWidgets.QWidget()
drivenMainLayout = QtWidgets.QHBoxLayout()
drivenMainLayout.setContentsMargins(0, 10, 0, 10)
drivenMainLayout.setSpacing(9)
driverSetLayout = QtWidgets.QVBoxLayout()
drivenMainLayout.addLayout(driverSetLayout)
drivenWidget.setLayout(drivenMainLayout)
# --------------------------------------------------------------------
(driverLayout,
driverLineEdit,
driverSelectButton) = self.selectNodeWidget("Driven",
buttonLabel="Select")
drivenTip = "The node being driven by setup. (Click me!)"
driverLineEdit.setToolTip(drivenTip)
driverSelectButton.hide()
# --------------------------------------------------------------------
(attributeLayout,
attributeListWidget) = self.labelListWidget(label="Attributes",
horizontal=False)
attributeListWidget.setToolTip("Attributes being driven by setup.")
attributeLayout.setSpacing(1)
selType = QtWidgets.QAbstractItemView.ExtendedSelection
attributeListWidget.setSelectionMode(selType)
attributeListWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# --------------------------------------------------------------------
tableWidget = self.createTableWidget()
driverSetLayout.addLayout(driverLayout, 0)
driverSetLayout.addLayout(attributeLayout, 0)
drivenMainLayout.addWidget(tableWidget, 1)
return [driverLineEdit,
driverSelectButton,
attributeListWidget,
tableWidget,
drivenWidget]
def createTableWidget(self):
"""create table widget used to display poses, set tooltips and colum
Returns:
QTableWidget: QTableWidget
"""
tableWidget = QtWidgets.QTableWidget()
tableWidget.insertColumn(0)
tableWidget.insertRow(0)
tableWidget.setHorizontalHeaderLabels(["Pose Value"])
tableWidget.setVerticalHeaderLabels(["Pose #0"])
tableTip = "Live connections to the RBF Node in your setup."
tableTip = tableTip + "\nSelect the desired Pose # to recall pose."
tableWidget.setToolTip(tableTip)
return tableWidget
def createTabWidget(self):
"""Tab widget to add driven widgets too. Custom TabBar so the tab is
easier to select
Returns:
QTabWidget:
"""
tabLayout = QtWidgets.QTabWidget()
tabLayout.setContentsMargins(0, 0, 0, 0)
tabBar = TabBar()
tabLayout.setTabBar(tabBar)
tabBar.setTabsClosable(True)
return tabLayout
def createOptionsButtonsWidget(self):
"""add, edit, delete buttons for modifying rbf setups.
Returns:
list: [QPushButtons]
"""
optionsLayout = QtWidgets.QHBoxLayout()
addPoseButton = QtWidgets.QPushButton("Add Pose")
addTip = "After positioning all controls in the setup, add new pose."
addTip = addTip + "\nEnsure the driver node has a unique position."
addPoseButton.setToolTip(addTip)
addPoseButton.setFixedHeight(self.genericWidgetHight)
EditPoseButton = QtWidgets.QPushButton("Edit Pose")
EditPoseButton.setToolTip("Recall pose, adjust controls and Edit.")
EditPoseButton.setFixedHeight(self.genericWidgetHight)
deletePoseButton = QtWidgets.QPushButton("Delete Pose")
deletePoseButton.setToolTip("Recall pose, then Delete")
deletePoseButton.setFixedHeight(self.genericWidgetHight)
optionsLayout.addWidget(addPoseButton)
optionsLayout.addWidget(EditPoseButton)
optionsLayout.addWidget(deletePoseButton)
return (optionsLayout,
addPoseButton,
EditPoseButton,
deletePoseButton)
def createMenuBar(self, hideMenuBar=False):
"""Create the UI menubar, with option to hide based on mouse input
Args:
hideMenuBar (bool, optional): should it autoHide
Returns:
QMenuBar: for parenting
"""
mainMenuBar = QtWidgets.QMenuBar()
mainMenuBar.setContentsMargins(0, 0, 0, 0)
file = mainMenuBar.addMenu("File")
file.setToolTipsVisible(True)
menu1 = file.addAction("Re-evaluate Nodes", self.reevalluateAllNodes)
menu1.setToolTip("Force all RBF nodes to re-revaluate.")
file.addAction("Export All", self.exportNodes)
file.addAction("Export current setup", partial(self.exportNodes,
allSetups=False))
file.addAction("Import RBFs", partial(self.importNodes))
file.addSeparator()
file.addAction("Delete Current Setup", self.__deleteSetup)
# mirror --------------------------------------------------------------
mirrorMenu = mainMenuBar.addMenu("Mirror")
mirrorMenu.setToolTipsVisible(True)
mirrorMenu1 = mirrorMenu.addAction("Mirror Setup", self.mirrorSetup)
mirrorMenu1.setToolTip("This will create a new setup.")
# settings ------------------------------------------------------------
settingsMenu = mainMenuBar.addMenu("Settings")
settingsMenu.setToolTipsVisible(True)
menuLabel = "Add poses in worldSpace"
worldSpaceMenuItem = settingsMenu.addAction(menuLabel)
worldSpaceMenuItem.toggled.connect(self.toggleGetPoseType)
worldSpaceMenuItem.setCheckable(True)
worldSpaceMenuItem.setChecked(True)
toolTip = "When ADDING NEW pose, should it be recorded in worldSpace."
worldSpaceMenuItem.setToolTip(toolTip)
# show override -------------------------------------------------------
additionalFuncDict = getEnvironModules()
if additionalFuncDict:
showOverridesMenu = mainMenuBar.addMenu("Local Overrides")
for k, v in additionalFuncDict.iteritems():
showOverridesMenu.addAction(k, v)
if hideMenuBar:
mainMenuBar.hide()
self.setMouseTracking(True)
self.mousePosition.connect(self.hideMenuBar)
return mainMenuBar
# main assebly ------------------------------------------------------------
def createCentralWidget(self):
"""main UI assembly
Returns:
QtWidget: main UI to be parented to as the centralWidget
"""
centralWidget = QtWidgets.QWidget()
centralWidgetLayout = QtWidgets.QVBoxLayout()
centralWidget.setLayout(centralWidgetLayout)
(rbfLayout,
self.rbf_cbox,
self.rbf_refreshButton) = self.createSetupSelectorWidget()
self.rbf_cbox.setToolTip("List of available setups in the scene.")
self.rbf_refreshButton.setToolTip("Refresh the UI")
centralWidgetLayout.addLayout(rbfLayout)
centralWidgetLayout.addWidget(HLine())
# --------------------------------------------------------------------
driverDrivenLayout = QtWidgets.QHBoxLayout()
(self.controlLineEdit,
self.setControlButton,
self.driverLineEdit,
self.setDriverButton,
self.driver_attributes_widget,
driverLayout) = self.createDriverAttributeWidget()
self.addRbfButton = QtWidgets.QPushButton("New RBF")
self.addRbfButton.setToolTip("Select node to be driven by setup.")
self.addRbfButton.setFixedHeight(self.genericWidgetHight)
self.addRbfButton.setStyleSheet("background-color: rgb(23, 158, 131)")
driverLayout.addWidget(self.addRbfButton)
self.driverPoseTableWidget = self.createTableWidget()
driverDrivenLayout.addLayout(driverLayout, 0)
driverDrivenLayout.addWidget(self.driverPoseTableWidget, 1)
centralWidgetLayout.addLayout(driverDrivenLayout, 1)
# --------------------------------------------------------------------
self.rbfTabWidget = self.createTabWidget()
centralWidgetLayout.addWidget(self.rbfTabWidget, 1)
# --------------------------------------------------------------------
(optionsLayout,
self.addPoseButton,
self.editPoseButton,
self.deletePoseButton) = self.createOptionsButtonsWidget()
self.editPoseButton.setEnabled(False)
self.deletePoseButton.setEnabled(False)
centralWidgetLayout.addWidget(HLine())
centralWidgetLayout.addLayout(optionsLayout)
return centralWidget
# overrides ---------------------------------------------------------------
def mouseMoveEvent(self, event):
"""used for tracking the mouse position over the UI, in this case for
menu hiding/show
Args:
event (Qt.QEvent): events to filter
"""
if event.type() == QtCore.QEvent.MouseMove:
if event.buttons() == QtCore.Qt.NoButton:
pos = event.pos()
self.mousePosition.emit(pos.x(), pos.y())
def closeEvent(self, evnt):
"""on UI close, ensure that all attrControlgrps are destroyed in case
the user is just reopening the UI. Properly severs ties to the attrs
Args:
evnt (Qt.QEvent): Close event called
"""
self.__deleteAssociatedWidgetsMaya(self.driverPoseTableWidget)
self.__deleteAssociatedWidgets(self.driverPoseTableWidget)
if self.callBackID is not None:
self.removeSceneCallback()
super(RBFManagerUI, self).closeEvent(evnt)
|
<reponame>DHI-GRAS/wapor-et-look<filename>pyWAPOR/Collect/Landsat/PreprocessLandsat.py
import os
import shutil
import tarfile
import numpy as np
import rasterio as rio
from tqdm import tqdm
from pathlib import Path
from datetime import datetime
from datetime import timedelta
from pyWAPOR.Functions.nspi import nspi
from pyWAPOR.Functions.SavGol_Filter import savgol_reconstruct
from pyWAPOR.Pre_ETLook import _get_dekadal_date
def PreprocessLandsat(landsat_dir, output_dir):
    """Unpack, merge, gap-fill and convert raw Landsat archives to dekadal
    NDVI/ALBEDO rasters.

    Args:
        landsat_dir (str or Path): directory containing 'L7' and 'L8'
            sub-directories holding *.tar Landsat Collection 2 archives
        output_dir (str or Path): root directory for the dekadal outputs
    """
    if isinstance(landsat_dir, str):
        landsat_dir = Path(landsat_dir)
    # BUGFIX: output_dir is combined with the "/" path operator downstream
    # (_process_and_save), so a plain str would raise TypeError there
    if isinstance(output_dir, str):
        output_dir = Path(output_dir)
    # unpack the *.tar(.gz) Landsat files
    L7_files = list((landsat_dir / Path('L7')).glob('*.tar'))
    L8_files = list((landsat_dir / Path('L8')).glob('*.tar'))
    print('Unpacking *.tar files...')
    for file in tqdm(L7_files + L8_files):
        _unpack_and_save(file, delete_input=False)
    # merge the individual landsat bands into multiband files
    L7_dirs = [directory for directory in list((landsat_dir / Path('L7')).glob('*')) if os.path.isdir(directory)]
    L8_dirs = [directory for directory in list((landsat_dir / Path('L8')).glob('*')) if os.path.isdir(directory)]
    filename_list = []
    bandnames_list = []
    print('Merging Landsat bands...')
    for directory in tqdm(L7_dirs + L8_dirs):
        filename, band_names = _merge_and_save_landsat(directory, delete_input=False)
        filename_list.append(filename)
        bandnames_list.append(band_names)
    # apply nspi gap-filling on the landsat-7 data (slow!)
    _apply_nspi(landsat_dir, filename_list, bandnames_list)
    # calculate NDVI/ALBEDO and save dekadal composites
    _process_and_save(landsat_dir, filename_list, bandnames_list, output_dir, delete_input=False)
def _process_and_save(landsat_dir, filename_list, bandnames_list, output_folder, delete_input=False):
    """Calculate NDVI/ALBEDO for every merged scene, smooth both time
    series with a Savitzky-Golay filter and write dekadal mean composites.

    Args:
        landsat_dir (Path): root directory with 'L7' and 'L8' sub-dirs
        filename_list (list): merged scene file stems (no extension)
        bandnames_list (list): per-scene band-name lists, same order
        output_folder (Path): root directory for the dekadal outputs
        delete_input (bool, optional): currently unused (see TODO below)
    """
    NDVI = []
    ALBEDO = []
    master_src = None
    # the scene date is the 4th underscore-separated token of the stem,
    # e.g. LE07_L2SP_PPPRRR_YYYYMMDD_...
    dates = [datetime.strptime(str(f).split('_')[3], '%Y%m%d') for f in filename_list]
    # we need to sort according to date before filtering the timeseries;
    # compute the sort order once and reuse it for all three lists
    order = np.argsort(dates)
    sorted_dates = [dates[idx] for idx in order]
    sorted_filenames = [filename_list[idx] for idx in order]
    sorted_bandnames = [bandnames_list[idx] for idx in order]
    print('Calculating NDVI/ALBEDO...')
    # TODO: enable delete inputs
    for i, file in enumerate(tqdm(sorted_filenames)):
        sensor = str(file.split('_')[0])
        bandnames = sorted_bandnames[i]
        # Landsat-7 scenes were gap-filled by _apply_nspi beforehand
        if sensor == 'LE07':
            file = file + '_gap-filled'
            filename = str(landsat_dir / Path('L7') / Path(file)) + '.tif'
        elif sensor == 'LC08':
            filename = str(landsat_dir / Path('L8') / Path(file)) + '.tif'
        # the first scene defines the master grid
        if master_src is None:
            with rio.open(filename) as master_src:
                data = master_src.read()
                meta = master_src.profile
                master_dict = {'transform': master_src.transform,
                               'height': master_src.height,
                               'width': master_src.width,
                               'crs': master_src.crs}
        # all later scenes are warped onto the master grid
        else:
            with rio.open(filename) as src:
                with rio.vrt.WarpedVRT(src, **master_dict) as vrt:
                    data = vrt.read()
        NDVI.append(_calc_ndvi(data, bandnames, sensor))
        ALBEDO.append(_calc_albedo(data, bandnames, sensor))
        del data
    NDVI = np.asarray(NDVI)
    ALBEDO = np.asarray(ALBEDO)
    # TODO: Fill gaps in NDVI using Weiss et. al. 2014 <----- wait til we hear from Livia
    print('Applying SavGol filter...')
    NDVI_smooth, _ = savgol_reconstruct(NDVI)
    del NDVI
    ALBEDO_smooth, _ = savgol_reconstruct(ALBEDO, invert=True)
    del ALBEDO
    dekadal_dates = [_get_dekadal_date(date) for date in sorted_dates]
    unique_dekadal_dates = np.unique(dekadal_dates)
    meta.update({'dtype': 'float64',
                 'nodata': np.nan})
    # merge dekadal images
    for dekadal_date in unique_dekadal_dates:
        datestring = dekadal_date.strftime('%Y%m%d')
        idx = np.argwhere(np.isin(dekadal_dates, dekadal_date))[:, 0]
        # BUGFIX: skip (not abort) when a dekad has no scenes; the previous
        # 'break' silently dropped all remaining dekads
        if idx.shape[0] == 0:
            continue
        # nanmean composites all scenes that fall into the same dekad
        ndvi_dekadal_composite = np.nanmean(NDVI_smooth[idx, ...], axis=0)
        albedo_dekadal_composite = np.nanmean(ALBEDO_smooth[idx, ...], axis=0)
        # save dekadal images
        out_dir = output_folder / Path(datestring)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        ndvi_filename = out_dir / Path('NDVI_' + datestring + '.tif')
        albedo_filename = out_dir / Path('ALBEDO_' + datestring + '.tif')
        with rio.open(str(ndvi_filename), 'w', **meta) as dst:
            dst.write(ndvi_dekadal_composite, 1)
        with rio.open(str(albedo_filename), 'w', **meta) as dst:
            dst.write(albedo_dekadal_composite, 1)
def _calc_ndvi(data, bandnames, sensor):
if sensor == 'LE07':
bands = ['SR_B3', 'SR_B4']
elif sensor == 'LC08':
bands = ['SR_B4', 'SR_B5']
red = data[bandnames.index(bands[0]), ...].astype(np.float)
nir = data[bandnames.index(bands[1]), ...].astype(np.float)
ndvi = (nir - red) / (nir + red)
# remove too large or too small values
ndvi[ndvi > 1] = np.nan
ndvi[ndvi < -1] = np.nan
return ndvi
def _calc_albedo(data, bandnames, sensor):
albedo_Mp = 2.75e-5 # multiplicative scaling factor for Collection 2
albedo_Ap = -0.2 # additive scaling factor for Collection 2
# ESUN values: [Blue, Green, Red, NIR, SWIR-1, SWIR-2]
if sensor == 'LE07':
ESUN_values = np.array([1970, 1842, 1547, 1044, 225.7, 82.06])
bands = ['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B7']
elif sensor == 'LC08':
ESUN_values = np.array([1991, 1812, 1549, 972.6, 214.7, 80.7])
bands = ['SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7']
band_idx = [bandnames.index(band) for band in bands]
BGRNS = albedo_Mp * data[band_idx, ...] + albedo_Ap
albedo = np.sum(BGRNS * np.expand_dims(ESUN_values, (1, 2)), axis=0) / np.sum(ESUN_values)
# remove too large or too small values
albedo[albedo > 1] = np.nan
albedo[albedo < 0] = np.nan
return albedo
def _apply_nspi(landsat_dir, filename_list, bandnames_list, overwrite=False):
    """Gap-fill every Landsat-7 scene with NSPI, writing '<stem>_gap-filled.tif'.

    For each L7 target scene, the temporally closest OTHER scene (L7 or L8)
    is used as the NSPI input image. If the closest scene is more than 32
    days away, the target is written out unfilled.

    Args:
        landsat_dir (Path): root directory with 'L7' and 'L8' sub-dirs
        filename_list (list): merged scene file stems (no extension)
        bandnames_list (list): per-scene band-name lists, same order
        overwrite (bool, optional): recompute even if the output file exists
    """
    # split the scene list per sensor (the stem starts with the sensor id)
    L7_idx = [i for i, file in enumerate(filename_list) if str(file.split('_')[0]) == 'LE07']
    L8_idx = [i for i, file in enumerate(filename_list) if str(file.split('_')[0]) == 'LC08']
    L7_tifs = [filename_list[idx] + '.tif' for idx in L7_idx]
    L8_tifs = [filename_list[idx] + '.tif' for idx in L8_idx]
    L7_bandnames = [bandnames_list[idx] for idx in L7_idx]
    L8_bandnames = [bandnames_list[idx] for idx in L8_idx]
    # scene date is the 4th underscore-separated token of the filename
    L7_dates = [datetime.strptime(str(f).split('_')[3], '%Y%m%d') for f in L7_tifs]
    L8_dates = [datetime.strptime(str(f).split('_')[3], '%Y%m%d') for f in L8_tifs]
    # the six reflective bands used for gap filling, per sensor
    L7_bands = ['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B7']
    L8_bands = ['SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7']
    target_folder = Path('L7')
    print('Fill gaps (NSPI)...')
    for target_idx, target_file in enumerate(tqdm(L7_tifs)):
        target_date = L7_dates[target_idx]
        # time distance from the target to every scene (incl. itself at 0)
        date_diff = [np.abs(target_date - input_date) for input_date in L7_dates + L8_dates]
        output_filename = str(landsat_dir / target_folder / Path(target_file).stem) + '_gap-filled.tif'
        if os.path.isfile(output_filename) and not overwrite:
            continue
        # [1] skips the target itself (distance 0) -> closest other scene
        input_idx = np.argpartition(date_diff, 1)[1]
        input_file = (L7_tifs + L8_tifs)[input_idx]
        input_sensor = str(input_file.split('_')[0])
        if input_sensor == 'LE07':
            input_folder = Path('L7')
            input_bands = L7_bands
        elif input_sensor == 'LC08':
            input_folder = Path('L8')
            input_bands = L8_bands
        # open target image
        with rio.open(str(landsat_dir / target_folder / Path(target_file))) as master_src:
            meta = master_src.profile
            target_image = master_src.read()
            master_dict = {'transform': master_src.transform, 'height': master_src.height, 'width': master_src.width,
                           'crs': master_src.crs}
        # closest scene too far away (> 32 days): write the target unfilled
        if date_diff[input_idx] > timedelta(32):
            # save results as tif
            with rio.open(output_filename, 'w', **meta) as dst:
                dst.write(target_image)
            continue
        # open input image, warped onto the target grid
        with rio.open(str(landsat_dir / input_folder / Path(input_file))) as slave_src:
            with rio.vrt.WarpedVRT(slave_src, **master_dict) as vrt:
                input_image = vrt.read()
        target_band_idx = [L7_bandnames[target_idx].index(band) for band in L7_bands]
        input_band_idx = [(L7_bandnames + L8_bandnames)[input_idx].index(band) for band in input_bands]
        # reshape images to (rows, cols, bands) for correct NSPI format
        target_image_reshaped = np.transpose(target_image[target_band_idx, ...], axes=[1, 2, 0])
        input_image_reshaped = np.transpose(input_image[input_band_idx, ...], axes=[1, 2, 0])
        # add nans instead of nodata
        target_image_reshaped = np.where(target_image_reshaped == -9999, np.nan, target_image_reshaped)
        input_image_reshaped = np.where(input_image_reshaped == -9999, np.nan, input_image_reshaped)
        # pixels missing in the target but present in the input are fillable
        missing_pixels_mask = np.isnan(target_image_reshaped[..., 0]) & ~np.isnan(input_image_reshaped[..., 0])
        # get the QA_PIXEL bitmask (nodata treated as "no flags set")
        target_pixel_qa = target_image[L7_bandnames[target_idx].index('QA_PIXEL'), ...]
        target_pixel_qa = np.where(target_pixel_qa == -9999, 0, target_pixel_qa)
        input_pixel_qa = input_image[(L7_bandnames + L8_bandnames)[input_idx].index('QA_PIXEL'), ...]
        input_pixel_qa = np.where(input_pixel_qa == -9999, 0, input_pixel_qa)
        # calculate cloud mask
        target_pixel_cloudmask = _landsat_cloudmask(target_pixel_qa)
        input_pixel_cloudmask = _landsat_cloudmask(input_pixel_qa)
        # mask clouds out of every reflective band
        for n in range(0, len(L7_bands)):
            target_image_reshaped[..., n] = np.where(target_pixel_cloudmask, np.nan, target_image_reshaped[..., n])
            input_image_reshaped[..., n] = np.where(input_pixel_cloudmask, np.nan, input_image_reshaped[..., n])
        out_image_reshaped = nspi(input_image_reshaped, target_image_reshaped, missing_pixels_mask,
                                  num_classes=5, required_pixels=20, max_window_size=15)
        # replace the gap-filled bands in the original image
        out_image = target_image.astype(np.float64)
        out_image[target_band_idx, ...] = np.transpose(out_image_reshaped, axes=[2, 0, 1])
        # convert nan-values to nodata
        out_image = np.where(np.isnan(out_image), -9999, out_image).astype(np.int16)
        # save results as tif
        with rio.open(output_filename, 'w', **meta) as dst:
            dst.write(out_image)
# merges individual landsat bands and saves as single tif
def _merge_and_save_landsat(directory, delete_input=False, overwrite=False):
    """Merge the per-band TIFs of one unpacked Landsat scene into a single
    multiband GeoTIFF saved next to the scene directory.

    The QA_PIXEL band is the master grid and is stored as band 1; all other
    bands are warped onto it and appended in glob order.

    Args:
        directory (Path): unpacked scene directory containing *.TIF bands
        delete_input (bool, optional): remove the scene directory afterwards
        overwrite (bool, optional): rewrite the merged file if it exists

    Returns:
        tuple: (merged file stem, list of band names in band order)
    """
    master_file = list(directory.glob('*QA_PIXEL.TIF'))[0]
    slave_files = [f for f in list(directory.glob('*.TIF')) if 'QA_PIXEL' not in str(f)]
    # scene id = stem minus the trailing '<group>_<band>' tokens
    output_filename = master_file.parents[1] / Path(
        '_'.join(master_file.stem.split('_')[0:-2]) + '.tif')
    band_names = ['_'.join(master_file.stem.split('_')[-2:])]
    if os.path.isfile(output_filename) and not overwrite:
        # merged file already exists: only rebuild the band-name list, in
        # the same glob order the write path below would have used
        for i, file in enumerate(slave_files):
            band_names.append('_'.join(file.stem.split('_')[-2:]))
        return output_filename.stem, band_names
    # open master and add to array
    with rio.open(str(master_file)) as master_src:
        meta = master_src.profile
        pixel_qa = master_src.read().squeeze()
        master_dict = {'transform': master_src.transform, 'height': master_src.height, 'width': master_src.width,
                       'crs': master_src.crs}
    # QA_PIXEL == 1 marks designated-fill pixels
    nodata_mask = np.where(pixel_qa == 1, True, False)
    data = np.zeros((len(slave_files) + 1, pixel_qa.shape[0], pixel_qa.shape[1])).astype(np.int16)
    data[0, ...] = pixel_qa
    # for each slave-file, warp onto the master grid and append to array
    for i, file in enumerate(slave_files):
        with rio.open(file) as slave_src:
            with rio.vrt.WarpedVRT(slave_src, **master_dict) as vrt:
                data[i + 1, ...] = vrt.read().squeeze()
        band_names.append('_'.join(file.stem.split('_')[-2:]))
    # mark fill pixels as nodata across all bands
    data[:, nodata_mask] = -9999
    # save file as tif in root
    meta.update({'count': data.shape[0],
                 'dtype': str(data.dtype),
                 'blockxsize': 256,
                 'blockysize': 256,
                 'tiled': True,
                 'compress': 'lzw',
                 'interleave': 'pixel',
                 'nodata': -9999})
    with rio.open(output_filename, 'w', **meta) as dst:
        dst.write(data)
    if delete_input:
        shutil.rmtree(directory)
    return output_filename.stem, band_names
# unpack and saves *.tar files
def _unpack_and_save(file, delete_input=False, overwrite=False):
path = Path(file).parent / Path(Path(file).stem).stem
# If folder exists assume that tar has already been uncompressed
if os.path.isdir(path):
if overwrite:
path.rmdir()
else:
return
os.mkdir(path)
tar = tarfile.open(file, "r:*")
tar.extractall(path=path)
tar.close()
if delete_input:
os.remove(file)
# returns cloud mask given quality band as input
# Updated to match Collection 2 QA_PIXEL
# https://www.usgs.gov/media/files/landsat-8-9-olitirs-collection-2-level-2-data-format-control-book
# https://www.usgs.gov/media/files/landsat-7-etm-collection-2-level-2-data-format-control-book
def _landsat_cloudmask(quality_band):
# if clouds (bit 3) and low/medium/high probability (bit 8 and 9) then clouds
clouds = ((quality_band & (1 << 3)) > 1) & ((quality_band & ((1 << 8) | (1 << 9))) > 1)
# if shadows (bit 4) and low/medium/high probability shadows (bit 10 and 11) then shadows
shadows = ((quality_band & (1 << 4)) > 1) & ((quality_band & ((1 << 10) | (1 << 11))) > 1)
# if cirrus (bit 2) and low/medium/high probability shadows (bit 14 and 15) then shadows
cirrus = ((quality_band & (1 << 2)) > 1) & ((quality_band & ((1 << 14) | (1 << 15))) > 1)
return np.logical_or.reduce((clouds, shadows, cirrus))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.