content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
# SPDX-FileCopyrightText: 2022 Cedar Grove Maker Studios
# SPDX-License-Identifier: MIT
"""
touch_calibrator_built_in.py 2022-01-21 v2.1
Author(s): JG for Cedar Grove Maker Studios
On-screen touchscreen calibrator for built-in displays.
When the test screen appears, use a stylus to swipe to the four edges
of the visible display area. As the screen is calibrated, the small red
square tracks the stylus tip (REPL_ONLY=False). Minimum and maximum
calibration values will display on the screen and in the REPL. The calibration
tuple can be copied and pasted into the calling code's touchscreen
instantiation statement.
DISPLAY_ROTATION: Display rotation value in degrees. Only values of
None, 0, 90, 180, and 270 degrees are accepted. Defaults to None, the
previous orientation of the display.
REPL_ONLY: If False, calibration values are shown graphically on the screen
and printed to the REPL. If True, the values are only printed to the REPL.
Default value is False.
"""
import board
import time
import displayio
import vectorio
import terminalio
from adafruit_display_text.label import Label
import adafruit_touchscreen
from simpleio import map_range
# Operational parameters:
DISPLAY_ROTATION = 0 # Specify 0, 90, 180, or 270 degrees
REPL_ONLY = False # True to disable graphics
# A collection of colors used for graphic objects
class Colors:
    """Named 24-bit RGB color constants for the calibrator graphics."""

    BLUE_DK = 0x000060  # Screen fill
    RED = 0xFF0000  # Boundary
    WHITE = 0xFFFFFF  # Text
# Instantiate the built-in display.
display = board.DISPLAY
# Check rotation value and update display.
# Always set rotation before instantiating the touchscreen.
if DISPLAY_ROTATION is None:
    # Per the module docstring, None keeps the display's previous orientation.
    pass
elif DISPLAY_ROTATION in (0, 90, 180, 270):
    display.rotation = DISPLAY_ROTATION
else:
    # Invalid value: warn (typo "defalting" fixed), fall back to 0 degrees,
    # and pause briefly so the warning is visible before graphics start.
    print("Warning: invalid rotation value -- defaulting to zero")
    display.rotation = 0
    time.sleep(1)
# Activate the display graphics unless REPL_ONLY=True.
if not REPL_ONLY:
    display_group = displayio.Group()
    display.show(display_group)
# Instantiate touch screen without calibration or display size parameters
# The four resistive-touch pins are passed in a different order for each
# rotation so raw touch x/y track the rotated display axes.
if display.rotation == 0:
    ts = adafruit_touchscreen.Touchscreen(
        board.TOUCH_XL,
        board.TOUCH_XR,
        board.TOUCH_YD,
        board.TOUCH_YU,
        # calibration=((5200, 59000), (5250, 59500)),
        # size=(board.DISPLAY.width, board.DISPLAY.height),
    )
elif display.rotation == 90:
    # Axes swapped: the touch y pins now drive the screen x axis.
    ts = adafruit_touchscreen.Touchscreen(
        board.TOUCH_YU,
        board.TOUCH_YD,
        board.TOUCH_XL,
        board.TOUCH_XR,
        # calibration=((5250, 59500), (5200, 59000)),
        # size=(board.DISPLAY.width, board.DISPLAY.height),
    )
elif display.rotation == 180:
    # Both axes reversed relative to rotation 0.
    ts = adafruit_touchscreen.Touchscreen(
        board.TOUCH_XR,
        board.TOUCH_XL,
        board.TOUCH_YU,
        board.TOUCH_YD,
        # calibration=((5200, 59000), (5250, 59500)),
        # size=(board.DISPLAY.width, board.DISPLAY.height),
    )
elif display.rotation == 270:
    # Axes swapped and reversed.
    ts = adafruit_touchscreen.Touchscreen(
        board.TOUCH_YD,
        board.TOUCH_YU,
        board.TOUCH_XR,
        board.TOUCH_XL,
        # calibration=((5250, 59500), (5200, 59000)),
        # size=(board.DISPLAY.width, board.DISPLAY.height),
    )
else:
    raise ValueError("Rotation value must be 0, 90, 180, or 270")
# Define the graphic objects if REPL_ONLY = False.
if not REPL_ONLY:
    # Define the text graphic objects
    font_0 = terminalio.FONT
    # Read-out label for the live calibration tuple, centered a quarter
    # of the way down the screen.
    coordinates = Label(
        font=font_0,
        text="calib: ((x_min, x_max), (y_min, y_max))",
        color=Colors.WHITE,
    )
    coordinates.anchor_point = (0.5, 0.5)
    coordinates.anchored_position = (board.DISPLAY.width // 2, board.DISPLAY.height // 4)
    # Rotation label, 30 px above the calibration read-out.
    display_rotation = Label(
        font=font_0,
        text="rotation: " + str(display.rotation),
        color=Colors.WHITE,
    )
    display_rotation.anchor_point = (0.5, 0.5)
    display_rotation.anchored_position = (board.DISPLAY.width // 2, board.DISPLAY.height // 4 - 30)
    # Define graphic objects for the screen fill, boundary, and touch pen.
    # Fill is inset 2 px so the red boundary shows as a frame around it.
    target_palette = displayio.Palette(1)
    target_palette[0] = Colors.BLUE_DK
    screen_fill = vectorio.Rectangle(
        pixel_shader=target_palette,
        x=2,
        y=2,
        width=board.DISPLAY.width - 4,
        height=board.DISPLAY.height - 4,
    )
    # Red palette shared by the boundary rectangle and the stylus pen.
    target_palette = displayio.Palette(1)
    target_palette[0] = Colors.RED
    boundary = vectorio.Rectangle(
        pixel_shader=target_palette,
        x=0,
        y=0,
        width=board.DISPLAY.width,
        height=board.DISPLAY.height,
    )
    # 10x10 red square that tracks the stylus tip during calibration.
    pen = vectorio.Rectangle(
        pixel_shader=target_palette,
        x=board.DISPLAY.width // 2,
        y=board.DISPLAY.height // 2,
        width=10,
        height=10,
    )
    # Stack order: boundary underneath, fill over it, then pen and labels.
    display_group.append(boundary)
    display_group.append(screen_fill)
    display_group.append(pen)
    display_group.append(coordinates)
    display_group.append(display_rotation)
# Reset x and y values to raw touchscreen mid-point before measurement.
# (65535 is the full-scale raw ADC range of the touchscreen reading.)
x_min = x_max = y_min = y_max = 65535 // 2
print("Touchscreen Calibrator")
print(" Use a stylus to swipe slightly beyond the")
print(" four edges of the visible display area.")
print(" ")
print(f" display rotation: {display.rotation} degrees")
print(" Calibration values follow:")
print(" ")
# Main loop: poll the touchscreen and expand the min/max envelope as the
# stylus reaches toward the screen edges.
while True:
    time.sleep(0.100)
    touch = ts.touch_point  # Check for touch
    if touch:
        if not REPL_ONLY:
            # Map the raw reading through the envelope measured so far;
            # the -5 offset centers the 10x10 pen square on the stylus tip.
            pen.x = int(map_range(touch[0], x_min, x_max, 0, board.DISPLAY.width)) - 5
            pen.y = int(map_range(touch[1], y_min, y_max, 0, board.DISPLAY.height)) - 5
        # Remember minimum and maximum values for the calibration tuple.
        x_min = min(x_min, touch[0])
        x_max = max(x_max, touch[0])
        y_min = min(y_min, touch[1])
        y_max = max(y_max, touch[1])
        # Show the calibration tuple.
        print(f"(({x_min}, {x_max}), ({y_min}, {y_max}))")
        if not REPL_ONLY:
            coordinates.text = f"calib: (({x_min}, {x_max}), ({y_min}, {y_max}))"
|
nilq/baby-python
|
python
|
from spaceone.inventory.libs.schema.dynamic_field import TextDyField, ListDyField, \
DateTimeDyField, EnumDyField, SearchField
from spaceone.inventory.libs.schema.resource import CloudServiceTypeResource, CloudServiceTypeResponse, \
CloudServiceTypeMeta
# Cloud service type definition for AWS ELB load balancers, consumed by the
# SpaceONE inventory collector.
cst_elb = CloudServiceTypeResource()
cst_elb.name = 'LoadBalancer'
cst_elb.provider = 'aws'
cst_elb.group = 'ELB'
cst_elb.labels = ['Networking']
cst_elb.is_primary = True
cst_elb.is_major = True
cst_elb.service_code = 'AWSELB'
cst_elb.tags = {
    'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/aws/Elastic-Load-Balancing.svg',
}
# LoadBalancer metadata: console table fields (those marked is_optional are
# hidden by default) and search index definitions.
cst_elb._metadata = CloudServiceTypeMeta.set_meta(
    fields=[
        TextDyField.data_source('Name', 'data.load_balancer_name'),
        TextDyField.data_source('DNS Name', 'data.dns_name'),
        EnumDyField.data_source('State', 'data.state.code', default_state={
            'safe': ['active'],
            'warning': ['provisioning'],
            'alert': ['active_impaired', 'failed']
        }),
        EnumDyField.data_source('Type', 'data.type', default_badge={
            'indigo.500': ['network'], 'coral.600': ['application']
        }),
        ListDyField.data_source('Availability Zones', 'data.availability_zones', options={
            'sub_key': 'zone_name',
            'delimiter': '<br>'
        }),
        DateTimeDyField.data_source('Created At', 'data.created_time'),
        TextDyField.data_source('ARN', 'data.load_balancer_arn', options={
            'is_optional': True
        }),
        TextDyField.data_source('Scheme', 'data.scheme', options={
            'is_optional': True
        }),
        TextDyField.data_source('VPC ID', 'data.vpc_id', options={
            'is_optional': True
        }),
        ListDyField.data_source('Subnet ID', 'data.availability_zones', options={
            'delimiter': '<br>',
            'sub_key': 'subnet_id',
            'is_optional': True
        }),
        ListDyField.data_source('Availability Zone', 'data.availability_zones', options={
            'delimiter': '<br>',
            'sub_key': 'zone_name',
            'is_optional': True
        }),
        TextDyField.data_source('Hosted Zone ID', 'data.canonical_hosted_zone_id', options={
            'is_optional': True
        }),
        ListDyField.data_source('Security Groups', 'data.security_group', options={
            'delimiter': '<br>',
            'is_optional': True
        }),
        ListDyField.data_source('Listener IDs', 'data.listeners', options={
            'delimiter': '<br>',
            'sub_key': 'listener_arn',
            'is_optional': True
        }),
        ListDyField.data_source('Protocols', 'data.listeners', options={
            'delimiter': '<br>',
            'sub_key': 'protocol',
            'is_optional': True
        }),
        ListDyField.data_source('Ports', 'data.listeners', options={
            'delimiter': '<br>',
            'sub_key': 'port',
            'is_optional': True
        }),
        TextDyField.data_source('IP Address Type', 'data.ip_address_type', options={
            'is_optional': True
        }),
        TextDyField.data_source('Access Log S3 Bucket', 'data.attributes.access_logs_s3_bucket', options={
            'is_optional': True
        }),
        TextDyField.data_source('Routing HTTP2 Enabled', 'data.attributes.routing_http2_enabled', options={
            'is_optional': True
        }),
        # BUG FIX: label typo "Idel" corrected to "Idle".
        TextDyField.data_source('Idle Timeout Seconds', 'data.attributes.idle_timeout_seconds', options={
            'is_optional': True
        }),
        TextDyField.data_source('Routing HTTP Drop Invalid Header Fields Enabled',
                                'data.attributes.routing_http_drop_invalid_header_fields_enabled', options={
            'is_optional': True
        }),
        TextDyField.data_source('WAF Fail Open Enabled',
                                'data.attributes.waf_fail_open_enabled', options={
            'is_optional': True
        }),
        TextDyField.data_source('Deletion Protection Enabled',
                                'data.attributes.deletion_protection_enabled', options={
            'is_optional': True
        }),
        TextDyField.data_source('Routing HTTP Desync Mitigation Mode',
                                'data.attributes.routing_http_desync_mitigation_mode', options={
            'is_optional': True
        }),
        TextDyField.data_source('Load Balancing Cross Zone Enabled',
                                'data.attributes.load_balancing_cross_zone_enabled', options={
            'is_optional': True
        }),
        TextDyField.data_source('AWS Account ID', 'data.account_id', options={
            'is_optional': True
        })
    ],
    search=[
        SearchField.set(name='Name', key='data.load_balancer_name'),
        SearchField.set(name='ARN', key='data.load_balancer_arn'),
        SearchField.set(name='DNS Name', key='data.dns_name'),
        SearchField.set(name='State', key='data.state'),
        SearchField.set(name='Type', key='data.type',
                        enums={
                            'application': {'label': 'Application'},
                            'network': {'label': 'Network'},
                        }),
        SearchField.set(name='Scheme', key='data.scheme',
                        enums={
                            'internet-facing': {'label': 'Internet Facing'},
                            'internal': {'label': 'Internal'},
                        }),
        SearchField.set(name='VPC ID', key='data.vpc_id'),
        SearchField.set(name='Availability Zone', key='data.availability_zones.zone_name'),
        SearchField.set(name='Subnet ID', key='data.availability_zones.subnet_id'),
        SearchField.set(name='Hosted Zone', key='data.canonical_hosted_zone_id'),
        SearchField.set(name='Protocol', key='data.listeners.protocol',
                        enums={
                            'HTTP': {'label': 'HTTP'},
                            'HTTPS': {'label': 'HTTPS'},
                            'TCP': {'label': 'TCP'},
                            'UDP': {'label': 'UDP'},
                            'TLS': {'label': 'TLS'},
                            'TCP_UDP': {'label': 'TCP/UDP'},
                        }),
        SearchField.set(name='Port', key='data.listeners.port', data_type='integer'),
        SearchField.set(name='Deletion Protection', key='data.attributes.deletion_protection_enabled',
                        data_type='boolean'),
        SearchField.set(name='Cross-Zone Load Balancing', key='data.attributes.load_balancing_cross_zone_enabled',
                        data_type='boolean'),
        # NOTE(review): the table field above uses 'data.security_group'
        # (singular) while this key is 'data.security_groups' (plural) —
        # one of the two is likely wrong; confirm against the collector model.
        SearchField.set(name='Security Group ID', key='data.security_groups'),
        SearchField.set(name='Listener ARN', key='data.listeners.listener_arn'),
        SearchField.set(name='Created Time', key='data.created_time', data_type='datetime'),
        SearchField.set(name='Region', key='data.region_name'),
        SearchField.set(name='AWS Account ID', key='data.account_id'),
    ]
)
# Cloud service type definition for AWS ELB target groups.
cst_tg = CloudServiceTypeResource()
cst_tg.name = 'TargetGroup'
cst_tg.provider = 'aws'
cst_tg.group = 'ELB'
cst_tg.labels = ['Networking']
cst_tg.service_code = 'AWSELB'
cst_tg.tags = {
    'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/aws/Elastic-Load-Balancing.svg',
}
# TargetGroup metadata: console table fields (optional ones hidden by
# default) and search index definitions.
cst_tg._metadata = CloudServiceTypeMeta.set_meta(
    fields=[
        TextDyField.data_source('Name', 'data.target_group_name'),
        TextDyField.data_source('Port', 'data.port'),
        TextDyField.data_source('Protocol', 'data.protocol'),
        TextDyField.data_source('Target Type', 'data.target_type'),
        ListDyField.data_source('Load Balancers', 'data.load_balancer_arns', options={
            'delimiter': '<br>'
        }),
        EnumDyField.data_source('Health Check', 'data.health_check_enabled', default_badge={
            'indigo.500': ['true'], 'coral.600': ['false']
        }),
        TextDyField.data_source('ARN', 'data.target_group_arn', options={
            'is_optional': True
        }),
        TextDyField.data_source('VPC ID', 'data.vpc_id', options={
            'is_optional': True
        }),
        TextDyField.data_source('Healthy Threshold Count', 'data.healthy_threshold_count', options={
            'is_optional': True
        }),
        TextDyField.data_source('Unhealthy Threshold Count', 'data.unhealthy_threshold_count', options={
            'is_optional': True
        }),
        TextDyField.data_source('Health Check Enabled', 'data.health_check_enabled', options={
            'is_optional': True
        }),
        TextDyField.data_source('Health Check Timeout Seconds', 'data.health_check_timeout_seconds', options={
            'is_optional': True
        }),
        TextDyField.data_source('Health Check Interval Seconds', 'data.health_check_interval_seconds', options={
            'is_optional': True
        }),
        TextDyField.data_source('Deregistration Delay Timeout Seconds', 'data.deregistration_delay_timeout_seconds',
                                options={'is_optional': True}),
        TextDyField.data_source('Slow Start Duration Seconds', 'data.slow_start_duration_seconds', options={
            'is_optional': True
        }),
        TextDyField.data_source('Stickiness Enabled', 'data.stickiness_enabled', options={
            'is_optional': True
        }),
        TextDyField.data_source('Stickiness Type', 'data.stickiness_type', options={
            'is_optional': True
        }),
        TextDyField.data_source('Load Balancing Algorithm Type', 'data.load_balancing_algorithm_type', options={
            'is_optional': True
        }),
        TextDyField.data_source('Stickiness LB Cookie Duration Seconds', 'data.stickiness_lb_cookie_duration_seconds',
                                options={'is_optional': True}),
        TextDyField.data_source('AWS Account ID', 'data.account_id', options={
            'is_optional': True
        })
    ],
    search=[
        SearchField.set(name='Name', key='data.target_group_name'),
        # BUG FIX: key was the incomplete 'data.' — point it at the target
        # group ARN (matches the 'ARN' table field above).
        SearchField.set(name='ARN', key='data.target_group_arn'),
        SearchField.set(name='Protocol', key='data.protocol',
                        enums={
                            'HTTP': {'label': 'HTTP'},
                            'HTTPS': {'label': 'HTTPS'},
                            'TCP': {'label': 'TCP'},
                            'UDP': {'label': 'UDP'},
                            'TLS': {'label': 'TLS'},
                            'TCP_UDP': {'label': 'TCP/UDP'},
                        }),
        SearchField.set(name='Port', key='data.port', data_type='integer'),
        SearchField.set(name='Target Type', key='data.target_type',
                        enums={
                            'instance': {'label': 'Instance'},
                            'ip': {'label': 'IP'},
                            'lambda': {'label': 'Lambda'},
                        }),
        SearchField.set(name='VPC ID', key='data.vpc_id'),
        SearchField.set(name='Stickiness', key='data.attributes.stickiness_enabled',
                        enums={
                            'Enabled': {'label': 'Enabled'},
                            'Disabled': {'label': 'Disabled'}
                        }),
        SearchField.set(name='Stickiness Type', key='data.attributes.stickiness_type',
                        enums={
                            'lb_cookie': {'label': 'LB Cookie'},
                            'source_ip': {'label': 'Source IP'}
                        }),
        SearchField.set(name='Region', key='data.region_name'),
        SearchField.set(name='AWS Account ID', key='data.account_id'),
    ]
)
# Exported list consumed by the SpaceONE collector plugin framework.
CLOUD_SERVICE_TYPES = [
    CloudServiceTypeResponse({'resource': cst_elb}),
    CloudServiceTypeResponse({'resource': cst_tg}),
]
|
nilq/baby-python
|
python
|
import csv
import matplotlib.pyplot as plt
import numpy as np
# Bar chart of model accuracy per feature-category subset, with standard
# deviation error bars. Reads results.csv (columns: permutation, accuracy,
# std) and writes the figure to subset.pdf.
means = []
stdevs = []
labels = []
with open('results.csv', 'r') as results_file:
    for row in csv.DictReader(results_file):
        # One bar per permutation; newline-separate the category names
        # so long tick labels wrap vertically.
        labels.append(row['permutation'].replace(",", "\n"))
        means.append(float(row['accuracy']))
        stdevs.append(float(row['std']))
x_pos = np.arange(len(labels))
print(means, stdevs)
# Build the plot
fig, ax = plt.subplots()
fig.set_size_inches(10, 5)
ax.bar(
    x_pos,
    means,
    yerr=stdevs,
    align='center',
    alpha=0.5,
    ecolor='black',
    capsize=10,
)
ax.set_ylabel('Accuracy')
ax.set_xlabel('Feature Categories')
ax.set_xticks(x_pos)
ax.set_xticklabels(labels)
ax.set_title('Model performance by feature categories')
ax.yaxis.grid(True)
# Save the figure and show
plt.tight_layout()
plt.savefig('subset.pdf')
plt.show()
|
nilq/baby-python
|
python
|
import numpy as np
import uncertainties.unumpy as unp
def center():
    # This fit class has two centers, so no single center-argument index is
    # reported; getCenter() below combines them instead.
    return None # or the arg-number of the center.
def getCenter(args):
    """Return the midpoint of the two fitted Gaussian centers.

    :param args: fit-parameter sequence ordered as in args() below;
        indices 1 and 4 are Center1 and Center2.
    """
    center1 = args[1]
    center2 = args[4]
    return (center1 + center2) / 2
def args():
    """Return the ordered parameter names for this double-Gaussian fit class."""
    names = ('Amp1', 'Center1', 'Sigma1', 'Amp2', 'Center2', 'Sigma2', 'Offset')
    return names
def f(x, A1, x01, sig1, A2, x02, sig2, offset):
    """
    Checked double-Gaussian evaluation: rejects unphysical parameter sets by
    returning a large penalty vector, otherwise delegates to the "raw" call.
    :return: model values at x, or a 1e10 penalty array.
    """
    bad_fit = 10**10 * np.ones(len(x))
    # Only downward-pointing (negative-amplitude) dips are accepted.
    if A1 > 0 or A2 > 0:
        return bad_fit
    if offset > 1:
        return bad_fit
    lo = min(x)
    hi = max(x)
    # Both centers must lie inside the sampled x-range.
    if not (lo < x01 < hi) or not (lo < x02 < hi):
        return bad_fit
    # Possible extra constraints, currently disabled: requiring a minimum
    # peak size, or capping sigma at a fraction of the x-range so the second
    # gaussian can't broaden into an effective offset.
    return f_raw(x, A1, x01, sig1, A2, x02, sig2, offset)
def f_raw(x, A1, x01, sig1, A2, x02, sig2, offset):
    """
    Unchecked double-Gaussian: an offset plus two Gaussian terms.
    Performs no validation of the parameters.
    :return: model values at x.
    """
    gauss1 = A1 * np.exp(-(x - x01)**2 / (2 * sig1**2))
    gauss2 = A2 * np.exp(-(x - x02)**2 / (2 * sig2**2))
    return offset + gauss1 + gauss2
def f_unc(x, A1, x01, sig1, A2, x02, sig2, offset):
    """
    Uncertainty-aware twin of f_raw: identical math, but computed with
    unp.exp so uncertainty values propagate through the result.
    :return: model values at x with propagated uncertainties.
    """
    gauss1 = A1 * unp.exp(-(x - x01)**2 / (2 * sig1**2))
    gauss2 = A2 * unp.exp(-(x - x02)**2 / (2 * sig2**2))
    return offset + gauss1 + gauss2
def guess(key, values):
    """
    Returns guess values for the parameters of this function class based on the input.
    Used for fitting using this class.
    :param key: x-values of the data.
    :param values: y-values of the data.
    :return: list of initial guesses ordered as in args().
    """
    # Negative amplitude guess from the depth of the dip.
    amp = 0.6 * min(values) - max(values)
    span = max(key) - min(key)
    dip_loc = key[np.argmin(values)]
    # Order: Amp1, Center1, Sigma1, Amp2, Center2, Sigma2, Offset.
    # The second dip is guessed slightly to the right of the first.
    return [amp,
            dip_loc,
            span / 20,
            0.8 * amp,
            dip_loc + span / 9,
            span / 32,
            max(values)]
def areas(A1, x01, sig1, A2, x02, sig2):
    """Return the analytic (signed) areas of the two Gaussians: A*sigma*sqrt(2*pi)."""
    root_two_pi = np.sqrt(2 * np.pi)
    return np.array([A1 * sig1, A2 * sig2]) * root_two_pi
|
nilq/baby-python
|
python
|
# check file encoding format
# import chardet
# f = open('test-1000.txt', 'rb')
# result = chardet.detect(f.read())
# print(result)
import codecs
# Convert dev-1000.txt from GB18030 encoding to UTF-8 (dev-1000-new.txt).
# Use context managers so both handles are flushed and closed even on error;
# the original left them open, risking a truncated output file.
with codecs.open("dev-1000.txt", 'r', 'GB18030') as f:
    ff = f.read()
with codecs.open('dev-1000-new.txt', 'w', 'utf-8') as file_object:
    file_object.write(ff)
# Historical, commented-out renumbering code kept below for reference.
# with open("test-1000.txt", 'r') as file:
#     for line in file.readlines():
#         print(line)
# count = 3001
# with codecs.open("train-3000.txt", "a", "utf-8") as train:
#     with codecs.open("test-1000.txt", "r", "utf-8") as test:
#         train.write(test.read())
#         for line in test.readlines():
#             new_line = ""
#             new_line += str(count) + '\t'
#             for s in range(1, len(line.strip('\t'))):
#                 if s == len(line.strip("\t")) - 1:
#                     new_line += line.strip("\t")[s] + '\n'
#                 else:
#                     new_line += line.strip("\t")[s] + "\t"
#             train.write(new_line)
#             count += 1
|
nilq/baby-python
|
python
|
from newGui import Ui_MainWindow
import sys
from pyqtgraph import PlotWidget ,PlotItem
import os
import pathlib
import pyqtgraph as pg
import pandas as pd
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets ,QtPrintSupport
#--------- to save as pdf ------------#
def print_widget(widget, filename):
    """Render *widget* into a PDF file at *filename*, scaled to fit one page.

    :param widget: any QWidget to rasterize.
    :param filename: output PDF path.
    """
    printer = QtPrintSupport.QPrinter(QtPrintSupport.QPrinter.HighResolution)
    # BUG FIX: QPrinter lives in QtPrintSupport, not QtGui.QtPrintSupport
    # (the original line raised AttributeError at runtime).
    printer.setOutputFormat(QtPrintSupport.QPrinter.PdfFormat)
    printer.setOutputFileName(filename)
    painter = QtGui.QPainter(printer)
    # Scale uniformly so the widget fits the printable page while keeping
    # its aspect ratio, centered on the paper.
    xscale = printer.pageRect().width() * 1.0 / widget.width()
    yscale = printer.pageRect().height() * 1.0 / widget.height()
    scale = min(xscale, yscale)
    painter.translate(printer.paperRect().center())
    painter.scale(scale, scale)
    painter.translate(-widget.width() / 2, -widget.height() / 2)
    widget.render(painter)
    painter.end()
class ApplicationWindow(QtWidgets.QMainWindow):
    """Main window: loads a signal from a text file and animates its plot."""

    def __init__(self):
        super(ApplicationWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.ui.actionsiganl_1.triggered.connect(lambda: self.opensignal1())

    def readsignal1(self):
        """Ask the user for a .txt file and load its samples into self.data1."""
        # BUG FIX: in PyQt5, QFileDialog lives in QtWidgets, not QtGui
        # (QtGui.QFileDialog raised AttributeError).
        self.fname1 = QtWidgets.QFileDialog.getOpenFileName(
            self, 'open only txt file', os.getenv('home'), "text(*.txt)")
        path = self.fname1[0]
        self.data1 = np.genfromtxt(path)

    def opensignal1(self):
        """Load signal 1 and start a 50 ms timer that animates its plot."""
        self.readsignal1()
        self.data_line1 = self.ui.signal_1.plot(self.data1, name="mode2")
        self.ptr1 = 0
        self.n = 0
        # Set timer
        self.timer = pg.QtCore.QTimer()
        # Timer signal binding update_data function
        self.timer.timeout.connect(self.update_data)
        # The timer interval is 50ms, which can be understood as refreshing data once in 50ms
        self.timer.start(50)
        # BUG FIX: the plot widget is an attribute of self.ui, not of the
        # window itself (self.signal_1 raised AttributeError).
        self.ui.signal_1.show()

    # Data shift left
    def update_data(self):
        """Extend the plotted window by 10 samples per timer tick."""
        self.n += 10
        self.data_line1.setData(self.data1[0: 100 + self.n])
        self.data_line1.setPos(self.ptr1, 0)

    # ----- save as pdf ---#
    # def savepdf(self):
    #     fn, _ = QtWidgets.QFileDialog.getSaveFileName(
    #         self, "Export PDF", None, "PDF files (.pdf);;All Files()")
    #     if fn:
    #         if QtCore.QFileInfo(fn).suffix() == "": fn += ".pdf"
    #         print_widget(MainWindow, fn)
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    window = ApplicationWindow()
    window.show()
    app.exec_()


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def export_to_tensorflow(self, path):
    """
    Export frame to TensorFlow Records file on given path
    TensorFlow records are the standard data format for TensorFlow. The recommended format for TensorFlow is a TFRecords file
    containing tf.train.Example protocol buffers. The tf.train.Example protocol buffers encodes (which contain Features as a field).
    https://www.tensorflow.org/how_tos/reading_data
    During export, the API parses Spark SQL DataTypes to TensorFlow compatible DataTypes as below:
    * IntegerType or LongType => Int64List
    * FloatType or DoubleType => FloatList
    * ArrayType(Double) [Vector] => FloatList
    * Any other DataType (Ex: String) => BytesList
    Parameters
    ----------
    :param path: (str) HDFS/Local path to export current frame as TensorFlow records
    Examples
    --------
    >>> file_path = "../datasets/cities.csv"
    >>> frame = tc.frame.import_csv(file_path, "|", header=True)
    -etc-
    >>> frame.sort("rank")
    >>> frame.inspect()
    [#]  rank  city         population_2013  population_2010  change  county
    ============================================================================
    [0]     1  Portland              609456           583776  4.40%   Multnomah
    [1]     2  Salem                 160614           154637  3.87%   Marion
    [2]     3  Eugene                159190           156185  1.92%   Lane
    [3]     4  Gresham               109397           105594  3.60%   Multnomah
    [4]     5  Hillsboro              97368            91611  6.28%   Washington
    [5]     6  Beaverton              93542            89803  4.16%   Washington
    [6]     7  Bend                   81236            76639  6.00%   Deschutes
    [7]     8  Medford                77677            74907  3.70%   Jackson
    [8]     9  Springfield            60177            59403  1.30%   Lane
    [9]    10  Corvallis              55298            54462  1.54%   Benton
    >>> destPath = "../tests/sandbox/output24.tfr"
    >>> import os
    >>> if os.path.exists(destPath):
    ...     os.remove(destPath)
    >>> frame.export_to_tensorflow(destPath)
    Check for output24.tfr in specified destination path either on Local or HDFS file system
    """
    # BUG FIX above: the example previously read
    #   ... if os.path.exists(filename) os.remove(destPath)
    # which is invalid syntax and referenced an undefined name `filename`.
    # Delegate to the JVM-side implementation via the Scala bridge.
    self._scala.exportToTensorflow(path)
|
nilq/baby-python
|
python
|
# Generated by Django 2.1 on 2019-06-19 12:32
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated merge migration.

    Unifies the two divergent 0004 migration branches of the `articles`
    app; contains no schema operations of its own.
    """

    dependencies = [
        ('articles', '0004_merge_20190618_0923'),
        ('articles', '0004_merge_20190618_1311'),
    ]

    operations = [
    ]
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path, include
from django.conf.urls.static import static
# Project URL routing: frontend app at the root, REST API under /api/,
# auth endpoints under /api/auth/, and the Django admin.
urlpatterns = [
    path('', include('frontend.urls')),
    path('api/', include('backend.urls')),
    path('api/auth/', include('accounts.urls')),
    path('admin/', admin.site.urls),
]
# Serve static files in development
if settings.DEBUG:
    # NOTE(review): serving STATIC_ROOT at the site root '/' is unusual —
    # presumably so built frontend assets resolve in dev; confirm intent.
    urlpatterns += static('/', document_root=settings.STATIC_ROOT)
|
nilq/baby-python
|
python
|
import logging
import matplotlib.pyplot as plt
import mrcfile
import numpy as np
from scipy.linalg import lstsq
import aspire.volume
from aspire.nufft import anufft
from aspire.numeric import fft, xp
from aspire.utils import crop_pad_2d, grid_2d
from aspire.utils.matrix import anorm
logger = logging.getLogger(__name__)
def _im_translate2(im, shifts):
    """
    Translate image by shifts
    :param im: An Image instance to be translated.
    :param shifts: An array of size n-by-2 specifying the shifts in pixels.
    Alternatively, it can be a row vector of length 2, in which case the same shifts is applied to each image.
    :return: An Image instance translated by the shifts.
    TODO: This implementation has been moved here from aspire.aspire.abinitio and is faster than _im_translate.
    """
    if not isinstance(im, Image):
        logger.warning(
            "_im_translate2 expects an Image, attempting to convert array."
            "Expects array of size n-by-L-by-L."
        )
        im = Image(im)
    # Promote a single shift row-vector to shape (1, 2).
    if shifts.ndim == 1:
        shifts = shifts[np.newaxis, :]
    n_shifts = shifts.shape[0]
    if shifts.shape[1] != 2:
        raise ValueError("Input `shifts` must be of size n-by-2")
    if n_shifts != 1 and n_shifts != im.n_images:
        raise ValueError("The number of shifts must be 1 or match the number of images")
    resolution = im.res
    # Frequency-sample coordinates, ifftshifted to match fft2's layout.
    grid = xp.asnumpy(
        fft.ifftshift(xp.asarray(np.ceil(np.arange(-resolution / 2, resolution / 2))))
    )
    om_y, om_x = np.meshgrid(grid, grid)
    # Per-shift phase ramps combining both frequency axes.
    phase_shifts = np.einsum("ij, k -> ijk", om_x, shifts[:, 0]) + np.einsum(
        "ij, k -> ijk", om_y, shifts[:, 1]
    )
    # TODO: figure out how why the result of einsum requires reshape
    # NOTE(review): the einsum output has the shift axis last; this reshape
    # (rather than a transpose) only preserves values when n_shifts == 1 —
    # verify for the multi-shift case.
    phase_shifts = phase_shifts.reshape(n_shifts, resolution, resolution)
    phase_shifts /= resolution
    mult_f = np.exp(-2 * np.pi * 1j * phase_shifts)
    # Shift theorem: multiply in Fourier space, invert, keep the real part.
    im_f = xp.asnumpy(fft.fft2(xp.asarray(im.asnumpy())))
    im_translated_f = im_f * mult_f
    im_translated = np.real(xp.asnumpy(fft.ifft2(xp.asarray(im_translated_f))))
    return Image(im_translated)
def normalize_bg(imgs, bg_radius=1.0, do_ramp=True):
    """
    Normalize backgrounds and apply to a stack of images.

    :param imgs: A stack of images in N-by-L-by-L array
    :param bg_radius: Radius cutoff to be considered as background (in image size)
    :param do_ramp: When it is `True`, fit a ramping background to the data
        and subtract. Namely perform normalization based on values from each image.
        Otherwise, a constant background level from all images is used.
    :return: The modified images
    """
    L = imgs.shape[-1]
    g = grid_2d(L, indexing="yx")
    # Pixels outside bg_radius are treated as background.
    bg_mask = g["r"] > bg_radius
    if do_ramp:
        # Plane basis [x, y, 1] restricted to background pixels (for the fit)
        # and over the full image (for the subtraction).
        basis_bg = np.vstack(
            (
                g["x"][bg_mask].flatten(),
                g["y"][bg_mask].flatten(),
                np.ones(g["y"][bg_mask].flatten().size),
            )
        ).T
        basis_full = np.vstack(
            (g["x"].flatten(), g["y"].flatten(), np.ones(L * L))
        ).T
        flat_mask = bg_mask.reshape((L * L))
        imgs = imgs.reshape((-1, L * L))
        # Least-squares plane fit per image, then subtract it everywhere.
        coeff = lstsq(basis_bg, imgs[:, flat_mask].T)[0]  # RCOPT
        imgs = imgs - (basis_full @ coeff).T  # RCOPT
        imgs = imgs.reshape((-1, L, L))
    # Per-image mean and std computed over background pixels only.
    masked = imgs * bg_mask
    n_bg = np.sum(bg_mask)
    first_moment = np.sum(masked, axis=(1, 2)) / n_bg
    second_moment = np.sum(masked**2, axis=(1, 2)) / n_bg
    mean = first_moment.reshape(-1, 1, 1)
    variance = second_moment.reshape(-1, 1, 1) - mean**2
    std = np.sqrt(variance)
    return (imgs - mean) / std
class Image:
def __init__(self, data, dtype=None):
"""
A stack of one or more images.
This is a wrapper of numpy.ndarray which provides methods
for common processing tasks.
:param data: Numpy array containing image data with shape `(n_images, res, res)`.
:param dtype: Optionally cast `data` to this dtype. Defaults to `data.dtype`.
:return: Image instance storing `data`.
"""
assert isinstance(
data, np.ndarray
), "Image should be instantiated with an ndarray"
if data.ndim == 2:
data = data[np.newaxis, :, :]
if dtype is None:
self.dtype = data.dtype
else:
self.dtype = np.dtype(dtype)
self.data = data.astype(self.dtype, copy=False)
self.ndim = self.data.ndim
self.shape = self.data.shape
self.n_images = self.shape[0]
self.res = self.shape[1]
assert data.shape[1] == data.shape[2], "Only square ndarrays are supported."
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __add__(self, other):
if isinstance(other, Image):
other = other.data
return Image(self.data + other)
def __sub__(self, other):
if isinstance(other, Image):
other = other.data
return Image(self.data - other)
def __mul__(self, other):
if isinstance(other, Image):
other = other.data
return Image(self.data * other)
def __neg__(self):
return Image(-self.data)
def sqrt(self):
return Image(np.sqrt(self.data))
def flip_axes(self):
return Image(np.transpose(self.data, (0, 2, 1)))
def __repr__(self):
return f"{self.n_images} images of size {self.res}x{self.res}"
def asnumpy(self):
return self.data
def copy(self):
return Image(self.data.copy())
def shift(self, shifts):
"""
Translate image by shifts. This method returns a new Image.
:param shifts: An array of size n-by-2 specifying the shifts in pixels.
Alternatively, it can be a column vector of length 2, in which case
the same shifts is applied to each image.
:return: The Image translated by the shifts, with periodic boundaries.
"""
if shifts.ndim == 1:
shifts = shifts[np.newaxis, :]
return self._im_translate(shifts)
def downsample(self, ds_res):
"""
Downsample Image to a specific resolution. This method returns a new Image.
:param ds_res: int - new resolution, should be <= the current resolution
of this Image
:return: The downsampled Image object.
"""
# compute FT with centered 0-frequency
fx = fft.centered_fft2(self.data)
# crop 2D Fourier transform for each image
crop_fx = np.array([crop_pad_2d(fx[i], ds_res) for i in range(self.n_images)])
# take back to real space, discard complex part, and scale
out = np.real(fft.centered_ifft2(crop_fx)) * (ds_res**2 / self.res**2)
return Image(out)
def filter(self, filter):
"""
Apply a `Filter` object to the Image and returns a new Image.
:param filter: An object of type `Filter`.
:return: A new filtered `Image` object.
"""
filter_values = filter.evaluate_grid(self.res)
im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self.data)))
if im_f.ndim > filter_values.ndim:
im_f *= filter_values
else:
im_f = filter_values * im_f
im = xp.asnumpy(fft.centered_ifft2(xp.asarray(im_f)))
im = np.real(im)
return Image(im)
def rotate(self):
raise NotImplementedError
def save(self, mrcs_filepath, overwrite=False):
with mrcfile.new(mrcs_filepath, overwrite=overwrite) as mrc:
# original input format (the image index first)
mrc.set_data(self.data.astype(np.float32))
def _im_translate(self, shifts):
"""
Translate image by shifts
:param im: An array of size n-by-L-by-L containing images to be translated.
:param shifts: An array of size n-by-2 specifying the shifts in pixels.
Alternatively, it can be a row vector of length 2, in which case the same shifts is applied to each image.
:return: The images translated by the shifts, with periodic boundaries.
TODO: This implementation is slower than _im_translate2
"""
im = self.data
if shifts.ndim == 1:
shifts = shifts[np.newaxis, :]
n_shifts = shifts.shape[0]
assert shifts.shape[-1] == 2, "shifts must be nx2"
assert (
n_shifts == 1 or n_shifts == self.n_images
), "number of shifts must be 1 or match the number of images"
# Cast shifts to this instance's internal dtype
shifts = shifts.astype(self.dtype)
L = self.res
im_f = xp.asnumpy(fft.fft2(xp.asarray(im)))
grid_shifted = fft.ifftshift(
xp.asarray(np.ceil(np.arange(-L / 2, L / 2, dtype=self.dtype)))
)
grid_1d = xp.asnumpy(grid_shifted) * 2 * np.pi / L
om_x, om_y = np.meshgrid(grid_1d, grid_1d, indexing="ij")
phase_shifts_x = -shifts[:, 0].reshape((n_shifts, 1, 1))
phase_shifts_y = -shifts[:, 1].reshape((n_shifts, 1, 1))
phase_shifts = (
om_x[np.newaxis, :, :] * phase_shifts_x
+ om_y[np.newaxis, :, :] * phase_shifts_y
)
mult_f = np.exp(-1j * phase_shifts)
im_translated_f = im_f * mult_f
im_translated = xp.asnumpy(fft.ifft2(xp.asarray(im_translated_f)))
im_translated = np.real(im_translated)
return Image(im_translated)
    def norm(self):
        """Return the norm of the image stack, as computed by the `anorm` helper."""
        return anorm(self.data)
    @property
    def size(self):
        """Total number of scalar entries in the underlying data array."""
        # probably not needed, transition
        return np.size(self.data)
    def backproject(self, rot_matrices):
        """
        Backproject images along rotation
        :param rot_matrices: An n-by-3-by-3 array of rotation matrices \
        corresponding to viewing directions.
        :return: Volume instance corresponding to the backprojected images.
        """
        L = self.res
        assert (
            self.n_images == rot_matrices.shape[0]
        ), "Number of rotation matrices must match the number of images"
        # TODO: rotated_grids might as well give us correctly shaped array in the first place
        pts_rot = aspire.volume.rotated_grids(L, rot_matrices)
        pts_rot = pts_rot.reshape((3, -1))
        # Normalized centered 2D FFT of the image stack.
        im_f = xp.asnumpy(fft.centered_fft2(xp.asarray(self.data))) / (L**2)
        if L % 2 == 0:
            # For even sizes, zero the first (unpaired highest-frequency)
            # row and column of the centered transform.
            im_f[:, 0, :] = 0
            im_f[:, :, 0] = 0
        im_f = im_f.flatten()
        # Adjoint NUFFT accumulates the Fourier samples onto a 3D grid;
        # pts_rot axes are reversed to match anufft's expected ordering.
        vol = anufft(im_f, pts_rot[::-1], (L, L, L), real=True) / L
        return aspire.volume.Volume(vol)
def show(self, columns=5, figsize=(20, 10)):
"""
Plotting Utility Function.
:param columns: Number of columns in a row of plots.
:param figsize: Figure size in inches, consult `matplotlib.figure`.
"""
# We never need more columns than images.
columns = min(columns, self.n_images)
plt.figure(figsize=figsize)
for i, im in enumerate(self):
plt.subplot(self.n_images // columns + 1, columns, i + 1)
plt.imshow(im, cmap="gray")
plt.show()
class CartesianImage(Image):
    """Image in its Cartesian (pixel-grid) representation."""

    def expand(self, basis):
        """Return a BasisImage for `basis`.

        NOTE(review): `self` is not handed to BasisImage -- presumably
        coefficient computation happens elsewhere; confirm intended.
        """
        return BasisImage(basis)
class PolarImage(Image):
    """Image in a polar-grid representation."""

    def expand(self, basis):
        """Return a BasisImage for `basis` (self is not forwarded)."""
        return BasisImage(basis)
class BispecImage(Image):
    """Image in a bispectrum representation."""

    def expand(self, basis):
        """Return a BasisImage for `basis` (self is not forwarded)."""
        return BasisImage(basis)
class BasisImage(Image):
    """Image expressed as coefficients in a basis.

    NOTE(review): __init__ does not call the Image initializer;
    instances only carry `basis`.
    """

    def __init__(self, basis):
        # Basis object this image's coefficients refer to.
        self.basis = basis

    def evaluate(self):
        """Map back to a Cartesian-grid image.

        NOTE(review): CartesianImage() is constructed with no arguments
        here -- confirm against Image.__init__'s signature.
        """
        return CartesianImage()
class FBBasisImage(BasisImage):
    """BasisImage specialized for an FB (Fourier-Bessel) basis; no overrides."""

    pass
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.remoteobject.manager.system.event.component_connection_changed\
import ComponentConnectionChanged
from org.o3project.odenos.remoteobject.transport.message_dispatcher\
import MessageDispatcher
from org.o3project.odenos.core.component.logic\
import Logic
from org.o3project.odenos.remoteobject.message.event import Event
from org.o3project.odenos.remoteobject.message.response import Response
from org.o3project.odenos.core.util.network_interface import NetworkInterface
from org.o3project.odenos.core.component.network.topology.port import Port
from org.o3project.odenos.core.component.network.topology.node import Node
from org.o3project.odenos.core.component.network.topology.link import Link
from org.o3project.odenos.core.component.network.flow.flow import Flow
from org.o3project.odenos.core.component.network.packet.in_packet import InPacket
from org.o3project.odenos.core.component.network.packet.out_packet import OutPacket
from org.o3project.odenos.core.component.network.flow.basic.basic_flow_match import\
BasicFlowMatch
from org.o3project.odenos.core.component.network.packet.in_packet_added\
import InPacketAdded
from org.o3project.odenos.core.component.network.packet.out_packet_added\
import OutPacketAdded
import unittest
from contextlib import nested
from mock import Mock, MagicMock, patch
class LogicTest(unittest.TestCase):
Message = MagicMock()
value = {}
result = {}
    def setUp(self):
        # Fresh Logic instance per test; Message is the class-level
        # MagicMock standing in for the dispatcher.
        self.target = Logic(
            "cc_action",
            self.Message)
    def tearDown(self):
        # Drop the instance so no state leaks between tests.
        self.target = None
    def test_constructor(self):
        """A new Logic exposes its type/id and starts with empty tables."""
        conversion_table = self.target._conversion_table
        self.assertEqual(
            self.target._object_property._object_property["type"],
            "Logic")
        self.assertEqual(
            self.target._object_property._object_property["id"],
            "cc_action")
        self.assertEqual(
            conversion_table._ConversionTable__connection_type_map, {})
        self.assertEqual(
            conversion_table._ConversionTable__network_conversion_table, {})
        self.assertEqual(
            conversion_table._ConversionTable__node_conversion_table, {})
        self.assertEqual(
            conversion_table._ConversionTable__port_conversion_table, {})
        self.assertEqual(
            conversion_table._ConversionTable__link_conversion_table, {})
        self.assertEqual(
            conversion_table._ConversionTable__flow_conversion_table, {})
        self.assertEqual(self.target._network_interfaces, {})
        self.assertEqual(self.target._Logic__subscription_table, {})
def test_conversion_table(self):
self.assertEqual(self.target.conversion_table(), self.target._conversion_table)
    def test_do_event_componentconnectionchanged_add_action_not_Exist(self):
        """An "add" event for an unknown network registers its interface."""
        with patch("logging.debug") as logging_debug:
            self.value = {"action": "add",
                          "prev": None,
                          "curr": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network1"}}
            # NOTE(review): the payload is wrapped in Response, not Event --
            # presumably only the packed body is read; confirm against Logic.
            self.result = Response("add", self.value)
            self.target._do_event_componentconnectionchanged(self.result)
            self.assertEqual(
                self.target._network_interfaces["network1"].network_id,
                "network1")
            self.assertEqual(
                logging_debug.call_count, 4)
    def test_do_event_componentconnectionchanged_add_action_Exist(self):
        """An "add" event for an already-registered network keeps its interface."""
        with patch("logging.debug") as logging_debug:
            self.target._network_interfaces["network1"] =\
                NetworkInterface(self.target.dispatcher, "network1")
            self.value = {"action": "add",
                          "prev": None,
                          "curr": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network1"}}
            self.result = Response("add", self.value)
            self.target._do_event_componentconnectionchanged(self.result)
            self.assertEqual(
                self.target._network_interfaces["network1"].network_id,
                "network1")
            self.assertEqual(
                logging_debug.call_count, 3)
    def test_do_event_componentconnectionchanged_update_action(self):
        """An "update" event (network_id changed) is processed and logged."""
        with patch("logging.debug") as logging_debug:
            self.value = {"action": "update",
                          "prev": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network1"},
                          "curr": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network2"}}
            self.result = Response("add", self.value)
            self.target._do_event_componentconnectionchanged(self.result)
            self.assertEqual(
                logging_debug.call_count, 3)
    def test_do_event_componentconnectionchanged_delete_action(self):
        """A "delete" event removes only the deleted network's interface."""
        self.target._network_interfaces = {"network1": "network1_value",
                                           "network2": "network2_value"}
        with patch("logging.debug") as logging_debug:
            self.value = {"action": "delete",
                          "prev": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network2"},
                          "curr": None}
            self.result = Response("add", self.value)
            self.target._do_event_componentconnectionchanged(self.result)
            self.assertEqual(
                self.target._network_interfaces,
                {"network1": "network1_value"})
            self.assertEqual(
                logging_debug.call_count, 3)
    def test_do_event_componentconnectionchanged_other_action(self):
        """An unrecognized action is ignored after a single debug log."""
        with patch("logging.debug") as logging_debug:
            self.value = {"action": "other_action",
                          "prev": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network2"},
                          "curr": None}
            self.result = Response("add", self.value)
            self.target._do_event_componentconnectionchanged(self.result)
            self.assertEqual(
                logging_debug.call_count, 1)
def test_do_event_componentconnectionchanged_Error(self):
with nested(patch("logging.debug"),
patch("logging.error")) as (logging_debug,
logging_error):
self.value = {"error": "other_action"}
self.result = Response("add", self.value)
self.target._do_event_componentconnectionchanged(self.result)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
logging_error.call_count, 1)
    def test_connection_changed_added_pre(self):
        """_connection_changed_added_pre returns True for a LogicAndNetwork add."""
        with patch("logging.debug") as logging_debug:
            self.value = {"action": "add",
                          "prev": None,
                          "curr": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network2"}}
            self.value = ComponentConnectionChanged.create_from_packed(
                self.value)
            self.result = self.target._connection_changed_added_pre(self.value)
            self.assertEqual(self.result, True)
    def test_connection_changed_update_pre(self):
        """_connection_changed_update_pre returns True for a LogicAndNetwork update."""
        self.value = {"action": "update",
                      "prev": {"id": "slicer1->network1",
                               "type": "LogicAndNetwork",
                               "connection_type": "original",
                               "state": "initializing",
                               "logic_id": "slicer1",
                               "network_id": "network1"},
                      "curr": {"id": "slicer1->network1",
                               "type": "LogicAndNetwork",
                               "connection_type": "original",
                               "state": "initializing",
                               "logic_id": "slicer1",
                               "network_id": "network2"}}
        self.value = ComponentConnectionChanged.create_from_packed(self.value)
        self.result = self.target._connection_changed_update_pre(self.value)
        self.assertEqual(self.result, True)
    def test_connection_changed_delete_pre(self):
        """_connection_changed_delete_pre returns True for a LogicAndNetwork delete."""
        self.value = {"action": "delete",
                      "prev": {"id": "slicer1->network1",
                               "type": "LogicAndNetwork",
                               "connection_type": "original",
                               "state": "initializing",
                               "logic_id": "slicer1",
                               "network_id": "network1"},
                      "curr": None}
        self.value = ComponentConnectionChanged.create_from_packed(self.value)
        self.result = self.target._connection_changed_delete_pre(self.value)
        self.assertEqual(self.result, True)
    def test_connection_changed_added(self):
        """Exercises the add-notification path.

        NOTE(review): despite the name this invokes
        _connection_changed_update_pre -- looks like a copy/paste slip;
        confirm whether _connection_changed_added was intended.
        """
        with patch("logging.debug") as logging_debug:
            self.value = {"action": "add",
                          "prev": None,
                          "curr": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network2"}}
            self.value =\
                ComponentConnectionChanged.create_from_packed(self.value)
            self.result =\
                self.target._connection_changed_update_pre(self.value)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_connection_changed_update(self):
        """Exercises the update-notification path.

        NOTE(review): this calls _connection_changed_update_pre, not
        _connection_changed_update -- confirm the intended target.
        """
        with patch("logging.debug") as logging_debug:
            self.value = {"action": "update",
                          "prev": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network1"},
                          "curr": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network2"}}
            self.value =\
                ComponentConnectionChanged.create_from_packed(self.value)
            self.result =\
                self.target._connection_changed_update_pre(self.value)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_connection_changed_delete(self):
        """Exercises the delete-notification path.

        NOTE(review): this calls _connection_changed_update_pre, not
        _connection_changed_delete -- confirm the intended target.
        """
        with patch("logging.debug") as logging_debug:
            self.value = {"action": "delete",
                          "prev": {"id": "slicer1->network1",
                                   "type": "LogicAndNetwork",
                                   "connection_type": "original",
                                   "state": "initializing",
                                   "logic_id": "slicer1",
                                   "network_id": "network1"},
                          "curr": None}
            self.value =\
                ComponentConnectionChanged.create_from_packed(self.value)
            self.result =\
                self.target._connection_changed_update_pre(self.value)
            self.assertEqual(
                logging_debug.call_count, 1)
def test_add_event_subscription_network_event_type(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
"NodeChanged", "Network123")
self.assertEqual(
logging_debug.call_count, 1)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None})
def test_add_event_subscription_packet_event_type(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
"InPacketAdded", "Network123")
self.assertEqual(
logging_debug.call_count, 1)
self.assertEqual(
self.target._Logic__subscription_table,
{"InPacketAdded::Network123": None})
def test_add_event_subscription_event_type_not_match(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
"NotType", "Network123")
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_add_event_subscription_event_type_None(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
None, "Network123")
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_add_event_subscription_network_id_None(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
"InPacketAdded", None)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_add_event_subscription_event_type_network_id_None(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._add_event_subscription(
None, None)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table, {})
def test_remove_event_subscription_network_event_type(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"NodeChanged::Network456": None}
self.result = self.target._remove_event_subscription(
"NodeChanged", "Network123")
self.assertEqual(
logging_debug.call_count, 1)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network456": None})
def test_remove_event_subscription_packet_event_type(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None}
self.result = self.target._remove_event_subscription(
"OutPacketAdded", "Network123")
self.assertEqual(
logging_debug.call_count, 1)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None})
def test_remove_event_subscription_event_type_not_match(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"NotType::Network123": None}
self.result = self.target._remove_event_subscription(
"NotType", "Network123")
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None,
"NotType::Network123": None})
def test_remove_event_subscription_event_type_None(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None}
self.result = self.target._remove_event_subscription(
None, "Network123")
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None})
def test_remove_event_subscription_neteork_id_None(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None}
self.result = self.target._remove_event_subscription(
"NodeChanged", None)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None})
def test_remove_event_subscription_event_type_neteork_id_None(self):
with patch("logging.debug") as logging_debug:
self.target._Logic__subscription_table =\
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None}
self.result = self.target._remove_event_subscription(
None, None)
self.assertEqual(
logging_debug.call_count, 0)
self.assertEqual(
self.target._Logic__subscription_table,
{"NodeChanged::Network123": None,
"OutPacketAdded::Network123": None})
    def test_update_event_subscription_network_event_type(self):
        """Updating a subscription stores attributes under an ::UPDATE:: key."""
        self.result = self.target._update_event_subscription(
            "NodeChanged", "Network123", ["attributes"])
        self.assertEqual(
            self.target._Logic__subscription_table,
            {"NodeChanged::UPDATE::Network123": ["attributes"]})
    def test_update_event_subscription_event_type_not_match(self):
        """An unknown event type leaves the subscription table empty."""
        self.result = self.target._update_event_subscription(
            "NotType", "Network123", ["attributes"])
        self.assertEqual(
            self.target._Logic__subscription_table, {})
    def test_update_event_subscription_event_type_None(self):
        """A None event type leaves the subscription table empty."""
        self.result = self.target._update_event_subscription(
            None, "Network123", ["attributes"])
        self.assertEqual(
            self.target._Logic__subscription_table, {})
    def test_update_event_subscription_neteork_id_None(self):
        """A None network id leaves the subscription table empty."""
        self.result = self.target._update_event_subscription(
            "NodeChanged", None, ["attributes"])
        self.assertEqual(
            self.target._Logic__subscription_table, {})
    def test_update_event_subscription_event_type_neteork_id_None(self):
        """Both arguments None: subscription table stays empty.

        NOTE(review): unlike its siblings this omits the attributes
        argument, so it relies on a default in
        _update_event_subscription -- confirm one exists.
        """
        self.result = self.target._update_event_subscription(
            None, None)
        self.assertEqual(
            self.target._Logic__subscription_table, {})
def test_do_event_nodechanged_add_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed")) as (logging_debug,
logging_error,
mock_node):
self.target._on_node_added = Mock()
self.target._Logic__subscription_table =\
{"NodeChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.value = {"id": "NodeId",
"version": "0001",
"action": "add",
"prev": None,
"curr": {"node": "node"}}
mock_node.return_value = node
self.result = Event("publisher_id", "NodeChanged", self.value)
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_node_added.assert_called_once_with(
"publisher_id", node)
def test_do_event_nodechanged_update_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed")) as (logging_debug,
logging_error,
mock_node):
self.target._on_node_update = Mock()
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.value = {"id": "NodeId",
"version": "0001",
"action": "update",
"prev": {"node": "node"},
"curr": {"node": "node"}}
self.result = Event("publisher_id", "NodeChanged", self.value)
mock_node.return_value = node
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_node_update.assert_called_once_with(
"publisher_id", node, node, "subscription")
def test_do_event_nodechanged_delete_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed")) as (logging_debug,
logging_error,
mock_node):
self.target._on_node_delete = Mock()
self.target._Logic__subscription_table =\
{"NodeChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.value = {"id": "NodeId",
"version": "0001",
"action": "delete",
"prev": {"node": "node"},
"curr": None}
mock_node.return_value = node
self.result = Event("publisher_id", "NodeChanged", self.value)
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_node_delete.assert_called_once_with(
"publisher_id", node)
def test_do_event_nodechanged_other_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed")) as (logging_debug,
logging_error,
mock_node):
self.target._Logic__subscription_table =\
{"NodeChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node = Node("Node", "0001", "Node01",
{"port_id": port}, {"attribute_key": "value"})
self.value = {"id": "NodeId",
"version": "0001",
"action": "Other",
"prev": {"node": "node"},
"curr": None}
mock_node.return_value
self.result = Event("publisher_id", "NodeChanged", self.value)
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 2)
def test_do_event_nodechanged_key_error(self):
with nested(patch("logging.debug"),
patch("logging.error")) as (logging_debug,
logging_error):
self.target._on_node_delete = Mock()
self.target._Logic__subscription_table =\
{"NodeChanged::publisher_id": "subscription"}
self.value = {"node_id": "NodeId",
"version": "0001",
"action": "Other",
"prev": {"node": "node"},
"curr": None}
self.result = Event("publisher_id", "NodeChanged", self.value)
self.target._do_event_nodechanged(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
def test_do_event_portchanged_add_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_added = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"id": "PortId",
"version": "0001",
"action": "add",
"prev": None,
"curr": {"node": "node"}}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_port_added.assert_called_once_with(
"publisher_id", port)
def test_do_event_portchanged_update_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_update = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::UPDATE::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"id": "PortId",
"version": "0001",
"action": "update",
"prev": {},
"curr": None}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_port_update.assert_called_once_with(
"publisher_id", port, port, "subscription")
def test_do_event_portchanged_delete_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_delete = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"id": "PortId",
"version": "0001",
"action": "delete",
"prev": {},
"curr": None}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_port_delete.assert_called_once_with(
"publisher_id", port)
def test_do_event_portchanged_other_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_delete = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"id": "PortId",
"version": "0001",
"action": "other",
"prev": {},
"curr": None}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 2)
def test_do_event_portchanged_key_error(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"port.Port."
"create_from_packed")) as (logging_debug,
logging_error,
mock_port):
self.target._on_port_delete = Mock()
self.target._Logic__subscription_table =\
{"PortChanged::publisher_id": "subscription"}
port = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"error_id": "PortId",
"version": "0001",
"action": "other",
"prev": {},
"curr": None}
mock_port.return_value = port
self.result = Event("publisher_id", "PortChanged", self.value)
self.target._do_event_portchanged(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
def test_do_event_linkchanged_add_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._on_link_added = Mock()
self.target._Logic__subscription_table =\
{"LinkChanged::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "add",
"prev": None,
"curr": {"node": "node"}}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_link_added.assert_called_once_with(
"publisher_id", link)
def test_do_event_linkchanged_update_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._on_link_update = Mock()
self.target._Logic__subscription_table =\
{"LinkChanged::UPDATE::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "update",
"prev": {"node": "node"},
"curr": {"node": "node"}}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_link_update.assert_called_once_with(
"publisher_id", link, link, "subscription")
def test_do_event_linkchanged_delete_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._on_link_delete = Mock()
self.target._Logic__subscription_table =\
{"LinkChanged::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "delete",
"prev": {"node": "node"},
"curr": None}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_link_delete.assert_called_once_with(
"publisher_id", link)
def test_do_event_linkchanged_other_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._Logic__subscription_table =\
{"LinkChanged::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "other",
"prev": {"node": "node"},
"curr": None}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 2)
def test_do_event_linkchanged_key_error(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed")) as (logging_debug,
logging_error,
mock_link):
self.target._Logic__subscription_table =\
{"LinkChanged::publisher_id": "subscription"}
link = Link("Link", "1", "LinkId", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "PortVal"})
self.value = {"node_id": "NodeId",
"error_id": "PortId",
"version": "0001",
"action": "other",
"prev": {"node": "node"},
"curr": None}
mock_link.return_value = link
self.result = Event("publisher_id", "LinkChanged", self.value)
self.target._do_event_linkchanged(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
def test_do_event_flowchanged_add_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed")) as (logging_debug,
logging_error,
mock_flow):
self.target._on_flow_added = Mock()
self.target._Logic__subscription_table =\
{"FlowChanged::publisher_id": "subscription"}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "add",
"prev": None,
"curr": {"type": "Flow"}}
mock_flow.return_value = flow
self.result = Event("publisher_id", "FlowChanged", self.value)
self.target._do_event_flowchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_flow_added.assert_called_once_with(
"publisher_id", flow)
def test_do_event_flowchanged_update_action(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed")) as (logging_debug,
logging_error,
mock_flow):
self.target._on_flow_update = Mock()
self.target._Logic__subscription_table =\
{"FlowChanged::UPDATE::publisher_id": "subscription"}
flow = Flow("BasicFlow", "1", "FlowId", "Owner",
True, 123456789, "establishing",
{"PortKey": "PortVal"})
self.value = {"id": "PortId",
"version": "0001",
"action": "update",
"prev": {"type": "Flow"},
"curr": {"type": "Flow"}}
mock_flow.return_value = flow
self.result = Event("publisher_id", "FlowChanged", self.value)
self.target._do_event_flowchanged(self.result)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
logging_debug.call_count, 1)
self.target._on_flow_update.assert_called_once_with(
"publisher_id", flow, flow, "subscription")
    def test_do_event_flowchanged_delete_action(self):
        """FlowChanged "delete" action dispatches the unpacked (prev) Flow
        to _on_flow_delete."""
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.flow."
                          "flow.Flow."
                          "create_from_packed")) as (logging_debug,
                                                     logging_error,
                                                     mock_flow):
            self.target._on_flow_delete = Mock()
            self.target._Logic__subscription_table =\
                {"FlowChanged::publisher_id": "subscription"}
            flow = Flow("BasicFlow", "1", "FlowId", "Owner",
                        True, 123456789, "establishing",
                        {"PortKey": "PortVal"})
            self.value = {"id": "PortId",
                          "version": "0001",
                          "action": "delete",
                          "prev": {"type": "Flow"},
                          "curr": None}
            mock_flow.return_value = flow
            self.result = Event("publisher_id", "FlowChanged", self.value)
            self.target._do_event_flowchanged(self.result)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                logging_debug.call_count, 1)
            self.target._on_flow_delete.assert_called_once_with(
                "publisher_id", flow)
    def test_do_event_flowchanged_other_action(self):
        """An unrecognized FlowChanged action is ignored without error;
        the handler emits two debug logs and dispatches nothing."""
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.flow."
                          "flow.Flow."
                          "create_from_packed")) as (logging_debug,
                                                     logging_error,
                                                     mock_flow):
            self.target._Logic__subscription_table =\
                {"FlowChanged::publisher_id": "subscription"}
            flow = Flow("BasicFlow", "1", "FlowId", "Owner",
                        True, 123456789, "establishing",
                        {"PortKey": "PortVal"})
            self.value = {"id": "PortId",
                          "version": "0001",
                          "action": "other",
                          "prev": None,
                          "curr": {"type": "Flow"}}
            mock_flow.return_value = flow
            self.result = Event("publisher_id", "FlowChanged", self.value)
            self.target._do_event_flowchanged(self.result)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                logging_debug.call_count, 2)
    def test_do_event_flowchanged_key_error(self):
        """A FlowChanged event missing the "id" key triggers the KeyError
        path: one error log, no debug log."""
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.flow."
                          "flow.Flow."
                          "create_from_packed")) as (logging_debug,
                                                     logging_error,
                                                     mock_flow):
            self.target._Logic__subscription_table =\
                {"FlowChanged::publisher_id": "subscription"}
            flow = Flow("BasicFlow", "1", "FlowId", "Owner",
                        True, 123456789, "establishing",
                        {"PortKey": "PortVal"})
            # "error_id" instead of "id" provokes the KeyError.
            self.value = {"error_id": "PortId",
                          "version": "0001",
                          "action": "add",
                          "prev": None,
                          "curr": {"type": "Flow"}}
            mock_flow.return_value = flow
            self.result = Event("publisher_id", "FlowChanged", self.value)
            self.target._do_event_flowchanged(self.result)
            self.assertEqual(
                logging_error.call_count, 1)
            self.assertEqual(
                logging_debug.call_count, 0)
    def test_do_event_inpacketadded_success(self):
        """InPacketAdded event flows through pre-hook -> conversion ->
        post-hook, each receiving the unpacked packet."""
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.packet."
                          "in_packet_added.InPacketAdded."
                          "create_from_packed")) as (logging_debug,
                                                     logging_error,
                                                     mock_in_packet_added):
            mock_in_packet_added.return_value = "Dummy"
            # Pre-hook must return truthy for the chain to continue.
            self.target._on_in_packet_added_pre = Mock(
                return_value=True)
            self.target._add_in_packet_conversion = Mock(
                return_value="resp_list")
            self.target._on_in_packet_added_post = Mock()
            self.value = {"id": "InPacketAdded",
                          "version": "0001"}
            self.result = Event("publisher_id", "InPacketAdded", self.value)
            self.target._do_event_inpacketadded(self.result)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                logging_debug.call_count, 1)
            self.target._on_in_packet_added_pre.assert_called_once_with(
                "publisher_id", "Dummy")
            self.target._add_in_packet_conversion.assert_called_once_with(
                "publisher_id", "Dummy")
            self.target._on_in_packet_added_post.assert_called_once_with(
                "publisher_id", "Dummy", "resp_list")
    def test_do_event_inpacketadded_error(self):
        """An InPacketAdded event missing the "id" key logs one error and
        no debug output."""
        with nested(patch("logging.debug"),
                    patch("logging.error")) as (logging_debug,
                                                logging_error):
            self.value = {"error_id": "InPacketAdded",
                          "version": "0001"}
            self.result = Event("publisher_id", "InPacketAdded", self.value)
            self.target._do_event_inpacketadded(self.result)
            self.assertEqual(
                logging_error.call_count, 1)
            self.assertEqual(
                logging_debug.call_count, 0)
    def test_on_in_packet_added_pre(self):
        """Default _on_in_packet_added_pre emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_in_packet_added_pre("network_id", "msg")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_in_packet_added_post(self):
        """Default _on_in_packet_added_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_in_packet_added_post("network_id", "msg",
                                                 "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_do_event_outpacketadded_success(self):
        """OutPacketAdded event flows through pre-hook -> conversion ->
        post-hook, each receiving the unpacked packet."""
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.packet."
                          "out_packet_added.OutPacketAdded."
                          "create_from_packed")) as (logging_debug,
                                                     logging_error,
                                                     mock_out_packet_added):
            mock_out_packet_added.return_value = "Dummy"
            # Pre-hook must return truthy for the chain to continue.
            self.target._on_out_packet_added_pre = Mock(
                return_value=True)
            self.target._add_out_packet_conversion = Mock(
                return_value="resp_list")
            self.target._on_out_packet_added_post = Mock()
            self.value = {"id": "OutPacketAdded",
                          "version": "0001"}
            self.result = Event("publisher_id", "OutPacketAdded", self.value)
            self.target._do_event_outpacketadded(self.result)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                logging_debug.call_count, 1)
            self.target._on_out_packet_added_pre.assert_called_once_with(
                "publisher_id", "Dummy")
            self.target._add_out_packet_conversion.assert_called_once_with(
                "publisher_id", "Dummy")
            self.target._on_out_packet_added_post.assert_called_once_with(
                "publisher_id", "Dummy", "resp_list")
def test_do_event_outpacketadded_error(self):
with nested(patch("logging.debug"),
patch("logging.error")) as (logging_debug,
logging_error):
self.value = {"error_id": "OutPacketAdded",
"version": "0001"}
self.result = Event("publisher_id", "FlowChanged", self.value)
self.target._do_event_outpacketadded(self.result)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
logging_debug.call_count, 0)
    def test_on_out_packet_added_pre(self):
        """Default _on_out_packet_added_pre emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_out_packet_added_pre("network_id", "msg")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_out_packet_added_post(self):
        """Default _on_out_packet_added_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_out_packet_added_post("network_id", "msg",
                                                  "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_node_added(self):
        """_on_node_added chains pre-hook -> conversion -> post-hook,
        handing the conversion result to the post-hook."""
        self.target._on_node_added_pre = Mock(
            return_value=True)
        self.target._add_node_conversion = Mock(
            return_value="resp_list")
        self.target._on_node_added_post = Mock()
        self.target._on_node_added("network_id", "node_msg")
        self.target._on_node_added_pre.assert_called_once_with(
            "network_id", "node_msg")
        self.target._add_node_conversion.assert_called_once_with(
            "network_id", "node_msg")
        self.target._on_node_added_post.assert_called_once_with(
            "network_id", "node_msg", "resp_list")
def test_on_node_added_pre(self):
with patch("logging.debug") as logging_debug:
self.result = self.target._on_node_added_pre("network_id",
"node_msg")
self.assertEqual(
logging_debug.call_count, 1)
    def test_on_node_added_post(self):
        """Default _on_node_added_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_node_added_post("network_id", "msg", "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_port_added(self):
        """_on_port_added chains pre-hook -> conversion -> post-hook,
        handing the conversion result to the post-hook."""
        self.target._on_port_added_pre = Mock(
            return_value=True)
        self.target._add_port_conversion = Mock(
            return_value="resp_list")
        self.target._on_port_added_post = Mock()
        self.target._on_port_added("network_id", "port_msg")
        self.target._on_port_added_pre.assert_called_once_with(
            "network_id", "port_msg")
        self.target._add_port_conversion.assert_called_once_with(
            "network_id", "port_msg")
        self.target._on_port_added_post.assert_called_once_with(
            "network_id", "port_msg", "resp_list")
    def test_on_port_added_pre(self):
        """Default _on_port_added_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_port_added_pre("network_id",
                                                         "port_msg")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_port_added_post(self):
        """Default _on_port_added_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_port_added_post("network_id", "msg", "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_link_added(self):
        """_on_link_added chains pre-hook -> conversion -> post-hook,
        handing the conversion result to the post-hook."""
        self.target._on_link_added_pre = Mock(
            return_value=True)
        self.target._add_link_conversion = Mock(
            return_value="resp_list")
        self.target._on_link_added_post = Mock()
        self.target._on_link_added("network_id", "link_msg")
        self.target._on_link_added_pre.assert_called_once_with(
            "network_id", "link_msg")
        self.target._add_link_conversion.assert_called_once_with(
            "network_id", "link_msg")
        self.target._on_link_added_post.assert_called_once_with(
            "network_id", "link_msg", "resp_list")
    def test_on_link_added_pre(self):
        """Default _on_link_added_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_link_added_pre("network_id",
                                                         "link_msg")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_link_added_post(self):
        """Default _on_link_added_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_link_added_post("network_id", "msg", "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_flow_added(self):
        """_on_flow_added chains pre-hook -> conversion -> post-hook,
        handing the conversion result to the post-hook."""
        self.target._on_flow_added_pre = Mock(
            return_value=True)
        self.target._add_flow_conversion = Mock(
            return_value="resp_list")
        self.target._on_flow_added_post = Mock()
        self.target._on_flow_added("network_id", "flow_msg")
        self.target._on_flow_added_pre.assert_called_once_with(
            "network_id", "flow_msg")
        self.target._add_flow_conversion.assert_called_once_with(
            "network_id", "flow_msg")
        self.target._on_flow_added_post.assert_called_once_with(
            "network_id", "flow_msg", "resp_list")
    def test_on_flow_added_pre(self):
        """Default _on_flow_added_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_flow_added_pre("network_id",
                                                         "flow_msg")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_flow_added_post(self):
        """Default _on_flow_added_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_flow_added_post("network_id", "msg", "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_node_update(self):
        """_on_node_update chains pre-hook -> conversion -> post-hook with
        (network, prev, curr, attributes) plus the conversion result."""
        self.target._on_node_update_pre = Mock(
            return_value=True)
        self.target._update_node_conversion = Mock(
            return_value="resp_list")
        self.target._on_node_update_post = Mock()
        # NOTE(review): "sttributes" is a typo but harmless — it is only
        # an opaque dummy value passed through and asserted verbatim.
        self.target._on_node_update("network_id", "prev", "curr", "sttributes")
        self.target._on_node_update_pre.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes")
        self.target._update_node_conversion.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes")
        self.target._on_node_update_post.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes", "resp_list")
    def test_on_node_update_pre(self):
        """Default _on_node_update_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_node_update_pre("network_id",
                                                          "prev",
                                                          "curr",
                                                          "sttributes")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_node_update_post(self):
        """Default _on_node_update_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_node_update_post("network_id",
                                             "prev",
                                             "curr",
                                             "sttributes",
                                             "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_port_update(self):
        """_on_port_update chains pre-hook -> conversion -> post-hook with
        (network, prev, curr, attributes) plus the conversion result."""
        self.target._on_port_update_pre = Mock(
            return_value=True)
        self.target._update_port_conversion = Mock(
            return_value="resp_list")
        self.target._on_port_update_post = Mock()
        self.target._on_port_update("network_id", "prev", "curr", "sttributes")
        self.target._on_port_update_pre.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes")
        self.target._update_port_conversion.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes")
        self.target._on_port_update_post.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes", "resp_list")
    def test_on_port_update_pre(self):
        """Default _on_port_update_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_port_update_pre("network_id",
                                                          "prev",
                                                          "curr",
                                                          "sttributes")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_port_update_post(self):
        """Default _on_port_update_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_port_update_post("network_id",
                                             "prev",
                                             "curr",
                                             "sttributes",
                                             "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_link_update(self):
        """_on_link_update chains pre-hook -> conversion -> post-hook with
        (network, prev, curr, attributes) plus the conversion result."""
        self.target._on_link_update_pre = Mock(
            return_value=True)
        self.target._update_link_conversion = Mock(
            return_value="resp_list")
        self.target._on_link_update_post = Mock()
        self.target._on_link_update("network_id", "prev", "curr", "sttributes")
        self.target._on_link_update_pre.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes")
        self.target._update_link_conversion.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes")
        self.target._on_link_update_post.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes", "resp_list")
    def test_on_link_update_pre(self):
        """Default _on_link_update_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_link_update_pre("network_id",
                                                          "prev",
                                                          "curr",
                                                          "sttributes")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_link_update_post(self):
        """Default _on_link_update_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_link_update_post("network_id",
                                             "prev",
                                             "curr",
                                             "sttributes",
                                             "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_flow_update(self):
        """_on_flow_update chains pre-hook -> conversion -> post-hook with
        (network, prev, curr, attributes) plus the conversion result."""
        self.target._on_flow_update_pre = Mock(
            return_value=True)
        self.target._update_flow_conversion = Mock(
            return_value="resp_list")
        self.target._on_flow_update_post = Mock()
        self.target._on_flow_update("network_id", "prev", "curr", "sttributes")
        self.target._on_flow_update_pre.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes")
        self.target._update_flow_conversion.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes")
        self.target._on_flow_update_post.assert_called_once_with(
            "network_id", "prev", "curr", "sttributes", "resp_list")
    def test_on_flow_update_pre(self):
        """Default _on_flow_update_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_flow_update_pre("network_id",
                                                          "prev",
                                                          "curr",
                                                          "sttributes")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_flow_update_post(self):
        """Default _on_flow_update_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_flow_update_post("network_id",
                                             "prev",
                                             "curr",
                                             "sttributes",
                                             "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_node_delete(self):
        """_on_node_delete chains pre-hook -> conversion -> post-hook,
        handing the conversion result to the post-hook."""
        self.target._on_node_delete_pre = Mock(
            return_value=True)
        self.target._delete_node_conversion = Mock(
            return_value="resp_list")
        self.target._on_node_delete_post = Mock()
        self.target._on_node_delete("network_id", "msg")
        self.target._on_node_delete_pre.assert_called_once_with(
            "network_id", "msg")
        self.target._delete_node_conversion.assert_called_once_with(
            "network_id", "msg")
        self.target._on_node_delete_post.assert_called_once_with(
            "network_id", "msg", "resp_list")
    def test_on_node_delete_pre(self):
        """Default _on_node_delete_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_node_delete_pre("network_id",
                                                          "msg")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_node_delete_post(self):
        """Default _on_node_delete_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_node_delete_post("network_id",
                                             "msg",
                                             "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_port_delete(self):
        """_on_port_delete chains pre-hook -> conversion -> post-hook,
        handing the conversion result to the post-hook."""
        self.target._on_port_delete_pre = Mock(
            return_value=True)
        self.target._delete_port_conversion = Mock(
            return_value="resp_list")
        self.target._on_port_delete_post = Mock()
        self.target._on_port_delete("network_id", "msg")
        self.target._on_port_delete_pre.assert_called_once_with(
            "network_id", "msg")
        self.target._delete_port_conversion.assert_called_once_with(
            "network_id", "msg")
        self.target._on_port_delete_post.assert_called_once_with(
            "network_id", "msg", "resp_list")
    def test_on_port_delete_pre(self):
        """Default _on_port_delete_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_port_delete_pre("network_id",
                                                          "msg")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_port_delete_post(self):
        """Default _on_port_delete_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_port_delete_post("network_id",
                                             "msg",
                                             "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_link_delete(self):
        """_on_link_delete chains pre-hook -> conversion -> post-hook,
        handing the conversion result to the post-hook."""
        self.target._on_link_delete_pre = Mock(
            return_value=True)
        self.target._delete_link_conversion = Mock(
            return_value="resp_list")
        self.target._on_link_delete_post = Mock()
        self.target._on_link_delete("network_id", "msg")
        self.target._on_link_delete_pre.assert_called_once_with(
            "network_id", "msg")
        self.target._delete_link_conversion.assert_called_once_with(
            "network_id", "msg")
        self.target._on_link_delete_post.assert_called_once_with(
            "network_id", "msg", "resp_list")
    def test_on_link_delete_pre(self):
        """Default _on_link_delete_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_link_delete_pre("network_id",
                                                          "msg")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_link_delete_post(self):
        """Default _on_link_delete_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_link_delete_post("network_id",
                                             "msg",
                                             "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_flow_delete(self):
        """_on_flow_delete chains pre-hook -> conversion -> post-hook,
        handing the conversion result to the post-hook."""
        self.target._on_flow_delete_pre = Mock(
            return_value=True)
        self.target._delete_flow_conversion = Mock(
            return_value="resp_list")
        self.target._on_flow_delete_post = Mock()
        self.target._on_flow_delete("network_id", "msg")
        self.target._on_flow_delete_pre.assert_called_once_with(
            "network_id", "msg")
        self.target._delete_flow_conversion.assert_called_once_with(
            "network_id", "msg")
        self.target._on_flow_delete_post.assert_called_once_with(
            "network_id", "msg", "resp_list")
    def test_on_flow_delete_pre(self):
        """Default _on_flow_delete_pre returns True and emits one debug log."""
        with patch("logging.debug") as logging_debug:
            self.result = self.target._on_flow_delete_pre("network_id",
                                                          "msg")
            self.assertEqual(self.result, True)
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_on_flow_delete_post(self):
        """Default _on_flow_delete_post emits exactly one debug log."""
        with patch("logging.debug") as logging_debug:
            self.target._on_flow_delete_post("network_id",
                                             "msg",
                                             "resp_list")
            self.assertEqual(
                logging_debug.call_count, 1)
    def test_add_node_conversion_success(self):
        """_add_node_conversion PUTs the node to the mapped network and
        records a bidirectional entry in the node conversion table."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "node.Node."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_node,
                          mock_put_object):
            # Name-mangled access to ConversionTable's private dicts.
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            port = Port("Port", "1", "PortId", "NodeId",
                        "OutLink", "InLink", {"PortKey": "PortVal"})
            node = Node("Node", "0001", "Node01",
                        {"port_id": port}, {"attribute_key": "value"})
            # The mapped-to network must have an interface registered.
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "node_item")
            mock_node.return_value = node
            mock_put_object.return_value = self.value
            self.result = self.target._add_node_conversion("network1",
                                                           node)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                self.result["network2"].body, "node_item")
            self.assertEqual(
                conversion_table._ConversionTable__node_conversion_table,
                {"network1::Node01": ["network2::Node01"],
                 "network2::Node01": ["network1::Node01"]})
    def test_add_node_conversion_not_in__network_interfaces(self):
        """If the mapped-to network has no registered interface, nothing
        is PUT and the conversion table stays empty."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "node.Node."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_node,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            port = Port("Port", "1", "PortId", "NodeId",
                        "OutLink", "InLink", {"PortKey": "PortVal"})
            node = Node("Node", "0001", "Node01",
                        {"port_id": port}, {"attribute_key": "value"})
            # Only "network1" is registered; "network2" is missing on purpose.
            self.target._network_interfaces["network1"] = \
                NetworkInterface(self.target.dispatcher, "network1")
            self.value = Response(200, "node_item")
            mock_node.return_value = node
            mock_put_object.return_value = self.value
            self.result = self.target._add_node_conversion("network1",
                                                           node)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                self.result, {})
            self.assertEqual(
                conversion_table._ConversionTable__node_conversion_table,
                {})
    def test_add_node_conversion_error(self):
        """A malformed PUT response body logs an error and leaves the node
        conversion table unchanged, while still returning the response."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            port = Port("Port", "1", "PortId", "NodeId",
                        "OutLink", "InLink", {"PortKey": "PortVal"})
            node = Node("Node", "0001", "Node01",
                        {"port_id": port}, {"attribute_key": "value"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            # Dict body instead of a packed Node provokes the error path.
            self.value = Response(200, {"key": "error"})
            mock_put_object.return_value = self.value
            self.result = self.target._add_node_conversion("network1",
                                                           node)
            self.assertEqual(
                logging_error.call_count, 1)
            self.assertEqual(
                self.result["network2"].body, {"key": "error"})
            self.assertEqual(
                conversion_table._ConversionTable__node_conversion_table,
                {})
    def test_add_port_conversion_success(self):
        """_add_port_conversion PUTs the port to the mapped network and
        records a bidirectional node::port entry in the conversion table."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            port = Port("Port", "1", "PortId", "NodeId",
                        "OutLink", "InLink", {"PortKey": "PortVal"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "port_item")
            mock_port.return_value = port
            mock_put_object.return_value = self.value
            self.result = self.target._add_port_conversion("network1",
                                                           port)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                self.result["network2"].body, "port_item")
            self.assertEqual(
                conversion_table._ConversionTable__port_conversion_table,
                {"network1::NodeId::PortId": ["network2::NodeId::PortId"],
                 "network2::NodeId::PortId": ["network1::NodeId::PortId"]})
    def test_add_port_conversion_not_in_network_interfaces(self):
        """If the mapped-to network has no registered interface, no PUT is
        issued and the port conversion table stays empty."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            port = Port("Port", "1", "PortId", "NodeId",
                        "OutLink", "InLink", {"PortKey": "PortVal"})
            # Only "network1" is registered; "network2" is missing on purpose.
            self.target._network_interfaces["network1"] = \
                NetworkInterface(self.target.dispatcher, "network1")
            self.value = Response(200, "port_item")
            mock_port.return_value = port
            mock_put_object.return_value = self.value
            self.result = self.target._add_port_conversion("network1",
                                                           port)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                self.result, {})
            self.assertEqual(
                conversion_table._ConversionTable__port_conversion_table,
                {})
    def test_add_port_conversion_error(self):
        """A malformed PUT response body logs an error and leaves the port
        conversion table unchanged, while still returning the response."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            port = Port("Port", "1", "PortId", "NodeId",
                        "OutLink", "InLink", {"PortKey": "PortVal"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            # Dict body instead of a packed Port provokes the error path.
            self.value = Response(200, {"key": "error"})
            mock_put_object.return_value = self.value
            self.result = self.target._add_port_conversion("network1",
                                                           port)
            self.assertEqual(
                logging_error.call_count, 1)
            self.assertEqual(
                self.result["network2"].body, {"key": "error"})
            self.assertEqual(
                conversion_table._ConversionTable__port_conversion_table,
                {})
    def test_add_link_conversion_success(self):
        """_add_link_conversion PUTs the link to the mapped network and
        records a bidirectional entry in the link conversion table."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "link.Link."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_link,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            link = Link("Link", "1", "LinkId", "SrcNode",
                        "SrcPort", "DstNode", "DstPort",
                        {"PortKey": "PortVal"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "link_item")
            mock_link.return_value = link
            mock_put_object.return_value = self.value
            self.result = self.target._add_link_conversion("network1",
                                                           link)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                self.result["network2"].body, "link_item")
            self.assertEqual(
                conversion_table._ConversionTable__link_conversion_table,
                {"network1::LinkId": ["network2::LinkId"],
                 "network2::LinkId": ["network1::LinkId"]})
    def test_add_link_conversion_not_in_network_interfaces(self):
        """If the mapped-to network has no registered interface, no PUT is
        issued and the link conversion table stays empty."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "link.Link."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_link,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            link = Link("Link", "1", "LinkId", "SrcNode",
                        "SrcPort", "DstNode", "DstPort",
                        {"PortKey": "PortVal"})
            # Only "network1" is registered; "network2" is missing on purpose.
            self.target._network_interfaces["network1"] = \
                NetworkInterface(self.target.dispatcher, "network1")
            self.value = Response(200, "link_item")
            mock_link.return_value = link
            mock_put_object.return_value = self.value
            self.result = self.target._add_link_conversion("network1",
                                                           link)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                self.result, {})
            self.assertEqual(
                conversion_table._ConversionTable__link_conversion_table,
                {})
    def test_add_link_conversion_error(self):
        """A malformed PUT response body logs an error and leaves the link
        conversion table unchanged, while still returning the response."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            link = Link("Link", "1", "LinkId", "SrcNode",
                        "SrcPort", "DstNode", "DstPort",
                        {"PortKey": "PortVal"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            # Dict body instead of a packed Link provokes the error path.
            self.value = Response(200, {"key": "error"})
            mock_put_object.return_value = self.value
            self.result = self.target._add_link_conversion("network1",
                                                           link)
            self.assertEqual(
                logging_error.call_count, 1)
            self.assertEqual(
                self.result["network2"].body, {"key": "error"})
            self.assertEqual(
                conversion_table._ConversionTable__link_conversion_table,
                {})
    def test_add_flow_conversion_error(self):
        """A malformed PUT response body logs an error and leaves the flow
        conversion table unchanged, while still returning the response."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            flow = Flow("BasicFlow", "1", "FlowId", "Owner",
                        True, 123456789, "establishing",
                        {"PortKey": "PortVal"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            # Dict body instead of a packed Flow provokes the error path.
            self.value = Response(200, {"key": "error"})
            mock_put_object.return_value = self.value
            self.result = self.target._add_flow_conversion("network1", flow)
            self.assertEqual(logging_error.call_count, 1)
            self.assertEqual(self.result["network2"].body, {"key": "error"})
            self.assertEqual(conversion_table._ConversionTable__flow_conversion_table, {})
    def test_add_flow_conversion_success(self):
        """_add_flow_conversion PUTs the flow to the mapped network and
        returns the per-network response."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.flow."
                          "flow.Flow."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_flow,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            flow = Flow("BasicFlow", "1", "FlowId", "Owner",
                        True, 123456789, "establishing",
                        {"PortKey": "PortVal"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "flow_item")
            mock_flow.return_value = flow
            mock_put_object.return_value = self.value
            self.result = self.target._add_flow_conversion("network1", flow)
            self.assertEqual(logging_error.call_count, 0)
            self.assertEqual(self.result["network2"].body, "flow_item")
    def test_add_flow_conversion_not_in_network_interfaces(self):
        """If the mapped-to network has no registered interface, no PUT is
        issued and an empty result dict is returned."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            flow = Flow("BasicFlow", "1", "FlowId", "Owner",
                        True, 123456789, "establishing",
                        {"PortKey": "PortVal"})
            # Only "network1" is registered; "network2" is missing on purpose.
            self.target._network_interfaces["network1"] = \
                NetworkInterface(self.target.dispatcher, "network1")
            self.value = Response(200, "flow_item")
            mock_put_object.return_value = self.value
            self.result = self.target._add_flow_conversion("network1", flow)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                self.result, {})
    def test_add_in_packet_conversion_success(self):
        """_add_in_packet_conversion deletes the packet from the source
        network and POSTs it to the converted network, using the node/port
        conversion tables to translate its location."""
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.packet."
                          "in_packet.InPacket."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_del_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_post_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_in_packet,
                          mock_del_object,
                          mock_post_object):
            # Bidirectional node and port mappings between the networks.
            conversion_table._ConversionTable__node_conversion_table =\
                {"network1::NodeId": ["network2::NodeId"],
                 "network2::NodeId": ["network1::NodeId"]}
            conversion_table._ConversionTable__port_conversion_table =\
                {"network1::NodeId::PortId": ["network2::NodeId::PortId"],
                 "network2::NodeId::PortId": ["network1::NodeId::PortId"]}
            basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
                                              "PortId")
            inpacket = InPacket("inpacket_id", "InPcket", "attributes",
                                "NodeId", "PortId", basic_flow_match,
                                "Data")
            inpacket_add = InPacketAdded("inpacket_id")
            self.target._network_interfaces["network1"] =\
                NetworkInterface(self.target.dispatcher, "network1")
            self.target._network_interfaces["network2"] =\
                NetworkInterface(self.target.dispatcher, "network2")
            # Same mocked Response serves both the DELETE and the POST.
            self.value = Response(200, "inpacket_item")
            mock_del_object.return_value = self.value
            mock_in_packet.return_value = inpacket
            mock_post_object.return_value = self.value
            self.result = self.target._add_in_packet_conversion("network1",
                                                                inpacket_add)
            self.assertEqual(
                logging_error.call_count, 0)
            self.assertEqual(
                self.result["network2"].body, "inpacket_item")
def test_add_in_packet_conversionnot_in_network_interfaces(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_del_in_packet_None(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = None
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_del_in_packet_node_None(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
None, "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_convert_in_node_id_list_None(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_cdel_in_packet_port_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", None, basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_convert_in_port_id_list_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_header_in_port_id_list_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "Node01",
"Port01")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_in_packet_conversion_attr_list_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"in_packet.InPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_in_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
inpacket = InPacket("inpacket_id", "InPcket", "attributes",
"NodeId", "PortId", basic_flow_match,
"Data")
inpacket_add = InPacketAdded("inpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, "inpacket_item")
mock_del_object.return_value = self.value
mock_in_packet.return_value = inpacket
mock_post_object.return_value = self.value
self.result = self.target._add_in_packet_conversion("network1",
inpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_ports_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network2::NodeId": ["network1::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], None,
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2"].body, "outpacket_item")
def test_add_out_packet_conversion_portsEx_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::PortExId": ["network2::PortExId"],
"network1::NodeId": ["network2::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortExId": ["network2::NodeId::PortExId"],
"network1::NodeId::PortId": ["network2::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", None, ["PortExId"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2"].body, "outpacket_item")
def test_add_out_packet_conversion_network_id_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_del_out_packet_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = None
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_convert_port_id_list_zero(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_del_out_packet_node_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
None, ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_convert_node_id_list_zero(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", ["PortId"], ["Ports_ex"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_ports_list_zero(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::NodeId": ["network2::NodeId"],
"network1::Node01": ["network2::Node01"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortId": ["network2::NodeId::PortId"],
"network2::NodeId::PortId": ["network1::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"Node01", ["Port01"], None,
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_add_out_packet_conversion_portsEx_list_zero(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.packet."
"out_packet.OutPacket."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_post_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_out_packet,
mock_del_object,
mock_post_object):
conversion_table._ConversionTable__node_conversion_table =\
{"network1::PortExId": ["network2::PortExId"],
"network1::NodeId": ["network2::NodeId"]}
conversion_table._ConversionTable__port_conversion_table =\
{"network1::NodeId::PortEx": ["network2::NodeId::PortEx"],
"network1::NodeId::PortId": ["network2::NodeId::PortId"]}
basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
"PortId")
outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
"NodeId", None, ["PortExId"],
basic_flow_match, "Data")
outpacket_add = OutPacketAdded("outpacket_id")
self.target._network_interfaces["network1"] =\
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] =\
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "outpacket_item")
mock_del_object.return_value = self.value
mock_out_packet.return_value = outpacket
mock_post_object.return_value = self.value
self.result = self.target._add_out_packet_conversion("network1",
outpacket_add)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
node_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Node01"].packed_object(),
node_prev.packed_object())
def test_update_node_conversion_network_id_None(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion(None,
node_prev,
node_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_node_curr_None(self):
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
None,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_node_conversion_attributes_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object):
self.target._Logic__subscription_table =\
{"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node01": ["network1::Node01"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
port_curr = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_curr = Node("Node", "0001", "Node01",
{"port_id": port_curr},
{"attribute_curr": "prev_curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "node_item")
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_node_conversion("network1",
node_prev,
node_curr,
None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Node01"].packed_object(),
node_prev.packed_object())
    def test_update_node_conversion_node_id_not_in_interfaces(self):
        """_update_node_conversion when the mapped network has no interface.

        "network2" is present in the conversion tables but is never added
        to self.target._network_interfaces, so no update can be sent:
        the result is empty and no error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "node.Node."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_node,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__node_conversion_table =\
                {"network1::Node01": ["network2::Node01"],
                 "network2::Node01": ["network1::Node01"]}
            port_prev = Port("Port", "1", "PortId", "NodeId",
                             "OutLink", "InLink", {"PortKey": "PortVal"})
            node_prev = Node("Node", "0001", "Node02",
                             {"port_id": port_prev},
                             {"attribute_prev": "prev_value"})
            port_curr = Port("Port", "1", "PortId", "NodeId",
                             "OutLink", "InLink", {"PortKey": "PortVal"})
            node_curr = Node("Node", "0001", "Node01",
                             {"port_id": port_curr},
                             {"attribute_curr": "prev_curr"})
            # Deliberately no _network_interfaces["network2"] registration.
            self.value = Response(200, "node_item")
            mock_node.return_value = node_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_node_conversion("network1",
                                                              node_prev,
                                                              node_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_node_conversion_success_get_node_false(self):
        """_update_node_conversion when the fetched node unpacks to None.

        Node.create_from_packed is mocked to return None, so the remote
        node lookup yields nothing to update: the result is empty and no
        error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "node.Node."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_node,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__node_conversion_table =\
                {"network1::Node01": ["network2::Node01"],
                 "network2::Node01": ["network1::Node01"]}
            port_prev = Port("Port", "1", "PortId", "NodeId",
                             "OutLink", "InLink", {"PortKey": "PortVal"})
            node_prev = Node("Node", "0001", "Node02",
                             {"port_id": port_prev},
                             {"attribute_prev": "prev_value"})
            port_curr = Port("Port", "1", "PortId", "NodeId",
                             "OutLink", "InLink", {"PortKey": "PortVal"})
            node_curr = Node("Node", "0001", "Node01",
                             {"port_id": port_curr},
                             {"attribute_curr": "prev_curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "node_item")
            # The unpacked remote node is None -> nothing to update.
            mock_node.return_value = None
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_node_conversion("network1",
                                                              node_prev,
                                                              node_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_node_conversion_attr_key_in_ignore_attributes(self):
        """_update_node_conversion skips attributes listed as ignored.

        The only changed attribute is "oper_status" (DOWN -> UP), and the
        subscription table plus the attributes argument mark other keys;
        with the change falling under the ignore handling, no update is
        produced: the result is empty and no error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "node.Node."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_node,
                          mock_get_object,
                          mock_put_object):
            # Note: key uses ::network1 here (not ::publisher_id as in
            # sibling tests) with ["physical_id", "vendor"].
            self.target._Logic__subscription_table =\
                {"NodeChanged::UPDATE::network1": ["physical_id", "vendor"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__node_conversion_table =\
                {"network1::Node01": ["network2::Node01"],
                 "network2::Node01": ["network1::Node01"]}
            port_prev = Port("Port", "1", "PortId", "NodeId",
                             "OutLink", "InLink", {"PortKey": "PortVal"})
            port_curr = Port("Port", "1", "PortId", "NodeId",
                             "OutLink", "InLink", {"PortKey": "PortVal"})
            node_prev = Node("Node", "1", "Node01", {"port_id": port_prev},
                             {"oper_status": "DOWN"})
            node_curr = Node("Node", "2", "Node01", {"port_id": port_curr},
                             {"oper_status": "UP"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "node_item")
            mock_node.return_value = node_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_node_conversion("network1",
                                                              node_prev,
                                                              node_curr,
                                                              ["physical_id", "vendor"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_node_conversion_attributes_exist(self):
        """_update_node_conversion with identical prev/curr attributes.

        node_prev and node_curr carry the same {"attribute": "value"}
        dictionary, so there is no attribute delta to propagate: the
        result is empty and no error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "node.Node."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_node,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__node_conversion_table =\
                {"network1::Node01": ["network2::Node01"],
                 "network2::Node01": ["network1::Node01"]}
            port_prev = Port("Port", "1", "PortId", "NodeId",
                             "OutLink", "InLink", {"PortKey": "PortVal"})
            node_prev = Node("Node", "0001", "Node02",
                             {"port_id": port_prev},
                             {"attribute": "value"})
            port_curr = Port("Port", "1", "PortId", "NodeId",
                             "OutLink", "InLink", {"PortKey": "PortVal"})
            node_curr = Node("Node", "0001", "Node01",
                             {"port_id": port_curr},
                             {"attribute": "value"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "node_item")
            mock_node.return_value = node_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_node_conversion("network1",
                                                              node_prev,
                                                              node_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_node_conversion_KeyError(self):
        """_update_node_conversion logs an error when unpacking raises.

        Node.create_from_packed is mocked to raise KeyError; the method
        must catch it, log exactly one error, and return an empty result
        rather than propagating the exception.
        """
        conversion_table = self.target._conversion_table
        port_prev = Port("Port", "1", "PortId", "NodeId",
                         "OutLink", "InLink", {"PortKey": "PortVal"})
        node_prev = Node("Node", "0001", "Node02",
                         {"port_id": port_prev},
                         {"attribute_prev": "prev_value"})
        port_curr = Port("Port", "1", "PortId", "NodeId",
                         "OutLink", "InLink", {"PortKey": "PortVal"})
        node_curr = Node("Node", "0001", "Node01",
                         {"port_id": port_curr},
                         {"attribute_curr": "prev_curr"})
        # Extra sixth patch (get_node) compared with sibling tests, so the
        # as-tuple has six names; order must match the patch list.
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "node.Node."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util." +
                          "network_interface.NetworkInterface.get_node")
                    ) as (logging_debug,
                          logging_error,
                          mock_node,
                          mock_get_object,
                          mock_put_object,
                          mock_get_node):
            self.target._Logic__subscription_table =\
                {"NodeChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__node_conversion_table =\
                {"network1::Node01": ["network2::Node01"],
                 "network2::Node01": ["network1::Node01"]}
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "node_item")
            mock_node.side_effect = KeyError()
            mock_get_node.return_value = node_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_node_conversion("network1",
                                                              node_prev,
                                                              node_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 1)
        self.assertEqual(
            self.result, {})
    def test_update_port_conversion_success(self):
        """_update_port_conversion happy path.

        With a port mapping network1::Node02::Port02 -> network2 and a
        registered "network2" interface, the update is propagated: the
        result maps "network2::Node02::Port02" to the converted port and
        no error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__port_conversion_table =\
                {"network1::Node02::Port02": ["network2::Node02::Port02"],
                 "network2::Node02::Port02": ["network1::Node02::Port02"]}
            port_prev = Port("Port", "1", "Port01", "Node01",
                             "OutLink", "InLink", {"PortKey": "prev"})
            port_curr = Port("Port", "1", "Port02", "Node02",
                             "OutLink", "InLink", {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "port_item")
            mock_port.return_value = port_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_port_conversion("network1",
                                                              port_prev,
                                                              port_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result["network2::Node02::Port02"].packed_object(),
            port_prev.packed_object())
    def test_update_port_conversion_network_id_None(self):
        """_update_port_conversion with network_id=None is a no-op.

        Even with valid conversion tables and a registered interface,
        passing None as the network id returns an empty result without
        logging an error.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__port_conversion_table =\
                {"network1::Node02::Port02": ["network2::Node02::Port02"],
                 "network2::Node02::Port02": ["network1::Node02::Port02"]}
            port_prev = Port("Port", "1", "Port01", "Node01",
                             "OutLink", "InLink", {"PortKey": "prev"})
            port_curr = Port("Port", "1", "Port02", "Node02",
                             "OutLink", "InLink", {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "port_item")
            mock_port.return_value = port_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_port_conversion(None,
                                                              port_prev,
                                                              port_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_port_conversion_port_curr_None(self):
        """_update_port_conversion with port_curr=None is a no-op.

        Without a current port there is nothing to diff against, so the
        method returns an empty result and logs no error.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__port_conversion_table =\
                {"network1::Node02::Port02": ["network2::Node02::Port02"],
                 "network2::Node02::Port02": ["network1::Node02::Port02"]}
            port_prev = Port("Port", "1", "Port01", "Node01",
                             "OutLink", "InLink", {"PortKey": "prev"})
            port_curr = Port("Port", "1", "Port02", "Node02",
                             "OutLink", "InLink", {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "port_item")
            mock_port.return_value = port_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_port_conversion("network1",
                                                              port_prev,
                                                              None,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_port_conversion_attributes_None(self):
        """_update_port_conversion with attributes=None still converts.

        attributes=None must not suppress the sync: the result maps
        "network2::Node02::Port02" to the converted port and no error is
        logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__port_conversion_table =\
                {"network1::Node02::Port02": ["network2::Node02::Port02"],
                 "network2::Node02::Port02": ["network1::Node02::Port02"]}
            port_prev = Port("Port", "1", "Port01", "Node01",
                             "OutLink", "InLink", {"PortKey": "prev"})
            port_curr = Port("Port", "1", "Port02", "Node02",
                             "OutLink", "InLink", {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "port_item")
            mock_port.return_value = port_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_port_conversion("network1",
                                                              port_prev,
                                                              port_curr,
                                                              None)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result["network2::Node02::Port02"].packed_object(),
            port_prev.packed_object())
    def test_update_port_conversion_port_id_not_in_interfaces(self):
        """_update_port_conversion when the mapped network has no interface.

        "network2" exists in the conversion tables but was never added to
        self.target._network_interfaces, so no update is sent: the result
        is empty and no error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__port_conversion_table =\
                {"network1::Node02::Port02": ["network2::Node02::Port02"],
                 "network2::Node02::Port02": ["network1::Node02::Port02"]}
            port_prev = Port("Port", "1", "Port01", "Node01",
                             "OutLink", "InLink", {"PortKey": "prev"})
            port_curr = Port("Port", "1", "Port02", "Node02",
                             "OutLink", "InLink", {"PortKey": "curr"})
            # Deliberately no _network_interfaces["network2"] registration.
            self.value = Response(200, "port_item")
            mock_port.return_value = port_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_port_conversion("network1",
                                                              port_prev,
                                                              port_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_port_conversion_port_None(self):
        """_update_port_conversion when the fetched port unpacks to None.

        Port.create_from_packed is mocked to return None, so the remote
        port lookup yields nothing to update: the result is empty and no
        error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__port_conversion_table =\
                {"network1::Node02::Port02": ["network2::Node02::Port02"],
                 "network2::Node02::Port02": ["network1::Node02::Port02"]}
            port_prev = Port("Port", "1", "Port01", "Node01",
                             "OutLink", "InLink", {"PortKey": "prev"})
            port_curr = Port("Port", "1", "Port02", "Node02",
                             "OutLink", "InLink", {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "port_item")
            # The unpacked remote port is None -> nothing to update.
            mock_port.return_value = None
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_port_conversion("network1",
                                                              port_prev,
                                                              port_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_port_conversion_attr_key_ignore_attributes(self):
        """_update_port_conversion with only non-propagated attribute keys.

        port_curr carries "max_bandwidth" instead of the shared "PortKey"
        attribute, and the attributes argument lists "oper_status"; the
        resulting delta produces no update: the result is empty and no
        error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__port_conversion_table =\
                {"network1::Node02::Port02": ["network2::Node02::Port02"],
                 "network2::Node02::Port02": ["network1::Node02::Port02"]}
            port_prev = Port("Port", "1", "Port01", "Node01",
                             "OutLink", "InLink", {"PortKey": "prev"})
            port_curr = Port("Port", "1", "Port02", "Node02",
                             "OutLink", "InLink", {"max_bandwidth": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "port_item")
            mock_port.return_value = port_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_port_conversion("network1",
                                                              port_prev,
                                                              port_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_port_conversion_attributes_exist(self):
        """_update_port_conversion with identical prev/curr attributes.

        Both ports carry {"PortKey": "curr"}, so there is no attribute
        delta to propagate: the result is empty and no error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_get_object,
                          mock_put_object):
            self.target._Logic__subscription_table =\
                {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__port_conversion_table =\
                {"network1::Node02::Port02": ["network2::Node02::Port02"],
                 "network2::Node02::Port02": ["network1::Node02::Port02"]}
            port_prev = Port("Port", "1", "Port01", "Node01",
                             "OutLink", "InLink", {"PortKey": "curr"})
            port_curr = Port("Port", "1", "Port02", "Node02",
                             "OutLink", "InLink", {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "port_item")
            mock_port.return_value = port_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_port_conversion("network1",
                                                              port_prev,
                                                              port_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_port_conversion_KeyError(self):
        """_update_port_conversion logs an error when unpacking raises.

        Port.create_from_packed is mocked to raise KeyError; the method
        must catch it, log exactly one error, and return an empty result.
        """
        conversion_table = self.target._conversion_table
        # Table setup happens before entering the patch context here,
        # unlike the sibling tests — kept as written.
        self.target._Logic__subscription_table =\
            {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__port_conversion_table =\
            {"network1::Node02::Port02": ["network2::Node02::Port02"],
             "network2::Node02::Port02": ["network1::Node02::Port02"]}
        port_prev = Port("Port", "1", "Port01", "Node01",
                         "OutLink", "InLink", {"PortKey": "prev"})
        port_curr = Port("Port", "1", "Port02", "Node02",
                         "OutLink", "InLink", {"PortKey": "curr"})
        # No _get_object patch in this variant; get_port is patched instead.
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "port.Port."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util." +
                          "network_interface.NetworkInterface.get_port")
                    ) as (logging_debug,
                          logging_error,
                          mock_port,
                          mock_put_object,
                          mock_get_port):
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "port_item")
            mock_get_port.return_value = port_prev
            mock_port.side_effect = KeyError()
            mock_put_object.return_value = self.value
            self.result = self.target._update_port_conversion("network1",
                                                              port_prev,
                                                              port_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 1)
        self.assertEqual(
            self.result, {})
    def test_update_link_conversion_success(self):
        """_update_link_conversion happy path.

        With a link mapping network1::Link02 -> network2::Link02 and a
        registered "network2" interface, the update is propagated: the
        result maps "network2::Link02" to the converted link and no error
        is logged.  (Link tests set no subscription table.)
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "link.Link."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_link,
                          mock_get_object,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__link_conversion_table =\
                {"network1::Link02": ["network2::Link02"],
                 "network2::Link02": ["network1::Link02"]}
            link_prev = Link("Link", "1", "Link01", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "prev"})
            link_curr = Link("Link", "1", "Link02", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "link_item")
            mock_link.return_value = link_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_link_conversion("network1",
                                                              link_prev,
                                                              link_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result["network2::Link02"].packed_object(),
            link_prev.packed_object())
    def test_update_link_conversion_network_id_None(self):
        """_update_link_conversion with network_id=None is a no-op.

        Passing None as the network id returns an empty result without
        logging an error, even with valid tables and a registered
        interface.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "link.Link."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_link,
                          mock_get_object,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__link_conversion_table =\
                {"network1::Link02": ["network2::Link02"],
                 "network2::Link02": ["network1::Link02"]}
            link_prev = Link("Link", "1", "Link01", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "prev"})
            link_curr = Link("Link", "1", "Link02", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "link_item")
            mock_link.return_value = link_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_link_conversion(None,
                                                              link_prev,
                                                              link_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_link_conversion_link_curr_None(self):
        """_update_link_conversion with link_curr=None is a no-op.

        Without a current link there is nothing to diff against, so the
        method returns an empty result and logs no error.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "link.Link."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_link,
                          mock_get_object,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__link_conversion_table =\
                {"network1::Link02": ["network2::Link02"],
                 "network2::Link02": ["network1::Link02"]}
            link_prev = Link("Link", "1", "Link01", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "prev"})
            link_curr = Link("Link", "1", "Link02", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "link_item")
            mock_link.return_value = link_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_link_conversion("network1",
                                                              link_prev,
                                                              None,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_link_conversion_attributes(self):
        """_update_link_conversion with attributes=None still converts.

        attributes=None must not suppress the sync: the result maps
        "network2::Link02" to the converted link and no error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "link.Link."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_link,
                          mock_get_object,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__link_conversion_table =\
                {"network1::Link02": ["network2::Link02"],
                 "network2::Link02": ["network1::Link02"]}
            link_prev = Link("Link", "1", "Link01", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "prev"})
            link_curr = Link("Link", "1", "Link02", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "link_item")
            mock_link.return_value = link_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_link_conversion("network1",
                                                              link_prev,
                                                              link_curr,
                                                              None)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result["network2::Link02"].packed_object(),
            link_prev.packed_object())
    def test_update_link_conversion_link_id_not_in_interfaces(self):
        """_update_link_conversion when the mapped network has no interface.

        "network2" exists in the conversion tables but was never added to
        self.target._network_interfaces, so no update is sent: the result
        is empty and no error is logged.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "link.Link."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_link,
                          mock_get_object,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__link_conversion_table =\
                {"network1::Link02": ["network2::Link02"],
                 "network2::Link02": ["network1::Link02"]}
            link_prev = Link("Link", "1", "Link01", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "prev"})
            link_curr = Link("Link", "1", "Link02", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "curr"})
            # Deliberately no _network_interfaces["network2"] registration.
            self.value = Response(200, "link_item")
            mock_link.return_value = link_prev
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_link_conversion("network1",
                                                              link_prev,
                                                              link_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
    def test_update_link_conversion_line_None(self):
        """_update_link_conversion when the fetched link unpacks to None.

        Link.create_from_packed is mocked to return None, so the remote
        link lookup yields nothing to update: the result is empty and no
        error is logged.

        NOTE(review): "line" in the method name looks like a typo for
        "link"; renaming would change the test id, so it is left as-is.
        """
        conversion_table = self.target._conversion_table
        with nested(patch("logging.debug"),
                    patch("logging.error"),
                    patch("org.o3project.odenos.core.component.network.topology."
                          "link.Link."
                          "create_from_packed"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_get_object_to_remote_object"),
                    patch("org.o3project.odenos.core.util."
                          "network_interface.NetworkInterface."
                          "_put_object_to_remote_object")
                    ) as (logging_debug,
                          logging_error,
                          mock_link,
                          mock_get_object,
                          mock_put_object):
            conversion_table._ConversionTable__network_conversion_table =\
                {"network1": ["network2"]}
            conversion_table._ConversionTable__link_conversion_table =\
                {"network1::Link02": ["network2::Link02"],
                 "network2::Link02": ["network1::Link02"]}
            link_prev = Link("Link", "1", "Link01", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "prev"})
            link_curr = Link("Link", "1", "Link02", "SrcNode",
                             "SrcPort", "DstNode", "DstPort",
                             {"PortKey": "curr"})
            self.target._network_interfaces["network2"] = \
                NetworkInterface(self.target.dispatcher, "network2")
            self.value = Response(200, "link_item")
            # The unpacked remote link is None -> nothing to update.
            mock_link.return_value = None
            mock_get_object.return_value = self.value
            mock_put_object.return_value = self.value
            self.result = self.target._update_link_conversion("network1",
                                                              link_prev,
                                                              link_curr,
                                                              ["oper_status"])
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_update_link_conversion_attr_key_in_ignore_attributes(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"unreserved_bandwidth": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_link_conversion_attributes_exist(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
mock_link.return_value = link_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_link_conversion_KeyError(self):
conversion_table = self.target._conversion_table
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__link_conversion_table =\
{"network1::Link02": ["network2::Link02"],
"network2::Link02": ["network1::Link02"]}
link_prev = Link("Link", "1", "Link01", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "prev"})
link_curr = Link("Link", "1", "Link02", "SrcNode",
"SrcPort", "DstNode", "DstPort",
{"PortKey": "curr"})
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"link.Link."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util." +
"network_interface.NetworkInterface.get_link")
) as (logging_debug,
logging_error,
mock_link,
mock_get_object,
mock_put_object,
Mock_get_link):
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, "link_item")
Mock_get_link.return_value = link_prev
mock_link.side_effect = KeyError()
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_link_conversion("network1",
link_prev,
link_curr,
["oper_status"])
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Flow02"].packed_object(),
flow_prev.packed_object())
def test_update_flow_conversion_network_id_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
None, flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_flow_curr_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, None, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_attributes_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Flow02"].packed_object(),
flow_prev.packed_object())
def test_update_flow_conversion_flow_id_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "establishing",
{"PortKey": "curr"})
self.value = Response(200, {"type": "Flow"})
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_flow_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_get_flow.return_value = None
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_differ_enabled(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
False, 123456789, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Flow02"].packed_object(),
flow_prev.packed_object())
def test_update_flow_conversion_differ_status(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "teardown",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result["network2::Flow02"].packed_object(),
flow_prev.packed_object())
def test_update_flow_conversion_attr_key_in_ignore_attributes(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "establishing",
{"bandwidth": "curr"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["enabled", "priority",
"status", "bandwidth"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_attributes_exist(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "curr"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 123456789, "establishing",
{"PortKey": "curr"})
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["enabled", "priority", "status"])
mock_flow.return_value = flow_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_update_flow_conversion_KeyError(self):
conversion_table = self.target._conversion_table
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__flow_conversion_table =\
{"network1::Flow02": ["network2::Flow02"],
"network2::Flow02": ["network1::Flow02"]}
flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
True, 123456789, "establishing",
{"PortKey": "prev"})
flow_curr = Flow("BasicFlow", "1", "Flow02", "Owner",
True, 987654321, "establishing",
{"PortKey": "curr"})
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.flow."
"flow.Flow."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util.network_interface."
"NetworkInterface.get_flow")
) as (logging_debug,
logging_error,
mock_flow,
mock_get_object,
mock_put_object,
mock_get_flow):
self.target._network_interfaces["network2"] = \
NetworkInterface(self.target.dispatcher, "network2")
self.value = Response(200, {"type": "Flow"})
mock_get_flow.return_value = flow_prev
self.target._Logic__get_ignore_keys =\
Mock(return_value=["bandwidth", "latency", "req_latency"])
mock_flow.side_effect = KeyError()
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
self.result = self.target._update_flow_conversion(
"network1", flow_prev, flow_curr, ["priority"])
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, {})
def test_delete_node_conversion_success(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, {"item": "node_item"})
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion("network2",
node_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
conversion_table._ConversionTable__node_conversion_table,
{"network1::Node01": ["network2::Node01"]})
self.assertEqual(
self.result["network1::Node02"].packed_object(),
node_prev.packed_object())
def test_delete_node_conversion_network_id_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, {"item": "node_item"})
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion(None,
node_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_node_conversion_node_None(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, {"item": "node_item"})
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion("network2",
None)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_node_conversion_node_id_not_in_interfaces(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.value = Response(200, {"item": "node_item"})
mock_node.return_value = node_prev
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion("network2",
node_prev)
self.assertEqual(
logging_error.call_count, 0)
self.assertEqual(
self.result, {})
def test_delete_node_conversion_KeyError(self):
conversion_table = self.target._conversion_table
with nested(patch("logging.debug"),
patch("logging.error"),
patch("org.o3project.odenos.core.component.network.topology."
"node.Node."
"create_from_packed"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_get_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_put_object_to_remote_object"),
patch("org.o3project.odenos.core.util."
"network_interface.NetworkInterface."
"_del_object_to_remote_object")
) as (logging_debug,
logging_error,
mock_node,
mock_get_object,
mock_put_object,
mock_del_object):
conversion_table._ConversionTable__network_conversion_table =\
{"network1": ["network2"]}
conversion_table._ConversionTable__node_conversion_table =\
{"network1::Node01": ["network2::Node01"],
"network2::Node02": ["network1::Node02"]}
port_prev = Port("Port", "1", "PortId", "NodeId",
"OutLink", "InLink", {"PortKey": "PortVal"})
node_prev = Node("Node", "0001", "Node02",
{"port_id": port_prev},
{"attribute_prev": "prev_value"})
self.target._network_interfaces["network1"] = \
NetworkInterface(self.target.dispatcher, "network1")
self.value = Response(200, {"key": "error"})
mock_node.side_effect = KeyError()
mock_get_object.return_value = self.value
mock_put_object.return_value = self.value
mock_del_object.return_value = self.value
self.result = self.target._delete_node_conversion("network2",
node_prev)
self.assertEqual(
logging_error.call_count, 1)
self.assertEqual(
self.result, None)
def test_delete_port_conversion_success(self):
    """Deleting a converted port removes its table entry and returns the peer port."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "port.Port."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_port,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        self.target._Logic__subscription_table =\
            {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__port_conversion_table =\
            {"network1::Node01::Port01": ["network2::Node01::Port01"],
             "network2::Node02::Port02": ["network1::Node02::Port02"]}
        port_prev = Port("Port", "1", "Port01", "Node01",
                         "OutLink", "InLink", {"PortKey": "prev"})
        # The converted network must have an interface registered.
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, "port_item")
        mock_port.return_value = port_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        self.result = self.target._delete_port_conversion("network1",
                                                          port_prev)
        self.assertEqual(
            logging_error.call_count, 0)
        # The deleted peer port is returned keyed by its conversion id.
        self.assertEqual(
            self.result["network2::Node01::Port01"].packed_object(),
            port_prev.packed_object())
        # Only the unrelated network2->network1 entry survives.
        self.assertEqual(
            conversion_table._ConversionTable__port_conversion_table,
            {"network2::Node02::Port02": ["network1::Node02::Port02"]})
def test_delete_port_conversion_network_id_None(self):
    """A None network_id is a no-op: no error, empty result dict."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "port.Port."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_port,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        self.target._Logic__subscription_table =\
            {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__port_conversion_table =\
            {"network1::Node01::Port01": ["network2::Node01::Port01"],
             "network2::Node02::Port02": ["network1::Node02::Port02"]}
        port_prev = Port("Port", "1", "Port01", "Node01",
                         "OutLink", "InLink", {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, "port_item")
        mock_port.return_value = port_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        # network_id is None -> nothing to convert.
        self.result = self.target._delete_port_conversion(None,
                                                          port_prev)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_delete_port_conversion_port_None(self):
    """A None port argument is a no-op: no error, empty result dict."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "port.Port."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_port,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        self.target._Logic__subscription_table =\
            {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__port_conversion_table =\
            {"network1::Node01::Port01": ["network2::Node01::Port01"],
             "network2::Node02::Port02": ["network1::Node02::Port02"]}
        port_prev = Port("Port", "1", "Port01", "Node01",
                         "OutLink", "InLink", {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, "port_item")
        mock_port.return_value = port_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        # port is None -> nothing to convert.
        self.result = self.target._delete_port_conversion("network1",
                                                          None)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_delete_port_conversion_port_id_not_in_interfaces(self):
    """Without a registered peer NetworkInterface nothing is deleted."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "port.Port."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_port,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        self.target._Logic__subscription_table =\
            {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__port_conversion_table =\
            {"network1::Node01::Port01": ["network2::Node01::Port01"],
             "network2::Node02::Port02": ["network1::Node02::Port02"]}
        port_prev = Port("Port", "1", "Port01", "Node01",
                         "OutLink", "InLink", {"PortKey": "prev"})
        # NOTE: no NetworkInterface is registered for "network2" here,
        # which is exactly the condition under test.
        self.value = Response(200, "port_item")
        mock_port.return_value = port_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        self.result = self.target._delete_port_conversion("network1",
                                                          port_prev)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_delete_port_conversion_KeyError(self):
    """A KeyError from Port.create_from_packed logs one error and returns None."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "port.Port."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_port,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        self.target._Logic__subscription_table =\
            {"PortChanged::UPDATE::publisher_id": ["oper_status"]}
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__port_conversion_table =\
            {"network1::Node01::Port01": ["network2::Node01::Port01"],
             "network2::Node02::Port02": ["network1::Node02::Port02"]}
        port_prev = Port("Port", "1", "Port01", "Node01",
                         "OutLink", "InLink", {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, "port_item")
        # Force the exception path inside _delete_port_conversion.
        mock_port.side_effect = KeyError()
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        self.result = self.target._delete_port_conversion("network1",
                                                          port_prev)
        self.assertEqual(
            logging_error.call_count, 1)
        self.assertEqual(
            self.result, None)
def test_delete_link_conversion_success(self):
    """Deleting a converted link removes its table entry and returns the peer link."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "link.Link."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_link,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__link_conversion_table =\
            {"network1::Link01": ["network2::Link01"],
             "network2::Link02": ["network1::Link02"]}
        link_prev = Link("Link", "1", "Link01", "SrcNode",
                         "SrcPort", "DstNode", "DstPort",
                         {"PortKey": "prev"})
        # The converted network must have an interface registered.
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, "link_item")
        mock_link.return_value = link_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        self.result = self.target._delete_link_conversion("network1",
                                                          link_prev)
        self.assertEqual(
            logging_error.call_count, 0)
        # The deleted peer link is returned keyed by its conversion id.
        self.assertEqual(
            self.result["network2::Link01"].packed_object(),
            link_prev.packed_object())
        # Only the unrelated network2->network1 entry survives.
        self.assertEqual(
            conversion_table._ConversionTable__link_conversion_table,
            {"network2::Link02": ["network1::Link02"]})
def test_delete_link_conversion_network_id_None(self):
    """A None network_id is a no-op: no error, empty result dict."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "link.Link."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_link,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__link_conversion_table =\
            {"network1::Link01": ["network2::Link01"],
             "network2::Link02": ["network1::Link02"]}
        link_prev = Link("Link", "1", "Link01", "SrcNode",
                         "SrcPort", "DstNode", "DstPort",
                         {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, "link_item")
        mock_link.return_value = link_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        # network_id is None -> nothing to convert.
        self.result = self.target._delete_link_conversion(None,
                                                          link_prev)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_delete_link_conversion_link_None(self):
    """A None link argument is a no-op: no error, empty result dict."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "link.Link."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_link,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__link_conversion_table =\
            {"network1::Link01": ["network2::Link01"],
             "network2::Link02": ["network1::Link02"]}
        link_prev = Link("Link", "1", "Link01", "SrcNode",
                         "SrcPort", "DstNode", "DstPort",
                         {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, "link_item")
        mock_link.return_value = link_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        # link is None -> nothing to convert.
        self.result = self.target._delete_link_conversion("network1",
                                                          None)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_delete_link_conversion_link_in_not_interfaces(self):
    """Without a registered peer NetworkInterface nothing is deleted."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "link.Link."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_link,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__link_conversion_table =\
            {"network1::Link01": ["network2::Link01"],
             "network2::Link02": ["network1::Link02"]}
        link_prev = Link("Link", "1", "Link01", "SrcNode",
                         "SrcPort", "DstNode", "DstPort",
                         {"PortKey": "prev"})
        # NOTE: no NetworkInterface is registered for "network2" here,
        # which is exactly the condition under test.
        self.value = Response(200, "link_item")
        mock_link.return_value = link_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        self.result = self.target._delete_link_conversion("network1",
                                                          link_prev)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_delete_link_conversion_KeyError(self):
    """A KeyError from Link.create_from_packed logs one error and returns None."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.topology."
                      "link.Link."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_link,
                      mock_get_object,
                      mock_put_object,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__link_conversion_table =\
            {"network1::Link01": ["network2::Link01"],
             "network2::Link02": ["network1::Link02"]}
        link_prev = Link("Link", "1", "Link01", "SrcNode",
                         "SrcPort", "DstNode", "DstPort",
                         {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, "link_item")
        # Force the exception path inside _delete_link_conversion.
        mock_link.side_effect = KeyError()
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        self.result = self.target._delete_link_conversion("network1",
                                                          link_prev)
        self.assertEqual(
            logging_error.call_count, 1)
        self.assertEqual(
            self.result, None)
def test_delete_flow_conversion_success(self):
    """Deleting a converted flow removes its table entry and returns the peer flow."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below (note the extra
    # get_flow patch compared with the node/port/link variants).
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.flow."
                      "flow.Flow."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "get_flow"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_flow,
                      mock_get_object,
                      mock_put_object,
                      mock_get_flow,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__flow_conversion_table =\
            {"network1::Flow01": ["network2::Flow01"],
             "network2::Flow02": ["network1::Flow02"]}
        flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
                         True, 123456789, "establishing",
                         {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, {"type": "Flow"})
        mock_flow.return_value = flow_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        # get_flow must return a real Flow so the delete path proceeds.
        mock_get_flow.return_value = flow_prev
        self.target._network_interfaces["network1"] = \
            NetworkInterface(self.target.dispatcher, "network1")
        self.result = self.target._delete_flow_conversion(
            "network1", flow_prev)
        self.assertEqual(
            logging_error.call_count, 0)
        # The deleted peer flow is returned keyed by its conversion id.
        self.assertEqual(
            self.result["network2::Flow01"].packed_object(),
            flow_prev.packed_object())
        # Only the unrelated network2->network1 entry survives.
        self.assertEqual(
            conversion_table._ConversionTable__flow_conversion_table,
            {"network2::Flow02": ["network1::Flow02"]})
def test_delete_flow_conversion_network_id_None(self):
    """A None network_id is a no-op: no error, empty result dict."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.flow."
                      "flow.Flow."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "get_flow"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_flow,
                      mock_get_object,
                      mock_put_object,
                      mock_get_flow,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__flow_conversion_table =\
            {"network1::Flow01": ["network2::Flow01"],
             "network2::Flow02": ["network1::Flow02"]}
        flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
                         True, 123456789, "establishing",
                         {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, {"type": "Flow"})
        mock_flow.return_value = flow_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        mock_get_flow.return_value = self.value
        # network_id is None -> nothing to convert.
        self.result = self.target._delete_flow_conversion(
            None, flow_prev)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_delete_flow_conversion_link_None(self):
    """A None flow argument is a no-op: no error, empty result dict."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.flow."
                      "flow.Flow."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "get_flow"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_flow,
                      mock_get_object,
                      mock_put_object,
                      mock_get_flow,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__flow_conversion_table =\
            {"network1::Flow01": ["network2::Flow01"],
             "network2::Flow02": ["network1::Flow02"]}
        flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
                         True, 123456789, "establishing",
                         {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, {"type": "Flow"})
        mock_flow.return_value = flow_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        mock_get_flow.return_value = self.value
        # flow is None -> nothing to convert.
        self.result = self.target._delete_flow_conversion(
            "network1", None)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_delete_flow_conversion_flow_id_interfaces(self):
    """Without a registered peer NetworkInterface nothing is deleted."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.flow."
                      "flow.Flow."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "get_flow"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_flow,
                      mock_get_object,
                      mock_put_object,
                      mock_get_flow,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__flow_conversion_table =\
            {"network1::Flow01": ["network2::Flow01"],
             "network2::Flow02": ["network1::Flow02"]}
        flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
                         True, 123456789, "establishing",
                         {"PortKey": "prev"})
        self.value = Response(200, {"type": "Flow"})
        mock_flow.return_value = flow_prev
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        mock_get_flow.return_value = flow_prev
        # NOTE: only "network1" is registered; "network2" (the conversion
        # target) is deliberately missing from _network_interfaces.
        self.target._network_interfaces["network1"] = \
            NetworkInterface(self.target.dispatcher, "network1")
        self.result = self.target._delete_flow_conversion(
            "network1", flow_prev)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result, {})
def test_delete_flow_conversion_KeyError(self):
    """A KeyError from Flow.create_from_packed logs one error and returns None."""
    conversion_table = self.target._conversion_table
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.flow."
                      "flow.Flow."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_get_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_put_object_to_remote_object"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "get_flow"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_flow,
                      mock_get_object,
                      mock_put_object,
                      mock_get_flow,
                      mock_del_object):
        conversion_table._ConversionTable__network_conversion_table =\
            {"network1": ["network2"]}
        conversion_table._ConversionTable__flow_conversion_table =\
            {"network1::Flow01": ["network2::Flow01"],
             "network2::Flow02": ["network1::Flow02"]}
        flow_prev = Flow("BasicFlow", "1", "Flow01", "Owner",
                         True, 123456789, "establishing",
                         {"PortKey": "prev"})
        self.target._network_interfaces["network2"] = \
            NetworkInterface(self.target.dispatcher, "network2")
        self.value = Response(200, {"type": "Flow"})
        # Force the exception path inside _delete_flow_conversion.
        mock_flow.side_effect = KeyError()
        mock_get_object.return_value = self.value
        mock_put_object.return_value = self.value
        mock_del_object.return_value = self.value
        mock_get_flow.return_value = self.value
        self.result = self.target._delete_flow_conversion(
            "network1", flow_prev)
        self.assertEqual(
            logging_error.call_count, 1)
        self.assertEqual(
            self.result, None)
def test_del_in_packet_conversion_success(self):
    """_del_in_packet returns the unpacked InPacket on a 200 response."""
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.packet."
                      "in_packet.InPacket."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_in_packet,
                      mock_del_object):
        basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
                                          "PortId")
        inpacket = InPacket("inpacket_id", "InPcket", "attributes",
                            "NodeId", "PortId", basic_flow_match,
                            "Data")
        self.target._network_interfaces["network1"] =\
            NetworkInterface(self.target.dispatcher, "network1")
        self.value = Response(200, "inpacket_item")
        mock_in_packet.return_value = inpacket
        mock_del_object.return_value = self.value
        self.result = self.target._del_in_packet(
            self.target._network_interfaces["network1"], "inpacket_id")
        self.assertEqual(
            logging_error.call_count, 0)
        # The returned packet round-trips to the same packed form.
        self.assertEqual(
            self.result.packed_object(), inpacket.packed_object())
def test_del_in_packet_conversion_Response_error(self):
    """A non-200 delete response logs one error and returns None."""
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.packet."
                      "in_packet.InPacket."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_in_packet,
                      mock_del_object):
        basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
                                          "PortId")
        inpacket = InPacket("inpacket_id", "InPcket", "attributes",
                            "NodeId", "PortId", basic_flow_match,
                            "Data")
        self.target._network_interfaces["network1"] =\
            NetworkInterface(self.target.dispatcher, "network1")
        # "400" (error status) triggers the error branch.
        self.value = Response("400", "inpacket_item")
        mock_in_packet.return_value = inpacket
        mock_del_object.return_value = self.value
        self.result = self.target._del_in_packet(
            self.target._network_interfaces["network1"], "inpacket_id")
        self.assertEqual(
            logging_error.call_count, 1)
        self.assertEqual(
            self.result, None)
def test_del_in_packet_conversion_KeyError(self):
    """A KeyError from InPacket.create_from_packed logs one error and returns None."""
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.packet."
                      "in_packet.InPacket."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_in_packet,
                      mock_del_object):
        basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
                                          "PortId")
        inpacket = InPacket("inpacket_id", "InPcket", "attributes",
                            "NodeId", "PortId", basic_flow_match,
                            "Data")
        self.target._network_interfaces["network1"] =\
            NetworkInterface(self.target.dispatcher, "network1")
        self.value = Response(200, "inpacket_item")
        # Force the exception path inside _del_in_packet.
        mock_in_packet.side_effect = KeyError()
        mock_del_object.return_value = self.value
        self.result = self.target._del_in_packet(
            self.target._network_interfaces["network1"], "inpacket_id")
        self.assertEqual(
            logging_error.call_count, 1)
        self.assertEqual(
            self.result, None)
def test_del_out_packet_conversion_success(self):
    """_del_out_packet returns the unpacked OutPacket on a 200 response."""
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.packet."
                      "out_packet.OutPacket."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_out_packet,
                      mock_del_object):
        basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
                                          "PortId")
        outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
                              "NodeId", "Ports", "Ports_ex",
                              basic_flow_match, "Data")
        self.target._network_interfaces["network1"] =\
            NetworkInterface(self.target.dispatcher, "network1")
        self.value = Response(200, "outpacket_item")
        mock_out_packet.return_value = outpacket
        mock_del_object.return_value = self.value
        self.result = self.target._del_out_packet(
            self.target._network_interfaces["network1"], "outpacket_id")
        # NOTE(review): expects exactly 3 debug-log calls on the success
        # path — brittle against logging changes in _del_out_packet.
        self.assertEqual(
            logging_debug.call_count, 3)
        self.assertEqual(
            logging_error.call_count, 0)
        self.assertEqual(
            self.result.packed_object(), outpacket.packed_object())
def test_del_out_packet_conversion_Response_error(self):
    """A non-200 delete response logs one error and returns None."""
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.packet."
                      "out_packet.OutPacket."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_out_packet,
                      mock_del_object):
        basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
                                          "PortId")
        outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
                              "NodeId", "Ports", "Ports_ex",
                              basic_flow_match, "Data")
        self.target._network_interfaces["network1"] =\
            NetworkInterface(self.target.dispatcher, "network1")
        # "400" (error status) triggers the error branch.
        self.value = Response("400", "outpacket_item")
        mock_out_packet.return_value = outpacket
        mock_del_object.return_value = self.value
        self.result = self.target._del_out_packet(
            self.target._network_interfaces["network1"], "outpacket_id")
        self.assertEqual(
            logging_error.call_count, 1)
        self.assertEqual(
            self.result, None)
def test_del_out_packet_conversion_KeyError(self):
    """A KeyError from OutPacket.create_from_packed logs one error and returns None."""
    # Patch order must match the as-tuple order below.
    with nested(patch("logging.debug"),
                patch("logging.error"),
                patch("org.o3project.odenos.core.component.network.packet."
                      "out_packet.OutPacket."
                      "create_from_packed"),
                patch("org.o3project.odenos.core.util."
                      "network_interface.NetworkInterface."
                      "_del_object_to_remote_object")
                ) as (logging_debug,
                      logging_error,
                      mock_out_packet,
                      mock_del_object):
        basic_flow_match = BasicFlowMatch("BasicFlowMatch", "NodeId",
                                          "PortId")
        outpacket = OutPacket("outpacket_id", "OutPcket", "attributes",
                              "NodeId", "Ports", "Ports_ex",
                              basic_flow_match, "Data")
        self.target._network_interfaces["network1"] =\
            NetworkInterface(self.target.dispatcher, "network1")
        self.value = Response(200, "outpacket_item")
        # Force the exception path inside _del_out_packet.
        mock_out_packet.side_effect = KeyError()
        mock_del_object.return_value = self.value
        self.result = self.target._del_out_packet(
            self.target._network_interfaces["network1"], "outpacket_id")
        self.assertEqual(
            logging_error.call_count, 1)
        self.assertEqual(
            self.result, None)
def test_get_ignore_keys_match(self):
    """A qualified key ("attributes::...") still filters the matching attribute."""
    all_port_attributes = [
        "oper_status",
        "physical_id",
        "vendor",
        "max_bandwidth",
        "unreserved_bandwidth",
        "is_boundary",
    ]
    self.result = self.target._Logic__get_ignore_keys(
        all_port_attributes, ["attributes::unreserved_bandwidth"])
    expected = [
        "oper_status",
        "physical_id",
        "vendor",
        "max_bandwidth",
        "is_boundary",
    ]
    self.assertEqual(self.result, expected)
def test_get_ignore_keys_not_match(self):
    """An unqualified key also filters the matching attribute."""
    all_port_attributes = [
        "oper_status",
        "physical_id",
        "vendor",
        "max_bandwidth",
        "unreserved_bandwidth",
        "is_boundary",
    ]
    self.result = self.target._Logic__get_ignore_keys(
        all_port_attributes, ["unreserved_bandwidth"])
    expected = [
        "oper_status",
        "physical_id",
        "vendor",
        "max_bandwidth",
        "is_boundary",
    ]
    self.assertEqual(self.result, expected)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Print GTI (Good Time Interval) information of a FITS file.

For event files (GTI/STDGTI extension) each interval's START, STOP and
exposure are listed; for mkf files (PREFILTER extension) the number of
rows is reported as the exposure.
"""
import argparse

import astropy.io.fits as pyfits

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        prog='show_gti.py',
        usage='show_gti.py input.fits',
        description='Show GTI information',
        epilog='',
        add_help=True,
    )
    parser.add_argument(
        'input_fits', metavar='input_fits', type=str,
        help='Input fits file.')
    args = parser.parse_args()

    hdu = pyfits.open(args.input_fits)

    # Collect the EXTNAME of every extension; skip HDUs without one
    # (e.g. the primary HDU). Narrowed from a bare `except:` so that
    # unrelated errors are no longer silently swallowed.
    extname_list = []
    for i in range(len(hdu)):
        try:
            extname_list.append(hdu[i].header['EXTNAME'])
        except KeyError:
            pass

    # Initialize so an unrecognized file yields a clean "0.000 s" report
    # instead of a NameError on `filetype` below.
    gtiname = None
    filetype = None
    if 'GTI' in extname_list:
        gtiname = 'GTI'
        filetype = 'evt'
    elif 'STDGTI' in extname_list:
        gtiname = 'STDGTI'
        filetype = 'evt'
    elif 'PREFILTER' in extname_list:
        filetype = 'mkf'

    total_exp = 0.0
    if filetype == 'evt':
        num_of_gti = len(hdu[gtiname].data)
        print("# GTI-num START STOP Exposure(s)")
        for i in range(num_of_gti):
            gti_start = hdu[gtiname].data[i]['START']
            gti_stop = hdu[gtiname].data[i]['STOP']
            gti_exp = gti_stop - gti_start
            print("%03d: %.3f %.3f %.3f" % (i+1, gti_start, gti_stop, gti_exp))
            total_exp += gti_exp
    elif filetype == 'mkf':
        # NOTE(review): this counts PREFILTER rows, implicitly assuming
        # one second per row — confirm the sampling interval.
        total_exp = len(hdu['PREFILTER'].data)
    print("# Total exposure: %.3f (s)" % total_exp)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""Interactively collect device settings and write data/config_file.json."""
import os.path
import json

# Config directory: ../../data relative to this script.
dir_path = os.path.normpath(os.path.join(__file__, '..' , '..' , 'data'))
file_path = os.path.join(dir_path, 'config_file.json')
## get user input-----------------------------------------
ssid = input("Enter wifi ssid: ")
pswd = input("Enter wifi password: ")
name = input("Enter device name: ")
device_position = input("Enter device position: ")
pub_topic = input("Enter publish topic: ")
sub_topic = input("Enter subscribe topic: ")
logger_level = input("""
LOG_LEVEL_OFF 0 no logging
LOG_LEVEL_FATAL 1 Designates very severe error events that will presumably lead the application to abort
LOG_LEVEL_ERROR 2 Designates error events that might still allow the application to continue running
LOG_LEVEL_WARNING 3 Designates potentially harmful situations
LOG_LEVEL_INFO 4 Designates informational messages that highlight the progress of the application at coarse-grained level.
LOG_LEVEL_DEBUG 5 Designates fine-grained informational events that are most useful to debug an application.
Enter logger level number: """)
logger_output = input("""
LOG_OUTPUT_SERIAL_DIRECT 0 Logging events are directly printed over serial interface when they occure
LOG_OUTPUT_SERIAL_BEGIN 1 Logging events are stored in the SPIFFS and printed over the serial interface at the start of the programm
LOG_OUTPUT_SERIAL_DIRECT_BEGIN 2 Comibination of LOG_OUTPUT_SERIAL_DIRECT and LOG_OUTPUT_SERIAL_BEGIN
LOG_OUTPUT_OFF 3 Not output at all
Enter logger output number: """)
## create json--------------------------------------------
json_dict = {
    "ssid":ssid,
    "password":pswd,
    "aws_url":"aq60dkt3q20bd-ats.iot.eu-central-1.amazonaws.com",
    "aws_port":8883,
    "mqtt_pub_topic":pub_topic,
    "mqtt_sub_topic":sub_topic,
    "device_id":name,
    "device_position":device_position,
    "logger_level":int(logger_level),
    "logger_output":int(logger_output)
}
file_data = json.dumps(json_dict, indent=0)
## write to file------------------------------------------
# exist_ok avoids the check-then-create race of the original code.
os.makedirs(dir_path, exist_ok=True)
# `with` guarantees the file is closed even if the write fails
# (the original open/write/close leaked the handle on error).
with open(file_path, "w") as f:
    f.write(file_data)
|
nilq/baby-python
|
python
|
from abc import ABCMeta
from discord.ext.commands import CogMeta
from bot.utils.redis_cache import RedisCache
__all__ = ['RedisCache', 'CogABCMeta']
class CogABCMeta(CogMeta, ABCMeta):
    """Metaclass combining discord.py's CogMeta with ABCMeta, so a class can
    be an abstract base class and still be implemented as a Cog."""
def pad_base64(data: str) -> str:
    """Return base64 `data` padded with '=' so its length is a multiple of 4."""
    remainder = len(data) % 4
    if remainder:
        return data + "=" * (4 - remainder)
    return data
|
nilq/baby-python
|
python
|
# First
# Hello World
# The following "import <x> skill" lines are not valid Python syntax and
# made this file fail with SyntaxError; they are preserved as comments.
# import developer skill
# import resilience skill
# import persistence skill
pythonapprentice = 'Johnny'  # str() wrapper around a literal was redundant
print(f'welcome to the python world {pythonapprentice}')
print('Learning...')
|
nilq/baby-python
|
python
|
from tweets.models import Comment
from django.db import router
# from posts.views import my_view
from rest_framework import routers
from django.urls.conf import include
from django.urls import path
from tweets.views import TweetViewSet, LikeViewSet, RetweetviewSet, CommentviewSet, index
# NOTE(review): `router` imported from django.db above is immediately
# shadowed by the DRF router below; the django.db import appears unused.
# DRF router generates the CRUD URL patterns for each registered viewset.
router = routers.DefaultRouter()
router.register(r'tweets', TweetViewSet)
router.register(r'likes', LikeViewSet)
router.register(r'retweet',RetweetviewSet)
# router.register(r'trends',TrendsviewSet)
router.register(r"comment",CommentviewSet)
# index view plus all router-generated endpoints at the root.
urlpatterns = [
    path('index/', index),
    path("", include(router.urls))
]
|
nilq/baby-python
|
python
|
from Repository.eventTriggerOutputGroupingSetupValueRepo import eventTriggerOutputGroupingSetupValueRepo
from sqlalchemy import Table
from sqlalchemy.engine.base import Connection
from sqlalchemy.sql.expression import BinaryExpression
class eventTriggerOutputGroupingSetupValueServices():
    """Thin service layer over the grouping-setup-value repository."""

    # Repository that performs the actual persistence work.
    __repo: eventTriggerOutputGroupingSetupValueRepo

    def __init__(self, eventTriggerOutputGroupingSetupValueTable: Table, context: Connection):
        """Bind a repository to the given table and database connection."""
        self.__repo = eventTriggerOutputGroupingSetupValueRepo(
            eventTriggerOutputGroupingSetupValueTable, context=context)

    def AddManyEventTriggerOutputGroupSetupValueWithCustomData(self, l: list):
        """Bulk-insert the given rows via the repository."""
        self.__repo.InsertManyWithCustomData(l)
|
nilq/baby-python
|
python
|
'''
Version and license information.
'''
# Names exported by this module.
__all__ = ['__version__', '__versiondate__', '__license__']
# Package semantic version.
__version__ = '1.3.3'
# Release date (ISO 8601).
__versiondate__ = '2022-01-16'
# Human-readable license line built from the metadata above.
__license__ = f'Sciris {__version__} ({__versiondate__}) – © 2014-2022 by the Sciris Development Team'
|
nilq/baby-python
|
python
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
def _impl(ctx):
    """Core implementation of passwd_file.

    Writes one passwd(5)-formatted line for the configured user, then
    invokes build_tar to package it into a tar rooted at /etc/passwd.
    """
    # passwd(5) line: name:x:uid:gid:info:home:shell
    f = "%s:x:%s:%s:%s:%s:%s\n" % (
        ctx.attr.username,
        ctx.attr.uid,
        ctx.attr.gid,
        ctx.attr.info,
        ctx.attr.home,
        ctx.attr.shell
    )
    # NOTE(review): ctx.file_action / ctx.new_file / ctx.action are legacy
    # Bazel APIs (pre `ctx.actions.*`) -- confirm the Bazel version in use
    # still supports them.
    ctx.file_action(
        output = ctx.outputs.out,
        content = f,
        executable=False
    )
    build_tar = ctx.executable.build_tar
    # Arguments are passed via a flagfile to build_tar.
    args = [
        "--output=" + ctx.outputs.tar.path,
        "--file=%s=/etc/passwd" % ctx.outputs.out.path
    ]
    arg_file = ctx.new_file(ctx.attr.name + ".args")
    ctx.file_action(arg_file, "\n".join(args))
    ctx.action(
        executable = build_tar,
        arguments = ["--flagfile=" + arg_file.path],
        inputs = [ctx.outputs.out, arg_file],
        outputs = [ctx.outputs.tar],
        use_default_shell_env = True
    )
# Rule producing <name>.passwd (a single /etc/passwd entry) and
# <name>.passwd.tar (the same file packaged under /etc/passwd).
passwd_file = rule(
    attrs = {
        "username": attr.string(mandatory = True),
        "uid": attr.int(default = 1000),
        "gid": attr.int(default = 1000),
        "info": attr.string(default = "user"),
        "home": attr.string(default = "/home"),
        "shell": attr.string(default = "/bin/bash"),
        # Helper binary that assembles the tar output.
        "build_tar": attr.label(
            default = Label("@bazel_tools//tools/build_defs/pkg:build_tar"),
            cfg = "host",
            executable = True,
            allow_files = True,
        ),
    },
    executable = False,
    outputs = {
        "out": "%{name}.passwd",
        "tar": "%{name}.passwd.tar",
    },
    implementation = _impl,
)
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import os
import sys
import traceback
from webob.exc import HTTPNotFound, HTTPInternalServerError
from .config import Config
from .config import get_config
from .request import Request
from .response import Response
from .exceptions import PageNotFound
from .tools import import_module
from .views import View
class Router(object):
    """
    Main project router that calls appropriate controller.
    TODO:
    - decorate each controller's call with middleware
    - (done) load all controllers and their actions to dict to speedup
    lookup of desired url address
    """
    def __init__(self):
        """
        Load all controllers.
        It allow us to speed-up get controller by given url.
        """
        # Maps "controller/action" url paths to bound action callables.
        self._controllers = {}
        # Project root = directory of the executed script (sys.argv[0]).
        self._project_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
        self._load_config()
        self._load_controllers()
        self._init_view()
    def __call__(self, environ, start_response):
        """
        Find appropriate controller for requested address.
        Return Response object that support the WSGI interface.
        """
        request = Request(environ)
        try:
            controller_name = request.get_controller_name()
            action_name = request.get_action_name()
            action_handler = self.get_action_handler(controller_name, action_name)
            if not callable(action_handler):
                # action handler should be a callable function
                raise PageNotFound(
                    "Controller '{name}' doesn't have action '{action}'",
                    name=controller_name,
                    action=action_name
                )
            resp = action_handler(request)
            if not isinstance(resp, Response):
                raise Exception("Controller should return Response object, but given '{}'".format(type(resp)))
        except PageNotFound as err:
            # NOTE(review): the traceback is embedded in the 404/500 body --
            # useful for development, confirm it is acceptable in production.
            message = self._format_error_message(str(err), with_traceback=True)
            return HTTPNotFound(message)(environ, start_response)
        except Exception as err:
            message = self._format_error_message(str(err), with_traceback=True)
            return HTTPInternalServerError(message)(environ, start_response)
        return resp(environ, start_response)
    def _load_config(self):
        """
        Load config for current project.
        """
        self._config = Config()
    def _load_controllers(self):
        """
        Load all controllers from folder 'controllers'.
        Ignore files with leading underscore (for example: controllers/_blogs.py)
        """
        for file_name in os.listdir(os.path.join(self._project_dir, 'controllers')):
            # ignore disabled controllers
            if not file_name.startswith('_'):
                module_name = file_name.split('.', 1)[0]
                module_path = "controllers.{}".format(module_name)
                module = import_module(module_path)
                # transform 'blog_articles' file name to 'BlogArticles' class
                controller_class_name = module_name.title().replace('_', '')
                controller_class = getattr(module, controller_class_name)
                controller = controller_class()
                # Register every public callable attribute as an action.
                for action_name in dir(controller):
                    action = getattr(controller, action_name)
                    if action_name.startswith('_') or not callable(action):
                        continue
                    url_path = "/".join([module_name, action_name])
                    self._controllers[url_path] = action
        return self._controllers
    def _init_view(self):
        """
        Initialize View with project settings.
        """
        views_engine = get_config('rails.views.engine', 'jinja')
        templates_dir = os.path.join(self._project_dir, "views", "templates")
        self._view = View(views_engine, templates_dir)
    def _format_error_message(self, msg, with_traceback=False):
        # Append the current exception traceback as preformatted HTML.
        if with_traceback:
            tb = traceback.format_exc()
            msg += "<h3>Traceback</h3>\n\n<pre>{}</pre>".format(tb)
        return msg
    def get_action_handler(self, controller_name, action_name):
        """
        Return action of controller as callable.
        If requested controller isn't found - return 'not_found' action
        of requested controller or Index controller.
        """
        try_actions = [
            controller_name + '/' + action_name,
            controller_name + '/not_found',
            # call Index controller to catch all unhandled pages
            'index/not_found'
        ]
        # search first appropriate action handler
        for path in try_actions:
            if path in self._controllers:
                return self._controllers[path]
        return None
|
nilq/baby-python
|
python
|
"""
Clean up & organize outputs from processing workflow batch.
"""
import logging
import os
import re
import zipfile
import shutil
logger = logging.getLogger(__name__)
class OutputCleaner(object):
    """
    Moves, renames, and deletes individual output files from a workflow
    processing batch for a selected project.

    Fix: the class previously mixed root-logger calls (``logging.debug``)
    with the module logger (``logger.debug``); all calls now go through
    the module logger so they respect its configuration.
    """
    def __init__(self, path):
        # `path` is the project folder whose output subfolders are cleaned.
        logger.debug("creating `OutputCleaner` instance for '{}'".format(path))
        self.path = path
        self.output_types = self._get_output_types()

    def _get_output_types(self):
        """
        Identify the types of outputs included for the project.

        Matching is case-insensitive, but the subfolder's original casing
        is preserved in the returned list.
        """
        out_types = ['qc', 'metrics', 'counts', 'alignments', 'logs']
        logger.debug("subfolders in project folder: {}"
                     .format(os.listdir(self.path)))
        return [f for f in os.listdir(self.path)
                if f.lower() in out_types]

    def _get_output_paths(self, output_type):
        """
        Return full path for individual output files, skipping
        .DS_Store artifacts and anything marked '_old'.
        """
        logger.debug("locating output files of type '{}'".format(output_type))
        output_root = os.path.join(self.path, output_type)
        return [os.path.join(self.path, root, f)
                for root, dirs, files in os.walk(output_root)
                for f in files
                if not re.search('(DS_Store|_old)', f)]

    def _unzip_output(self, path):
        """
        Unzip the contents of a compressed output file next to it and
        return the list of extracted paths.
        """
        logger.debug("extracting contents of '{}' to '{}'"
                     .format(path, os.path.dirname(path)))
        paths = []
        with zipfile.ZipFile(path) as zf:
            logger.debug("zip folder contents: {}".format(zf.namelist()))
            for f in zf.namelist():
                if f != './':
                    paths.append(zf.extract(f, os.path.dirname(path)))
        logger.debug("unzipped the following files: {}".format(paths))
        return paths

    def _unnest_output(self, path):
        """
        Unnest files in a subfolder by concatenating filenames and
        moving up one level.
        """
        logger.debug("unnesting output '{}' from subfolder '{}'"
                     .format(path, os.path.dirname(path)))
        # New name prefix = containing folder path, so 'sub/file' -> 'sub_file'.
        prefix = os.path.dirname(path)
        if re.search('.zip$', path):
            logger.debug("unzipping contents of '{}' before unnesting"
                         .format(path))
            for p in self._unzip_output(path):
                shutil.move(p, '{}_{}'.format(prefix, os.path.basename(p)))
            try:
                # Remove the now-empty extraction folder, if one was created.
                shutil.rmtree(os.path.splitext(path)[0])
            except OSError:
                pass
        else:
            shutil.move(path, '{}_{}'.format(prefix, os.path.basename(path)))

    def _recode_output(self, path, output_type):
        """
        Rename file according to template; returns the new path.
        Only 'QC' currently has a rename template.
        """
        filename_map = {'QC': ('fastqc_data.txt', 'fastqc_qc.txt')}
        swap = filename_map[output_type]
        newpath = re.sub(swap[0], swap[1], path)
        logger.debug("renaming '{}' to '{}'".format(path, newpath))
        shutil.move(path, newpath)
        return newpath

    def clean_outputs(self):
        """
        Walk through output types to unzip, unnest, and rename files.
        """
        for output_type in self.output_types:
            # Only QC outputs need unnesting/renaming at present.
            if output_type == 'QC':
                outputs = self._get_output_paths(output_type)
                for o in outputs:
                    outregex = re.compile(output_type + '$')
                    if not outregex.search(os.path.dirname(o)):
                        self._unnest_output(o)
                for o in os.listdir(os.path.join(self.path, output_type)):
                    self._recode_output(
                        os.path.join(self.path, output_type, o),
                        output_type
                    )
|
nilq/baby-python
|
python
|
# Copyright 2018 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add module docstring."""
import json
from datetime import datetime
import responses
from sampledata import acl_rule, forward_zone, ip4_zone, ip6_zone, sample_zone_change
from vinyldns.serdes import to_json_string, from_json_string
from vinyldns.zone import Zone, ZoneChange, ListZonesResponse, ListZoneChangesResponse
def check_zone_connections_are_same(a, b):
    """Assert two zone connections match: both None, or equal field-by-field."""
    if a is None:
        assert b is None
        return
    for field in ('primary_server', 'key', 'name', 'key_name'):
        assert getattr(a, field) == getattr(b, field)
def check_zones_are_same(a, b):
    """Assert two zones match on scalar fields, both connections, and ACL rules."""
    for field in ('id', 'name', 'email', 'admin_group_id', 'status',
                  'updated', 'created'):
        assert getattr(a, field) == getattr(b, field)
    check_zone_connections_are_same(a.connection, b.connection)
    check_zone_connections_are_same(a.transfer_connection, b.transfer_connection)
    assert all(x.__dict__ == y.__dict__ for x, y in zip(a.acl.rules, b.acl.rules))
def test_zone_serdes():
    """Round-trip a zone through JSON and verify key fields survive."""
    s = to_json_string(forward_zone)
    print(json.dumps(s, indent=4))
    z = from_json_string(s, Zone.from_dict)
    assert z.name == forward_zone.name
    assert z.connection.primary_server == forward_zone.connection.primary_server
    assert all([a.__dict__ == b.__dict__ for a, b in zip(z.acl.rules, forward_zone.acl.rules)])
def test_connect_zone(mocked_responses, vinyldns_client):
    """connect_zone POSTs to /zones and returns the zone from the change."""
    mocked_responses.add(
        responses.POST, 'http://test.com/zones',
        body=to_json_string(sample_zone_change), status=200)
    r = vinyldns_client.connect_zone(forward_zone)
    check_zones_are_same(forward_zone, r.zone)
def test_update_zone(mocked_responses, vinyldns_client):
    """update_zone PUTs to /zones/{id} and returns the zone from the change."""
    mocked_responses.add(
        responses.PUT, 'http://test.com/zones/{0}'.format(forward_zone.id),
        body=to_json_string(sample_zone_change), status=200)
    r = vinyldns_client.update_zone(forward_zone)
    check_zones_are_same(forward_zone, r.zone)
def test_abandon_zone(mocked_responses, vinyldns_client):
    """abandon_zone DELETEs /zones/{id} and returns the zone from the change."""
    mocked_responses.add(
        responses.DELETE, 'http://test.com/zones/{0}'.format(forward_zone.id),
        body=to_json_string(sample_zone_change), status=200)
    r = vinyldns_client.abandon_zone(forward_zone.id)
    check_zones_are_same(forward_zone, r.zone)
def test_sync_zone(mocked_responses, vinyldns_client):
    """sync_zone POSTs to /zones/{id}/sync and returns the full zone change."""
    mocked_responses.add(
        responses.POST, 'http://test.com/zones/{0}/sync'.format(forward_zone.id),
        body=to_json_string(sample_zone_change), status=200)
    r = vinyldns_client.sync_zone(forward_zone.id)
    assert sample_zone_change.id == r.id
    assert sample_zone_change.change_type == r.change_type
    assert sample_zone_change.status == r.status
    assert sample_zone_change.system_message == r.system_message
    assert sample_zone_change.user_id == r.user_id
    check_zones_are_same(forward_zone, r.zone)
def test_list_zones(mocked_responses, vinyldns_client):
    """list_zones GETs /zones with paging params and returns zones + metadata."""
    lzr = ListZonesResponse(zones=[forward_zone, ip4_zone, ip6_zone], name_filter='*', start_from='start-from',
                            next_id='next', max_items=100)
    mocked_responses.add(
        responses.GET, 'http://test.com/zones?nameFilter=*&startFrom=start-from&maxItems=100',
        body=to_json_string(lzr), status=200
    )
    r = vinyldns_client.list_zones('*', 'start-from', 100)
    assert r.name_filter == lzr.name_filter
    assert r.start_from == lzr.start_from
    assert r.next_id == lzr.next_id
    assert r.max_items == lzr.max_items
    # Renamed from `l, r`: the old loop variable `r` shadowed the response `r`.
    for expected, actual in zip(lzr.zones, r.zones):
        check_zones_are_same(expected, actual)
def test_get_zone(mocked_responses, vinyldns_client):
    """get_zone GETs /zones/{id} and returns the wrapped zone."""
    mocked_responses.add(
        responses.GET, 'http://test.com/zones/{0}'.format(forward_zone.id),
        body=to_json_string({'zone': forward_zone}), status=200)
    r = vinyldns_client.get_zone(forward_zone.id)
    check_zones_are_same(forward_zone, r)
def test_list_zone_changes(mocked_responses, vinyldns_client):
    """list_zone_changes GETs /zones/{id}/changes and returns paged changes."""
    change1 = ZoneChange(zone=forward_zone, user_id='some-user', change_type='Create', status='Pending',
                         created=datetime.utcnow(), system_message=None, id='zone-change-id1')
    change2 = ZoneChange(zone=ip4_zone, user_id='some-user', change_type='Create', status='Pending',
                         created=datetime.utcnow(), system_message='msg', id='zone-change-id2')
    lzcr = ListZoneChangesResponse(forward_zone.id, [change1, change2], 'next', 'start', 100)
    mocked_responses.add(
        responses.GET, 'http://test.com/zones/{0}/changes?startFrom=start&maxItems=100'.format(forward_zone.id),
        body=to_json_string(lzcr), status=200
    )
    r = vinyldns_client.list_zone_changes(forward_zone.id, 'start', 100)
    assert r.start_from == lzcr.start_from
    assert r.next_id == lzcr.next_id
    assert r.max_items == lzcr.max_items
    # Renamed from `l, r`: the old loop variable `r` shadowed the response `r`.
    for expected, actual in zip(lzcr.zone_changes, r.zone_changes):
        assert expected.id == actual.id
        assert expected.user_id == actual.user_id
        assert expected.change_type == actual.change_type
        assert expected.status == actual.status
        assert expected.created == actual.created
        assert expected.system_message == actual.system_message
        check_zones_are_same(expected.zone, actual.zone)
def test_add_acl_rule(mocked_responses, vinyldns_client):
    """add_zone_acl_rule PUTs to /zones/{id}/acl/rules and returns a change."""
    mocked_responses.add(
        responses.PUT, 'http://test.com/zones/{0}/acl/rules'.format(forward_zone.id),
        body=to_json_string(sample_zone_change)
    )
    r = vinyldns_client.add_zone_acl_rule(forward_zone.id, acl_rule)
    check_zones_are_same(r.zone, sample_zone_change.zone)
def test_delete_acl_rule(mocked_responses, vinyldns_client):
    """delete_zone_acl_rule DELETEs /zones/{id}/acl/rules and returns a change."""
    mocked_responses.add(
        responses.DELETE, 'http://test.com/zones/{0}/acl/rules'.format(forward_zone.id),
        body=to_json_string(sample_zone_change)
    )
    r = vinyldns_client.delete_zone_acl_rule(forward_zone.id, acl_rule)
    check_zones_are_same(r.zone, sample_zone_change.zone)
|
nilq/baby-python
|
python
|
import sftoolbox
class Variable(object):
    """Base class for project variables.

    Instances register themselves with the owning project on construction.
    """

    def __init__(self, project):
        """Register this variable with *project* and remember it."""
        project.add(self)
        self.project = project
        self.idname = None

    def _apply_json(self, data):
        """Populate ``label`` and ``idname`` from a JSON dict."""
        self.label = data.get('label')
        self.idname = data.get('idname')

    @classmethod
    def from_json(cls, project, value):
        """Build an instance of *cls* from JSON data."""
        variable = cls(project)
        variable._apply_json(value)
        return variable
@sftoolbox.engine.register_variable_class
class TextVariable(Variable):
    """Variable holding a single text value."""

    # JSON `type` discriminator handled by this class.
    json_type = 'text'

    def __init__(self, project):
        """Initialise with an empty value."""
        super(TextVariable, self).__init__(project)
        self._value = None

    @property
    def value(self):
        """Current text value (None until set)."""
        return self._value

    @value.setter
    def value(self, new_value):
        self._value = new_value

    def set_value(self, value):
        """Imperative alias for the ``value`` setter."""
        self.value = value

    def get_value(self):
        """Imperative alias for the ``value`` getter."""
        return self.value

    def _apply_json(self, data):
        """Apply JSON data; no text-specific fields beyond the base class."""
        super(TextVariable, self)._apply_json(data)
def from_json(project, value):
    """Create a variable from JSON, dispatching on its ``type`` field.

    Falls back to the plain ``Variable`` when no registered class matches.
    """
    wanted_type = value.get('type')
    for candidate in sftoolbox.engine.variable_classes_register:
        if candidate.json_type == wanted_type:
            return candidate.from_json(project, value)
    return Variable.from_json(project, value)
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.7 on 2021-12-07 12:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Renames Image.comments to comment and introduces the Comment model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('insta', '0002_auto_20211206_1623'),
    ]
    operations = [
        # Free the old field name so the new Comment relation can use
        # related_name='comments' without clashing.
        migrations.RenameField(
            model_name='image',
            old_name='comments',
            new_name='comment',
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=250)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='insta.image')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
nilq/baby-python
|
python
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
from nnabla.utils.image_utils import imsave
import numpy as np
import random
def save_generations(rgb_output, filepath, drange=[-1, 1], return_images=False):
    """
    Save generated images as ``{filepath}_{i}.png``, or return them.

    Args:
        rgb_output: batch of images; an ``nn.Variable`` (read via ``.d``)
            or an array-holding object (read via ``.data``).
        filepath: output filename prefix.
        drange: input value range mapped linearly onto [0, 255].
        return_images: if True, return uint8 arrays instead of saving.

    NOTE: the mutable default ``drange=[-1, 1]`` is kept for interface
    compatibility; it is never mutated here.
    """
    if return_images:
        images = []
    # Linear map drange -> [0, 255]; loop-invariant, so computed once
    # instead of per image as before.
    scale = 255 / (drange[1] - drange[0])
    shift = 0.5 - drange[0] * scale
    for i in range(rgb_output.shape[0]):
        if isinstance(rgb_output, nn.Variable):
            image = rgb_output.d[i] * scale + shift
        else:
            image = rgb_output.data[i] * scale + shift
        if return_images:
            images.append(np.uint8(np.clip(image, 0, 255)))
        else:
            imsave(f'{filepath}_{i}.png', np.uint8(
                np.clip(image, 0, 255)), channel_first=True)
            print(f'Output saved. Saved {filepath}_{i}.png')
    if return_images:
        return images
def collect_data(data):
    """Stack a list of equally-shaped arrays along a new axis 1."""
    expanded = [np.expand_dims(item, 1) for item in data]
    return np.concatenate(expanded, 1)
def mixing_noise(batch_size, latent_dim, mixing_prob, seed):
    """Return two latent batches; with probability `mixing_prob` the second
    batch is an independent draw seeded by seed[1], otherwise both are the
    same batch seeded by seed[0]."""
    primary = np.random.RandomState(seed=seed[0]).randn(
        batch_size, latent_dim).astype(np.float32)
    if mixing_prob > 0 and random.random() < mixing_prob:
        secondary = np.random.RandomState(seed=seed[1]).randn(
            batch_size, latent_dim).astype(np.float32)
        return primary, secondary
    return primary, primary
def slerp(noise_1, noise_2, ratio):
    """Spherical linear interpolation between paired latent batches.

    Each pair (a, b) is L2-normalised along axis 1 and interpolated along
    the great circle by `ratio`; the results are re-normalised to unit norm.
    Builds nnabla graph ops, so inputs are nn.Variable-like.
    """
    interpolated_noises = []
    for a, b in zip(noise_1, noise_2):
        # L2 norms along the latent axis (kept as columns for broadcasting).
        a_norm = F.pow_scalar(
            F.sum(F.pow_scalar(a, 2), axis=1, keepdims=True), 0.5)
        b_norm = F.pow_scalar(
            F.sum(F.pow_scalar(b, 2), axis=1, keepdims=True), 0.5)
        a /= a_norm
        b /= b_norm
        # d = cos(angle between a and b); p = fraction of that angle.
        d = F.sum(a*b, axis=1, keepdims=True)
        p = ratio*F.acos(d)
        # c = component of b orthogonal to a, normalised.
        c = b-d*a
        c_norm = F.pow_scalar(
            F.sum(F.pow_scalar(c, 2), axis=1, keepdims=True), 0.5)
        c /= c_norm
        # Rotate a towards c by angle p, then re-normalise.
        d = a*F.cos(p) + c*F.sin(p)
        d = d/F.pow_scalar(F.sum(F.pow_scalar(d, 2),
                                 axis=1, keepdims=True), 0.5)
        interpolated_noises.append(d)
    return interpolated_noises
def lerp(a, b, t):
    """Linear interpolation from ``a`` to ``b`` by fraction ``t``."""
    delta = b - a
    return a + delta * t
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
'''
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Quick description:
'''
# standard library
from math import *
from copy import deepcopy
# local library
import inkex
import pathmodifier
import cubicsuperpath
import bezmisc
import simplepath
import simpletransform
def getColorAndOpacity(longColor):
    '''
    Convert the long into a #rrggbb color value
    Conversion back is A + B*256^1 + G*256^2 + R*256^3
    '''
    # NOTE(review): `long` exists only in Python 2; this extension targets
    # the legacy Inkscape 0.x / Python 2 API.
    longColor = long(longColor)
    if longColor < 0:
        # Wrap negative values into an unsigned 32-bit RGBA integer.
        longColor = longColor & 0xFFFFFFFF
    hexColor = hex(longColor)
    # Presumably relies on Python 2's hex() appending 'L' to longs: the two
    # characters before the 'L' are the alpha byte -- confirm on target env.
    hexOpacity = hexColor[-3:-1]
    hexColor = '#' + hexColor[2:-3].rjust(6, '0')
    return (hexColor, hexOpacity)
def setColorAndOpacity(style, color, opacity):
    """Rewrite the stroke color and stroke-opacity in a CSS style string.

    `opacity` is a two-digit hex string (00-ff) converted to a 0.0-1.0
    float; a stroke-opacity declaration is appended when missing.
    """
    opacityValue = round(int(opacity, 16) / 255.0, 8)
    declarations = style.split(';')
    hasStrokeOpacity = False
    for idx, declaration in enumerate(declarations):
        pieces = declaration.split(':', 2)
        if len(pieces) != 2:
            continue
        prop, val = pieces
        prop = prop.strip().lower()
        if prop == 'stroke' and val != color:
            declarations[idx] = prop + ':' + color
        elif prop == 'stroke-opacity':
            # val is a string while opacityValue is a float, so this
            # comparison always rewrites the declaration (kept as-is).
            if val != opacityValue:
                declarations[idx] = prop + ':' + str(opacityValue)
            hasStrokeOpacity = True
    if not hasStrokeOpacity:
        declarations.append('stroke-opacity' + ':' + str(opacityValue))
    return ";".join(declarations)
def getSkeletonPath(d, offs):
    '''
    Receives a current skeleton path and offset specified by the user if it's line.
    Calculates new skeleton path to use for creating contour with given offset.
    '''
    # Only straight-line skeletons (h/H, v/V, or a two-point M/m) are
    # shifted; any other path is parsed unchanged.
    if offs != 0:
        comps = d.split()
        # Horizontal line: shift the start point perpendicular to travel.
        if ((comps[2] == 'h' or comps[2] == 'H') and len(comps) == 4):
            startPt = comps[1].split(',')
            startX = float(startPt[0])
            startY = float(startPt[1])
            finalX = float(comps[3]) if comps[2] == 'H' else startX + float(comps[3])
            if startX < finalX:
                startY -= offs
            else:
                startY += offs
            comps[1] = startPt[0] + ',' + str(startY)
        # Vertical line: shift X depending on travel direction.
        elif ((comps[2] == 'v' or comps[2] == 'V') and len(comps) == 4):
            startPt = comps[1].split(',')
            startX = float(startPt[0])
            startY = float(startPt[1])
            finalY = float(comps[3]) if comps[2] == 'V' else startY + float(comps[3])
            if startY < finalY:
                startX += offs
            else:
                startX -= offs
            comps[1] = str(startX) + ',' + startPt[1]
        # Two-point absolute line (M x1,y1 x2,y2): shift both endpoints.
        elif (comps[0] == 'M' and len(comps) == 3):
            startPt = comps[1].split(',')
            startX = float(startPt[0])
            startY = float(startPt[1])
            finalPt = comps[2].split(',')
            finalX = float(finalPt[0])
            finalY = float(finalPt[1])
            if startX < finalX:
                if (startY > finalY):
                    startX -= offs
                    finalX -= offs
                else:
                    startX += offs
                    finalX += offs
                startY -= offs
                finalY -= offs
            else:
                if startY > finalY:
                    startX -= offs
                    finalX -= offs
                else:
                    startX += offs
                    finalX += offs
                startY += offs
                finalY += offs
            comps[1] = str(startX) + ',' + str(startY)
            comps[2] = str(finalX) + ',' + str(finalY)
        # Two-point relative line (m x1,y1 dx,dy): shift only the start,
        # the relative delta stays unchanged.
        elif (comps[0] == 'm' and len(comps) == 3):
            startPt = comps[1].split(',')
            startX = float(startPt[0])
            startY = float(startPt[1])
            finalPt = comps[2].split(',')
            dx = float(finalPt[0])
            dy = float(finalPt[1])
            finalX = startX + dx
            finalY = startY + dy
            if startX < finalX:
                if startY > finalY:
                    startX -= offs
                else:
                    startX += offs
                startY -= offs
            else:
                if startY > finalY:
                    startX -= offs
                else:
                    startX += offs
                startY += offs
            comps[1] = str(startX) + ',' + str(startY)
            comps[2] = str(dx) + ',' + str(dy)
        return cubicsuperpath.parsePath(' '.join(comps))
    return cubicsuperpath.parsePath(d)
def modifySkeletonPath(skelPath):
    """Concatenate the sub-paths of a cubicsuperpath into one component,
    fusing consecutive sub-paths whose endpoints coincide.

    Note: fused junction nodes are modified in place in ``skelPath``.
    """
    merged = list(skelPath[0])
    for component in skelPath[1:]:
        # If this sub-path starts exactly where the previous one ended,
        # carry over the incoming control point and drop the duplicate node.
        if component[0][1] == merged[-1][1]:
            component[0][0] = merged[-1][0]
            merged.pop()
        merged.extend(component)
    return merged
def linearize(p, tolerance=0.001):
    '''
    This function receives a component of a 'cubicsuperpath' and returns two things:
    The path subdivided in many straight segments, and an array containing the length of each segment.
    '''
    zero = 0.000001
    i = 0
    d = 0
    lengths=[]
    while i < len(p) - 1:
        # box = length of the control polygon, chord = straight distance;
        # they converge as the curve flattens.
        box = bezmisc.pointdistance(p[i][1], p[i][2])
        box += bezmisc.pointdistance(p[i][2], p[i+1][0])
        box += bezmisc.pointdistance(p[i+1][0], p[i+1][1])
        chord = bezmisc.pointdistance(p[i][1], p[i+1][1])
        if (box - chord) > tolerance:
            # Too curved: split the Bezier at t=0.5 and retry this index.
            b1, b2 = bezmisc.beziersplitatt([p[i][1], p[i][2], p[i + 1][0], p[i + 1][1]], 0.5)
            p[i][2][0], p[i][2][1] = b1[1]
            p[i + 1][0][0], p[i + 1][0][1] = b2[2]
            p.insert(i + 1, [[b1[2][0], b1[2][1]], [b1[3][0], b1[3][1]], [b2[1][0], b2[1][1]]])
        else:
            # Flat enough: approximate segment length as the mean of box and chord.
            d = (box + chord) / 2
            lengths.append(d)
            i += 1
    # Drop degenerate (near-zero length) segments from both outputs.
    new = [p[i][1] for i in range(0, len(p) - 1) if lengths[i] > zero]
    new.append(p[-1][1])
    lengths = [l for l in lengths if l > zero]
    return (new, lengths)
def isSkeletonClosed(sklCmp):
    """Return True when the component's first and last points coincide,
    compared after rounding to 2 decimal places."""
    precision = 2
    first, last = sklCmp[0], sklCmp[-1]
    return (round(first[0], precision) == round(last[0], precision)
            and round(first[1], precision) == round(last[1], precision))
def getPolygonCentroid(polygon):
    """Return the arithmetic mean of the polygon's vertices as [x, y].

    (Vertex average, not the true area centroid.)
    """
    count = len(polygon)
    x_total = sum(vertex[0] for vertex in polygon)
    y_total = sum(vertex[1] for vertex in polygon)
    return [x_total / count, y_total / count]
def getPoint(p1, p2, x, y):
    """Return the point on the (non-vertical) line through p1 and p2 with
    the given x (when y is None) or the given y (when x is None)."""
    slope = (p1[1] - p2[1]) / (p1[0] - p2[0])
    intercept = p1[1] - slope * p1[0]
    if x is None:
        x = (y - intercept) / slope
    else:
        y = slope * x + intercept
    return [x, y]
def getPtOnSeg(p1, p2, segLen, l):
    """Return the point at distance `l` along the direction from p1 to p2,
    where `segLen` is the p1-p2 distance.

    NOTE(review): the axis-aligned branches offset from p2 while the
    general branch measures from p1 -- looks inconsistent, preserved as-is.
    """
    if p1[0] == p2[0]:
        # Vertical segment: step along y past p2.
        return [p2[0], p2[1] - l] if p2[1] < p1[1] else [p2[0], p2[1] + l]
    if p1[1] == p2[1]:
        # Horizontal segment: step along x past p2.
        return [p2[0] - l, p2[1]] if p2[0] < p1[0] else [p2[0] + l, p2[1]]
    # General case: advance dx = l*cos(angle) from p1, then project onto the line.
    rise = abs(p1[1] - p2[1])
    theta = asin(rise / segLen)
    run = l * cos(theta)
    x = p1[0] - run if p1[0] > p2[0] else p1[0] + run
    return getPoint(p1, p2, x, None)
def drawfunction(nodes, width, fx):
    """Build an SVG path command list plotting y = f(x).

    x spans [0, 2*pi] mapped onto `width`; y spans [-1, 1] mapped onto a
    fixed 2-unit-high plane. Returns [] when `fx` is empty or invalid
    (previously an empty `fx` crashed with NameError because `f` was
    never defined).
    """
    # x-bounds of the plane
    xstart = 0.0
    xend = 2 * pi
    # y-bounds of the plane
    ybottom = -1.0
    ytop = 1.0
    # size and location of the plane on the canvas
    height = 2
    left = 15
    bottom = 15 + height
    # Function specified by the user.
    # SECURITY: eval() executes arbitrary code from the dialog input; this
    # is tolerated only because the extension runs locally for the user.
    if fx == "":
        return []
    try:
        f = eval('lambda x: ' + fx.strip('"'))
    except SyntaxError:
        return []
    scalex = width / (xend - xstart)
    xoff = left
    # convert x-value to coordinate
    coordx = lambda x: (x - xstart) * scalex + xoff
    scaley = height / (ytop - ybottom)
    yoff = bottom
    # convert y-value to coordinate (y axis is flipped on the canvas)
    coordy = lambda y: (ybottom - y) * scaley + yoff
    # step is the distance between nodes on x
    step = (xend - xstart) / (nodes - 1)
    third = step / 3.0
    # step used in calculating derivatives
    ds = step * 0.001
    # initialize function and derivative for 0;
    # they are carried over from one iteration to the next, to avoid extra function calculations.
    x0 = xstart
    y0 = f(xstart)
    # numerical derivative, using 0.001*step as the small differential
    x1 = xstart + ds  # Second point AFTER first point (Good for first point)
    y1 = f(x1)
    dx0 = (x1 - x0) / ds
    dy0 = (y1 - y0) / ds
    # path array
    a = []
    # Start curve
    a.append(['M ', [coordx(x0), coordy(y0)]])
    for i in range(int(nodes - 1)):
        x1 = (i + 1) * step + xstart
        x2 = x1 - ds  # Second point BEFORE first point (Good for last point)
        y1 = f(x1)
        y2 = f(x2)
        # numerical derivative
        dx1 = (x1 - x2) / ds
        dy1 = (y1 - y2) / ds
        # create cubic segment with control points a third of the step along the tangents
        a.append([' C ', [coordx(x0 + (dx0 * third)), coordy(y0 + (dy0 * third)),
                          coordx(x1 - (dx1 * third)), coordy(y1 - (dy1 * third)),
                          coordx(x1), coordy(y1)]])
        # Next segment's start is this segment's end
        x0 = x1
        y0 = y1
        # Assume the function is smooth everywhere, so carry over the derivative too
        dx0 = dx1
        dy0 = dy1
    return a
def offset(pathComp, dx, dy):
    """Translate every control point of a cubicsuperpath component by
    (dx, dy), in place."""
    for node in pathComp:
        for point in node:
            point[0], point[1] = point[0] + dx, point[1] + dy
def stretch(pathComp, xscale, yscale, org):
    """Scale every control point of a cubicsuperpath component about the
    origin point `org`, in place."""
    origin_x, origin_y = org[0], org[1]
    for node in pathComp:
        for point in node:
            point[0] = origin_x + (point[0] - origin_x) * xscale
            point[1] = origin_y + (point[1] - origin_y) * yscale
class GuillocheContour(pathmodifier.PathModifier):
    """Inkscape extension that draws a periodic "guilloche" contour along a
    selected skeleton path.

    The contour is either a plain sin/cos wave or a sum of five cosine
    harmonics whose amplitudes/phases come from a named envelope preset
    (env1..env12) plus per-harmonic user adjustments. One period is rendered
    as a Bezier path by drawfunction(), then repeated, stretched, and bent
    along the skeleton by applyDiffeo().
    """

    def __init__(self):
        pathmodifier.PathModifier.__init__(self)
        # Options below mirror the extension's .inx UI definition.
        self.OptionParser.add_option("--tab",
                action="store", type="string",
                dest="tab", default="contour",
                help="Active tab")
        self.OptionParser.add_option("--contourFunction",
                action="store", type="string",
                dest="contourFunction", default="sin",
                help="Function of the contour")
        self.OptionParser.add_option("--frequency",
                action="store", type="int",
                dest="frequency", default=10,
                help="Frequency of the function")
        self.OptionParser.add_option("--amplitude",
                action="store", type="int",
                dest="amplitude", default=1,
                help="Amplitude of the function")
        self.OptionParser.add_option("--phaseOffset",
                action="store", type="int",
                dest="phaseOffset", default=0,
                help="Phase offset of the function")
        self.OptionParser.add_option("--offset",
                action="store", type="int",
                dest="offset", default=0,
                help="Offset of the function")
        self.OptionParser.add_option("--nodes",
                action="store", type="int",
                dest="nodes", default=20,
                help="Count of nodes")
        self.OptionParser.add_option("--remove",
                action="store", type="inkbool",
                dest="remove", default=False,
                help="If True, control object will be removed")
        self.OptionParser.add_option("--strokeColor",
                action="store", type="string",
                dest="strokeColor", default=255,
                help="The line's color")
        self.OptionParser.add_option("--amplitude1",
                action="store", type="float",
                dest="amplitude1", default=0.0,
                help="Amplitude of first harmonic")
        self.OptionParser.add_option("--phase1",
                action="store", type="int",
                dest="phase1", default=0,
                help="Phase offset of first harmonic")
        self.OptionParser.add_option("--amplitude2",
                action="store", type="float",
                dest="amplitude2", default=0.0,
                help="Amplitude of second harmonic")
        self.OptionParser.add_option("--phase2",
                action="store", type="int",
                dest="phase2", default=0,
                help="Phase offset of second harmonic")
        self.OptionParser.add_option("--amplitude3",
                action="store", type="float",
                dest="amplitude3", default=0.0,
                help="Amplitude of third harmonic")
        self.OptionParser.add_option("--phase3",
                action="store", type="int",
                dest="phase3", default=0,
                help="Phase offset of third harmonic")
        self.OptionParser.add_option("--amplitude4",
                action="store", type="float",
                dest="amplitude4", default=0.0,
                help="Amplitude of fourth harmonic")
        self.OptionParser.add_option("--phase4",
                action="store", type="int",
                dest="phase4", default=0,
                help="Phase offset of fourth harmonic")
        self.OptionParser.add_option("--amplitude5",
                action="store", type="float",
                dest="amplitude5", default=0.0,
                help="Amplitude of fifth harmonic")
        self.OptionParser.add_option("--phase5",
                action="store", type="int",
                dest="phase5", default=0,
                help="Phase offset of fifth harmonic")

    def prepareSelectionList(self):
        # Expand groups / unlink clones in the selection and normalize
        # every selected object to a path.
        self.skeletons = self.selected
        self.expandGroupsUnlinkClones(self.skeletons, True, False)
        self.objectsToPaths(self.skeletons)

    def linearizePath(self, skelPath, offs):
        # Approximate the skeleton as a polyline. For a closed skeleton with
        # a nonzero offset, push each vertex radially away from the centroid
        # by `offs` and recompute the affected segment lengths.
        comps, lengths = linearize(skelPath)
        self.skelCompIsClosed = isSkeletonClosed(comps)
        if (self.skelCompIsClosed and offs != 0):
            centroid = getPolygonCentroid(comps)
            for i in range(len(comps)):
                pt1 = comps[i]
                dist = bezmisc.pointdistance(centroid, pt1)
                comps[i] = getPtOnSeg(centroid, pt1, dist, dist + offs)
                if i > 0:
                    lengths[i - 1] = bezmisc.pointdistance(comps[i - 1], comps[i])
        return (comps, lengths)

    def getFunction(self, func):
        """Build the contour function as a Python expression string in x.

        'sin'/'cos' yield a single scaled wave; any other name selects a
        preset envelope (env1..env12) for a five-harmonic cosine series,
        further adjusted by the per-harmonic user options. Phase values are
        expressed in percent of a full turn (x/100 * 2*pi).
        """
        res = ''
        presetAmp1 = presetAmp2 = presetAmp3 = presetAmp4 = presetAmp5 = 0.0
        # NOTE: presetOffs is assigned but never used below.
        presetPhOf1 = presetPhOf2 = presetPhOf3 = presetPhOf4 = presetPhOf5 = presetOffs = 0
        if (func == 'sin' or func == 'cos'):
            return '(' + str(self.options.amplitude) + ') * ' + func + '(x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + '))'
        if func == 'env1':
            presetAmp1 = presetAmp3 = 0.495
        elif func == 'env2':
            presetAmp1 = presetAmp3 = 0.65
            presetPhOf1 = presetPhOf3 = 25
        elif func == 'env3':
            presetAmp1 = 0.75
            presetPhOf1 = 25
            presetAmp3 = 0.24
            presetPhOf3 = -25
        elif func == 'env4':
            presetAmp1 = 1.105
            presetAmp3 = 0.27625
            presetPhOf3 = 50
        elif func == 'env5':
            presetAmp1 = 0.37464375
            presetPhOf1 = 25
            presetAmp2 = 0.5655
            presetAmp3 = 0.37464375
            presetPhOf3 = -25
        elif func == 'env6':
            presetAmp1 = 0.413725
            presetPhOf1 = 25
            presetAmp2 = 0.45695
            presetPhOf2 = 50
            presetAmp3 = 0.494
            presetPhOf3 = -25
        elif func == 'env7':
            presetAmp1 = 0.624
            presetPhOf1 = 25
            presetAmp2 = 0.312
            presetAmp3 = 0.624
            presetPhOf3 = 25
        elif func == 'env8':
            presetAmp1 = 0.65
            presetPhOf1 = 50
            presetAmp2 = 0.585
            presetAmp3 = 0.13
        elif func == 'env9':
            presetAmp1 = 0.07605
            presetPhOf1 = 25
            presetAmp2 = 0.33345
            presetPhOf2 = 50
            presetAmp3 = 0.468
            presetPhOf3 = -25
            presetAmp4 = 0.32175
        elif func == 'env10':
            presetAmp1 = 0.3575
            presetPhOf1 = -25
            presetAmp2 = 0.3575
            presetAmp3 = 0.3575
            presetPhOf3 = 25
            presetAmp4 = 0.3575
            presetPhOf4 = 50
        elif func == 'env11':
            presetAmp1 = 0.65
            presetPhOf1 = 25
            presetAmp2 = 0.13
            presetPhOf2 = 50
            presetAmp3 = 0.26
            presetPhOf3 = 25
            presetAmp4 = 0.39
        elif func == 'env12':
            presetAmp1 = 0.5525
            presetPhOf1 = -25
            presetAmp2 = 0.0414375
            presetPhOf2 = 50
            presetAmp3 = 0.15884375
            presetPhOf3 = 25
            presetAmp4 = 0.0966875
            presetAmp5 = 0.28315625
            presetPhOf5 = -25
        # Harmonic k: (amp * (preset_k + user_k)) * cos(k*(x + phase) - phase_k)
        harm1 = '(' + str(self.options.amplitude * (presetAmp1 + self.options.amplitude1)) + ') * cos(1 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf1 + self.options.phase1) / 100.0 * 2 * pi) + '))'
        harm2 = '(' + str(self.options.amplitude * (presetAmp2 + self.options.amplitude2)) + ') * cos(2 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf2 + self.options.phase2) / 100.0 * 2 * pi) + '))'
        harm3 = '(' + str(self.options.amplitude * (presetAmp3 + self.options.amplitude3)) + ') * cos(3 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf3 + self.options.phase3) / 100.0 * 2 * pi) + '))'
        harm4 = '(' + str(self.options.amplitude * (presetAmp4 + self.options.amplitude4)) + ') * cos(4 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf4 + self.options.phase4) / 100.0 * 2 * pi) + '))'
        harm5 = '(' + str(self.options.amplitude * (presetAmp5 + self.options.amplitude5)) + ') * cos(5 * (x + (' + str(self.options.phaseOffset / 100.0 * 2 * pi) + ')) - (' + str((presetPhOf5 + self.options.phase5) / 100.0 * 2 * pi) + '))'
        res = harm1 + ' + ' + harm2 + ' + ' + harm3 + ' + ' + harm4 + ' + ' + harm5
        return res

    def lengthToTime(self, l):
        '''
        Receives an arc length l, and returns the index of the segment in self.skelComp
        containing the corresponding point, together with the position of the point on this segment.
        If the deformer is closed, do computations modulo the total length.
        '''
        if self.skelCompIsClosed:
            l = l % sum(self.lengths)
        if l <= 0:
            return 0, l / self.lengths[0]
        i = 0
        while (i < len(self.lengths)) and (self.lengths[i] <= l):
            l -= self.lengths[i]
            i += 1
        t = l / self.lengths[min(i, len(self.lengths) - 1)]
        return (i, t)

    def applyDiffeo(self, bpt, vects=()):
        '''
        The kernel of this stuff:
        bpt is a base point and for v in vectors, v'=v-p is a tangent vector at bpt.

        Maps bpt's x-coordinate to arc length along the skeleton polyline and
        its y-coordinate to the perpendicular offset from it; bpt and every
        point in vects are modified in place.
        '''
        s = bpt[0] - self.skelComp[0][0]
        i, t = self.lengthToTime(s)
        if i == len(self.skelComp) - 1:
            # Past the last vertex: extrapolate along the final segment.
            x, y = bezmisc.tpoint(self.skelComp[i - 1], self.skelComp[i], t + 1)
            dx = (self.skelComp[i][0] - self.skelComp[i - 1][0]) / self.lengths[-1]
            dy = (self.skelComp[i][1] - self.skelComp[i - 1][1]) / self.lengths[-1]
        else:
            x, y = bezmisc.tpoint(self.skelComp[i], self.skelComp[i + 1], t)
            dx = (self.skelComp[i + 1][0] - self.skelComp[i][0]) / self.lengths[i]
            dy = (self.skelComp[i + 1][1] - self.skelComp[i][1]) / self.lengths[i]
        vx = 0
        vy = bpt[1] - self.skelComp[0][1]
        # Rotate (vx, vy) by the unit segment direction (dx, dy), translate to (x, y).
        bpt[0] = x + vx * dx - vy * dy
        bpt[1] = y + vx * dy + vy * dx
        for v in vects:
            vx = v[0] - self.skelComp[0][0] - s
            vy = v[1] - self.skelComp[0][1]
            v[0] = x + vx * dx - vy * dy
            v[1] = y + vx * dy + vy * dx

    def effect(self):
        # Entry point called by Inkscape for each invocation.
        if len(self.options.ids) < 1:
            inkex.errormsg(_("This extension requires one selected path."))
            return
        self.prepareSelectionList()
        for skeleton in self.skeletons.itervalues():
            resPath = []
            pattern = inkex.etree.Element(inkex.addNS('path','svg'))
            self.options.strokeHexColor, self.strokeOpacity = getColorAndOpacity(self.options.strokeColor)
            # Copy style of skeleton with setting color and opacity
            s = skeleton.get('style')
            if s:
                pattern.set('style', setColorAndOpacity(s, self.options.strokeHexColor, self.strokeOpacity))
            skeletonPath = modifySkeletonPath(getSkeletonPath(skeleton.get('d'), self.options.offset))
            self.skelComp, self.lengths = self.linearizePath(skeletonPath, self.options.offset)
            length = sum(self.lengths)
            # One pattern period spans 1/frequency of the skeleton length.
            patternWidth = length / self.options.frequency
            selectedFunction = self.getFunction(self.options.contourFunction)
            pattern.set('d', simplepath.formatPath(drawfunction(self.options.nodes, patternWidth, selectedFunction)))
            # Add path into SVG structure
            skeleton.getparent().append(pattern)
            if self.options.remove:
                skeleton.getparent().remove(skeleton)
            # Compute bounding box
            bbox = simpletransform.computeBBox([pattern])
            width = bbox[1] - bbox[0]
            dx = width
            if dx < 0.01:
                exit(_("The total length of the pattern is too small."))
            patternPath = cubicsuperpath.parsePath(pattern.get('d'))
            curPath = deepcopy(patternPath)
            xoffset = self.skelComp[0][0] - bbox[0]
            yoffset = self.skelComp[0][1] - (bbox[2] + bbox[3]) / 2
            patternCopies = max(1, int(round(length / dx)))
            width = dx * patternCopies
            newPath = []
            # Repeat pattern to cover whole skeleton
            for subPath in curPath:
                for i in range(0, patternCopies, 1):
                    newPath.append(deepcopy(subPath))
                    offset(subPath, dx, 0)
            curPath = newPath
            # Offset pattern to the first node of the skeleton
            for subPath in curPath:
                offset(subPath, xoffset, yoffset)
            # Stretch pattern to whole skeleton
            for subPath in curPath:
                stretch(subPath, length / width, 1, self.skelComp[0])
            # Bend the straight repeated pattern along the skeleton.
            for subPath in curPath:
                for ctlpt in subPath:
                    self.applyDiffeo(ctlpt[1], (ctlpt[0], ctlpt[2]))
            # Check if there is a need to close path manually
            if self.skelCompIsClosed:
                firstPtX = round(curPath[0][0][1][0], 8)
                firstPtY = round(curPath[0][0][1][1], 8)
                finalPtX = round(curPath[-1][-1][1][0], 8)
                finalPtY = round(curPath[-1][-1][1][1], 8)
                if (firstPtX != finalPtX or firstPtY != finalPtY):
                    curPath[-1].append(curPath[0][0])
            resPath += curPath
            pattern.set('d', cubicsuperpath.formatPath(resPath))
if __name__ == '__main__':
    # Run the extension when invoked by Inkscape or from the command line.
    extension = GuillocheContour()
    extension.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
|
nilq/baby-python
|
python
|
import cv2
from collections import defaultdict
from utils.timer import Timer
# BGR color triples used when drawing detections with OpenCV.
_GRAY = [218, 227, 218]
_RED = [0, 0, 255]
_GREEN = [18, 127, 15]
_BULE = [255, 144, 30]  # NOTE: name is a typo for "blue"; kept as-is because it is referenced below
_WHITE = [255, 255, 255]
_BLACK = [0, 0, 0]
# Palette available for per-instance coloring.
colors = [_RED, _GREEN, _BULE, _WHITE]
def get_class_string(class_index, score, dataset):
    """Return a display label "<class> <score>" for one detection.

    Falls back to "id<N>" when no dataset (with a .classes list) is given.
    """
    if dataset is not None:
        class_text = dataset.classes[class_index]
    else:
        class_text = 'id{:d}'.format(class_index)
    # NOTE: lstrip('0') never strips anything here because the formatted
    # score is preceded by a space; preserved for identical output.
    return class_text + ' {:0.2f}'.format(score).lstrip('0')
def vis_quad(img, cfg_vis, quad, color=None):
    """Draw a quadrilateral given flat coords [x0, y0, ..., x3, y3].

    Edges use *color* (default _BULE); the first vertex is marked with a
    filled red dot. Returns the image for chaining.
    """
    border_thick = cfg_vis.SHOW_QUAD_BOX.BORDER_THICK
    edge_color = _BULE if color is None else color
    n = len(quad)
    for j in range(4):
        start = (quad[j * 2], quad[j * 2 + 1])
        # Modulo wraps the last edge back to the first vertex.
        end = (quad[((j + 1) * 2) % n], quad[((j + 1) * 2 + 1) % n])
        cv2.line(img, start, end, edge_color, thickness=border_thick)
    cv2.circle(img, (quad[0], quad[1]), cfg_vis.SHOW_QUAD_BOX.CENTER_RADIUS, (0, 0, 255), -1)
    return img
def vis_point(img, cfg_vis, point, color):
    """Draw a single filled dot at *point* with the given BGR *color*."""
    center = (point[0], point[1])
    cv2.circle(img, center, cfg_vis.SHOW_QUAD_BOX.CENTER_RADIUS, color, -1)
    return img
def vis_class(img, cfg_vis, pos, class_str, bg_color):
    """Visualizes the class.

    Draws *class_str* anchored at *pos* over a filled background rectangle
    sized to the rendered text. Returns the image for chaining.
    """
    font_color = cfg_vis.SHOW_CLASS.COLOR
    font_scale = cfg_vis.SHOW_CLASS.FONT_SCALE
    font = cv2.FONT_HERSHEY_SIMPLEX
    x0, y0 = int(pos[0]), int(pos[1])
    txt = class_str
    (txt_w, txt_h), _ = cv2.getTextSize(txt, font, font_scale, 1)
    # Background box sits above the anchor point, sized to the text.
    cv2.rectangle(img, (x0, y0 - int(1.3 * txt_h)), (x0 + txt_w, y0), bg_color, -1)
    # Text baseline is nudged up slightly so it sits inside the box.
    cv2.putText(img, txt, (x0, y0 - int(0.3 * txt_h)), font, font_scale, font_color,
                lineType=cv2.LINE_AA)
    return img
def vis_one_image_opencv(im, cfg_vis, boxes=None, classes=None, dataset=None):
    """Constructs a numpy array with the detections visualized.

    boxes: per row, 8 quad coords (optionally followed by 2 center coords)
    with a confidence score in the last column -- assumed from the indexing
    below; TODO confirm against the detector that produces `boxes`.
    """
    timers = defaultdict(Timer)
    timers['bbox_prproc'].tic()
    # Nothing to draw if there are no boxes or none clears the threshold.
    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, -1]) < cfg_vis.VIS_TH:
        return im
    timers['bbox_prproc'].toc()
    for i in range(boxes.shape[0]):
        quad = boxes[i, :-1]
        score = boxes[i, -1]
        if score < cfg_vis.VIS_TH:
            continue
        if cfg_vis.SHOW_QUAD_BOX.ENABLED:
            timers['show_quad_box'].tic()
            if len(quad) == 8:
                im = vis_quad(im, cfg_vis, quad)
            elif len(quad) == 10:
                # Last two values are a center point, drawn separately.
                im = vis_quad(im, cfg_vis, quad[:8])
                center = quad[8:10]
                im = vis_point(im, cfg_vis, center, _GRAY)
            timers['show_quad_box'].toc()
        # show class (on by default)
        if cfg_vis.SHOW_CLASS.ENABLED:
            timers['show_class'].tic()
            class_str = get_class_string(classes[i], score, dataset)
            im = vis_class(im, cfg_vis, (quad[0], quad[1] - 2), class_str, _BLACK)
            timers['show_class'].toc()
    return im
|
nilq/baby-python
|
python
|
# The parsing logic is heavily borrowed from the python-nubia project, available at:
# https://github.com/facebookincubator/python-nubia
#
# In compliance with python-nubia's BSD-style license, its copyright and license terms
# are included below:
#
# BSD License
#
# For python-nubia software
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import pyparsing as pp
from enum import auto, Enum
from functools import lru_cache
from typing import NamedTuple
from prompt_toolkit.document import Document
from ..errors import PartialParseError, TotalParseError
class Patterns:
    """Regular-expression fragments shared by the command-line grammar."""

    # Punctuation permitted inside unquoted string values.
    ALLOWED_SYMBOLS_IN_STRING = r'-_/#@£$€%*+~|<>?.'
    IDENTIFIER = r'([a-zA-Z_][a-zA-Z0-9_\-]*)'
    WHITESPACE = r'\s+'
    UNQUOTED_STRING = r'([a-zA-Z0-9' + ALLOWED_SYMBOLS_IN_STRING + r']+)'
    STRING_SINGLE_QUOTE = r"\'([^\\\']|\\.)*\'"
    STRING_DOUBLE_QUOTE = r'\"([^\\\"]|\\.)*\"'
    BOOLEAN = r'(True|true|False|false)'
    FLOAT = r'\-?\d+\.\d*([eE]\d+)?'
    INTEGER = r'\-?\d+'
    KWARG = IDENTIFIER + r'(\s*=\s*)'
    COMMAND = r'^' + IDENTIFIER + r'(\s+|$)'

    @staticmethod
    def is_valid_identifier(
        s: str
    ) -> bool:
        """Whether the specified string is a valid command name or kwarg identifier."""
        match = re.fullmatch(Patterns.IDENTIFIER, s)
        return match is not None
def _no_transform(x):
return x
def _bool_transform(x):
return x in ('True', 'true',)
def _str_transform(x):
return x.strip('"\'')
# Maps a grammar type name to the callable that converts a matched token.
# Type names absent here (e.g. 'list') fall back to _no_transform.
_TRANSFORMS = {
    'bool': _bool_transform,
    'str': _str_transform,
    'int': int,
    'float': float,
    'dict': dict,
}
def _parse_type(data_type):
    """Build a pyparsing parse action that converts matched tokens to *data_type*.

    Unknown type names fall back to the identity transform.
    """
    transform = _TRANSFORMS.get(data_type, _no_transform)

    def _parse(s, loc, toks):
        return [transform(item) for item in toks]

    return _parse
# ---------------------------------------------------------------------------
# pyparsing grammar for a single command line:
#   <command> <positional>... <key>=<value>...
# ---------------------------------------------------------------------------

# Valid identifiers cannot start with a number, but may contain them in their body.
identifier = pp.Word(pp.alphas + '_-', pp.alphanums + '_-')
# XXX: allow for hex?
int_value = pp.Regex(Patterns.INTEGER).setParseAction(_parse_type('int'))
float_value = pp.Regex(Patterns.FLOAT).setParseAction(_parse_type('float'))
# Boolean literals in either Python or lowercase spelling.
bool_value = (
    pp.Literal('True') ^ pp.Literal('true') ^
    pp.Literal('False') ^ pp.Literal('false')
).setParseAction(_parse_type('bool'))
quoted_string = pp.quotedString.setParseAction(_parse_type('str'))
unquoted_string = pp.Word(
    pp.alphanums + Patterns.ALLOWED_SYMBOLS_IN_STRING
).setParseAction(_parse_type('str'))
string_value = quoted_string | unquoted_string
single_value = bool_value | float_value | int_value | string_value
# Bracketed, comma-separated list of scalar values.
list_value = pp.Group(
    pp.Suppress('[') +
    pp.Optional(pp.delimitedList(single_value)) +
    pp.Suppress(']')
).setParseAction(_parse_type('list'))
# Forward-declared so dict values may nest inside other values.
dict_value = pp.Forward()
value = list_value ^ single_value ^ dict_value
dict_key_value = pp.dictOf(string_value + pp.Suppress(':'), value)
dict_value << pp.Group(
    pp.Suppress('{') + pp.delimitedList(dict_key_value) + pp.Suppress('}')
).setParseAction(_parse_type('dict'))
# Positionals must be end of line or has a space (or more) afterwards.
# This is to ensure that the parser treats text like "something=" as invalid
# instead of parsing this as positional "something" and leaving the "=" as
# invalid on its own.
positionals = pp.ZeroOrMore(
    value + (pp.StringEnd() ^ pp.Suppress(pp.OneOrMore(pp.White())))
).setResultsName('positionals')
# key=value pairs, collected into a dict-like result under 'kv'.
key_value = pp.Dict(pp.ZeroOrMore(pp.Group(
    identifier + pp.Suppress('=') + value
))).setResultsName('kv')
command = identifier.setResultsName('command')
# Full grammar: command name, then positionals, then keyword arguments.
command_line = command + positionals + key_value
class ParseState(Enum):
    """How much of a command line could be parsed."""
    FULL = auto()     # the entire input parsed
    PARTIAL = auto()  # a prefix parsed; trailing text remained
    NONE = auto()     # nothing could be parsed
class ParseStatus(NamedTuple):
    """Outcome of a command-line parse attempt (see parse_cmd_line)."""
    # Parse results, or None when state is ParseState.NONE.
    results: pp.ParseResults
    # Text that could not be parsed ('' on full success).
    unparsed_text: str
    # Index in the original text where the unparsed remainder begins.
    unparsed_start_pos: int
    # Degree of success; see ParseState.
    state: ParseState
@lru_cache()
def parse_cmd_line(
    text: str
) -> ParseStatus:
    """Attempt to parse a command line, returning a :class:`ParseStatus` object.

    Never raises: parse failures are folded into the returned status.
    Results are memoized per input string.
    """
    try:
        results = _raw_parse_cmd_line(text)
    except PartialParseError as exc:
        # A prefix parsed; report what remains and where it starts.
        return ParseStatus(exc.partial_result, exc.remaining, exc.error_pos,
                           ParseState.PARTIAL)
    except TotalParseError:
        # Nothing parsed at all.
        return ParseStatus(None, text, 0, ParseState.NONE)
    return ParseStatus(results, '', len(text), ParseState.FULL)
def _raw_parse_cmd_line(
    text: str
) -> pp.ParseResults:
    """Attempt to parse the command line as per the grammar defined in this module.
    If the specified text can be fully parsed, then a `pyparsing.ParseResults` will be
    returned with the following attributes:
    * command: The name or alias of the command.
    * kv: A dictionary of key-value pairs representing the keyword arguments.
    * positionals: Any positional argument values.
    Otherwise, a parse error is raised.
    Raises:
        :class:`PartialParseError`: If the specified text can be partially
            parsed, but errors still exist.
        :class:`TotalParseError`: If the text cannot even be partially parsed.
    """
    try:
        result = command_line.parseString(text, parseAll=True)
        return result
    except pp.ParseException as e:
        # markInputline() renders the input with a >!< marker at the failure
        # position; everything after the marker is the unparsed remainder.
        remaining = e.markInputline()
        remaining = remaining[(remaining.find('>!<') + 3):]
        try:
            # Retry without parseAll to salvage whatever prefix does parse.
            partial_result = command_line.parseString(text, parseAll=False)
        except pp.ParseException as ee:
            raise TotalParseError(str(ee)) from None
        new_exc = PartialParseError(str(e), remaining, partial_result, e.col)
        raise new_exc from None
class IncompleteToken:
    """Encapsulation of a token that could only be partially parsed.

    Classifies the raw token text as a keyword argument (key=value), a
    positional argument (an open list/dict/string literal), or ambiguous
    (could still become either).
    """

    # Characters that mark the token as an open literal, which is always
    # treated as a positional value.
    _LITERAL_CHARS = '[]{}"\''

    def __init__(self, token: str) -> None:
        self._token = token
        self._key = ''
        self._value = ''
        self._is_kw_arg = False
        self._is_pos_arg = False
        self._parse()

    def _parse(self) -> None:
        key, delim, value = self._token.partition('=')
        if any(ch in key for ch in self._LITERAL_CHARS):
            # An open literal: treat the whole token as a positional value.
            self._is_pos_arg = True
            self._value = self._token
        elif delim:
            # A key=value keyword argument (value may still be empty).
            self._is_kw_arg = True
            self._key = key
            self._value = value
        else:
            # Could be the beginning of something like key=value or the
            # positional literal keywest.
            self._key = self._value = key

    @property
    def is_kw_arg(self) -> bool:
        return self._is_kw_arg

    @property
    def is_pos_arg(self) -> bool:
        return self._is_pos_arg

    @property
    def is_ambiguous_arg(self) -> bool:
        # Not yet definitively a kwarg or a positional.
        return not (self._is_kw_arg or self._is_pos_arg)

    @property
    def key(self) -> str:
        return self._key

    @property
    def value(self) -> str:
        return self._value

    def __str__(self) -> str:
        if self._is_kw_arg:
            return f'kwarg {self._key}={self._value}'
        if self._is_pos_arg:
            return f'positional {self._value}'
        if self.is_ambiguous_arg:
            return f'ambiguous {self._key}'
        return 'Parse error'

    def __repr__(self) -> str:
        return f'<{self.__class__.__qualname__} [{str(self)}]>'
def last_incomplete_token(
    document: Document,
    unparsed_text: str
) -> IncompleteToken:
    """Derive the token currently being typed from the prompt_toolkit
    Document and the parser's unparsed remainder."""
    if document.char_before_cursor in ' ]}':
        # Cursor sits just past a finished token/literal: nothing in progress.
        last_token = ''
    else:
        last_space = document.find_backwards(' ', in_current_line=True)
        if last_space is None:
            last_space = -1
        # NOTE(review): find_backwards returns an offset relative to the
        # cursor (typically negative), yet it is used here as an absolute
        # index into document.text -- confirm this is intended.
        last_token = document.text[last_space+1:]
    # The longer of the last_token and unparsed_text is taken in the event that the
    # unparsed_text is an open literal, which could itself contain spaces.
    if len(unparsed_text) > len(last_token):
        last_token = unparsed_text
    return IncompleteToken(last_token)
def last_incomplete_token_from_document(
    document: Document
) -> IncompleteToken:
    """Shortcut for getting the last incomplete token only from a ``Document``."""
    status = parse_cmd_line(document.text)
    return last_incomplete_token(document, status.unparsed_text)
|
nilq/baby-python
|
python
|
import re
# Convert the Dockerfile in the current directory into a rough shell script
# (debian.sh): RUN -> sudo, WORKDIR -> cd, FROM lines dropped, and the
# ENTRYPOINT/CMD pair flattened into a single trailing command line.
with open('Dockerfile') as dockerfile:
    data = dockerfile.read()

resp = re.sub('RUN', 'sudo', data)
resp = re.sub('WORKDIR', 'cd', resp)
resp = re.sub('FROM.*', '', resp)

# Flatten ENTRYPOINT ["..."] ... CMD ["..."] into `entrypoint args`.
cmd_match = re.findall(r'ENTRYPOINT\s+\[(.*?)].*?CMD\s+\[(.*?)\]', resp, re.DOTALL)
if cmd_match:
    entrypoint, args = cmd_match[0]
    cmd = (entrypoint.strip() + ' ' + args.strip()).replace('"', '')
else:
    # Fix: previously `cmd` stayed a list here and str + list raised TypeError.
    cmd = ''

# Keep everything up to the explanatory "# An ENTRYPOINT" comment, if present.
body = re.findall(r'(.*?)# An ENTRYPOINT', resp, re.DOTALL)
# Fix: previously an absent marker caused IndexError on body[0].
script_body = body[0] if body else resp

with open('debian.sh', 'w') as out:
    out.write(script_body + cmd)
|
nilq/baby-python
|
python
|
# flush
import time
import yaqc
from .._pump import *
def run():
    """Flush the flow reactor: drive the pumps and valves through one flush cycle.

    NOTE(review): the `if True:` block below always prints and returns, so
    every line after it is currently unreachable dead code -- presumably a
    temporary short-circuit left in during testing. Confirm before relying
    on the full sequence.
    """
    # Flush
    # (all volumes in mL unless specified differently)
    if True:
        print("flush")
        for i in range(65):
            print(i)
        return
    # ---- everything below is unreachable while the early return above stands ----
    # pall_flow_rates is the flow rate of one DSP
    # FILL IN PUMP FLOW RATE BELOW
    pall_flow_rates = 10  # [10,20, 30, 40, 50] (mL/min)
    pall_flow_rates_mL_s = pall_flow_rates / 60  # (mL/s)
    # Reactor Parameters
    Vsl = 1.94
    Veq = 0.715
    Vrxnzone = 9.975
    Vexit = 0.393
    Veq_quench = 1.2
    valve_open_time = 1
    # Median exit time calc w/ pumps flow rates
    # (measured lookup table per flow rate; units presumably seconds -- TODO confirm)
    if pall_flow_rates == 10:
        median_exit_time = 48.2
    elif pall_flow_rates == 20:
        median_exit_time = 24.6
    elif pall_flow_rates == 30:
        median_exit_time = 16.8
    elif pall_flow_rates == 40:
        median_exit_time = 12.8
    elif pall_flow_rates == 50:
        median_exit_time = 10.6
    # Valve assignments (A= run reactions, B= refill SL and DSPs)
    valve0 = yaqc.Client(36000)  # sample collection valve (A= sample, B=waste)
    valve1 = yaqc.Client(36001)  # Monomer
    valve2 = yaqc.Client(36002)  # Catalyst
    valve3 = yaqc.Client(36003)  # Quench
    # Pump assignments
    p1 = Pump(1)  # Monomer line
    p2 = Pump(2)  # Catalyst line
    p3 = Pump(4)  # Quench line
    # Pump injection volume for rxn
    pall_rxn_inj = 2.5 * Vsl + Veq + 0.5 * Vrxnzone + 0.333 * Vexit
    # Pump parameters for flush (extra step for DSPs multi-step mode)
    pall_flush_inj = 0.1
    pall_flush_rates = 10
    pump_run_time = pall_rxn_inj / pall_flow_rates_mL_s
    print("run time " + str(round((pump_run_time / 60), 1)) + " min")
    # Open Mon, Cat, & Quench valves
    # (each set_identifier is followed by a busy-wait and a sanity assert)
    valve1.set_identifier("A")
    while valve1.busy():
        continue
    assert valve1.get_identifier() == "A"
    valve2.set_identifier("A")
    while valve2.busy():
        continue
    assert valve2.get_identifier() == "A"
    valve3.set_identifier("A")
    while valve3.busy():
        continue
    assert valve3.get_identifier() == "A"
    # Collection valve to waste first
    valve0.set_identifier("B")
    while valve0.busy():
        continue
    assert valve0.get_identifier() == "B"

    # Prompt the User (y/n)
    def yes_or_no(question):
        # Keep asking until the answer starts with y/yes or n/no.
        answer = input(question + "(y/n): ").lower().strip()
        print("")
        while not (answer == "y" or answer == "yes" or answer == "n" or answer == "no"):
            print("Input yes or no")
            answer = input(question + "(y/n):").lower().strip()
            print("")
        if answer[0] == "y":
            return True
        else:
            return False

    if yes_or_no("Are you sure you want to FLUSH the reactor?"):
        print("Starting Flush")
    else:
        print("Flush Stopped")
        exit()
        print("Error")  # checking exit()
    # Pump instructions for reaction, flush, and refill (Cat & Mon the same)
    p1.add_step(volume=pall_rxn_inj, rate=pall_flow_rates, delay=0)
    p1.add_step(volume=pall_flush_inj, rate=pall_flush_rates, delay=0)
    p2.add_step(volume=pall_rxn_inj, rate=pall_flow_rates, delay=0)
    p2.add_step(volume=pall_flush_inj, rate=pall_flush_rates, delay=0)
    # Quench needs additional quench delay
    p3.add_step(volume=pall_rxn_inj, rate=pall_flow_rates, delay=0)
    p3.add_step(volume=pall_flush_inj, rate=pall_flush_rates, delay=0)
    start_pumps(1, 2, 4)
    # Collection valve timing and instructions
    time.sleep(pump_run_time / 2)
    valve0.set_identifier("A")
    while valve0.busy():
        continue
    assert valve0.get_identifier() == "A"
    time.sleep(valve_open_time)
    valve0.set_identifier("B")
    while valve0.busy():
        continue
    assert valve0.get_identifier() == "B"
    # Set valves back to B for refill
    time.sleep(pump_run_time)
    valve1.set_identifier("B")
    while valve1.busy():
        continue
    assert valve1.get_identifier() == "B"
    valve2.set_identifier("B")
    while valve2.busy():
        continue
    assert valve2.get_identifier() == "B"
    valve3.set_identifier("B")
    while valve3.busy():
        continue
    assert valve3.get_identifier() == "B"
    valve0.set_identifier("B")
    while valve0.busy():
        continue
    assert valve0.get_identifier() == "B"
    print("Flush Complete")
|
nilq/baby-python
|
python
|
from bs4 import BeautifulSoup as bs
from splinter import Browser
import time
import pandas as pd
import lxml
# full "scrape" function, comprised of the four subfunctions
# defined below
def scrape():
    """Run all four Mars scrapers and merge their results into one dict
    for the flask app."""
    results = {}

    # Latest news headline and teaser.
    results.update(scrape_article_info())
    print("Article Info Scraped!")

    # Featured JPL image URL.
    results.update(scrape_featured_mars_image())
    print("Featured Image Scraped!")

    # Mars facts table.
    results.update(scrape_data_table())
    print("Martian Data Table Scraped!")

    # Hemisphere image titles + URLs, nested under a single key.
    results["Hemispheres"] = scrape_hemisphere_enhanced_images()
    print("Hemisphere Images Scraped!")

    print(results)
    return results
# first scraped info for the Mars app, article headline and summary
# from the NASA website, returned as a dictionary
def scrape_article_info():
    """Scrape the latest Mars news headline and teaser from mars.nasa.gov.

    Returns a dict with keys "headline" and "description".
    """
    browser = Browser('chrome', executable_path='chromedriver.exe', headless=True)
    browser.visit("https://mars.nasa.gov/news/")
    # add a delay so page fully loads
    time.sleep(1)
    news_soup = bs(browser.html, "html.parser")
    latest = news_soup.find_all("li", class_="slide")[0]
    headline = latest.find("div", class_="content_title").a.text
    description = latest.find("div", class_="article_teaser_body").text
    browser.quit()
    return {"headline": headline, "description": description}
# scrape the latest Mars image from the JPL website, returned as a dictionary
def scrape_featured_mars_image():
    """Scrape the currently-featured Mars image URL from the JPL site.

    Returns a dict: {"featured_image": <absolute image URL>}.
    """
    executable_path = {'executable_path': 'chromedriver.exe'}
    browser = Browser('chrome', **executable_path, headless=True)
    base_url = "https://www.jpl.nasa.gov"
    image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
    browser.visit(image_url)
    # Open the first thumbnail's lightbox, then navigate to its detail page.
    browser.find_by_css('img.thumb').first.click()
    time.sleep(2)
    # Scroll the lightbox so the "more info" link becomes clickable.
    browser.execute_script(
        "document.getElementById('fancybox-lock').scrollTo(0, document.body.scrollHeight);")
    browser.links.find_by_partial_text("more info").click()
    time.sleep(1)
    #get image src
    img_soup = bs(browser.html, "html.parser")
    img_src = img_soup.find("img", class_="main_image")["src"]
    img_src = base_url + img_src  # src is site-relative; prefix the host
    browser.quit()
    return {"featured_image": img_src}
# scrape Mars data table info directly from space-facts.com/mars
def scrape_data_table():
    """Scrape the Mars facts table from space-facts.com/mars.

    Returns the result of DataFrame.to_dict(): a dict mapping the single
    column name "Data Table" to a dict of fact-name -> value pairs.
    """
    data_table_url = "https://space-facts.com/mars/"
    # read_html returns every <table> on the page; the first is the facts table.
    tables = pd.read_html(data_table_url)
    # Index by the fact-name column and label the remaining value column.
    mars_info_df = tables[0].set_index(0)
    mars_info_df.index.name = "Mars"
    mars_info_df.columns = ["Data Table"]
    return mars_info_df.to_dict()
# scrape high-quality pictures for each Martian hemisphere
# returns a dictionary of hemisphere name to file location
def scrape_hemisphere_enhanced_images():
    """Scrape full-resolution image links for each Martian hemisphere.

    Returns a list of dicts: [{"title": ..., "img_url": ...}, ...].
    """
    executable_path = {'executable_path': 'chromedriver.exe'}
    browser = Browser('chrome', **executable_path, headless=True)
    base_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    all_hemispheres = []
    browser.visit(base_url)
    num_hemispheres = len(browser.find_by_css(".thumb"))
    for hemisphere_num in range(num_hemispheres):
        # Title comes from the results list; drop the " Enhanced" suffix.
        curr_title = browser.find_by_tag(
            "h3")[hemisphere_num].html.replace(" Enhanced", "")
        # Visit the detail page, grab the "Sample" (full-size) link, go back.
        browser.find_by_css(".thumb")[hemisphere_num].click()
        curr_img_url = browser.find_by_text("Sample").first["href"]
        # print(curr_img_url)
        browser.back()
        all_hemispheres.append({"title": curr_title, "img_url": curr_img_url})
        browser.windows[0].close_others()
    # print(all_hemispheres)
    browser.quit()
    return all_hemispheres
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from shuup_workbench.settings.utils import get_disabled_migrations
from shuup_workbench.settings import *
# Override the workbench default database with a local SQLite file.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'myapp.sqlite3'
    }
}
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import ConfigParser
import log
import obslog
import os
from pyraf import iraf
import shutil
import utils
# ----------------------------------------------------------------------------------------------------------------------
def start(configfile):
    """
    Extract 1D spectra from combined 2D GNIRS spectra using IRAF nsextract.

    Parameters are loaded from the gnirs-pype.cfg configuration file. This script will automatically
    detect if it is being run on telluric data or science data.

    INPUT FILES:
        - Configuration file
        - Science or Telluric frames
        - mdfshiftrefimage
        - masterflat
        - /database files from the appropriate calibrations directory

    OUTPUT FILES:
        - If telluric: cleaned (optional), prepared, radiation-event corrected, reduced, spatial distortion corrected,
          and transformed images
        - If science: cleaned (optional), prepared, radiation-event corrected, reduced, spatial distortion corrected,
          and transformed images

    Args:
        - configfile: gnirs-pype.cfg configuration file, which supplies (among others):
            - Paths to the Science (str), reduction truth value (boolean)
              E.g. 'target/date/config/{Sci,Tel}_ObsID/{Calibrations,Intermediate}', True
            - Paths to the Tellurics (str), reduction truth value (boolean)
              E.g. 'target/date/config/{Sci,Tel}_ObsID/{Calibrations,Intermediate}', True
            - manualMode (boolean): Enable optional debugging pauses? Default: False
            - overwrite (boolean): Overwrite old files? Default: False
    """
    logger = log.getLogger('extract_spectra')
    path = os.getcwd()  # Store current working directory for later use.
    # Set up/prepare IRAF.
    iraf.gemini()
    iraf.gemtools()
    iraf.gnirs()
    iraf.unlearn(iraf.gemini, iraf.gemtools, iraf.gnirs, iraf.imcopy)  # reset parameters to default values
    # Prepare the IRAF package for GNIRS.
    # NSHEADERS lists the header parameters used by the various tasks in the GNIRS package (excluding headers values
    # which have values fixed by IRAF or FITS conventions).
    iraf.nsheaders("gnirs", logfile=logger.root.handlers[0].baseFilename)
    # Set clobber to 'yes' for the script. This still does not make the gemini tasks overwrite files, so: YOU WILL
    # LIKELY HAVE TO REMOVE FILES IF YOU RE_RUN THE SCRIPT.
    # NOTE(review): the previous clobber value is saved here but never restored before returning -- confirm intended.
    us_clobber = iraf.envget("clobber")
    iraf.reset(clobber='yes')
    config = ConfigParser.RawConfigParser()
    config.optionxform = str  # make options case-sensitive
    config.read(configfile)
    manualMode = config.getboolean('defaults', 'manualMode')
    overwrite = config.getboolean('defaults', 'overwrite')
    # Order of sections is important to later check for plausible peaks located for science targets by nsextract
    nsextractInter = config.getboolean('interactive', 'nsextractInter')
    combinedsrc = config.get('runtimeFilenames', 'combinedsrc')
    combinedsky = config.get('runtimeFilenames', 'combinedsky')
    extractRegularPrefix = config.get('runtimeFilenames', 'extractRegularPrefix')
    extractFullSlitPrefix = config.get('runtimeFilenames', 'extractFullSlitPrefix')
    extractStepwiseTracePrefix = config.get('runtimeFilenames', 'extractStepwiseTracePrefix')
    extractStepwisePrefix = config.get('runtimeFilenames', 'extractStepwisePrefix')
    useApall = config.getboolean('extractSpectra1D', 'useApall')
    subtractBkg = config.get('extractSpectra1D', 'subtractBkg')
    extractApertureRadius = config.getfloat('extractSpectra1D', 'extractApertureRadius')
    checkPeaksMatch = config.getboolean('extractSpectra1D', 'checkPeaksMatch')
    toleranceOffset = config.getfloat('extractSpectra1D', 'toleranceOffset')
    extractFullSlit = config.getboolean('extractSpectra1D', 'extractFullSlit')
    extractStepwise = config.getboolean('extractSpectra1D', 'extractStepwise')
    # NOTE(review): extractionStepSize is read here but the stepwise loop below hard-codes step = 3 -- confirm.
    extractionStepSize = config.getfloat('extractSpectra1D', 'extractStepSize')
    # NOTE(review): overwritten per-directory by get_window() below -- confirm this config read is still needed.
    extractApertureWindow = config.getfloat('extractSpectra1D', 'extractApertureWindow')
    # gnirsExtractSpectra1D will first check if the reduction truth value of the science and telluric directories is
    # True -- if it is, it will then check if the required spectra to be extracted are available in the directories
    # (and proceed only if it finds them there); else, it will warn the user and request to provide the spectra for
    # extracting. If the reduction truth value of the science and telluric directories is False, the script will skip
    # extracting 1D spectra in those directories.
    # Loop through all the observation (telluric and science) directories to extract 1D spectra in each one.
    # (the Telluric standards must be done first if they are to be used as a reference)
    for section in ['TelluricDirectories', 'ScienceDirectories']:
        for obspath in config.options(section):
            if not config.getboolean(section, obspath):  # Only process directories marked True
                logger.debug('Skipping extraction of 1D spectra in %s', obspath)
                continue
            logger.info(' ----------------------- ')
            logger.info('| Extracting 1D spectra |')
            logger.info(' ----------------------- ')
            obspath += '/Intermediate'
            logger.info("%s\n", obspath)
            iraf.chdir(obspath)
            utils.pause(manualMode)
            utils.requires([combinedsrc])
            calculateSNR = config.getboolean('gnirsPipeline', 'CalculateSNR')
            if calculateSNR:
                # SNR needs the sky spectrum; downgrade gracefully if it is missing.
                if not utils.exists([combinedsky], overwrite=False):
                    logger.warning('Could not find combined sky spectra. Setting calculateSNR = False')
                    calculateSNR = False
            # NOTE(review): 'orders' is assigned but not used in this function -- confirm the call is needed.
            orders = utils.get_orders(obspath)
            extractApertureWindow = get_window(obspath)
            if nsextractInter:
                subtractBkg = 'fit'
                logger.info('Setting background subtraction method to "fit"')
            if useApall:
                nsum = 20
            else:
                nsum = 10
            extractSpectra(combinedsrc, extractRegularPrefix, nsextractInter, useApall, nsum, subtractBkg,
                           extractApertureRadius, overwrite)
            if calculateSNR:
                logger.info("Extracting the combined sky spectrum reduced without sky subtraction.")
                subtractBkg = 'none'
                extractSpectra(combinedsky, extractRegularPrefix, nsextractInter, useApall, nsum, subtractBkg,
                               extractApertureRadius, overwrite)
            if 'Science' in section:
                # If the extraction was not done interactively check if checkPeaksMatch is set: if yes, check if the
                # required telluric extraction reference files available in the telluric /database directory; else,
                # warn the user that both nsextractInter and checkPeaksMatch are not set, request the user to
                # manually check if the science target peak identified by task nsextract might identify a wrong peak
                # if the science target is not bright enough.
                # Get symbolic path to the tel database directory in the sci directory
                # Relative path/link expected to be at the top level of every sci directory
                scidatabasepath = 'database'
                logger.info("Science database path: %s", scidatabasepath)
                telpath = '../Telluric/Intermediate'
                logger.info("Telluric path: %s", telpath)
                teldatabasepath = telpath + '/database'
                logger.info("Telluric database path: %s", teldatabasepath)
                sci_combinedsrc = obspath + '/' + combinedsrc
                tel_combinedsrc = telpath + '/' + combinedsrc
                if not nsextractInter:  # if nsextract was not run interactively
                    if not checkPeaksMatch:
                        logger.warning("Parameters 'nsextractInter' and 'checkPeaksMatch' are both set to False.")
                        logger.warning("Please manually verify that nsextract identified the science peaks correctly.")
                    else:
                        logger.info("Finding extraction locations for Telluric standard...")
                        telpeaks = get_peaks(teldatabasepath)
                        logger.info("Finding extration locations for Science target...")
                        scipeaks = get_peaks(scidatabasepath)
                        logger.info("Comparing the science and Telluric extraction locations...")
                        reextract, predicted = compare_peaks(obspath, telpath, scipeaks, telpeaks, toleranceOffset)
                        if any(reextract):
                            logger.warning("Re-extracting...")
                            useApall = 'yes'
                            nsum = 20
                            reExtractSpectra(reextract, scipeaks, telpeaks, predicted, obspath, telpath, nsum,
                                             extractApertureRadius, useApall, subtractBkg, nsextractInter)
                # ------------------------------------------------------------------------------------------------------
                if extractFullSlit:
                    logger.warning('Full-slit extraction is untested')
                    utils.pause(manualMode)
                    # Approx. full-slit extraction (science target only)
                    # Uses +/- 23 pix aperture (6.9", almost whole length of slit), appropriate for objects centred
                    # along length of slit (q=0). Not sure what the effect is if nsextract finds a spectrum that's
                    # not centred along the slit.
                    iraf.nsextract(
                        inimages='src_comb', outspectra='', outprefix='a', dispaxis=1, database='', line=700,
                        nsum=20, ylevel='INDEF', upper=23, lower=-23, background='none', fl_vardq='yes', fl_addvar='no',
                        fl_skylines='yes', fl_inter=nsextractInter, fl_apall=useApall, fl_trace='no',
                        aptable='gnirs$data/apertures.fits', fl_usetabap='no', fl_flipped='yes', fl_project='yes',
                        fl_findneg='no', bgsample='*', trace='', tr_nsum=10, tr_step=10, tr_nlost=3,
                        tr_function='legendre', tr_order=5, tr_sample='*', tr_naver=1, tr_niter=0, tr_lowrej=3.0,
                        tr_highrej=3.0, tr_grow=0.0, weights='variance', logfile=logger.root.handlers[0].baseFilename,
                        verbose='yes', mode='al')
                # ------------------------------------------------------------------------------------------------------
                if extractStepwise:  # Extract in steps on either side of the peak
                    logger.warning('Step-wise extraction is untestd')
                    utils.pause(manualMode)
                    # Calling apall and tracing the peak first to make sure the same part of the object is extracted in
                    # each step along the slit for all orders (needed when there is complex, spectrally-varying
                    # structure in a galaxy, for example; otherwise the spectra can have offsets between orders)
                    # This first nsextract step, outside the loop, gets the trace into the database to be used when we
                    # do the "real" extraction
                    iraf.nsextract(
                        inimages='src_comb', outspectra='trace_ref', outprefix='x', dispaxis=1, database='', line=700,
                        nsum=20, ylevel='INDEF', upper=3, lower=-3, background='none', fl_vardq='yes', fl_addvar='no',
                        fl_skylines='yes', fl_inter=nsextractInter, fl_apall='yes', fl_trace='yes',
                        aptable='gnirs$data/apertures.fits', fl_usetabap='no', fl_flipped='yes' ,fl_project='no',
                        fl_findneg='no', bgsample='*', trace='', tr_nsum=10, tr_step=10, tr_nlost=3,
                        tr_function='legendre', tr_order=5, tr_sample='300:1000', tr_naver=1, tr_niter=0, tr_lowrej=3.0,
                        tr_highrej=3.0, tr_grow=0.0, weights='variance', logfile=logger.root.handlers[0].baseFilename,
                        verbose='yes', mode='al')
                    # This is non-interactive because it uses info from the previous call (and it would be very tedious)
                    # TODO: Make sure that the stepping range and step size results in an integer number of steps
                    step = 3
                    n = 0
                    for i in range(-21, 21, step):
                        iraf.nsextract(
                            inimages='src_comb', outspectra='', outprefix='s'+str(n), dispaxis=1, database='', line=700,
                            nsum=20, ylevel='INDEF', lower=i, upper=i+step, background='none', fl_vardq='yes',
                            fl_addvar='no', fl_skylines='yes', fl_inter='no', fl_apall='no', fl_trace='no',
                            aptable='gnirs$data/apertures.fits', fl_usetabap='no', fl_flipped='yes', fl_project='yes',
                            fl_findneg='no', bgsample='*', trace='', tr_nsum=10, tr_step=10, tr_nlost=3,
                            tr_function='legendre', tr_order=5, tr_sample='*', tr_naver=1, tr_niter=0, tr_lowrej=3.0,
                            tr_highrej=3.0, tr_grow=0.0, weights='variance',
                            logfile=logger.root.handlers[0].baseFilename, verbose='yes', mode='al')
                        n += 1
            logger.info("Extraction complete for")
            logger.info("%s", obspath)
    iraf.chdir(path)  # Return to directory script was begun from
    return
# ----------------------------------------------------------------------------------------------------------------------
def get_window(path):
    """Return the full-slit extraction window (pixels) for the GNIRS XD
    configuration encoded in *path*; exits on an unknown configuration."""
    logger = log.getLogger('get_window')
    # Window size per cross-dispersed configuration.  +/- window/2 pixels is
    # only appropriate for targets centred along the slit (absolute Q offset
    # of 0); e.g. for LB_SXD, 46 pixels (6.9") covers almost the whole slit.
    # [-46/2,46/2+6) [-23.0 -17 -11 -5 1 7 13 19 23 29) warn the user if last
    # step in extract >0.1" away from the end of the slit or if extraction
    # proceeds out of the slit.
    # LB_LXD: [-33/2,33/2+6] [-16.5 -10.5 -4.5 2.5 8.5 14.5 20.5]
    windows = {'LB_SXD': 46, 'LB_LXD': 33, 'SB_SXD': 46}
    for config_name, window in windows.items():
        if config_name in path:
            logger.debug('Window: %s pix', window)
            return window
    logger.error("Unknown GNIRS XD configuration.")
    raise SystemExit
# ----------------------------------------------------------------------------------------------------------------------
def extractSpectra(inimage, outprefix, interactive, apall, nsum, background, radius, overwrite):
    """
    Extracting 1D spectra from the combined 2D spectra using nsextract.

    :param inimage: combined 2D spectrum to extract from
    :param outprefix: prefix prepended to the output spectrum filename
    :param interactive: run nsextract interactively (fl_inter)
    :param apall: use apall-style aperture definition (fl_apall)
    :param nsum: number of rows/columns summed when locating the aperture
    :param background: Type of background to subtract (none|average|median|minimum|fit)
    :param radius: extraction aperture half-width in pixels (upper=+radius, lower=-radius)
    :param overwrite: if True, re-extract even when the outputs already exist
    """
    # This is really just a wrapper around nsextract.
    # I'm tempted to name this 'nsextract' and call this whenever I need nsextract.
    # I guess it might not have all the parameters, but they could be included as optional.
    logger = log.getLogger('extractSpectra')
    logger.debug('inimage: %s', inimage)
    logger.debug('background: %s', background)
    logger.debug('radius: %s pix', radius)
    logger.debug('nsum: %s pix', nsum)
    utils.requires([inimage])
    orders = utils.get_orders(os.getcwd())
    # Expected outputs: the extracted spectrum plus one DQ and one SCI aperture
    # file per spectral order in the local /database directory.
    outfiles = [outprefix + inimage] + \
        ['database/apsrc_comb_DQ_%d_' % i for i in range(1, len(orders)+1)] + \
        ['database/apsrc_comb_SCI_%d_' % i for i in range(1, len(orders)+1)]
    if utils.exists(outfiles, overwrite):
        logger.info('Spectra already extracted.')
        return
    iraf.nsextract(
        inimages=inimage, outspectra='', outprefix=outprefix, dispaxis=1, database='', line=700,
        nsum=nsum, ylevel='INDEF', upper=radius, lower=-radius, background=background,
        fl_vardq='yes', fl_addvar='no', fl_skylines='yes', fl_inter=interactive, fl_apall=apall, fl_trace='no',
        aptable='gnirs$data/apertures.fits', fl_usetabap='no', fl_flipped='yes', fl_project='yes', fl_findneg='no',
        bgsample='*', trace='', tr_nsum=10, tr_step=10, tr_nlost=3, tr_function='legendre', tr_order=5,
        tr_sample='*', tr_naver=1, tr_niter=0, tr_lowrej=3.0, tr_highrej=3.0, tr_grow=0.0, weights='variance',
        logfile=logger.root.handlers[0].baseFilename, verbose='yes', mode='al')
    return
# ----------------------------------------------------------------------------------------------------------------------
def get_peaks(databasepath):
    """Read the nsextract aperture files in *databasepath* and return the
    peak (aperture centre) location of each spectral order.

    Returns a list with one entry per order; an entry is ``None`` when no
    'center' line was found in the corresponding aperture file.
    """
    logger = log.getLogger('get_peaks')
    logger.debug('databasepath: %s', databasepath)
    orders = utils.get_orders(os.getcwd())
    apfiles = ['%s/apsrc_comb_SCI_%d_' % (databasepath, ext)
               for ext in range(1, len(orders) + 1)]
    utils.requires(apfiles)
    peaks = []
    for apfile in apfiles:
        peak = None
        with open(apfile, 'r') as handle:
            for record in handle:
                # The peak location is the number in the second column of the
                # line beginning with 'center'.
                if 'center' in record:
                    peak = float(record.split()[1])
                    break
        peaks.append(peak)
        if peak is None:
            logger.warning('Peak not found')
    logger.debug('peaks: %s', peaks)
    return peaks
# ----------------------------------------------------------------------------------------------------------------------
def compare_peaks(scipath, telpath, scipeaks, telpeaks, tolerance):
    """Compare the extraction locations of the science and Telluric standard.

    For faint targets, NSEXTRACT may lock onto a noise peak instead of the
    science peak.  Assuming both targets were acquired at the slit centre,
    the science peak should sit at the Telluric peak shifted by the
    difference in absolute Q offsets.  Orders whose measured science peak is
    more than *tolerance* pixels from that prediction are flagged for
    re-extraction at the predicted location.

    Returns (reextract, predicted): per-order booleans and predicted science
    peak locations.
    """
    logger = log.getLogger('gnirsReduce.peaksMatch')
    # Absolute P,Q offsets come from the observation logs.
    sciinfo = obslog.readcsv(scipath + '/obslog.csv')
    telinfo = obslog.readcsv(telpath + '/obslog.csv')
    sciA = utils.files_in([scipath + '/nodA.list'])
    telA = utils.files_in([telpath + '/nodA.list'])
    # Only the "A" nod offset is compared (B might be a sky frame).
    logger.debug('Science "A" offset: %s arcsec', sciinfo[sciA[0]]['Q'])
    logger.debug('Telluric "A" offset: %s arcsec', telinfo[telA[0]]['Q'])
    # TODO: The PIXSCALE should be in the obslog, but it's not, so for now:
    pixscale = 0.15
    offset = (float(sciinfo[sciA[0]]['Q']) - float(telinfo[telA[0]]['Q'])) / pixscale
    logger.debug('offset: %s pix', offset)
    logger.debug('Extraction locations (pixels):')
    shifts = []
    predicted = []
    reextract = []
    for sci_peak, tel_peak in zip(scipeaks, telpeaks):
        expected = tel_peak + offset
        shift = expected - sci_peak
        predicted.append(expected)
        shifts.append(shift)
        reextract.append(abs(shift) > tolerance)
        logger.debug('Telluric: %6.2f Predicted sci: %6.2f Actual sci: %6.2f',
                     tel_peak, expected, sci_peak)
    logger.debug('predicted: %s', predicted)
    logger.debug('shifts: %s', shifts)
    logger.debug('reextract: %s', reextract)
    if any(reextract):
        logger.warning('Some orders are not where they were expected to be.')
    # nsextract should find the spectrum within 'tolerance' pixels of the
    # expected location; how well that holds depends on how well the observer
    # centred the target along the slit.  A more robust test would use some
    # measure of whether the peak nsextract found is real (counts + FWHM),
    # but that information is not recorded in the database.
    return reextract, predicted
# ----------------------------------------------------------------------------------------------------------------------
def reExtractSpectra(reextract, scipeaks, telpeaks, predicted, scipath, telpath, nsum, aperture, apall, background,
                     interactive):
    """
    Re-extract science spectra at corrected aperture locations.

    :param reextract: boolean list of extensions that should be re-extracted
    :param scipeaks: measured science peak locations, one per order
    :param telpeaks: measured Telluric peak locations, one per order
    :param predicted: float list of new (predicted) extraction locations
    :param scipath: science Intermediate directory
    :param telpath: Telluric Intermediate directory
    :param nsum: rows/columns summed when locating the aperture
    :param aperture: extraction half-width in pixels
    :param apall: fl_apall value for nsextract
    :param background: background subtraction method for nsextract
    :param interactive: run nsextract interactively (fl_inter)

    Procedure:
    - Rename the old extracted spectrum for posterity
    - Rename the old aperture files for posterity
    - Copy in the Telluric aperture files
    - Edit the Telluric aperture files to have the predicted science spectra locations
    - Run nsextract with fl_trace=no and set the tracing reference image (trace) to the edited Telluric file
    - Test by setting the tolerance to be ~1 pix which will force some orders to be reextracted.
    """
    logger = log.getLogger('reExtractSpectra')
    logger.debug('scipath: %s', scipath)
    logger.debug('rextract: %s', reextract)
    logger.debug('predicted: %s', predicted)
    logger.debug('Renaming old extracted spectra...')
    os.rename('vsrc_comb.fits', 'vsrc_comb_TRACED.fits')
    logger.debug('Generating reference files...')
    for i in range(len(reextract)):
        ext = i+1
        # Keep the originally-traced aperture file under a *_TRACED suffix.
        oldsciapfile = '%s/database/apsrc_comb_SCI_%d_' % (scipath, ext)
        os.rename(oldsciapfile, oldsciapfile + 'TRACED')
        telapfile = '%s/database/apsrc_comb_SCI_%d_' % (telpath, ext)
        refapfile = '%s/database/apref_comb_SCI_%d_' % (scipath, ext)
        shutil.copy(telapfile, refapfile)
        with open(refapfile, 'r') as f:
            data = f.read()
        # Following XDGNIRS replace the Telluric location with either the shifted Telluric or the Science location:
        with open(refapfile, 'w') as f:
            if reextract[i]:
                logger.debug('Substituting predicted position: %s', predicted[i])
                f.write(data.replace(str(telpeaks[i]), str(predicted[i])).replace('src_comb', 'ref_comb'))
            else:
                logger.debug('Substituting science position: %s', scipeaks[i])
                f.write(data.replace(str(telpeaks[i]), str(scipeaks[i])).replace('src_comb', 'ref_comb'))
    # The reference image must exist alongside the edited aperture files.
    shutil.copy(telpath + '/src_comb.fits', 'ref_comb.fits')
    logger.debug('Running nsextract with the modified reference file and trace=no...')
    iraf.nsextract(
        inimages='src_comb.fits', outspectra='', outprefix='v', dispaxis=1, database='', line=700, nsum=nsum,
        ylevel='INDEF', upper=aperture, lower=-aperture, background=background, fl_vardq='yes', fl_addvar='no',
        fl_skylines='yes', fl_inter=interactive, fl_apall=apall, fl_trace='no', aptable='gnirs$data/apertures.fits',
        fl_usetabap='no', fl_flipped='yes', fl_project='yes', fl_findneg='no', bgsample='*', trace='ref_comb',
        tr_nsum=10, tr_step=10, tr_nlost=3, tr_function='legendre', tr_order=5, tr_sample='*', tr_naver=1, tr_niter=0,
        tr_lowrej=3.0, tr_highrej=3.0, tr_grow=0.0, weights='variance', logfile=logger.root.handlers[0].baseFilename,
        verbose='yes')
    # Sometimes nsextract locates the aperture too close to the end of the slit.
    # When this happens it fails with "Aperture too large" and spectra are not extracted for that order.
    # Check if all file extensions are present in the extracted target file:
    extracted_sci_extensions = iraf.gemextn(
        inimages='src_comb', check='exists,mef', process='expand', index='', extname='SCI', extversion='', ikparams='',
        omit='', replace='', outfile='STDOUT', logfile=logger.root.handlers[0].baseFilename, glogpars='', verbose='yes',
        fail_count='0', count='20', status='0', Stdout=1)
    logger.debug('extracted_sci_extensions: %s', extracted_sci_extensions)
    if len(extracted_sci_extensions) != len(reextract):
        logger.error("The combined science image file contains only %d extensions.", len(extracted_sci_extensions))
        raise SystemExit
    return
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Configure file + console logging, then run the full extraction pipeline.
    log.configure('gnirs-pype.log', filelevel='INFO', screenlevel='DEBUG')
    start('gnirs-pype.cfg')
|
nilq/baby-python
|
python
|
def jackpot():
    """Check the ticket halves for a jackpot (10 identical symbols in both).

    Reads the module-level globals ``first_half`` and ``second_half`` set by
    the ticket loop below.  Returns ["True", symbol, 10] for the first
    winning symbol (checked in priority order @, #, $, ^), else ["False"].
    """
    # Replaces a 4-way duplicated elif chain with a single loop.
    for symbol in "@#$^":
        run = symbol * 10
        if run in first_half and run in second_half:
            return ["True", symbol, 10]
    return ["False"]
def winning_symbols():
    """Find the best winning run (9 down to 6 identical symbols in BOTH halves).

    Reads the module-level globals ``first_half`` and ``second_half``.
    Symbols are checked in priority order @, #, $, ^ (symbol-major, matching
    the original elif chain), each from the longest run (9) down to the
    shortest (6).  Returns [symbol, count] on a win, or the string "False".
    """
    # Replaces a 16-way duplicated elif chain with two nested loops.
    for symbol in "@#$^":
        for count in range(9, 5, -1):
            run = symbol * count
            if run in first_half and run in second_half:
                return [symbol, count]
    return "False"
tickets = [i.strip() for i in input().split(", ")]
for ticket in tickets:
    # Validate length BEFORE slicing (the old code split the ticket first).
    if len(ticket) != 20:
        print("invalid ticket")
        continue
    # jackpot() / winning_symbols() read these globals.
    first_half = ticket[:len(ticket) // 2]
    second_half = ticket[len(ticket) // 2:]
    list_jackpot = jackpot()
    if list_jackpot[0] == 'True':
        print(f'ticket "{ticket}" - {list_jackpot[2]}{list_jackpot[1]} Jackpot!')
        continue
    # Call winning_symbols() once and reuse the result.  The old code called
    # it repeatedly and then tested `winning_symbols != "None"` -- comparing
    # the function object itself to a string, which is always True.
    list_ws = winning_symbols()
    if list_ws == "False":
        print(f'ticket "{ticket}" - no match')
        continue
    print(f'ticket "{ticket}" - {list_ws[1]}{list_ws[0]}')
|
nilq/baby-python
|
python
|
from ...language.base import parse
from ...utils.ast_to_code import ast_to_code
from ..compiled import GraphQLCompiledDocument
from .schema import schema
def test_compileddocument_from_module_dict():
    # type: () -> None
    """Build a compiled document from a module dict and verify every field round-trips."""
    document_string = "{ hello }"
    document_ast = parse(document_string)
    document = GraphQLCompiledDocument.from_module_dict(
        schema,
        {
            "document_string": document_string,
            "document_ast": document_ast,
            "execute": lambda *_: True,  # stub executor; only its truthiness is checked
        },
    )
    assert document.operations_map == {None: "query"}
    assert document.document_string == document_string
    assert document.document_ast == document_ast
    assert document.schema == schema
    assert document.execute()
def test_compileddocument_from_code():
    # type: () -> None
    """Compile a document from generated Python source and verify it round-trips."""
    document_string = "{ hello }"
    document_ast = parse(document_string)
    # Source-code template executed by from_code(); ast_to_code() serializes
    # the parsed AST back into constructor calls embedded in this module text.
    code = '''
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from graphql.language import ast
from graphql.language.parser import Loc
from graphql.language.source import Source
schema = None
document_string = """{document_string}"""
source = Source(document_string)
def loc(start, end):
    return Loc(start, end, source)
document_ast = {document_ast}
def execute(*_):
    return True
'''.format(
        document_string=document_string, document_ast=ast_to_code(document_ast)
    )
    document = GraphQLCompiledDocument.from_code(schema, code)
    assert document.operations_map == {None: "query"}
    assert document.document_string == document_string
    assert document.document_ast == document_ast
    assert document.schema == schema
    assert document.execute()
|
nilq/baby-python
|
python
|
import argparse
from random import seed
from yaml import dump
from utils.experiment import test
from utils.utils import *
if __name__ == "__main__":
seed(0)
parser = argparse.ArgumentParser(
description='Test error for a combination of ensembler and weak learner.')
parser.add_argument('dataset', help='dataset filename')
parser.add_argument('ensembler', help='chosen ensembler')
parser.add_argument('weak_learner', help='chosen weak learner')
parser.add_argument('M', metavar='# weak_learners',
help='number of weak learners', type=int)
parser.add_argument(
'trials', help='number of trials (each with different shuffling of the data); defaults to 1', type=int, default=1, nargs='?')
parser.add_argument('--record', action='store_const', const=True,
default=False, help='export the results in YAML format')
args = parser.parse_args()
ensembler = get_ensembler(args.ensembler)
weak_learner = get_weak_learner(args.weak_learner)
data = load_data("data/" + args.dataset)
accuracy, baseline = test(
ensembler, weak_learner, data, args.M, trials=args.trials)
print "Accuracy:"
print accuracy
print "Baseline:"
print baseline[-1]
if args.record:
results = {
'm': args.M,
'accuracy': accuracy,
'baseline': baseline[-1],
'booster': args.ensembler,
'weak_learner': args.weak_learner,
'trials': args.trials,
'seed': 0
}
filename = args.ensembler + "_" + \
args.weak_learner + "_" + str(args.M) + ".yml"
f = open(filename, 'w+')
f.write(dump(results))
|
nilq/baby-python
|
python
|
# Copyright (c) 2009-2013, Monoidics ltd.
# Copyright (c) 2013-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import subprocess
from . import config
class InferJavacCapture():
    """Runs ``infer capture`` around a javac invocation."""

    def __init__(self, javac_args):
        # Arguments forwarded to javac after the '--' separator.
        self.javac_args = javac_args

    def start(self):
        """Invoke ``infer capture -- javac <args>`` and return its exit code.

        Raises whatever subprocess raises (e.g. CalledProcessError on a
        non-zero exit), after printing the failing command line.
        """
        infer = os.path.join(config.BIN_DIRECTORY, 'infer')
        # pass --continue to prevent removing the results-dir
        cmd = [
            infer,
            'capture',
            '--continue',
            '--', 'javac'
        ] + self.javac_args
        try:
            return subprocess.check_call(cmd)
        except Exception as e:
            print('Failed to execute:', ' '.join(cmd))
            raise e
def _get_javac_args(javac_args):
# replace any -g:.* flag with -g to preserve debugging symbols
args = map(lambda arg: '-g' if '-g:' in arg else arg, javac_args)
# skip -Werror
args = filter(lambda arg: arg != '-Werror', args)
return args
def create_infer_command(javac_args):
    """Build an InferJavacCapture around the normalized javac arguments."""
    normalized_args = _get_javac_args(javac_args)
    return InferJavacCapture(normalized_args)
|
nilq/baby-python
|
python
|
import unittest
from rating.processing import rates
from rating.processing.utils import ConfigurationException
class TestConversion(unittest.TestCase):
    """Conversion from metric units (e.g. byte-seconds) to rating units (e.g. GiB-hours)."""

    def test_conversion_byte_second_to_hour_harder(self):
        # 7e12 byte-seconds -> GiB-hours: 7e12 / 2**30 / 3600 ~= 1.8109
        rating_unit = 'GiB-hours'
        metric_unit = 'byte-seconds'
        qty = 7e12
        converted = rates.convert_metrics_unit(metric_unit,
                                               rating_unit,
                                               qty)
        self.assertAlmostEqual(converted, 1.8109050061, delta=1e-6)

    def test_conversion_core_second_to_hour_basic(self):
        # 10 core-seconds -> core-hours: 10 / 3600 ~= 0.002777
        rating_unit = 'core-hours'
        metric_unit = 'core-seconds'
        qty = 10
        converted = rates.convert_metrics_unit(metric_unit,
                                               rating_unit,
                                               qty)
        self.assertAlmostEqual(converted, 0.002777, delta=1e-6)

    def test_conversion_core_second_to_hour_harder(self):
        # 24 core-seconds -> core-hours: 24 / 3600 ~= 0.006666
        rating_unit = 'core-hours'
        metric_unit = 'core-seconds'
        qty = 24
        converted = rates.convert_metrics_unit(metric_unit,
                                               rating_unit,
                                               qty)
        self.assertAlmostEqual(converted, 0.006666, delta=1e-6)

    def test_wrong_conversion(self):
        # An unknown rating unit must raise ConfigurationException.
        rating_unit = 'some-random-rating_unit'
        metric_unit = 'core-seconds'
        qty = 1
        with self.assertRaisesRegex(ConfigurationException,
                                    'Unsupported key'):
            rates.convert_metrics_unit(metric_unit,
                                       rating_unit,
                                       qty)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from users.models import Profile, ConfirmedMail
# Expose the Profile and ConfirmedMail models in the Django admin site
# with the default ModelAdmin options.
admin.site.register(Profile)
admin.site.register(ConfirmedMail)
|
nilq/baby-python
|
python
|
# Tutorial script: arithmetic operators and string formatting.
# (User-facing strings are intentionally in Portuguese.)
n1 = int(input('Digite um número: '))
n2 = int(input('Digite outro número '))
r = n1 + n2
print('O resultado da soma é {}'.format(r))
r = n1 - n2
print('O resultado da subtração é {}'.format(r))
r = n1 * n2
print('O resultado da multiplicação é {}'.format(r))
r = n1 / n2
print('O resultado da divisão é {}'.format(r))
# To control how many decimal places are shown, write it like this:
# print('O resultado da divisão é {:.3f}'.format(r))  -> prints 3 decimal places.
r = n1 ** n2
print('O resultado da potência é {}'.format(r))
r = n1 // n2
print('O resultado da divisão inteira é {}'.format(r))
r = n1 % n2
print('O resultado do resto da divisão é {}'.format(r))
# Arithmetic operators: +, -, *, /, **, //, %
# Respectively: addition, subtraction, multiplication, division, power, floor division, modulo.
# E.g.:
# 5 + 2 = 7
# 5 - 2 = 3
# 5 * 2 = 10
# 5 / 2 = 2.5
# 5 ** 2 = 25
# 5 // 2 = 2   Floor division: the result is always an integer.
# 5 % 2 = 1    Modulo: the remainder of the division above.
# Operator precedence (order applied when an expression mixes operators):
# 1: ()
# 2: **
# 3: *, /, //, %
# 4: +, -
# Note: to express equality inside an expression use the == operator.
# E.g.:
# 5 + 2 == 7
# Another way to compute a power:
# pow(4, 3) computes 4 raised to the 3rd power.
# Square root: raise the number to one half.
# E.g.:
# 81**(1/2) == 9.0
# For a cube root: 127**(1/3) == 5.0
# Operations also work on strings.
# E.g.:
# 'Oi' + 'Olá' == OiOlá
p1 = input('Digite uma palavra: ')
p2 = input('Digite outra palavra: ')
print('Resultado da Concatenação é:', p1 + p2)
# Line break: \n
# E.g.:
# print('Resultado da Concatenação é:\n', p1 + p2)
# Strings can also be repeated with *:
# p1*5 == p1p1p1p1p1p1
print('Palavra 1 repetida 5 vezes: ', p1 * 5)
# Keep printing on the same line with end=' '
# E.g.:
# print('Palavra 1 repetida 5 vezes: ', p1 *5, end=' ')
# This also helps when repeating symbols:
print('Símbolo de = repetido 20 vezes: ', '=' * 20)
# More examples of format alignment/fill specifiers:
nome = input('Digite seu nome: ')
print('Seja bem vindo {:20}!'.format(nome))    # pad to width 20
print('Seja bem vindo {:=>20}!'.format(nome))  # right-align, fill with '='
print('Seja bem vindo {:=<20}!'.format(nome))  # left-align, fill with '='
print('Seja bem vindo {:=^20}!'.format(nome))  # centre, fill with '='
|
nilq/baby-python
|
python
|
import pdb, traceback, sys
# Demonstrate post-mortem debugging: trigger an error, print the traceback,
# then drop into pdb at the frame where the exception was raised.
try:
    1/0
except:
    # Bare except is deliberate here: any exception should enter the debugger.
    extype, value, tb = sys.exc_info()
    traceback.print_exc()
    pdb.post_mortem(tb)
|
nilq/baby-python
|
python
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
import importlib
import platform
# Load the 32-bit or 64-bit native open3d module matching this interpreter
# and re-export its symbols at package level.
if platform.architecture()[0] == '32bit':
    globals().update(importlib.import_module('open3d.win32.32b.open3d').__dict__)
elif platform.architecture()[0] == '64bit':
    globals().update(importlib.import_module('open3d.win32.64b.open3d').__dict__)
|
nilq/baby-python
|
python
|
from copy import copy
from contextlib import contextmanager
from logging import getLogger
import tensorflow as tf
from rl.utils.tf_utils import (purge_orphaned_summaries
as _purge_orphaned_summaries)
# Sentinel meaning "use the class default" (distinct from a caller passing None).
USE_DEFAULT = object()
logger = getLogger("rl")
class SummaryManager(object):
  """Writes TensorFlow summaries, optionally rate-limited to a period of steps.

  Exactly one of ``logdir`` or ``summary_writer`` must be provided; when only
  ``logdir`` is given, a cached ``FileWriter`` for that directory is used.
  ``summary_time()`` reports whether at least ``summary_period`` steps have
  elapsed since the last recorded summary.
  """

  def __init__(self, logdir=None, summary_writer=None, summary_period=None,
               last_summary_step=None):
    if (logdir is None) == (summary_writer is None):
      raise ValueError("exactly one of logdir or summary_writer must be set")
    if summary_writer is None:
      summary_writer = tf.summary.FileWriterCache.get(logdir)
    self._summary_writer = summary_writer
    self._summary_period = summary_period
    self._last_summary_step = last_summary_step

  @property
  def summary_writer(self):
    return self._summary_writer

  def copy(self):
    """Return a shallow copy sharing the underlying summary writer."""
    return copy(self)

  def _get_step(self, step, session=None):
    """Resolve *step* to a concrete value, evaluating tensors via *session*."""
    if isinstance(step, (tf.Variable, tf.Tensor)):
      if session is None:
        raise ValueError("session is None when step is instance %s"
                         % type(step))
      step = session.run(step)
    return step

  def summary_time(self, step, session=None):
    """Return True when a summary should be written at *step*."""
    step = self._get_step(step, session)
    if self._summary_period is None:
      return False
    elif self._last_summary_step is None:
      return True
    else:
      return step - self._last_summary_step >= self._summary_period

  def add_summary(self, summary, step, session=None,
                  update_last_summary_step=True):
    """Write *summary* at *step* and (optionally) record it as the last step."""
    step = self._get_step(step, session)
    # BUG FIX: the old fallback `step = session.run(self._step)` referenced
    # the undefined attribute `self._step` and always raised AttributeError.
    # Fail explicitly with a clear message instead.
    if step is None:
      raise ValueError("step must not be None")
    self._summary_writer.add_summary(summary, global_step=step)
    if update_last_summary_step:
      self._last_summary_step = step

  def add_summary_dict(self, summary_dict, step, session=None,
                       update_last_summary_step=True):
    """Write a {tag: scalar} dict as a single Summary proto at *step*."""
    summary = tf.Summary()
    for key, val in summary_dict.items():
      summary.value.add(tag=key, simple_value=val)
    self.add_summary(summary, step=step, session=session,
                     update_last_summary_step=update_last_summary_step)

  def update_last_summary_step(self, step, session=None):
    self._last_summary_step = self._get_step(step, session)
class DistributedTrainer(object):
    """Drives training through a tf.train.MonitoredSession (TF 1.x).

    The chief worker (is_chief=True) initializes/restores variables and
    saves periodic checkpoints; other workers wait for the chief.
    Summaries are routed through an optional SummaryManager.
    """
    def __init__(self,
                 target,
                 is_chief,
                 summary_manager=None,
                 checkpoint_dir=None,
                 checkpoint_period=None,
                 checkpoint=None,
                 config=USE_DEFAULT):
        # target: session "master" address; '' means in-process.
        self._target = target
        self._is_chief = is_chief
        self._summary_manager = summary_manager
        # checkpoint_period requires somewhere to write: either an explicit
        # checkpoint_dir or (fallback below) the summary writer's logdir.
        if (summary_manager is None
            and checkpoint_dir is None
            and checkpoint_period is not None):
            raise ValueError("Either summary_manager or checkpoint_dir must be"
                             " specified when checkpoint_period is not None")
        if checkpoint_dir is not None and checkpoint_period is None:
            raise ValueError("checkpoint_period must be specified"
                             " when checkpoint_dir is not None")
        if checkpoint_period is not None and checkpoint_dir is None:
            checkpoint_dir = summary_manager.summary_writer.get_logdir()
        self._checkpoint_dir = checkpoint_dir
        self._checkpoint_period = checkpoint_period
        self._checkpoint = checkpoint  # checkpoint file to restore from, or None
        if config == USE_DEFAULT:
            config = self._get_default_config()
        self._config = config
        self._session = None  # set by managed_session()
    def _get_default_config(self):
        """Default session config: few CPU threads, GPU memory grown on demand."""
        config = tf.ConfigProto(intra_op_parallelism_threads=1,
                                inter_op_parallelism_threads=2)
        # Dynamic memory allocation on gpu.
        # https://github.com/tensorflow/tensorflow/issues/1578
        config.gpu_options.allow_growth = True
        return config
    @contextmanager
    def managed_session(self, init_vars=None, restore_vars=None,
                        save_vars=None, hooks=USE_DEFAULT,
                        purge_orphaned_summaries=True):
        """Yield a MonitoredSession wired for this worker's role.

        By default only variables placed on parameter servers are
        initialized, restored, and saved.
        """
        if init_vars is None:
            # Default to the variables that live on the parameter servers.
            init_vars = list(filter(
                lambda v: v.device.startswith("/job:ps/"),
                tf.global_variables()
            ))
        if restore_vars is None:
            restore_vars = init_vars
        if save_vars is None:
            save_vars = init_vars
        ready_op = tf.report_uninitialized_variables(init_vars)
        restorer = tf.train.Saver(restore_vars)
        scaffold = tf.train.Scaffold(ready_for_local_init_op=ready_op,
                                     ready_op=ready_op, saver=restorer)
        if hooks == USE_DEFAULT:
            # Only the chief writes checkpoints.
            if self._is_chief and self._checkpoint_dir is not None:
                saver = tf.train.Saver(save_vars)
                hooks = [
                    tf.train.CheckpointSaverHook(
                        checkpoint_dir=self._checkpoint_dir, saver=saver,
                        save_steps=self._checkpoint_period)
                ]
            else:
                hooks = None
        if self._is_chief:
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=scaffold, master=self._target,
                config=self._config, checkpoint_filename_with_path=self._checkpoint)
        else:
            session_creator = tf.train.WorkerSessionCreator(
                scaffold=scaffold, master=self._target, config=self._config)
        with tf.train.MonitoredSession(session_creator, hooks=hooks) as sess:
            if purge_orphaned_summaries and self._summary_manager is not None:
                # Drop summary events newer than the restored global step so
                # curves don't overlap after a restart.
                _purge_orphaned_summaries(self._summary_manager.summary_writer,
                                          sess.run(tf.train.get_global_step()))
            self._session = sess
            yield sess
    def step(self, algorithm=None, fetches=None,
             feed_dict=None, summary_time=None, sess=None):
        """Run one training step (and, when due, summary ops).

        Returns the global step, or (step, fetched_values) when `fetches`
        is given.
        """
        if summary_time and algorithm is None:
            raise ValueError("algorithm cannot be None when summary_time is True")
        if algorithm is None and fetches is None:
            raise ValueError("algorithm and fetches cannot both be None")
        if sess is None:
            sess = self._session or tf.get_default_session()
        global_step = tf.train.get_global_step()
        step = sess.run(global_step)
        # Auto-decide whether this step should also emit summaries.
        if (summary_time is None
            and algorithm is not None
            and self._summary_manager is not None
            and self._summary_manager.summary_time(step=step)):
            summary_time = True
        run_fetches = {}
        if fetches is not None:
            run_fetches["fetches"] = fetches
        if algorithm is not None:
            run_fetches["train_op"] = algorithm.train_op
            if summary_time:
                run_fetches["logging"] = algorithm.logging_fetches
                run_fetches["summaries"] = algorithm.summaries
        if feed_dict is None:
            feed_dict = {}
        if algorithm is not None:
            algorithm_feed_dict = algorithm.get_feed_dict(
                sess, summary_time=summary_time)
        else:
            algorithm_feed_dict = {}
        # Refuse silently-overlapping placeholders between the two dicts.
        if len(algorithm_feed_dict.keys() & feed_dict.keys()) > 0:
            intersection = algorithm_feed_dict.keys() & feed_dict.keys()
            raise ValueError(
                "Algorithm feed dict intersects with the given feed dict: {}"
                .format(intersection)
            )
        feed_dict.update(algorithm_feed_dict)
        values = sess.run(run_fetches, feed_dict)
        if summary_time:
            logger.info("Step #{}, {}".format(step, values["logging"]))
            self._summary_manager.add_summary(values["summaries"], step=step)
        if "fetches" in values:
            return step, values["fetches"]
        else:
            return step
    def train(self, algorithm, num_steps):
        """Repeatedly step `algorithm` until the global step reaches num_steps."""
        global_step = tf.train.get_global_step()
        def _train(sess):
            step = sess.run(global_step)
            while not sess.should_stop() and step < num_steps:
                step = self.step(algorithm)
        if self._session is not None:
            _train(self._session)
        else:
            with self.managed_session() as sess:
                _train(sess)
class SingularTrainer(DistributedTrainer):
    """Single-process variant of DistributedTrainer (no cluster target).

    Uses tf.train.SingularMonitoredSession and restores `checkpoint`
    manually after session creation instead of via the session scaffold.
    """
    def __init__(self,
                 summary_manager=None,
                 checkpoint_dir=None,
                 checkpoint_period=None,
                 checkpoint=None,
                 config=USE_DEFAULT):
        super(SingularTrainer, self).__init__(
            target='',
            is_chief=True,
            summary_manager=summary_manager,
            checkpoint_dir=checkpoint_dir,
            checkpoint_period=checkpoint_period,
            checkpoint=checkpoint,
            config=config)
    @contextmanager
    def managed_session(self, save_vars=None, restore_vars=None,
                        hooks=USE_DEFAULT, purge_orphaned_summaries=True):
        """Yield a SingularMonitoredSession.

        save_vars/restore_vars of None mean "all saveable variables"
        (tf.train.Saver default). The restore happens after creation so it
        overwrites freshly initialized values.
        """
        if self._checkpoint is not None:
            # Build the restorer before the monitored session finalizes the graph.
            restorer = tf.train.Saver(restore_vars)
        if hooks == USE_DEFAULT:
            if self._checkpoint_dir is not None:
                saver = tf.train.Saver(save_vars)
                hooks = [
                    tf.train.CheckpointSaverHook(
                        self._checkpoint_dir,
                        saver=saver,
                        save_steps=self._checkpoint_period
                    ),
                ]
            else:
                hooks = None
        with tf.train.SingularMonitoredSession(hooks=hooks,
                                               config=self._config) as sess:
            if self._checkpoint is not None:
                restorer.restore(sess, self._checkpoint)
            if purge_orphaned_summaries and self._summary_manager is not None:
                # Drop summary events newer than the restored global step.
                _purge_orphaned_summaries(self._summary_manager.summary_writer,
                                          sess.run(tf.train.get_global_step()))
            self._session = sess
            yield sess
|
nilq/baby-python
|
python
|
# Three sample integer constants.
num1, num2, num3 = 11, 222, 3333
|
nilq/baby-python
|
python
|
""""
Copyright © Krypton 2021 - https://github.com/kkrypt0nn (https://krypt0n.co.uk)
Description:
This is a template to create your own discord bot in python.
Version: 4.1
"""
class UserBlacklisted(Exception):
    """Raised when a blacklisted user attempts an action."""

    def __init__(self, message="User is blacklisted!"):
        super().__init__(message)
        self.message = message
class UserNotOwner(Exception):
    """Raised when a non-owner user attempts an owner-only action."""

    def __init__(self, message="User is not an owner of the bot!"):
        super().__init__(message)
        self.message = message
|
nilq/baby-python
|
python
|
# Union-find parent array: a[i] is the parent of node i (roots satisfy a[i] == i).
a = [0, 0, 0, 1, 1, 1, 3, 3, 6, 6, 9, 9]
print(len(a))


def root(x):
    """Find x's root iteratively, halving the path as it climbs."""
    while a[x] != x:
        a[x] = a[a[x]]  # path halving: point x at its grandparent
        print(x, a[x])
        x = a[x]
    return x


def root2(x):
    """Find x's root recursively with full path compression."""
    if a[x] != x:
        a[x] = root2(a[x])
    return a[x]


root2(9)
print(a)
|
nilq/baby-python
|
python
|
"""
@Note: Implementation of Knowledge Distillation Algorithms
@Author: LucasX
"""
import copy
import os
import sys
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import confusion_matrix
from torch.optim import lr_scheduler
from torchvision import models
sys.path.append('../')
from research.kd import data_loader
from research.kd.cfg import cfg
from research.kd.losses import KDLoss, RegularizedTfKDLoss, SelfTfKDLoss
def train_model_with_kd(use_lsr, teacher_model_w_weights, student_model_wo_weights, dataloaders, criterion,
                        optimizer, scheduler,
                        num_epochs, inference=False):
    """
    train model with Knowledge Distillation
    :param use_lsr: whether to use LabelSmoothingRegularization (criterion then
        takes (student_outputs, labels) instead of (teacher, student, labels))
    :param teacher_model_w_weights: pretrained teacher (only produces soft targets)
    :param student_model_wo_weights: student network to be trained
    :param dataloaders: dict with 'train'/'val'/'test' DataLoaders; each batch is
        a dict with keys 'image', 'type' and (val/test) 'filename'
    :param criterion: distillation (or LSR) loss
    :param optimizer: optimizer over the student's parameters
    :param scheduler: learning-rate scheduler
    :param num_epochs: number of training epochs
    :param inference: if True, skip training and only evaluate on the test set
    :return:
    """
    print(student_model_wo_weights)
    model_name = student_model_wo_weights.__class__.__name__
    student_model_wo_weights = student_model_wo_weights.float()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    student_model_wo_weights = student_model_wo_weights.to(device)
    teacher_model_w_weights = teacher_model_w_weights.to(device)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        student_model_wo_weights = nn.DataParallel(student_model_wo_weights)
    dataset_sizes = {x: len(dataloaders[x].dataset) for x in ['train', 'val', 'test']}
    for _ in dataset_sizes.keys():
        print('Dataset size of {0} is {1}...'.format(_, dataset_sizes[_]))
    if not inference:
        print('Start training %s...' % model_name)
        since = time.time()
        best_model_wts = copy.deepcopy(student_model_wo_weights.state_dict())
        best_acc = 0.0
        for epoch in range(num_epochs):
            print('-' * 100)
            print('Epoch {}/{}'.format(epoch, num_epochs - 1))
            # Each epoch has a training and validation phase
            for phase in ['train', 'val']:
                if phase == 'train':
                    # torch <= 1.1.0 stepped the scheduler before the optimizer;
                    # newer versions step it after (see the mirror branch below).
                    if torch.__version__ <= '1.1.0':
                        scheduler.step()
                    student_model_wo_weights.train()  # Set model to training mode
                else:
                    student_model_wo_weights.eval()  # Set model to evaluate mode
                running_loss = 0.0
                running_corrects = 0
                # Iterate over data.
                for i, data in enumerate(dataloaders[phase], 0):
                    inputs, types = data['image'], data['type']
                    inputs = inputs.to(device)
                    types = types.to(device)
                    # zero the parameter gradients
                    optimizer.zero_grad()
                    # forward
                    # track history if only in train
                    with torch.set_grad_enabled(phase == 'train'):
                        student_outputs = student_model_wo_weights(inputs)
                        _, preds = torch.max(student_outputs, 1)
                        if not use_lsr:
                            # teacher forward pass supplies the soft targets
                            teacher_outputs = teacher_model_w_weights(inputs)
                        if use_lsr:
                            loss = criterion(student_outputs, types)
                        else:
                            loss = criterion(teacher_outputs, student_outputs, types)
                        # backward + optimize only if in training phase
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                    # statistics
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(preds == types.data)
                if phase == 'train':
                    if torch.__version__ >= '1.1.0':
                        scheduler.step()
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                    phase, epoch_loss, epoch_acc))
                # deep copy the model
                if phase == 'val' and epoch_acc > best_acc:
                    # New best model: re-run the val set to report confusion
                    # matrix / precision / recall, then checkpoint it.
                    tmp_correct = 0
                    tmp_total = 0
                    tmp_y_pred = []
                    tmp_y_true = []
                    tmp_filenames = []
                    for data in dataloaders['val']:
                        images, types, filename = data['image'], data['type'], data['filename']
                        images = images.to(device)
                        types = types.to(device)
                        outputs = student_model_wo_weights(images)
                        _, predicted = torch.max(outputs.data, 1)
                        tmp_total += types.size(0)
                        tmp_correct += (predicted == types).sum().item()
                        tmp_y_pred += predicted.to("cpu").detach().numpy().tolist()
                        tmp_y_true += types.to("cpu").detach().numpy().tolist()
                        tmp_filenames += filename
                    tmp_acc = tmp_correct / tmp_total
                    print('Confusion Matrix of {0} on val set: '.format(model_name))
                    cm = confusion_matrix(tmp_y_true, tmp_y_pred)
                    print(cm)
                    cm = np.array(cm)
                    print('Accuracy = {0}'.format(tmp_acc))
                    precisions = []
                    recalls = []
                    # column sums -> precision; row sums -> recall
                    for i in range(len(cm)):
                        precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
                        recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
                    print("Precision of {0} on val set = {1}".format(model_name,
                                                                     sum(precisions) / len(precisions)))
                    print(
                        "Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(student_model_wo_weights.state_dict())
                    student_model_wo_weights.load_state_dict(best_model_wts)
                    model_path_dir = './model'
                    os.makedirs(model_path_dir, exist_ok=True)
                    # DataParallel wraps the real model in `.module`
                    if torch.cuda.device_count() > 1:
                        torch.save(student_model_wo_weights.module.state_dict(),
                                   './model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
                    else:
                        torch.save(student_model_wo_weights.state_dict(),
                                   './model/{0}_best_epoch-{1}.pth'.format(model_name, epoch))
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print('Best val Acc: {:4f}'.format(best_acc))
        # load best model weights
        student_model_wo_weights.load_state_dict(best_model_wts)
        model_path_dir = './model'
        os.makedirs(model_path_dir, exist_ok=True)
        if torch.cuda.device_count() > 1:
            torch.save(student_model_wo_weights.module.state_dict(), './model/%s.pth' % model_name)
        else:
            torch.save(student_model_wo_weights.state_dict(), './model/%s.pth' % model_name)
    else:
        print('Start testing %s...' % model_name)
        student_model_wo_weights.load_state_dict(torch.load(os.path.join('./model/%s.pth' % model_name)))
        student_model_wo_weights.eval()
        correct = 0
        total = 0
        y_pred = []
        y_true = []
        filenames = []
        probs = []
        with torch.no_grad():
            for data in dataloaders['test']:
                images, types, filename = data['image'], data['type'], data['filename']
                images = images.to(device)
                types = types.to(device)
                outputs = student_model_wo_weights(images)
                outputs = F.softmax(outputs)
                # get TOP-K output labels and corresponding probabilities
                topK_prob, topK_label = torch.topk(outputs, 2)
                probs += topK_prob.to("cpu").detach().numpy().tolist()
                _, predicted = torch.max(outputs.data, 1)
                total += types.size(0)
                correct += (predicted == types).sum().item()
                y_pred += predicted.to("cpu").detach().numpy().tolist()
                y_true += types.to("cpu").detach().numpy().tolist()
                filenames += filename
        print('Accuracy of {0} on test set: {1}% '.format(model_name, 100 * correct / total))
        print(
            'Confusion Matrix of {0} on test set: '.format(model_name))
        cm = confusion_matrix(y_true, y_pred)
        print(cm)
        cm = np.array(cm)
        precisions = []
        recalls = []
        for i in range(len(cm)):
            precisions.append(cm[i][i] / sum(cm[:, i].tolist()))
            recalls.append(cm[i][i] / sum(cm[i, :].tolist()))
        print('Precision List: ')
        print(precisions)
        print('Recall List: ')
        print(recalls)
        print("Precision of {0} on val set = {1}".format(model_name,
                                                         sum(precisions) / len(precisions)))
        print(
            "Recall of {0} on val set = {1}".format(model_name, sum(recalls) / len(recalls)))
        print('Output CSV...')
        col = ['filename', 'gt', 'pred', 'prob']
        # one row per test image: filename, ground truth, prediction, top-1 prob
        df = pd.DataFrame([[filenames[i], y_true[i], y_pred[i], probs[i][0]] for i in range(len(filenames))],
                          columns=col)
        df.to_csv("./%s.csv" % model_name, index=False)
        print('CSV has been generated...')
def run_img_classification(teacher_w_weights, student_wo_weights, epoch):
    """
    run image classification
    :param teacher_w_weights: pretrained teacher network (weights already loaded)
    :param student_wo_weights: student network to train via distillation
    :param epoch: number of training epochs
    :return:
    """
    # Loss choice: label-smoothing-style teacher-free KD vs vanilla Hinton KD.
    if cfg['use_lsr']:
        criterion = RegularizedTfKDLoss(alpha=0.5, temperature=10)
    else:
        criterion = KDLoss(alpha=0.5, temperature=10)  # vanilla KD Loss
    teacher_w_weights.eval()  # teacher is frozen in eval mode
    optimizer_ft = optim.SGD(student_wo_weights.parameters(), lr=cfg['init_lr'], momentum=0.9,
                             weight_decay=cfg['weight_decay'])
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=cfg['lr_decay_step'], gamma=0.1)
    # cosine_anneal_warmup_lr_scheduler = lr_scheduler.CosineAnnealingWarmRestarts(optimizer_ft, T_0=10, T_mult=10,
    #                                                                              eta_min=1e-5, last_epoch=-1)
    trainloader, valloader, testloader = data_loader.load_mengzhucrop_data()
    dataloaders = {
        'train': trainloader,
        'val': valloader,
        'test': testloader,
    }
    train_model_with_kd(use_lsr=cfg['use_lsr'], teacher_model_w_weights=teacher_w_weights,
                        student_model_wo_weights=student_wo_weights,
                        dataloaders=dataloaders, criterion=criterion, optimizer=optimizer_ft,
                        scheduler=exp_lr_scheduler, num_epochs=epoch, inference=False)
if __name__ == '__main__':
    # Teacher: DenseNet-169 with a replaced classifier head, restored from a
    # local checkpoint (path is machine-specific -- adjust before running).
    densenet169 = models.densenet169(pretrained=False)
    num_ftrs = densenet169.classifier.in_features
    densenet169.classifier = nn.Linear(num_ftrs, cfg['out_num'])
    densenet169.load_state_dict(torch.load("/home/lucasxu/ModelZoo/DenseNet169.pth"))
    # shufflenet_v2 = models.shufflenet_v2_x1_0(pretrained=True)
    # num_ftrs = shufflenet_v2.fc.in_features
    # shufflenet_v2.fc = nn.Linear(num_ftrs, cfg['out_num'])
    # Student: ImageNet-pretrained MobileNetV2 with a replaced classifier head.
    mobilenet_v2 = models.mobilenet_v2(pretrained=True)
    num_ftrs = mobilenet_v2.classifier[1].in_features
    mobilenet_v2.classifier[1] = nn.Linear(num_ftrs, cfg['out_num'])
    # resnet18 = models.resnet18(pretrained=True)
    # num_ftrs = resnet18.fc.in_features
    # resnet18.fc = nn.Linear(num_ftrs, 6)
    # mixnet_m = ptcv_get_model("mixnet_m", pretrained=True)
    # num_ftrs = mixnet_m.output.in_features
    # mixnet_m.output = nn.Linear(num_ftrs, 6)
    # condensenet74 = ptcv_get_model("condensenet74_c4_g4", pretrained=True)
    # condensenet74.output.linear = nn.Linear(1032, 6, bias=True)
    run_img_classification(teacher_w_weights=densenet169, student_wo_weights=mobilenet_v2, epoch=cfg['epoch'])
|
nilq/baby-python
|
python
|
import tensorflow as tf
import numpy as np
import os
# from sklearn.manifold._utils import _binary_search_perplexity
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
class TSNE:
    """t-SNE embedding computed with TensorFlow 1.x graph ops.

    Loosely mirrors sklearn's TSNE interface: construct with
    hyper-parameters, then call fit_transform(X) to obtain the
    n_components-dimensional embedding as a numpy array.
    """

    def __init__(self, n_components=2, perplexity=30, early_exaggeration=12,
                 learning_rate=500, n_iter=1000, momentum=0.8, verbose=0):
        self.learning_rate = learning_rate
        self.perplexity = perplexity
        self.n_components = n_components
        self.early_exaggeration = early_exaggeration
        self.n_iter = n_iter
        self.verbose = verbose
        self.momentum = momentum

    def fit_transform(self, P_coor):
        """Embed the high-dimensional points P_coor; returns the coordinates."""
        with tf.Graph().as_default():
            p2, p, sigma_mean, dists = TSNE.p_joint(P_coor, self.perplexity)
            sigma_mean = tf.Variable(sigma_mean, trainable=False)
            # P is exaggerated for the first 100 iterations (cluster separation).
            P_ = tf.Variable(p2 * self.early_exaggeration, trainable=False)
            P = tf.stop_gradient(P_)
            Q_coor = tf.Variable(tf.random_normal([tf.shape(P_coor)[0], self.n_components]))
            momentum = tf.Variable(0.8, trainable=False)
            Q_coor_loss, grad = TSNE.tsne(P, Q_coor)
            opt = TSNE.gradient_descent(Q_coor_loss, grad, Q_coor, self.learning_rate, momentum)
            grad_norm = tf.linalg.norm(grad)
            with tf.Session(config=config) as sess:
                sess.run(tf.global_variables_initializer())
                if self.verbose >= 2:
                    print("sigma mean:", sess.run(sigma_mean))
                for i in range(self.n_iter):
                    # BUG FIX: was `if i is 100:` -- identity comparison with an
                    # int literal, which only works via CPython small-int caching
                    # and is a SyntaxWarning on Python 3.8+.
                    if i == 100:
                        sess.run(P_.assign(P / self.early_exaggeration))
                        if self.verbose >= 2:
                            print("early exaggeration end.")  # referring to sklearn's schedule
                    q, _, loss, gn = sess.run([Q_coor, opt, Q_coor_loss, grad_norm])
                    if self.verbose >= 2 and i % 50 == 0:
                        print("Iteration {} loss: {}, grad norm: {:.6f}".format(i, loss, gn))
        return q

    @staticmethod
    def remove_diag(x):
        """Drop the main diagonal of square matrix x, giving shape [n, n-1]."""
        diag_not_mask = ~tf.cast(tf.diag(tf.ones(x.shape[:1])), dtype=tf.bool)
        with_out_diag = tf.reshape(tf.boolean_mask(x, diag_not_mask), x.shape - np.array([0, 1]))
        return with_out_diag

    @staticmethod
    def add_diag(x):
        """Inverse of remove_diag: re-insert a zero main diagonal into [n, n-1] x."""
        xshape = tf.shape(x)
        tmp = tf.reshape(x, [xshape[1], xshape[0]])
        a = tf.zeros([xshape[1], 1], dtype=tf.float32)
        tmp = tf.concat([tmp, a], axis=1)
        tmp = tf.concat([[0.0], tf.reshape(tmp, [-1])], axis=0)
        tmp = tf.reshape(tmp, [xshape[0], xshape[0]])
        return tmp

    @staticmethod
    def squared_dists(x, diag=False):  # |x_i - x_j|^2
        """Pairwise squared Euclidean distances; diagonal removed unless diag=True."""
        sum_square = tf.reduce_sum(tf.square(x), axis=1, keepdims=True)
        # clamp guards against small negatives caused by floating-point overflow
        dists = tf.maximum(sum_square - 2 * x @ tf.transpose(x) + tf.transpose(sum_square), 1e-6)
        if diag:
            return dists
        else:
            return TSNE.remove_diag(dists)

    @staticmethod
    def set_diag_zero(x):
        return tf.linalg.set_diag(x, tf.zeros(tf.shape(x)[:1]))

    @staticmethod
    def cross_entropy(x, y, axis=-1):
        """H(x, y) = -sum(x * log y), treating 0 * log(0) as 0."""
        safe_y = tf.where(tf.equal(x, 0.), tf.ones_like(y), y)
        return -tf.reduce_sum(x * tf.log(safe_y), axis)

    @staticmethod
    def softmax_entropy_with_logits(logits, axis=-1):
        """Return (H, P) where P = softmax(logits) and H = -sum(P*log P).

        Derivation:
          -sum(p*log p) = -sum(p*(logits - lse(logits)))
                        = lse(logits) - E_p[logits]
        """
        P = tf.nn.softmax(logits, axis=axis)
        H = tf.reduce_logsumexp(logits, axis=axis) - tf.reduce_sum(P * logits, axis=axis)
        return H, P

    @staticmethod
    def calc_perplexity_and_probs(neg_dists, betas):  # betas = 1/(2*sigmas^2)
        logits = neg_dists * tf.reshape(betas, [-1, 1])
        return TSNE.softmax_entropy_with_logits(logits)

    @staticmethod
    def binary_search_sigma(neg_dists, target, tol=1e-5, max_iter=50, lower=1e-20, upper=1000000.):
        """Per-point binary search for betas matching the target perplexity.

        Runs the whole batch in one tf.while_loop; rows whose entropy is
        within `tol` of log(target) are frozen via the finding mask.
        """
        target_entropy = np.log(target)

        def body(lows, ups, ans, finding_mask, x):
            finding_indices = tf.cast(tf.where(finding_mask), tf.int32)
            guess = (lows + ups) / 2
            # Only evaluate rows still being searched.
            val2, _ = TSNE.calc_perplexity_and_probs(tf.boolean_mask(neg_dists, finding_mask),
                                                     tf.boolean_mask(guess, finding_mask))
            val = tf.scatter_nd(finding_indices, val2, tf.shape(finding_mask))
            diff = val - target_entropy
            new_ans_mask = ((tf.abs(diff) <= tol) | tf.equal(x + 1, max_iter)) & finding_mask
            new_finding_mask = ~new_ans_mask & finding_mask
            greater_mask = (diff < -tol) & finding_mask
            leq_mask = (diff > tol) & finding_mask
            return [tf.where(leq_mask, guess, lows),
                    tf.where(greater_mask, guess, ups),
                    tf.where(new_ans_mask, guess, ans),
                    new_finding_mask,
                    tf.add(x, 1)]

        cond = lambda a, b, ans, finding_mask, x: tf.reduce_any(finding_mask) & (x < max_iter)
        nums = tf.shape(neg_dists)[:1]
        lows = tf.fill(nums, lower)
        ups = tf.fill(nums, upper)
        finding_mask = tf.fill(nums, True)
        res = tf.while_loop(cond, body, (lows, ups, lows, finding_mask, 0), back_prop=False)
        ans = res[2]
        return tf.identity(ans, name='betas')

    @staticmethod
    def p_joint(x, target_perplexity):
        """Symmetrized joint probabilities P for the input points.

        Returns (P_joint, conditional p, mean sigma, negative squared dists).
        """
        neg_dists_no_diag = -TSNE.squared_dists(x, diag=False)
        betas = TSNE.binary_search_sigma(neg_dists_no_diag, target_perplexity)
        p = tf.nn.softmax(neg_dists_no_diag * tf.reshape(betas, [-1, 1]))
        p = TSNE.add_diag(p)
        p = p / tf.reduce_sum(p, axis=-1, keepdims=True)
        return (p + tf.transpose(p)) / (2 * tf.cast(tf.shape(x)[0], dtype=tf.float32)), p, tf.reduce_mean(tf.sqrt(1 / betas)), neg_dists_no_diag

    @staticmethod
    def tsne(p, y):
        """KL(P||Q) loss and its analytic gradient w.r.t. the embedding y."""
        dists = TSNE.squared_dists(y, diag=True)
        q_num = TSNE.set_diag_zero(1 / (1 + dists))  # Student-t kernel numerator
        q = tf.nn.relu(q_num / tf.reduce_sum(q_num))
        y = tf.expand_dims(y, axis=-2)
        y_cross_diff = y - tf.transpose(y, [1, 0, 2])
        loss = -tf.reduce_sum(TSNE.cross_entropy(p, p) - TSNE.cross_entropy(p, q))
        grad = tf.reduce_sum((tf.expand_dims((p - q) * q_num, axis=-1)) * y_cross_diff, axis=1)
        return loss, grad

    @staticmethod
    def gradient_descent(loss, grad, x, lr, momentum, min_gain=0.01):
        """Momentum update with per-element adaptive gains (sklearn-style).

        CONSISTENCY FIX: @staticmethod added -- every sibling helper has it;
        the original bare function only worked because Python 3 class-attribute
        access returns a plain function.
        """
        gains = tf.Variable(tf.ones_like(x, dtype=tf.float32))
        update = tf.Variable(tf.zeros_like(x, dtype=tf.float32))
        # Increase gain when the gradient flips direction, decay otherwise.
        direct = update * grad < 0.0
        gains = gains.assign(tf.maximum(tf.where(direct, gains + 0.2, gains * 0.8), min_gain))
        update = update.assign(update * momentum - lr * grad * gains)
        return x.assign(x + update)
|
nilq/baby-python
|
python
|
# Read n and print 2 + 4 + ... + 2**n, which equals 2**(n+1) - 2.
n = int(input())
print(2 ** (n + 1) - 2)
|
nilq/baby-python
|
python
|
import unittest
from typing import Generator, List
from common import open_fixture
# Repeating base multiplier pattern for the day-16 "FFT".
BASE_PATTERN = (0, 1, 0, -1)


def decode(s: str) -> List[int]:
    """Turn a digit string (surrounding whitespace allowed) into a digit list."""
    return list(map(int, s.strip()))


def pattern(position: int) -> Generator[int, None, None]:
    """Yield the infinite multiplier pattern for the 1-based `position`.

    Each BASE_PATTERN element is repeated `position` times, and the very
    first value of the resulting sequence is dropped.
    """
    drop_first = True
    while True:
        for value in BASE_PATTERN:
            for _ in range(position):
                if drop_first:
                    drop_first = False
                    continue
                yield value
def fft(signal: List[int]) -> None:
    """Apply one in-place FFT phase: each output digit is the last decimal
    digit of the dot product of `signal` with its position's pattern."""
    result = []
    for pos in range(1, len(signal) + 1):
        gen = pattern(pos)
        total = sum(digit * next(gen) for digit in signal)
        result.append(abs(total) % 10)
    signal[:] = result
class TestDay16(unittest.TestCase):
    def test_part1_example1(self):
        signal = decode("12345678")
        expected_phases = [
            "48226158",
            "34040438",
            "03415518",
            "01029498",
        ]
        for expected in expected_phases:
            fft(signal)
            self.assertListEqual(signal, decode(expected))

    def test_part1_example2(self):
        cases = [
            ("80871224585914546619083218645595", "24176176"),
            ("19617804207202209144916044189917", "73745418"),
            ("69317163492948606335995924319873", "52432133"),
        ]
        for raw, first_eight in cases:
            signal = decode(raw)
            for _ in range(100):
                fft(signal)
            self.assertListEqual(signal[:8], decode(first_eight))

    def test_part1(self):
        with open_fixture("day16") as fp:
            signal = decode(fp.readline())
        for _ in range(100):
            fft(signal)
        self.assertListEqual(signal[:8], decode("96136976"))

    def test_part2(self):
        # TODO: exploit the predictable pattern in the last half of the algorithm
        # Answer: 85,600,369
        # https://www.reddit.com/r/adventofcode/comments/ebf5cy/2019_day_16_part_2_understanding_how_to_come_up/fb4bvw4/
        pass
|
nilq/baby-python
|
python
|
class newNode:
    """Binary-tree node carrying a key and a horizontal-distance slot."""

    def __init__(self, key):
        self.data = key     # node payload
        self.left = None    # left child
        self.right = None   # right child
        self.hd = 0         # horizontal distance from the root
# function should print the topView
# of the binary tree
def topview(root) :
if(root == None) :
return
q = []
mp = dict()
head = 0
root.head = head
# push node and horizontal
# distance to queue
q.append(root)
while(len(q)) :
root = q[0]
head = root.head
# count function returns 1 if the
# container contains an element
# whose key is equivalent to hd,
# or returns zero otherwise.
if head not in mp:
mp[head] = root.data
if(root.left) :
root.left.head = head – 1
q.append(root.left)
if(root.right):
root.right.head = head + 1
q.append(root.right)
q.pop(0)
for i in sorted (mp):
print(mp[i], end = “”)
# Driver Code
if __name__ == '__main__':
    # Test tree:        1
    #                  / \
    #                 2   3
    #                  \
    #                   4
    #                    \
    #                     5
    #                      \
    #                       6
    root = newNode(1)
    root.left = newNode(2)
    root.right = newNode(3)
    root.left.right = newNode(4)
    root.left.right.right = newNode(5)
    root.left.right.right.right = newNode(6)
    # BUG FIX: the original used curly quotes (‘...’ / “...”), which are
    # SyntaxErrors in Python source.
    print("Following are nodes in top", "view of Binary Tree")
    topview(root)
|
nilq/baby-python
|
python
|
import tensorflow as tf
import dnnlib.tflib as tflib
from training import dataset
from training import misc
from metrics import metric_base
class ACC(metric_base.MetricBase):
    """Classification accuracy metric evaluated over a TFRecord test set.

    NOTE(review): assumes images arrive in [0, 255] dynamic range and that
    `classifier` emits one raw score per class -- confirm against the
    training pipeline.
    """
    def __init__(self, num_images, minibatch_per_gpu, test_data_dir, test_dataset, **kwargs):
        super().__init__(**kwargs)
        # num_images: evaluation stops once this many images are scored.
        self.num_images = num_images
        self.minibatch_per_gpu = minibatch_per_gpu
        self.test_data_dir = test_data_dir
        self.test_dataset = test_dataset
    def _evaluate(self, classifier, Gs_kwargs, num_gpus):
        # Load and configure the test dataset for fixed-size minibatches.
        self._set_dataset_obj(dataset.load_dataset(tfrecord_dir=self.test_dataset, data_dir=self.test_data_dir, shuffle_mb=2048))
        dataset_object = self._get_dataset_obj()
        dataset_object.configure(minibatch_size=self.minibatch_per_gpu)
        num_correct = 0
        num_total = 0
        images_placeholder = tf.placeholder(shape=classifier.input_shapes[0], dtype=tf.float32)
        label_placeholder = tf.placeholder(shape=[None, dataset_object.label_size], dtype=tf.float32)
        # Map [0, 255] pixel values into the classifier's [-1, 1] input range.
        images_adjust = misc.adjust_dynamic_range(images_placeholder, [0, 255], [-1, 1])
        prediction = classifier.get_output_for(images_adjust)
        # One-hot argmax prediction dotted with one-hot labels counts the hits.
        one_hot_prediction = tf.one_hot(indices=tf.argmax(prediction, axis=-1), depth=dataset_object.label_size)
        num_correct_pred = tf.reduce_sum(one_hot_prediction * label_placeholder)
        while num_total < self.num_images:
            images, labels = dataset_object.get_minibatch_np(minibatch_size=self.minibatch_per_gpu)
            num_correct_pred_out = tflib.run(
                num_correct_pred
                , feed_dict={
                    images_placeholder: images,
                    label_placeholder: labels
                })
            num_correct += num_correct_pred_out
            num_total += self.minibatch_per_gpu
        self._report_result(num_correct / num_total)
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
"""
GaussLaguerre_doughnut.py
Calculates the intensity- and phase distributions of
Laguerre-Gauss doughnut laser modes.
cc Fred van Goor, May 2020.
"""
from LightPipes import *
import matplotlib.pyplot as plt
if LPversion < "2.0.0":
print(r'You need to upgrade LightPipes to run this script.' + '\n'+r'Type at a terminal prompt: $ pip install --upgrade LightPipes')
exit(1)
wavelength = 500*nm
size = 15*mm
N = 200
w0=3*mm
i=0
m_max=6
fig, axs = plt.subplots(nrows=2, ncols=m_max,figsize=(11.0,5.0))
s=r'Doughnut laser modes'
fig.suptitle(s)
F=Begin(size,wavelength,N)
n=0
for m in range(1,m_max+1):
F=GaussBeam(w0,F,doughnut=True,n=n,m=m)
I=Intensity(0,F)
Phi=Phase(F)
s=f'$LG_{n}$' + f'$_{m}$' + '$_*$'
axs[0][m-1].imshow(I,cmap='jet'); axs[0][m-1].axis('off'); axs[0][m-1].set_title(s)
axs[1][m-1].imshow(Phi,cmap='rainbow'); axs[1][m-1].axis('off');
plt.show()
|
nilq/baby-python
|
python
|
from layers import *
class CNN(object):
    """
    Implements Convolutional Neural Network
    Input shape: [8, 3, 32, 32]---------->[batch size, channels, height, width]
    Model Architecture:
    ----------------------------------------------------------------
    Layer (type)               Output Shape         Param #
    ================================================================
    Conv2d-1                 [8, 8, 32, 32]             224
    ReLU-2                   [8, 8, 32, 32]               0
    MaxPool2d-3              [8, 8, 16, 16]               0
    Conv2d-4                [8, 16, 16, 16]           1,168
    ReLU-5                  [8, 16, 16, 16]               0
    MaxPool2d-6               [8, 16, 8, 8]               0
    Linear-7                       [8, 100]         102,500
    ReLU-8                         [8, 100]               0
    Linear-9                         [8, 3]             303
    ================================================================
    Total params: 104,195
    Trainable params: 104,195
    Non-trainable params: 0
    """
    def __init__(self, in_channels, out_dims):
        # in_channels: input image channels; out_dims: number of classes.
        self.in_channels = in_channels
        self.out_dims = out_dims
        # C1
        self.conv1 = Conv2d(in_channels=self.in_channels, out_channels=8, kernel_size=3, strides=1)
        self.relu1 = ReLU()
        self.max_pool1 = MaxPooling2d(kernel_size=2, strides=2)
        # C2
        self.conv2 = Conv2d(in_channels=8, out_channels=16, kernel_size=3, strides=1)
        self.relu2 = ReLU()
        self.max_pool2 = MaxPooling2d(kernel_size=2, strides=2)
        self.flatten = Flatten()
        self.fc1 = Dense(in_dims=16 * 8 * 8, out_dims=100)
        self.relu3 = ReLU()
        self.fc2 = Dense(in_dims=100, out_dims=self.out_dims)
        self.softmax = function.softmax
        # Only the parameterized layers, in forward order (used by params()).
        self.layers = [self.conv1, self.conv2, self.fc1, self.fc2]
    def forward(self, x):
        """Forward pass: conv/pool stacks, flatten, two dense layers, softmax."""
        # C1
        x = self.conv1.forward(x)
        x = self.relu1.forward(x)
        x = self.max_pool1.forward(x)
        # print(x.shape)
        # C2
        x = self.conv2.forward(x)
        x = self.relu2.forward(x)
        x = self.max_pool2.forward(x)
        # print(x.shape)
        # Flatten
        x = self.flatten.forward(x)
        # print(x.shape)
        # Fully connected layer
        x = self.fc1.forward(x)
        x = self.relu3.forward(x)
        # print(x.shape)
        x = self.fc2.forward(x)
        # print(x.shape)
        output = self.softmax(x)
        # print(x.shape)
        return output
    def backward(self, y, p_y):
        """Backprop through the network in reverse layer order.

        y: one-hot labels; p_y: softmax output from forward().
        deltaL starts as the softmax+cross-entropy gradient (p_y - y).
        """
        deltaL = p_y - y
        deltaL = self.fc2.backward(deltaL)
        deltaL = self.relu3.backward(deltaL)
        deltaL = self.fc1.backward(deltaL)
        deltaL = self.flatten.backward(deltaL)
        # C2
        deltaL = self.max_pool2.backward(deltaL)
        deltaL = self.relu2.backward(deltaL)
        deltaL = self.conv2.backward(deltaL)
        # C1
        deltaL = self.max_pool1.backward(deltaL)
        deltaL = self.relu1.backward(deltaL)
        self.conv1.backward(deltaL)
    def params(self):
        """Collect weights/biases into {'w1': ..., 'b1': ..., ...} by layer order."""
        params = {}
        for i, layer in enumerate(self.layers):
            params['w' + str(i+1)] = layer.params['w']
            params['b' + str(i+1)] = layer.params['b']
        return params
    def set_params(self, params):
        """Load weights/biases produced by params() back into the layers."""
        for i, layer in enumerate(self.layers):
            layer.params['w'] = params['w' + str(i+1)]
            layer.params['b'] = params['b' + str(i+1)]
|
nilq/baby-python
|
python
|
from collections import Counter

# Read one whitespace-separated line, lower-case each token, and print the
# tokens that occur an odd number of times, comma-separated, in
# first-appearance order (Counter preserves insertion order).
occurrence_list = [item.lower() for item in input().split()]
# FIX: dropped the redundant list() wrapper around a list comprehension.
odd_occurrence = [key for key, value in Counter(occurrence_list).items() if value % 2 != 0]
print(', '.join(odd_occurrence))
|
nilq/baby-python
|
python
|
import os
import shutil
import time

import requests
import wget

# Trigger a capture on the controller, give it time to finish, then stop it.
r = requests.post("http://10.42.0.255:8000/start")
time.sleep(4)
r = requests.post("http://10.42.0.255:8000/stop")

# Download the captured image and move it into the local pictures folder.
# FIX: wget.download() returns the downloaded file's path as a str; the
# original called a non-existent .save() method with invalid syntax
# (`file_name.save(/'pictures/10_picture.png')`).
file_url = 'http://10.42.0.100/get/10'
file_name = wget.download(file_url)
os.makedirs('pictures', exist_ok=True)  # ensure destination dir exists
shutil.move(file_name, 'pictures/10_picture.png')
|
nilq/baby-python
|
python
|
#
# Get the language breakdown for a repo
# Usage: ghb langs USER/REPO
#
import operator
import sys
import requests
from .helpers import credentials
URL = "https://api.github.com/repos/%s/languages"
def average(total, number):
    """Return *number* as a percentage of *total*, rounded to two decimals."""
    fraction = number / float(total)
    return round(fraction * 100, 2)
def main(args):
    """Fetch and print the per-language percentage breakdown for args.repo."""
    username, password = credentials.credentials()
    headers = {"Accept": "application/vnd.github.v3+json"}
    resp = requests.get(
        URL % args.repo, auth=(username, password), headers=headers
    )
    payload = resp.json()
    if resp.status_code != 200:
        sys.exit("Failed with error: %s" % (payload["message"]))
    # Byte counts per language -> percentage of the whole repo.
    total = sum(payload.values())
    percentages = {lang: average(total, count) for lang, count in payload.items()}
    # Largest share first.
    ranked = sorted(percentages.items(), key=lambda kv: kv[1], reverse=True)
    for lang, pct in ranked:
        print("{:>15}: {:8.2f}%".format(lang, pct))
|
nilq/baby-python
|
python
|
# Batch-remux media files under a work directory from one container format
# to another using ffmpeg (stream copy, no re-encode).
import os.path
import subprocess
import sys
from configparser import ConfigParser

# Working dir and extension types are passed through the CLI.
try:
    workDir = sys.argv[1]
    extType = sys.argv[2]
    newExtType = sys.argv[3]
except IndexError:
    raise Exception("Usage: python3 autompeg.py <path to workfolder> <old fileformat> <new fileformat>"
                    "e.g. (Windows) python3 autompeg.py C:\\Users\\Test\\Work .ts .mp4"
                    "e.g. (Mac) python3 autompeg.py /Volumes/Volume1/Work .ts .mp4")

# Read the ffmpeg binary location from config.ini.
config = ConfigParser(allow_no_value=True)
try:
    with open('config.ini', 'r') as cfg:
        config.read_file(cfg)
    path = config.get('Path of ffmpeg', 'path')
except IOError:
    print("Couldn't find or open configuration file for ffmpeg. Process is exiting now..")
    sys.exit()

# Normalize path separators on Windows so ffmpeg gets a valid work dir.
# FIX: dropped the dead `else: pass` branch.
if sys.platform.startswith('win32'):
    workDir = workDir.replace('/', '\\')

for root, directories, filenames in os.walk(workDir):
    for filename in filenames:
        filename = os.path.join(root, filename)
        if filename.endswith(extType):  # only convert files with the old extension
            # FIX: compute the new name only for matching files, and drop the
            # redundant f-string wrappers / intermediate aliases.
            newfilepath = os.path.splitext(filename)[0] + newExtType
            # ffmpeg itself reports a faulty input path, so no extra checks here.
            subprocess.run(
                [
                    path,             # path of ffmpeg
                    "-i",             # input file flag
                    filename,         # old media file
                    "-c:v",           # select video stream
                    "copy",           # copy the stream (no re-encode, no quality loss)
                    "-bsf:a",         # bitstream filter for the audio stream
                    "aac_adtstoasc",  # remove the ADTS header from the audio stream
                    newfilepath,      # 'new' media file
                ]
            )
|
nilq/baby-python
|
python
|
import logging
from riffdog.data_structures import FoundItem
from riffdog.resource import register, ResourceDirectory
from ...aws_resource import AWSRegionalResource
logger = logging.getLogger(__name__)
@register("aws_lambda_function")
class AWSLambdaFunction(AWSRegionalResource):
    """
    Riffdog resource inspector for AWS Lambda functions: reconciles real
    Lambda functions in each region against items predicted from state.
    """

    def fetch_real_regional_resources(self, region):
        """Scan one region for real Lambda functions, matching each against
        its predicted item (or recording it as unpredicted)."""
        # FIX: use the module-level logger instead of the logging root.
        logger.info("Looking for %s resources..." % self.resource_type)
        client = self._get_client("lambda", region)
        rd = ResourceDirectory()
        # NOTE(review): list_functions() is paginated; a NextMarker loop may be
        # needed for accounts with many functions — confirm before relying on it.
        response = client.list_functions()
        for instance in response["Functions"]:
            try:
                item = rd.get_item(predicted_id=instance["FunctionName"])
                item.real_id = instance["FunctionName"]
                item.real_data = instance
            except KeyError:
                # that item isnt predicted!
                FoundItem("aws_lambda_function", real_id=instance["FunctionName"], real_data=instance)

    def process_state_resource(self, state_resource, state_filename):
        """Register every instance found for this resource in a state file."""
        logger.info("Found a resource of type %s!" % self.resource_type)
        for instance in state_resource["instances"]:
            FoundItem("aws_lambda_function", terraform_id=state_resource["name"], predicted_id=instance["attributes"]["id"], state_data=instance)

    def compare(self, item, depth):
        # Comparison is not implemented for Lambda resources yet.
        pass
|
nilq/baby-python
|
python
|
# using: encoding-utf8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
import os
from six.moves import cPickle
import utils.opts as opts
import models
from utils.dataloader import *
import torch.utils.tensorboard as td
import utils.eval_utils as eval_utils
import utils.utils as utils
from utils.rewards import init_cider_scorer, get_self_critical_reward, get_self_critical_cider_bleu_reward, init_bleu_scorer
# Parse command-line options at import time and pin the visible GPU(s)
# before any CUDA context is created.
opt = opts.parse_opt()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id
def train(opt):
    """Main training loop for the captioning model.

    Runs cross-entropy training (NewNoam LR schedule) and switches to
    self-critical (RL) fine-tuning with Adam once `opt.self_critical_after`
    epochs are reached. Periodically evaluates on the val split and writes
    model/optimizer checkpoints plus infos/histories pickles to
    `opt.ckpt_path`; scalars go to tensorboard.

    Fixes vs. the previous revision:
      * `best_val_score` is now initialized, so the checkpoint branch no
        longer raises NameError when `opt.load_best_score != 1`.
      * periodic history dumps go to 'histories.pkl' (was 'histories_.pkl'),
        matching the filename the resume path reads.
    """
    opt.use_att = utils.if_use_att(opt.caption_model)
    loader = DataLoader(opt)
    opt.vocab_size = loader.vocab_size
    opt.seq_length = loader.seq_length
    td_summary_writer = td.writer.SummaryWriter(opt.ckpt_path)
    infos = {
        'iter': 0,
        'epoch': 0,
        'loader_state_dict': None,
        'vocab': loader.get_vocab(),
    }
    histories = {}
    if opt.start_from is not None:
        # open old infos and check if models are compatible
        with open(os.path.join(opt.start_from, 'infos.pkl'), 'rb') as f:
            infos = cPickle.load(f, encoding='latin-1')
            saved_model_opt = infos['opt']
            need_be_same = ["caption_model", "rnn_type", "rnn_size", "num_layers", "embed_weight_file"]
            for checkme in need_be_same:
                assert vars(saved_model_opt)[checkme] == vars(opt)[checkme], "Command line argument and saved model disagree on '%s' " % checkme
        if os.path.isfile(os.path.join(opt.start_from, 'histories.pkl')):
            with open(os.path.join(opt.start_from, 'histories.pkl'), 'rb') as f:
                histories = cPickle.load(f, encoding='latin-1')
    iteration = infos.get('iter', 0)
    epoch = infos.get('epoch', 0)
    # For back compatibility
    if 'iterators' in infos:
        infos['loader_state_dict'] = {split: {'index_list': infos['split_ix'][split], 'iter_counter': infos['iterators'][split]} for split in ['train', 'val', 'test']}
    loader.load_state_dict(infos['loader_state_dict'])
    val_result_history = histories.get('val_result_history', {})
    loss_history = histories.get('loss_history', {})
    lr_history = histories.get('lr_history', {})
    ss_prob_history = histories.get('ss_prob_history', {})
    # FIX: default to None so the comparison below is always defined.
    best_val_score = None
    if opt.load_best_score == 1:
        best_val_score = infos.get('best_val_score', None)
    model = models.setup(opt)
    model.cuda()
    update_lr_flag = True
    # Assure in training mode
    model.train()
    crit = utils.LanguageModelCriterion()
    rl_crit = utils.RewardCriterion()
    # Warm-up/decay Noam-style schedule for the cross-entropy phase.
    optimizer = utils.NewNoamOpt(optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0, betas=(0.9, 0.98), eps=1e-9), max_lr=opt.learning_rate, warmup=opt.newnoamopt_warmup, batchsize=opt.batch_size, decay_start=opt.newnoamopt_decay, datasize=len(loader.dataset.split_ix['train']))
    if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
        # Resuming directly into the RL phase: plain Adam instead.
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=opt.learning_rate, betas=(opt.optim_alpha, opt.optim_beta),
                               eps=opt.optim_epsilon, weight_decay=opt.weight_decay)
    params = list(model.named_parameters())
    grad_norm = np.zeros(len(params))
    loss_sum = 0
    while True:
        # Once per epoch (update_lr_flag), set the RL-phase learning rate.
        if opt.self_critical_after != -1 and epoch >= opt.self_critical_after and update_lr_flag and opt.caption_model in ['svbase', 'umv']:
            print('start self critical')
            if epoch >= 15 and epoch < 20 and opt.learning_rate_decay_start >= 0:
                opt.current_lr = opt.learning_rate
            elif epoch >= 20 and opt.learning_rate_decay_start >= 0:
                opt.current_lr = opt.learning_rate / 2.0
            utils.set_lr(optimizer, opt.current_lr)
            update_lr_flag = False
        # Assign the scheduled sampling prob
        if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
            frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
            opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
            model.ss_prob = opt.ss_prob
        # If start self critical training
        if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
            sc_flag = True
            opt.embed_weight_requires_grad = True
            init_cider_scorer(opt.cached_tokens)
            init_bleu_scorer()
        else:
            sc_flag = False
            opt.embed_weight_requires_grad = False
        start = time.time()
        # Load data from train split (0)
        data = loader.get_batch('train')
        print('Read data:', time.time() - start)
        torch.cuda.synchronize()
        start = time.time()
        num_bbox, att_feats = data['num_bbox'].cuda(), data['att_feats'].cuda()
        labels = data['labels'].cuda()
        masks = data['masks'].cuda()
        optimizer.zero_grad()
        if not sc_flag:
            # Cross-entropy phase: teacher-forced language-model loss.
            loss = crit(model(att_feats, num_bbox, labels), labels[:, 1:], masks[:, 1:])
        else:
            # Self-critical phase: REINFORCE with greedy-decoding baseline.
            gen_result, sample_logprobs = model.sample(att_feats, num_bbox, opt={'sample_max': 0})
            reward = get_self_critical_reward(model, att_feats, num_bbox, data, gen_result)
            loss = rl_crit(sample_logprobs, gen_result, torch.from_numpy(reward).float().cuda())
        loss.backward()
        utils.clip_gradient(optimizer, opt.grad_clip)
        # Accumulate per-parameter gradient norms for diagnostics.
        for grad_wt in range(len(params)):
            norm_v = torch.norm(params[grad_wt][1].grad).cpu().data.numpy() if params[grad_wt][
                1].grad is not None else 0
            grad_norm[grad_wt] += norm_v
        if not sc_flag:
            optimizer.step(epoch)  # NewNoamOpt wants the epoch for its schedule
        else:
            optimizer.step()
        train_loss = loss.item()
        loss_sum += train_loss
        torch.cuda.synchronize()
        end = time.time()
        if not sc_flag:
            print("iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
                  .format(iteration, epoch, train_loss, end - start))
        else:
            print("lr {} iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}" \
                  .format(opt.current_lr, iteration, epoch, np.mean(reward[:, 0]), end - start))
        # Update the iteration and epoch
        iteration += 1
        if sc_flag:
            del gen_result
            del sample_logprobs
        if data['bounds']['wrapped']:
            epoch += 1
            update_lr_flag = True
        # Write the training loss summary
        if (iteration % opt.losses_log_every == 0):
            if opt.noamopt:
                opt.current_lr = optimizer.rate()
            elif not sc_flag:
                opt.current_lr = optimizer.rate(epoch)
            if td is not None:
                td_summary_writer.add_scalar('train_loss', train_loss, iteration)
                td_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)
                td_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)
                if sc_flag:
                    td_summary_writer.add_scalar('avg_reward', np.mean(reward[:, 0]), iteration)
                # tf_summary_writer.flush()
            loss_history[iteration] = train_loss if not sc_flag else np.mean(reward[:, 0])
            lr_history[iteration] = opt.current_lr
            ss_prob_history[iteration] = model.ss_prob
        # make evaluation on validation set, and save model
        if (iteration % opt.save_checkpoint_every == 0):
            # eval model
            eval_kwargs = {'split': 'val',
                           'dataset': opt.input_json}
            eval_kwargs.update(vars(opt))
            val_loss, predictions, lang_stats = eval_utils.eval_split(model, loader, eval_kwargs)
            # Write validation result into summary
            if td is not None:
                td_summary_writer.add_scalar('validation loss', val_loss, iteration)
                if lang_stats is not None:
                    for k, v in lang_stats.items():
                        td_summary_writer.add_scalar(k, v, iteration)
                # tf_summary_writer.flush()
            val_result_history[iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
            # Save model if is improving on validation result
            if opt.language_eval == 1:
                current_score = lang_stats['CIDEr']
            else:
                current_score = - val_loss
            best_flag = False
            if True:  # if true
                if best_val_score is None or current_score > best_val_score:
                    best_val_score = current_score
                    best_flag = True
                checkpoint_path = os.path.join(opt.ckpt_path, 'model.pth')
                torch.save(model.state_dict(), checkpoint_path)
                print("model saved to {}".format(checkpoint_path))
                optimizer_path = os.path.join(opt.ckpt_path, 'optimizer.pth')
                torch.save(optimizer.state_dict(), optimizer_path)
                # Dump miscalleous informations
                infos['iter'] = iteration
                infos['epoch'] = epoch
                infos['loader_state_dict'] = loader.state_dict()
                histories['val_result_history'] = val_result_history
                histories['loss_history'] = loss_history
                histories['lr_history'] = lr_history
                histories['ss_prob_history'] = ss_prob_history
                with open(os.path.join(opt.ckpt_path, 'infos.pkl'), 'wb') as f:
                    cPickle.dump(infos, f)
                # FIX: was 'histories_.pkl', which the resume path never reads.
                with open(os.path.join(opt.ckpt_path, 'histories.pkl'), 'wb') as f:
                    cPickle.dump(histories, f)
                if best_flag:
                    checkpoint_path = os.path.join(opt.ckpt_path, 'model-best.pth')
                    torch.save(model.state_dict(), checkpoint_path)
                    print("model saved to {}".format(checkpoint_path))
                    with open(os.path.join(opt.ckpt_path, 'infos-best.pkl'), 'wb') as f:
                        cPickle.dump(infos, f)
            loss_sum = 0
            grad_norm = np.zeros(len(params))
        # Stop if reaching max epochs
        if epoch >= opt.max_epochs and opt.max_epochs != -1:
            # Final evaluation + checkpoint (same logic as the periodic save).
            eval_kwargs = {'split': 'val',
                           'dataset': opt.input_json}
            eval_kwargs.update(vars(opt))
            val_loss, predictions, lang_stats = eval_utils.eval_split(model, loader, eval_kwargs)
            # Write validation result into summary
            if td is not None:
                td_summary_writer.add_scalar('validation loss', val_loss, iteration)
                if lang_stats is not None:
                    for k, v in lang_stats.items():
                        td_summary_writer.add_scalar(k, v, iteration)
            val_result_history[iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
            # Save model if is improving on validation result
            if opt.language_eval == 1:
                current_score = lang_stats['CIDEr']
            else:
                current_score = - val_loss
            best_flag = False
            if True:  # if true
                if best_val_score is None or current_score > best_val_score:
                    best_val_score = current_score
                    best_flag = True
                checkpoint_path = os.path.join(opt.ckpt_path, 'model.pth')
                torch.save(model.state_dict(), checkpoint_path)
                print("model saved to {}".format(checkpoint_path))
                optimizer_path = os.path.join(opt.ckpt_path, 'optimizer.pth')
                torch.save(optimizer.state_dict(), optimizer_path)
                # Dump miscalleous informations
                infos['iter'] = iteration
                infos['epoch'] = epoch
                infos['loader_state_dict'] = loader.state_dict()
                histories['val_result_history'] = val_result_history
                histories['loss_history'] = loss_history
                histories['lr_history'] = lr_history
                histories['ss_prob_history'] = ss_prob_history
                with open(os.path.join(opt.ckpt_path, 'infos.pkl'), 'wb') as f:
                    cPickle.dump(infos, f)
                with open(os.path.join(opt.ckpt_path, 'histories.pkl'), 'wb') as f:
                    cPickle.dump(histories, f)
                if best_flag:
                    checkpoint_path = os.path.join(opt.ckpt_path, 'model-best.pth')
                    torch.save(model.state_dict(), checkpoint_path)
                    print("model saved to {}".format(checkpoint_path))
                    with open(os.path.join(opt.ckpt_path, 'infos-best.pkl'), 'wb') as f:
                        cPickle.dump(infos, f)
            break
        # Free per-batch tensors before the next iteration to cap GPU memory.
        if sc_flag:
            del loss
            del reward
        del att_feats
        del num_bbox
        del labels
        del masks
        del data
# Script entry: re-parse options and run the training loop.
opt = opts.parse_opt()
train(opt)
|
nilq/baby-python
|
python
|
# pylint: disable=C0103
import json
class CampaignObject():
    """Attribute wrapper around a campaign JSON payload.

    Accepts either a JSON string or an already-parsed dict; every known
    field becomes an attribute, defaulting to None when the key is absent.
    """
    def __init__(self, json_def):
        # FIX: isinstance() instead of `type(...) is str` for the type check,
        # and dict.get() (which returns None for missing keys) instead of 21
        # repetitions of "None if key not in s else s[key]".
        if isinstance(json_def, str):
            json_def = json.loads(json_def)
        s = json_def
        self.campaignTp = s.get('campaignTp')
        self.customerId = s.get('customerId')
        self.dailyBudget = s.get('dailyBudget')
        self.delFlag = s.get('delFlag')
        self.deliveryMethod = s.get('deliveryMethod')
        self.editTm = s.get('editTm')
        self.expectCost = s.get('expectCost')
        self.migType = s.get('migType')
        self.name = s.get('name')
        self.nccCampaignId = s.get('nccCampaignId')
        self.periodEndDt = s.get('periodEndDt')
        self.periodStartDt = s.get('periodStartDt')
        self.regTm = s.get('regTm')
        self.status = s.get('status')
        self.statusReason = s.get('statusReason')
        self.trackingMode = s.get('trackingMode')
        self.trackingUrl = s.get('trackingUrl')
        self.useDailyBudget = s.get('useDailyBudget')
        self.usePeriod = s.get('usePeriod')
        self.userLock = s.get('userLock')
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 bily Huazhong University of Science and Technology
#
# Distributed under terms of the MIT license.
"""Save the paths of crops from the ImageNet VID 2015 dataset in pickle format"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import os.path as osp
import pickle
import sys
import numpy as np
import tensorflow as tf
CURRENT_DIR = osp.dirname(__file__)
sys.path.append(osp.join(CURRENT_DIR, '..'))
from utils.misc_utils import sort_nicely
class Config:
    """Static configuration: dataset locations and the train/val split ratio."""
    ### Dataset
    # Directory where the curated crop dataset is stored.
    # dataset_dir = 'data/ILSVRC2015-VID-Curation'
    dataset_dir = '/data/ILSVRC2015_crops'
    # Directory where the imdb pickles are written.
    save_dir = 'data/'
    # Fraction of all videos reserved for validation.
    validation_ratio = 0.1
class DataIter:
    """Plain attribute bag holding the dataset slice for one iteration."""
class Dataset:
    """Enumerates curated ImageNet-VID crop videos and yields them in chunks."""

    def __init__(self, config):
        self.config = config

    def _get_unique_trackids(self, video_dir):
        """Get unique trackids within video_dir"""
        crop_paths = glob.glob(video_dir + '/*.crop.x.jpg')
        # File names look like <frame>.<trackid>.crop.x.jpg.
        return set(os.path.basename(p).split('.')[1] for p in crop_paths)

    def dataset_iterator(self, video_dirs):
        """Yield DataIter objects covering *video_dirs*, 150 videos at a time."""
        total = len(video_dirs)
        chunk_size = 150
        num_chunks = int(np.ceil(total / float(chunk_size)))
        for chunk_idx in range(num_chunks):
            offset = chunk_idx * chunk_size
            chunk = video_dirs[offset: offset + chunk_size]
            data_iter = DataIter()
            instance_videos = []
            for pos, video_dir in enumerate(chunk):
                print('Processing {}/{}...'.format(offset + pos, total))
                # One entry per (video, trackid) pair: the sorted, absolute
                # paths of that track's instance crops.
                for trackid in self._get_unique_trackids(video_dir):
                    paths = glob.glob(video_dir + '/*' + trackid + '.crop.x.jpg')
                    paths = sort_nicely(paths)  # sort by frame number
                    paths = [os.path.abspath(p) for p in paths]
                    instance_videos.append(paths)
            data_iter.num_videos = len(instance_videos)
            data_iter.instance_videos = instance_videos
            yield data_iter

    def get_all_video_dirs(self):
        """Return the path of every video directory under Data/VID/train.

        All training and validation videos of ILSVRC2015 were previously
        combined into the `train` directory, whose layout is:
            train/<a|b|c|...>/<ILSVRC2015_train_XXXXXXXX>/<frame>.<track>.crop.x.jpg
        """
        ann_dir = os.path.join(self.config.dataset_dir, 'Data', 'VID')
        train_root = os.path.join(ann_dir, 'train')
        all_video_dirs = []
        for sub in os.listdir(train_root):
            sub_dir = os.path.join(train_root, sub)
            all_video_dirs += [os.path.join(sub_dir, name) for name in os.listdir(sub_dir)]
        return all_video_dirs
def main():
    """Build train/validation imdb pickles from the curated crop directories."""
    config = Config()
    dataset = Dataset(config)
    all_video_dirs = dataset.get_all_video_dirs()
    num_validation = int(len(all_video_dirs) * config.validation_ratio)

    def build_imdb(dirs):
        # Accumulate all instance videos for one split into an imdb dict.
        imdb = dict()
        imdb['videos'] = []
        for data_iter in dataset.dataset_iterator(dirs):
            imdb['videos'] += data_iter.instance_videos
        imdb['n_videos'] = len(imdb['videos'])
        imdb['image_shape'] = (255, 255, 3)
        return imdb

    ### validation split comes first, training split is the remainder
    validation_imdb = build_imdb(all_video_dirs[:num_validation])
    train_imdb = build_imdb(all_video_dirs[num_validation:])

    if not tf.gfile.IsDirectory(config.save_dir):
        tf.logging.info('Creating training directory: %s', config.save_dir)
        tf.gfile.MakeDirs(config.save_dir)
    with open(os.path.join(config.save_dir, 'validation_imdb.pickle'), 'wb') as f:
        pickle.dump(validation_imdb, f)
    with open(os.path.join(config.save_dir, 'train_imdb.pickle'), 'wb') as f:
        pickle.dump(train_imdb, f)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import argparse
import datetime as dt
import os
from gpsynth.synthesizer import big_sweep, all_kernels
# Command-line front-end for the Gaussian-Process wavetable generator.
parser = argparse.ArgumentParser(description='Generate wavetables with Gaussian Processes')
parser.add_argument('path', metavar='path', type=str, nargs='?', default=None,
                    help='the parent directory, where the result is stored')
parser.add_argument('--lsdiv', metavar='N', type=int, required=False, default=16,
                    help='the number of lengthscale subdivisions')
parser.add_argument('--wavetables', metavar='N', type=int, required=False, default=7,
                    help='the number of (randomized) wavetables per setting of kernel and lengthscale')
options = parser.parse_args()

# Default output location: ./<timestamp>_multiexport under the current directory.
target_dir = options.path
if target_dir is None:
    stamp = dt.datetime.now().strftime('%Y%m%d-%H%M')
    target_dir = os.path.join(os.getcwd(), stamp + '_multiexport')
os.makedirs(target_dir, exist_ok=True)

big_sweep(all_kernels, target_dir, options.lsdiv, options.wavetables)
|
nilq/baby-python
|
python
|
# coding=utf-8
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils
from torch.autograd import Variable
from torch.nn import Parameter, init
from torch.nn._functions.rnn import variable_recurrent_factory, StackedRNN
from torch.nn.modules.rnn import RNNCellBase
from torch.nn.utils.rnn import PackedSequence
from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
class RecurrentDropoutLSTMCell(RNNCellBase):
    """LSTM cell with recurrent ("variational") dropout: one dropout mask per
    gate is sampled once per batch via set_dropout_masks() and reused at every
    time step, rather than resampled per step."""
    def __init__(self, input_size, hidden_size, dropout=0.):
        super(RecurrentDropoutLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        # Per-gate input-to-hidden (W_*) and hidden-to-hidden (U_*) weights;
        # gates are i (input), f (forget), c (cell candidate), o (output).
        self.W_i = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_i = Parameter(torch.Tensor(hidden_size, hidden_size))
        # self.b_i = Parameter(torch.Tensor(hidden_size))
        self.W_f = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_f = Parameter(torch.Tensor(hidden_size, hidden_size))
        # self.b_f = Parameter(torch.Tensor(hidden_size))
        self.W_c = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_c = Parameter(torch.Tensor(hidden_size, hidden_size))
        # self.b_c = Parameter(torch.Tensor(hidden_size))
        self.W_o = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_o = Parameter(torch.Tensor(hidden_size, hidden_size))
        # self.b_o = Parameter(torch.Tensor(hidden_size))
        # Fused biases in i, f, c, o order (layout expected by LSTMFused).
        self.bias_ih = Parameter(torch.Tensor(4 * hidden_size))
        self.bias_hh = Parameter(torch.Tensor(4 * hidden_size))
        # Masks are created lazily by set_dropout_masks().
        self._input_dropout_mask = self._h_dropout_mask = None
        self.reset_parameters()
    def reset_parameters(self):
        """Orthogonal init for all weight matrices; zero biases except the
        forget-gate slice of bias_ih, which starts at 1."""
        init.orthogonal(self.W_i)
        init.orthogonal(self.U_i)
        init.orthogonal(self.W_f)
        init.orthogonal(self.U_f)
        init.orthogonal(self.W_c)
        init.orthogonal(self.U_c)
        init.orthogonal(self.W_o)
        init.orthogonal(self.U_o)
        self.bias_ih.data.fill_(0.)
        # forget gate set to 1.
        self.bias_ih.data[self.hidden_size:2 * self.hidden_size].fill_(1.)
        self.bias_hh.data.fill_(0.)
    def set_dropout_masks(self, batch_size):
        """Sample (train) or fix (eval) the per-gate dropout masks for a batch.

        Must be called before forward(); in training mode it draws one
        Bernoulli mask per gate for inputs and hidden states, in eval mode it
        stores scalar keep-probabilities instead.
        """
        if self.dropout:
            if self.training:
                new_tensor = self.W_i.data.new
                self._input_dropout_mask = Variable(torch.bernoulli(
                    new_tensor(4, batch_size, self.input_size).fill_(1 - self.dropout)), requires_grad=False)
                self._h_dropout_mask = Variable(torch.bernoulli(
                    new_tensor(4, batch_size, self.hidden_size).fill_(1 - self.dropout)), requires_grad=False)
            else:
                # Eval: scale by keep-probability instead of masking.
                self._input_dropout_mask = self._h_dropout_mask = [1. - self.dropout] * 4
        else:
            self._input_dropout_mask = self._h_dropout_mask = [1.] * 4
    def forward(self, input, hidden_state):
        """One LSTM step: hidden_state is (h_{t-1}, c_{t-1}); returns (h_t, c_t)."""
        def get_mask_slice(mask, idx):
            # Eval mode stores scalars in a list; train mode stores per-sample
            # mask tensors that must be cut down to the current batch size.
            if isinstance(mask, list): return mask[idx]
            else: return mask[idx][:input.size(0)]
        h_tm1, c_tm1 = hidden_state
        # if self._input_dropout_mask is None:
        #     self.set_dropout_masks(input.size(0))
        # Gate pre-activations from the dropout-masked input ...
        xi_t = F.linear(input * get_mask_slice(self._input_dropout_mask, 0), self.W_i)
        xf_t = F.linear(input * get_mask_slice(self._input_dropout_mask, 1), self.W_f)
        xc_t = F.linear(input * get_mask_slice(self._input_dropout_mask, 2), self.W_c)
        xo_t = F.linear(input * get_mask_slice(self._input_dropout_mask, 3), self.W_o)
        # ... and from the dropout-masked previous hidden state.
        hi_t = F.linear(h_tm1 * get_mask_slice(self._h_dropout_mask, 0), self.U_i)
        hf_t = F.linear(h_tm1 * get_mask_slice(self._h_dropout_mask, 1), self.U_f)
        hc_t = F.linear(h_tm1 * get_mask_slice(self._h_dropout_mask, 2), self.U_c)
        ho_t = F.linear(h_tm1 * get_mask_slice(self._h_dropout_mask, 3), self.U_o)
        if input.is_cuda:
            # Fused CUDA path: concatenate gate pre-activations and let
            # LSTMFused apply biases and nonlinearities in one kernel.
            igates = torch.cat([xi_t, xf_t, xc_t, xo_t], dim=-1)
            hgates = torch.cat([hi_t, hf_t, hc_t, ho_t], dim=-1)
            state = fusedBackend.LSTMFused.apply
            return state(igates, hgates, c_tm1, self.bias_ih, self.bias_hh)
        else:
            # CPU fallback: standard LSTM equations, slicing the fused biases.
            i_t = F.sigmoid(xi_t + self.bias_ih[:self.hidden_size] + hi_t + self.bias_hh[:self.hidden_size])
            f_t = F.sigmoid(xf_t + self.bias_ih[self.hidden_size:2 * self.hidden_size] + hf_t + self.bias_hh[self.hidden_size:2 * self.hidden_size])
            c_t = f_t * c_tm1 + i_t * F.tanh(xc_t + self.bias_ih[2 * self.hidden_size:3 * self.hidden_size] + hc_t + self.bias_hh[2 * self.hidden_size:3 * self.hidden_size])
            o_t = F.sigmoid(xo_t + self.bias_ih[3 * self.hidden_size:4 * self.hidden_size] + ho_t + self.bias_hh[3 * self.hidden_size:4 * self.hidden_size])
            h_t = o_t * F.tanh(c_t)
            return h_t, c_t
class ParentFeedingLSTMCell(RNNCellBase):
    """LSTM cell with an additional "parent" hidden/cell state input: each
    gate mixes the current input, the previous state (h_tm1, c_tm1) and a
    parent state (h_tm1_p, c_tm1_p), with separate forget gates for both."""
    def __init__(self, input_size, hidden_size):
        super(ParentFeedingLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # Per-gate weights: W_* for the input, U_* for the previous hidden
        # state, U_*_p for the parent hidden state.
        self.W_i = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_i = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.U_i_p = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.b_i = Parameter(torch.Tensor(hidden_size))
        self.W_f = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_f = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.U_f_p = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.b_f = Parameter(torch.Tensor(hidden_size))
        # Separate bias for the parent forget gate.
        self.b_f_p = Parameter(torch.Tensor(hidden_size))
        self.W_c = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_c = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.U_c_p = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.b_c = Parameter(torch.Tensor(hidden_size))
        self.W_o = Parameter(torch.Tensor(hidden_size, input_size))
        self.U_o = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.U_o_p = Parameter(torch.Tensor(hidden_size, hidden_size))
        self.b_o = Parameter(torch.Tensor(hidden_size))
        self.reset_parameters()
    def reset_parameters(self):
        """Orthogonal init for all weights; zero biases except both forget
        biases, which start at 1."""
        init.orthogonal(self.W_i)
        init.orthogonal(self.U_i)
        init.orthogonal(self.U_i_p)
        init.orthogonal(self.W_f)
        init.orthogonal(self.U_f)
        init.orthogonal(self.U_f_p)
        init.orthogonal(self.W_c)
        init.orthogonal(self.U_c)
        init.orthogonal(self.U_c_p)
        init.orthogonal(self.W_o)
        init.orthogonal(self.U_o)
        init.orthogonal(self.U_o_p)
        self.b_i.data.fill_(0.)
        self.b_c.data.fill_(0.)
        self.b_o.data.fill_(0.)
        # forget bias set to 1.
        self.b_f.data.fill_(1.)
        self.b_f_p.data.fill_(1.)
    def forward(self, input, hidden_states):
        """One step: hidden_states is (h_tm1, c_tm1, h_tm1_p, c_tm1_p);
        returns (h_t, c_t)."""
        h_tm1, c_tm1, h_tm1_p, c_tm1_p = hidden_states
        i_t = F.sigmoid(F.linear(input, self.W_i) + F.linear(h_tm1, self.U_i) + F.linear(h_tm1_p, self.U_i_p) + self.b_i)
        # The input projection xf_t is shared by both forget gates.
        xf_t = F.linear(input, self.W_f)
        f_t = F.sigmoid(xf_t + F.linear(h_tm1, self.U_f) + self.b_f)
        f_t_p = F.sigmoid(xf_t + F.linear(h_tm1_p, self.U_f_p) + self.b_f_p)
        xc_t = F.linear(input, self.W_c) + F.linear(h_tm1, self.U_c) + F.linear(h_tm1_p, self.U_c_p) + self.b_c
        # Cell state forgets from BOTH the previous and the parent cell.
        c_t = f_t * c_tm1 + f_t_p * c_tm1_p + i_t * F.tanh(xc_t)
        o_t = F.sigmoid(F.linear(input, self.W_o) + F.linear(h_tm1, self.U_o) + F.linear(h_tm1_p, self.U_o_p) + self.b_o)
        h_t = o_t * F.tanh(c_t)
        return h_t, c_t
class LSTM(nn.Module):
    """Single-layer (optionally bidirectional) LSTM built on custom cells.

    Only PackedSequence input is supported (forward raises
    NotImplementedError otherwise). Uses the (pre-1.0) torch internals
    variable_recurrent_factory/StackedRNN to drive the per-step cell.
    """
    def __init__(self, input_size, hidden_size, bidirectional=False, dropout=0., cell_factory=RecurrentDropoutLSTMCell):
        super(LSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        self.dropout = dropout
        self.cell_factory = cell_factory
        num_directions = 2 if bidirectional else 1
        self.lstm_cells = []
        # One cell per direction, registered as 'cell' / 'cell_reverse' so
        # their parameters are tracked by nn.Module.
        for direction in range(num_directions):
            cell = cell_factory(input_size, hidden_size, dropout=dropout)
            self.lstm_cells.append(cell)
            suffix = '_reverse' if direction == 1 else ''
            cell_name = 'cell{}'.format(suffix)
            self.add_module(cell_name, cell)
    def forward(self, input, hidden_state=None):
        """Run the sequence. *input* must be a PackedSequence; returns
        (output PackedSequence, next_hidden)."""
        is_packed = isinstance(input, PackedSequence)
        if is_packed:
            input, batch_sizes = input
            max_batch_size = batch_sizes[0]
        else: raise NotImplementedError()
        # Sample the variational dropout masks once for the whole sequence.
        for cell in self.lstm_cells:
            cell.set_dropout_masks(max_batch_size)
        if hidden_state is None:
            # Zero initial (h, c) for every direction.
            num_directions = 2 if self.bidirectional else 1
            hx = torch.autograd.Variable(input.data.new(num_directions,
                                                        max_batch_size,
                                                        self.hidden_size).zero_())
            hidden_state = (hx, hx)
        rec_factory = variable_recurrent_factory(batch_sizes)
        # Weights live on the cells themselves, so StackedRNN gets empty
        # weight lists and the lambdas close over the registered cells.
        if self.bidirectional:
            layer = (rec_factory(lambda x, h: self.cell(x, h)),
                     rec_factory(lambda x, h: self.cell_reverse(x, h), reverse=True))
        else:
            layer = (rec_factory(lambda x, h: self.cell(x, h)),)
        func = StackedRNN(layer,
                          num_layers=1,
                          lstm=True,
                          dropout=0.,
                          train=self.training)
        next_hidden, output = func(input, hidden_state, weight=[[], []])
        if is_packed:
            output = PackedSequence(output, batch_sizes)
        return output, next_hidden
|
nilq/baby-python
|
python
|
from flask import Flask, send_from_directory, request
from flask_restful import Api, Resource, reqparse
import json
import numpy as np
import datetime
import csv
import click
from dlw import DLWSubject
# Location of the pre-built static front-end bundle served by Flask.
STATICS_LOCATION = 'dist'
app = Flask(__name__, static_url_path='', static_folder=STATICS_LOCATION)
api = Api(app)
# Module-level cache of the most recent calculation (set by the /calculate
# POST handler and read by later requests).
CALCULATED_RESULTS = None  # type: DLWSubject
@app.route('/calculate', methods=['POST'])
def calculate_from_inputs():
    """Run the doubly-labeled-water calculations on the posted JSON payload.

    The body carries sample datetimes, deuterium/18O measurements, dose and
    subject information.  The fitted DLWSubject is stored in the module-global
    CALCULATED_RESULTS (later used by /export) and a JSON summary of the
    calculated results and error flags is returned.
    """
    input_data = json.loads(request.get_data().decode('utf-8'))
    # Each datetime arrives as a [year, month, day, hour, minute] list.
    datetimes = [datetime.datetime(l[0], l[1], l[2], l[3], l[4]) for l in input_data['datetimes']]
    # Blank measurement cells are treated as missing values (NaN).
    d_meas = [d if d != "" else np.nan for d in input_data['d_meas']]
    o_meas = [d if d != "" else np.nan for d in input_data['o18_meas']]
    global CALCULATED_RESULTS
    CALCULATED_RESULTS = DLWSubject(d_meas=np.asarray(d_meas, dtype=float),
                                    o18_meas=np.asarray(o_meas, dtype=float),
                                    sample_datetimes=np.asarray(datetimes),
                                    dose_weights=np.asarray(input_data['dose_weights'], dtype=float),
                                    mixed_dose=input_data['mixed_dose'],
                                    dose_enrichments=np.asarray(input_data['dose_enrichments'], dtype=float),
                                    rq=float(input_data['rq']) if input_data['rq'] else None,
                                    subject_weights=np.asarray(input_data['subject_weights'], dtype=float),
                                    subject_id=input_data['subject_id'],
                                    in_permil=input_data['in_permil'],
                                    expo_calc=input_data['exponential'],
                                    pop_avg_rdil=float(input_data['pop_avg_rdil']) if input_data[
                                        'pop_avg_rdil'] else None)

    def sort_calculated_results(results):
        # Shape one calculation variant (schoeller/racette/speakman) into
        # {key: [display label, rounded value]} pairs for the front end.
        return {
            "rco2_ee_int": {
                "rco2_mol_day": ["rCO2 (mol/day)", round(results['co2_int_mol_day'], 2)],
                "rco2_l_hr": ["rCO2 (L/day)", round(results['co2_int_L_day'], 1)],
                "ee_kcal_day": ["EE (kcal/day)", round(results['tee_int_kcal_day'], 1)],
                "ee_mj_day": ["EE (MJ/day)", round(results['tee_int_mj_day'], 2)]
            },
            "rco2_ee_plat": {
                "rco2_mol_day": ["rCO2 (mol/day)", round(results['co2_plat_mol_day'], 2)],
                "rco2_l_hr": ["rCO2 (L/day)", round(results['co2_plat_L_day'], 1)],
                "ee_kcal_day": ["EE (kcal/day)", round(results['tee_plat_kcal_day'], 1)],
                "ee_mj_day": ["EE (MJ/day)", round(results['tee_plat_mj_day'], 2)]
            }
        }

    # Error-flag rows degrade gracefully when the underlying ratio is NaN.
    if np.isnan(CALCULATED_RESULTS.d_ratio_percent):
        plateau_2h = ["2H plateau (<5%)", "N/A (missing data)"]
    else:
        plateau_2h = ["2H plateau (<5%)", str(round(CALCULATED_RESULTS.d_ratio_percent, 2)) + "%"]
    if np.isnan(CALCULATED_RESULTS.o18_ratio_percent):
        plateau_o18 = ["18O Plateau (<5%)", "N/A (missing data)"]
    else:
        plateau_o18 = ["18O Plateau (<5%)", str(round(CALCULATED_RESULTS.o18_ratio_percent, 2)) + "%"]
    if np.isnan(CALCULATED_RESULTS.ee_check):
        ee = ["EE (PD4-ED4 vs. PD5-ED5, <10%)", "N/A (missing data)"]
    else:
        ee = ["EE (PD4-ED4 vs. PD5-ED5, <10%)", str(round(CALCULATED_RESULTS.ee_check, 4)) + "%"]
    return json.dumps({
        "results": {
            "calculations": {
                "ndp_kg": ["NdP (kg)", round(CALCULATED_RESULTS.nd['adj_plat_avg_kg'], 1)],
                "kd_hr": ["kd/hour", round(CALCULATED_RESULTS.kd_per_hr, 6)],
                "nop_kg": ["NoP (kg)", round(CALCULATED_RESULTS.no['adj_plat_avg_kg'], 1)],
                "ko_hr": ["ko/hour", round(CALCULATED_RESULTS.ko_per_hr, 6)],
                "body_water_avg_kg": ["Total Body Water Average (kg)",
                                      round(CALCULATED_RESULTS.total_body_water_ave_kg, 1)],
                "fat_free_mass_kg": ["Fat Free Mass (kg)", round(CALCULATED_RESULTS.fat_free_mass_kg, 1)],
                "fat_mass_kg": ["Fat Mass (kg)", round(CALCULATED_RESULTS.fat_mass_kg, 1)],
                "body_fat_percentage": ["Body Fat Percentage", round(CALCULATED_RESULTS.body_fat_percent, 1)]
            },
            "error_flags": {
                "plateau_2h": plateau_2h,
                "plateau_18O": plateau_o18,
                "ds_ratio": ["DS ratio (1.000 - 1.070)", round(CALCULATED_RESULTS.dil_space_ratio, 4)],
                "ee": ee,
                "ko_kd": ["Ko/kd (1.1 - 1.7)", round(CALCULATED_RESULTS.ko_kd_ratio, 4)]
            },
            "schoeller": sort_calculated_results(CALCULATED_RESULTS.schoeller),
            "racette": sort_calculated_results(CALCULATED_RESULTS.racette),
            "speakman1997": sort_calculated_results(CALCULATED_RESULTS.speakman1997),
            "speakman2020": sort_calculated_results(CALCULATED_RESULTS.speakman2020)
        }
    })
@app.route('/export', methods=['POST'])
def export_to_csv():
    """Write the cached calculation results to CSV via DLWSubject.

    Assumes /calculate has been called first; otherwise CALCULATED_RESULTS
    is still None and this raises AttributeError.
    """
    return CALCULATED_RESULTS.save_results_csv()
@app.route('/load', methods=['POST'])
def load_csv():
    """Parse the uploaded CSV request body and return its rows as JSON."""
    payload = request.get_data().decode('utf-8')
    reader = csv.DictReader(payload.split('\n'))
    return json.dumps({'results': list(reader), 'error': False})
@app.route('/')
def root():
    """Serve the single-page front-end entry point."""
    return send_from_directory(STATICS_LOCATION, 'index.html')
@click.command()
@click.option('--host', default=None)
@click.option('--port', default=None)
def run_app(host, port):
    """CLI entry point: run the Flask app on the given host/port (no debug)."""
    app.run(debug=False, host=host, port=port)
if __name__ == '__main__':
    # Direct execution uses the debug server; the run_app click command
    # above is the production-style entry point.
    app.run(debug=True)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from strip import Strip
import random
import time
import signal
import logging
logger = logging.getLogger(__name__)
def init_logging(log_level):
    """Configure root logging at *log_level* (no-op if already configured)."""
    logging.basicConfig(level=log_level)
# catch signals for tidy exit
# Flag polled by the main loop; flipped by the SIGINT handler below.
_exiting = False


def signal_handler(signal, frame):
    # NOTE: the parameter shadows the `signal` module inside this handler,
    # which is harmless here since the module is not used in the body.
    global _exiting
    _exiting = True


# Hardware configuration: LED count and the SPI bus/device of the strip.
NUM_LEDS = 60
SPI_BUS = 0
SPI_DEVICE = 0
def main():
    """Flood the LED strip with random colors until interrupted.

    Sets every LED to one random RGB color roughly 6-7 times per second and
    logs the cumulative update rate about once per second.  On SIGINT the
    loop exits and the strip is switched off.
    """
    init_logging(logging.INFO)
    signal.signal(signal.SIGINT, signal_handler)
    strip = Strip(NUM_LEDS, SPI_BUS, SPI_DEVICE)
    updates = 0
    start_time = time.time()
    last_report_time = start_time
    while not _exiting:
        strip.set_all(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 1)
        strip.update()
        time.sleep(0.15)
        updates += 1
        now = time.time()
        if now - last_report_time > 1.0:
            # NOTE: this is the cumulative average rate since start, not the
            # rate over the last reporting interval.
            elapsed = now - start_time
            updates_per_second = updates / elapsed
            # Lazy %-args: the message is only formatted if INFO is enabled.
            logger.info("Updates per second: %s", updates_per_second)
            last_report_time = now
    # Leave the hardware dark on exit.
    strip.set_all_off()
    strip.update()
# Script entry point.
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn, ImageColumn
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
"""
You can use the extra Flask-AppBuilder fields and Mixin's
AuditMixin will add automatic timestamp of created and modified by who
"""
# This model stores SSH key details - their names and full paths.
class Key(Model):
    # Surrogate primary key.
    key_id = Column(Integer, primary_key=True)
    # Human-readable key name shown in AppBuilder views.
    key_name = Column(String(100), nullable=False)
    # Filesystem path to the private key file.
    key_path = Column(String(50), nullable=False)

    def __repr__(self):
        """Display the key by name in list/detail views."""
        return self.key_name
# This model stores details about a particular kind of server - in this case, a production server running various apps.
class ProductionServer(Model):
    production_server_id = Column(Integer, primary_key=True)
    production_server_name = Column(String(50), nullable=False)
    production_server_ip = Column(String(30), nullable=False)
    # SSH key used to reach this server.
    key_id = Column(Integer, ForeignKey('key.key_id'))
    key = relationship('Key')
    # Login user for SSH sessions (optional).
    production_server_user = Column(String(30))

    def __repr__(self):
        """Display the server by name in list/detail views."""
        return self.production_server_name
# This model stores details about MySQL servers.
class MySQLServer(Model):
    mysql_server_id = Column(Integer, primary_key=True)
    mysql_server_name = Column(String(50), nullable=False)
    mysql_server_ip = Column(String(30), nullable=False)
    # SSH key used to reach this server.
    key_id = Column(Integer, ForeignKey('key.key_id'))
    key = relationship('Key')
    # Login user for SSH sessions (optional).
    mysql_server_user = Column(String(30))

    def __repr__(self):
        """Display the server by name in list/detail views."""
        return self.mysql_server_name
# This model will store details about RethinkDB servers.
class RethinkDBServer(Model):
    rethinkdb_server_id = Column(Integer, primary_key=True)
    rethinkdb_server_name = Column(String(50), nullable=False)
    rethinkdb_server_ip = Column(String(30), nullable=False)
    # SSH key used to reach this server.
    key_id = Column(Integer, ForeignKey('key.key_id'))
    key = relationship('Key')
    # Login user for SSH sessions (optional).
    rethinkdb_server_user = Column(String(30))

    def __repr__(self):
        """Display the server by name in list/detail views."""
        return self.rethinkdb_server_name
|
nilq/baby-python
|
python
|
import pytest
from ergaleia import Mini
@pytest.fixture
def mini():
    # A Mini with a single field 'foo' whose default value is 'bar'.
    return Mini('foo value=bar')
def test_default(mini):
    # The default is exposed as an attribute; item access is unsupported.
    assert mini.foo == 'bar'
    with pytest.raises(TypeError):
        mini['foo']
def test_set_attribute(mini):
    # Declared fields can be reassigned; unknown fields raise KeyError.
    mini.foo = 'whatever'
    assert mini.foo == 'whatever'
    with pytest.raises(KeyError):
        mini.bar = 'whatever'
def test_set(mini):
    # set() updates known fields, rejects unknown ones with KeyError,
    # and accepts several fields in one call.
    mini.set(foo=1)
    assert mini.foo == 1
    with pytest.raises(KeyError):
        mini.set(bar=2)
    m = Mini('a', 'b', 'c')
    m.set(a=1, b=2, c=3)
    assert m.a + m.b + m.c == 6
def test_load(mini):
    # load() keeps raw strings unless the field declares a validator.
    mini.load(['foo=10'])
    assert mini.foo == '10'
    m = Mini('a validator=int')
    m.load(['a=10'])
    assert m.a == 10
def test_as_tuple(mini):
    # as_tuple() returns a namedtuple-like snapshot of the fields.
    t = mini.as_tuple()
    assert t.foo == 'bar'
|
nilq/baby-python
|
python
|
import itertools
import pickle
import time
import tarfile
import sys
import uuid
import warnings
from collections import OrderedDict
from pathlib import Path
import hawkeslib as hl
import numpy as np
from joblib import Parallel, delayed
from sklearn.decomposition import NMF
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics import normalized_mutual_info_score as nmi
from tick.hawkes import HawkesExpKern, HawkesEM
from tick.hawkes.inference.hawkes_cumulant_matching import HawkesCumulantMatching
sys.path.append("../")
from lrhp.neumann import TruncatedNeumannEstimator
from lrhp.util import hawkeslib_data_to_tick
class Experiment:
    """One clustering experiment: fit a Hawkes-process model, extract its
    interaction matrix, and cluster nodes spectrally and with k-means."""

    def __init__(self, name, fit_args, model, n_clusters=10):
        self.name = name
        self.fit_args = fit_args  # args (list) or kwargs (dict) for model.fit
        self.model = model
        self.n_clusters = n_clusters

    def get_spectral_clustering(self, P, is_W=False):
        # When given the low-rank factor W, rebuild the affinity as W^T W.
        if is_W:
            P = P.T.dot(P)
        # Spectral clustering needs a nonnegative affinity matrix.
        P = np.clip(P, a_min=0, a_max=None)
        return SpectralClustering(
            n_clusters=self.n_clusters, affinity="precomputed"
        ).fit_predict(P)

    def get_kmeans_clustering(self, P, is_W=False):
        # Full matrices are first reduced with NMF; the factor W is used directly.
        if not is_W:
            P = np.clip(P, a_min=0, a_max=None)
            P = NMF(n_components=self.n_clusters).fit_transform(P)
        else:
            P = P.T
        return KMeans(n_clusters=self.n_clusters).fit_predict(P)

    @staticmethod
    def _get_tick_phi(model):
        # Map each supported tick estimator to how its kernel matrix is read.
        phi_getter = {
            HawkesEM: lambda m: m.kernel.sum(-1),
            HawkesCumulantMatching: lambda m: m.solution,
            HawkesExpKern: lambda m: m.adjacency,
        }
        for k, v in phi_getter.items():
            if isinstance(model, k):
                return v(model)
        raise ValueError("Given model not recognized.")

    def run(self):
        """Fit the model; return (fitting_time, phi, spectral_labels, kmeans_labels)."""
        start_time = time.time()
        if "tick" in str(type(self.model)):
            # tick estimators take positional fit args and expose phi directly.
            self.model.fit(*self.fit_args)
            phi = self._get_tick_phi(self.model)
            fitting_time = time.time() - start_time
            sc = self.get_spectral_clustering(phi)
            km = self.get_kmeans_clustering(phi)
        else:
            assert isinstance(self.model, TruncatedNeumannEstimator)
            W, _ = self.model.fit(**self.fit_args)
            phi = np.clip(W.T.dot(W), a_min=0, a_max=None).astype(np.float64)
            fitting_time = time.time() - start_time
            sc = self.get_spectral_clustering(W, is_W=True)
            km = self.get_kmeans_clustering(W, is_W=True)
        return fitting_time, phi, sc, km
def main(n_clusters=10):
    """Run all baseline and Neumann clustering experiments on the synthetic
    Hawkes data and append one CSV line of metrics per experiment to a
    randomly named file under ./outputs/.
    """
    # Load ground truth (mu, Phi, beta) and the event stream (t, c).
    with tarfile.open("../data/synthetic_hawkes_data.tar.gz", "r:gz") as tar:
        fp = tar.extractfile("synthetic_hawkes_data")
        mu, Phi, beta, t, c = pickle.load(fp)
        fp.close()
    # First half trains; second half is held out for predictive likelihood.
    test_ix = len(t) // 2
    t1, c1 = (x[:test_ix] for x in (t, c))
    t2, c2 = (x[test_ix:] for x in (t, c))
    t2 -= t1[-1]  # shift held-out timestamps to start at zero
    tickd = hawkeslib_data_to_tick(t1, c1)
    # Slower baselines only get a prefix of the training data.
    SHORT_DATA_LENGTH = 2_000_000
    tickd_short = hawkeslib_data_to_tick(t1[:SHORT_DATA_LENGTH], c1[:SHORT_DATA_LENGTH])
    baseline_experiments = [
        Experiment(
            name="NPHC",
            fit_args=[tickd],
            model=HawkesCumulantMatching(
                integration_support=1.0,
                verbose=True,
                C=1.,
                max_iter=1000,
            ),
            n_clusters=n_clusters,
        ),
        Experiment(
            name="Hawkes-LS",
            fit_args=[tickd_short],
            model=HawkesExpKern(
                decays=1.,
                gofit="least-squares",
                C=1,
                solver="gd"
            ),
            n_clusters=n_clusters,
        ),
        Experiment(
            name="Hawkes-EM",
            fit_args=[tickd_short],
            model=HawkesEM(
                kernel_support=10.,
                kernel_size=2,
                verbose=True,
                print_every=10,
            ),
            n_clusters=n_clusters,
        )
    ]
    neumann_experiments = [
        Experiment(
            name="LRHP-GD",
            fit_args=dict(
                t=t1, c=c1, num_epochs=int(1e3), learning_rate=1e-2
            ),
            model=TruncatedNeumannEstimator(rank=n_clusters, is_nmf=False),
            n_clusters=n_clusters,
        ),
        Experiment(
            name="LRHP-GD (NMF)",
            fit_args=dict(
                t=t1, c=c1, num_epochs=int(5e4), learning_rate=2e-1
            ),
            # NOTE(review): rank is hard-coded to 10 here, unlike LRHP-GD
            # which uses n_clusters — confirm this is intended.
            model=TruncatedNeumannEstimator(rank=10, is_nmf=True),
            n_clusters=n_clusters,
        ),
    ]
    # get original clusters via NMF
    nmf = NMF(n_components=n_clusters)
    Wo = nmf.fit_transform(Phi)
    orig_clus = KMeans(n_clusters=n_clusters).fit_predict(Wo)
    # run experiments
    warnings.simplefilter("ignore")
    all_results = []
    for exp in baseline_experiments + neumann_experiments:
        time_taken, phi, sc, km = exp.run()
        results = OrderedDict(
            name=exp.name,
            n_clusters=n_clusters,
            time_taken=time_taken,
            # Held-out log-likelihood per event under the fitted kernel.
            pred_ll=hl.MultivariateExpHawkesProcess.log_likelihood_with_params(
                t2, c2, mu, np.clip(phi, a_min=0, a_max=None), beta
            ) / len(t2),
            # Agreement of each clustering with the ground-truth clusters.
            sc_nmi=nmi(sc, orig_clus),
            km_nmi=nmi(km, orig_clus),
        )
        reslist = list(results.values())
        all_results.append(reslist)
        print(reslist)
    # write out results
    out_path = Path("./outputs/")
    out_path.mkdir(exist_ok=True)
    with open(out_path / f"{str(uuid.uuid4())[:7]}", "w") as fp:
        for r in all_results:
            print(",".join(map(str, r)), file=fp)
if __name__ == "__main__":
# Parallel(n_jobs=36)(delayed(main)() for _ in range(20))
main()
|
nilq/baby-python
|
python
|
# Copyright (c) Ye Liu. All rights reserved.
from .dynamic_bce import DynamicBCELoss
from .focal import (FocalLoss, FocalLossStar, GaussianFocalLoss, focal_loss,
focal_loss_star, gaussian_focal_loss)
from .ghm import GHMCLoss
from .lasso import (BalancedL1Loss, L1Loss, SmoothL1Loss, balanced_l1_loss,
l1_loss, smooth_l1_loss)
from .utils import weighted_loss
# Public API of the losses package; mirrors the imports above.
__all__ = [
    'DynamicBCELoss', 'FocalLoss', 'FocalLossStar', 'GaussianFocalLoss',
    'focal_loss', 'focal_loss_star', 'gaussian_focal_loss', 'GHMCLoss',
    'BalancedL1Loss', 'L1Loss', 'SmoothL1Loss', 'balanced_l1_loss', 'l1_loss',
    'smooth_l1_loss', 'weighted_loss'
]
|
nilq/baby-python
|
python
|
import pandas as pd

# Blend two prediction files: 80% of the Keras ensemble submission plus
# 20% of the LightGBM submission, written out as a new submission CSV.
t1 = pd.read_csv("lgb_pyst.csv")
t2 = pd.read_csv("lgb_pyst_Keras_4_0.967189916545.csv")
t2['click'] = t2['click']*0.8 +t1['click']*0.2
t2.to_csv('avg_lgb_pyst_Keras_4_2_8.csv', index=False)
|
nilq/baby-python
|
python
|
# Sum of Polygon Angles
def sum_of_interior_angles(n_sided):
    """Return the sum of the interior angles (degrees) of an n-sided polygon.

    Uses the (2n - 4) right-angles formula, equivalent to (n - 2) * 180.
    Raises ValueError for n < 3, since no polygon has fewer than 3 sides.
    The original line `2(n_sided)−4×90(n_sided)` was not valid Python
    (Unicode minus/times, integers called like functions); this is the
    intended formula.
    """
    if n_sided < 3:
        raise ValueError("a polygon needs at least 3 sides")
    return (2 * n_sided - 4) * 90


if __name__ == "__main__":
    print("Given an n-sided regular polygon n, return the total sum of internal angles (in degrees).")
    n_sided = int(input("Enter your n-sided polygon : "))
    print(sum_of_interior_angles(n_sided))
|
nilq/baby-python
|
python
|
#Copyright ReportLab Europe Ltd. 2000-2008
#this test and associated functionality kindly donated by Ian Sparks.
#see license.txt for license details
"""
Tests for internal links and destinations
"""
__version__='''$Id: test_pdfgen_links.py 3288 2008-09-15 11:03:17Z rgbecker $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
#
# Fit tests
#
# Modification History
# ====================
#
# 11-Mar-2003 Ian Sparks
# * Initial version.
#
#
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
from reportlab.lib import colors
import unittest
def markPage(c,height=letter[1],width=letter[0]):
    """Draw a 1-inch coordinate grid with "x=.. y=.." labels on canvas *c*.

    The original drew every grid line once per cell of the nested loop, so
    each vertical line was stroked int(height) times and each horizontal
    line int(width) times; the visible output is identical with each line
    drawn exactly once.
    """
    height = height / inch
    width = width / inch
    # Label every 1-inch grid intersection.
    for y in range(int(height)):
        for x in range(int(width)):
            c.drawString(x*inch,y*inch,"x=%d y=%d" % (x,y) )
    # Stroke each grid line exactly once.
    for x in range(int(width)):
        c.line(x*inch,0,x*inch,height*inch)
    for y in range(int(height)):
        c.line(0,y*inch,width*inch,y*inch)
# Output PDF path, resolved into the test output directory.
fn = outputfile("test_pdfgen_links.pdf")
class LinkTestCase(unittest.TestCase):
    """Exercise bookmarkPage fit modes, outline entries and internal links."""

    def test1(self):
        # Build a three-page PDF whose outline and in-page links jump
        # between bookmarks with every supported "fit" destination type.
        c = canvas.Canvas(fn,pagesize=letter)
        #Page 1
        c.setFont("Courier", 10)
        markPage(c)
        c.bookmarkPage("P1")
        c.addOutlineEntry("Page 1","P1")
        #Note : XYZ Left is ignored because at this zoom the whole page fits the screen
        c.bookmarkPage("P1_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0.5)
        c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=1)
        c.bookmarkPage("P1_XYZ2",fit="XYZ",top=7*inch,left=3*inch,zoom=5)
        c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=1)
        c.bookmarkPage("P1_FIT",fit="Fit")
        c.addOutlineEntry("Page 1 Fit","P1_FIT",level=1)
        c.bookmarkPage("P1_FITH",fit="FitH",top=2*inch)
        c.addOutlineEntry("Page 1 FitH (top = 2 inch)","P1_FITH",level=1)
        c.bookmarkPage("P1_FITV",fit="FitV",left=3*inch)
        c.addOutlineEntry("Page 1 FitV (left = 3 inch)","P1_FITV",level=1)
        c.bookmarkPage("P1_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
        c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=1)
        c.bookmarkPage("P1_FORWARD")
        c.addOutlineEntry("Forward References","P1_FORWARD",level=2)
        c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
        #Create link to FitR on page 3
        c.saveState()
        c.setFont("Courier", 14)
        c.setFillColor(colors.blue)
        c.drawString(inch+20,inch+20,"Click to jump to the meaning of life")
        c.linkAbsolute("","MOL",(inch+10,inch+10,6*inch,2*inch))
        c.restoreState()
        #Create linkAbsolute to page 2
        c.saveState()
        c.setFont("Courier", 14)
        c.setFillColor(colors.green)
        c.drawString(4*inch,4*inch,"Jump to 2.5 inch position on page 2")
        c.linkAbsolute("","HYPER_1",(3.75*inch,3.75*inch,8.25*inch,4.25*inch))
        c.restoreState()
        c.showPage()
        #Page 2
        c.setFont("Helvetica", 10)
        markPage(c)
        c.bookmarkPage("P2")
        c.addOutlineEntry("Page 2","P2")
        #Note : This time left will be at 3*inch because the zoom makes the page to big to fit
        c.bookmarkPage("P2_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=2)
        c.addOutlineEntry("Page 2 XYZ (top=7,left=3,zoom=2.0)","P2_XYZ",level=1)
        c.bookmarkPage("P2_FIT",fit="Fit")
        c.addOutlineEntry("Page 2 Fit","P2_FIT",level=1)
        c.bookmarkPage("P2_FITH",fit="FitH",top=2*inch)
        c.addOutlineEntry("Page 2 FitH (top = 2 inch)","P2_FITH",level=1)
        c.bookmarkPage("P2_FITV",fit="FitV",left=10*inch)
        c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=1)
        c.bookmarkPage("P2_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
        c.addOutlineEntry("Page 2 FitR (left=1,bottom=2,right=5,top=6)","P2_FITR",level=1)
        c.bookmarkPage("P2_FORWARD")
        c.addOutlineEntry("Forward References","P2_FORWARD",level=2)
        c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
        c.bookmarkPage("P2_BACKWARD")
        c.addOutlineEntry("Backward References","P2_BACKWARD",level=2)
        c.addOutlineEntry("Page 1 Fit","P1_FIT",level=3)
        c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=3)
        #Horizontal absolute test from page 1. Note that because of the page size used on page 3 all this will do
        #is put the view centered on the bookmark. If you want to see it "up close and personal" change page3 to be
        #the same page size as the other pages.
        c.saveState()
        c.setFont("Courier", 14)
        c.setFillColor(colors.green)
        c.drawString(2.5*inch,2.5*inch,"This line is hyperlinked from page 1")
        # c.bookmarkHorizontalAbsolute("HYPER_1",3*inch) #slightly higher than the text otherwise text is of screen above.
        c.bookmarkPage("HYPER_1",fit="XYZ",top=2.5*inch,bottom=2*inch)
        c.restoreState()
        #
        c.showPage()
        #Page 3
        c.setFont("Times-Roman", 10)
        #Turn the page on its size and make it 2* the normal "width" in order to have something to test FitV against.
        c.setPageSize((2*letter[1],letter[0]))
        markPage(c,height=letter[0],width=2*letter[1])
        c.bookmarkPage("P3")
        c.addOutlineEntry("Page 3 (Double-wide landscape page)","P3")
        #Note : XYZ with no zoom (set it to something first
        c.bookmarkPage("P3_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0)
        c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=1)
        #FitV works here because the page is so wide it can"t all fit on the page
        c.bookmarkPage("P3_FITV",fit="FitV",left=10*inch)
        c.addOutlineEntry("Page 3 FitV (left = 10 inch)","P3_FITV",level=1)
        c.bookmarkPage("P3_BACKWARD")
        c.addOutlineEntry("Backward References","P3_BACKWARD",level=2)
        c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=3)
        c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=3)
        c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=3)
        #Add link from page 1
        c.saveState()
        c.setFont("Courier", 40)
        c.setFillColor(colors.green)
        c.drawString(5*inch,6*inch,"42")
        c.bookmarkPage("MOL",fit="FitR",left=4*inch,top=7*inch,bottom=4*inch,right=6*inch)
        c.showOutline()
        c.save()
def makeSuite():
    """Build the unittest suite for this module's test case."""
    return makeSuiteForClasses(LinkTestCase)
#noruntests
if __name__ == "__main__":
    unittest.TextTestRunner().run(makeSuite())
    # Python 2 print statement: this module predates Python 3 support.
    print "wrote", fn
    printLocation()
|
nilq/baby-python
|
python
|
import os
from setuptools import setup
def pkg_dir(path):
    """Return *path* resolved relative to the directory containing this file."""
    base = os.path.dirname(__file__)
    return os.path.join(base, path)
# Version and long description are read from files shipped next to setup.py.
with open(pkg_dir('VERSION'), 'r') as f:
    version = f.read().strip()
with open(pkg_dir('README.md'), 'r') as f:
    readme = f.read()

setup(
    name='elasticsearch-collectd-plugin',
    version=version,
    install_requires=[],
    py_modules=['elasticsearch-collectd'],
    author='',
    author_email='platforms@digital.justice.gov.uk',
    maintainer='MOJDS',
    url='https://github.com/ministryofjustice/elasticsearch-collectd-plugin',
    description='Collectd plugin to query stats from elasticsearch',
    long_description=readme,
    license='LICENSE',
    keywords=['python', 'ministryofjustice', 'collectd', 'elasticsearch'],
    test_suite='tests',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Development Status :: 1 - Alpha',
        'Environment :: Plugins',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Networking :: Monitoring',
        'Topic :: System :: Networking :: Time Synchronization']
)
|
nilq/baby-python
|
python
|
import os
import sys
import glob
import json
import scipy.signal as signal
import numpy.ma as ma
import numpy as np
import matplotlib
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import datetime
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and
    features of the signal better than other types of filtering
    approaches, such as moving averages techniques.
    From http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less then `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only smoothing)
    rate : number
        sample rate used to scale the computed derivative (default 1).

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or it's n-th derivative).

    Notes
    -----
    The Savitzky-Golay is a type of low-pass filter, particularly
    suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-square fit with a
    polynomial of high order over a odd-sized window centered at
    the point.

    Examples
    --------
    t = np.linspace(-4, 4, 500)
    y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
    ysg = savitzky_golay(y, window_size=31, order=4)
    import matplotlib.pyplot as plt
    plt.plot(t, y, label='Noisy signal')
    plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
    plt.plot(t, ysg, 'r', label='Filtered signal')
    plt.legend()
    plt.show()

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """
    import numpy as np
    from math import factorial

    try:
        # np.int was removed in NumPy 1.24; the builtin int behaves the same
        # here (truncates floats, raises ValueError on bad strings).
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute coefficients; np.mat is deprecated, and with a plain 2-D
    # array indexing a row of pinv(b) replaces the old `.A[deriv]`.
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
# Small global font so the dense axis labels stay readable in the PNGs.
matplotlib.rcParams['font.size'] = 8
def process(f, i):
    """Render the water-area time series of one GeoJSON file to a PNG.

    Reads the precomputed time/value arrays from the file's first feature,
    smooths the gap-filled series with a Savitzky-Golay filter, and saves a
    plot under time_series_images/.  Skips files whose PNG already exists
    or whose time series is empty.  *i* is only used for progress output.
    """
    path = 'time_series_images/' + os.path.basename(f) + '.png'
    if os.path.exists(path):
        print('Exists, skipping ...')
        return
    j = json.loads(open(f).read())
    p = j['features'][0]['properties']
    # fr = p['water_area_filled_fraction']
    # Raw (v1) and gap-filled (v2) water areas at epoch-millisecond times t,
    # plus the JRC reference series.
    t = p['water_area_time']
    v1 = p['water_area_value']
    v2 = p['water_area_filled']
    t_jrc = p['water_area_time_jrc']
    v_jrc = p['water_area_value_jrc']
    # Fraction of each observation that was gap-filled.
    filled_fr = list(zip(v1, v2))
    filled_fr = [(o[1]-o[0])/o[1] for o in filled_fr]
    # NOTE(review): this mask is computed but unused — the masking lines
    # below are commented out, so heavily-filled points are NOT dropped.
    mask = ma.masked_greater_equal(filled_fr, 0.5)
    # t = list(ma.masked_array(t, mask).compressed())
    # v1 = list(ma.masked_array(v1, mask).compressed())
    # v2 = list(ma.masked_array(v2, mask).compressed())
    if not len(t):
        print('Empty, skipping ...')
        return
    years = mdates.YearLocator()   # every year
    v2_filtered = savitzky_golay(np.array(v2), window_size=15, order=4)
    # v2_filtered = signal.medfilt(v2, 7)
    # v2_filtered = lowess(v2, t)
    # v2_filtered = lowess(v2, t, frac=1./50)
    # Convert epoch milliseconds to datetimes for plotting.
    t = [datetime.datetime.fromtimestamp(tt / 1000) for tt in t]
    t_jrc = [datetime.datetime.fromtimestamp(tt_jrc / 1000) for tt_jrc in t_jrc]
    s_scale = 'Scale: {:.2f}'.format(p['scale']) + '$m$'
    s_area = 'Area: {:.2f}'.format(p['area']/(1000*1000)) + '$km^2$, ' + '{:.2f}'.format(100 * p['area']/(1000*1000)) + '$ha$'
    title = s_scale + ', ' + s_area
    fig = plt.figure(figsize=(11, 4))
    ax = fig.add_subplot(111)
    ax.xaxis.set_major_locator(years)
    # fig.autofmt_xdate()
    ax.set_xlim([datetime.date(1985, 1, 1), datetime.date(2019, 1, 1)])
    ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.2)
    plt.title(title)
    plt.xticks(rotation=90)
    # Faint scatter for JRC and raw series, stronger for the filled series.
    ax.plot(t_jrc, v_jrc, marker='.', c='r', markersize=2, linewidth=0, alpha=0.05)
    ax.plot(t, v1, marker='.', c='b', markersize=2, linewidth=0, alpha=0.05)
    ax.plot(t, v2, marker='.', c='k', markersize=3, linewidth=0, alpha=0.8)
    # for SG
    if len(t) != len(v2_filtered):
        print('Bad, shapes are not equal, skipping line plotting ...')
    else:
        ax.plot(t, v2_filtered, marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
    # for LOWESS
    # v2_filtered_t = [datetime.datetime.fromtimestamp(t / 1000) for t in v2_filtered[:, 0]]
    # ax.plot(v2_filtered_t, v2_filtered[:, 1], marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
    path = 'time_series_images/' + os.path.basename(f) + '.png'
    print(str(i) + ' ' + path)
    plt.tight_layout()
    plt.savefig(path, dpi=150)
    plt.close()
    # ========================== JRC
    # fig = plt.figure(figsize=(11, 4))
    # ax = fig.add_subplot(111)
    # ax.xaxis.set_major_locator(years)
    # ax.set_xlim([datetime.date(1985, 1, 1), datetime.date(2019, 1, 1)])
    # ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.2)
    # plt.title(title)
    # plt.xticks(rotation=90)
    # ax.plot(t_jrc, v_jrc, marker='.', c='r', markersize=2, linewidth=0, alpha=0.8)
    # ax.plot(t, v1, marker='.', c='b', markersize=2, linewidth=0, alpha=0.05)
    # ax.plot(t, v2, marker='.', c='k', markersize=3, linewidth=0, alpha=0.05)
    # for SG
    # if len(t) != len(v2_filtered):
    #     print('Bad, shapes are not equal, skipping line plotting ...')
    # else:
    #     ax.plot(t, v2_filtered, marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)
    # path = 'time_series_images/' + os.path.basename(f) + '-jrc.png'
    # print(str(i) + ' ' + path)
    # plt.tight_layout()
    # plt.savefig(path, dpi=150)
    # plt.close()
# Render every GeoJSON time series; bump offset to resume a partial run.
offset = 0
for (i, f) in enumerate(glob.glob('time_series/*.geojson')[offset:]):
    print('Processing ' + str(i) + ' ...')
    process(f, i + offset)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright 2019 YugaByte, Inc. and Contributors
#
# Licensed under the Polyform Free Trial License 1.0.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# https://github.com/YugaByte/yugabyte-db/blob/master/licenses/POLYFORM-FREE-TRIAL-LICENSE-1.0.0.txt
from ybops.cloud.common.base import AbstractPerCloudCommand
from ybops.cloud.common.command import InstanceCommand
from ybops.cloud.common.method import ConfigureInstancesMethod, ListInstancesMethod, \
InitYSQLMethod, CronCheckMethod
from ybops.cloud.onprem.method import OnPremCreateInstancesMethod, OnPremDestroyInstancesMethod, \
OnPremProvisionInstancesMethod, OnPremValidateMethod, \
OnPremFillInstanceProvisionTemplateMethod, OnPremListInstancesMethod
class OnPremInstanceCommand(InstanceCommand):
    """Subclass for on premise specific instance command baseclass. Supplies overrides for method
    hooks.
    """

    def __init__(self):
        super(OnPremInstanceCommand, self).__init__()

    def add_methods(self):
        """Register the on-prem lifecycle methods exposed by this command.

        On-prem specific overrides are used for provision/create/destroy/
        list/validate/template; the generic configure, InitYSQL and
        CronCheck methods are shared with other clouds.
        """
        self.add_method(OnPremProvisionInstancesMethod(self))
        self.add_method(OnPremCreateInstancesMethod(self))
        self.add_method(ConfigureInstancesMethod(self))
        self.add_method(OnPremDestroyInstancesMethod(self))
        self.add_method(OnPremListInstancesMethod(self))
        self.add_method(OnPremValidateMethod(self))
        self.add_method(OnPremFillInstanceProvisionTemplateMethod(self))
        self.add_method(InitYSQLMethod(self))
        self.add_method(CronCheckMethod(self))
|
nilq/baby-python
|
python
|
from __future__ import print_function
import json
import logging
import sys
import os
this_dir = os.path.dirname(os.path.realpath(__file__))
# Make the bundled lib/ and src/ directories importable inside the Lambda
# package before importing from them below.
sys.path.append("{0}/../lib".format(this_dir))
sys.path.append("{0}/../src".format(this_dir))
from jsonschema import validate
from generator.generator import convert_to_imacro
# Root logger at DEBUG so CloudWatch captures full detail.
log = logging.getLogger()
log.setLevel(logging.DEBUG)
def handler(event, context):
    """AWS Lambda entry point: validate *event* against the bundled JSON
    schema and convert it to an iMacro script.

    Returns the generated output on success, or a human-readable error
    string when validation or generation fails — errors are returned
    rather than raised so the caller always receives a response body.
    """
    with open(os.path.join(this_dir, '../resources/schema.json'), 'r') as myfile:
        schema = json.loads(myfile.read())
    try:
        validate(event, schema)
    except Exception as e:
        return "The input failed validation\n{0}".format(repr(e))
    try:
        output = convert_to_imacro(event)
    except Exception as e:
        # Typo fixed: "occured" -> "occurred".
        return "An internal error occurred during response generation\n{0}".format(repr(e))
    return output
|
nilq/baby-python
|
python
|
import argparse
import traceback
import warnings
import torch
import wandb
from gym_carla.envs.carla_env import CarlaEnv
from gym_carla.envs.carla_pid_env import CarlaPidEnv
from termcolor import colored
from torch.utils.data import DataLoader
from bc.train_bc import get_collate_fn
from models.carlaAffordancesDataset import HLCAffordanceDataset, AffordancesDataset
from sac.replay_buffer import OnlineReplayBuffer
from sac.sac_agent import SACAgent
from sac.trainer import SACTrainer
if __name__ == '__main__':
    # ---- Command-line interface -------------------------------------------
    parser = argparse.ArgumentParser(description="SAC Trainer",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # carla parameters
    carla_config = parser.add_argument_group('CARLA config')
    carla_config.add_argument('--host', default='172.18.0.1', type=str, help='IP address of CARLA host.')
    carla_config.add_argument('--port', default=2008, type=int, help='Port number of CARLA host.')
    carla_config.add_argument('--vehicles', default=100, type=int, help='Number of vehicles in the simulation.')
    carla_config.add_argument('--walkers', default=50, type=int, help='Number of walkers in the simulation.')
    # SAC parameters
    rl_group = parser.add_argument_group('RL Config')
    rl_group.add_argument('--num-seed', default=2000, type=int, help='Number of seed steps before starting to train.')
    # Help-text fix: the two adjacent string literals were missing a space.
    rl_group.add_argument('--control-frequency', default=4, type=int, help='Number of times that a control signal'
                                                                           ' is going to be repeated to the environment')
    rl_group.add_argument('--act-mode', default="pid", type=str, help="Action space.")
    rl_group.add_argument('--max-episode-steps', default=200, type=int, help='Maximum number of steps per episode.')
    rl_group.add_argument('--num-eval-episodes', default=3, type=int, help='Number of evaluation episodes.')
    # argparse does NOT apply `type` to defaults, so 1e6 stayed a float;
    # make the default an int explicitly.
    rl_group.add_argument('--num-train-steps', default=int(1e6), type=int, help='Number of training steps.')
    rl_group.add_argument('--eval-frequency', default=10, type=int, help='number of episodes between evaluations.')
    # Typo fix: "lean" -> "learn".
    rl_group.add_argument('--learn-temperature', action='store_true', help='Whether to learn alpha value or not.')
    rl_group.add_argument('--reward-scale', default=1, type=float, help='Reward scale factor (positive)')
    rl_group.add_argument('--speed-reward-weight', default=1, type=float, help='Speed reward weight.')
    rl_group.add_argument('--collision-reward-weight', default=1, type=float, help='Collision reward weight')
    rl_group.add_argument('--lane-distance-reward-weight', default=1, type=float, help='Lane distance reward weight')
    models_parameters = parser.add_argument_group('Actor-Critic config')
    models_parameters.add_argument('--actor-hidden-dim', type=int, default=128, help='Size of hidden layer in the '
                                                                                     'actor model.')
    models_parameters.add_argument('--critic-hidden-dim', type=int, default=128, help='Size of hidden layer in the '
                                                                                      'critic model.')
    models_parameters.add_argument('--actor-weights', type=str, default=None, help='Path to actor weights')
    models_parameters.add_argument('--critic-weights', type=str, default=None, help='Path to critic weights')
    loss_parameters = parser.add_argument_group('Loss parameters')
    loss_parameters.add_argument('--actor-l2', type=float, default=4e-2,
                                 help='L2 regularization for the actor model.')
    loss_parameters.add_argument('--critic-l2', type=float, default=4e-2,
                                 help='L2 regularization for the critic model.')
    buffer_group = parser.add_argument_group('Buffer config')
    buffer_group.add_argument('--batch-size', default=1024, type=int, help='Batch size.')
    # Help-text fix: missing space between the concatenated literals.
    buffer_group.add_argument('--online-memory-size', default=8192, type=int, help='Number of steps to be stored in the'
                                                                                   ' online buffer')
    # in case of using behavioral cloning
    bc_group = parser.add_argument_group('Behavioral cloning config')
    bc_group.add_argument('--bc', default=None, type=str, help='path to dataset (without extensions)')
    bc_group.add_argument('--wandb', action='store_true', help='Whether or not to use wandb')
    args = parser.parse_args()
    warnings.filterwarnings("ignore")

    # ---- Runtime setup ----------------------------------------------------
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # "pid" mode drives the env with 2 action dims, any other mode with 3
    # (matching the extra throttle/brake ranges configured below).
    control_action_dim = 2 if args.act_mode == "pid" else 3
    action_range = (-1, 1) if args.act_mode == "raw" else (-1, 5)
    offline_dataset_path = args.bc
    if args.wandb:
        wandb.init(project='tsad', entity='autonomous-driving')
    carla_env = None
    if args.eval_frequency > 0:
        print(colored("[*] Initializing environment", "white"))
        desired_speed = 6
        env_params = {
            # carla connection parameters
            'host': args.host,
            'port': args.port,  # connection port
            'town': 'Town01',  # which town to simulate
            'traffic_manager_port': 8000,
            # simulation parameters
            'verbose': False,
            'vehicles': args.vehicles,  # number of vehicles in the simulation
            'walkers': args.walkers,  # number of walkers in the simulation
            'obs_size': 224,  # sensor width and height
            'max_past_step': 1,  # the number of past steps to draw
            'dt': 1 / 30,  # time interval between two frames
            'reward_weights': [1, 1, 1],  # reward weights [speed, collision, lane distance]
            'continuous_steer_range': [-1, 1],
            'ego_vehicle_filter': 'vehicle.lincoln*',  # filter for defining ego vehicle
            'max_time_episode': args.max_episode_steps,  # maximum timesteps per episode
            'max_waypt': 12,  # maximum number of waypoints
            'd_behind': 12,  # distance behind the ego vehicle (meter)
            'out_lane_thres': 2.0,  # threshold for out of lane
            'desired_speed': desired_speed,  # desired speed (m/s)
            'speed_reduction_at_intersection': 0.75,
            'max_ego_spawn_times': 200,  # maximum times to spawn ego vehicle
        }
        if args.act_mode == "pid":
            env_params.update({
                'continuous_speed_range': [0, desired_speed]
            })
            carla_env = CarlaPidEnv(env_params)
        else:
            env_params.update({
                'continuous_throttle_range': [0, 1],
                'continuous_brake_range': [0, 1]
            })
            carla_env = CarlaEnv(env_params)
        carla_env.reset()
        # Message fix: added the missing space after the comma.
        print(colored(f"[+] Environment ready "
                      f"(max_steps={args.max_episode_steps}, "
                      f"action_frequency={args.control_frequency})!", "green"))
    print(colored("[*] Initializing data structures", "white"))
    online_replay_buffer = OnlineReplayBuffer(args.online_memory_size)
    bc_loaders = None
    if offline_dataset_path:
        print(colored("RL + BC mode"))
        dataset = AffordancesDataset(args.bc)
        custom_collate_fn = get_collate_fn(args.act_mode)
        # One DataLoader per high-level command (HLC) 0-3.
        bc_loaders = {hlc: DataLoader(HLCAffordanceDataset(dataset, hlc=hlc),
                                      batch_size=args.batch_size,
                                      collate_fn=custom_collate_fn,
                                      shuffle=True) for hlc in [0, 1, 2, 3]}
    else:
        print(colored("Full DRL mode"))
    print(colored("[*] Data structures are ready!", "green"))
    agent = SACAgent(observation_dim=15,
                     action_range=action_range,
                     device=device,
                     action_dim=control_action_dim,
                     batch_size=args.batch_size,
                     actor_weight_decay=args.actor_l2,
                     critic_weight_decay=args.critic_l2,
                     learnable_temperature=args.learn_temperature)
    agent.train(True)
    print(colored("Training", "white"))
    trainer = SACTrainer(env=carla_env,
                         agent=agent,
                         buffer=online_replay_buffer,
                         dataloaders=bc_loaders,
                         device=device,
                         eval_frequency=args.eval_frequency,
                         num_seed_steps=args.num_seed,
                         num_train_steps=args.num_train_steps,
                         num_eval_episodes=args.num_eval_episodes,
                         seed=42)
    try:
        trainer.run()
    except Exception as e:
        print(colored("\nEarly stopping due to exception", "red"))
        traceback.print_exc()
        print(e)
    finally:
        # Typo fix: "Traning" -> "Training".
        # NOTE(review): trainer.end() presumably releases env resources — confirm.
        print(colored("\nTraining finished!", "green"))
        trainer.end()
|
nilq/baby-python
|
python
|
from odio_urdf import *
def assign_inertia(im):
    """Wrap the six inertia components of *im* (ixx, ixy, ixz, iyy, iyz, izz)
    in an Inertia element."""
    keys = ("ixx", "ixy", "ixz", "iyy", "iyz", "izz")
    return Inertia(**{key: im[pos] for pos, key in enumerate(keys)})
# Assemble a four-legged "walker_a" robot and write it out as URDF.
my_robot = Robot("walker_a")
contact = Contact(Lateral_Friction("100"))
# Uniform scale factor applied to the whole robot.
s = 1
inertia_matrix_body = [0.6363636364, 4.908571429, 4.51012987, 4.51012987, 0.6363636364, 4.908571429]
inertia_arm_0 = [3.11E-04, 0.003766478343, 0.007532956685, 0.007532956685, 2.25E-03, 0.003766478343]
inertia_arm_1 = [0.4103896104, 0.003291744093, 0.04189009822, 0.04189009822, 0.04194319087, 0.003291744093]
# Masses scale with volume (s^3).
mass_body = str(40 * s * s * s)
mass_arm_0 = str(2 * s * s * s)
mass_arm_1 = str(2 * s * s * s)
# Hip joint anchor points (one per leg corner); the two rear legs are
# rotated 180 degrees about z (see joint_X_0_rot).
joint_X_0_loc = [str( 0.35*s) +", " + str(-0.3 *s) + ", 0",
                 str( 0.35*s) +", " + str( 0.3 *s) + ", 0",
                 str(-0.35*s) +", " + str( 0.3 *s) + ", 0",
                 str(-0.35*s) +", " + str(-0.3 *s) + ", 0"]
joint_X_0_rot = ["0, 0, 0", "0, 0, 0", "0, 0, 3.14159", "0, 0, 3.14159"]
leg_X_0_inertial = str(0.05*s) + ", 0, 0"
joint_X_1_loc = str(0.15*s) + ", 0, 0"
leg_X_1_inertial = "0, 0, " + str(-0.21*s)
joint_X_2_loc = "0, 0, " + str(-0.50*s)
# NOTE(review): leg_X_2_inertial is defined but never used below; the third
# link segment reuses leg_X_1_inertial instead — confirm this is intended.
leg_X_2_inertial = "0, 0, " + str(-0.21*s)
# NOTE(review): inertias are scaled by s^2 while masses scale by s^3; a rigid
# body's inertia physically scales as s^5 — verify the intended scaling law.
inertia_matrix_body = [x * s * s for x in inertia_matrix_body]
inertia_arm_0 = [x * s * s for x in inertia_arm_0]
inertia_arm_1 = [x * s * s for x in inertia_arm_1]
inertia_body = assign_inertia(inertia_matrix_body)
inertia_arm_0 = assign_inertia(inertia_arm_0)
# NOTE(review): inertia_arm_1 is built but never used afterwards; every leg
# link below uses inertia_arm_0 — confirm.
inertia_arm_1 = assign_inertia(inertia_arm_1)
base_link = Link("base_link", contact,
                 Inertial(inertia_body, Mass(mass_body)),
                 Visual(Geometry(Mesh(filename="body.obj", scale=f"{s} {s} {s}"))),
                 Collision(Geometry(Mesh(filename="body.obj", scale=f"{s} {s} {s}")))
                 )
# First leg segment ("_0") for each of the four legs.
link_X_0 = []
for i in range(4):
    link_X_0.append(Link("link_" + str(i) + "_0", contact,
                         Inertial(inertia_arm_0, Mass(mass_arm_0), Origin(leg_X_0_inertial)),
                         Visual(Geometry(Mesh(filename="leg_X_0.obj", scale=f"{s} {s} {s}"))),
                         Collision(Geometry(Mesh(filename="leg_X_0.obj", scale=f"{s} {s} {s}")))
                         ))
# Second leg segment ("_1").
link_X_1 = []
for i in range(4):
    link_X_1.append(Link(f"link_{i}_1", contact,
                         Inertial(inertia_arm_0, Mass(mass_arm_0), Origin(leg_X_1_inertial)),
                         Visual(Geometry(Mesh(filename="leg_X_1.obj", scale=f"{s} {s} {s}"))),
                         Collision(Geometry(Mesh(filename="leg_X_1.obj", scale=f"{s} {s} {s}")))
                         ))
# Third leg segment ("_2").
# NOTE(review): these reuse the leg_X_1 mesh and leg_X_1_inertial origin;
# confirm a leg_X_2 mesh / leg_X_2_inertial wasn't intended here.
link_X_2 = []
for i in range(4):
    link_X_2.append(Link(f"link_{i}_2", contact,
                         Inertial(inertia_arm_0, Mass(mass_arm_0), Origin(leg_X_1_inertial)),
                         Visual(Geometry(Mesh(filename="leg_X_1.obj", scale=f"{s} {s} {s}"))),
                         Collision(Geometry(Mesh(filename="leg_X_1.obj", scale=f"{s} {s} {s}")))
                         ))
#Add first elements to robot
my_robot(base_link,
         link_X_0[0], link_X_1[0], link_X_2[0],
         link_X_0[1], link_X_1[1], link_X_2[1],
         link_X_0[2], link_X_1[2], link_X_2[2],
         link_X_0[3], link_X_1[3], link_X_2[3])
# Hip joints: base_link -> link_i_0, rotating about x.
joint_X_0 = []
for i in range(4):
    joint_X_0.append(Joint(Parent("base_link"), Child("link_" + str(i) + "_0"),
                           Origin(xyz=joint_X_0_loc[i], rpy=joint_X_0_rot[i]),
                           Axis("1, 0, 0"), type="continuous", name=f"joint_{i}_0"))
# Upper-leg joints: link_i_0 -> link_i_1, rotating about y.
joint_X_1 = []
for i in range(4):
    joint_X_1.append(Joint(Parent("link_{}_0".format(i)), Child(f"link_{i}_1"), Origin(xyz=joint_X_1_loc),
                           Axis("0, 1, 0"), type="continuous", name=f"joint_{i}_1"))
# Knee joints: link_i_1 -> link_i_2, rotating about y.
joint_X_2 = []
for i in range(4):
    joint_X_2.append(Joint(Parent("link_{}_1".format(i)), Child(f"link_{i}_2"), Origin(xyz=joint_X_2_loc),
                           Axis("0, 1, 0"), type="continuous", name=f"joint_{i}_2"))
my_robot(joint_X_0[0], joint_X_1[0], joint_X_2[0],
         joint_X_0[1], joint_X_1[1], joint_X_2[1],
         joint_X_0[2], joint_X_1[2], joint_X_2[2],
         joint_X_0[3], joint_X_1[3], joint_X_2[3],)
# Serialize the assembled robot to URDF.
f = open("walker_a/urdf/walker_a_0_5.urdf", "w")
f.write(str(my_robot))
f.close()
|
nilq/baby-python
|
python
|
import argparse
import constants
from data_support.tfrecord_wrapper import TFRecordWriter
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("data_dir", type=str, default='../resources/tf_data',
                        help="Directory where tfrecord files are stored")
    parser.add_argument("--model",
                        default=f"bert-{constants.SIZE_BASE}-{constants.LANGUAGE_MULTILINGUAL}-{constants.CASING_CASED}",
                        help="Transformer model name (see: https://huggingface.co/transformers/pretrained_models.html)")
    args = parser.parse_args()
    models = [args.model]
    # Each entry: (split, language code, comma-separated task names, path to
    # the UD conllu file). Commented entries are languages processed in
    # earlier runs; only the active entries below are (re)computed.
    data_spec = [
        # ('train', 'en', 'dep_distance,dep_depth,lex_distance,lex_depth,pos_distance,pos_depth,rnd_distance,rnd_depth',
        #  "/net/data/universal-dependencies-2.6/UD_English-EWT/en_ewt-ud-train.conllu"),
        # ('dev', 'en', 'dep_distance,dep_depth,lex_distance,lex_depth,pos_distance,pos_depth,rnd_distance,rnd_depth',
        #  "/net/data/universal-dependencies-2.6/UD_English-EWT/en_ewt-ud-dev.conllu"),
        # ('test', 'en', 'dep_distance,dep_depth,lex_distance,lex_depth,pos_distance,pos_depth,rnd_distance,rnd_depth',
        #  "/net/data/universal-dependencies-2.6/UD_English-EWT/en_ewt-ud-test.conllu"),
        # ('train', 'es','dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Spanish-AnCora/es_ancora-ud-train.conllu"),
        # ('dev', 'es', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Spanish-AnCora/es_ancora-ud-dev.conllu"),
        # ('test', 'es', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Spanish-AnCora/es_ancora-ud-test.conllu"),
        # ('train', 'sl','dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Slovenian-SSJ/sl_ssj-ud-train.conllu"),
        # ('dev', 'sl', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Slovenian-SSJ/sl_ssj-ud-dev.conllu"),
        # ('test', 'sl', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Slovenian-SSJ/sl_ssj-ud-test.conllu"),
        # ('train', 'zh','dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Chinese-GSD/zh_gsd-ud-train.conllu"),
        # ('dev', 'zh', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Chinese-GSD/zh_gsd-ud-dev.conllu"),
        # ('test', 'zh', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Chinese-GSD/zh_gsd-ud-test.conllu"),
        # ('train', 'id', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Indonesian-GSD/id_gsd-ud-train.conllu"),
        # ('dev', 'id', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Indonesian-GSD/id_gsd-ud-dev.conllu"),
        # ('test', 'id', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
        #  "/net/data/universal-dependencies-2.6/UD_Indonesian-GSD/id_gsd-ud-test.conllu")
        ('train', 'fi', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_Finnish-TDT/fi_tdt-ud-train.conllu"),
        ('dev', 'fi', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_Finnish-TDT/fi_tdt-ud-dev.conllu"),
        ('test', 'fi', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_Finnish-TDT/fi_tdt-ud-test.conllu"),
        ('train', 'ar', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_Arabic-PADT/ar_padt-ud-train.conllu"),
        ('dev', 'ar', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_Arabic-PADT/ar_padt-ud-dev.conllu"),
        ('test', 'ar', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_Arabic-PADT/ar_padt-ud-test.conllu"),
        ('train', 'fr', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_French-GSD/fr_gsd-ud-train.conllu"),
        ('dev', 'fr', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_French-GSD/fr_gsd-ud-dev.conllu"),
        ('test', 'fr', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_French-GSD/fr_gsd-ud-test.conllu"),
        ('train', 'eu', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_Basque-BDT/eu_bdt-ud-train.conllu"),
        ('dev', 'eu', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_Basque-BDT/eu_bdt-ud-dev.conllu"),
        ('test', 'eu', 'dep_distance,dep_depth,lex_distance,lex_depth,rnd_distance,rnd_depth,pos_distance,pos_depth',
         "/net/data/universal-dependencies-2.6/UD_Basque-BDT/eu_bdt-ud-test.conllu")
    ]
    # Compute the per-task tfrecords for every (model, spec) pair and save
    # them under data_dir.
    tf_writer = TFRecordWriter(models, data_spec, args.data_dir)
    tf_writer.compute_and_save(args.data_dir)
|
nilq/baby-python
|
python
|
import copy
import json
import time
from io import open
from .exceptions import (
WebpackBundleLookupError,
WebpackError,
WebpackLoaderBadStatsError,
WebpackLoaderTimeoutError,
)
class WebpackLoader(object):
    """Reads a webpack manifest and resolves named bundles to chunk URLs."""

    # Class-level cache shared by every loader instance, keyed by loader name.
    _assets = {}

    def __init__(self, name, config):
        self.name = name
        self.config = config

    def load_assets(self):
        """Read and parse the manifest file configured as MANIFEST_FILE."""
        # TODO
        # poll when debugging and block request until bundle is compiled
        # or the build times out
        manifest_path = self.config["MANIFEST_FILE"]
        try:
            with open(manifest_path, encoding="utf-8") as handle:
                return json.load(handle)
        except IOError:
            raise IOError(
                "Error reading {0}. Are you sure webpack has generated "
                "the file and the path is correct?".format(manifest_path)
            )

    def get_assets(self):
        """Return the manifest data, honoring the CACHE config flag."""
        if not self.config["CACHE"]:
            return self.load_assets()
        if self.name not in self._assets:
            self._assets[self.name] = self.load_assets()
        return self._assets[self.name]

    def filter_chunks(self, chunks):
        """Yield chunks whose URL matches none of the 'ignores' patterns,
        rewriting each surviving chunk's url via get_chunk_url."""
        for chunk in chunks:
            ignored = any(pattern.match(chunk["url"])
                          for pattern in self.config["ignores"])
            if ignored:
                continue
            chunk["url"] = self.get_chunk_url(chunk)
            yield chunk

    def get_chunk_url(self, chunk):
        """Return the servable URL for a chunk, mapping through Django
        staticfiles when the config says web_framework == 'django'."""
        url = chunk["url"]
        if self.config.get("web_framework", None) != "django":
            return url
        from django.contrib.staticfiles.storage import staticfiles_storage
        from django.conf import settings
        if url.startswith("http"):
            # webpack dev server serves the asset directly
            return url
        prefix = settings.STATIC_URL
        # Strip the static prefix (when present) before asking staticfiles.
        start = len(prefix) if url.startswith(prefix) else 0
        return staticfiles_storage.url(url[start:])

    def get_bundle(self, bundle_name):
        """Resolve a bundle name to its filtered chunk iterator.

        Raises WebpackBundleLookupError when the bundle cannot be resolved.
        """
        assets = copy.copy(self.get_assets())
        try:
            entry_assets = assets["entrypoints"][bundle_name]["assets"]
            # keep the order: js first, then css
            urls = entry_assets.get("js", []) + entry_assets.get("css", [])
            assets.pop("entrypoints")
            # invert the mapping so the url becomes the key
            name_by_url = dict((value, key) for (key, value) in assets.items())
            chunks = [{"name": name_by_url[url], "url": url} for url in urls]
        except Exception:
            raise WebpackBundleLookupError(
                "Cannot resolve bundle {0}.".format(bundle_name)
            )
        return self.filter_chunks(chunks)
|
nilq/baby-python
|
python
|
import os
import shutil
from ptest.assertion import assert_true
from ptest.decorator import TestClass, BeforeMethod, Test, AfterMethod
from watchdog.events import FileCreatedEvent
from shirp.event import EventConf
from shirp.handler import HDFSHandler
HDFS_GROUP = "grp-hdfs"
@TestClass(run_mode="singleline")
class HDFSHandlerTest:
    """End-to-end tests for HDFSHandler PUT and GET.

    NOTE(review): these tests depend on a live HDFS namenode at
    http://192.168.1.24:50070 and on hard-coded Windows paths under
    D:\\Users\\Cedric — they only run in the original author's environment.
    """
    def __init__(self, hdfs_put_handler=None, hdfs_get_handler=None, put_event_conf=None, get_event_conf=None):
        """
        :param hdfs_put_handler:
        :type hdfs_put_handler: HDFSHandler
        :param hdfs_get_handler:
        :type hdfs_get_handler: HDFSHandler
        :param put_event_conf:
        :type put_event_conf: EventConf
        :param get_event_conf:
        :type get_event_conf: EventConf
        """
        self.hdfs_put_handler = hdfs_put_handler
        self.hdfs_get_handler = hdfs_get_handler
        self.put_event_conf = put_event_conf
        self.get_event_conf = get_event_conf
        self.current_dir = os.path.dirname(os.path.realpath(__file__))
        # Scratch flag; not read by the assertions below.
        self.result = False
    @BeforeMethod(group=HDFS_GROUP)
    def before_hdfs_test(self):
        # Build one PUT conf (local 'in' dir -> /user/hduser) and one GET conf
        # (/user/hduser -> local 'out' dir), both matching test_????.txt.
        self.put_event_conf = EventConf(True, "test move", "hdfs", HDFSHandler.TYPE_PUT,
                                        "D:\\Users\\Cedric\\PycharmProjects\\event-manager\\rep_test\\in",
                                        ["test_????.txt"], "/user/hduser",
                                        {"hdfsUrl": "http://192.168.1.24:50070", "hdfsUser": "hduser"})
        self.get_event_conf = EventConf(True, "test move", "hdfs", HDFSHandler.TYPE_GET, "/user/hduser",
                                        ["test_????.txt"],
                                        "D:\\Users\\Cedric\\PycharmProjects\\event-manager\\rep_test\\out",
                                        {"hdfsUrl": "http://192.168.1.24:50070", "hdfsUser": "hduser"})
        # Route handler logging next to this test module.
        HDFSHandler.FILE_LOG = self.current_dir + os.path.sep + "events.log"
        self.hdfs_put_handler = HDFSHandler(self.put_event_conf, self.put_event_conf.subtype)
        self.hdfs_get_handler = HDFSHandler(self.get_event_conf, self.get_event_conf.subtype)
    @Test(group=HDFS_GROUP)
    def move_test(self):
        # Round trip: drop a file in the watched dir, PUT it to HDFS via the
        # created-file event, GET it back, and check it landed locally.
        shutil.copy("D:\\Users\\Cedric\\PycharmProjects\\event-manager\\rep_test\\test_2208.txt",
                    self.put_event_conf.directory)
        event = FileCreatedEvent(self.put_event_conf.directory + os.path.sep + "test_2208.txt")
        assert_true(self.hdfs_put_handler.on_created(event))
        assert_true(self.hdfs_get_handler.process("/user/hduser/test_2208.txt"))
        assert_true(os.path.exists(self.get_event_conf.destination + os.path.sep + "test_2208.txt"))
    @AfterMethod(group=HDFS_GROUP)
    def after_hdfs_test(self):
        # Clean up the retrieved copy so the test is repeatable.
        os.remove(self.get_event_conf.destination + os.path.sep + "test_2208.txt")
|
nilq/baby-python
|
python
|
"""Project-level configuration and state."""
import os.path
class Project(object):
    """Tracks the overall build configuration: the filesystem layout, the
    registered packages and named environments, and the set of Ninja rules,
    and provides path-construction and lookup services over that state."""

    def __init__(self, root, build_dir):
        """Creates a Project.

        root: path to root of project structure.
        build_dir: path to build directory.
        """
        self.root = root
        self.build_dir = build_dir
        self.named_envs = {}
        self.packages = {}
        # Seed the rule table with the one rule every project build needs.
        self.ninja_rules = {
            'cobble_symlink_product': {
                'command': 'ln -sf $target $out',
                'description': 'SYMLINK $out',
            },
        }

    # TODO: rename something like static_path?
    def inpath(self, *parts):
        """Joins *parts* onto the project root to name an input resource."""
        return os.path.join(self.root, *parts)

    def outpath(self, env, *parts):
        """Joins *parts* onto the per-environment output directory.

        The same product may be built several times under different
        environments and stored separately, so the environment (whose digest
        keys the directory) must be supplied.
        """
        return os.path.join(self.build_dir, 'env', env.digest, *parts)

    def linkpath(self, *parts):
        """Names a path under the 'latest' symlinks in the build directory."""
        return os.path.join(self.build_dir, 'latest', *parts)

    def add_package(self, package):
        """Registers 'package' with the project."""
        assert package.relpath not in self.packages, \
            "duplicate package at %s" % package.relpath
        assert package.project is self, "package project misconfigured"
        self.packages[package.relpath] = package

    def find_target(self, ident):
        """Resolves an absolute identifier such as '//foo/bar:target' to its
        Target; a missing ':name' defaults to the package path's basename."""
        assert ident.startswith('//'), "bad identifier: %r" % ident
        pieces = ident[2:].split(':')
        if len(pieces) == 2:
            # Explicit target name.
            pkg_path, target_name = pieces
        elif len(pieces) == 1:
            # Target name omitted; default to the basename.
            pkg_path = pieces[0]
            target_name = os.path.basename(pkg_path)
        else:
            raise Exception('Too many colons in identifier: %r' % ident)
        assert pkg_path in self.packages, \
            "Reference to unknown package: %r" % ident
        assert target_name in self.packages[pkg_path].targets, \
            "Target %s not found in package %s" % (target_name, pkg_path)
        return self.packages[pkg_path].targets[target_name]

    def define_environment(self, name, env):
        """Registers the BUILD.conf-level environment 'env' under 'name'."""
        assert name not in self.named_envs, \
            "more than one environment named %s" % name
        self.named_envs[name] = env

    def add_ninja_rules(self, rules):
        """Merges 'rules' (dicts keyed like Ninja rule syntax attributes) into
        the project-wide rule set, insisting redefinitions be identical."""
        for rule_name, rule in rules.items():
            if rule_name not in self.ninja_rules:
                self.ninja_rules[rule_name] = rule
            else:
                assert rule == self.ninja_rules[rule_name], \
                    "ninja rule %s defined incompatibly in multiple places" % rule_name

    def files(self):
        """Returns an iterator over BUILD.conf and every package BUILD file."""
        yield self.inpath('BUILD.conf')
        for package in self.packages.values():
            yield package.inpath('BUILD')

    def targets(self):
        """Returns an iterator over all Targets in the project."""
        for package in self.packages.values():
            for target in package.targets.values():
                yield target

    def concrete_targets(self):
        """Returns an iterator over only the Targets marked concrete."""
        return (target for target in self.targets() if target.concrete)
class Package(object):
    """One directory of the project tree: holds its targets and resolves
    paths and identifiers relative to itself."""

    def __init__(self, project, relpath):
        """Creates a Package and registers it with 'project'."""
        self.project = project
        self.relpath = os.path.normpath(relpath)
        self.targets = {}
        project.add_package(self)

    def add_target(self, target):
        """Adds a 'Target' to the package."""
        assert target.name not in self.targets, \
            "duplicate target %s in package %s" % (target.name, self.relpath)
        self.targets[target.name] = target

    def outpath(self, env, *parts):
        """Names an output resource below this package's directory."""
        return self.project.outpath(env, self.relpath, *parts)

    def inpath(self, *parts):
        """Names an input resource below this package's directory."""
        return self.project.inpath(self.relpath, *parts)

    def linkpath(self, *parts):
        """Names a 'latest' symlink path below this package's directory."""
        return self.project.linkpath(self.relpath, *parts)

    def make_absolute(self, ident):
        """Converts a package-relative ident (':name') into an absolute one
        ('//pkg:name'); already-absolute idents pass through unchanged."""
        if ident.startswith('//'):
            return ident
        if ident.startswith(':'):
            return '//' + self.relpath + ident
        raise Exception('Unexpected ident: %r' % ident)

    def find_target(self, ident):
        """Finds a target like Project.find_target, additionally accepting
        the local ':name' shorthand."""
        if ident.startswith(':'):
            ident = '//' + self.relpath + ident
        return self.project.find_target(ident)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import json
from bs4 import BeautifulSoup
from django.contrib.auth import get_user_model
from django.test import TestCase
class BaseTestCase(TestCase):
    """Shared base for view tests: loads the user fixture and provides
    login/response-parsing helpers.

    Fixture accounts:
      superuser -- admin / adminpassword
      regular   -- neo / password
    """
    fixtures = [
        'users.json',
    ]
    USER_PWD = 'password'

    @staticmethod
    def get_soup(response):
        """Parse an HTML response body into a BeautifulSoup tree."""
        return BeautifulSoup(response.content)

    @staticmethod
    def get_json(response):
        """Decode a JSON response body into Python objects."""
        body = response.content.decode('utf-8')
        return json.loads(body)

    def setUp(self):
        user_model = get_user_model()
        self.user = user_model.objects.get(username='neo')

    def login(self, username='neo'):
        """Log the test client in as 'username', dropping any prior session."""
        self.client.logout()
        self.client.login(username=username, password=self.USER_PWD)
class BaseTestLoginCase(BaseTestCase):
    """BaseTestCase variant whose tests start already logged in as 'neo'."""
    def setUp(self):
        super(BaseTestLoginCase, self).setUp()
        self.login()
|
nilq/baby-python
|
python
|
from django.contrib import sitemaps
from django.urls import reverse
from booru.models import Post
class PostSitemap(sitemaps.Sitemap):
    """Sitemap entries for individual approved posts."""
    priority = 0.8
    changefreq = 'daily'
    def items(self):
        # Only approved posts appear in the sitemap.
        return Post.objects.approved()
    def location(self, item):
        return item.get_absolute_url()
    def lastmod(self, item):
        return item.update_timestamp
class TagsSitemap(sitemaps.Sitemap):
    """Sitemap entries for the 25 most common tags' search pages."""
    priority = 0.5
    def items(self):
        return Post.tags.most_common()[:25]
    def location(self, item):
        return item.get_search_url()
    def lastmod(self, item):
        # NOTE(review): items here are tag objects, not posts; confirm they
        # actually expose update_timestamp like Post does.
        return item.update_timestamp
class PostListSitemap(sitemaps.Sitemap):
    """Single sitemap entry for the post listing page."""
    priority = 0.6
    changefreq = 'daily'
    def items(self):
        # One synthetic item; location() ignores it and returns the list URL.
        return ['posts']
    def location(self, item):
        return reverse('booru:posts')
|
nilq/baby-python
|
python
|
from .utils import check_token
from .models import Entry
from .checks_models import EntryCheck
# Module-level singletons shared by importers of this module.
open_entry_checks = EntryCheck()
open_entry = Entry()
|
nilq/baby-python
|
python
|
import requests
import pytest
from helpers import (create_user,
get_random_email,
login_user,
refresh_token, get_user)
from requests import HTTPError
HOST = 'localhost:5000'
def test_register():
    """A fresh email registers successfully and is echoed back."""
    email = get_random_email()
    created = create_user(email, 'pass')
    assert created['email'] == email
def test_register_user_twice():
    """Registering the same email a second time is rejected."""
    email = get_random_email()
    create_user(email, 'pass')
    with pytest.raises(HTTPError):
        create_user(email, 'pass')
def test_login():
    """Login followed by a token refresh succeeds for a valid account."""
    email = get_random_email()
    create_user(email, 'pass')
    refresh_token(login_user(email, 'pass'))
def test_token_guard():
    """A tampered token is rejected on refresh."""
    email = get_random_email()
    create_user(email, 'pass')
    good_token = login_user(email, 'pass')
    with pytest.raises(requests.HTTPError):
        refresh_token(good_token + '1')
def test_login_with_bad_password():
    """Logging in with the wrong password raises an HTTP error."""
    email = get_random_email()
    create_user(email, 'pass')
    with pytest.raises(requests.HTTPError):
        login_user(email, 'wrong_pass')
def test_get_current():
    """The current-user endpoint returns the account email and the starting
    balance of 2.5."""
    email = get_random_email()
    create_user(email, 'pass')
    token = login_user(email, 'pass')
    current = get_user(token=token)
    assert current['email'] == email
    assert current['balance'] == 2.5
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. currentmodule:: test_Point
.. moduleauthor:: Pat Daburu <pat@daburu.net>
This is a unit test module.
"""
import unittest
from djio.geometry import GeometryException
class TestGeometryExceptionSuite(unittest.TestCase):
    """Unit tests for GeometryException construction."""

    def test_initWithoutInner_verify(self):
        """Without an inner exception: message kept, inner defaults to None."""
        exc = GeometryException(message='Test Message')
        self.assertEqual('Test Message', exc.message)
        self.assertIsNone(exc.inner)

    def test_initWithInner_verify(self):
        """With an inner exception: both message and inner are kept."""
        cause = Exception()
        exc = GeometryException(message='Test Message',
                                inner=cause)
        self.assertEqual('Test Message', exc.message)
        self.assertTrue(exc.inner == cause)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import uuid
import time
import zlib
import random
import numpy as np
from string import ascii_lowercase
# Pre-encoded lowercase letters; only used by the commented-out Method 2.
list_chars = list(c.encode('utf8') for c in ascii_lowercase)
# Number of objects
#num_files_list = [1]
num_files_list = [1, 100, 1000, 10000, 100000, 1000000]
# zlib compression levels to benchmark (0=none ... 9=max)
#compression_levels = [0, 1, 3, 5, 7, 9]
compression_levels = [1]
# Total target size
total_size_target = 100000000
for num_files in num_files_list:
    # Keep the total payload roughly constant: more files -> smaller files.
    size = total_size_target // num_files
    data = {}
    start = time.time()
    for _ in range(num_files):
        filename = str(uuid.uuid4().hex)
        ## Method 1
        content = os.urandom(size)
        ## Method 2
        #content = b"".join(np.random.choice(list_chars, size))
        ## Method 3
        #with open('test.dat', 'rb') as fhandle:
        # content = fhandle.read(size)
        #content = (content + content)[:size]
        #assert len(content) == size
        data[filename] = content
    tot_time = time.time() - start
    total_size = sum(len(content) for content in data.values())
    print('{} objects generated in {} s. Total size: {} bytes (~{:.3f} MB).'.format(num_files, tot_time, total_size, (total_size / 1024) / 1024))
    for compression_level in compression_levels:
        print('TESTING FOR ZLIB COMPRESSION WITH LEVEL {}'.format(compression_level))
        v = {}
        start = time.time()
        for key, val in data.items():
            v[key] = zlib.compress(val, compression_level)
        tot_time = time.time() - start
        tot_compressed_size = sum(len(compressed_string) for compressed_string in v.values())
        print('Total time to compress {} objects: {} s (final size: {} MB, speed: {} MB/s)'.format(num_files, tot_time, tot_compressed_size / 1024 / 1024, total_size/1024/1024/tot_time))
        # Decompress
        start = time.time()
        for compressed_string in v.values():
            zlib.decompress(compressed_string)
        tot_time = time.time() - start
        print('Total time to decompress back: {} s (speed: {} MB/s)'.format(tot_time, total_size/1024/1024/tot_time))
        print('-'*72)
    print('='*72)
|
nilq/baby-python
|
python
|
# Inherit the rotated RetinaNet OBB baseline config and enable mixed-precision
# training with dynamic loss scaling.
_base_ = ['./rotated_retinanet_obb_r50_fpn_1x_dota_le90.py']
fp16 = dict(loss_scale='dynamic')
|
nilq/baby-python
|
python
|
from tksheet import Sheet
import tkinter as tk
class Sheet_Listbox(Sheet):
    """A Sheet stripped of headers, grids, index and top-left corner so it
    behaves like a simple single-column listbox."""
    def __init__(self,
                 parent,
                 values = None):
        """Create the listbox inside 'parent'.

        values: optional iterable of row values to seed the sheet with.
                Defaults to None (empty) rather than a mutable [] default,
                which would be shared across all instances.
        """
        Sheet.__init__(self,
                       parent = parent,
                       show_horizontal_grid = False,
                       show_vertical_grid = False,
                       show_header = False,
                       show_row_index = False,
                       show_top_left = False,
                       empty_horizontal = 0,
                       empty_vertical = 0)
        if values:
            self.values(values)
    def values(self, values = None):
        """Replace the sheet data with one single-cell row per entry of
        'values' (empty when omitted), then resize cells to fit the text."""
        if values is None:
            values = []
        self.set_sheet_data([[v] for v in values],
                            reset_col_positions = False,
                            reset_row_positions = False,
                            redraw = False,
                            verify = False)
        self.set_all_cell_sizes_to_text()
class demo(tk.Tk):
    """Minimal window hosting a Sheet_Listbox filled with 2000 sample rows."""
    def __init__(self):
        tk.Tk.__init__(self)
        # Let the single grid cell absorb all window resizing.
        self.grid_columnconfigure(0,
                                  weight = 1)
        self.grid_rowconfigure(0,
                               weight = 1)
        self.listbox = Sheet_Listbox(self,
                                     values = [f"_________ Item {i} _________" for i in range(2000)])
        self.listbox.grid(row = 0,
                          column = 0,
                          sticky = "nswe")
        #self.listbox.values([f"new values {i}" for i in range(50)]) set values
app = demo()
app.mainloop()
|
nilq/baby-python
|
python
|
# 1.
# C = float(input('输入摄氏温度'))
# F = (9/5)*C + 32
# print('%.2F 华氏度' %F)
# 2.
# import math
# r = float(input('输入圆柱半径:'))
# l = float(input('输入圆柱高:'))
# area = r*r*math.pi
# volume = area*l
# print('面积:%.2f' %area)
# print('体积:%.2f' %volume)
# 3.
# feet = float(input('请输入英尺数:'))
# meters = feet * 0.305
# print('%.1ffeet is %.4fmeters'%(feet,meters))
# 4.
# M = float(input('输入按千克计算的水量:'))
# initialTemperature = float(input('输入水的初始温度:'))
# finalTemperature = float(input('输入水的最终温度:'))
# Q = M * (finalTemperature-initialTemperature)*4184
# print('所需能量:%.1f%Q',Q)
# 5.
# balance = float(input('输入差额:'))
# interest_rate = float(input('输入年利率:'))
# interest = balance*(interest_rate/1200)
# print('下月需付利息:%.5f' %interest)
# 6.
# v0 = float(input('输入初始速度:'))
# v1 = float(input('输入末速度:'))
# t = float(input('输入速度变化所占用的时间:'))
# a =(v1-v0)/t
# print('平均加速度为:%.4f' %a)
# 7.
# num = float(input('输入每月存款数:'))
# rate =0.05/12
# interest = 1+rate
# count=[0]
# for i in range(6):
# month = (100+count[i]*interest)
# count.append(month)
# print('六个月后的账户总额:%.2f' %count[6])
# 8.
# num = int(input("请输入1-1000的一个整数:"))
# bai = int(num%10)
# shi = int(num/10%10)
# ge = int(num/100)
# sum = ge + shi + bai
# print('各位数字之和:' ,sum)
# 9.
# import math
# r = float(input('输入顶点到中心的距离:'))
# s = 2*r*math.sin(math.pi/5)
# area = 5*s*s/(4*math.tan(math.pi/5))
# print('五边形的面积%.2f' %area)
# 10.
# import math
# print ('输入第一个坐标:')
# x1 = float(input('>'))
# y1 = float(input('>'))
# print ('输入第二个坐标:')
# x2 = float(input('>'))
# y2 = float(input('>'))
# radius = 6371.01
# math.radians = float(input('输入地球表面的经度:'))
# math.arccoss = float(input('输入地球表面的纬度:'))
# d = math.radians * math.arccos(math.sin(math.radians(x1)) * math.sin(math.radians(x2)) + math.cos(math.radians(x1)) * math.cos(math.radians(x2)) * math.cos(math.radians(y1-y2))
# print ('%d' %d)
# 10.
# import math
# x1,y1 = eval(input('Please input point1(latitude and longitude) in degrees:'))
# x2,y2 = eval(input('Please input point2(latitude and longitude) in degrees:'))
# radius = 6371.01
# x11 = math.radians(x1) #math.radians()函数将度数转换成弧度数
# y11 = math.radians(y1)
# x22 = math.radians(x2)
# y22 = math.radians(y2)
# d = radius * math.acos(math.sin(x11) * math.sin(x22) + math.cos(x11) * math.cos(x22) * math.cos(y11-y22))
# print('The distance between the two points is %5.2f km'%d)
# 11.
# import math
# s = float(input('输入五角星的边长:'))
# area = (5*s*s)/(4*math.tan(math.pi/5))
# print('五角星的面积为:%.2f',area)
# 12.
# import math
# n = int(input('输入边数:'))
# s = float(input('输入正多边形的边长:'))
# area = (n * s * s) / (4 * math.tan (math.pi / n))
# print('%.2f',area)
# 13.
# ASCII = int(input('输入整数=>'))
# print(chr(ASCII))
# 14.
# name = (input('姓名:'))
# workhour = int(input('一周工作时间:'))
# many = float(input('每小时的报酬:'))
# lianbang = float(input('联邦预扣税率:'))
# zhou = float(input('州预扣税率:'))
# rate1 = workhour * many
# print(rate1)
# print('Deduction:')
# faderal = rate1 * lianbang
# print(faderal)
# state = rate1 * zhou
# print(state)
# zongmany = rate1 -(faderal + state)
# print(zongmany)
# 15.
# num = input('输入一个四位整数数字:')
# for i in range(len(num)):
# print(num[-i + len(num)-1],end='')
# # 16.
# import hashlib
# a = input('请输入一行文本:')
# m = hashlib.md5()
# b = a.encode(encoding='utf-8')
# m.update(b)
# a_md5 = m.hexdigest
# print('md5加密前为:'+a)
# print('md5加密前为:'+a_md5)
|
nilq/baby-python
|
python
|
# You are provided with a code that raises many exceptions. Fix it, so it works correctly.
# numbers_list = input().split(", ")
# result = 0
#
# for i in range(numbers_list):
#     number = numbers_list[i + 1]
#     if number < 5:
#         result *= number
#     elif number > 5 and number > 10:
#         result /= number
#
# print(result)

# Corrected version: parse the comma-separated integers, multiply values
# up to 5 into the running product, divide by values in (5, 10], and
# print the truncated integer result.
numbers_list = [int(token) for token in input().split(", ")]
result = 1
for value in numbers_list:
    if value <= 5:
        result *= value
    elif value <= 10:
        result /= value
print(int(result))
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
"""This is a prototype work manager which reads work requests from a file and
submits them as messages to a RabbitMQ queue.
This is development only. For a real system, you would get work from a
database or other entity.
"""
import os
import sys
import json
import logging
from argparse import ArgumentParser
from time import sleep
import proton
from proton import Message
from proton.utils import BlockingConnection
from proton.handlers import IncomingMessageHandler
# Module-wide logging handle and queue configuration; the MSG_* globals
# are populated from the environment in main().
logger = None
SYSTEM = 'PROTO'
COMPONENT = 'work-manager'
MSG_SERVICE_STRING = None
MSG_WORK_QUEUE = None
MSG_STATUS_QUEUE = None
class LoggingFilter(logging.Filter):
    """Logging filter that stamps every record with system/component names.

    The injected ``system`` and ``component`` attributes are consumed by
    the ``%(system)s``/``%(component)s`` fields of the formatter.
    """

    def __init__(self, system='', component=''):
        super(LoggingFilter, self).__init__()
        self.system, self.component = system, component

    def filter(self, record):
        """Attach the configured identifiers and accept every record."""
        record.system = self.system
        record.component = self.component
        return True
class ExceptionFormatter(logging.Formatter):
    """Formatter with a timestamped default layout and one-line tracebacks.

    The default format requires records carrying ``system`` and
    ``component`` attributes (see LoggingFilter).
    """

    DEFAULT_FMT = ('%(asctime)s.%(msecs)03d'
                   ' %(levelname)-8s'
                   ' %(system)s'
                   ' %(component)s'
                   ' %(message)s')
    DEFAULT_DATEFMT = '%Y-%m-%dT%H:%M:%S'

    def __init__(self, fmt=None, datefmt=None):
        super(ExceptionFormatter, self).__init__(
            fmt=self.DEFAULT_FMT if fmt is None else fmt,
            datefmt=self.DEFAULT_DATEFMT if datefmt is None else datefmt)

    def formatException(self, exc_info):
        """Collapse the formatted traceback into a repr'd string."""
        return repr(super(ExceptionFormatter, self).formatException(exc_info))

    def format(self, record):
        """Format the record, flattening traceback newlines to one line."""
        text = super(ExceptionFormatter, self).format(record)
        if record.exc_text:
            # Replace both real and escaped newlines so the whole record
            # stays on a single log line.
            text = text.replace('\n', ' ')
            text = text.replace('\\n', ' ')
        return text
def setup_logging(args):
    """Configure the root logger with the project formatter and filter.

    Uses ``args.debug`` (an int) as the level when non-zero, otherwise
    INFO.  The configured root logger is stored in the module global
    ``logger``.
    """
    global logger

    level = args.debug if args.debug else logging.INFO

    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(ExceptionFormatter())
    handler.addFilter(LoggingFilter(SYSTEM, COMPONENT))

    logger = logging.getLogger()
    logger.setLevel(level)
    logger.addHandler(handler)
def retrieve_command_line():
    """Build the argument parser and return the parsed command line.

    Options:
      --job-filename  JSON job file to read work requests from.
      --dev-mode      Flag enabling developer mode (default False).
      --debug         Integer debug logging level (default 0).
    """
    parser = ArgumentParser(description='Prototype Work Manager')

    parser.add_argument('--job-filename', action='store',
                        dest='job_filename', required=False, metavar='TEXT',
                        help='JSON job file to use')
    parser.add_argument('--dev-mode', action='store_true',
                        dest='dev_mode', required=False, default=False,
                        help='Run in developer mode')
    parser.add_argument('--debug', action='store',
                        dest='debug', required=False, type=int, default=0,
                        metavar='DEBUG_LEVEL', help='Log debug messages')

    return parser.parse_args()
def get_env_var(variable, default):
    """Return *variable* from the environment, falling back to *default*.

    Raises RuntimeError when the resulting value is empty or None,
    i.e. the variable is effectively required.
    """
    value = os.environ.get(variable, default)
    if not value:
        raise RuntimeError('You must specify {} in the environment'
                           .format(variable))
    return value
def get_jobs(job_filename):
    """Load work requests from *job_filename* and delete the file.

    Returns the list of job dicts found under the file's ``jobs`` key;
    a missing or None filename yields an empty list.
    """
    if not (job_filename and os.path.isfile(job_filename)):
        return []

    with open(job_filename, 'r') as input_fd:
        job_dict = json.loads(input_fd.read())

    jobs = [job for job in job_dict['jobs']]
    # The file is consumed: remove it so the same jobs are not submitted
    # again on the next poll.
    os.unlink(job_filename)
    return jobs
def main():
    """Main processing for the application.

    Reads the queue configuration from the environment, then loops
    forever: connect to the message service, submit any jobs found in
    the job file to the work queue, close the connection, and sleep
    before polling again.  Ctrl-C terminates cleanly.
    """
    global MSG_SERVICE_STRING
    global MSG_WORK_QUEUE
    global MSG_STATUS_QUEUE

    # Example connection string: amqp://<username>:<password>@<host>:<port>
    MSG_SERVICE_STRING = get_env_var('PROTO_MSG_SERVICE_CONNECTION_STRING', None)
    MSG_WORK_QUEUE = get_env_var('PROTO_MSG_WORK_QUEUE', None)
    MSG_STATUS_QUEUE = get_env_var('PROTO_MSG_STATUS_QUEUE', None)

    args = retrieve_command_line()

    # Configure logging
    setup_logging(args)

    logger.info('Begin Processing')

    try:
        while True:
            # BUG FIX: create the connection *before* entering the try
            # block.  In the original, a failed BlockingConnection() still
            # reached the finally clause, where `connection` was unbound
            # and the resulting NameError masked the real error.
            connection = BlockingConnection(MSG_SERVICE_STRING)
            try:
                # Create a sender
                sender = connection.create_sender(MSG_WORK_QUEUE)

                jobs = get_jobs(args.job_filename)
                for job in jobs:
                    message_json = json.dumps(job, ensure_ascii=False)
                    try:
                        sender.send(Message(body=message_json))
                        # TODO - This prototype doesn't care, but we
                        # TODO - should probably update the status at
                        # TODO - the work source.
                        print('Queued Message = {}'.format(message_json))
                    except proton.ConnectionException:
                        # TODO - This prototype doesn't care, but does
                        # TODO - something need to be done if this
                        # TODO - happens?
                        print('Returned Message = {}'.format(message_json))
            finally:
                connection.close()

            sleep(60)
    except KeyboardInterrupt:
        pass
    #except pika.exceptions.ConnectionClosed:
    #    pass

    logger.info('Terminated Processing')


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# coding: utf-8
import numpy as np
from numpy import matrix as mat
import cv2
import os
import math
def undistort(img,               # image data
              fx, fy, cx, cy,    # camera intrinsics
              k1, k2,            # radial distortion parameters
              p1=None, p2=None,  # tangential distortion parameters
              radial_ud_only=True):
    """Undistort an image with the polynomial distortion model.

    For every output pixel, the corresponding distorted source location
    is computed (radial k1/k2, optionally tangential p1/p2) and sampled
    with nearest-neighbour interpolation.  Source locations falling
    outside the image map to 0.  Accepts gray-scale or 3-channel images;
    returns a uint8 image of the same shape, or None on bad input.
    """
    if img is None:
        print('[Err]: empty image.')
        return

    shape = img.shape
    is_bgr = len(shape) == 3
    if is_bgr:
        H, W = shape[0], shape[1]
    elif len(shape) == 2:
        H, W = shape
    else:
        print('[Err]: image format wrong!')
        return

    out = np.zeros_like(img, dtype=np.uint8)

    # Fill each output pixel from its distorted source location.
    for v in range(H):
        for u in range(W):  # (u, v) are pixel coordinates
            # Pixel -> normalized camera coordinates.
            xn = (u - cx) / fx
            yn = (v - cy) / fy
            r2 = (xn * xn) + (yn * yn)
            r4 = r2 * r2
            radial = 1.0 + k1 * r2 + k2 * r4
            if radial_ud_only:  # radial distortion only
                xd = xn * radial
                yd = yn * radial
            else:  # radial + tangential distortion
                xd = xn * radial + \
                    2.0 * p1 * xn * yn + p2 * (r2 + 2.0 * xn * xn)
                yd = yn * radial + \
                    p1 * (r2 + 2.0 * yn * yn) + 2.0 * p2 * xn * yn

            # Back to pixel coordinates with nearest-neighbour rounding.
            u_src = int(fx * xd + cx + 0.5)
            v_src = int(fy * yd + cy + 0.5)

            # @Todo: using bilinear interpolation...
            if 0 <= u_src < W and 0 <= v_src < H:
                out[v, u] = img[v_src, u_src]  # works for 2D and 3D images
            else:
                out[v, u] = 0  # source outside the image area
    return out.astype('uint8')
def test_undistort_img():
    """Optimize k1/k2 with LM from a rough guess, then undistort the
    sample image with the resulting coefficients."""
    img_path = './distorted.png'

    # Camera intrinsics.
    fx, fy = 458.654, 457.296
    cx, cy = 367.215, 248.375
    camera_intrinsics = [fx, fy, cx, cy]

    # Reference distortion parameters (radial and tangential).
    k1 = -0.28340811
    k2 = 0.07395907
    p1 = 0.00019359
    p2 = 1.76187114e-05

    # Initial guess for the parameters to be optimized (k1, k2).
    params = np.array([[-0.1],
                       [0.1]])

    # ---------- Run LM optimization
    # NOTE(review): LM_Optimize rebinds its local `params`; verify the
    # optimized values actually propagate back to this array -- TODO confirm.
    LM_Optimize(params)
    k1 = params[0][0]
    k2 = params[1][0]
    # ----------
    undistort_img(img_path, camera_intrinsics, k1, k2, p1, p2)
def undistort_img(img_path,
                  camera_intrinsics,
                  k1, k2, p1=None, p2=None,
                  is_color=True):
    """Load an image from *img_path*, undistort it and display both
    the original and the undistorted version.

    *camera_intrinsics* is the sequence [fx, fy, cx, cy].
    """
    fx = camera_intrinsics[0]
    fy = camera_intrinsics[1]
    cx = camera_intrinsics[2]
    cy = camera_intrinsics[3]

    if not os.path.isfile(img_path):
        print('[Err]: invalid image path.')
        return

    img_orig = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
    read_flag = cv2.IMREAD_COLOR if is_color else cv2.IMREAD_GRAYSCALE
    img = cv2.imread(img_path, read_flag)
    if img is None:
        print('[Err]: empty image.')
        return

    # ---------- Do undistortion
    img_undistort = undistort(img, fx, fy, cx, cy, k1, k2, p1, p2)
    # ----------

    cv2.imshow('origin', img_orig)
    cv2.imshow('undistort', img_undistort)
    cv2.waitKey()
def show_points_of_curve():
    """Draw the sampled curve points, their centroid and the chord joining
    the two endpoints on top of the distorted test image."""
    pts_on_curve = [
        [546, 20], [545, 40], [543, 83],
        [536, 159], [535, 170], [534, 180],
        [531, 200], [530, 211], [529, 218],
        [526, 236], [524, 253], [521, 269],
        [519, 281], [517, 293], [515, 302],
        [514, 310], [512, 320], [510, 329],
        [508, 341], [506, 353], [505, 357]
    ]
    print('Total {:d} points on the curve.'.format(len(pts_on_curve)))

    img_path = './distorted.png'
    if not os.path.isfile(img_path):
        print('[Err]: invalid image path.')
        return
    img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
    if img is None:
        print('[Err]: empty image.')
        return

    # Draw the points (green) while accumulating centroid sums.
    sum_x = 0.0
    sum_y = 0.0
    for pt in pts_on_curve:
        sum_x += pt[0]
        sum_y += pt[1]
        cv2.circle(img, tuple(pt), 5, (0, 255, 0), -1)

    # Centroid (red), rounded to the nearest pixel.
    n = float(len(pts_on_curve))
    centroid = (int(sum_x / n + 0.5), int(sum_y / n + 0.5))
    cv2.circle(img, centroid, 7, (0, 0, 255), -1)

    # Chord (blue) between the first and last sampled points.
    cv2.line(img, tuple(pts_on_curve[0]), tuple(pts_on_curve[-1]),
             (255, 0, 0), 2)

    cv2.imshow('Curve', img)
    cv2.waitKey()
def line_equation(first_x, first_y, second_x, second_y):
    """Return coefficients (A, B, C) of the line A*x + B*y + C = 0
    passing through (first_x, first_y) and (second_x, second_y)."""
    return (second_y - first_y,
            first_x - second_x,
            second_x * first_y - first_x * second_y)
def dist_of_pt_to_line(pt, A, B, C):
    """Signed distance from 2D point *pt* to the line A*x + B*y + C = 0.

    NOTE: the sign is deliberately kept (no abs), matching the original
    implementation's negated numerator.
    """
    numerator = A * pt[0] + B * pt[1] + C
    return -numerator / math.sqrt(A * A + B * B)
def undistort_point(u, v,
fx, fy, cx, cy,
k1, k2, p1=None, p2=None,
radial_ud_only=True):
"""
"""
# convert to camera coordinates by camera intrinsic parameters
x1 = (u - cx) / fx
y1 = (v - cy) / fy
# compute r^2 and r^4
r_square = (x1 * x1) + (y1 * y1)
r_quadric = r_square * r_square
if radial_ud_only: # do radial undistortion only
x2 = x1 * (1.0 + k1 * r_square + k2 * r_quadric)
y2 = y1 * (1.0 + k1 * r_square + k2 * r_quadric)
else: # do radial undistortion and tangential undistortion
x2 = x1 * (1.0 + k1 * r_square + k2 * r_quadric) + \
2.0 * p1 * x1 * y1 + p2 * (r_square + 2.0 * x1 * x1)
y2 = y1 * (1.0 + k1 * r_square + k2 * r_quadric) + \
p1 * (r_square + 2.0 * y1 * y1) + 2.0 * p2 * x1 * y
# convert back to pixel coordinates
# using nearest neighbor interpolation
u_corrected = fx * x2 + cx
v_corrected = fy * y2 + cy
return [u_corrected, v_corrected]
# the function
def test_undistort_pts_on_curve():
    """Undistort the sampled curve points with the reference k1/k2.

    NOTE(review): the computed pts_corrected and img_orig are currently
    unused beyond this function -- presumably a visual check was
    intended; confirm before extending.
    """
    fx = 458.654
    fy = 457.296
    cx = 367.215
    cy = 248.375

    k1 = -0.28340811
    k2 = 0.07395907
    k1k2 = np.array([[k1],
                     [k2]])

    # BUG FIX: this must be a numpy array -- the original used a plain
    # Python list, so pts_orig[:, 0] below raised TypeError.
    pts_orig = np.array([
        [546, 20], [545, 40], [543, 83],
        [536, 159], [535, 170], [534, 180],
        [531, 200], [530, 211], [529, 218],
        [526, 236], [524, 253], [521, 269],
        [519, 281], [517, 293], [515, 302],
        [514, 310], [512, 320], [510, 329],
        [508, 341], [506, 353], [505, 357]
    ])
    pts_corrected = undistort_point(
        pts_orig[:, 0], pts_orig[:, 1],
        fx, fy, cx, cy,
        k1k2[0][0], k1k2[1][0]
    )

    img_path = './distorted.png'
    img_orig = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
def Func(fx, fy, cx, cy, k1k2, input_list):
    """Residual function for the LM optimization of (k1, k2).

    For each point set in *input_list*: undistort the points with the
    current coefficients, then measure the distance from the centroid of
    the undistorted points to the line through the undistorted
    endpoints.  If undistortion truly straightens the curve, this
    distance is zero.  Returns an N x 1 numpy array of residuals.
    """
    ret = np.zeros(len(input_list))
    for i, input_i in enumerate(input_list):
        # using numpy array for SIMD
        pts_orig = np.array(input_i)

        # undistort_point returns [u_array, v_array]
        us, vs = undistort_point(
            pts_orig[:, 0], pts_orig[:, 1],
            fx, fy, cx, cy,
            k1k2[0][0], k1k2[1][0]
        )

        # Centroid of the undistorted points.
        num_pts = float(pts_orig.shape[0])
        centroid = (np.sum(us) / num_pts, np.sum(vs) / num_pts)

        # Line through the undistorted endpoints.
        # BUG FIX: the original passed (us[0], us[1], vs[0], vs[1]) --
        # two u values and two v values -- instead of the first and last
        # (u, v) points of the curve.
        A, B, C = line_equation(us[0], vs[0], us[-1], vs[-1])

        # Residual: centroid-to-line distance.
        ret[i] = dist_of_pt_to_line(centroid, A, B, C)

    return np.reshape(ret, (-1, 1))
def Deriv(fx, fy, cx, cy,
          k1k2,
          input_list,
          i):
    """Numerical partial derivative of Func with respect to parameter *i*
    of k1k2, via a central difference with step 1e-6."""
    step = 0.000001
    lower = k1k2.copy()
    upper = k1k2.copy()
    lower[i, 0] -= step
    upper[i, 0] += step
    f_lower = Func(fx, fy, cx, cy, lower, input_list)
    f_upper = Func(fx, fy, cx, cy, upper, input_list)
    return (f_upper - f_lower) * 1.0 / (2.0 * step)
def test_func():
    """Evaluate the residual for the sample curve at the reference k1/k2
    and print it."""
    pts_orig = [
        [546, 20], [545, 40], [543, 83],
        [536, 159], [535, 170], [534, 180],
        [531, 200], [530, 211], [529, 218],
        [526, 236], [524, 253], [521, 269],
        [519, 281], [517, 293], [515, 302],
        [514, 310], [512, 320], [510, 329],
        [508, 341], [506, 353], [505, 357]
    ]
    input_list = [pts_orig]

    fx = 458.654
    fy = 457.296
    cx = 367.215
    cy = 248.375

    # Reference distortion coefficients.
    k1k2 = np.array([[-0.28340811],
                     [0.07395907]])

    dists = Func(fx, fy, cx, cy, k1k2, input_list)  # N x 1
    print('Dist: {:.3f}'.format(dists[0][0]))
def LM_Optimize(params, max_iter=100):
    """Levenberg-Marquardt optimization of the (k1, k2) parameters.

    *params* is a 2x1 numpy array holding the initial guess.  Iterates
    until the change in MSE drops below 1e-6 or *max_iter* steps have
    run, printing progress along the way.
    """
    # Known parameters (camera intrinsics)
    fx = 458.654
    fy = 457.296
    cx = 367.215
    cy = 248.375

    # Input
    pts_orig = [
        [546, 20], [545, 40], [543, 83],
        [536, 159], [535, 170], [534, 180],
        [531, 200], [530, 211], [529, 218],
        [526, 236], [524, 253], [521, 269],
        [519, 281], [517, 293], [515, 302],
        [514, 310], [512, 320], [510, 329],
        [508, 341], [506, 353], [505, 357]
    ]
    input_list = []
    input_list.append(pts_orig)
    N = len(input_list)  # number of data items (residuals)
    print('Total {:d} data.'.format(N))

    u, v = 1, 2  # LM damping factor and its growth multiplier

    step = 0
    last_mse = 0.0
    while max_iter:
        step += 1
        mse, mse_tmp = 0.0, 0.0

        # Current residuals and mean squared error.
        loss = Func(fx, fy, cx, cy, params, input_list)
        mse += sum(loss**2)
        mse /= N  # normalize

        # Build the Jacobian by numerical differentiation.
        J = mat(np.zeros((N, 2)))
        for i in range(2):
            J[:, i] = Deriv(fx, fy, cx, cy, params, input_list, i)
        print('Jacobin matrix:\n', J)

        # Damped normal equations: (J^T J + u I) delta = -J^T r
        H = J.T*J + u*np.eye(2)  # 2x2
        # BUG FIX: the update must use the residual vector `loss`; the
        # original multiplied by the constant fx.
        params_delta = -H.I * J.T * loss

        # Trial parameter update.
        params_tmp = params.copy()
        params_tmp += params_delta

        # Loss at the trial point.
        loss_tmp = Func(fx, fy, cx, cy, params_tmp, input_list)
        mse_tmp = sum(loss_tmp[:, 0]**2)
        mse_tmp /= N

        # Gain ratio: actual reduction vs. predicted reduction.
        q = float((mse - mse_tmp) /
                  ((0.5*params_delta.T*(u*params_delta - J.T*loss))[0, 0]))
        if q > 0:
            # Step accepted: take the trial point and shrink the damping.
            s = 1.0 / 3.0
            v = 2
            mse = mse_tmp
            params = params_tmp
            temp = 1 - pow(2.0*q-1, 3)
            if s > temp:
                u = u*s
            else:
                u = u*temp
        else:
            # Step rejected: grow the damping and keep current params.
            # BUG FIX: the original still assigned params = params_tmp
            # here, accepting the rejected step anyway.
            u = u*v
            v = 2*v

        print("step = %d, abs(mse-lase_mse) = %.8f" %
              (step, abs(mse-last_mse)))
        if abs(mse - last_mse) < 0.000001:
            break

        last_mse = mse  # remember the previous mse
        max_iter -= 1

    print('\nFinal optimized parameters:\n', params)
if __name__ == '__main__':
    # Entry point: run the undistortion demo (LM optimization of k1/k2
    # followed by undistortion of ./distorted.png).
    test_undistort_img()
    # show_points_of_curve()
    # test_func()
    print('=> Test done.')
|
nilq/baby-python
|
python
|
from http import HTTPStatus
import json
from src.common.encoder import PynamoDbEncoder
class HTTPResponse(object):
    """Builders for AWS Lambda proxy-style HTTP responses."""

    @staticmethod
    def _cors_headers():
        """Return a fresh headers dict.

        Access-Control-Allow-Origin is needed for CORS to work;
        Access-Control-Allow-Credentials is needed for cookies.
        """
        return {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Credentials": True
        }

    @classmethod
    def to_json_response(cls, http_status, message=None):
        """Build a JSON response for *http_status*.

        The body carries *message*, falling back to the status's
        standard description when no message is given.
        """
        return {
            "statusCode": http_status.value,
            "headers": cls._cors_headers(),
            "body": json.dumps(
                {"message": message if message else http_status.description})}

    @classmethod
    def to_ok_json(cls, body, encoder=PynamoDbEncoder):
        """Build a 200 OK response with *body* serialized via *encoder*."""
        return {
            "statusCode": HTTPStatus.OK.value,
            "headers": cls._cors_headers(),
            "body": json.dumps(body, cls=encoder)
        }
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-11-26 09:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django 1.11 migration: updates the help_text of
    # Source.folio and Source.page (French UI strings with non-breaking
    # spaces).  Do not hand-edit field definitions here; generate a
    # follow-up migration instead.

    dependencies = [
        ('libretto', '0044_auto_20190917_1200'),
    ]

    operations = [
        migrations.AlterField(
            model_name='source',
            name='folio',
            field=models.CharField(blank=True, help_text='Sans «\xa0f.\xa0». Exemple\xa0: «\xa03\xa0».', max_length=15, verbose_name='folio'),
        ),
        migrations.AlterField(
            model_name='source',
            name='page',
            field=models.CharField(blank=True, db_index=True, help_text='Sans «\xa0p.\xa0». Exemple\u202f: «\xa03\xa0»', max_length=15, verbose_name='page'),
        ),
    ]
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
# PyPI trove classifiers describing supported platforms and audiences.
classifiers = ['Development Status :: 4 - Beta',
               'Operating System :: POSIX :: Linux',
               'License :: OSI Approved :: MIT License',
               'Intended Audience :: Developers',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3',
               'Topic :: Software Development',
               'Topic :: System :: Hardware']

setup(name='ST7735',
      version='0.0.2',
      # BUG FIX: the supported display resolution is 160x80 (see the
      # project URL below), not 168x80.
      description='Library to control an ST7735 160x80 TFT LCD display.',
      long_description=open('README.rst').read() + '\n' + open('CHANGELOG.txt').read(),
      license='MIT',
      author='Philip Howard',
      author_email='phil@pimoroni.com',
      classifiers=classifiers,
      url='https://github.com/pimoroni/st7735-160x80-python/',
      packages=find_packages())
|
nilq/baby-python
|
python
|
"""============================================================================
The input is a file containing lines of the following form:
equation_name arg1 ...
For example:
energy 5.4 3.7 99
something 7 280.01
energy 88.94 73 21.2
whizbang 83.34 14.34 356.43 139593.7801
something .001 25
You must pass the name of the input file on the command-line. Do not hard-code
the input file name in the source code.
You must validate the name of the physics equation and the number of arguments.
If the name of the equation is invalid, write an error message and skip to the
next line. If the equation name is valid, but has the wrong number of
arguments, write an error message and skip to the next line.
If the equation name and number of arguments is correct, call the equation with
the arguments and print the answer like this:
physics_equation_name(arg1, arg2 ...) = answer
============================================================================"""
from physequations import grav_potential_energy, kin_energy, work_energy
from pprint import pprint
# print('<--checking equations hardcoded with rounding-->')
# print(grav_potential_energy(2, 6.4))
# print(round(grav_potential_energy(2, 6.4), 2))
# print(kin_energy(2, 5))
# print(work_energy(2, 5, 30))
# print(round(work_energy(2, 5, 30), 2))
# print()
def isint(s):
    """Return True if *s* can be converted to an integer via int()."""
    try:
        int(s)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures mean
        # "not an int"; anything else should propagate.
        return False
    return True
# string = 'this is a string'
# print(string.split())
# print()
"""logic: find if the index element is not an int then start a new line"""
f = open('resources/equations_input.txt', 'r')
flines = f.readlines()
# pprint(flines)
equations = []
for line in flines:
spline = line.split()
# print(spline)
equations.append(spline)
# print('===> equations')
# pprint(equations)
for equation in equations:
eqname = equation[0]
# print(eqname)
if eqname != 'grav_potential_energy' and eqname != 'kin_energy' and \
eqname !='work_energy':
print(f'{eqname} is not valid')
# print(equation)
numargs = len(equation) - 1
if eqname == 'grav_potential_energy':
if numargs != 2:
print(f'Wrong number of arguments: {equation}')
else:
mass = float(equation[1])
height = float(equation[2])
ans = grav_potential_energy(float(equation[1]), float(equation[2]))
# {mass, height} creates a tuple
# ({mass}, {height}) is another way to format it
print(f'{eqname}{mass, height} = {ans}')
if eqname == 'kin_energy':
if numargs != 2:
print(f'Wrong number of arguments: {equation}')
else:
mass = float(equation[1])
velocity = float(equation[2])
ans = kin_energy(float(equation[1]), float(equation[2]))
# {mass, velocity} creates a tuple
# ({mass}, {velocity}) is another way to format it
print(f'{eqname}{mass, velocity} = {ans}')
if eqname == 'work_energy':
if numargs != 3:
print(f'Wrong number of arguments: {equation}')
else:
force = float(equation[1])
displacement = float(equation[2])
angle = float(equation[3])
ans = work_energy(float(equation[1]), float(equation[2]), float(equation[3]))
# {force, displacement, angle} creates a tuple
# ({force}, {displacement}, {angle}) is another way to format it
print(f'{eqname}{force, displacement, angle} = {ans}')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Demonstration of sympy's printing backends applied to the same
# expression, Integral(sqrt(1/x), x): str, srepr, ASCII pretty-print,
# LaTeX, MathML and dot-graph output.
from sympy import init_printing,Integral,latex,pretty,pprint,sqrt,symbols,srepr
init_printing(use_unicode=True)
x,y,z = symbols('x y z')
# Plain string form and the exact symbolic-tree representation.
print(Integral(sqrt(1/x),x))
print(srepr(Integral(sqrt(1/x), x)))
# ASCII-art pretty printing (pprint prints; pretty returns the string).
pprint(Integral(sqrt(1/x), x), use_unicode=False)
print(pretty(Integral(sqrt(1/x), x), use_unicode=False))
# LaTeX source for the expression.
print(latex(Integral(sqrt(1/x), x)))
from sympy.printing.mathml import print_mathml
print_mathml(Integral(sqrt(1/x), x))
from sympy.printing.dot import dotprint
from sympy.abc import x  # NOTE(review): rebinding x from sympy.abc shadows the symbol created above
print(dotprint(x+2))
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.