def to_integer(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.to_integer, func_return_type=int,
output_cols=output_cols, meta_action=Actions.TO_INTEGER.value, mode="map")
def to_boolean(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.to_boolean, func_return_type=int,
output_cols=output_cols, meta_action=Actions.TO_BOOLEAN.value, mode="map")
def to_string(self, input_cols="*", output_cols=None):
filtered_columns = []
df = self.root
input_cols = parse_columns(df, input_cols)
for col_name in input_cols:
dtype = df.cols.dtypes(col_name)
if dtype != object:
filtered_columns.append(col_name)
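# Cast only when at least one requested column is not already stored as objects/strings;
# otherwise return the dataframe untouched.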
if len(filtered_columns) > 0:
return self.apply(input_cols, self.F.to_string, func_return_type=str,
output_cols=output_cols, meta_action=Actions.TO_STRING.value, mode="vectorized",
func_type="column_expr")
else:
return df
def match(self, input_cols="*", regex="", output_cols=None):
return self.apply(input_cols, self.F.match, args=(regex,), func_return_type=str, output_cols=output_cols,
meta_action=Actions.MATCH.value, mode="vectorized", func_type="column_expr")
def lower(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.lower, func_return_type=str, output_cols=output_cols,
meta_action=Actions.LOWER.value, mode="vectorized", func_type="column_expr")
def infer_dtypes(self, input_cols="*", output_cols=None):
dtypes = self.root[input_cols].cols.dtypes()
return self.apply(input_cols, self.F.infer_dtypes, args=(dtypes,), func_return_type=str,
output_cols=output_cols,
meta_action=Actions.INFER.value, mode="map", func_type="column_expr")
def upper(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.upper, func_return_type=str, output_cols=output_cols,
meta_action=Actions.UPPER.value, mode="vectorized", func_type="column_expr")
def title(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.title, func_return_type=str,
output_cols=output_cols, meta_action=Actions.PROPER.value, mode="vectorized",
func_type="column_expr")
def capitalize(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.capitalize, func_return_type=str,
output_cols=output_cols, meta_action=Actions.PROPER.value, mode="vectorized",
func_type="column_expr")
def proper(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.proper, func_return_type=str,
output_cols=output_cols, meta_action=Actions.PROPER.value, mode="vectorized",
func_type="column_expr")
# def url_decode(self):
# from urllib.parse import unquote
# def title_parse(title):
# title = unquote(title)
# return title
#
# # "apply" from pandas method will help to all the decode text in the csv
# df['title'] = df.title.apply(title_parse)
def pad(self, input_cols="*", width=0, fillchar="0", side="left", output_cols=None):
return self.apply(input_cols, self.F.pad, args=(width, side, fillchar,), func_return_type=str,
output_cols=output_cols,
meta_action=Actions.PAD.value, mode="vectorized")
def trim(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.trim, func_return_type=str,
output_cols=output_cols, meta_action=Actions.TRIM.value, mode="vectorized")
def strip_html(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.strip_html, func_return_type=str,
output_cols=output_cols, meta_action=Actions.TRIM.value, mode="map")
def date_format(self, input_cols, current_format=None, output_format=None, output_cols=None):
return self.apply(input_cols, self.F.date_format, args=(current_format, output_format), func_return_type=str,
output_cols=output_cols, meta_action=Actions.DATE_FORMAT.value, mode="partitioned",
set_index=False)
def word_tokenize(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.word_tokenize, func_return_type=str, output_cols=output_cols,
meta_action=Actions.WORD_TOKENIZE.value, mode="map")
def word_count(self, input_cols="*", output_cols=None):
return self.word_tokenize(input_cols, output_cols).cols.len(output_cols)
def len(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.len, func_return_type=str, output_cols=output_cols,
meta_action=Actions.LENGTH.value, mode="map")
@staticmethod
@abstractmethod
def reverse(input_cols, output_cols=None):
pass
def remove(self, input_cols, search=None, search_by="chars", output_cols=None):
return self.replace(input_cols=input_cols, search=search, replace_by="", search_by=search_by,
output_cols=output_cols)
def normalize_chars(self, input_cols="*", output_cols=None):
"""
Remove diacritics from a dataframe
:param input_cols:
:param output_cols:
:return:
"""
return self.apply(input_cols, self.F.normalize_chars, func_return_type=str,
meta_action=Actions.REMOVE_ACCENTS.value,
output_cols=output_cols, mode="vectorized")
def remove_numbers(self, input_cols, output_cols=None):
"""
Remove numbers from a dataframe
:param input_cols:
:param output_cols:
:return:
"""
def _remove_numbers(value):
return value.astype(str).str.replace(r'\d+', '', regex=True)
return self.apply(input_cols, _remove_numbers, func_return_type=str,
output_cols=output_cols, mode="vectorized", set_index=True)
def remove_white_spaces(self, input_cols="*", output_cols=None):
"""
Remove all white spaces from a dataframe
:param input_cols:
:param output_cols:
:return:
"""
return self.apply(input_cols, self.F.remove_white_spaces, func_return_type=str,
output_cols=output_cols, mode="vectorized")
def remove_stopwords(self, input_cols="*", language="english", output_cols=None):
"""
Remove stopwords: lowercase the text, drop the stopwords for the given language and normalize spaces.
:param input_cols:
:param language: specify the stopwords language
:param output_cols:
:return:
"""
stop = stopwords.words(language)
df = self.root
return df.cols.lower(input_cols).cols.replace("*", stop, "", "words").cols.normalize_spaces()
def remove_urls(self, input_cols="*", output_cols=None):
return self.apply(input_cols, self.F.remove_urls, func_return_type=str,
output_cols=output_cols, mode="vectorized")
def normalize_spaces(self, input_cols="*", output_cols=None):
"""
Remove extra whitespace between words and trim whitespace from the beginning and the end of each string.
:param input_cols:
:param output_cols:
:return:
"""
return self.apply(input_cols, self.F.normalize_spaces, func_return_type=str,
output_cols=output_cols, mode="vectorized")
def remove_special_chars(self, input_cols="*", output_cols=None):
"""
Remove special chars from a dataframe
:param input_cols:
:param output_cols:
:return:
"""
return self.apply(input_cols, self.F.remove_special_chars, func_return_type=str,
output_cols=output_cols, mode="vectorized")
def to_datetime(self, input_cols, format, output_cols=None):
"""
:param input_cols:
:param format:
:param output_cols:
:return:
"""
return self.apply(input_cols, self.F.to_datetime, func_return_type=str,
output_cols=output_cols, args=format, mode="partitioned")
def year(self, input_cols, format=None, output_cols=None):
"""
:param input_cols:
:param format:
:param output_cols:
:return:
"""
return self.apply(input_cols, self.F.year, args=format, output_cols=output_cols,
meta_action=Actions.YEAR.value,
mode="vectorized", set_index=True)
def month(self, input_cols, format=None, output_cols=None):
"""
:param input_cols:
:param format:
:param output_cols:
:return:
"""
return self.apply(input_cols, self.F.month, args=format, output_cols=output_cols, mode="vectorized",
set_index=True)
def day(self, input_cols, format=None, output_cols=None):
return self.apply(input_cols, self.F.day, args=format, output_cols=output_cols, mode="vectorized",
set_index=True)
def hour(self, input_cols, format=None, output_cols=None):
def _hour(value, _format):
return self.F.hour(value, _format)
return self.apply(input_cols, _hour, args=format, output_cols=output_cols, mode="vectorized", set_index=True)
def minute(self, input_cols, format=None, output_cols=None):
def _minute(value, _format):
return self.F.minute(value, _format)
return self.apply(input_cols, _minute, args=format, output_cols=output_cols, mode="vectorized", set_index=True)
def second(self, input_cols, format=None, output_cols=None):
def _second(value, _format):
return self.F.second(value, _format)
return self.apply(input_cols, _second, args=format, output_cols=output_cols, mode="vectorized", set_index=True)
def weekday(self, input_cols, format=None, output_cols=None):
def _weekday(value, _format):
return self.F.weekday(value, _format)
return self.apply(input_cols, _weekday, args=format, output_cols=output_cols, mode="vectorized", set_index=True)
def years_between(self, input_cols, date_format=None, output_cols=None):
def _years_between(value, args):
return self.F.years_between(value, *args)
return self.apply(input_cols, _years_between, args=[date_format], func_return_type=str,
output_cols=output_cols,
meta_action=Actions.YEARS_BETWEEN.value, mode="partitioned", set_index=True)
def replace(self, input_cols="*", search=None, replace_by=None, search_by="chars", ignore_case=False,
output_cols=None):
"""
Replace a value, list of values by a specified string
:param input_cols: '*', list of columns names or a single column name.
:param search: Values to look at to be replaced
:param replace_by: New value to replace the old one. Supports an array when searching by characters.
:param search_by: Can be "full","words","chars" or "values".
:param ignore_case: Ignore case when searching for match
:param output_cols:
:return: DataFrame
"""
# df = self.parent
search = val_to_list(search)
replace_by = val_to_list(replace_by)
if search_by == "full" and not is_list_of_str(search) or not is_list_of_str(replace_by):
search_by = "values"
if search_by == "chars":
# print("F", type(F), F)
func = self.F.replace_chars
func_return_type = str
elif search_by == "words":
func = self.F.replace_words
func_return_type = str
replace_by = replace_by[0]
elif search_by == "full":
func = self.F.replace_full
func_return_type = str
replace_by = replace_by[0]
elif search_by == "values":
func = self.F.replace_values
func_return_type = None
replace_by = replace_by[0]
else:
RaiseIt.value_error(search_by, ["chars", "words", "full", "values"])
# cuDF raises an exception if both params are not of the same type,
# e.g. ValueError: Cannot convert value of type list to cudf scalar
return self.apply(input_cols, func, args=(search, replace_by), func_return_type=func_return_type,
output_cols=output_cols, meta_action=Actions.REPLACE.value, mode="vectorized")
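# Usage sketch (column names and search values are illustrative, not from the docs):
#   df.cols.replace("name", search=["Mr.", "Mrs."], replace_by="", search_by="words")
#   df.cols.replace("price", search=[-1], replace_by=0, search_by="values")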
@staticmethod
@abstractmethod
def replace_regex(input_cols, regex=None, value=None, output_cols=None):
pass
@staticmethod
@abstractmethod
def impute(input_cols, data_type="continuous", strategy="mean", fill_value=None, output_cols=None):
pass
def fill_na(self, input_cols, value=None, output_cols=None):
"""
Replace null data with a specified value
:param input_cols: '*', list of columns names or a single column name.
:param output_cols:
:param value: value to replace the nan/None values
:return:
"""
df = self.root
columns = prepare_columns(df, input_cols, output_cols)
kw_columns = {}
for input_col, output_col in columns:
kw_columns[output_col] = df.data[input_col].fillna(value)
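# Besides NaN/None, also treat empty strings as missing and replace them with `value`.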
kw_columns[output_col] = kw_columns[output_col].mask(kw_columns[output_col] == "", value)
return df.cols.assign(kw_columns)
def is_na(self, input_cols, output_cols=None):
"""
Replace null values with True and non null with False
:param input_cols: '*', list of columns names or a single column name.
:param output_cols:
:return:
"""
def _is_na(value):
return value.isnull()
df = self.root
return df.cols.apply(input_cols, _is_na, output_cols=output_cols, mode="vectorized")
def count(self):
df = self.root
return len(df.cols.names())
def count_na(self, columns="*", tidy=True, compute=True):
"""
Return the NAN and Null count in a Column
:param columns: '*', list of columns names or a single column name.
:param tidy:
:param compute:
:return:
"""
df = self.root
return df.cols.agg_exprs(columns, self.F.count_na, tidy=tidy, compute=compute)
def unique(self, columns="*", values=None, estimate=True, tidy=True, compute=True):
df = self.root
return df.cols.agg_exprs(columns, self.F.unique, values, estimate, tidy=tidy, compute=compute)
def count_uniques(self, columns="*", values=None, estimate=True, tidy=True, compute=True):
df = self.root
return df.cols.agg_exprs(columns, self.F.count_uniques, values, estimate, tidy=tidy, compute=compute)
def _math(self, columns, operator, output_col):
"""
Helper to process an arithmetic operation between columns. Nulls are filled with 0 and columns are cast to float before the operator is applied.
:param columns: Columns to be used to make the calculation
:param operator: A lambda function
:return:
"""
df = self.root
columns = parse_columns(df, columns)
expr = reduce(operator, [df[col_name].cols.fill_na("*", 0).cols.to_float() for col_name in columns])
return df.cols.assign({output_col: expr})
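# Usage sketch (column names are illustrative): df.cols.add(["price", "tax"], output_col="total")
# builds `total` as the element-wise sum of the two columns, after filling nulls with 0.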
def add(self, columns, output_col="sum"):
"""
Add two or more columns
:param columns: '*', list of columns names or a single column name
:param output_col:
:return:
"""
return self._math(columns, lambda x, y: x + y, output_col)
def sub(self, columns, output_col="sub"):
"""
Subtract two or more columns
:param columns: '*', list of columns names or a single column name
:param output_col:
:return:
"""
return self._math(columns, lambda x, y: x - y, output_col)
def mul(self, columns, output_col="mul"):
"""
Multiply two or more columns
:param columns: '*', list of columns names or a single column name
:param output_col:
:return:
"""
return self._math(columns, lambda x, y: x * y, output_col)
def div(self, columns, output_col="div"):
"""
Divide two or more columns
:param columns: '*', list of columns names or a single column name
:param output_col:
:return:
"""
return self._math(columns, lambda x, y: x / y, output_col)
def z_score(self, input_cols="*", output_cols=None):
df = self.root
return df.cols.apply(input_cols, self.F.z_score, func_return_type=float, output_cols=output_cols,
meta_action=Actions.Z_SCORE.value, mode="vectorized")
def modified_z_score(self, input_cols="*", output_cols=None):
df = self.root
return df.cols.apply(input_cols, self.F.modified_z_score, func_return_type=float, output_cols=output_cols,
meta_action=Actions.Z_SCORE.value, mode="vectorized")
@staticmethod
@abstractmethod
def min_max_scaler(input_cols, output_cols=None):
pass
@staticmethod
@abstractmethod
def standard_scaler(input_cols, output_cols=None):
pass
@staticmethod
@abstractmethod
def
import os
import io
import security.JWT
import json
import boto3
import time
import hashlib
from flask import Blueprint, request, send_file, Response
from services.data.DBConn import db
from bson import Binary
user_api = Blueprint('user_api', __name__)
userDB = db.users
@user_api.route("", methods=['PUT'])
def createUser():
"""
Endpoint to create a new user using specified username and password.
Request Body Parameters:
username : string, JSON, required
password : string, JSON, required
Queries the database to see if there is already a user with this username. If not, creates a new user with this username
and password and default settings. If the process fails, an appropriate error message is returned.
"""
username = request.args.get('username')
password = request.args.get('password')
if not username:
return json.dumps({'error': "Username parameter was not provided.", 'code': 1})
if not password:
return json.dumps({'error': "Password parameter was not provided.", 'code': 2})
username = username.lower()
if "@" not in username:
return json.dumps({'error': "Username is not a valid email.", 'code': 3})
if username[-18:] != "@myhunter.cuny.edu":
return json.dumps({'error': "Email is not a valid @myhunter.cuny.edu email.", 'code': 4})
if username[:-18] == "":
return json.dumps({'error': "@myhunter.cuny.edu email is invalid.", 'code': 5})
if len(password) < 6 or len(password) > 52:
return json.dumps({'error': "Password must be at least 6 characters and less than 52 characters long.", 'code': 6})
salt = os.urandom(32).hex()
hashy = hashlib.sha512()
hashy.update(('%s%s' % (salt, password)).encode('utf-8'))
hashed_password = hashy.hexdigest()
try:
record = userDB.find_one({'username': username}, {'_id': 1})
if record is None:
user = {'username': username, 'salt': salt, 'password': hashed_password, 'name': username, 'github': '', 'linkedin': '', 'skills': [],
'classes': [], 'profilePicture': None}
result = userDB.insert_one(user)
if result.inserted_id:
# print("created new user: " + username)
authtoken = security.JWT.encode_auth_token(username).decode("utf-8")
return json.dumps({'success': True, 'token': authtoken})
else:
return json.dumps({'error': "Server error while creating new user.", 'code': 7})
else:
return json.dumps({'error': "User already exists.", 'code': 8})
except Exception as e:
print(e)
return json.dumps({'error': "Server error while checking if username already exists.", 'code': 9})
@user_api.route("/", methods=['GET'], defaults={'username': None})
@user_api.route("/<username>", methods=['GET'])
@security.JWT.requires_auth
def getUserDetails(username):
"""
Endpoint to get user details for a specified user, defaulting to the current user. This endpoint requires the
requesting user to be authorized.
URL Parameters:
username: string, optional
Return: user object, JSON
This endpoint queries the database for the user based on the current user's username. If the user is found in the
database, the user's details are returned in JSON format. If the process fails, an appropriate error message is
returned.
"""
if not username:
username = request.userNameFromToken
else:
username = username.lower()
try:
record = userDB.find_one({'username': username}, {'username': 1, 'name': 1, 'github': 1, 'linkedin': 1, 'skills': 1, 'classes': 1, 'profilePicture': 1})
if record is None:
return json.dumps({'error': "No user details found for username: " + username})
else:
del record['_id'] # don't send document id
# del record['password'] #don't send the password
# print("returned user details: " + username)
return json.dumps(record)
except Exception as e:
print(e)
return json.dumps({'error': "Server error while checking if username already exists."})
@user_api.route("/skills", methods=['GET'], defaults={'username': None})
@user_api.route("/skills/<username>", methods=['GET'])
@security.JWT.requires_auth
def getSkills(username):
"""
Endpoint to get skills for a specified user, defaulting to the current user. This endpoint requires the requesting
user to be authorized.
URL Parameters:
username: string, optional
Return: array of strings, JSON
This endpoint queries the database for the user based on the specified username or, if blank, the current user's
username. If the user is found in the database, the user's skills are returned in JSON format. If the search fails,
an appropriate error message is returned.
"""
if not username:
username = request.userNameFromToken
else:
username = username.lower()
try:
record = userDB.find_one({'username': username}, {'skills': 1})
if record is None:
return json.dumps({'error': "No user details found for username: " + username})
else:
del record['_id'] # don't send document id
# del record['password'] #don't send the password
# print("returned user skills: " + username)
return json.dumps(record)
except Exception as e:
print(e)
return json.dumps({'error': "Server error while checking if username already exists."})
@user_api.route("/classes", methods=['GET'], defaults={'username': None})
@user_api.route("/classes/<username>", methods=['GET'])
@security.JWT.requires_auth
def getClasses(username):
"""
Endpoint to get classes for a specified user, defaulting to the current user. This endpoint requires the requesting
user to be authorized.
URL Parameters:
username: string, optional
Return: array of strings, JSON
This endpoint queries the database for the user based on the specified username or, if blank, the current user's
username. If the user is found in the database, the user's classes are returned in JSON format. If the search fails,
an appropriate error message is returned.
"""
if not username:
username = request.userNameFromToken
else:
username = username.lower()
try:
record = userDB.find_one({'username': username}, {'classes': 1})
if record is None:
return json.dumps({'error': "No user details found for username: " + username})
else:
del record['_id'] # don't send document id
# del record['password'] #don't send the password
# print("returned user skills: " + username)
return json.dumps(record)
except Exception as e:
print(e)
return json.dumps({'error': "Server error while checking if username already exists."})
def skillClassValidityChecker(data):
"""
:param data: array of strings
:return: bool
"""
if isinstance(data, list):
for elem in data:
if not isinstance(elem, str):
return False
else:
return False
return True
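# e.g. skillClassValidityChecker(["python", "sql"]) -> True, skillClassValidityChecker("python") -> False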
@user_api.route("", methods=['POST'])
@security.JWT.requires_auth
def updateUserDetails():
"""
Endpoint to update user details for the current user. This endpoint requires the requesting user to be authorized.
Request Body Parameters:
name: string, JSON, optional
github: string, JSON, optional
linkedin: string, JSON, optional
skills: array of strings, JSON, optional
classes: array of strings, JSON, optional
This endpoint queries the database for the user based on the current user's username. If the user is found in the
database, the user's details are set according to the specified fields. If the search fails, an appropriate error
message is returned.
"""
content = request.get_json()
username = request.userNameFromToken
# print(content)
if 'name' in content and isinstance(content['name'], str):
res = userDB.update_one(
{"username": username},
{
"$set": {
"name": content['name'],
}
}
)
if 'github' in content and isinstance(content['github'], str):
res = userDB.update_one(
{"username": username},
{
"$set": {
"github": content['github'],
}
}
)
if 'linkedin' in content and isinstance(content['linkedin'], str):
res = userDB.update_one(
{"username": username},
{
"$set": {
"linkedin": content['linkedin'],
}
}
)
if 'skills' in content and skillClassValidityChecker(content['skills']):
res = userDB.update_one(
{"username": username},
{
"$set": {
"skills": content['skills'],
}
}
)
if 'classes' in content and skillClassValidityChecker(content['classes']):
res = userDB.update_one(
{"username": username},
{
"$set": {
"classes": content['classes'],
}
}
)
return json.dumps({'success': True})
@user_api.route("/skills", methods=['POST'])
@security.JWT.requires_auth
def updateSkills():
"""
Endpoint to update skills for the current user. This endpoint requires the requesting user to be authorized.
Request Body Parameters:
skills: array of strings, JSON, required
This endpoint queries the database for the user based on the current user's username. If the user is found in the
database, the user's skills are set according to the specified fields. If the search fails, an appropriate error
message is returned.
"""
content = request.get_json()
# print(content)
if not ('skills' in content):
return json.dumps({'error': "'skills' not provided.", 'code': 1})
if not (skillClassValidityChecker(content['skills'])):
return json.dumps({'error': "'skills' is not a valid array.", 'code': 2})
username = request.userNameFromToken
try:
record = userDB.find_one({'username': username}, {'_id': 1, 'skills': 1})
if record is None:
return json.dumps({'error': "No user details found for username: " + username})
else:
result = userDB.update_one(
{"username": username},
{
"$set": {
"skills": content['skills']
}
}
)
if result.matched_count > 0:
return json.dumps({'success': True})
else:
return json.dumps({'success': False, 'error': 'Updating user data failed for some reason', 'code': 998})
except Exception as e:
print(e)
return json.dumps({'error': "Server error while trying to find user.", 'code': 999})
@user_api.route("/classes", methods=['POST'])
@security.JWT.requires_auth
def updateClasses():
"""
Endpoint to update classes for the current user. This endpoint requires the requesting user to be authorized.
Request Body Parameters:
classes: array of strings, JSON, required
This endpoint queries the database for the user based on the current user's username. If the user is found in the
database, the user's classes are set according to the specified fields. If the search fails, an appropriate error
message is returned.
"""
content = request.get_json()
# print(content)
if not ('classes' in content):
return json.dumps({'error': "'classes' not provided.", 'code': 1})
if not (skillClassValidityChecker(content['classes'])):
return json.dumps({'error': "'classes' is not a valid array.", 'code': 2})
username = request.userNameFromToken
try:
record = userDB.find_one({'username': username}, {'_id': 1, 'classes': 1})
"""
Routines for mode coupling calculation. For more details on computation of the matrix see
https://pspy.readthedocs.io/en/latest/scientific_doc.pdf.
"""
from copy import deepcopy
import healpy as hp
import numpy as np
from pspy import pspy_utils, so_cov, sph_tools
from pspy.mcm_fortran.mcm_fortran import mcm_compute as mcm_fortran
def mcm_and_bbl_spin0(win1,
binning_file,
lmax,
niter,
type="Dl",
win2=None,
bl1=None,
bl2=None,
input_alm=False,
unbin=None,
save_file=None,
l_exact=None,
l_toep=None,
l_band=None,
l3_pad=2000,
return_coupling_only=False):
"""Get the mode coupling matrix and the binning matrix for spin0 fields
Parameters
----------
win1: so_map (or alm)
the window function of survey 1, if input_alm=True, expect wlm1
binning_file: text file
a binning file with three columns bin low, bin high, bin mean
lmax: integer
the maximum multipole to consider for the spectra computation
type: string
the type of binning, either bin Cl or bin Dl
win2: so_map (or alm)
the window function of survey 2, if input_alm=True, expect wlm2
bl1: 1d array
the beam of survey 1, expected to start at l=0
bl2: 1d array
the beam of survey 2, expected to start at l=0
niter: int
specify the number of iteration in map2alm
unbin: boolean
return the unbinned mode coupling matrix
save_file: string
if not None, save the mcm and bbl to disk
l_toep: int
l_band: int
l_exact: int
"""
if type == "Dl": doDl = 1
if type == "Cl": doDl = 0
if input_alm == False:
l_max_limit = win1.get_lmax_limit()
if lmax > l_max_limit: raise ValueError("the requested lmax is too high with respect to the map pixellisation")
maxl = np.minimum(lmax + l3_pad, l_max_limit)
win1 = sph_tools.map2alm(win1, niter=niter, lmax=maxl)
if win2 is not None:
win2 = sph_tools.map2alm(win2, niter=niter, lmax=maxl)
if win2 is None:
wcl = hp.alm2cl(win1)
else:
wcl = hp.alm2cl(win1, win2)
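# Weight the window (cross-)spectrum by (2l+1); this weighted spectrum is what enters the sum over l3 in the coupling kernel.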
l = np.arange(len(wcl))
wcl *= (2 * l + 1)
if bl1 is None: bl1 = np.ones(len(l)+2)
if bl2 is None: bl2 = bl1.copy()
mcm = np.zeros((lmax, lmax))
if l_toep is None: l_toep = lmax
if l_band is None: l_band = lmax
if l_exact is None: l_exact = lmax
mcm_fortran.calc_coupling_spin0(wcl,
l_exact,
l_band,
l_toep,
mcm.T)
if l_toep < lmax:
mcm = format_toepliz_fortran2(mcm, l_toep, l_exact, lmax)
mcm_fortran.fill_upper(mcm.T)
if return_coupling_only == True:
return mcm[:lmax - 2, :lmax - 2]
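# Apply the (2l'+1)/(4*pi) normalisation and fold in the beam window functions bl1*bl2 (broadcast over the second multipole index).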
fac = (2 * np.arange(2, lmax + 2) + 1) / (4 * np.pi) * bl1[2:lmax + 2] * bl2[2:lmax + 2]
mcm *= fac
bin_lo, bin_hi, bin_c, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
n_bins = len(bin_hi)
mbb = np.zeros((n_bins, n_bins))
mcm_fortran.bin_mcm(mcm.T, bin_lo, bin_hi, bin_size, mbb.T, doDl)
Bbl = np.zeros((n_bins, lmax))
mcm_fortran.binning_matrix(mcm.T, bin_lo, bin_hi, bin_size, Bbl.T, doDl)
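# Invert the binned coupling matrix; Bbl then maps an unbinned theory spectrum to binned, coupling-corrected bandpowers.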
mbb_inv = np.linalg.inv(mbb)
Bbl = np.dot(mbb_inv, Bbl)
if unbin:
mcm = mcm[:lmax - 2, :lmax - 2]
mcm_inv = np.linalg.inv(mcm)
if save_file is not None:
save_coupling(save_file, mbb_inv, Bbl, mcm_inv=mcm_inv)
return mcm_inv, mbb_inv, Bbl
else:
if save_file is not None:
save_coupling(save_file, mbb_inv, Bbl)
return mbb_inv, Bbl
def mcm_and_bbl_spin0and2(win1,
binning_file,
lmax,
niter,
type="Dl",
win2=None,
bl1=None,
bl2=None,
input_alm=False,
pure=False,
unbin=None,
save_file=None,
l3_pad=2000,
l_exact=None,
l_toep=None,
l_band=None,
return_coupling_only=False):
"""Get the mode coupling matrix and the binning matrix for spin 0 and 2 fields
Parameters
----------
win1: python tuple of so_map or alms (if input_alm=True)
a python tuple (win_spin0,win_spin2) with the window functions of survey 1, if input_alm=True, expect (wlm_spin0, wlm_spin2)
binning_file: text file
a binning file with three columns bin low, bin high, bin mean
lmax: integer
the maximum multipole to consider
type: string
the type of binning, either bin Cl or bin Dl
win2: python tuple of so_map or alms (if input_alm=True)
a python tuple (win_spin0,win_spin2) with the window functions of survey 1, if input_alm=True, expect (wlm_spin0, wlm_spin2)
bl1: python tuple of 1d array
a python tuple (beam_spin0,beam_spin2) with the beam of survey 1, expected to start at l=0
bl2: python tuple of 1d array
a python tuple (beam_spin0,beam_spin2) with the beam of survey 2, expected to start at l=0
niter: int
specify the number of iteration in map2alm
pure: boolean
do B mode purification
unbin: boolean
return the unbinned mode coupling matrix
save_file: string
if not None, save the mcm and bbl to disk
l_toep: int
l_band: int
l_exact: int
"""
def get_coupling_dict(array, fac=1.0):
ncomp, dim1, dim2 = array.shape
dict = {}
dict["spin0xspin0"] = array[0, :, :]
dict["spin0xspin2"] = array[1, :, :]
dict["spin2xspin0"] = array[2, :, :]
dict["spin2xspin2"] = np.zeros((4 * dim1, 4 * dim2))
for i in range(4):
dict["spin2xspin2"][i * dim1:(i + 1) * dim1, i * dim2:(i + 1) * dim2] = array[3, :, :]
dict["spin2xspin2"][2 * dim1:3 * dim1, dim2:2 * dim2] = array[4, :, :] * fac
dict["spin2xspin2"][dim1:2 * dim1, 2 * dim2:3 * dim2] = array[4, :, :] * fac
dict["spin2xspin2"][3 * dim1:4 * dim1, :dim2] = array[4, :, :]
dict["spin2xspin2"][:dim1, 3 * dim2:4 * dim2] = array[4, :, :]
return dict
if type == "Dl": doDl = 1
if type == "Cl": doDl = 0
if input_alm == False:
l_max_limit = win1[0].get_lmax_limit()
if lmax > l_max_limit: raise ValueError("the requested lmax is too high with respect to the map pixellisation")
maxl = np.minimum(lmax + l3_pad, l_max_limit)
win1 = (sph_tools.map2alm(win1[0], niter=niter,
lmax=maxl), sph_tools.map2alm(win1[1], niter=niter, lmax=maxl))
if win2 is not None:
win2 = (sph_tools.map2alm(win2[0], niter=niter,
lmax=maxl), sph_tools.map2alm(win2[1], niter=niter,
lmax=maxl))
if win2 is None: win2 = deepcopy(win1)
if bl1 is None: bl1 = (np.ones(2 + lmax), np.ones(2 + lmax))
if bl2 is None: bl2 = deepcopy(bl1)
wcl, wbl = {}, {}
spins = ["0", "2"]
for i, spin1 in enumerate(spins):
for j, spin2 in enumerate(spins):
wcl[spin1 + spin2] = hp.alm2cl(win1[i], win2[j])
wcl[spin1 + spin2] *= (2 * np.arange(len(wcl[spin1 + spin2])) + 1)
wbl[spin1 + spin2] = bl1[i][2:lmax + 2] * bl2[j][2:lmax + 2]
mcm = np.zeros((5, lmax, lmax))
if pure == False:
if l_toep is None: l_toep = lmax
if l_band is None: l_band = lmax
if l_exact is None: l_exact = lmax
mcm_fortran.calc_coupling_spin0and2(wcl["00"],
wcl["02"],
wcl["20"],
wcl["22"],
l_exact,
l_band,
l_toep,
mcm.T)
for id_mcm in range(5):
if l_toep < lmax:
mcm[id_mcm] = format_toepliz_fortran2(mcm[id_mcm], l_toep, l_exact, lmax)
mcm_fortran.fill_upper(mcm[id_mcm].T)
else:
mcm_fortran.calc_mcm_spin0and2_pure(wcl["00"],
wcl["02"],
wcl["20"],
wcl["22"],
mcm.T)
if return_coupling_only == True:
return mcm[:, :lmax - 2, :lmax - 2]
for id_mcm, spairs in enumerate(["00", "02", "20", "22", "22"]):
fac = (2 * np.arange(2, lmax + 2) + 1) / (4 * np.pi) * wbl[spairs]
mcm[id_mcm] *= fac
bin_lo, bin_hi, bin_c, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
n_bins = len(bin_hi)
mbb_array = np.zeros((5, n_bins, n_bins))
Bbl_array = np.zeros((5, n_bins, lmax))
for id_mcm in range(5):
mcm_fortran.bin_mcm((mcm[id_mcm, :, :]).T,
bin_lo,
bin_hi,
bin_size,
(mbb_array[id_mcm, :, :]).T,
doDl)
mcm_fortran.binning_matrix((mcm[id_mcm, :, :]).T,
bin_lo,
bin_hi,
bin_size,
(Bbl_array[id_mcm, :, :]).T,
doDl)
mbb = get_coupling_dict(mbb_array, fac=-1.0)
Bbl = get_coupling_dict(Bbl_array, fac=1.0)
spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"]
mbb_inv = {}
for s in spin_pairs:
mbb_inv[s] = np.linalg.inv(mbb[s])
Bbl[s] = np.dot(mbb_inv[s], Bbl[s])
if unbin:
mcm = get_coupling_dict(mcm[:, :lmax - 2, :lmax - 2], fac=-1.0)
mcm_inv = {}
for s in spin_pairs:
mcm_inv[s] = np.linalg.inv(mcm[s])
if save_file is not None:
save_coupling(save_file, mbb_inv, Bbl, spin_pairs=spin_pairs, mcm_inv=mcm_inv)
return mcm_inv, mbb_inv, Bbl
else:
if save_file is not None:
save_coupling(save_file, mbb_inv, Bbl, spin_pairs=spin_pairs)
return mbb_inv, Bbl
def format_toepliz_fortran(coupling, l_toep, lmax):
"""take a matrix and apply the toepliz appoximation (fortran)
Parameters
----------
coupling: array
consists of an array where the upper part is the exact matrix and
the lower part is the diagonal. We will fill the off-diagonal
of the lower part using the correlation measured from the exact computation
l_toep: integer
the l at which we start the approx
lmax: integer
the maximum multipole of the array
"""
toepliz_array = np.zeros(coupling.shape)
mcm_fortran.toepliz_array_fortran(toepliz_array.T, coupling.T, l_toep)
toepliz_array[coupling != 0] = coupling[coupling != 0]
return toepliz_array
def format_toepliz_fortran2(coupling, l_toep, l_exact, lmax):
"""take a matrix and apply the toepliz appoximation (fortran)
Parameters
----------
coupling: array
consists of an array where the upper part is the exact matrix and
the lower part is the diagonal. We will fill the off-diagonal
of the lower part using the correlation measured from the exact computation
l_toep: integer
the l at which we start the approx
l_exact: integer
the l until which we do the exact computation
lmax: integer
the maximum multipole of the array
"""
toepliz_array = np.zeros(coupling.shape)
mcm_fortran.toepliz_array_fortran2(toepliz_array.T, coupling.T,
np.asarray(rmatrix),
order=order, scale=scale,
image_center=np.flipud(pixel_center),
recenter=recenter, missing=missing,
use_scipy=use_scipy).T
if recenter:
new_reference_pixel = pixel_array_center
else:
# Calculate new pixel coordinates for the rotation center
new_reference_pixel = pixel_center + np.dot(rmatrix,
pixel_rotation_center - pixel_center)
new_reference_pixel = np.array(new_reference_pixel).ravel()
# Define the new reference_pixel
new_meta['crval1'] = rotation_center[0].value
new_meta['crval2'] = rotation_center[1].value
new_meta['crpix1'] = new_reference_pixel[0] + 1 # FITS pixel origin is 1
new_meta['crpix2'] = new_reference_pixel[1] + 1 # FITS pixel origin is 1
# Unpad the array if necessary
unpad_x = -np.min((diff[1], 0))
if unpad_x > 0:
new_data = new_data[:, unpad_x:-unpad_x]
new_meta['crpix1'] -= unpad_x
unpad_y = -np.min((diff[0], 0))
if unpad_y > 0:
new_data = new_data[unpad_y:-unpad_y, :]
new_meta['crpix2'] -= unpad_y
# Calculate the new rotation matrix to store in the header by
# "subtracting" the rotation matrix used in the rotate from the old one
# That being calculate the dot product of the old header data with the
# inverse of the rotation matrix.
pc_C = np.dot(self.rotation_matrix, np.linalg.inv(rmatrix))
new_meta['PC1_1'] = pc_C[0, 0]
new_meta['PC1_2'] = pc_C[0, 1]
new_meta['PC2_1'] = pc_C[1, 0]
new_meta['PC2_2'] = pc_C[1, 1]
# Update pixel size if image has been scaled.
if scale != 1.0:
new_meta['cdelt1'] = (self.scale[0] / scale).value
new_meta['cdelt2'] = (self.scale[1] / scale).value
# Remove old CROTA kwargs because we have saved a new PCi_j matrix.
new_meta.pop('CROTA1', None)
new_meta.pop('CROTA2', None)
# Remove CDi_j header
new_meta.pop('CD1_1', None)
new_meta.pop('CD1_2', None)
new_meta.pop('CD2_1', None)
new_meta.pop('CD2_2', None)
# Create new map with the modification
new_map = self._new_instance(new_data, new_meta, self.plot_settings)
return new_map
@deprecate_positional_args_since(since='2.0', keyword_only=('width', 'height'))
@u.quantity_input
def submap(self, bottom_left, *, top_right=None, width: (u.deg, u.pix) = None, height: (u.deg, u.pix) = None):
"""
Returns a submap defined by a rectangle.
Any pixels which have at least part of their area inside the rectangle
are returned.
Parameters
----------
bottom_left : `astropy.units.Quantity` or `~astropy.coordinates.SkyCoord`
The bottom-left coordinate of the rectangle. If a `SkyCoord` it can
have shape ``(2,)`` and simultaneously define ``top_right``. If specifying
pixel coordinates it must be given as an `~astropy.units.Quantity`
object with units of `~astropy.units.pixel`.
top_right : `astropy.units.Quantity` or `~astropy.coordinates.SkyCoord`, optional
The top-right coordinate of the rectangle. If ``top_right`` is
specified ``width`` and ``height`` must be omitted.
Passing this as a positional argument is deprecated, you must pass
it as ``top_right=...``.
width : `astropy.units.Quantity`, optional
The width of the rectangle. Required if ``top_right`` is omitted.
height : `astropy.units.Quantity`
The height of the rectangle. Required if ``top_right`` is omitted.
Returns
-------
out : `~sunpy.map.GenericMap` or subclass
A new map instance is returned representing the specified
sub-region.
Examples
--------
>>> import astropy.units as u
>>> from astropy.coordinates import SkyCoord
>>> import sunpy.map
>>> import sunpy.data.sample # doctest: +REMOTE_DATA
>>> aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) # doctest: +REMOTE_DATA
>>> bl = SkyCoord(-300*u.arcsec, -300*u.arcsec, frame=aia.coordinate_frame) # doctest: +REMOTE_DATA
>>> tr = SkyCoord(500*u.arcsec, 500*u.arcsec, frame=aia.coordinate_frame) # doctest: +REMOTE_DATA
>>> aia.submap(bl, top_right=tr) # doctest: +REMOTE_DATA
<sunpy.map.sources.sdo.AIAMap object at 0x...>
SunPy Map
---------
Observatory: SDO
Instrument: AIA 3
Detector: AIA
Measurement: 171.0 Angstrom
Wavelength: 171.0 Angstrom
Observation Date: 2011-06-07 06:33:02
Exposure Time: 0.234256 s
Dimension: [334. 334.] pix
Coordinate System: helioprojective
Scale: [2.402792 2.402792] arcsec / pix
Reference Pixel: [127.5 126.5] pix
Reference Coord: [3.22309951 1.38578135] arcsec
array([[ 450.4546 , 565.81494, 585.0416 , ..., 1178.3234 , 1005.28284,
977.8161 ],
[ 474.20004, 516.1865 , 555.7032 , ..., 1024.9636 , 1010.1449 ,
1010.1449 ],
[ 548.1609 , 620.9256 , 620.9256 , ..., 933.8139 , 1074.4924 ,
1108.4492 ],
...,
[ 203.58617, 195.52335, 225.75891, ..., 612.7742 , 580.52295,
560.3659 ],
[ 206.00058, 212.1806 , 232.78065, ..., 650.96185, 622.12177,
537.6615 ],
[ 229.32516, 236.07002, 222.5803 , ..., 517.1058 , 586.8026 ,
591.2992 ]], dtype=float32)
>>> aia.submap([0,0]*u.pixel, [5,5]*u.pixel) # doctest: +REMOTE_DATA
<sunpy.map.sources.sdo.AIAMap object at 0x...>
SunPy Map
---------
Observatory: SDO
Instrument: AIA 3
Detector: AIA
Measurement: 171.0 Angstrom
Wavelength: 171.0 Angstrom
Observation Date: 2011-06-07 06:33:02
Exposure Time: 0.234256 s
Dimension: [5. 5.] pix
Coordinate System: helioprojective
Scale: [2.402792 2.402792] arcsec / pix
Reference Pixel: [512.5 512.5] pix
Reference Coord: [3.22309951 1.38578135] arcsec
array([[-95.92475 , 7.076416 , -1.9656711 , -2.9485066 ,
-0.98283553],
[-96.97533 , -5.1167884 , 0. , 0. ,
0.9746264 ],
[-93.99607 , 1.0189276 , -4.0757103 , 2.0378551 ,
-2.0378551 ],
[-96.97533 , -8.040668 , -2.9238791 , -5.1167884 ,
-0.9746264 ],
[-95.92475 , 6.028058 , -4.9797 , -1.0483578 ,
-3.9313421 ]], dtype=float32)
>>> width = 10 * u.arcsec
>>> height = 10 * u.arcsec
>>> aia.submap(bl, width=width, height=height) # doctest: +REMOTE_DATA
<sunpy.map.sources.sdo.AIAMap object at 0x7f91aecc5438>
SunPy Map
---------
Observatory: SDO
Instrument: AIA 3
Detector: AIA
Measurement: 171.0 Angstrom
Wavelength: 171.0 Angstrom
Observation Date: 2011-06-07 06:33:02
Exposure Time: 0.234256 s
Dimension: [4. 4.] pix
Coordinate System: helioprojective
Scale: [2.402792 2.402792] arcsec / pix
Reference Pixel: [126.5 126.5] pix
Reference Coord: [3.22309951 1.38578135] arcsec
array([[565.81494, 585.0416 , 656.4552 , 670.18854],
[516.1865 , 555.7032 , 634.7365 , 661.90424],
[620.9256 , 620.9256 , 654.8825 , 596.6707 ],
[667.5083 , 560.52094, 651.22766, 530.28534]], dtype=float32)
>>> bottom_left_vector = SkyCoord([0, 10] * u.arcsec, [0, 10] * u.arcsec, frame='heliographic_stonyhurst')
>>> aia.submap(bottom_left_vector) # doctest: +REMOTE_DATA
<sunpy.map.sources.sdo.AIAMap object at 0x7f91aece8be0>
SunPy Map
---------
Observatory: SDO
Instrument: AIA 3
Detector: AIA
Measurement: 171.0 Angstrom
Wavelength: 171.0 Angstrom
Observation Date: 2011-06-07 06:33:02
Exposure Time: 0.234256 s
Dimension: [4. 5.] pix
Coordinate System: helioprojective
Scale: [2.402792 2.402792] arcsec / pix
Reference Pixel: [1.5 1.5] pix
Reference Coord: [3.22309951 1.38578135] arcsec
array([[213.9748 , 256.76974, 244.54262, 356.62466],
[223.74321, 258.0102 , 292.27716, 340.65408],
[219.53459, 242.31648, 308.5911 , 331.373 ],
[268.24377, 254.83157, 268.24377, 321.89252],
[249.99167, 265.14267, 274.61206, 240.5223 ]], dtype=float32)
"""
# Check that we have been given a valid combination of inputs
# [False, False, False] is valid if bottom_left contains the two corner coords
if ([arg is not None for arg in (top_right, width, height)]
not in [[True, False, False], [False, False, False], [False, True, True]]):
raise ValueError("Either top_right alone or both width and height must be specified.")
# parse input arguments
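# (_parse_submap_input dispatches on the type of bottom_left and always returns the two corners in pixel coordinates)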
bottom_left, top_right = self._parse_submap_input(bottom_left, top_right, width, height)
x_pixels = u.Quantity([bottom_left[0], top_right[0]]).to_value(u.pix)
y_pixels = u.Quantity([bottom_left[1], top_right[1]]).to_value(u.pix)
if x_pixels[0] > x_pixels[1]:
warnings.warn("The rectangle is inverted in the left/right direction, "
"which may lead to unintended behavior.", SunpyUserWarning)
if y_pixels[0] > y_pixels[1]:
warnings.warn("The rectangle is inverted in the bottom/top direction, "
"which may lead to unintended behavior.", SunpyUserWarning)
# Sort the pixel values so we always slice in the correct direction
x_pixels.sort()
y_pixels.sort()
# Round the lower left pixel to the nearest integer
# We want 0.5 to be rounded up to 1, so use floor(x + 0.5)
x_pixels[0] = np.floor(x_pixels[0] + 0.5)
y_pixels[0] = np.floor(y_pixels[0] + 0.5)
# Round the top right pixel to the nearest integer, then add 1 for array indexing
# We want e.g. 2.5 to be rounded down to 2, so use ceil(x - 0.5)
x_pixels[1] = np.ceil(x_pixels[1] - 0.5) + 1
y_pixels[1] = np.ceil(y_pixels[1] - 0.5) + 1
x_pixels = np.array(x_pixels)
y_pixels = np.array(y_pixels)
# Clip pixel values to max of array, prevents negative
# indexing
x_pixels = np.clip(x_pixels, 0, self.data.shape[1])
y_pixels = np.clip(y_pixels, 0, self.data.shape[0])
# Get ndarray representation of submap
xslice = slice(int(x_pixels[0]), int(x_pixels[1]))
yslice = slice(int(y_pixels[0]), int(y_pixels[1]))
new_data = self.data[yslice, xslice].copy()
# Make a copy of the header with updated centering information
new_meta = self.meta.copy()
# Add one to go from zero-based to one-based indexing
new_meta['crpix1'] = self.reference_pixel.x.to_value(u.pix) + 1 - x_pixels[0]
new_meta['crpix2'] = self.reference_pixel.y.to_value(u.pix) + 1 - y_pixels[0]
new_meta['naxis1'] = new_data.shape[1]
new_meta['naxis2'] = new_data.shape[0]
# Create new map instance
if self.mask is not None:
new_mask = self.mask[yslice, xslice].copy()
# Create new map with the modification
new_map = self._new_instance(new_data, new_meta, self.plot_settings, mask=new_mask)
return new_map
# Create new map with the modification
new_map = self._new_instance(new_data, new_meta, self.plot_settings)
return new_map
@seconddispatch
def _parse_submap_input(self, bottom_left, top_right, width, height):
"""
Should take any valid input to submap() and return bottom_left and
top_right in pixel coordinates.
"""
@_parse_submap_input.register(u.Quantity)
def _parse_submap_quantity_input(self, bottom_left, top_right, width, height):
if top_right is None and width is None:
raise ValueError('Either top_right alone or both width and height must be specified '
'when bottom_left is a Quantity')
if bottom_left.shape != (2, ):
import numpy as np
from PyTE.ContextOfPastMeasureCalculatorDiscrete import *
from array import *
import torch
class TEWindow:
def __init__(self,
clean_window = True,
MA_window = 0,
base = 2,
destHistoryEmbedLength = 1,
destEmbeddingDelay = 1,
sourceHistoryEmbeddingLength = 1,
sourceEmbeddingDelay = 1,
delay = 1,
history = 1):
#ContextOfPastMeasureCalculatorDiscrete.__init__(self, base, destHistoryEmbedLength)
self.base_power_l = power(base, sourceHistoryEmbeddingLength)
self.init(MA_window, base, destHistoryEmbedLength, destEmbeddingDelay, sourceHistoryEmbeddingLength, sourceEmbeddingDelay, delay, history)
self.tes = np.array([]) # torch.empty(size=(self.MA_window,0), dtype=torch.float32, device='cpu', requires_grad=False)
self.clean_window = clean_window
def init(self,
MA_window = 0,
base = 2,
destHistoryEmbedLength = 1,
destEmbeddingDelay = 1,
sourceHistoryEmbeddingLength = 1,
sourceEmbeddingDelay = 1,
delay = 1,
dontCreateObsStorage = False):
self.base = base
# Last computed average of the measure
self.average = 0.0
# Last computed max local value of the measure
self.max = 0.0
# Last computed min local value of the measure
self.min = 0.0
# Last computed standard deviation of local values of the measure
self.std = 0.0
# Number of observations supplied for the PDFs
self.observations = 0
# Number of available quantised states for each variable
# (ie binary is base-2).
# Cached value of ln(base)
self.log_base = math.log(base)
# Cached value of ln(2)
self.log_2 = math.log(2.0)
# Cache of whether the base is a power of 2
self.power_of_2_base = False
# Cached value of log_2(base)
self.log_2_base = 0
# Whether we're in debug mode
self.debug = False
# Construct an instance
# @ param base number of quantisation levels for each variable.
# E.g.binary variables are in base-2.
log_base = 0
if base < 2:
raise Exception("Can't calculate info theoretic measures for base " + str(base))
# Check if we've got a power of 2
self.power_of_2_base = math.log(self.base, 2).is_integer();
if self.power_of_2_base:
self.log_2_base = round(math.log(self.base) / math.log(2))
self.k = destHistoryEmbedLength
self.base_power_k = power(self.base, self.k)
if self.k < 0:
raise RuntimeError("destHistoryEmbedLength k " + str(self.k) + " is not >= 0 for a TEWindow")
self.maxShiftedValue = np.ndarray(shape=self.base, dtype=int)
for v in range(0, base):
self.maxShiftedValue[v] = v * power(self.base, self.k - 1)
self.noObservationStorage = dontCreateObsStorage
if not dontCreateObsStorage:
# Create storage for counts of observations
try:
self.nextPastCount = np.zeros(shape=(self.base, self.base_power_k), dtype=int)
self.pastCount = np.zeros(shape=self.base_power_k, dtype=int)
#self.nextCount = np.zeros(shape=self.base, dtype=int)
except Exception as e:
# Allow any Exceptions to be thrown, but catch and wrap
# Error as a RuntimeException
print("Requested memory for the base " + self.base +
" and k=" + self.k + " is too large for the JVM at this time " + str(e))
self.sourceNextPastCount = np.ndarray(shape=(self.base_power_l, self.base, self.base_power_k), dtype=int)
self.pastCount = np.zeros(shape=self.base_power_k, dtype=int)
self.nextPastCount = np.zeros(shape=(self.base, self.base_power_k), dtype=int)
self.sourcePastCount = np.ndarray(shape=(self.base_power_l, self.base_power_k), dtype=int)
self.periodicBoundaryConditions = True
self.base = 2
self.startObservationTime = 1
self.destEmbeddingDelay = destEmbeddingDelay
if sourceHistoryEmbeddingLength <= 0:
raise RuntimeError("Cannot have source embedding length of zero or less")
self.sourceHistoryEmbedLength = sourceHistoryEmbeddingLength
self.sourceEmbeddingDelay = sourceEmbeddingDelay
self.delay = delay
self.prev_te = None
## // Check that we can convert the history value into an integer ok:
## if (sourceHistoryEmbedLength > Math.log(Integer.MAX_VALUE) / log_base) {
## throw new RuntimeException("Base and source history combination too large");
##}
self.maxShiftedSourceValue = np.ndarray(shape=base, dtype=int)
for v in range(0, base):
self.maxShiftedSourceValue[v] = v * power(base, self.sourceHistoryEmbedLength - 1)
# Create storage for extra counts of observations
# self.sourceNextPastCount = np.ndarray(shape=(self.base_power_l, base, self.base_power_k), dtype=int)
# self.sourcePastCount = np.ndarray(shape=(self.base_power_l, self.base_power_k), dtype=int)
self.sourceNextPastCount = np.zeros(shape=(self.base_power_l, base, self.base_power_k), dtype=int)
self.sourcePastCount = np.zeros(shape=(self.base_power_l, self.base_power_k), dtype=int)
# Which time step do we start taking observations from?
# These two integers represent the earliest next time step, in the cases where the destination
# embedding itself determines where we can start taking observations, or
# the case where the source embedding plus delay is longer and so determines
# where we can start taking observations.
startTimeBasedOnDestPast = (self.k - 1) * destEmbeddingDelay + 1
startTimeBasedOnSourcePast = (self.sourceHistoryEmbedLength - 1) * sourceEmbeddingDelay + delay
self.startObservationTime = max(startTimeBasedOnDestPast, startTimeBasedOnSourcePast)
self.estimateComputed = False
# self.xs, self.xs_ = torch.empty(size=(0,), dtype=torch.uint8, device='cpu'), torch.empty(size=(0,), dtype=torch.uint8, device='cpu') # array('h'), array('h')
# self.ys, self.ys_ = torch.empty(size=(0,), dtype=torch.uint8, device='cpu'), torch.empty(size=(0,), dtype=torch.uint8, device='cpu'), # array('h'), array('h')
self.xs, self.xs_ = torch.empty(size=(0,), dtype=torch.uint8, device='cpu'), torch.empty(size=(0,), dtype=torch.uint8, device='cpu') # array('h'), array('h')
self.ys, self.ys_ = torch.empty(size=(0,), dtype=torch.uint8, device='cpu'), torch.empty(size=(0,), dtype=torch.uint8, device='cpu'), # array('h'), array('h')
self.MA_window = MA_window
def initialise(self):
#ContextOfPastMeasureCalculatorDiscrete.initialise()
#super(ContextOfPastMeasureCalculatorDiscrete, self).initialise()
#super(TEWindow, self).initialise()
self.average = 0.0
self.max = 0.0
self.min = 0.0
self.std = 0.0
self.observations = 0
self.estimateComputed = False
fill(self.sourceNextPastCount, 0)
fill(self.sourcePastCount, 0)
self.xs, self.xs_ = torch.empty(size=(0,),dtype=torch.uint8, device='cpu'), torch.empty(size=(0,),dtype=torch.uint8, device='cpu', requires_grad=False) #array('h'), array('h')
self.ys, self.ys_ = torch.empty(size=(0,),dtype=torch.uint8, device='cpu'), torch.empty(size=(0,),dtype=torch.uint8, device='cpu', requires_grad=False), #array('h'), array('h')
# Return the measure last calculated in a call to
# or related methods after the previous
def getLastAverage(self):
return self.average
# Return the last computed max local value of the measure.
# Not declaring this final so that separable calculator
# can throw an exception on it since it does not support it
def getLastMax(self):
return self.max
# Return the last computed min local value of the measure.
# Not declaring this final so that separable calculator
# can throw an exception on it since it does not support it
def getLastMin(self):
return self.min
# Return the last computed standard deviation of
# local values of the measure.
def getLastStd(self):
return self.std
# Get the number of samples to be used for the PDFs here
# which have been supplied by calls to
# "setObservations", "addObservations" etc.
# Note that the number of samples may not be equal to the length of time-series
# supplied (e.g.for transfer entropy, where we need to accumulate a number of
# samples for the past history of the destination).
# return the number of samples to be used for the PDFs
def getNumObservations(self) :
return self.observations
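# Encode the k most recent symbols of x ending at time t as a single integer in base `self.base`.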
def computePastValue(self, x, t):
pastVal = 0
for p in range(0, self.k):
pastVal *= self.base
pastVal += x[t - self.k + 1 + p]
return pastVal
def __len__(self):
if self.xs is not None:
return len(self.xs)
return 0
def add_source(self, src):
if self.clean_window:
self.init(self.MA_window)
self._add_item(True, src)
def add_dest(self, dst):
#this cleanup is dependent on the previous step (add_source()) to do it
#self.init(self.MA_window)
self._add_item(False, dst)
def _add_item(self, src_or_dest, item: torch.Tensor):
if src_or_dest:
self.xs_ = item #np.concatenate((self.xs_, item))
# self.xs_ = item
else:
self.ys_ = item #np.concatenate((self.ys_, item))
# if src_or_dest:
# self.xs_ = item
# # self.xs_ = item
# else:
# self.ys_ = item
# if item is not None and len(item) > 1:
# if src_or_dest:
# self.xs_ = np.concatenate((self.xs_, item))
# #self.xs_ = item
# else:
# self.ys_ = np.concatenate((self.ys_, item))
# #self.ys_ = item
# elif item is not None:
# if src_or_dest:
# self.xs_ = torch.cat((self.xs_, torch.tensor(item)))
# else:
# self.ys_ = torch.cat((self.ys_, torch.tensor(item)))
def addOnlineObservationsLag1(self, source, dest, startTime=0, endTime=0):
if endTime == 0:
endTime = len(dest) - 1
if ((endTime - startTime) <= 0):
# No observations to add
return
if (endTime >= len(dest) or endTime >= len(source)):
msg = "endTime {:d} must be <= length of input arrays (dest: {:d}, source: {:d})".format(endTime,
dest.shape[0],
source.shape[0])
raise RuntimeError(msg)
self.observations += (endTime - startTime)
# Initialise and store the current previous values;
# one for each phase of the embedding delay.
# First for the destination:
pastVal = np.ndarray(shape=1, dtype=int)
pastVal[0] = 0
sourcePastVal = np.ndarray(shape=self.sourceEmbeddingDelay, dtype=int)
sourcePastVal[0] = 0
destVal = 0
startIndex = startTime + 1
endIndex = endTime + 1
for r in list(range(startIndex, endIndex)):
if self.k > 0:
pastVal[0] += dest[r - 1]
sourcePastVal[0] += source[r - self.delay]
# Add to the count for this particular transition
# (cell's assigned as above
destVal = dest[r]
thisPastVal = pastVal[0]
thisSourceVal = sourcePastVal[0]
self.sourceNextPastCount[thisSourceVal][destVal][thisPastVal] += 1
self.sourcePastCount[thisSourceVal][thisPastVal] += 1
self.nextPastCount[destVal][thisPastVal] += 1
self.pastCount[thisPastVal] += 1
#self.nextCount[destVal] += 1
# Now, update the combined embedding values and phases,
# for this phase we back out the oldest value which we'll no longer need:
if self.k > 0:
pastVal[0] -= self.maxShiftedValue[dest[r - 1 - (self.k - 1)]]
pastVal[0] *= self.base
sourcePastVal[0] -= self.maxShiftedSourceValue[source[r - self.delay - (self.sourceHistoryEmbedLength - 1) * self.sourceEmbeddingDelay]]
from __future__ import absolute_import
import StringIO
import logging
import requests
from lxml import etree
from simplejson import JSONDecodeError
from six.moves.urllib.parse import urljoin
from clims.models.workunit import WorkUnit, ExternalWorkUnit
from .api import CamundaApi, UnexpectedHttpResponse
from clims import utils
from clims.models import Workflow
logger = logging.getLogger(__name__)
class CamundaClient(object):
"""
A client for executing workflows that interfaces with the underlying workflow engine.
It's higher level than CamundaApi which directly maps to the CamundaApi.
"""
def __init__(self, base_url):
self.base_url = base_url
self.api = CamundaApi(self.base_url)
logger.debug("Initialized CamundaClient with url '{}'".format(
self.base_url))
def _url(self, resource):
return urljoin(self.base_url, resource)
def start_workflows(self, workflow, items):
# NOTE: The REST API doesn't seem to provide batch start of processes at the moment, so
# we have to call the endpoint several times:
for item in items:
self.start_workflow(workflow, item)
def start_workflow(self, workflow, item):
# Camunda expects a particular format:
variables = {k: {"value": v} for k, v in workflow.variables.items()}
json = {"businessKey": item.global_id, "variables": variables}
url = self._url("process-definition/key/{}/start".format(
workflow.get_full_name()))
response = requests.post(url,
json=json,
headers={"Accept": "application/json"})
json = response.json()
logger.debug("Reply from Camunda [{}]: {}".format(
response.status_code, json))
if response.status_code == 200:
return json
else:
raise UnexpectedHttpResponse(json["message"], response.status_code)
def _add_tracked_object_ids(self, external_work_units):
map_process_instance_id_to_business_object = dict({
(external_work_unit.work_unit.external_workflow_instance_id, None)
for external_work_unit in external_work_units
})
keys = ",".join(map_process_instance_id_to_business_object.keys())
# TODO: Solve paging in one go in the api
for process_instance in self.api.process_instances().get(
processInstanceIds=keys):
id = process_instance.json["id"]
business_key = process_instance.json["businessKey"]
map_process_instance_id_to_business_object[id] = business_key
for external_work_unit in external_work_units:
external_work_unit.tracked_object_global_id = map_process_instance_id_to_business_object[
external_work_unit.work_unit.external_workflow_instance_id]
def get_work_units(self, task_definition_key=None, process_definition_key=None):
"""
Returns ExternalWorkUnits. These map to user tasks in camunda.
"""
logger.debug(
"Fetching tasks in Camunda from search filters: task_definition_key='{}', "
"process_definition_key='{}'".format(task_definition_key,
process_definition_key))
# TODO: Paging. Since we use the api, we must go through the paging mechanism.
# TODO: We're using the formKey to tell clims which work configuration to use.
# It would be simpler to just use the ID of the task (and it doesn't require a camunda
# extension for bpmn)
# 1. Fetch outstanding tasks matching the filters
work_units = list()
for res in self.api.tasks().get(
taskDefinitionKey=task_definition_key,
processDefinitionKey=process_definition_key):
json = res.json
work_unit = WorkUnit(
external_work_unit_id=json["id"],
workflow_provider=Workflow.BACKEND_CAMUNDA,
external_workflow_instance_id=json["processInstanceId"],
work_type=json["formKey"])
# assert work_unit.work_type, "No work type defined {} {}".format(task_definition_key, process_definition_key)
external_work_unit = ExternalWorkUnit(work_unit, None)
# TODO: In the demo data from Camunda, there is an entry that doesn't have a
# processInstanceId for some reason. Filtering it out now. Can be removed when
# the demo data isn't added.
if not work_unit.external_workflow_instance_id:
continue
work_units.append(external_work_unit)
logger.debug("Fetched {} tasks from Camunda".format(len(work_units)))
self._add_tracked_object_ids(work_units)
return work_units
def unsafe_delete_deployment(self, deployment_id):
"""
Deletes a deployment by ID.
This is for development and test purposes, so state can be cleaned up after a test, thus
the `unsafe` prefix.
Note that this cascades, so all related definitions are deleted.
"""
logger.info(
"Deleting deployment {} (should not run in production).".format(
deployment_id))
self.api.deployment(id=deployment_id).delete()
def unsafe_delete_all_deployments(self):
"""
Cleans the state in the Camunda instance, deleting all deployments that have been created.
This is only intended for development and test purposes (unsafe)
"""
logger.info(
"Deleting all deployments in Camunda (should not run in production)."
)
for x in self.api.deployments().get():
self.unsafe_delete_deployment(x.id)
def _refine_xml(self, tree, namespace):
"""
Adds a fully qualified name to relative ones. This ensures that all IDs that require
it will always have a fully qualified name so they will not clash with other plugins.
Example: User creates a diagram with the ID "SequenceSimple" and then registers it
through the plugin clims.plugins.demo.dnaseq, the process will actually be registered
as clims.plugins.demo.dnaseq.SequenceSimple. Furthermore, all calls to subprocesses
will be made to fully qualified names using the same module.
Form keys are also changed to the fully qualified version.
If the user does fully qualify names (they contain a dot), no change takes place.
"""
# Load the xml and make sure that we have fully qualified processes:
root = tree.getroot()
ns = {
"bpmn": "http://www.omg.org/spec/BPMN/20100524/MODEL",
"bpmndi": "http://www.omg.org/spec/BPMN/20100524/DI",
"camunda": "http://camunda.org/schema/1.0/bpmn"
}
process = utils.single(root.findall("bpmn:process", ns))
diagram = utils.single(root.findall("bpmndi:BPMNDiagram", ns))
def is_relative(name):
# Process names are relative to the module of the defining workflow class
# if there are no dots in them, except at the beginning.
# Example: .workflows.DataEntry => relative
# clims.workflows.DataEntry => absolute
return "." not in name[1:]
# A dict of all renamed elements (in particular relative process names that we're fully
# qualifying):
renamed_elements = dict()
# Ensure that we have a fully qualified name of the process:
process_id = process.attrib["id"]
if process.attrib.get("name", "") == "":
process.attrib["name"] = process_id
if is_relative(process_id):
process_id_fully_qualified = "{}.{}".format(namespace, process_id)
process.attrib["id"] = process_id_fully_qualified
renamed_elements[process_id] = process_id_fully_qualified
# For all "call activities", i.e. calls to subprocesses, ensure that we have a
# fully qualified name:
call_activities = process.findall("bpmn:callActivity", ns)
for call_activity in call_activities:
# We fully qualify relative names for call activities too:
called_process_id = call_activity.attrib["calledElement"]
if is_relative(called_process_id):
called_process_id_fully_qualified = "{}.{}".format(
namespace, called_process_id)
call_activity.attrib[
"calledElement"] = called_process_id_fully_qualified
# Qualify all form keys
user_tasks = process.findall("bpmn:userTask", ns)
for user_task in user_tasks:
form_key = user_task.attrib.get("{http://camunda.org/schema/1.0/bpmn}formKey", None)
if form_key and is_relative(form_key):
form_key_qualified = "{}.{}".format(namespace, form_key)
user_task.attrib["{http://camunda.org/schema/1.0/bpmn}formKey"] = form_key_qualified
# Since we might have renamed the process ID, let's update the refs in the diagram:
for dia_element in diagram.iter():
bpmn_element_ref = dia_element.attrib.get("bpmnElement", None)
if bpmn_element_ref and bpmn_element_ref in renamed_elements:
dia_element.attrib["bpmnElement"] = renamed_elements[
bpmn_element_ref]
def install_from_workflow_class(self, camunda_workflow):
"""
Given a CamundaWorkflow class, installs the definition in Camunda.
Adds a full namespace for each process definition that is relative, as well as all
call activities.
Returns a `Workflow` describing it.
"""
defname = utils.class_full_name(camunda_workflow)
logger.debug("Installing workflow definition in Camunda: {}".format(
camunda_workflow))
path = camunda_workflow.get_bpmn_path()
tree = etree.parse(path)
logger.debug("Refining workflow definition xml")
self._refine_xml(tree, camunda_workflow.__module__)
logger.debug("Creating in-memory file for uploading")
fs = StringIO.StringIO(
etree.tostring(tree,
encoding="UTF-8",
pretty_print=True,
xml_declaration=True))
entry = self.install_file(defname,
"{}.bpmn".format(camunda_workflow.__name__),
fs)
logger.debug("File for {} installed in Camunda: {}".format(
defname, entry))
if not entry:
entry = self.get_process_definition_by_key(defname)
logger.debug(
"Fetched latest version of '{}' in Camunda: {}".format(
defname, entry))
return Workflow(name=entry['key'],
external_id=entry['id'],
version=entry['version'],
backend=Workflow.BACKEND_CAMUNDA)
def install_file(self, deployment_name, fname, file_like):
"""
Deploys a BPMN workflow to the backend workflow engine.
Returns a dictionary describing the entry in Camunda
Returns None if the call was successful but the deployment already exists.
"""
# NOTE: We could deploy all the files at once
logger.info(
"Deploying workflow definition '{}' to Camunda".format(fname))
payload = {
"deployment-name": deployment_name,
"deployment-source": "clims",
"deploy-changed-only": "true",
fname: file_like
}
url = self._url("deployment/create")
logger.info("Requesting {}".format(url))
resp = requests.post(url,
files=payload,
headers={"Accept": "application/json"})
try:
json = resp.json()
except JSONDecodeError:
json = dict()
logger.debug("Response from {}: [{}] {}".format(
url, resp.status_code, json))
if resp.status_code != 200:
raise CamundaError(
"Error code {} when connecting to Camunda: {}".format(
resp.status_code, json.get("message", "(null)")))
deployed = json['deployedProcessDefinitions'] or dict()
if len(deployed) == 0:
return None
elif len(deployed) > 1:
raise AssertionError(
"Unexpected number of deployed process definitions: {}".format(
len(deployed)))
else:
return deployed.values()[0]
def _get(self, resource, params=None):
url = self._url(resource)
response = requests.get(url, params)
if response.status_code == 200:
if response.headers['Content-Type'] == 'application/json':
return response.json()
else:
return response.text
else:
raise UnexpectedHttpResponse(url, response.status_code)
def _process_definition_info(self, process_definition_id):
definition = self.process_definition(process_definition_id)
return {"name": definition["name"], "key": definition["key"]}
def process_instances(self, business_key=None, **kwargs):
"""Returns all process instances"""
params = kwargs or dict()
params.update({"businessKey": business_key})
process_instances = self.api.process_instances().get(**params)
def map_activity_instance(json):
return {
"id": json["id"],
"parentActivityInstanceId": json["parentActivityInstanceId"],
"name": json["name"],
"taskDefinitionKey": json["id"].split(":")[0]
}
# TODO: Fetch details about the process definition and cache it, as the frontend will most likely
# need this info
for instance in process_instances:
instance.json[
"processDefinitionInfo"] = self._process_definition_info(
instance.json["definitionId"])
activity_instances = instance.activity_instances().get(
).json["childActivityInstances"]
activities = [
map_activity_instance(json) for json in activity_instances
]
instance.json["activities"] = activities
return [instance.json for instance in process_instances]
def get_process_definition_by_key(self, key):
"""
Fetches the latest process definition by key
"""
return self._get("process-definition/key/{}".format(key))
def process_definition(self, process_definition_id):
return self._get("process-definition/{}".format(process_definition_id))
def process_definitions(self, process_definition_key=None, **kwargs):
"""Returns all process definitions"""
params = kwargs or dict()
params.update({
# TODO: Decide on how to handle mapping between casing styles in the two systems
"definitionKey": process_definition_key
})
return self._get("process-definition", params=params)
def get_outstanding_tasks(self,
process_definition=None,
task_definition=None):
process_instances = self.process_instances(
active="true", processDefinitionKey=process_definition)
ret = list()
for process_instance in process_instances:
for activity in process_instance["activities"]:
if activity["taskDefinitionKey"] == task_definition:
activity["businessKey"] = process_instance["businessKey"]
ret.append(activity)
return ret
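# A hedged example combining the calls above (the process and task definition
# keys are hypothetical; the fully qualified process name follows the convention
# described in _refine_xml):
#
#   tasks = client.get_outstanding_tasks(
#       process_definition="clims.plugins.demo.dnaseq.SequenceSimple",
#       task_definition="data_entry")
#   for task in tasks:
#       print(task["businessKey"], task["name"])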
class CamundaError(Exception):
pass
| |
# coding: utf-8
import pprint
import re
import six
class ListBackupsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'checkpoint_id': 'str',
'dec': 'bool',
'end_time': 'str',
'image_type': 'str',
'limit': 'int',
'marker': 'str',
'name': 'str',
'offset': 'int',
'resource_az': 'str',
'resource_id': 'str',
'resource_name': 'str',
'resource_type': 'str',
'sort': 'str',
'start_time': 'str',
'status': 'str',
'vault_id': 'str',
'enterprise_project_id': 'str',
'own_type': 'str',
'member_status': 'str',
'parent_id': 'str',
'used_percent': 'str'
}
attribute_map = {
'checkpoint_id': 'checkpoint_id',
'dec': 'dec',
'end_time': 'end_time',
'image_type': 'image_type',
'limit': 'limit',
'marker': 'marker',
'name': 'name',
'offset': 'offset',
'resource_az': 'resource_az',
'resource_id': 'resource_id',
'resource_name': 'resource_name',
'resource_type': 'resource_type',
'sort': 'sort',
'start_time': 'start_time',
'status': 'status',
'vault_id': 'vault_id',
'enterprise_project_id': 'enterprise_project_id',
'own_type': 'own_type',
'member_status': 'member_status',
'parent_id': 'parent_id',
'used_percent': 'used_percent'
}
def __init__(self, checkpoint_id=None, dec=None, end_time=None, image_type=None, limit=None, marker=None, name=None, offset=None, resource_az=None, resource_id=None, resource_name=None, resource_type=None, sort=None, start_time=None, status=None, vault_id=None, enterprise_project_id=None, own_type=None, member_status=None, parent_id=None, used_percent=None):
"""ListBackupsRequest - a model defined in huaweicloud sdk"""
self._checkpoint_id = None
self._dec = None
self._end_time = None
self._image_type = None
self._limit = None
self._marker = None
self._name = None
self._offset = None
self._resource_az = None
self._resource_id = None
self._resource_name = None
self._resource_type = None
self._sort = None
self._start_time = None
self._status = None
self._vault_id = None
self._enterprise_project_id = None
self._own_type = None
self._member_status = None
self._parent_id = None
self._used_percent = None
self.discriminator = None
if checkpoint_id is not None:
self.checkpoint_id = checkpoint_id
if dec is not None:
self.dec = dec
if end_time is not None:
self.end_time = end_time
if image_type is not None:
self.image_type = image_type
if limit is not None:
self.limit = limit
if marker is not None:
self.marker = marker
if name is not None:
self.name = name
if offset is not None:
self.offset = offset
if resource_az is not None:
self.resource_az = resource_az
if resource_id is not None:
self.resource_id = resource_id
if resource_name is not None:
self.resource_name = resource_name
if resource_type is not None:
self.resource_type = resource_type
if sort is not None:
self.sort = sort
if start_time is not None:
self.start_time = start_time
if status is not None:
self.status = status
if vault_id is not None:
self.vault_id = vault_id
if enterprise_project_id is not None:
self.enterprise_project_id = enterprise_project_id
if own_type is not None:
self.own_type = own_type
if member_status is not None:
self.member_status = member_status
if parent_id is not None:
self.parent_id = parent_id
if used_percent is not None:
self.used_percent = used_percent
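# A hedged usage sketch (the client object and its list_backups method follow the
# usual huaweicloudsdk pattern and are assumptions, not shown in this file):
#
#   request = ListBackupsRequest(limit=10, sort="created_at:desc",
#                                status="available", vault_id=my_vault_id)
#   response = cbr_client.list_backups(request)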
@property
def checkpoint_id(self):
"""Gets the checkpoint_id of this ListBackupsRequest.
Restore point (checkpoint) ID
:return: The checkpoint_id of this ListBackupsRequest.
:rtype: str
"""
return self._checkpoint_id
@checkpoint_id.setter
def checkpoint_id(self, checkpoint_id):
"""Sets the checkpoint_id of this ListBackupsRequest.
Restore point (checkpoint) ID
:param checkpoint_id: The checkpoint_id of this ListBackupsRequest.
:type: str
"""
self._checkpoint_id = checkpoint_id
@property
def dec(self):
"""Gets the dec of this ListBackupsRequest.
Dedicated cloud
:return: The dec of this ListBackupsRequest.
:rtype: bool
"""
return self._dec
@dec.setter
def dec(self, dec):
"""Sets the dec of this ListBackupsRequest.
Dedicated cloud
:param dec: The dec of this ListBackupsRequest.
:type: bool
"""
self._dec = dec
@property
def end_time(self):
"""Gets the end_time of this ListBackupsRequest.
End time of the backup creation time range, in the format %YYYY-%mm-%ddT%HH:%MM:%SSZ, for example 2018-02-01T12:00:00Z
:return: The end_time of this ListBackupsRequest.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ListBackupsRequest.
End time of the backup creation time range, in the format %YYYY-%mm-%ddT%HH:%MM:%SSZ, for example 2018-02-01T12:00:00Z
:param end_time: The end_time of this ListBackupsRequest.
:type: str
"""
self._end_time = end_time
@property
def image_type(self):
"""Gets the image_type of this ListBackupsRequest.
Backup type
:return: The image_type of this ListBackupsRequest.
:rtype: str
"""
return self._image_type
@image_type.setter
def image_type(self, image_type):
"""Sets the image_type of this ListBackupsRequest.
Backup type
:param image_type: The image_type of this ListBackupsRequest.
:type: str
"""
self._image_type = image_type
@property
def limit(self):
"""Gets the limit of this ListBackupsRequest.
Number of entries per page; a positive integer
:return: The limit of this ListBackupsRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListBackupsRequest.
Number of entries per page; a positive integer
:param limit: The limit of this ListBackupsRequest.
:type: int
"""
self._limit = limit
@property
def marker(self):
"""Gets the marker of this ListBackupsRequest.
ID of the last record returned by the previous query
:return: The marker of this ListBackupsRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ListBackupsRequest.
ID of the last record returned by the previous query
:param marker: The marker of this ListBackupsRequest.
:type: str
"""
self._marker = marker
@property
def name(self):
"""Gets the name of this ListBackupsRequest.
Name
:return: The name of this ListBackupsRequest.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ListBackupsRequest.
Name
:param name: The name of this ListBackupsRequest.
:type: str
"""
self._name = name
@property
def offset(self):
"""Gets the offset of this ListBackupsRequest.
Offset; a positive integer
:return: The offset of this ListBackupsRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListBackupsRequest.
Offset; a positive integer
:param offset: The offset of this ListBackupsRequest.
:type: int
"""
self._offset = offset
@property
def resource_az(self):
"""Gets the resource_az of this ListBackupsRequest.
Supports filtering by AZ (availability zone)
:return: The resource_az of this ListBackupsRequest.
:rtype: str
"""
return self._resource_az
@resource_az.setter
def resource_az(self, resource_az):
"""Sets the resource_az of this ListBackupsRequest.
Supports filtering by AZ (availability zone)
:param resource_az: The resource_az of this ListBackupsRequest.
:type: str
"""
self._resource_az = resource_az
@property
def resource_id(self):
"""Gets the resource_id of this ListBackupsRequest.
Resource ID
:return: The resource_id of this ListBackupsRequest.
:rtype: str
"""
return self._resource_id
@resource_id.setter
def resource_id(self, resource_id):
"""Sets the resource_id of this ListBackupsRequest.
Resource ID
:param resource_id: The resource_id of this ListBackupsRequest.
:type: str
"""
self._resource_id = resource_id
@property
def resource_name(self):
"""Gets the resource_name of this ListBackupsRequest.
Resource name
:return: The resource_name of this ListBackupsRequest.
:rtype: str
"""
return self._resource_name
@resource_name.setter
def resource_name(self, resource_name):
"""Sets the resource_name of this ListBackupsRequest.
Resource name
:param resource_name: The resource_name of this ListBackupsRequest.
:type: str
"""
self._resource_name = resource_name
@property
def resource_type(self):
"""Gets the resource_type of this ListBackupsRequest.
Resource type
:return: The resource_type of this ListBackupsRequest.
:rtype: str
"""
return self._resource_type
@resource_type.setter
def resource_type(self, resource_type):
"""Sets the resource_type of this ListBackupsRequest.
Resource type
:param resource_type: The resource_type of this ListBackupsRequest.
:type: str
"""
self._resource_type = resource_type
@property
def sort(self):
"""Gets the sort of this ListBackupsRequest.
The sort value is a comma-separated list of properties, each with an optional sort direction, in the form <key1>[:<direction>],<key2>[:<direction>], where direction is asc (ascending) or desc (descending); if no direction is given, descending is used. The sort string is limited to 255 characters. Valid keys: [created_at,updated_at,name,status,protected_at,id]
:return: The sort of this ListBackupsRequest.
:rtype: str
"""
return self._sort
@sort.setter
def sort(self, sort):
"""Sets the sort of this ListBackupsRequest.
The sort value is a comma-separated list of properties, each with an optional sort direction, in the form <key1>[:<direction>],<key2>[:<direction>], where direction is asc (ascending) or desc (descending); if no direction is given, descending is used. The sort string is limited to 255 characters. Valid keys: [created_at,updated_at,name,status,protected_at,id]
:param sort: The sort of this ListBackupsRequest.
:type: str
"""
self._sort = sort
@property
def start_time(self):
"""Gets the start_time of this ListBackupsRequest.
Start time of the backup creation time range, in the format %YYYY-%mm-%ddT%HH:%MM:%SSZ, for example 2018-02-01T12:00:00Z
:return: The start_time of this ListBackupsRequest.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ListBackupsRequest.
Start time of the backup creation time range, in the format %YYYY-%mm-%ddT%HH:%MM:%SSZ, for example 2018-02-01T12:00:00Z
:param start_time: The start_time of this ListBackupsRequest.
:type: str
"""
self._start_time = start_time
@property
def status(self):
"""Gets the status of this ListBackupsRequest.
Status
:return: The status of this ListBackupsRequest.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ListBackupsRequest.
Status
:param status: The status of this ListBackupsRequest.
:type: str
"""
self._status = status
@property
def vault_id(self):
"""Gets the vault_id of this ListBackupsRequest.
Vault ID
:return: The vault_id of this ListBackupsRequest.
:rtype: str
"""
return self._vault_id
@vault_id.setter
def vault_id(self, vault_id):
"""Sets the vault_id of this ListBackupsRequest.
Vault ID
:param vault_id: The vault_id of this ListBackupsRequest.
:type: str
"""
self._vault_id = vault_id
@property
def enterprise_project_id(self):
"""Gets the enterprise_project_id of this ListBackupsRequest.
Enterprise project ID; use all_granted_eps to query all enterprise project IDs the user is authorized for
:return: The enterprise_project_id of this ListBackupsRequest.
:rtype: str
"""
return self._enterprise_project_id
@enterprise_project_id.setter
def enterprise_project_id(self, enterprise_project_id):
"""Sets the enterprise_project_id of this ListBackupsRequest.
Enterprise project ID; use all_granted_eps to query all enterprise project IDs the user is authorized for
:param enterprise_project_id: The enterprise_project_id of this ListBackupsRequest.
:type: str
"""
self._enterprise_project_id = enterprise_project_id
@property
def own_type(self):
"""Gets the own_type of this ListBackupsRequest.
Ownership type: private, shared, or all_granted; by default only private backups are queried.
:return: The own_type of this ListBackupsRequest.
:rtype: str
"""
return self._own_type
@own_type.setter
def own_type(self, own_type):
"""Sets the own_type of this ListBackupsRequest.
Ownership type: private, shared, or all_granted; by default only private backups are queried.
:param own_type: The own_type of this ListBackupsRequest.
:type: str
"""
self._own_type = own_type
@property
def member_status(self):
"""Gets the member_status of this ListBackupsRequest.
Sharing status
:return: The member_status of this ListBackupsRequest.
:rtype: str
"""
return self._member_status
@member_status.setter
def member_status(self, member_status):
"""Sets the member_status of this ListBackupsRequest.
Sharing status
:param member_status: The member_status of this ListBackupsRequest.
:type: str
"""
self._member_status = member_status
@property
def parent_id(self):
"""Gets the parent_id of this ListBackupsRequest.
Parent backup ID
:return: The parent_id of this ListBackupsRequest.
:rtype: | |
the resources that match the entire 'displayName' given.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def fqdn(self) -> str:
"""
A three-label Fully Qualified Domain Name (FQDN) for a resource.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
def id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the databaseRegistration being referenced.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> str:
"""
The private IP address in the customer's VCN of the customer's endpoint, typically a database.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="keyId")
def key_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the customer "Master" key being referenced. If provided, this will reference a key which the customer will be required to ensure the policies are established to permit the GoldenGate Service to utilize this key to manage secrets.
"""
return pulumi.get(self, "key_id")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> str:
"""
Describes the object's current state in detail. For example, it can be used to provide actionable information for a resource in a Failed state.
"""
return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter
def password(self) -> str:
return pulumi.get(self, "password")
@property
@pulumi.getter(name="rcePrivateIp")
def rce_private_ip(self) -> str:
"""
A Private Endpoint IP Address created in the customer's subnet. A customer database can expect network traffic initiated by GGS from this IP address and send network traffic to this IP address, typically in response to requests from GGS (OGG). The customer may utilize this IP address in Security Lists or Network Security Groups (NSG) as needed.
"""
return pulumi.get(self, "rce_private_ip")
@property
@pulumi.getter(name="secretCompartmentId")
def secret_compartment_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment where the GGS Secret will be created. If provided, this will reference a key which the customer will be required to ensure the policies are established to permit the GoldenGate Service to utilize this Compartment in which to create a Secret.
"""
return pulumi.get(self, "secret_compartment_id")
@property
@pulumi.getter(name="secretId")
def secret_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the customer GGS Secret being referenced. If provided, this will reference a key which the customer will be required to ensure the policies are established to permit the GoldenGate Service to utilize this Secret
"""
return pulumi.get(self, "secret_id")
@property
@pulumi.getter
def state(self) -> str:
"""
A filter to return only the resources that match the 'lifecycleState' given.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the subnet being referenced.
"""
return pulumi.get(self, "subnet_id")
@property
@pulumi.getter(name="systemTags")
def system_tags(self) -> Mapping[str, Any]:
"""
The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{orcl-cloud: {free-tier-retain: true}}`
"""
return pulumi.get(self, "system_tags")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
The time the resource was created. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> str:
"""
The time the resource was last updated. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
"""
return pulumi.get(self, "time_updated")
@property
@pulumi.getter
def username(self) -> str:
"""
The username Oracle GoldenGate uses to connect the associated RDBMS. This username must already exist and be available for use by the database. It must conform to the security requirements implemented by the database including length, case sensitivity, and so on.
"""
return pulumi.get(self, "username")
@property
@pulumi.getter(name="vaultId")
def vault_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the customer vault being referenced. If provided, this will reference a vault which the customer will be required to ensure the policies are established to permit the GoldenGate Service to manage secrets contained within this vault.
"""
return pulumi.get(self, "vault_id")
@property
@pulumi.getter
def wallet(self) -> str:
return pulumi.get(self, "wallet")
@pulumi.output_type
class GetDatabaseRegistrationsFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str],
regex: Optional[bool] = None):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
if regex is not None:
pulumi.set(__self__, "regex", regex)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
return pulumi.get(self, "values")
@property
@pulumi.getter
def regex(self) -> Optional[bool]:
return pulumi.get(self, "regex")
@pulumi.output_type
class GetDeploymentBackupsDeploymentBackupCollectionResult(dict):
def __init__(__self__, *,
items: Sequence['outputs.GetDeploymentBackupsDeploymentBackupCollectionItemResult']):
pulumi.set(__self__, "items", items)
@property
@pulumi.getter
def items(self) -> Sequence['outputs.GetDeploymentBackupsDeploymentBackupCollectionItemResult']:
return pulumi.get(self, "items")
@pulumi.output_type
class GetDeploymentBackupsDeploymentBackupCollectionItemResult(dict):
def __init__(__self__, *,
backup_type: str,
bucket: str,
compartment_id: str,
defined_tags: Mapping[str, Any],
deployment_id: str,
display_name: str,
freeform_tags: Mapping[str, Any],
id: str,
is_automatic: bool,
lifecycle_details: str,
namespace: str,
object: str,
ogg_version: str,
state: str,
system_tags: Mapping[str, Any],
time_created: str,
time_of_backup: str,
time_updated: str):
"""
:param str backup_type: Possible Deployment backup types.
:param str bucket: Name of the bucket where the object is to be uploaded in the object storage
:param str compartment_id: The ID of the compartment in which to list resources.
:param Mapping[str, Any] defined_tags: Tags defined for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
:param str deployment_id: The ID of the deployment in which to list resources.
:param str display_name: A filter to return only the resources that match the entire 'displayName' given.
:param Mapping[str, Any] freeform_tags: A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param str id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the backup being referenced.
:param bool is_automatic: True if this object is automatically created
:param str lifecycle_details: Describes the object's current state in detail. For example, it can be used to provide actionable information for a resource in a Failed state.
:param str namespace: Name of namespace that serves as a container for all of your buckets
:param str object: Name of the object to be uploaded to object storage
:param str ogg_version: Version of OGG
:param str state: A filter to return only the resources that match the 'lifecycleState' given.
:param Mapping[str, Any] system_tags: The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{orcl-cloud: {free-tier-retain: true}}`
:param str time_created: The time the resource was created. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
:param str time_of_backup: The time of the resource backup. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
:param str time_updated: The time the resource was last updated. The format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339), such as `2016-08-25T21:10:29.600Z`.
"""
pulumi.set(__self__, "backup_type", backup_type)
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "compartment_id", compartment_id)
pulumi.set(__self__, "defined_tags", defined_tags)
pulumi.set(__self__, "deployment_id", deployment_id)
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "freeform_tags", freeform_tags)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "is_automatic", is_automatic)
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
pulumi.set(__self__, "namespace", namespace)
pulumi.set(__self__, "object", object)
pulumi.set(__self__, "ogg_version", ogg_version)
pulumi.set(__self__, "state", state)
pulumi.set(__self__, "system_tags", system_tags)
pulumi.set(__self__, "time_created", time_created)
pulumi.set(__self__, "time_of_backup", time_of_backup)
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="backupType")
def backup_type(self) -> str:
"""
Possible Deployment backup types.
"""
return pulumi.get(self, "backup_type")
@property
@pulumi.getter
def bucket(self) -> str:
"""
Name of the bucket where the object is to be uploaded in the object storage
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
The ID of the compartment in which to list resources.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Tags defined for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter(name="deploymentId")
def deployment_id(self) -> str:
"""
The ID of the deployment in which to list resources.
"""
return pulumi.get(self, "deployment_id")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
A filter to return only the resources that match the entire 'displayName' given.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
| |
nat_gateway_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param nat_gateway_id: The unique ID of the NAT Gateway. (required)
:type nat_gateway_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(NatGateway, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'nat_gateway_id',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_natgateways_find_by_nat_gateway_id" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_natgateways_find_by_nat_gateway_id`") # noqa: E501
# verify the required parameter 'nat_gateway_id' is set
if self.api_client.client_side_validation and ('nat_gateway_id' not in local_var_params or # noqa: E501
local_var_params['nat_gateway_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `nat_gateway_id` when calling `datacenters_natgateways_find_by_nat_gateway_id`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_natgateways_find_by_nat_gateway_id`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_natgateways_find_by_nat_gateway_id`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'nat_gateway_id' in local_var_params:
path_params['natGatewayId'] = local_var_params['nat_gateway_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'NatGateway'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/natgateways/{natGatewayId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_natgateways_flowlogs_delete(self, datacenter_id, nat_gateway_id, flow_log_id, **kwargs): # noqa: E501
"""Delete NAT Gateway Flow Logs # noqa: E501
Delete the specified NAT Gateway Flow Log. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_natgateways_flowlogs_delete(datacenter_id, nat_gateway_id, flow_log_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param nat_gateway_id: The unique ID of the NAT Gateway. (required)
:type nat_gateway_id: str
:param flow_log_id: The unique ID of the Flow Log. (required)
:type flow_log_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_natgateways_flowlogs_delete_with_http_info(datacenter_id, nat_gateway_id, flow_log_id, **kwargs) # noqa: E501
def datacenters_natgateways_flowlogs_delete_with_http_info(self, datacenter_id, nat_gateway_id, flow_log_id, **kwargs): # noqa: E501
"""Delete NAT Gateway Flow Logs # noqa: E501
Delete the specified NAT Gateway Flow Log. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_natgateways_flowlogs_delete_with_http_info(datacenter_id, nat_gateway_id, flow_log_id, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param nat_gateway_id: The unique ID of the NAT Gateway. (required)
:type nat_gateway_id: str
:param flow_log_id: The unique ID of the Flow Log. (required)
:type flow_log_id: str
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'nat_gateway_id',
'flow_log_id',
'pretty',
'depth'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_natgateways_flowlogs_delete" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_natgateways_flowlogs_delete`") # noqa: E501
# verify the required parameter 'nat_gateway_id' is set
if self.api_client.client_side_validation and ('nat_gateway_id' not in local_var_params or # noqa: E501
local_var_params['nat_gateway_id'] is None): # noqa: E501
raise ApiValueError("Missing | |
self.XYZind = np.multiply(np.multiply(Xind,Yind),Zind)
#self.XYZind is built during the construction of the YY grid and gives
#the valid indices for the field read here (i.e. it acts as a read flag on the index)
#Functions for the 3D spherical YY grids
def rectangular2YY(x,y,z,rcmb):
"""Returns the geometry of the two cartesian blocks corresponding
to the overlapping Yin (x1,y1,z1) and Yang (x2,y2,z2) grids
from the single block contained in the StagYY binary outputs.
after bending cartesian boxes"""
#Spherical coordinates:
R = z+rcmb
lat = np.pi/4 - x
lon = y - 3*np.pi/4
#Yin grid
x1 = np.multiply(np.multiply(R,np.cos(lat)),np.cos(lon))
y1 = np.multiply(np.multiply(R,np.cos(lat)),np.sin(lon))
z1 = np.multiply(R,np.sin(lat))
#Yang grid
x2 = -x1
y2 = z1
z2 = y1
return ((x1,y1,z1),(x2,y2,z2))
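# Editor's note, a worked example of the mapping above (follows directly from the
# formulas, no extra assumptions): the grid point (x, y, z) = (pi/4, 3*pi/4, 0)
# gives lat = 0, lon = 0 and R = rcmb, so it lands at (rcmb, 0, 0) on the Yin
# grid; the corresponding Yang point is (-rcmb, 0, 0), since
# (x2, y2, z2) = (-x1, z1, y1).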
def cartesian2spherical(x1,y1,z1,x2,y2,z2):
"""Converts cartesian coordinates of YY grid into spherical coordinates"""
#Yin grid
r1 = np.sqrt(x1**2+y1**2+z1**2)
theta1 = np.arctan2(np.sqrt(x1**2+y1**2),z1)
phi1 = np.arctan2(y1,x1)
#Yang grid
r2 = np.sqrt(x2**2+y2**2+z2**2)
theta2 = np.arctan2(np.sqrt(x2**2+y2**2),z2)
phi2 = np.arctan2(y2,x2)
return ((r1,theta1,phi1),(r2,theta2,phi2))
#Creation of Yin-Yang grids:
self.im(' - Creation of the Yin-Yang grids')
((self.x1_overlap,self.y1_overlap,self.z1_overlap),(self.x2_overlap,self.y2_overlap,self.z2_overlap)) = \
rectangular2YY(self.X,self.Y,self.Z,self.rcmb)
((self.r1,self.theta1,self.phi1),(self.r2,self.theta2,self.phi2)) = \
cartesian2spherical(self.x1_overlap,self.y1_overlap,self.z1_overlap,self.x2_overlap,self.y2_overlap,self.z2_overlap)
##Cut off the corners from grid #1, which appears to take care of grid #2 as well:
##Build redFlags on invalid coordinates
theta12 = np.arccos(np.multiply(np.sin(self.theta1),np.sin(self.phi1)))
self.redFlags = np.where(np.logical_or(np.logical_and((theta12>np.pi/4),(self.phi1>np.pi/2)),\
np.logical_and((theta12<3*np.pi/4),(self.phi1<-np.pi/2))))[0]
if build_redflag_point == True:
print(' - Building RedFlags Points...')
((self.x1_redf,self.y1_redf,self.z1_redf),(self.x2_redf,self.y2_redf,self.z2_redf)) = (([],[],[]),([],[],[]))
self.redFlags_layers = []
for ind in self.redFlags:
self.x1_redf.append(self.x1_overlap[ind])
self.y1_redf.append(self.y1_overlap[ind])
self.z1_redf.append(self.z1_overlap[ind])
self.x2_redf.append(self.x2_overlap[ind])
self.y2_redf.append(self.y2_overlap[ind])
self.z2_redf.append(self.z2_overlap[ind])
self.redFlags_layers.append(self.layers[ind])
#Assemble the Yin and Yang grids
self.im(' - Assembling the Yin and Yang grids')
goodIndex = np.ones(len(self.x1_overlap),dtype=bool)
goodIndex[np.array(self.redFlags)] = False
self.x1 = self.x1_overlap[goodIndex]
self.y1 = self.y1_overlap[goodIndex]
self.z1 = self.z1_overlap[goodIndex]
self.x2 = self.x2_overlap[goodIndex]
self.y2 = self.y2_overlap[goodIndex]
self.z2 = self.z2_overlap[goodIndex]
self.r1 = self.r1[goodIndex]
self.r2 = self.r2[goodIndex]
self.theta1 = self.theta1[goodIndex]
self.theta2 = self.theta2[goodIndex]
self.phi1 = self.phi1[goodIndex]
self.phi2 = self.phi2[goodIndex]
self.layers = self.layers[goodIndex]
self.layers = self.layers.astype(np.int)
# Extract the scalar or the vectorial field V: V1 on Yin, V2 on Yang
self.im(' - Construction of the appropriate vectorial field:')
## Application of redFlag on index matrix:
## return good index for the vectorial field (goodIndex):
goodIndex = np.array(range(self.nx0*self.ny0*self.nz0))
goodIndex = goodIndex[np.array(self.XYZind,dtype=bool)]
#Two different types of field: Scalar or Vectorial
if self.fieldNature == 'Scalar':
self.im(' - Build data for the entire grids')
tempField = self.flds[0].reshape(self.flds.shape[1]*self.flds.shape[2]*self.flds.shape[3],2)
V1 = tempField[:,0]
V2 = tempField[:,1]
if build_overlapping_field:
self.im(' - Overlapping field requested')
self.v1_overlap = [] #Yin
self.v2_overlap = [] #Yang
for gid in goodIndex:
self.v1_overlap.append(V1[gid])
self.v2_overlap.append(V2[gid])
#Apply redFlags on goodindex:
self.im(' - Processing of redFlags')
mask = np.ones(len(goodIndex),dtype=bool) # all True
mask[np.array(self.redFlags)] = False
#Creation of non overlapping data matrices for Yin and Yang
goodIndex = goodIndex[mask]
self.v1 = np.array(V1)[goodIndex]
self.v2 = np.array(V2)[goodIndex]
#Creation of empty vectorial fields arrays:
self.vx1 = np.array(self.vx1)
self.vy1 = np.array(self.vy1)
self.vz1 = np.array(self.vz1)
self.P1 = np.array(self.P1)
self.vr1 = np.array(self.vr1)
self.vtheta1 = np.array(self.vtheta1)
self.vphi1 = np.array(self.vphi1)
self.vx2 = np.array(self.vx2)
self.vy2 = np.array(self.vy2)
self.vz2 = np.array(self.vz2)
self.P2 = np.array(self.P2)
self.vr2 = np.array(self.vr2)
self.vtheta2 = np.array(self.vtheta2)
self.vphi2 = np.array(self.vphi2)
elif self.fieldNature == 'Vectorial':
self.im(' - Build data for the entire grids')
(Nx, Ny, Nz) = self.header.get('nts')
tempField_vx = self.flds[0][0:Nx,0:Ny,:,:].reshape(Nx*Ny*Nz,2)
tempField_vy = self.flds[1][0:Nx,0:Ny,:,:].reshape(Nx*Ny*Nz,2)
tempField_vz = self.flds[2][0:Nx,0:Ny,:,:].reshape(Nx*Ny*Nz,2)
tempField_P = self.flds[3][0:Nx,0:Ny,:,:].reshape(Nx*Ny*Nz,2)
VX1 = tempField_vx[:,0]
VX2 = tempField_vx[:,1]
VY1 = tempField_vy[:,0]
VY2 = tempField_vy[:,1]
VZ1 = tempField_vz[:,0]
VZ2 = tempField_vz[:,1]
P1 = tempField_P[:,0]
P2 = tempField_P[:,1]
#Transform velocities from internal Yin or Yang coord -> Cartesian
self.im(' - Merging of velocities: YY -> Cartesian')
tx_coord = self.header.get('e1_coord') #temporary; discarded immediately after use
ty_coord = self.header.get('e2_coord')
tz_coord = self.header.get('e3_coord')
(tX,tY,tZ) = np.meshgrid(tx_coord,ty_coord,tz_coord, indexing='ij')
tX = tX.reshape(Nx*Ny*Nz)
tY = tY.reshape(Nx*Ny*Nz)
tZ = tZ.reshape(Nx*Ny*Nz)
#R = tZ + self.rcmb
lat = np.pi/4 - tX
lon = tY - 3*np.pi/4
# --- on Yin grid ---
Vtheta = VX1
Vphi = VY1
Vr = VZ1
VX1 = Vtheta*np.sin(lat)*np.cos(lon) - Vphi*np.sin(lon) + Vr*np.cos(lat)*np.cos(lon)
VY1 = Vtheta*np.sin(lat)*np.sin(lon) + Vphi*np.cos(lon) + Vr*np.cos(lat)*np.sin(lon)
VZ1 = -1*Vtheta*np.cos(lat) + Vr*np.sin(lat)
vr1 = Vr
# --- on Yang grid ---
Vtheta = VX2
Vphi = VY2
Vr = VZ2
VX2 = -1*(Vtheta*np.sin(lat)*np.cos(lon) - Vphi*np.sin(lon) + Vr*np.cos(lat)*np.cos(lon))
VZ2 = Vtheta*np.sin(lat)*np.sin(lon) + Vphi*np.cos(lon) + Vr*np.cos(lat)*np.sin(lon)
VY2 = -1*Vtheta*np.cos(lat) + Vr*np.sin(lat)
vr2 = Vr
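# Editor's note: the two blocks above apply the standard rotation from local
# spherical components (Vtheta, Vphi, Vr) to cartesian components (VX, VY, VZ) at
# each grid point; for the Yang grid the resulting components are permuted and
# sign-flipped in the same way as the coordinates in rectangular2YY
# ((x2, y2, z2) = (-x1, z1, y1)).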
#Discharge of the memory
(tX, tY, tZ) = (None, None, None)
(Vtheta, Vphi, Vr) = (None, None, None)
if build_overlapping_field:
self.im(' - Overlapping field requested')
#Re-sampling
self.vx1_overlap = [] #Yin
self.vx2_overlap = [] #Yang
self.vy1_overlap = []
self.vy2_overlap = []
self.vz1_overlap = []
self.vz2_overlap = []
self.P1_overlap = []
self.P2_overlap = []
for gid in goodIndex:
self.vx1_overlap.append(VX1[gid])
self.vx2_overlap.append(VX2[gid])
self.vy1_overlap.append(VY1[gid])
self.vy2_overlap.append(VY2[gid])
self.vz1_overlap.append(VZ1[gid])
self.vz2_overlap.append(VZ2[gid])
self.P1_overlap.append(P1[gid])
self.P2_overlap.append(P2[gid])
#Apply redFlags on goodindex:
self.im(' - Processing of redFlags')
mask = np.ones(len(goodIndex),dtype=bool) # all True
mask[np.array(self.redFlags)] = False
goodIndex = goodIndex[mask]
self.vx1 = VX1[goodIndex]
self.vy1 = VY1[goodIndex]
self.vz1 = VZ1[goodIndex]
self.vx2 = VX2[goodIndex]
self.vy2 = VY2[goodIndex]
self.vz2 = VZ2[goodIndex]
self.P1 = P1[goodIndex]
self.P2 = P2[goodIndex]
#Radial velocities
self.vr1 = vr1[goodIndex]
self.vr2 = vr2[goodIndex]
#Transformation of velocities from cartesian to spherical:
self.im(' - Conversion of Velocities: Cartesian -> Spherical')
lat1 = np.arctan2(np.sqrt(self.x1**2+self.y1**2),self.z1)
lon1 = np.arctan2(self.y1,self.x1)
lat2 = np.arctan2(np.sqrt(self.x2**2+self.y2**2),self.z2)
lon2 = np.arctan2(self.y2,self.x2)
Vlat1 = self.vx1*(np.cos(lon1)*np.cos(lat1)) + self.vy1*(np.sin(lon1)*np.cos(lat1)) - self.vz1*(np.sin(lat1))
Vlon1 = -self.vx1*(np.sin(lon1)) + self.vy1*(np.cos(lon1))
Vlat2 = self.vx2*(np.cos(lon2)*np.cos(lat2)) + self.vy2*(np.sin(lon2)*np.cos(lat2)) - self.vz2*(np.sin(lat2))
Vlon2 = -self.vx2*(np.sin(lon2)) + self.vy2*(np.cos(lon2))
#Preserve the ndarray type:
self.vr1 = np.array(self.vr1)
self.vr2 = np.array(self.vr2)
self.vtheta1 = Vlat1
self.vtheta2 = Vlat2
self.vphi1 = Vlon1
self.vphi2 = Vlon2
#fills the .v1 and .v2 by the norm of the velocity
self.v1 = np.sqrt(self.vx1**2+self.vy1**2+self.vz1**2) #the norm
self.v2 = np.sqrt(self.vx2**2+self.vy2**2+self.vz2**2) #the norm
# == Processing finished!
self.im('Processing of stag data done!')
class StagSphericalGeometry(MainStagObject):
"""
Defines the StagSphericalGeometry object, derived from MainStagObject
This object is conditionally inherited in StagData.
"""
def __init__(self,geometry):
super().__init__() # inherit all the methods and properties from MainStagObject
self.geometry = geometry
# ----- Cartesian 2D and 3D geometries ----- #
self.x = [] #Matrix of X coordinates meshed (in spherical shape)
self.y = [] #Matrix of Y coordinates meshed (in spherical shape)
self.z = [] #Matrix of Z coordinates meshed (in spherical shape)
self.xc = [] #Matrix of cartesian x coordinates (in cartesian shape)
self.yc = [] #Matrix of cartesian y coordinates (in cartesian shape)
self.zc = [] #Matrix of cartesian z coordinates (in cartesian shape)
self.r = [] #Matrix of spherical coordinate r
self.theta = [] #Matrix of spherical coordinate theta
self.phi = [] #Matrix of spherical coordinate phi
self.v = [] #Matrix of scalar field (or norm of vectorial)
self.vx = [] #Matrix of x-component of the vectorial field for Cartesian grids
self.vy = [] #Matrix of y-component of the vectorial field for Cartesian grids
self.vz = [] #Matrix of z-component of the vectorial field for Cartesian grids
self.vtheta = [] #Matrix of theta component of the vectorial field
self.vphi = [] #Matrix of phi component of the vectorial field
self.vr = [] #Matrix of radial component of the vectorial field
self.P = [] #Matrix of Pressure field for Cartesian grids
def stagProcessing(self):
"""
This function processes stag data and returns the appropriate coords
matrices (1 matrix for Yin and 1 matrix for Yang coords) as well as the matrix
of the read field for Yin and for Yang.
If build_redflag_point == True, build coordinates matrices of the
redflag points and fills fields x-y-z_redf
If build_overlapping_field == True, build ghost points on YY corner
"""
self.im('Processing stag Data:')
self.im(' - Grid Geometry')
self.im(' - 3D cartesian grid geometry')
(self.x,self.y,self.z) = np.meshgrid(self.x_coords,self.y_coords,self.z_coords,indexing='ij')
#save cartesian grid geometry
self.xc = self.x
self.yc = self.y
self.zc = self.z
#Same operation but on index matrix:
(Xind,Yind,Zind) = np.meshgrid(self.xind,self.yind,self.zind, indexing='ij')
Xind = Xind.reshape(Xind.shape[0]*Xind.shape[1]*Xind.shape[2])
Yind = Yind.reshape(Yind.shape[0]*Yind.shape[1]*Yind.shape[2])
Zind = Zind.reshape(Zind.shape[0]*Zind.shape[1]*Zind.shape[2])
self.XYZind = np.multiply(np.multiply(Xind,Yind),Zind)
# Application of redFlag on index matrix:
goodIndex = np.array(range(self.nx0*self.ny0*self.nz0))
goodIndex = goodIndex[np.array(self.XYZind,dtype=bool)]
#Function for the 3D spherical grid
def rectangular2Spherical(x,y,z,rcmb):
"""Returns the geometry of the spherical grid
after bending the cartesian box"""
#Spherical coordinates:
R = z+rcmb
lat = np.pi/4 - x
lon = y - 3*np.pi/4
#Spherical grid
x = np.multiply(np.multiply(R,np.cos(lat)),np.cos(lon))
y = np.multiply(np.multiply(R,np.cos(lat)),np.sin(lon))
z = np.multiply(R,np.sin(lat))
return (x,y,z)
def cartesian2spherical(x,y,z):
"""Converts cartesian coordinates into spherical | |
<filename>pnc_cli/swagger_client/models/build_record.py<gh_stars>0
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class BuildRecord(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
BuildRecord - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'build_configuration_audited': 'BuildConfigurationAudited',
'build_configuration_id': 'int',
'build_configuration_rev': 'int',
'build_content_id': 'str',
'submit_time': 'datetime',
'start_time': 'datetime',
'end_time': 'datetime',
'user': 'User',
'scm_repo_url': 'str',
'scm_revision': 'str',
'build_log': 'str',
'status': 'str',
'ssh_command': 'str',
'ssh_password': 'str',
'execution_root_name': 'str',
'execution_root_version': 'str',
'built_artifacts': 'list[Artifact]',
'dependencies': 'list[Artifact]',
'build_environment': 'BuildEnvironment',
'product_milestone': 'ProductMilestone',
'build_config_set_record': 'BuildConfigSetRecord',
'attributes': 'dict(str, str)',
'repour_log': 'str',
'field_handler': 'FieldHandler',
'build_configuration_audited_id_rev': 'IdRev'
}
self.attribute_map = {
'id': 'id',
'build_configuration_audited': 'buildConfigurationAudited',
'build_configuration_id': 'buildConfigurationId',
'build_configuration_rev': 'buildConfigurationRev',
'build_content_id': 'buildContentId',
'submit_time': 'submitTime',
'start_time': 'startTime',
'end_time': 'endTime',
'user': 'user',
'scm_repo_url': 'scmRepoURL',
'scm_revision': 'scmRevision',
'build_log': 'buildLog',
'status': 'status',
'ssh_command': 'sshCommand',
'ssh_password': 'sshPassword',
'execution_root_name': 'executionRootName',
'execution_root_version': 'executionRootVersion',
'built_artifacts': 'builtArtifacts',
'dependencies': 'dependencies',
'build_environment': 'buildEnvironment',
'product_milestone': 'productMilestone',
'build_config_set_record': 'buildConfigSetRecord',
'attributes': 'attributes',
'repour_log': 'repourLog',
'field_handler': 'fieldHandler',
'build_configuration_audited_id_rev': 'buildConfigurationAuditedIdRev'
}
self._id = None
self._build_configuration_audited = None
self._build_configuration_id = None
self._build_configuration_rev = None
self._build_content_id = None
self._submit_time = None
self._start_time = None
self._end_time = None
self._user = None
self._scm_repo_url = None
self._scm_revision = None
self._build_log = None
self._status = None
self._ssh_command = None
self._ssh_password = None
self._execution_root_name = None
self._execution_root_version = None
self._built_artifacts = None
self._dependencies = None
self._build_environment = None
self._product_milestone = None
self._build_config_set_record = None
self._attributes = None
self._repour_log = None
self._field_handler = None
self._build_configuration_audited_id_rev = None
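# Rough usage sketch (hypothetical values): instances are populated
# attribute by attribute through the property setters defined below, e.g.
#   record = BuildRecord()
#   record.id = 42
#   record.status = "SUCCESS"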
@property
def id(self):
"""
Gets the id of this BuildRecord.
:return: The id of this BuildRecord.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this BuildRecord.
:param id: The id of this BuildRecord.
:type: int
"""
self._id = id
@property
def build_configuration_audited(self):
"""
Gets the build_configuration_audited of this BuildRecord.
:return: The build_configuration_audited of this BuildRecord.
:rtype: BuildConfigurationAudited
"""
return self._build_configuration_audited
@build_configuration_audited.setter
def build_configuration_audited(self, build_configuration_audited):
"""
Sets the build_configuration_audited of this BuildRecord.
:param build_configuration_audited: The build_configuration_audited of this BuildRecord.
:type: BuildConfigurationAudited
"""
self._build_configuration_audited = build_configuration_audited
@property
def build_configuration_id(self):
"""
Gets the build_configuration_id of this BuildRecord.
:return: The build_configuration_id of this BuildRecord.
:rtype: int
"""
return self._build_configuration_id
@build_configuration_id.setter
def build_configuration_id(self, build_configuration_id):
"""
Sets the build_configuration_id of this BuildRecord.
:param build_configuration_id: The build_configuration_id of this BuildRecord.
:type: int
"""
self._build_configuration_id = build_configuration_id
@property
def build_configuration_rev(self):
"""
Gets the build_configuration_rev of this BuildRecord.
:return: The build_configuration_rev of this BuildRecord.
:rtype: int
"""
return self._build_configuration_rev
@build_configuration_rev.setter
def build_configuration_rev(self, build_configuration_rev):
"""
Sets the build_configuration_rev of this BuildRecord.
:param build_configuration_rev: The build_configuration_rev of this BuildRecord.
:type: int
"""
self._build_configuration_rev = build_configuration_rev
@property
def build_content_id(self):
"""
Gets the build_content_id of this BuildRecord.
:return: The build_content_id of this BuildRecord.
:rtype: str
"""
return self._build_content_id
@build_content_id.setter
def build_content_id(self, build_content_id):
"""
Sets the build_content_id of this BuildRecord.
:param build_content_id: The build_content_id of this BuildRecord.
:type: str
"""
self._build_content_id = build_content_id
@property
def submit_time(self):
"""
Gets the submit_time of this BuildRecord.
:return: The submit_time of this BuildRecord.
:rtype: datetime
"""
return self._submit_time
@submit_time.setter
def submit_time(self, submit_time):
"""
Sets the submit_time of this BuildRecord.
:param submit_time: The submit_time of this BuildRecord.
:type: datetime
"""
self._submit_time = submit_time
@property
def start_time(self):
"""
Gets the start_time of this BuildRecord.
:return: The start_time of this BuildRecord.
:rtype: datetime
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""
Sets the start_time of this BuildRecord.
:param start_time: The start_time of this BuildRecord.
:type: datetime
"""
self._start_time = start_time
@property
def end_time(self):
"""
Gets the end_time of this BuildRecord.
:return: The end_time of this BuildRecord.
:rtype: datetime
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""
Sets the end_time of this BuildRecord.
:param end_time: The end_time of this BuildRecord.
:type: datetime
"""
self._end_time = end_time
@property
def user(self):
"""
Gets the user of this BuildRecord.
:return: The user of this BuildRecord.
:rtype: User
"""
return self._user
@user.setter
def user(self, user):
"""
Sets the user of this BuildRecord.
:param user: The user of this BuildRecord.
:type: User
"""
self._user = user
@property
def scm_repo_url(self):
"""
Gets the scm_repo_url of this BuildRecord.
:return: The scm_repo_url of this BuildRecord.
:rtype: str
"""
return self._scm_repo_url
@scm_repo_url.setter
def scm_repo_url(self, scm_repo_url):
"""
Sets the scm_repo_url of this BuildRecord.
:param scm_repo_url: The scm_repo_url of this BuildRecord.
:type: str
"""
self._scm_repo_url = scm_repo_url
@property
def scm_revision(self):
"""
Gets the scm_revision of this BuildRecord.
:return: The scm_revision of this BuildRecord.
:rtype: str
"""
return self._scm_revision
@scm_revision.setter
def scm_revision(self, scm_revision):
"""
Sets the scm_revision of this BuildRecord.
:param scm_revision: The scm_revision of this BuildRecord.
:type: str
"""
self._scm_revision = scm_revision
@property
def build_log(self):
"""
Gets the build_log of this BuildRecord.
:return: The build_log of this BuildRecord.
:rtype: str
"""
return self._build_log
@build_log.setter
def build_log(self, build_log):
"""
Sets the build_log of this BuildRecord.
:param build_log: The build_log of this BuildRecord.
:type: str
"""
self._build_log = build_log
@property
def status(self):
"""
Gets the status of this BuildRecord.
:return: The status of this BuildRecord.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this BuildRecord.
:param status: The status of this BuildRecord.
:type: str
"""
allowed_values = ["SUCCESS", "FAILED", "UNSTABLE", "BUILDING", "REJECTED", "CANCELLED", "SYSTEM_ERROR", "UNKNOWN", "NONE"]
if status not in allowed_values:
raise ValueError(
"Invalid value for `status`, must be one of {0}"
.format(allowed_values)
)
self._status = status
@property
def ssh_command(self):
"""
Gets the ssh_command of this BuildRecord.
:return: The ssh_command of this BuildRecord.
:rtype: str
"""
return self._ssh_command
@ssh_command.setter
def ssh_command(self, ssh_command):
"""
Sets the ssh_command of this BuildRecord.
:param ssh_command: The ssh_command of this BuildRecord.
:type: str
"""
self._ssh_command = ssh_command
@property
def ssh_password(self):
"""
Gets the ssh_password of this BuildRecord.
:return: The ssh_password of this BuildRecord.
:rtype: str
"""
return self._ssh_password
@ssh_password.setter
def ssh_password(self, ssh_password):
"""
Sets the ssh_password of this BuildRecord.
:param ssh_password: The ssh_password of this BuildRecord.
:type: str
"""
self._ssh_password = ssh_password
@property
def execution_root_name(self):
"""
Gets the execution_root_name of this BuildRecord.
:return: The execution_root_name of this BuildRecord.
:rtype: str
"""
return self._execution_root_name
@execution_root_name.setter
def execution_root_name(self, execution_root_name):
"""
Sets the execution_root_name of this BuildRecord.
:param execution_root_name: The execution_root_name of this BuildRecord.
:type: str
"""
self._execution_root_name = execution_root_name
@property
def execution_root_version(self):
"""
Gets the execution_root_version of this BuildRecord.
:return: The execution_root_version of this BuildRecord.
:rtype: str
"""
return self._execution_root_version
@execution_root_version.setter
def execution_root_version(self, execution_root_version):
"""
Sets the execution_root_version of this BuildRecord.
:param execution_root_version: The execution_root_version of this BuildRecord.
:type: str
"""
self._execution_root_version = execution_root_version
@property
def built_artifacts(self):
"""
Gets the built_artifacts of this BuildRecord.
:return: The built_artifacts of this BuildRecord.
:rtype: list[Artifact]
"""
return self._built_artifacts
@built_artifacts.setter
def built_artifacts(self, built_artifacts):
"""
Sets the built_artifacts of this BuildRecord.
:param built_artifacts: The built_artifacts of this BuildRecord.
:type: list[Artifact]
"""
self._built_artifacts = built_artifacts
@property
def dependencies(self):
"""
Gets the dependencies of this BuildRecord.
:return: The dependencies of this BuildRecord.
:rtype: list[Artifact]
"""
return self._dependencies
@dependencies.setter
def dependencies(self, dependencies):
"""
Sets the dependencies of this BuildRecord.
:param dependencies: The dependencies of this BuildRecord.
:type: list[Artifact]
"""
self._dependencies = dependencies
@property
def build_environment(self):
"""
Gets the build_environment of this BuildRecord.
:return: The build_environment of this BuildRecord.
:rtype: BuildEnvironment
"""
return self._build_environment
@build_environment.setter
def build_environment(self, build_environment):
"""
Sets the build_environment of this BuildRecord.
:param build_environment: The build_environment of this BuildRecord.
:type: BuildEnvironment
"""
self._build_environment = build_environment
@property
def product_milestone(self):
| |
"""This requires CGAL mesher applied to series of surfaces. See readme.txt for details.
"""
from __future__ import print_function
# Use FEniCS for Finite Element
import fenics as d
# Useful to import the derivative separately
from dolfin import dx
# Useful numerical libraries
import numpy as N
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as P
# General tools
import os
import subprocess
import shutil
# UFL
import ufl
# Set interactive plotting on
P.ion()
# Use a separate Python file to declare variables
import variables as v
import vtk_tools
input_mesh = "input"
class IREProblem:
"""class IREProblem()
This represents a Finite Element IRE problem using a similar algorithm to that of ULJ
"""
def __init__(self):
pass
def load(self):
# Convert mesh from MSH to Dolfin-XML
shutil.copyfile("input/%s.msh" % input_mesh, "%s.msh" % input_mesh)
destination_xml = "%s.xml" % input_mesh
subprocess.call(["dolfin-convert", "%s.msh" % input_mesh, destination_xml])
# Load mesh and boundaries
mesh = d.Mesh(destination_xml)
self.patches = d.MeshFunction("size_t", mesh, "%s_facet_region.xml" % input_mesh)
self.subdomains = d.MeshFunction("size_t", mesh, "%s_physical_region.xml" % input_mesh)
# Define differential over subdomains
self.dxs = d.dx[self.subdomains]
# Turn subdomains into a Numpy array
self.subdomains_array = N.asarray(self.subdomains.array(), dtype=N.int32)
# Create a map from subdomain indices to tissues
self.tissues_by_subdomain = {}
for i, t in v.tissues.items():
print(i, t)
for j in t["indices"]:
self.tissues_by_subdomain[j] = t
self.mesh = mesh
self.setup_fe()
self.prepare_increase_conductivity()
def load_patient_data(self):
indicators = {}
for subdomain in ("liver", "vessels", "tumour"):
values = N.empty((v.dim_height, v.dim_width, v.dim_depth), dtype='uintp')
for i in range(0, v.dim_depth):
slice = N.loadtxt(os.path.join(
v.patient_data_location,
"patient-%s.%d.txt" % (subdomain, i + 1))
)
values[:, :, i] = slice.astype('uintp')
indicators[subdomain] = values
self.indicators = indicators
def interpolate_to_patient_data(self, function, indicator):
values = N.empty((v.dim_height, v.dim_width, v.dim_depth), dtype='float')
it = N.nditer(values, flags=['multi_index'])
u = N.empty((1,))
x = N.empty((3,))
delta = (v.delta_height, v.delta_width, v.delta_depth)
offset = (v.offset_x, v.offset_y, v.offset_z)
while not it.finished:
if indicator[it.multi_index] != 1:
it.iternext()
continue
x[0] = it.multi_index[1] * delta[1] - offset[0]
x[1] = it.multi_index[0] * delta[0] - offset[1]
x[2] = it.multi_index[2] * delta[2] - offset[2]
function.eval(u, x)
values[it.multi_index] = u[0]
it.iternext()
return values
def setup_fe(self):
# Define the relevant function spaces
V = d.FunctionSpace(self.mesh, "Lagrange", 1)
self.V = V
# DG0 is useful for defining piecewise constant functions
DV = d.FunctionSpace(self.mesh, "Discontinuous Lagrange", 0)
self.DV = DV
# Define test and trial functions for FE
self.z = d.TrialFunction(self.V)
self.w = d.TestFunction(self.V)
def per_tissue_constant(self, generator):
fefunction = d.Function(self.DV)
generated_values = dict((l, generator(l)) for l in N.unique(self.subdomains_array))
vector = N.vectorize(generated_values.get)
fefunction.vector()[:] = vector(self.subdomains_array)
return fefunction
def get_tumour_volume(self):
# Perhaps there is a prettier way, but integrate a unit function over the tumour tets
one = d.Function(self.V)
one.vector()[:] = 1
return sum(d.assemble(one * self.dxs(i)) for i in v.tissues["tumour"]["indices"])
def save_lesion(self):
final_filename = "results/%s-max_e%06d.vtu" % (input_mesh, self.max_e_count)
shutil.copyfile(final_filename, "../lesion_volume.vtu")
destination = "../lesion_surface.vtp"
vtk_tools.save_lesion(destination, final_filename, "max_E", (80, None))
print("Output file to %s?" % destination, os.path.exists(destination))
def solve(self):
# TODO: when FEniCS ported to Python3, this should be exist_ok
try:
os.makedirs('results')
except OSError:
pass
z, w = (self.z, self.w)
u0 = d.Constant(0.0)
# Define the linear and bilinear forms
L = u0 * w * dx
# Define useful functions
cond = d.Function(self.DV)
U = d.Function(self.V)
# Initialize the max_e vector, that will store the cumulative max e values
max_e = d.Function(self.V)
max_e.vector()[:] = 0.0
max_e.rename("max_E", "Maximum energy deposition by location")
max_e_file = d.File("results/%s-max_e.pvd" % input_mesh)
max_e_per_step = d.Function(self.V)
max_e_per_step_file = d.File("results/%s-max_e_per_step.pvd" % input_mesh)
self.es = {}
self.max_es = {}
fi = d.File("results/%s-cond.pvd" % input_mesh)
potential_file = d.File("results/%s-potential.pvd" % input_mesh)
# Loop through the voltages and electrode combinations
for i, (anode, cathode, voltage) in enumerate(v.electrode_triples):
print("Electrodes %d (%lf) -> %d (0)" % (anode, voltage, cathode))
cond = d.project(self.sigma_start, V=self.DV)
# Define the Dirichlet boundary conditions on the active needles
uV = d.Constant(voltage)
term1_bc = d.DirichletBC(self.V, uV, self.patches, v.needles[anode])
term2_bc = d.DirichletBC(self.V, u0, self.patches, v.needles[cathode])
e = d.Function(self.V)
e.vector()[:] = max_e.vector()
# Re-evaluate conductivity
self.increase_conductivity(cond, e)
for j in range(v.max_restarts):
# Update the bilinear form
a = d.inner(d.nabla_grad(z), cond * d.nabla_grad(w)) * dx
# Solve again
print(" [solving...")
d.solve(a == L, U, bcs=[term1_bc, term2_bc])
print(" ....solved]")
# Extract electric field norm
for k in range(len(U.vector())):
if N.isnan(U.vector()[k]):
U.vector()[k] = 1e5
e_new = d.project(d.sqrt(d.dot(d.grad(U), d.grad(U))), self.V)
# Take the max of the new field and the established electric field
e.vector()[:] = N.array([max(*X) for X in zip(e.vector(), e_new.vector())])
# Re-evaluate conductivity
fi << cond
self.increase_conductivity(cond, e)
potential_file << U
# Save the max e function to a VTU
max_e_per_step.vector()[:] = e.vector()[:]
max_e_per_step_file << max_e_per_step
# Store this electric field norm, for this triple, for later reference
self.es[i] = e
# Store the max of this electric field norm and that for all previous triples
max_e_array = N.array([max(*X) for X in zip(max_e.vector(), e.vector())])
max_e.vector()[:] = max_e_array
# Create a new max_e function for storage, or it will be overwritten by the next iteration
max_e_new = d.Function(self.V)
max_e_new.vector()[:] = max_e_array
# Store this max e function for the cumulative coverage curve calculation later
self.max_es[i] = max_e_new
# Save the max e function to a VTU
max_e_file << max_e
self.max_e_count = i
def prepare_increase_conductivity(self):
def sigma_function(l, i):
s = self.tissues_by_subdomain[l]["sigma"]
if isinstance(s, list):
return s[i]
else:
return s
def threshold_function(l, i):
s = self.tissues_by_subdomain[l]["sigma"]
if isinstance(s, list):
return self.tissues_by_subdomain[l][i]
else:
return 1 if i == "threshold reversible" else 0
self.sigma_start = self.per_tissue_constant(lambda l: sigma_function(l, 0))
self.sigma_end = self.per_tissue_constant(lambda l: sigma_function(l, 1))
self.threshold_reversible = self.per_tissue_constant(lambda l: threshold_function(l, "threshold reversible"))
self.threshold_irreversible = self.per_tissue_constant(lambda l: threshold_function(l, "threshold irreversible"))
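# The conductivity is ramped linearly with the field norm, sigma(e) = k * e + h,
# with k and h chosen so that sigma(threshold_reversible) = sigma_start and
# sigma(threshold_irreversible) = sigma_end.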
self.k = (self.sigma_end - self.sigma_start) / (self.threshold_irreversible - self.threshold_reversible)
self.h = self.sigma_start - self.k * self.threshold_reversible
def increase_conductivity(self, cond, e):
# Set up the three way choice function
intermediate = e * self.k + self.h
not_less_than = ufl.conditional(ufl.gt(e, self.threshold_irreversible), self.sigma_end, intermediate)
cond_expression = ufl.conditional(ufl.lt(e, self.threshold_reversible), self.sigma_start, not_less_than)
# Project this onto the function space
cond_function = d.project(ufl.Max(cond_expression, cond), cond.function_space())
cond.assign(cond_function)
def plot_bitmap_result(self):
# Create a horizontal axis
cc_haxis = N.linspace(5000, 1e5, 200)
# Import the binary data indicating the location of structures
self.load_patient_data()
# Calculate the tumour volume; this is what we will compare against
tumour_volume = (self.indicators["tumour"] == 1).sum()
# Initialize the output_arrays vector and rescale the x axis to V/cm
output_arrays = [cc_haxis / 100]
# Loop through the electrode triples
for i, triple in enumerate(v.electrode_triples):
# Sample the max e values for this triple onto the patient bitmap grid inside the tumour
e_dg = self.interpolate_to_patient_data(self.max_es[i], self.indicators["tumour"])
# Count the voxels with value greater than x, looping over x as e-norm thresholds (scaled by the tumour voxel count)
elim = N.vectorize(lambda x: (e_dg > x).sum() / tumour_volume)
output_arrays.append(elim(cc_haxis))
# Compile into a convenient array
output = N.array(list(zip(*output_arrays)))
# Output cumulative coverage curves as CSV
N.savetxt('results/%s-coverage_curves_bitmap.csv' % input_mesh, output)
# Plot the coverage curves
for (anode, cathode, voltage), a in zip(v.electrode_triples, output_arrays[1:]):
P.plot(output_arrays[0], a, label="%d - %d" % (anode, cathode))
# Draw the plot
P.draw()
P.title(r"Bitmap-based")
P.xlabel(r"Threshold level of $|E|$ ($\mathrm{J}$)")
P.ylabel(r"Fraction of tumour beneath level")
# Show a legend for the plot
P.legend(loc=3)
# Display the plot
P.show(block=True)
def plot_result(self):
# Calculate preliminary relationships
dofmap = self.DV.dofmap()
cell_dofs = N.array([dofmap.cell_dofs(c)[0] for c in N.arange(self.mesh.num_cells()) if (self.subdomains[c] in v.tissues["tumour"]["indices"])])
volumes = N.array([d.Cell(self.mesh, c).volume() for c in N.arange(self.mesh.num_cells()) if (self.subdomains[c] in v.tissues["tumour"]["indices"])])
# Create a horizontal axis
cc_haxis = N.linspace(5000, 1e5, 200)
# Calculate the tumour volume; this is what we will compare against
tumour_volume = self.get_tumour_volume()
# Initialize the output_arrays vector and rescale the x axis to V/cm
output_arrays = [cc_haxis / 100]
# Loop through the electrode pairs
for i, triple in enumerate(v.electrode_triples):
# Project the max e values for this triple to DG0 - this forces an evaluation of the function at the mid-point of each tet, DG0's only DOF
e_dg = d.project(self.max_es[i], self.DV)
# Calculate the "max e" contribution for each cell
contributor = N.vectorize(lambda c: e_dg.vector()[c])
contributions = contributor(cell_dofs)
# Sum the tet volumes for tets with a midpoint value greater than x, looping over x as e-norm thresholds (also scale to tumour volume)
elim = N.vectorize(lambda x: volumes[contributions > x].sum() / tumour_volume)
| |
#!/usr/bin/env python3
# coding: utf-8
import unittest
import testgres
import string
import random
from .base_test import BaseTest
from .base_test import ThreadQueryExecutor
from .base_test import generate_string
from .base_test import wait_stopevent
from .base_test import wait_checkpointer_stopevent
from testgres.enums import NodeStatus
class RecoveryTest(BaseTest):
def setUp(self):
super().setUp()
self.node.append_conf('postgresql.conf',
"log_min_messages = notice\n")
def checkpoint_simple_base(self, compressed):
node = self.node
node.start() # start PostgreSQL
node.safe_psql('postgres',
"""
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_test (
id integer NOT NULL,
val text,
PRIMARY KEY (id) %s
) USING orioledb;
INSERT INTO o_test
(SELECT id, id || 'val' FROM generate_series(1, 10000, 1) id);
"""
% ("WITH (compress)" if compressed else "")
)
node.safe_psql('postgres', "CHECKPOINT;")
self.assertTrue(node.execute("SELECT orioledb_tbl_check('o_test'::regclass);")[0][0])
node.safe_psql('postgres', "UPDATE o_test SET val = 'xxx' WHERE id % 1000 = 0;")
node.safe_psql('postgres', "CHECKPOINT;")
self.assertTrue(node.execute("SELECT orioledb_tbl_check('o_test'::regclass);")[0][0])
node.safe_psql('postgres', "CHECKPOINT;")
self.assertTrue(node.execute("SELECT orioledb_tbl_check('o_test'::regclass);")[0][0])
node.stop() # stop PostgreSQL
def test_checkpoint_simple(self):
self.checkpoint_simple_base(False)
def test_checkpoint_compress_simple(self):
self.checkpoint_simple_base(True)
def checkpoint_multiple_base(self, compressed):
node = self.node
node.append_conf('postgresql.conf',
"shared_preload_libraries = orioledb\n")
node.start() # start PostgreSQL
node.safe_psql('postgres',
"""
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_test (
key int NOT NULL,
value int NOT NULL,
PRIMARY KEY (key)
) USING orioledb %s;
INSERT INTO o_test
(SELECT i, i + 1 FROM generate_series(1, 10000, 1) i);
"""
% ("WITH (primary_compress)" if compressed else "")
)
node.safe_psql('postgres', "CHECKPOINT;")
node.safe_psql('postgres', "UPDATE o_test SET value = value + 1 WHERE key % 10 = 0;")
node.stop(['-m', 'immediate'])
node.start()
node.safe_psql('postgres', "CHECKPOINT;")
self.assertEqual(
node.execute('postgres', "SELECT orioledb_tbl_check('o_test'::regclass);")[0][0],
True)
node.safe_psql('postgres', "UPDATE o_test SET value = value + 1 WHERE key % 10 = 0;")
node.stop(['-m', 'immediate'])
node.start()
node.safe_psql('postgres', "CHECKPOINT;")
self.assertEqual(
node.execute('postgres', "SELECT orioledb_tbl_check('o_test'::regclass);")[0][0],
True)
node.safe_psql('postgres', "UPDATE o_test SET value = value + 1 WHERE key % 10 = 0;")
self.assertEqual(
node.execute('postgres', "SELECT value FROM o_test WHERE key = 10;")[0][0],
14)
self.assertEqual(
node.execute('postgres', "SELECT orioledb_tbl_check('o_test'::regclass);")[0][0],
True)
node.safe_psql('postgres', "UPDATE o_test SET value = value + 1 WHERE key % 10 = 0;")
node.safe_psql('postgres', "CHECKPOINT;")
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual(
node.execute('postgres', "SELECT value FROM o_test WHERE key = 10;")[0][0],
15)
self.assertEqual(
node.execute('postgres', "SELECT orioledb_tbl_check('o_test'::regclass);")[0][0],
True)
node.stop()
def test_checkpoint_multiple(self):
self.checkpoint_multiple_base(False)
def test_checkpoint_compress_multiple(self):
self.checkpoint_multiple_base(True)
def test_checkpoint_simple_in_progress(self):
node = self.node
node.start() # start PostgreSQL
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test (\n"
" id integer NOT NULL,\n"
" val text\n"
") USING orioledb;\n"
"INSERT INTO o_test\n"
" (SELECT id, id || 'val' FROM generate_series(1, 100, 1) id);\n"
)
con1 = node.connect()
con1.execute("INSERT INTO o_test (SELECT id, id || 'val' FROM generate_series(101, 200, 1) id);")
node.safe_psql('postgres', "CHECKPOINT;")
con1.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual(node.execute('postgres', 'SELECT count(*) FROM o_test;')[0][0], 100)
node.stop()
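# The tests below share a pattern: a stop event pauses the checkpointer while it
# starts building a secondary index, a concurrent transaction then commits (or
# rolls back), the stop event is released, and the node is crash-restarted with
# '-m immediate' to verify that recovery leaves every index consistent.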
def test_primary_xip_secondary_tuples_insert(self):
node = self.node
node.append_conf('postgresql.conf',
"orioledb.enable_stopevents = true\n")
node.start()
node.safe_psql('postgres',
"""
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_test (
id integer NOT NULL,
id2 integer NOT NULL,
id3 integer NOT NULL,
PRIMARY KEY (id)
) USING orioledb;
CREATE UNIQUE INDEX o_test_ix1 ON o_test (id2);
CREATE INDEX o_test_ix2 ON o_test (id3);
""")
con1 = node.connect()
con2 = node.connect()
con1.begin()
con1.execute("INSERT INTO o_test (SELECT id, id + 1, id + 3 FROM generate_series(1, 100, 1) id);")
con1.execute("SELECT pg_stopevent_set('checkpoint_index_start', '$.treeName == \"o_test_ix1\"');")
t1 = ThreadQueryExecutor(con2, "CHECKPOINT;")
t1.start()
wait_checkpointer_stopevent(node)
con1.commit()
con1.execute("SELECT pg_stopevent_reset('checkpoint_index_start');")
t1.join()
con1.close()
con2.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual(100, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(100, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(100, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.execute("DELETE FROM o_test;")
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.stop()
def test_primary_xip_secondary_tuples_delete(self):
node = self.node
node.append_conf('postgresql.conf',
"orioledb.enable_stopevents = true\n")
node.start()
node.safe_psql('postgres',
"""
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_test (
id integer NOT NULL,
id2 integer NOT NULL,
id3 integer NOT NULL,
PRIMARY KEY (id)
) USING orioledb;
CREATE UNIQUE INDEX o_test_ix1 ON o_test (id2);
CREATE INDEX o_test_ix2 ON o_test (id3);
""")
con1 = node.connect()
con2 = node.connect()
con1.begin()
con1.execute("INSERT INTO o_test (SELECT id, id + 1, id + 3 FROM generate_series(1, 100, 1) id);")
con1.commit()
con1.begin()
con1.execute("DELETE FROM o_test WHERE mod(id, 5) = 0;")
con1.execute("SELECT pg_stopevent_set('checkpoint_index_start', '$.treeName == \"o_test_ix1\"');")
t1 = ThreadQueryExecutor(con2, "CHECKPOINT;")
t1.start()
wait_checkpointer_stopevent(node)
con1.commit()
con1.execute("SELECT pg_stopevent_reset('checkpoint_index_start');")
t1.join()
con1.close()
con2.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual(80, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(80, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(80, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.execute("DELETE FROM o_test;")
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.stop()
def test_primary_xip_secondary_tuples_update(self):
node = self.node
node.append_conf('postgresql.conf',
"orioledb.enable_stopevents = true\n")
node.start()
node.safe_psql('postgres',
"""
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_test (
id integer NOT NULL,
id2 integer NOT NULL,
id3 integer NOT NULL,
PRIMARY KEY (id)
) USING orioledb;
CREATE UNIQUE INDEX o_test_ix1 ON o_test (id2);
CREATE INDEX o_test_ix2 ON o_test (id3);
""")
con1 = node.connect()
con2 = node.connect()
con1.begin()
con1.execute("INSERT INTO o_test (SELECT id, id + 1, id + 3 FROM generate_series(1, 100, 1) id);")
con1.commit()
con1.begin()
con1.execute("UPDATE o_test SET id = id + 100 WHERE mod(id, 10) = 0;")
con1.execute("UPDATE o_test SET id2 = id2 + 100 WHERE mod(id, 3) = 0;")
con1.execute("UPDATE o_test SET id3 = id3 + 100 WHERE mod(id, 4) = 0;")
con1.execute("UPDATE o_test SET id = id + 100, id2 = id2 + 100, id3 = id3 + 100 WHERE mod(id, 7) = 0;")
con1.execute("SELECT pg_stopevent_set('checkpoint_index_start', '$.treeName == \"o_test_ix1\"');")
t1 = ThreadQueryExecutor(con2, "CHECKPOINT;")
t1.start()
wait_checkpointer_stopevent(node)
con1.commit()
con1.execute("SELECT pg_stopevent_reset('checkpoint_index_start');")
t1.join()
con1.close()
con2.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual(100, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(100, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(100, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.execute("DELETE FROM o_test;")
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.stop()
def test_primary_xip_secondary_tuples_mix(self):
node = self.node
node.append_conf('postgresql.conf',
"orioledb.enable_stopevents = true\n")
node.start()
node.safe_psql('postgres',
"""
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_test (
id integer NOT NULL,
id2 integer NOT NULL,
id3 integer NOT NULL,
PRIMARY KEY (id)
) USING orioledb;
CREATE UNIQUE INDEX o_test_ix1 ON o_test (id2);
CREATE INDEX o_test_ix2 ON o_test (id3);
""")
# insert-update-delete-commit
con1 = node.connect()
con2 = node.connect()
con1.begin()
con1.execute("INSERT INTO o_test (SELECT id, id + 1, id + 3 FROM generate_series(1, 10, 1) id);")
con1.execute("UPDATE o_test SET id3 = id3 + 1 WHERE id < 5")
con1.execute("DELETE FROM o_test WHERE id > 5;")
con1.execute("SELECT pg_stopevent_set('checkpoint_index_start', '$.treeName == \"o_test_ix1\"');")
t1 = ThreadQueryExecutor(con2, "CHECKPOINT;")
t1.start()
wait_checkpointer_stopevent(node)
con1.commit()
con1.execute("SELECT pg_stopevent_reset('checkpoint_index_start');")
t1.join()
con1.close()
con2.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual("[(1, 2, 5), (2, 3, 6), (3, 4, 7), (4, 5, 8), (5, 6, 8)]", str(node.execute("SELECT * FROM o_test;")))
self.assertEqual(5, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(5, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(5, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.execute("DELETE FROM o_test;")
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.stop()
node.start()
# insert-update-delete-rollback
con1 = node.connect()
con2 = node.connect()
con1.execute("INSERT INTO o_test (SELECT id, id + 1, id + 3 FROM generate_series(1, 10, 1) id);")
con1.execute("UPDATE o_test SET id3 = id3 + 1 WHERE id < 5")
con1.execute("DELETE FROM o_test WHERE id > 5;")
con1.execute("SELECT pg_stopevent_set('checkpoint_index_start', '$.treeName == \"o_test_ix1\"');")
t1 = ThreadQueryExecutor(con2, "CHECKPOINT;")
t1.start()
wait_checkpointer_stopevent(node)
con1.rollback()
con1.execute("SELECT pg_stopevent_reset('checkpoint_index_start');")
t1.join()
con1.close()
con2.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.stop()
def test_primary_empty_secondary_tuples(self):
node = self.node
node.append_conf('postgresql.conf',
"orioledb.enable_stopevents = true\n")
node.start()
node.safe_psql('postgres',
"""
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_test (
id integer NOT NULL,
id2 integer NOT NULL,
id3 integer NOT NULL,
PRIMARY KEY (id)
) USING orioledb;
CREATE UNIQUE INDEX o_test_ix1 ON o_test (id2);
CREATE INDEX o_test_ix2 ON o_test (id3);
""")
# insert-update-delete-commit
con1 = node.connect()
con2 = node.connect()
con1.execute("SELECT pg_stopevent_set('checkpoint_index_start', '$.treeName == \"o_test_ix1\"');")
t1 = ThreadQueryExecutor(con2, "CHECKPOINT;")
t1.start()
wait_checkpointer_stopevent(node)
con1.begin()
con1.execute("INSERT INTO o_test (SELECT id, id + 1, id + 3 FROM generate_series(1, 10, 1) id);")
con1.commit()
con1.begin()
con1.execute("UPDATE o_test SET id3 = id3 + 1 WHERE id < 5")
con1.commit()
con1.begin()
con1.execute("DELETE FROM o_test WHERE id > 5;")
con1.commit()
con1.execute("SELECT pg_stopevent_reset('checkpoint_index_start');");
t1.join()
con1.close()
con2.close()
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual("[(1, 2, 5), (2, 3, 6), (3, 4, 7), (4, 5, 8), (5, 6, 8)]", str(node.execute("SELECT * FROM o_test;")))
self.assertEqual(5, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(5, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(5, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.execute("DELETE FROM o_test;")
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM o_test;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id2) t;")[0][0])
self.assertEqual(0, node.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY id3) t;")[0][0])
node.stop()
def test_wal_truncate(self):
node = self.node
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test (\n"
" id integer NOT NULL,\n"
" val text,\n"
" PRIMARY KEY (id)\n"
") USING orioledb;\n")
node.safe_psql("INSERT INTO o_test\n"
"(SELECT id, id || 'val' FROM generate_series(1, 100, 1) id);\n")
node.safe_psql("TRUNCATE o_test;")
node.safe_psql("INSERT INTO o_test\n"
"(SELECT id, id || 'val' FROM generate_series(101, 200, 1) id);\n")
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual(15050, node.execute("SELECT SUM(id) FROM o_test;")[0][0])
node.stop()
def test_wal_without_checkpoint(self):
node = self.node
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test (\n"
" id integer NOT NULL,\n"
" val text,\n"
" PRIMARY KEY (id)\n"
") USING orioledb;\n"
"TRUNCATE o_test;\n")
node.safe_psql("INSERT INTO o_test\n"
"(SELECT id, id || 'val' FROM generate_series(1, 100, 1) id);\n")
node.stop(['-m', 'immediate'])
node.start()
self.assertEqual(
str(node.execute('postgres',
'SELECT * FROM | |
# Repository: abailie3/TravelingIntelligence
"""
Copyright (c) 2018 <NAME>, All rights reserved.
"""
import inspect
import math
import types
from typing import Tuple, List, TypeVar, Dict
from abc import ABC
__VarCOMB__ = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]
class MachineInterface(object):
"""
An interface allowing AI/ML algorithms to design and execute algorithms.
"""
__tab__ = " " # Standard tab width for line indentation
__base_imports__ = ["from typing import Tuple, List"]
__method_start__ = "def solve(origin, targets) -> Tuple[float, List[tuple]]:\n" + __tab__ + \
"fout = -1.\n" + __tab__ + "lout = []\n"
__return__ = __tab__ + "return fout, lout"
__ASSIGNVARIABLE = 0
def __init__(self):
self.__method = self.__method_start__
self.__addImports = []
self.__lineDepth = 1
self.__variables = [("origin",), ("targets", List), ("fout",), ("lout", List)]
self.__numVar = len(self.__variables)
def __compile__(self) -> types.FunctionType:
"""
Compiles the dynamically created code, returning the function.
:return: (FunctionType) a compiled function from the dynamically created code.
"""
code = {}
exec(compile(self.__build_method__(), '<string>', 'exec'), code)
return code["solve"]
def __build_method__(self) -> str:
"""
This constructs the method's code to be compiled/inspected.
:return: (str) A string of the method's code.
"""
out = ""
for imp in self.__base_imports__:
out += imp + "\n"
return out + self.__method + "\n" + self.__return__
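# With no extra lines added, the generated source is roughly:
#   from typing import Tuple, List
#   def solve(origin, targets) -> Tuple[float, List[tuple]]:
#       fout = -1.
#       lout = []
#       return fout, lout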
def __new_list__(self, name=None):
return self.__new_var__(List, name)
def __new_var__(self, var_type=None, name=None) -> str:
"""
For creating and logging new variables.
:param var_type: (Optional type) The type of the variable (e.g. List).
:param name: (Optional str) The name for the variable. If None, a name is generated from the current time.
:return: (str) The variable name (for List variables, an initialisation line of the form "name = []").
"""
if name is None:
from time import time
name = str(int(time()*1e7))
self.__numVar += 1
self.__variables.append((name, var_type))
if var_type == List:
return name + " = []"
return name
def __var_assign__(self, idx, value) -> str:
# TODO: Add code for variable assignment
pass
@staticmethod
def __math__(inputs: list) -> str:
"""
This method chooses a function from the math package and creates a line of code representing its usage.
:param inputs: (list) A list of inputs to configure the method call.
inputs[0] - the index of the math function to call, if non-int then the
value is truncated to an integer.
inputs[1:] - the remaining indexes act as the inputs to the functions.
if the type of the index is Tuple(int) then this indicates
that a variable in the self.__variables list (at index of the
int % len(self.variables)) shall be used. Otherwise, the
value of the index shall be a static input to the function.
:return: (str) An un-indented string of the method call.
"""
# TODO: Find new way of indicating that a variable should be used instead of a static value.
out = ""
if len(inputs) < 2:
return None # TODO: create error and throw that instead
fname, func = inspect.getmembers(math, inspect.isbuiltin)[inputs[0]]
out += fname + "("
nvars = MachineInterface.__math_doc_nvars__(func.__doc__)
if nvars == 1:
out += str(inputs[1]) + ","
else:
for i in range(1, nvars):
out += str(inputs[i % len(inputs)]) + ","
return out[:-1] + ")"
# b = func.__doc__
# print(dir(func))
# print(b)
# print(dir(b))
# print(inspect.signature(b)
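# Rough usage sketch: the index-to-function mapping depends on the running
# Python's math module, but if inputs[0] happened to select math.acos, then
# __math__([i_acos, 0.5]) would return the string "acos(0.5)".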
@staticmethod
def __math_doc_nvars__(doc: str) -> int:
"""
This finds the number of parameters of a function from the math library by parsing the function's
doc parameter. NOTE: This is not a great way of finding the number of parameters, but I can't find
a better way.
:param doc: (str) the doc string for the function.
:return: (int) number of variables for the function.
"""
# TODO: Find a better way.
# TODO: Potentially log the functions away in dictionary {func_name:n_vars} for faster recall
start = doc.find("(")+1
end = doc.find(")")
if end < start:
return 0
sub_string = doc[start:end]
commas = MachineInterface.__count_commas__(sub_string)
if commas == 0:
if len(sub_string) > 0:
return 1
else:
return 0
else:
return commas + 1
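# For example, a doc string starting with "acos(x)" yields 1, "atan2(y, x)"
# yields 2, and a doc string without a parenthesised signature yields 0.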
@staticmethod
def __count_commas__(test_str: str) -> int:
"""
Counts number of commas in the input string
:param test_str: (str) String with commas to count
:return: (int) Number of commas in test_str
"""
i = test_str.find(",")
if i == -1:
return 0
return 1 + MachineInterface.__count_commas__(test_str[i+1:])
@staticmethod
def __safe_idx__(arr_name: str, idx) -> str:
"""
This method creates a line of code that safe indexes an array. Safe indexing here means indexing such that
there is no possibility of indexing out of bounds (assuming the array is not of length 0).
:param arr_name: (str) Name of the variable representing the array.
:param idx: (int -or- float) Index of the array to grab. If float is provided it will be truncated to int.
:return: (str) An un-indented string of the line of code.
"""
return arr_name + "[" + str(idx) + " % len(" + arr_name + ")]"
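# For instance, __safe_idx__("targets", 7) returns the code fragment
# "targets[7 % len(targets)]", which cannot index out of bounds for a
# non-empty list.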
def test_machine_interface(self) -> Tuple[float, List]:
"""
Just a test module for the class.
:return:
"""
# b = self.__math__([1, 3, 5, 2, 5, 3, 2, 15, 6,7, 5])
return self.__compile__()(0, [])
class CodeElement(ABC):
"""
Abstract class for the various code elements. (eg. ForLoop, Conditional, etc.)
"""
__tab__ = " " # Standard tab width for line indentation
def __init__(self):
self.__car_vars__ = {}
self.__car_var_reg__ = []
self._int_vars_ = {}
self._spacing_ = ""
self._block_ = []
self._prefix_ = ""
self.isClosed = False
@property
def _carry_variables_(self) -> Dict:
return self.__car_vars__
@_carry_variables_.setter
def _carry_variables_(self, var: Dict):
self.__car_vars__ = var
@property
def _carry_register_(self) -> List[str]:
return self.__car_var_reg__
@_carry_register_.setter
def _carry_register_(self, car_reg: List[str]):
self.__car_var_reg__ = car_reg
@property
def internal_variables(self) -> Dict:
return self._int_vars_
@internal_variables.setter
def internal_variables(self, var: Dict):
self._int_vars_ = var
@property
def __spacing__(self) -> str:
return self._spacing_
@__spacing__.setter
def __spacing__(self, space: str):
self._spacing_ = space
@property
def __code_block__(self) -> List[str]:
return self._block_
@__code_block__.setter
def __code_block__(self, new_block: List[str]):
self._block_ = new_block
@property
def __prefix__(self):
return self._prefix_
@__prefix__.setter
def __prefix__(self, new_prefix: str):
self._prefix_ = new_prefix
def _get_available_var_(self) -> str:
for a in __VarCOMB__:
for b in __VarCOMB__:
for i in range(10):
if (a + b + str(i)) not in self._carry_variables_ and \
(a + b + str(i)) not in self.internal_variables:
return a + b + str(i)
return "" # TODO: Make this throw an error instead
def add_code(self, code_lines: List[str]) -> None:
"""
Method for adding a block of code lines using a list of strings.
:param code_lines: (List[str]) The block of code lines
:return: (void)
"""
self.__code_block__ += code_lines
def add_code_block(self, code_block: 'CodeElement') -> None:
"""
Method for adding a block of code lines using another code element.
:param code_block: (CodeElement) The block of code lines
:return: (void)
"""
self.add_code(code_block.get_code())
def get_code(self) -> List[str]:
"""
To retrieve the code generated by the code element
:return: (str) The un-indented line or block of code representing the code element.
"""
if self.__prefix__ == "":
out = []
else:
out = [self.__prefix__]
if self.__spacing__ == "":
return out + self.__code_block__
for line in self.__code_block__:
out.append(self.__spacing__ + line)
return out
CE = TypeVar('CE', bound=CodeElement)
class GenericBlock(CodeElement):
"""
This class is for creating a standard block of code.
"""
def __init__(self):
super().__init__()
self.__prefix__ = ""
self.__spacing__ = ""
class ForLoop(CodeElement):
"""
This class is for creating a for loop.
"""
def __init__(self):
super().__init__()
self.indexing = ""
self.__prefix__ = "for "
self.__spacing__ = self.__tab__
def set_range_indexing(self, var_idx: int) -> None:
"""
Sets the indexing of the for loop to iterate over a range from 0 to some number
in steps of 1. The generated line has the following form:
"for var0 in range(var1):"
Where:
var0 - some un-used variable
var1 - if var_idx corresponds to a variable of type int, then var1 is this variable. Else if var_idx
corresponds to a list, then var1 is the length of said variable. Otherwise, this method fails
:param var_idx: (int) The index in the carry variables of the variable to use in the "range()" interior
:return: (void)
"""
if len(self._carry_register_) < var_idx or len(self._carry_register_) == 0:
return # TODO: Throw an error
if self._carry_variables_[self._carry_register_[var_idx]] not in [List, int, list]:
return # TODO: | |
# Repository: hughbg/healvis, file: healvis/observatory.py
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 3-clause BSD License
import numpy as np
import multiprocessing as mp
import sys
import resource
import warnings
import time
import copy
from astropy_healpix import healpy as hp
from astropy_healpix import HEALPix
from astropy.time import Time
from astropy.constants import c
from astropy.coordinates import Angle, AltAz, EarthLocation, ICRS
from astropy import units
from .beam_model import PowerBeam, AnalyticBeam
from .utils import jy2Tsr, mparray
from .cosmology import c_ms
# -----------------------
# Classes and methods to calculate visibilities from HEALPix maps.
# -----------------------
class Baseline(object):
def __init__(self, ant1_enu=None, ant2_enu=None, ant1=None, ant2=None, enu_vec=None):
if enu_vec is not None:
self.enu = enu_vec
else:
ant1_enu = np.asarray(ant1_enu)
ant2_enu = np.asarray(ant2_enu)
self.enu = ant2_enu - ant1_enu
assert self.enu.size == 3, f"Wrong enu vector shape {self.enu.shape}"
# Antenna indexes, for indexing beam list if necessary.
# Must be numbers from 0 ..., so the right beams are found.
self.ant1 = ant1
self.ant2 = ant2
def get_uvw(self, freq_Hz):
return np.outer(self.enu, 1 / (c_ms / freq_Hz)) # In wavelengths
def get_fringe(self, az, za, freq_Hz, degrees=False):
if degrees:
az *= np.pi / 180
za *= np.pi / 180
freq_Hz = freq_Hz.astype(float)
pos_l = np.sin(az) * np.sin(za)
pos_m = np.cos(az) * np.sin(za)
pos_n = np.cos(za)
lmn = np.vstack((pos_l, pos_m, pos_n))
self.uvw = self.get_uvw(freq_Hz)
udotl = np.einsum("jk,jl->kl", lmn, self.uvw)
fringe = np.cos(2 * np.pi * udotl) + (1j) * np.sin(
2 * np.pi * udotl
) # This is weirdly faster than np.exp
return fringe
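# Example sketch with hypothetical numbers: a 14.6 m east-west baseline
# evaluated at one frequency for two sky positions; following the einsum
# above, the result has shape (Npix, Nfreq).
#   bl = Baseline(ant1_enu=[0.0, 0.0, 0.0], ant2_enu=[14.6, 0.0, 0.0])
#   az = np.array([0.0, np.pi / 2])   # radians
#   za = np.array([0.1, 0.2])         # radians
#   fringe = bl.get_fringe(az, za, np.array([150e6]))   # shape (2, 1)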
class Observatory(object):
"""
Representation of the observing instrument.
Parameters
----------
latitude, longitude: float
Decimal degrees position of the observatory on Earth.
height: float
Decimal meters height of the observatory on Earth.
fov: float
Field of view in degrees (Defaults to 180 deg for horizon to horizon).
baseline_array: array_like of Baseline instances
The set of baselines in the observatory.
freqs: array of float
Array of frequencies, in Hz
nside: int
Nside parameter for the input map (optional).
array: array_like of Baseline instances
Alias for baseline_array, for backwards compatibility.
"""
def __init__(
self,
latitude,
longitude,
height=0.0,
fov=None,
baseline_array=None,
freqs=None,
nside=None,
array=None,
):
if baseline_array is None and array is not None:
baseline_array = array
self.array = baseline_array
self.freqs = freqs
if fov is None:
fov = 180 # Degrees
self.fov = fov
if nside is None:
self.healpix = None
else:
self.healpix = HEALPix(nside=nside)
self._set_vectors()
self.beam = None # Primary beam. Set by `set_beam`
self.times_jd = None # Observation times. Set by `set_pointings` function
self.pointing_centers = None # List of [ra, dec] positions. One for each time. `set_pointings` sets this to zenith.
self.north_poles = None # [ra,dec] ICRS position of the Earth's north pole. Set by `set_pointings`.
self.telescope_location = EarthLocation.from_geodetic(
longitude * units.degree, latitude * units.degree, height
)
self.do_horizon_taper = False
if freqs is not None:
self.Nfreqs = len(freqs)
def _set_vectors(self):
"""
Set the unit vectors to pixel centers for the whole shell, in a shared memory array.
Sets the attribute _vecs.
"""
vecs = hp.pix2vec(self.healpix.nside, np.arange(self.healpix.npix))
vecs = np.array(vecs).T # Shape (Npix, 3)
self._vecs = mparray(vecs.shape, dtype=float)
self._vecs[()] = vecs[()]
def set_pointings(self, time_arr):
"""
Set the pointing centers (in ra/dec) based on array location and times.
Dec = self.lat
RA = What RA is at zenith at a given JD?
Also sets the north pole positions in ICRS.
"""
self.times_jd = time_arr
centers = []
north_poles = []
for t in Time(time_arr, scale="utc", format="jd"):
zen = AltAz(
alt=Angle("90d"),
az=Angle("0d"),
obstime=t,
location=self.telescope_location,
)
north = AltAz(
alt=Angle("0d"),
az=Angle("0d"),
obstime=t,
location=self.telescope_location,
)
zen_radec = zen.transform_to(ICRS())
north_radec = north.transform_to(ICRS())
centers.append([zen_radec.ra.deg, zen_radec.dec.deg])
north_poles.append([north_radec.ra.deg, north_radec.dec.deg])
self.pointing_centers = centers
self.north_poles = north_poles
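# Example sketch (hypothetical site and times): zenith pointings for two JDs.
#   obs = Observatory(latitude=-26.7, longitude=116.7, freqs=np.array([150e6]), nside=64)
#   obs.set_pointings(np.array([2458000.1, 2458000.2]))
#   # obs.pointing_centers is now a list of two [ra, dec] pairs in degrees.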
def calc_azza(self, center, north=None, return_inds=False):
"""
Calculate azimuth/altitude of sources given the pointing center.
Parameters
----------
center: array_like of float
[lon, lat] of pointing center in degrees
north: array_like of float
[ra, dec] in degrees of the ICRS North pole.
This is used to define the origin of azimuth.
Defaults to [0, 90].
NB -- This is a bad assumption, in general, and will affect the
azimuth angles returned. Providing the north position fixes this.
return_inds: bool
Return the healpix indices (Default False)
Returns
-------
zenith_angles: array of float
zenith angles in radians.
azimuth_angles: array of float
azimuth angles in radians (same shape as zenith_angles)
indices: array of int
healpix indices of chosen pixels
(If return_inds is True)
"""
if self.fov is None:
raise AttributeError("Need to set a field of view in degrees")
if self.healpix is None:
raise AttributeError("Need to set HEALPix instance attribute")
radius = self.fov * np.pi / 180.0 * 1 / 2.0
if self.do_horizon_taper:
radius += self.healpix.pixel_resolution.to_value(
"rad"
) # Allow parts of pixels to be above the horizon.
cvec = hp.ang2vec(center[0], center[1], lonlat=True)
if north is None:
north = np.array([0, 90.0])
nvec = hp.ang2vec(north[0], north[1], lonlat=True)
colat = np.arccos(np.dot(cvec, nvec)) # Should be close to 90d
xvec = np.cross(nvec, cvec) * 1 / np.sin(colat)
yvec = np.cross(cvec, xvec)
sdotx = np.tensordot(self._vecs, xvec, 1)
sdotz = np.tensordot(self._vecs, cvec, 1)
sdoty = np.tensordot(self._vecs, yvec, 1)
za_arr = np.arccos(sdotz)
az_arr = (np.arctan2(sdotx, sdoty)) % (
2 * np.pi
) # xy plane is tangent. Increasing azimuthal angle eastward, zero at North (y axis). x is East.
pix = za_arr <= radius # Horizon cut.
if return_inds:
return za_arr[pix], az_arr[pix], np.arange(self.healpix.npix)[pix]
return za_arr[pix], az_arr[pix]
def set_fov(self, fov):
"""
fov = field of view in degrees
"""
self.fov = fov
def set_beam(self, beam="uniform", freq_interp_kind="linear", **kwargs):
"""
Set the beam of the array.
Args:
beam : str, or list of beam objects
str: If it is a viable input to AnalyticBeam,
then instantiates an AnalyticBeam, otherwise assumes beam is
a filepath to a beamfits and instantiates a PowerBeam.
list: List of beam objects. This allows for external beams to be
used, and different beams for each antenna. They should not be
power beams. Each beam must have an interp method:
interp(self, az_array, za_array, freq_array)
freq_interp_kind : str
For PowerBeam, frequency interpolation option.
kwargs : keyword arguments
kwargs to pass to AnalyticBeam instantiation.
"""
if isinstance(beam, list): self.beam = beam
elif beam in ['uniform', 'gaussian', 'airy'] or callable(beam):
self.beam = AnalyticBeam(beam, **kwargs)
else:
self.beam = PowerBeam(beam)
self.beam.interp_freq(self.freqs, inplace=True, kind=freq_interp_kind)
self.beam.freq_interp_kind = freq_interp_kind
def beam_sq_int(self, freqs, Nside, pointing, beam_pol="pI"):
"""
Get the integral of the squared antenna primary beam power across the sky.
Args:
freqs : 1D ndarray
Frequencies [Hz]
Nside : int
Nside of healpix map to use in integral
pointing : len-2 list
Pointing center [Dec, RA] in J2000 degrees
"""
if isinstance(self.beam, list):
raise RuntimeError("beam_sq_int not implemented for multiple antenna beams")
za, az = self.calc_azza(pointing)
beam_sq_int = np.sum(
self.beam.beam_val(az, za, freqs, pol=beam_pol) ** 2, axis=0
)
om = 4 * np.pi / (12.0 * Nside ** 2)
beam_sq_int = beam_sq_int * om
return beam_sq_int
def external_beam_val(self, beam, az_arr, za_arr, freqs, pol="XX"):
"""
Call interp() on a beam, and provide results in the right format.
"""
interp_data, interp_basis_vector = beam.interp(az_array=az_arr, za_array=za_arr,
freq_array=freqs)
return interp_data[0, 0, 1].T # just want Npix, Nfreq
def _horizon_taper(self, za_arr):
"""
For pixels near the edge of the FoV downweight flux
by what fraction of the pixel is below the horizon.
(Allow pixels to "set")
"""
res = self.healpix.pixel_resolution.to_value("rad")
max_za = np.radians(self.fov) / 2.0
fracs = 0.5 * (1 - (za_arr - max_za) / res)
fracs[fracs > 1] = 1.0 # Do not weight pixels fully above the horizon.
return fracs
def _vis_calc(self, pcents, tinds, shell, vis_array, Nfin, beam_pol="pI"):
"""
Function sent to subprocesses. Called by make_visibilities.
pcents : Pointing centers to evaluate.
tinds : Array of indices in the time array (and correspondingly in pointings/north_poles)
shell : SkyModel data array
vis_array : Output array for placing results.
Nfin : Number of finished tasks. A variable shared among subprocesses.
"""
if len(pcents) == 0:
return
# Check for North Pole attribute.
haspoles = True
if self.north_poles is None:
warnings.warn("North pole positions not set. | |
# Repository: CowherdChris/droidlet
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
from .memory_filters import get_property_value
# attribute has function signature list(mems) --> list(value)
class Attribute:
def __init__(self, memory):
self.memory = memory
def __call__(self, mems):
raise NotImplementedError("Implemented by subclass")
class TableColumn(Attribute):
"""
for each input memory, the call returns a column value or a triple obj_text via
get_property_value
Args:
memory (droidlet memory): the memory that will be queried
attribute (str): the name of the column or triple predicate whose value is
to be returned
"""
def __init__(self, memory, attribute, get_all=False):
super().__init__(memory)
self.attribute = attribute
# if this is true, the value will be a list of all outputs
# for attributes where one mem can have multiple values
self.get_all = get_all
def __call__(self, mems):
return [
get_property_value(self.memory, mem, self.attribute, get_all=self.get_all)
for mem in mems
]
def __repr__(self):
return "Attribute: " + self.attribute
class TripleWalk(Attribute):
"""
for each input memory, takes a specified path along the triples graph
and returns a MemoryNode corresponding to where the walk ends
Args:
memory (droidlet memory): the memory that will be queried
path list(tuple(str, str)): a list of tuples where the first entry in the
tuples is the pred_text and the second is either
"subj_variable" or "obj_variable"
"""
def __init__(self, memory, path, get_all=False):
super().__init__(memory)
self.path = path
def __call__(self, mems):
step = mems
for p in self.path:
next_step = []
for mem in step:
n = None
if mem is not None:
if p[1] == "subj_variable":
n = self.memory.get_triples(pred_text=p[0], obj=mem.memid)
if len(n) > 0:
# TODO don't just pick the first?
next_step.append(self.memory.get_mem_by_id(n[0][0]))
else:
n = self.memory.get_triples(
pred_text=p[0], subj=mem.memid, return_obj_text="never"
)
if len(n) > 0:
next_step.append(self.memory.get_mem_by_id(n[0][2]))
if len(n) == 0:
next_step.append(None)
return next_step
def __repr__(self):
return "triple path: " + str(self.path)
class AttributeSequence(Attribute):
def __init__(self, memory, attributes):
self.attributes = attributes
def __call__(self, mems):
out = mems
for a in self.attributes:
out = a(out)
return out
def __repr__(self):
return "sequence attribute " + str(self.attributes)
class ListAttribute(Attribute):
def __init__(self, memory, attributes):
super().__init__(memory)
self.attributes = attributes
def __call__(self, mems):
return list(zip(*[a(mems) for a in self.attributes]))
def __repr__(self):
return "List Attribute: " + self.attributes.format()
class BBoxSize(Attribute):
"""
computes the size(s) of the bounding box of a ReferenceObject. if the
input MemoryNode is not a ReferenceObject returns None.
Attributes:
attribute (str): either height, width, min_width, or size. if is "size",
will return a tuple of (depth, height, width) where the ordering
of "width" and "depth" is undefined
if "width" will return the larger of the non-height dims
if "min_width" will return the smaller of the non-height dims
"""
def __init__(self, memory, attribute="height"):
super().__init__(memory)
self.attribute = attribute
# FIXME in non-MC settings, need to not do +1
def __call__(self, mems):
bounds = [m.get_bounds() if hasattr(m, "get_bounds") else None for m in mems]
if self.attribute == "width":
return [
max(b[1] - b[0] + 1, b[5] - b[4] + 1) if b is not None else None for b in bounds
]
elif self.attribute == "min_width":
return [
min(b[1] - b[0] + 1, b[5] - b[4] + 1) if b is not None else None for b in bounds
]
elif self.attribute == "height":
return [b[3] - b[2] + 1 if b is not None else None for b in bounds]
elif self.attribute == "size":
return [
(b[1] - b[0] + 1, b[3] - b[2] + 1, b[5] - b[4] + 1) if b is not None else None
for b in bounds
]
else:
raise ValueError("tried to get size attribute {}".format(self.attribute))
def __repr__(self):
return "BBoxSize " + str(self.attribute)
class LinearExtentAttribute(Attribute):
"""
computes the (perhaps signed) length between two points in space.
behavior controlled by the location_data array.
if field "relative_direction"=="AWAY", unsigned length
if "relative_direction" in ["LEFT", "RIGHT" ...] projected onto a special direction
and signed. the "arrow" goes from "source" to "destination",
e.g. if destination is more LEFT than source, "LEFT" will be positive
if "relative_direction" in ["INSIDE", "OUTSIDE"], signed length is shifted towards zero
so that 0 is at the boundary of the source.
This is not implemented yet FIXME!!
One of the two points in space is given by the positions of a reference object
either given directly as a memory, or given as FILTERs to search
the other is the list element input into the call
if the field "normalized" is True, and relative direction is a cardinal,
the extent is divided by distance to the fixed reference
"""
def __init__(self, memory, location_data, mem=None, fixed_role="source"):
super().__init__(memory)
self.coordinate_transforms = memory.coordinate_transforms
self.location_data = location_data
self.fixed_role = fixed_role
self.frame = location_data.get("frame") or "AGENT"
self.normalized = location_data.get("normalized", False)
# TODO generalize/formalize this
# TODO: currently stores look vecs/orientations at creation,
# build mechanism to update orientations, e.g. if giving directions
# "first you turn left, then go 7 steps forward, turn right, go 7 steps forward"
# need this in grammar too
# TODO store fixed pitch/yaw etc. with arxiv memories, not raw
try:
if self.frame == "AGENT":
# TODO handle this appropriately!
yaw, pitch = memory._db_read(
"SELECT yaw, pitch FROM ReferenceObjects WHERE uuid=?", memory.self_memid
)[0]
elif self.frame == "ABSOLUTE":
yaw, pitch = self.coordinate_transforms.yaw_pitch(
self.coordinate_transforms.DIRECTIONS["FRONT"]
)
# this is another player/agent; it is assumed that the frame has been
# replaced with the eid of the player/agent
else:
# TODO error if eid not found; but then parent/helper should have caught it?
# TODO error properly if eid is a ref object, but pitch or yaw are null
yaw, pitch = memory._db_read(
"SELECT yaw, pitch FROM ReferenceObjects WHERE eid=?", self.frame
)[0]
except:
# TODO handle this better
raise Exception(
"Unable to find the yaw, pitch in the given frame; maybe can't find the eid?"
)
self.yaw = yaw
self.pitch = pitch
self.mem = mem
self.searcher = "mem"
# put a "NULL" mem in input to not build a searcher
if not self.mem:
self.searcher = self.location_data.get("filter")
if not self.mem and (self.searcher == "mem" or not self.searcher):
raise Exception("Bad linear attribute data, no memory and no searcher specified")
def extent(self, source, destination):
# source and destination are arrays in this function
# arrow goes from source to destination:
diff = np.subtract(destination, source)
if self.location_data["relative_direction"] in ["INSIDE", "OUTSIDE"]:
raise Exception("inside and outside not yet implemented in linear extent")
if self.location_data["relative_direction"] in [
"LEFT",
"RIGHT",
"UP",
"DOWN",
"FRONT",
"BACK",
]:
reldir_vec = self.coordinate_transforms.DIRECTIONS[
self.location_data["relative_direction"]
]
# this should be an inverse transform so we set inverted=True
dir_vec = self.coordinate_transforms.transform(
reldir_vec, self.yaw, self.pitch, inverted=True
)
if self.normalized:
return diff @ dir_vec / np.linalg.norm(diff)
else:
return diff @ dir_vec
else: # AWAY
return np.linalg.norm(diff)
def __call__(self, mems):
if not self.mem:
fixed_mem, _ = self.searcher()
fixed_mem = self.memory.get_mem_by_id(fixed_mem[0])
# fixed_mem = self.searcher.search(self.memory)
# FIXME!!! handle mem not found, more than one, etc.
else:
fixed_mem = self.mem
fixed_pos = fixed_mem.get_pos()
# FIXME TODO store and use an arxiv if we don't want position to track!
if self.fixed_role == "source":
return [self.extent(fixed_pos, mem.get_pos()) for mem in mems]
else:
return [self.extent(mem.get_pos(), fixed_pos) for mem in mems]
def __repr__(self):
return "Attribute: " + str(self.location_data)
class LookRayDistance(Attribute):
"""
computes the distance between a ref_obj_node and a ray given by an agent's
look and pos. The agent's name or eid is an input to the constructor (in addition to the agent
running this attribute)
constructor inputs:
memory: the memory this will run in
eid: the entity id of the LookRay owner (the viewing agent/player/person)
if None, assumes it is the eid from the agent's default_frame
constructor kv inputs:
mode: ="raw" (default) or "normalized". If "normalized",
computes the distance from the ray divided by the distance to the orgin of the ray
"""
def __init__(self, memory, eid, mode="raw"):
super().__init__(memory)
# TODO: currently stores look vecs/orientations at creation,
try:
x, y, z, yaw, pitch = memory._db_read(
"SELECT x, y, z yaw, pitch FROM ReferenceObjects WHERE eid=?", eid
)[0]
except:
# TODO handle this better
raise Exception(
"Unable to find the yaw, pitch of viewing entity when building LookRayDistance"
)
self.yaw = yaw
value of T (Student's t critical value)
two tail = 0.95:
:param conf_level: confidence level (only the two-tailed 0.95 table is hard-coded below)
:param dof: degrees of freedom
:return: critical t value
"""
print(conf_level)
T_value = [
12.71, 4.303, 3.182, 2.776, 2.571, 2.447, 2.365, 2.306, 2.262, 2.228,
2.201, 2.179, 2.160, 2.145, 2.131, 2.120, 2.110, 2.101, 2.093, 2.086,
2.080, 2.074, 2.069, 2.064, 2.060, 2.056, 2.052, 2.048, 2.045, 2.042,
2.040, 2.037, 2.035, 2.032, 2.030, 2.028, 2.026, 2.024, 2.023, 2.021,
2.020, 2.018, 2.017, 2.015, 2.014, 2.013, 2.012, 2.011, 2.010, 2.009,
2.008, 2.007, 2.006, 2.005, 2.004, 2.003, 2.002, 2.002, 2.001, 2.000,
2.000, 1.999, 1.998, 1.998, 1.997, 1.997, 1.996, 1.995, 1.995, 1.994,
1.994, 1.993, 1.993, 1.993, 1.992, 1.992, 1.991, 1.991, 1.990, 1.990,
1.990, 1.989, 1.989, 1.989, 1.988, 1.988, 1.988, 1.987, 1.987, 1.987,
1.986, 1.986, 1.986, 1.986, 1.985, 1.985, 1.985, 1.984, 1.984, 1.984]
# infinity:
if dof > 100:
return 1.960
else:
return T_value[dof - 1]
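# Editor's note (hedged; scipy is not otherwise used in this module): the
# hard-coded table above is the two-tailed 95% critical value of Student's t,
# which can equivalently be computed as the 97.5th percentile:
#
#   from scipy import stats
#   stats.t.ppf(0.975, dof)   # e.g. dof=10 -> ~2.228, large dof -> ~1.960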
# ===================================================
# one tail t test table:
# dof 0.90 0.95 0.975 0.99 0.995 0.999
# 1. 3.078 6.314 12.706 31.821 63.657 318.313
# 2. 1.886 2.920 4.303 6.965 9.925 22.327
# 3. 1.638 2.353 3.182 4.541 5.841 10.215
# 4. 1.533 2.132 2.776 3.747 4.604 7.173
# 5. 1.476 2.015 2.571 3.365 4.032 5.893
# 6. 1.440 1.943 2.447 3.143 3.707 5.208
# 7. 1.415 1.895 2.365 2.998 3.499 4.782
# 8. 1.397 1.860 2.306 2.896 3.355 4.499
# 9. 1.383 1.833 2.262 2.821 3.250 4.296
# 10. 1.372 1.812 2.228 2.764 3.169 4.143
# 11. 1.363 1.796 2.201 2.718 3.106 4.024
# 12. 1.356 1.782 2.179 2.681 3.055 3.929
# 13. 1.350 1.771 2.160 2.650 3.012 3.852
# 14. 1.345 1.761 2.145 2.624 2.977 3.787
# 15. 1.341 1.753 2.131 2.602 2.947 3.733
# 16. 1.337 1.746 2.120 2.583 2.921 3.686
# 17. 1.333 1.740 2.110 2.567 2.898 3.646
# 18. 1.330 1.734 2.101 2.552 2.878 3.610
# 19. 1.328 1.729 2.093 2.539 2.861 3.579
# 20. 1.325 1.725 2.086 2.528 2.845 3.552
# 21. 1.323 1.721 2.080 2.518 2.831 3.527
# 22. 1.321 1.717 2.074 2.508 2.819 3.505
# 23. 1.319 1.714 2.069 2.500 2.807 3.485
# 24. 1.318 1.711 2.064 2.492 2.797 3.467
# 25. 1.316 1.708 2.060 2.485 2.787 3.450
# 26. 1.315 1.706 2.056 2.479 2.779 3.435
# 27. 1.314 1.703 2.052 2.473 2.771 3.421
# 28. 1.313 1.701 2.048 2.467 2.763 3.408
# 29. 1.311 1.699 2.045 2.462 2.756 3.396
# 30. 1.310 1.697 2.042 2.457 2.750 3.385
# 31. 1.309 1.696 2.040 2.453 2.744 3.375
# 32. 1.309 1.694 2.037 2.449 2.738 3.365
# 33. 1.308 1.692 2.035 2.445 2.733 3.356
# 34. 1.307 1.691 2.032 2.441 2.728 3.348
# 35. 1.306 1.690 2.030 2.438 2.724 3.340
# 36. 1.306 1.688 2.028 2.434 2.719 3.333
# 37. 1.305 1.687 2.026 2.431 2.715 3.326
# 38. 1.304 1.686 2.024 2.429 2.712 3.319
# 39. 1.304 1.685 2.023 2.426 2.708 3.313
# 40. 1.303 1.684 2.021 2.423 2.704 3.307
# 41. 1.303 1.683 2.020 2.421 2.701 3.301
# 42. 1.302 1.682 2.018 2.418 2.698 3.296
# 43. 1.302 1.681 2.017 2.416 2.695 3.291
# 44. 1.301 1.680 2.015 2.414 2.692 3.286
# 45. 1.301 1.679 2.014 2.412 2.690 3.281
# 46. 1.300 1.679 2.013 2.410 2.687 3.277
# 47. 1.300 1.678 2.012 2.408 2.685 3.273
# 48. 1.299 1.677 2.011 2.407 2.682 3.269
# 49. 1.299 1.677 2.010 2.405 2.680 3.265
# 50. 1.299 1.676 2.009 2.403 2.678 3.261
# 51. 1.298 1.675 2.008 2.402 2.676 3.258
# 52. 1.298 1.675 2.007 2.400 2.674 3.255
# 53. 1.298 1.674 2.006 2.399 2.672 3.251
# 54. 1.297 1.674 2.005 2.397 2.670 3.248
# 55. 1.297 1.673 2.004 2.396 2.668 3.245
# 56. 1.297 1.673 2.003 2.395 2.667 3.242
# 57. 1.297 1.672 2.002 2.394 2.665 3.239
# 58. 1.296 1.672 2.002 2.392 2.663 3.237
# 59. 1.296 1.671 2.001 2.391 2.662 3.234
# 60. 1.296 1.671 2.000 2.390 2.660 3.232
# 61. 1.296 1.670 2.000 2.389 2.659 3.229
# 62. 1.295 1.670 1.999 2.388 2.657 3.227
# 63. 1.295 1.669 1.998 2.387 2.656 3.225
# 64. 1.295 1.669 1.998 2.386 2.655 3.223
# 65. 1.295 1.669 1.997 2.385 2.654 3.220
# 66. 1.295 1.668 1.997 2.384 2.652 3.218
# 67. 1.294 1.668 1.996 2.383 2.651 3.216
# 68. 1.294 1.668 1.995 2.382 2.650 3.214
# 69. 1.294 1.667 1.995 2.382 2.649 3.213
# 70. 1.294 1.667 1.994 2.381 2.648 3.211
# 71. 1.294 1.667 1.994 2.380 2.647 3.209
# 72. 1.293 1.666 1.993 2.379 2.646 3.207
# 73. 1.293 1.666 1.993 2.379 2.645 3.206
# 74. 1.293 1.666 1.993 2.378 2.644 3.204
# 75. 1.293 1.665 1.992 2.377 2.643 3.202
# 76. 1.293 1.665 1.992 2.376 2.642 3.201
# 77. 1.293 1.665 1.991 2.376 2.641 3.199
# 78. 1.292 1.665 1.991 2.375 2.640 3.198
# 79. 1.292 1.664 1.990 2.374 2.640 3.197
# 80. 1.292 1.664 1.990 2.374 2.639 3.195
# 81. 1.292 1.664 1.990 2.373 2.638 3.194
# 82. 1.292 1.664 1.989 2.373 2.637 3.193
# 83. 1.292 1.663 1.989 2.372 2.636 3.191
# 84. 1.292 1.663 1.989 2.372 2.636 3.190
# 85. 1.292 1.663 1.988 2.371 2.635 3.189
# 86. 1.291 1.663 1.988 2.370 2.634 3.188
# 87. 1.291 1.663 1.988 2.370 2.634 3.187
# 88. 1.291 1.662 1.987 2.369 2.633 3.185
# 89. 1.291 1.662 1.987 2.369 2.632 3.184
# 90. 1.291 1.662 1.987 2.368 2.632 3.183
# 91. 1.291 1.662 1.986 2.368 2.631 3.182
# 92. 1.291 1.662 1.986 2.368 2.630 3.181
# 93. 1.291 1.661 1.986 2.367 2.630 3.180
# 94. 1.291 1.661 1.986 2.367 2.629 3.179
# 95. 1.291 1.661 1.985 2.366 2.629 3.178
# 96. 1.290 1.661 1.985 2.366 2.628 3.177
# 97. 1.290 1.661 1.985 2.365 2.627 3.176
# 98. 1.290 1.661 1.984 2.365 2.627 3.175
# 99. 1.290 1.660 1.984 2.365 2.626 3.175
# 100. 1.290 1.660 1.984 2.364 2.626 3.174
# infinity 1.282 1.645 1.960 2.326 2.576 3.090
def value_mjo_significant_map(phase: int, grid: xr.DataArray = 0, month: str = 0) -> xr.DataArray:
"""
calculate the significance map of an MJO phase, depending on the input OLR data from the ERA5 reanalysis
ONLY in the SWIO area
:param month: like JJA and DJF, etc.
:param grid: output sig_map is remapped to this grid; if grid == 0, no interpolation is done
:param phase: MJO phase to select
:return: 2D boolean significance map
"""
mjo_phase: pd.DataFrame = read_mjo()
# ----------------------------- read necessary data: era5 ttr reanalysis data
# ttr_swio = xr.open_dataset(f'~/local_data/era5/ttr.era5.1999-2016.day.swio.nc')['ttr']
ttr_swio = read_to_standard_da(f'./local_data/era5/ttr.era5.1999-2016.day.swio.nc', var='ttr')
if isinstance(month, str):
ttr_swio = filter_xr_by_month(ttr_swio, month=month)
mjo_phase: pd.DataFrame = filter_df_by_month(mjo_phase, month=month)
# ----------------------------- anomaly OLR -----------------------------
olr_swio = convert_ttr_era5_2_olr(ttr=ttr_swio, is_reanalysis=True)
olr_swio_anomaly = anomaly_daily(olr_swio)
# select phase:
date_index: pd.DatetimeIndex = mjo_phase.loc[mjo_phase['phase'] == phase].index
olr_swio_anomaly_1phase: xr.DataArray = olr_swio_anomaly.sel(time=date_index) # tag: filter
# ----------------------------- calculate sig_map -----------------------------
print(f'calculating significant map, dims={str(olr_swio_anomaly_1phase.shape):s}, waiting ... ')
sig_map_olr: xr.DataArray = value_significant_of_anomaly_2d_mask(field_3d=olr_swio_anomaly_1phase, conf_level=0.05)
# to see if remap is necessary:
if grid == 0:
sig = sig_map_olr.copy()
# no remap
else:
new_sig_map = np.zeros(grid.shape)
old_lon = olr_swio.lon
old_lat = olr_swio.lat
new_lon = grid.lon.values
new_lat = grid.lat.values
# get closest lon:
for lon in range(grid.lon.size):
new_lon[lon] = old_lon[np.abs(old_lon - new_lon[lon]).argmin()]
# get closest lat:
for lat in range(grid.lat.size):
new_lat[lat] = old_lat[np.abs(old_lat - new_lat[lat]).argmin()]
for lat in range(grid.lat.size):
for lon in range(grid.lon.size):
new_sig_map[lat, lon] = sig_map_olr.where((sig_map_olr.lat == new_lat[lat]) &
(sig_map_olr.lon == new_lon[lon]), drop=True).values
sig = xr.DataArray(new_sig_map.astype(bool), coords=[grid.lat, grid.lon], dims=grid.dims)
return sig
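# Editor's sketch (assumes xarray's vectorized .sel with method="nearest";
# not part of the original function): the nearest-neighbour remapping loop
# above could alternatively be written as
#
#   sig = sig_map_olr.sel(lat=grid.lat, lon=grid.lon, method="nearest")
#   sig = sig.assign_coords(lat=grid.lat, lon=grid.lon).astype(bool)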
def value_significant_of_anomaly_2d_mask(field_3d: xr.DataArray, conf_level: float = 0.05,
show: bool = True,
fdr_correction: bool = True,
check_nan_every_grid: bool = False) -> xr.DataArray:
"""
calculate a 2D map of the significance of values, as True/False
:param conf_level: default = 0.05
:param field_3d: has to be in (time, lat, lon)
:return: 2D True/False xr.DataArray
Args:
fdr_correction (): whether to apply a false discovery rate correction
check_nan_every_grid (): check every grid point separately, since they could have NaN at different times
show ():
"""
# change the order of dims
transpose_dims = ['y', 'x']
# there's another coord besides 'y' and 'x', which is the dim along which significance is computed
sig_coord_name = [x for x in field_3d.dims if x not in transpose_dims][0]
# tag, note: change order of dim:
new_dims = [sig_coord_name] + transpose_dims
field_3d = field_3d.transpose(*new_dims)
p_map = np.zeros((field_3d.shape[1], field_3d.shape[2]))
print(f'to get significant map...')
if check_nan_every_grid:
for lat in range(field_3d.shape[1]):
print(f'significant ----- {lat * 100 / len(field_3d.lat): 4.2f} % ...')
for lon in range(field_3d.shape[2]):
grid = field_3d[:, lat, lon]
# select value not nan, removing the nan value
grid_nonnan = grid[np.logical_not(np.isnan(grid))]
# check if there's a bad grid point with all values = nan
# when passing 3D values containing NaN to the t test, it gives only NaN, so better to ...
<filename>convoy/crypto.py
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# compat imports
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from builtins import ( # noqa
bytes, dict, int, list, object, range, str, ascii, chr, hex, input,
next, oct, open, pow, round, super, filter, map, zip)
# stdlib imports
import base64
import collections
import getpass
import logging
import os
try:
import pathlib2 as pathlib
except ImportError:
import pathlib
import tempfile
import stat
import subprocess
# local imports
from . import settings
from . import util
# create logger
logger = logging.getLogger(__name__)
util.setup_logger(logger)
# global defines
_SSH_KEY_PREFIX = 'id_rsa_shipyard'
_REMOTEFS_SSH_KEY_PREFIX = '{}_remotefs'.format(_SSH_KEY_PREFIX)
# named tuples
PfxSettings = collections.namedtuple(
'PfxSettings', ['filename', 'passphrase', 'sha1'])
def get_ssh_key_prefix():
# type: (None) -> str
"""Get SSH key prefix
:rtype: str
:return: ssh key prefix
"""
return _SSH_KEY_PREFIX
def get_remotefs_ssh_key_prefix():
# type: (None) -> str
"""Get remote fs SSH key prefix
:rtype: str
:return: ssh key prefix for remote fs
"""
return _REMOTEFS_SSH_KEY_PREFIX
def generate_rdp_password():
# type: (None) -> str
"""Generate an RDP password
:rtype: str
:return: rdp password
"""
return base64.b64encode(os.urandom(8))
def generate_ssh_keypair(export_path, prefix=None):
# type: (str, str) -> tuple
"""Generate an ssh keypair for use with user logins
:param str export_path: keypair export path
:param str prefix: key prefix
:rtype: tuple
:return: (private key filename, public key filename)
"""
if util.is_none_or_empty(prefix):
prefix = _SSH_KEY_PREFIX
privkey = pathlib.Path(export_path, prefix)
pubkey = pathlib.Path(export_path, prefix + '.pub')
if privkey.exists():
old = pathlib.Path(export_path, prefix + '.old')
if old.exists():
old.unlink()
privkey.rename(old)
if pubkey.exists():
old = pathlib.Path(export_path, prefix + '.pub.old')
if old.exists():
old.unlink()
pubkey.rename(old)
logger.info('generating ssh key pair to path: {}'.format(export_path))
subprocess.check_call(
['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', ''])
return (privkey, pubkey)
def check_ssh_private_key_filemode(ssh_private_key):
# type: (pathlib.Path) -> bool
"""Check SSH private key filemode
:param pathlib.Path ssh_private_key: SSH private key
:rtype: bool
:return: private key filemode is ok
"""
def _mode_check(fstat, flag):
return bool(fstat & flag)
if util.on_windows():
return True
fstat = ssh_private_key.stat().st_mode
modes = frozenset((stat.S_IRWXG, stat.S_IRWXO))
return not any([_mode_check(fstat, x) for x in modes])
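# Editor's example: the check above rejects keys that are readable or
# writable by group or other, e.g.
#
#   0o600 & (stat.S_IRWXG | stat.S_IRWXO) == 0   -> returns True  (mode ok)
#   0o644 has the other-read bit set             -> returns False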
def connect_or_exec_ssh_command(
remote_ip, remote_port, ssh_private_key, username, sync=True,
shell=False, tty=False, ssh_args=None, command=None):
# type: (str, int, pathlib.Path, str, bool, bool, tuple, tuple) -> bool
"""Connect to node via SSH or execute SSH command
:param str remote_ip: remote ip address
:param int remote_port: remote port
:param pathlib.Path ssh_private_key: SSH private key
:param str username: username
:param bool sync: synchronous execution
:param bool shell: execute with shell
:param bool tty: allocate pseudo-tty
:param tuple ssh_args: ssh args
:param tuple command: command
:rtype: int or subprocess.Process
:return: return code or subprocess handle
"""
if not ssh_private_key.exists():
raise RuntimeError('SSH private key file not found at: {}'.format(
ssh_private_key))
# ensure file mode is set properly for the private key
if not check_ssh_private_key_filemode(ssh_private_key):
logger.warning(
'SSH private key filemode is too permissive: {}'.format(
ssh_private_key))
# execute SSH command
ssh_cmd = [
'ssh', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_private_key), '-p', str(remote_port),
]
if tty:
ssh_cmd.append('-t')
if util.is_not_empty(ssh_args):
ssh_cmd.extend(ssh_args)
ssh_cmd.append('{}@{}'.format(username, remote_ip))
if util.is_not_empty(command):
ssh_cmd.extend(command)
logger.info('{} node {}:{} with key {}'.format(
'connecting to' if util.is_none_or_empty(command)
else 'executing command on', remote_ip, remote_port, ssh_private_key))
if sync:
return util.subprocess_with_output(ssh_cmd, shell=shell)
else:
return util.subprocess_nowait_pipe_stdout(
ssh_cmd, shell=shell, pipe_stderr=True)
def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
# type: (str, str, str) -> str
"""Derive a private key pem file from a pfx
:param str pfxfile: pfx file
:param str passphrase: passphrase for pfx
:param str pemfile: path of pem file to write to
:rtype: str
:return: path of pem file
"""
if pfxfile is None:
raise ValueError('pfx file is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# convert pfx to pem
if pemfile is None:
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
pemfile = f.name
try:
# create pem from pfx
subprocess.check_call(
['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
pemfile, '-password', 'pass:' + passphrase]
)
except Exception:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
pemfile = None
return pemfile
def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
# type: (str, str, str) -> str
"""Derive a public key pem file from a pfx
:param str pfxfile: pfx file
:param str passphrase: passphrase for pfx
:param str pemfile: path of pem file to write to
:rtype: str
:return: path of pem file
"""
if pfxfile is None:
raise ValueError('pfx file is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# convert pfx to pem
if pemfile is None:
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
pemfile = f.name
try:
# create pem from pfx
subprocess.check_call(
['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
pemfile, '-password', 'pass:' + passphrase]
)
# extract public key from private key
subprocess.check_call(
['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform',
'PEM', '-out', pemfile]
)
except Exception:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
pemfile = None
return pemfile
def _parse_sha1_thumbprint_openssl(output):
# type: (str) -> str
"""Get SHA1 thumbprint from buffer
:param str output: openssl output buffer to parse
:rtype: str
:return: sha1 thumbprint of buffer
"""
# return just thumbprint (without colons) from the above openssl command
# in lowercase. Expected openssl output is in the form:
# SHA1 Fingerprint=<thumbprint>
return ''.join(util.decode_string(
output).strip().split('=')[1].split(':')).lower()
def get_sha1_thumbprint_pfx(pfxfile, passphrase):
# type: (str, str) -> str
"""Get SHA1 thumbprint of PFX
:param str pfxfile: name of the pfx file to export
:param str passphrase: passphrase for pfx
:rtype: str
:return: sha1 thumbprint of pfx
"""
if pfxfile is None:
raise ValueError('pfxfile is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# compute sha1 thumbprint of pfx
pfxdump = subprocess.check_output(
['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin',
'pass:' + passphrase]
)
proc = subprocess.Popen(
['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0])
def get_sha1_thumbprint_pem(pemfile):
# type: (str) -> str
"""Get SHA1 thumbprint of PEM
:param str pemfile: name of the pem file
:rtype: str
:return: sha1 thumbprint of pem
"""
proc = subprocess.Popen(
['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile],
stdout=subprocess.PIPE
)
return _parse_sha1_thumbprint_openssl(proc.communicate()[0])
def generate_pem_pfx_certificates(config):
# type: (dict) -> str
"""Generate a pem and a derived pfx file
:param dict config: configuration dict
:rtype: str
:return: sha1 thumbprint of pfx
"""
# gather input
pemfile = settings.batch_shipyard_encryption_public_key_pem(config)
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
if pemfile is None:
pemfile = util.get_input('Enter public key PEM filename to create: ')
if pfxfile is None:
pfxfile = util.get_input('Enter PFX filename to create: ')
if passphrase is None:
while util.is_none_or_empty(passphrase):
passphrase = getpass.getpass('Enter password for PFX: ')
if len(passphrase) == 0:
print('passphrase cannot be empty')
privatekey = pemfile + '.key'
# generate pem file with private key and no password
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
try:
subprocess.check_call(
['openssl', 'req', '-new', '-nodes', '-x509', '-newkey',
'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days', '730',
'-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard']
)
# extract public key from private key
subprocess.check_call(
['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform',
'PEM', '-out', pemfile]
)
logger.debug('created public key PEM file: {}'.format(pemfile))
# convert pem to pfx for Azure Batch service
subprocess.check_call(
['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey',
privatekey, '-in', f.name, '-certfile', f.name,
'-passin', 'pass:', '-passout', 'pass:' + passphrase]
)
logger.debug('created PFX file: {}'.format(pfxfile))
finally:
# remove rsa private key file
fp = pathlib.Path(privatekey)
if fp.exists():
fp.unlink()
# remove temp cert pem
fp = pathlib.Path(f.name)
if fp.exists():
fp.unlink()
# get sha1 thumbprint of pfx
detailed structure of the queues can be found in parse_counter_info/parse_irig_info.
If unexpected data length found, this will output some messages:
Error 0: data length is shorter than the header size (4 bytes)
Error 1: data length is shorter than the encoder counter info
even though the encoder packet header is found.
Error 2: data length is shorter than the IRIG info
even though the IRIG packet header is found.
"""
while True:
# If there is data from the socket attached to the beaglebone then
# ready[0] = true
# If not then continue checking for 2 seconds and if there is still no data
# ready[0] = false
ready = select.select([self.sock], [], [], 2)
if ready[0]:
# Add the data from the socket attached to the beaglebone
# to the self.data string
data = self.sock.recv(self.read_chunk_size)
if len(self.data) > 0:
self.data += data
else:
self.data = data
while True:
# Check to make sure that there is at least 1 int in the packet
# The first int in every packet should be the header
if not self.check_data_length(0, 4):
self.log.error('Error 0')
break
header = self.data[0:4]
# Convert a structure value from the beaglebone (header) to an int
header = struct.unpack('<I', header)[0]
#print('header ', '0x%x'%header)
# 0x1EAF = Encoder Packet
# 0xCAFE = IRIG Packet
# 0xE12A = Error Packet
# Encoder
if header == 0x1eaf:
# Make sure the data is the correct length for an Encoder Packet
if not self.check_data_length(0, COUNTER_PACKET_SIZE):
self.log.error('Error 1')
break
# Call the method self.parse_counter_info() to parse the Encoder Packet
self.parse_counter_info(self.data[4 : COUNTER_PACKET_SIZE])
if len(self.data) >= COUNTER_PACKET_SIZE:
self.data = self.data[COUNTER_PACKET_SIZE:]
# IRIG
elif header == 0xcafe:
# Make sure the data is the correct length for an IRIG Packet
if not self.check_data_length(0, IRIG_PACKET_SIZE):
self.log.error('Error 2')
break
# Call the method self.parse_irig_info() to parse the IRIG Packet
self.parse_irig_info(self.data[4 : IRIG_PACKET_SIZE])
if len(self.data) >= IRIG_PACKET_SIZE:
self.data = self.data[IRIG_PACKET_SIZE:]
# Error
# An Error Packet will be sent if there is a timing error in the
# synchronization pulses of the IRIG packet
# If you see 'Packet Error' check to make sure the IRIG is functioning as
# intended and that all the connections are made correctly
elif header == 0xe12a:
self.log.error('Packet Error')
# Clear self.data
self.data = ''
elif header == 0x1234:
self.log.error('Received timeout packet.')
# Clear self.data
self.data = ''
else:
self.log.error('Bad header')
# Clear self.data
self.data = ''
if len(self.data) == 0:
break
break
# If there is no data from the beaglebone 'Looking for data ...' will print
# If you see this make sure that the beaglebone has been set up properly
# print('Looking for data ...')
def parse_counter_info(self, data):
"""Method to parse the Encoder Packet and put them to counter_queue
Parameters
----------
data : str
string for the encoder counter info
Note:
'data' structure:
(Please note that '150' below might be replaced by COUNTER_INFO_LENGTH)
[0] Readout from the quadrature
[1-150] clock counts of 150 data points
[151-300] corresponding clock overflow of the 150 data points (each overflow count
is equal to 2^16 clock counts)
[301-450] corresponding absolute number of the 150 data points ((1, 2, 3, etc ...)
or (150, 151, 152, etc ...) or (301, 302, 303, etc ...) etc ...)
counter_queue structure:
counter_queue = [[64 bit clock counts],
[clock count indicese incremented by every edge],
quadrature,
current system time]
"""
# Convert the Encoder Packet structure into a numpy array
derter = np.array(struct.unpack('<' + 'I'+ 'III'*COUNTER_INFO_LENGTH, data))
# self.quad_queue.append(derter[0].item()) # merged to counter_queue
self.counter_queue.append((derter[1:COUNTER_INFO_LENGTH+1]\
+ (derter[COUNTER_INFO_LENGTH+1:2*COUNTER_INFO_LENGTH+1] << 32), \
derter[2*COUNTER_INFO_LENGTH+1:3*COUNTER_INFO_LENGTH+1], \
derter[0].item(), time.time()))
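# Editor's example (illustrative numbers only): each 64-bit clock count is
# rebuilt from a 32-bit count plus its overflow word, as in the expression above:
#
#   count_low = 123456                 # lower 32 bits from the packet
#   overflow = 2                       # number of 32-bit wrap-arounds
#   count_low + (overflow << 32)       # -> 8590058048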
def parse_irig_info(self, data):
"""Method to parse the IRIG Packet and put them to the irig_queue
Parameters
----------
data : str
string for the IRIG info
Note
----
'data' structure:
[0] clock count of the IRIG Packet which the UTC time corresponds to
[1] overflow count of initial rising edge
[2] binary encoding of the second data
[3] binary encoding of the minute data
[4] binary encoding of the hour data
[5-11] additional IRIG information which we do not use
[12-21] synchronization pulse clock counts
[22-31] overflow count at each synchronization pulse
irig_queue structure:
irig_queue = [Packet clock count,
Packet UTC time in sec,
[binary encoded IRIG data],
[synch pulses clock counts],
current system time]
"""
# Convert the IRIG Packet structure into a numpy array
unpacked_data = struct.unpack('<L' + 'L' + 'L'*10 + 'L'*10 + 'L'*10, data)
# Start of the packet clock count
#overflow.append(unpacked_data[1])
#print "overflow: ", overflow
rising_edge_time = unpacked_data[0] + (unpacked_data[1] << 32)
# Stores IRIG time data
irig_info = unpacked_data[2:12]
# Prints the time information and returns the current time in seconds
irig_time = self.pretty_print_irig_info(irig_info, rising_edge_time)
# Stores synch pulse clock counts accounting for overflow of 32 bit counter
synch_pulse_clock_times = (np.asarray(unpacked_data[12:22])
+ (np.asarray(unpacked_data[22:32]) << 32)).tolist()
# self.irig_queue = [Packet clock count,Packet UTC time in sec,
# [binary encoded IRIG data],[synch pulses clock counts],
# [current system time]]
self.irig_queue.append((rising_edge_time, irig_time, irig_info, \
synch_pulse_clock_times, time.time()))
def __del__(self):
self.sock.close()
class HWPBBBAgent:
"""OCS agent for HWP encoder DAQ using Beaglebone Black
Attributes
----------
rising_edge_count : int
clock count values for the rising edge of IRIG reference marker,
saved for calculating the beaglebone clock frequency
irig_time : int
unix timestamp from IRIG
"""
def __init__(self, agent_obj, port=8080):
self.active = True
self.agent = agent_obj
self.log = agent_obj.log
self.lock = TimeoutLock()
self.port = port
self.take_data = False
self.initialized = False
# For clock count to time conversion
self.rising_edge_count = 0
self.irig_time = 0
agg_params = {'frame_length': 60}
self.agent.register_feed('HWPEncoder', record=True,
agg_params=agg_params)
agg_params = {'frame_length': 60, 'exclude_influx': True}
self.agent.register_feed('HWPEncoder_full', record=True,
agg_params=agg_params)
self.parser = EncoderParser(beaglebone_port=self.port)
def start_acq(self, session, params):
"""Starts acquiring data.
"""
time_encoder_published = 0
counter_list = []
counter_index_list = []
quad_list = []
quad_counter_list = []
received_time_list = []
with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
if not acquired:
self.log.warn('Could not start acq because {} is already running'
.format(self.lock.job))
return False, 'Could not acquire lock.'
session.set_status('running')
self.take_data = True
while self.take_data:
# This is blocking until data are available
self.parser.grab_and_parse_data()
# IRIG data; normally every sec
while len(self.parser.irig_queue):
irig_data = self.parser.irig_queue.popleft()
rising_edge_count = irig_data[0]
irig_time = irig_data[1]
irig_info = irig_data[2]
synch_pulse_clock_counts = irig_data[3]
sys_time = irig_data[4]
data = {'timestamp':sys_time, 'block_name':'HWPEncoder_irig', 'data':{}}
data['data']['irig_time'] = irig_time
data['data']['rising_edge_count'] = rising_edge_count
data['data']['irig_sec'] = de_irig(irig_info[0], 1)
data['data']['irig_min'] = de_irig(irig_info[1], 0)
data['data']['irig_hour'] = de_irig(irig_info[2], 0)
data['data']['irig_day'] = de_irig(irig_info[3], 0) \
+ de_irig(irig_info[4], 0) * 100
data['data']['irig_year'] = de_irig(irig_info[5], 0)
# Beagleboneblack clock frequency measured by IRIG
if self.rising_edge_count > 0 and irig_time > 0:
bbb_clock_freq = float(rising_edge_count - self.rising_edge_count) \
/ (irig_time - self.irig_time)
else:
bbb_clock_freq = 0.
data['data']['bbb_clock_freq'] = bbb_clock_freq
self.agent.publish_to_feed('HWPEncoder', data)
self.rising_edge_count = rising_edge_count
self.irig_time = irig_time
# saving clock counts for every reference edge and every irig bit info
data = {'timestamps':[], 'block_name':'HWPEncoder_irig_raw', 'data':{}}
# 0.09: time difference in seconds b/w reference marker and
# the first index marker
data['timestamps'] = sys_time + 0.09 + np.arange(10) * 0.1
data['data']['irig_synch_pulse_clock_time'] = list(irig_time + 0.09 + \
np.arange(10) * 0.1)
data['data']['irig_synch_pulse_clock_counts'] = synch_pulse_clock_counts
data['data']['irig_info'] = list(irig_info)
self.agent.publish_to_feed('HWPEncoder', data)
## Reducing the packet size, less frequent publishing
# Encoder data; packet coming rate = 570*2*2/150/4 ~ 4Hz packet at 2 Hz rotation
while len(self.parser.counter_queue):
counter_data = self.parser.counter_queue.popleft()
counter_list += counter_data[0].tolist()
counter_index_list += counter_data[1].tolist()
quad_data = counter_data[2]
sys_time = counter_data[3]
received_time_list.append(sys_time)
quad_list.append(quad_data)
quad_counter_list.append(counter_data[0][0])
ct = time.time()
if len(counter_list) >= NUM_ENCODER_TO_PUBLISH \
or (len(counter_list) \
and (ct - time_encoder_published) > SEC_ENCODER_TO_PUBLISH):
# Publishing quadrature data first
data = {'timestamps':[], 'block_name':'HWPEncoder_quad', 'data':{}}
data['timestamps'] = received_time_list
data['data']['quad'] = quad_list
self.agent.publish_to_feed('HWPEncoder', data)
# Publishing counter data
# (full sampled data will not be recorded in influxdb)
data = {'timestamps':[], 'block_name':'HWPEncoder_counter', 'data':{}}
data['data']['counter'] = counter_list
data['data']['counter_index'] = counter_index_list
data['timestamps'] = count2time(counter_list, received_time_list[0])
self.agent.publish_to_feed('HWPEncoder_full', data)
## Subsampled data for influxdb display
data_subsampled = {'block_name':'HWPEncoder_counter_sub', 'data':{}}
data_subsampled['timestamps'] = np.array(data['timestamps'])\
[::NUM_SUBSAMPLE].tolist()
data_subsampled['data']['counter_sub'] = np.array(counter_list)\
[::NUM_SUBSAMPLE].tolist()
data_subsampled['data']['counter_index_sub'] = np.array(counter_index_list)\
[::NUM_SUBSAMPLE].tolist()
self.agent.publish_to_feed('HWPEncoder', data_subsampled)
# For rough estimation of HWP
>>> cols = ["real", "bool", "stringNum", "string"]
>>> df = spark.createDataFrame(data, cols)
>>> hasher = FeatureHasher()
>>> hasher.setInputCols(cols)
FeatureHasher...
>>> hasher.setOutputCol("features")
FeatureHasher...
>>> hasher.transform(df).head().features
SparseVector(262144, {174475: 2.0, 247670: 1.0, 257907: 1.0, 262126: 1.0})
>>> hasher.setCategoricalCols(["real"]).transform(df).head().features
SparseVector(262144, {171257: 1.0, 247670: 1.0, 257907: 1.0, 262126: 1.0})
>>> hasherPath = temp_path + "/hasher"
>>> hasher.save(hasherPath)
>>> loadedHasher = FeatureHasher.load(hasherPath)
>>> loadedHasher.getNumFeatures() == hasher.getNumFeatures()
True
>>> loadedHasher.transform(df).head().features == hasher.transform(df).head().features
True
.. versionadded:: 2.3.0
"""
categoricalCols = Param(Params._dummy(), "categoricalCols",
"numeric columns to treat as categorical",
typeConverter=TypeConverters.toListString)
@keyword_only
def __init__(self, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None):
"""
__init__(self, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None)
"""
super(FeatureHasher, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.FeatureHasher", self.uid)
self._setDefault(numFeatures=1 << 18)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.3.0")
def setParams(self, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None):
"""
setParams(self, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None)
Sets params for this FeatureHasher.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.3.0")
def setCategoricalCols(self, value):
"""
Sets the value of :py:attr:`categoricalCols`.
"""
return self._set(categoricalCols=value)
@since("2.3.0")
def getCategoricalCols(self):
"""
Gets the value of categoricalCols or its default value.
"""
return self.getOrDefault(self.categoricalCols)
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def setNumFeatures(self, value):
"""
Sets the value of :py:attr:`numFeatures`.
"""
return self._set(numFeatures=value)
@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures, JavaMLReadable,
JavaMLWritable):
"""
Maps a sequence of terms to their term frequencies using the hashing trick.
Currently we use <NAME>'s MurmurHash 3 algorithm (MurmurHash3_x86_32)
to calculate the hash code value for the term object.
Since a simple modulo is used to transform the hash function to a column index,
it is advisable to use a power of two as the numFeatures parameter;
otherwise the features will not be mapped evenly to the columns.
>>> df = spark.createDataFrame([(["a", "b", "c"],)], ["words"])
>>> hashingTF = HashingTF(inputCol="words", outputCol="features")
>>> hashingTF.setNumFeatures(10)
HashingTF...
>>> hashingTF.transform(df).head().features
SparseVector(10, {5: 1.0, 7: 1.0, 8: 1.0})
>>> hashingTF.setParams(outputCol="freqs").transform(df).head().freqs
SparseVector(10, {5: 1.0, 7: 1.0, 8: 1.0})
>>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"}
>>> hashingTF.transform(df, params).head().vector
SparseVector(5, {0: 1.0, 2: 1.0, 3: 1.0})
>>> hashingTFPath = temp_path + "/hashing-tf"
>>> hashingTF.save(hashingTFPath)
>>> loadedHashingTF = HashingTF.load(hashingTFPath)
>>> loadedHashingTF.getNumFeatures() == hashingTF.getNumFeatures()
True
>>> hashingTF.indexOf("b")
5
.. versionadded:: 1.3.0
"""
binary = Param(Params._dummy(), "binary", "If True, all non zero counts are set to 1. " +
"This is useful for discrete probabilistic models that model binary events " +
"rather than integer counts. Default False.",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
__init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
"""
super(HashingTF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.HashingTF", self.uid)
self._setDefault(numFeatures=1 << 18, binary=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.3.0")
def setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
Sets params for this HashingTF.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setBinary(self, value):
"""
Sets the value of :py:attr:`binary`.
"""
return self._set(binary=value)
@since("2.0.0")
def getBinary(self):
"""
Gets the value of binary or its default value.
"""
return self.getOrDefault(self.binary)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def setNumFeatures(self, value):
"""
Sets the value of :py:attr:`numFeatures`.
"""
return self._set(numFeatures=value)
@since("3.0.0")
def indexOf(self, term):
"""
Returns the index of the input term.
"""
self._transfer_params_to_java()
return self._java_obj.indexOf(term)
class _IDFParams(HasInputCol, HasOutputCol):
"""
Params for :py:class:`IDF` and :py:class:`IDFModel`.
.. versionadded:: 3.0.0
"""
minDocFreq = Param(Params._dummy(), "minDocFreq",
"minimum number of documents in which a term should appear for filtering",
typeConverter=TypeConverters.toInt)
@since("1.4.0")
def getMinDocFreq(self):
"""
Gets the value of minDocFreq or its default value.
"""
return self.getOrDefault(self.minDocFreq)
@inherit_doc
class IDF(JavaEstimator, _IDFParams, JavaMLReadable, JavaMLWritable):
"""
Compute the Inverse Document Frequency (IDF) given a collection of documents.
>>> from pyspark.ml.linalg import DenseVector
>>> df = spark.createDataFrame([(DenseVector([1.0, 2.0]),),
... (DenseVector([0.0, 1.0]),), (DenseVector([3.0, 0.2]),)], ["tf"])
>>> idf = IDF(minDocFreq=3)
>>> idf.setInputCol("tf")
IDF...
>>> idf.setOutputCol("idf")
IDF...
>>> model = idf.fit(df)
>>> model.getMinDocFreq()
3
>>> model.idf
DenseVector([0.0, 0.0])
>>> model.docFreq
[0, 3]
>>> model.numDocs == df.count()
True
>>> model.transform(df).head().idf
DenseVector([0.0, 0.0])
>>> idf.setParams(outputCol="freqs").fit(df).transform(df).collect()[1].freqs
DenseVector([0.0, 0.0])
>>> params = {idf.minDocFreq: 1, idf.outputCol: "vector"}
>>> idf.fit(df, params).transform(df).head().vector
DenseVector([0.2877, 0.0])
>>> idfPath = temp_path + "/idf"
>>> idf.save(idfPath)
>>> loadedIdf = IDF.load(idfPath)
>>> loadedIdf.getMinDocFreq() == idf.getMinDocFreq()
True
>>> modelPath = temp_path + "/idf-model"
>>> model.save(modelPath)
>>> loadedModel = IDFModel.load(modelPath)
>>> loadedModel.transform(df).head().idf == model.transform(df).head().idf
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, minDocFreq=0, inputCol=None, outputCol=None):
"""
__init__(self, minDocFreq=0, inputCol=None, outputCol=None)
"""
super(IDF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IDF", self.uid)
self._setDefault(minDocFreq=0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, minDocFreq=0, inputCol=None, outputCol=None):
"""
setParams(self, minDocFreq=0, inputCol=None, outputCol=None)
Sets params for this IDF.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMinDocFreq(self, value):
"""
Sets the value of :py:attr:`minDocFreq`.
"""
return self._set(minDocFreq=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def _create_model(self, java_model):
return IDFModel(java_model)
class IDFModel(JavaModel, _IDFParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`IDF`.
.. versionadded:: 1.4.0
"""
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("2.0.0")
def idf(self):
"""
Returns the IDF vector.
"""
return self._call_java("idf")
@property
@since("3.0.0")
def docFreq(self):
"""
Returns the document frequency.
"""
return self._call_java("docFreq")
@property
@since("3.0.0")
def numDocs(self):
"""
Returns number of documents evaluated to compute idf
"""
return self._call_java("numDocs")
class _ImputerParams(HasInputCol, HasInputCols, HasOutputCol, HasOutputCols):
"""
Params for :py:class:`Imputer` and :py:class:`ImputerModel`.
.. versionadded:: 3.0.0
"""
strategy = Param(Params._dummy(), "strategy",
"strategy for imputation. If mean, then replace missing values using the mean "
"value of the feature. If median, then replace missing values using the "
"median value of the feature.",
typeConverter=TypeConverters.toString)
missingValue = Param(Params._dummy(), "missingValue",
"The placeholder for the missing values. All occurrences of missingValue "
"will be imputed.", typeConverter=TypeConverters.toFloat)
@since("2.2.0")
def getStrategy(self):
"""
Gets the value of :py:attr:`strategy` or its default value.
"""
return self.getOrDefault(self.strategy)
@since("2.2.0")
def getMissingValue(self):
"""
Gets the value of :py:attr:`missingValue` or its default value.
"""
return self.getOrDefault(self.missingValue)
@inherit_doc
class Imputer(JavaEstimator, _ImputerParams, JavaMLReadable, JavaMLWritable):
"""
Imputation estimator for completing missing values, either using the mean or the median
of the columns in which the missing values are located. The input columns should be of
DoubleType or FloatType. Currently Imputer does not support categorical features and
possibly creates incorrect values for a categorical feature.
Note that the mean/median value is computed after filtering out missing values.
All Null values in the input columns are treated as missing, and so are also imputed. For
computing median, :py:meth:`pyspark.sql.DataFrame.approxQuantile` is used with a
relative error of `0.001`.
>>> df = spark.createDataFrame([(1.0, float("nan")), (2.0, float("nan")), (float("nan"), 3.0),
... (4.0, 4.0), (5.0, 5.0)], ["a", "b"])
>>> imputer = Imputer()
>>> imputer.setInputCols(["a", "b"])
Imputer...
>>> imputer.setOutputCols(["out_a", "out_b"])
Imputer...
>>> model = imputer.fit(df)
>>> model.getStrategy()
'mean'
>>> model.surrogateDF.show()
+---+---+
| a| b|
+---+---+
|3.0|4.0|
+---+---+
...
>>> model.transform(df).show()
+---+---+-----+-----+
| a| b|out_a|out_b|
+---+---+-----+-----+
|1.0|NaN| 1.0| 4.0|
|2.0|NaN| 2.0| 4.0|
|NaN|3.0| 3.0| 3.0|
...
>>> imputer.setStrategy("median").setMissingValue(1.0).fit(df).transform(df).show()
+---+---+-----+-----+
| a| b|out_a|out_b|
+---+---+-----+-----+
|1.0|NaN| 4.0| NaN|
...
>>> df1 = spark.createDataFrame([(1.0,), (2.0,), (float("nan"),), (4.0,), (5.0,)], ["a"])
>>> imputer1 = Imputer(inputCol="a", outputCol="out_a")
>>> model1 = imputer1.fit(df1)
>>> model1.surrogateDF.show()
+---+
| a|
+---+
|3.0|
+---+
...
>>> model1.transform(df1).show()
+---+-----+
| a|out_a|
+---+-----+
|1.0| 1.0|
|2.0| 2.0|
|NaN| 3.0|
...
>>> imputer1.setStrategy("median").setMissingValue(1.0).fit(df1).transform(df1).show()
+---+-----+
| a|out_a|
+---+-----+
|1.0| 4.0|
...
>>> df2 = spark.createDataFrame([(float("nan"),), (float("nan"),), (3.0,), (4.0,), (5.0,)],
... ["b"])
>>> imputer2 = Imputer(inputCol="b", outputCol="out_b")
>>> model2 = imputer2.fit(df2)
>>> model2.surrogateDF.show()
+---+
| b|
+---+
|4.0|
+---+
...
>>> model2.transform(df2).show()
+---+-----+
| b|out_b|
+---+-----+
|NaN| 4.0|
|NaN| 4.0|
|3.0| 3.0|
...
>>> imputer2.setStrategy("median").setMissingValue(1.0).fit(df2).transform(df2).show()
+---+-----+
| b|out_b|
+---+-----+
|NaN| NaN|
...
>>> imputerPath = temp_path + "/imputer"
>>> imputer.save(imputerPath)
>>> loadedImputer = Imputer.load(imputerPath)
>>> loadedImputer.getStrategy() == imputer.getStrategy()
True
>>> loadedImputer.getMissingValue()
1.0
>>> modelPath = temp_path + "/imputer-model"
>>> model.save(modelPath)
>>> loadedModel = ImputerModel.load(modelPath)
>>> loadedModel.transform(df).head().out_a == model.transform(df).head().out_a
True
.. versionadded:: 2.2.0
"""
@keyword_only
def __init__(self, strategy="mean", missingValue=float("nan"), inputCols=None,
"""
A module for deep feature selection based on multilayer perceptrons.
This module uses a deep structure with a modest number of hidden layers,
so stochastic gradient descent (back-propagation) is used for optimization.
Copyright (c) 2008-2013, Theano Development Team All rights reserved.
<NAME>
CMMT, UBC, Vancouver
Sep 23, 2014
Contact: <EMAIL>
"""
from __future__ import division
import pickle
import time
import math
import copy
import numpy
import theano
import theano.tensor as T
from logistic_sgd import LogisticRegression
import classification as cl
def relu(x):
return 0.5*(x+abs(x))
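# Editor's note: 0.5*(x + abs(x)) equals max(x, 0), the standard rectified
# linear unit; with plain numpy:
#
#   import numpy as np
#   x = np.array([-2.0, 0.0, 3.0])
#   0.5 * (x + np.abs(x))    # -> array([0., 0., 3.])
#   np.maximum(x, 0.0)       # -> array([0., 0., 3.])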
class InputLayer(object):
def __init__(self, input, n_in, w=None):
"""
In the input layer x_i is multiplied by w_i.
<NAME>, in UBC.
Aug 26, 2014.
"""
self.input=input
if w is None:
w_values = numpy.ones((n_in,), dtype=theano.config.floatX)
# w_values = numpy.asarray(rng.uniform(
# low=0, high=1,
# size=(n_in,)), dtype=theano.config.floatX)
w = theano.shared(value=w_values, name='w', borrow=True)
self.w=w
#u_values = numpy.ones((n_in,), dtype=theano.config.floatX)
#u = theano.shared(value=u_values, name='u', borrow=True)
#self.u=u # auxiliary variable for non-negativity
self.output = self.w * self.input
#self.params=[w,u]
self.params=[w]
def get_predicted(self,data):
return self.w * data
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh by default.
Hidden unit activation is thus given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
self.activation=activation
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
def get_predicted(self,data):
lin_output = T.dot(data, self.W) + self.b
output = (lin_output if self.activation is None
else self.activation(lin_output))
return output
class DFS(object):
"""
Deep feature selection class. One-one input layer + MLP.
"""
def __init__(self, rng, n_in, n_hidden, n_out, x=None, y=None, activation=T.tanh,
lambda1=0.001, lambda2=1.0, alpha1=0.001, alpha2=0.0):
"""Initialize the parameters for the DFL class.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
activation: activation function, from {T.tanh, T.nnet.sigmoid}
lambda1: float scalar, control the sparsity of the input weights.
The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ).
Thus, the larger lambda1 is, the sparser the input weights are.
lambda2: float scalar, control the smoothness of the input weights.
The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ).
Thus, the larger lambda2 is, the smoother the input weights are.
alpha1: float scalar, control the sparsity of the weight matrices in MLP.
The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ).
Thus, the larger alpha1 is, the sparser the MLP weights are.
alpha2: float scalar, control the smoothness of the weight matrices in MLP.
The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ).
Thus, the larger alpha2 is, the smoother the MLP weights are.
"""
if not x:
x=T.matrix('x')
self.x=x
if not y:
y=T.ivector('y')
self.y=y
self.hidden_layers=[]
self.params=[]
self.n_layers=len(n_hidden)
input_layer=InputLayer(input=self.x,n_in=n_in)
self.params.extend(input_layer.params)
self.input_layer=input_layer
for i in range(len(n_hidden)):
if i==0: # first hidden layer
hd=HiddenLayer(rng=rng, input=self.input_layer.output, n_in=n_in, n_out=n_hidden[i],
activation=activation)
else:
hd=HiddenLayer(rng=rng, input=self.hidden_layers[i-1].output, n_in=n_hidden[i-1], n_out=n_hidden[i],
activation=activation)
self.hidden_layers.append(hd)
self.params.extend(hd.params)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
if len(n_hidden)<=0:
self.logRegressionLayer = LogisticRegression(
input=self.input_layer.output,
n_in=n_in,
n_out=n_out)
else:
self.logRegressionLayer = LogisticRegression(
input=self.hidden_layers[-1].output,
n_in=n_hidden[-1],
n_out=n_out)
self.params.extend(self.logRegressionLayer.params)
# regularization terms
self.L1_input=T.abs_(self.input_layer.w).sum()
self.L2_input=(self.input_layer.w **2).sum()
self.hinge_loss_neg=(T.maximum(0,-self.input_layer.w)).sum() # penalize negative values
self.hinge_loss_pos=(T.maximum(0,self.input_layer.w)).sum() # penalize positive values
L1s=[]
L2_sqrs=[]
#L1s.append(abs(self.hidden_layers[0].W).sum())
for i in range(len(n_hidden)):
L1s.append (T.abs_(self.hidden_layers[i].W).sum())
L2_sqrs.append((self.hidden_layers[i].W ** 2).sum())
L1s.append(T.abs_(self.logRegressionLayer.W).sum())
L2_sqrs.append((self.logRegressionLayer.W ** 2).sum())
self.L1 = T.sum(L1s)
self.L2_sqr = T.sum(L2_sqrs)
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors(self.y)
# lambda3=0.5
# self.cost = self.negative_log_likelihood(self.y) \
# + lambda1*(1.0-lambda2)*0.5*self.L2_input \
# + lambda1*lambda2*(1.0-lambda3)*self.hinge_loss_pos \
# + lambda1*lambda2*lambda3*self.hinge_loss_neg \
# + alpha1*(1.0-alpha2)*0.5 * self.L2_sqr + alpha1*alpha2 * self.L1
self.cost = self.negative_log_likelihood(self.y) \
+ lambda1*(1.0-lambda2)*0.5*self.L2_input \
+ lambda1*lambda2*self.L1_input \
+ alpha1*(1.0-alpha2)*0.5* self.L2_sqr + alpha1*alpha2 * self.L1
#self.cost = self.negative_log_likelihood(self.y) \
# + lambda1*(1.0-lambda2)*(0.5/n_in)*self.L2_input \
# + lambda1*lambda2*(1/n_in)*self.L1_input \
# + alpha1*(1.0-alpha2)*0.5 * self.L2_sqr + alpha1*alpha2 * self.L1
self.y_pred=self.logRegressionLayer.y_pred
self.y_pred_prob=self.logRegressionLayer.y_pred_prob
def build_train_function(self, train_set_x, train_set_y, batch_size, alpha, learning_rate_shared):
"""
Build the minibatch training function (momentum SGD) that returns the training cost.
"""
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
grads = T.grad(self.cost, self.params)
# add momentum
# initialize the delta_i-1
delta_before=[]
for param_i in self.params:
delta_before_i=theano.shared(value=numpy.zeros(param_i.get_value().shape))
delta_before.append(delta_before_i)
updates = []
for param_i, grad_i, delta_before_i in zip(self.params, grads, delta_before):
delta_i=-learning_rate_shared * grad_i + alpha*delta_before_i
updates.append((param_i, param_i + delta_i ))
updates.append((delta_before_i,delta_i))
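# Editor's note: the loop above implements classical momentum SGD,
#   delta_t = alpha * delta_{t-1} - learning_rate * grad,
#   param  <- param + delta_t,
# where alpha is the momentum coefficient passed to build_train_function.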
train_model_cost = theano.function([index], self.cost, updates=updates,
givens={
self.x: train_set_x[index * batch_size: (index + 1) * batch_size],
self.y: train_set_y[index * batch_size: (index + 1) * batch_size]},
name='train')
return train_model_cost
def build_valid_function(self,valid_set_x, valid_set_y, batch_size):
"""
Build symbolic validation function.
"""
n_valid_batches = int(math.ceil(valid_set_x.get_value(borrow=True).shape[0] / batch_size))
index = T.lscalar('index') # index to a [mini]batch
valid_error_i = theano.function([index], self.errors,
givens={self.x: valid_set_x[index * batch_size:(index + 1) * batch_size],
self.y: valid_set_y[index * batch_size:(index + 1) * batch_size]},
name='valid')
# Create a function that scans the entire validation set
def valid_error():
return [valid_error_i(i) for i in xrange(n_valid_batches)]
return valid_error
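# Editor's note: valid_error() returns one error rate per minibatch; a typical caller
# would average them, e.g. this_validation_loss = numpy.mean(valid_error()).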
def build_test_function(self, test_set_x, batch_size):
"""
Build symbolic test function.
"""
n_test_batches = int(math.ceil(test_set_x.get_value(borrow=True).shape[0] / batch_size))
index = T.lscalar('index') # index to a [mini]batch
test_pred_i = theano.function([index], [self.y_pred,self.y_pred_prob],
givens={self.x: test_set_x[index * batch_size : (index + 1) * batch_size]},
name='test')
# Create a function that scans the entire test set
def test_pred():
y_pred=[]
y_pred_prob=[]
for i in xrange(n_test_batches):
label,prob=test_pred_i(i)
y_pred.extend(label)
y_pred_prob.extend(prob)
return y_pred,y_pred_prob
return test_pred
def get_predicted(self,data):
for i in range(len(self.hidden_layers)):
data=self.hidden_layers[i].get_predicted(data)
p_y_given_x = T.nnet.softmax(T.dot(data, self.logRegressionLayer.W) + self.logRegressionLayer.b)
y_pred = T.argmax(p_y_given_x, axis=1)
y_pred_prob = T.max(p_y_given_x, axis=1) # probability of the predicted class, not its index
return y_pred,y_pred_prob
def get_params(self):
return copy.deepcopy(self.params)
def set_params(self, given_params):
self.params=given_params
def print_params(self):
for param in self.params:
print param.get_value(borrow=True)
def save_params(self,filename):
f=open(filename,'w') # remove existing file
f.close()
f=open(filename,'a')
for param in self.params:
pickle.dump(param.get_value(borrow=True),f)
f.close()
def read_params(self,filename):
# load every parameter array that save_params dumped sequentially
f=open(filename,'r')
params=[]
while True:
try: params.append(pickle.load(f))
except EOFError: break
f.close()
return params
def train_model(train_set_x_org=None, train_set_y_org=None, valid_set_x_org=None, valid_set_y_org=None,
learning_rate=0.1, alpha=0.01,
lambda1=0.001, lambda2=1.0, alpha1=0.001, alpha2=0.0,
n_hidden=[256,128,16], n_epochs=1000, batch_size=100,
activation_func="tanh", rng=numpy.random.RandomState(100),
max_num_epoch_change_learning_rate=100,max_num_epoch_change_rate=0.8,learning_rate_decay_rate=0.8):
"""
Train a deep feature selection model.
INPUTS:
train_set_x_org: numpy 2d array, each row is a training sample.
train_set_y_org: numpy vector of type int {0,1,...,C-1}, class labels of | |
#!/usr/bin/env python3
from __future__ import print_function
from builtins import range
import ROOT
from array import array
from Validation.RecoTrack.plotting.ntuple import *
import analysis
from math import sqrt, copysign, sin, cos, pi
class EventPlotter(object):
def __init__(self):
self.plots_2D = []
self.plots_3D = []
c = ROOT.TColor()
self.colors_G = [c.GetColor(0,255,0), c.GetColor(0,185,0), c.GetColor(50,255,50), \
c.GetColor(0,100,0), c.GetColor(50,155,0), c.GetColor(0,70,155), \
c.GetColor(0,255,0), c.GetColor(0,255,0), c.GetColor(0,255,0)]
self.colors_B = [c.GetColor(0,0,255), c.GetColor(0,0,155), c.GetColor(50,50,255), \
c.GetColor(0,0,80), c.GetColor(50,0,155), c.GetColor(0,70,155), \
c.GetColor(0,0,255), c.GetColor(0,0,255), c.GetColor(0,0,255)]
def Reset(self):
self.plots_2D = []
self.plots_3D = []
def PlotEvent3DHits(self, event, flag="PSG"):
'''
Plots the 3D hits of an event.
flag is a string that defines which hit types to plot
(p for pixel, s for strip, g for glued)
'''
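# Example (editor's addition): flag="PS" would plot only pixel and strip hits.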
if('p' in flag or 'P' in flag):
pixel_hits = event.pixelHits()
pix_coords = []
for hit in pixel_hits:
pix_coords.append(hit.x())
pix_coords.append(hit.y())
pix_coords.append(hit.z())
pix_plot = ROOT.TPolyMarker3D(len(pix_coords)//3, array('f', pix_coords), 1)
pix_plot.SetMarkerColor(4)
if('s' in flag or 'S' in flag):
strip_hits = event.stripHits()
str_coords = []
for hit in strip_hits:
str_coords.append(hit.x())
str_coords.append(hit.y())
str_coords.append(hit.z())
str_plot = ROOT.TPolyMarker3D(len(str_coords)//3, array('f', str_coords), 1)
str_plot.SetMarkerColor(2)
if('g' in flag or 'G' in flag):
glued_hits = event.gluedHits()
glu_coords = []
for hit in glued_hits:
glu_coords.append(hit.x())
glu_coords.append(hit.y())
glu_coords.append(hit.z())
glu_plot = ROOT.TPolyMarker3D(len(glu_coords)//3, array('f', glu_coords), 1)
glu_plot.SetMarkerColor(3)
if('p' in flag or 'P' in flag): self.plots_3D.append(pix_plot)
if('s' in flag or 'S' in flag): self.plots_3D.append(str_plot)
if('g' in flag or 'G' in flag): self.plots_3D.append(glu_plot)
def PlotXY(self, event, limits=[-1000,1000], flag="PSG"):
'''
Plots the hits of an event in an XY plane.
flag is a string that defines which hit types to plot
(p for pixel, s for strip, g for glued)
'''
#ntuple.tree().GetBranch("pix_x")
if('p' in flag or 'P' in flag):
pixel_hits = event.pixelHits()
pix_x = []
pix_y = []
for hit in pixel_hits:
if(limits[0] < hit.z() < limits[1]):
pix_x.append(hit.x())
pix_y.append(hit.y())
pix_plot = ROOT.TGraph(len(pix_x), array('f', pix_x), array('f', pix_y))
pix_plot.SetMarkerColor(4)
if('s' in flag or 'S' in flag):
strip_hits = event.stripHits()
str_x = []
str_y = []
for hit in strip_hits:
if(limits[0] < hit.z() < limits[1]):
str_x.append(hit.x())
str_y.append(hit.y())
str_plot = ROOT.TGraph(len(str_x), array('f', str_x), array('f', str_y))
str_plot.SetMarkerColor(2)
if('g' in flag or 'G' in flag):
glued_hits = event.gluedHits()
glu_x = []
glu_y = []
for hit in glued_hits:
if(limits[0] < hit.z() < limits[1]):
glu_x.append(hit.x())
glu_y.append(hit.y())
glu_plot = ROOT.TGraph(len(glu_x), array('f', glu_x), array('f', glu_y))
glu_plot.SetMarkerColor(3)
plot = ROOT.TMultiGraph()
if('p' in flag or 'P' in flag): plot.Add(pix_plot,"P")
if('s' in flag or 'S' in flag): plot.Add(str_plot,"P")
if('g' in flag or 'G' in flag): plot.Add(glu_plot,"P")
self.plots_2D.append(plot)
def PlotZY(self, event, limits=[-1000,1000], flag="PSG"):
'''
Plots the hits of an event in a ZY plane.
flag is a string that defines which hit types to plot
(p for pixel, s for strip, g for glued)
'''
#ntuple.tree().GetBranch("pix_x")
if('p' in flag or 'P' in flag):
pixel_hits = event.pixelHits()
pix_z = []
pix_y = []
for hit in pixel_hits:
if(limits[0] < hit.z() < limits[1]):
pix_z.append(hit.z())
pix_y.append(hit.y())
pix_plot = ROOT.TGraph(len(pix_z), array('f', pix_z), array('f', pix_y))
pix_plot.SetMarkerColor(4)
if('s' in flag or 'S' in flag):
strip_hits = event.stripHits()
str_z = []
str_y = []
for hit in strip_hits:
if(limits[0] < hit.z() < limits[1]):
str_z.append(hit.z())
str_y.append(hit.y())
str_plot = ROOT.TGraph(len(str_z), array('f', str_z), array('f', str_y))
str_plot.SetMarkerColor(2)
if('g' in flag or 'G' in flag):
glued_hits = event.gluedHits()
glu_z = []
glu_y = []
for hit in glued_hits:
if(limits[0] < hit.z() < limits[1]):
glu_z.append(hit.z())
glu_y.append(hit.y())
glu_plot = ROOT.TGraph(len(glu_z), array('f', glu_z), array('f', glu_y))
glu_plot.SetMarkerColor(3)
plot = ROOT.TMultiGraph()
if('p' in flag or 'P' in flag): plot.Add(pix_plot,"P")
if('s' in flag or 'S' in flag): plot.Add(str_plot,"P")
if('g' in flag or 'G' in flag): plot.Add(glu_plot,"P")
self.plots_2D.append(plot)
def PlotTracksXY(self, tracks):
plot = ROOT.TMultiGraph()
for track in tracks:
X = []; Y = [];
for hit in track.hits():
if(hit.isValid()):
X.append(hit.x())
Y.append(hit.y())
plot.Add(ROOT.TGraph(len(X),array("f",X),array("f",Y)),"L")
self.plots_2D.append(plot)
def PlotTracksZY(self, tracks):
plot = ROOT.TMultiGraph()
for track in tracks:
Y = []; Z = [];
for hit in track.hits():
if(hit.isValid()):
Y.append(hit.y())
Z.append(hit.z())
plot.Add(ROOT.TGraph(len(Z),array("f",Z),array("f",Y)),"L")
self.plots_2D.append(plot)
def PlotTracks3D(self, tracks):
for track in tracks:
X = []; Y = []; Z = [];
for hit in track.hits():
if(hit.isValid()):
X.append(hit.x())
Y.append(hit.y())
Z.append(hit.z())
if(X):
self.plots_3D.append(ROOT.TPolyLine3D(len(X),array("f",X),array("f",Y),array("f",Z)))
def PlotPixelTracks3D(self, tracks):
for track in tracks:
X = []; Y = []; Z = [];
for hit in track.pixelHits():
if(hit.isValid()):
X.append(hit.x())
Y.append(hit.y())
Z.append(hit.z())
if(X):
self.plots_3D.append(ROOT.TPolyLine3D(len(X),array("f",X),array("f",Y),array("f",Z)))
def PlotPixelGluedTracks3D(self, tracks):
for track in tracks:
X = []; Y = []; Z = [];
for hit in track.hits():
if(hit.isValid() and hit.hitType != 1):
X.append(hit.x())
Y.append(hit.y())
Z.append(hit.z())
if(X):
self.plots_3D.append(ROOT.TPolyLine3D(len(X),array("f",X),array("f",Y),array("f",Z)))
def PlotTrack3D(self, track, color = 1):
'''Plots a single track and prints track info'''
# Not so hazardous experimental edit:
#hits = sorted([hit for hit in track.hits()], key = lambda hit: hit.index())
#print [hit.index() for hit in hits]
X = []; Y = []; Z = [];
for hit in track.hits(): #hits: #track.hits():
if(hit.isValid()):
X.append(hit.x())
Y.append(hit.y())
Z.append(hit.z())
if(not X):
print("Track has no valid points")
return
plot = ROOT.TPolyLine3D(len(X),array("f",X),array("f",Y),array("f",Z))
plot.SetLineColor(color)
self.plots_3D.append(plot)
'''
print "Track parameters:"
print "px : " + str(track.px())
print "py : " + str(track.py())
print "pz : " + str(track.pz())
print "pt : " + str(track.pt())
print "eta : " + str(track.eta())
print "phi : " + str(track.phi())
print "dxy : " + str(track.dxy())
print "dz : " + str(track.dz())
print "q : " + str(track.q())
'''
def TrackHelix(self, track, color = 1, style = 0):
'''Creates a THelix object which can be plotted with Draw() method.'''
if isinstance(track, TrackingParticle):
phi = track.pca_phi()
dxy = track.pca_dxy()
dz = track.pca_dz()
else:
phi = track.phi()
dxy = track.dxy()
dz = track.dz()
xyz = array("d", [-dxy*ROOT.TMath.Sin(phi), dxy*ROOT.TMath.Cos(phi), dz])
v = array("d", [track.px(), track.py(), track.pz()])
w = 0.3*3.8*track.q()*0.01 # Angular frequency = 0.3*B*q*fudge factor, close enough
z_last = dz
for hit in track.hits():
if(hit.isValidHit()): z_last = hit.z()
helix = ROOT.THelix(xyz, v, w, array("d", [dz, z_last]))
helix.SetLineColor(color)
if style == 1: helix.SetLineStyle(9)
if style == 2: helix.SetLineStyle(7)
return helix
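# Editor's note on TrackHelix: the 0.3*3.8*q factor above follows the usual
# pT[GeV] ~ 0.3 * B[T] * R[m] * |q| rule of thumb for track curvature in CMS's 3.8 T
# solenoid; the trailing 0.01 is the rough scale factor the original comment admits to.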
def Plot3DHelixes(self, tracks, color = 1, style = 0):
for track in tracks:
if(track.hits()):
self.plots_3D.append(self.TrackHelix(track, color, style))
def Plot3DHelix(self, track, color = 1, style = 0):
if(track.hits()):
self.plots_3D.append(self.TrackHelix(track, color, style))
def Plot3DHits(self, track, color = 1, style = 0):
'''
Plots the 3D hits from a track.
'''
pix_coords = []
for hit in track.pixelHits():
if hit.isValid():
pix_coords.append(hit.x())
pix_coords.append(hit.y())
pix_coords.append(hit.z())
if pix_coords:
pix_plot = ROOT.TPolyMarker3D(len(pix_coords)//3, array('f', pix_coords), 2)
pix_plot.SetMarkerColor(color)
if style == 1: pix_plot.SetMarkerStyle(5)
if style == 2: pix_plot.SetMarkerStyle(4)
self.plots_3D.append(pix_plot)
for hit in track.gluedHits():
if hit.isValid():
x = hit.x(); y = hit.y(); z = hit.z()
if hit.isBarrel():
X = [x, x]
Y = [y, y]
Z = [z - sqrt(hit.zz()), z + sqrt(hit.zz())]
else:
X = [x - copysign(sqrt(hit.xx()),x), x + copysign(sqrt(hit.xx()),x)]
Y = [y - copysign(sqrt(hit.yy()),y), y + copysign(sqrt(hit.yy()),y)]
Z = [hit.z(), hit.z()]
glu_plot = ROOT.TPolyLine3D(len(X),array("f",X),array("f",Y),array("f",Z))
#glu_plot.SetLineStyle(2)
if style == 1: glu_plot.SetLineStyle(2)
if style == 2: glu_plot.SetLineStyle(3)
glu_plot.SetLineColor(color)
self.plots_3D.append(glu_plot)
for hit in track.stripHits():
if hit.isValid():
x = hit.x(); y = hit.y(); z = hit.z()
if hit.isBarrel():
X = [x, x]
Y = [y, y]
Z = [z - 1.5*sqrt(hit.zz()), z + 1.5*sqrt(hit.zz())]
else:
X = [x - 1.5*copysign(sqrt(hit.xx()),x), x + 1.5*copysign(sqrt(hit.xx()),x)]
Y = [y - 1.5*copysign(sqrt(hit.yy()),y), y + 1.5*copysign(sqrt(hit.yy()),y)]
Z = [hit.z(), hit.z()]
str_plot = ROOT.TPolyLine3D(len(X),array("f",X),array("f",Y),array("f",Z))
if style == 1: str_plot.SetLineStyle(2)
if style == 2: str_plot.SetLineStyle(3)
str_plot.SetLineColor(color)
self.plots_3D.append(str_plot)
def PlotVertex3D(self, vertex, color=1):
plot = ROOT.TPolyMarker3D(1, array('f', [vertex.x(), vertex.y(), vertex.z()]),3)
plot.SetMarkerColor(color)
self.plots_3D.append(plot)
def PlotPoint3D(self, point, color=1):
plot = ROOT.TPolyMarker3D(1, array('f', [point[0], point[1], point[2]]),3)
plot.SetMarkerColor(color)
self.plots_3D.append(plot)
def PlotTrackingFail(self, match):
X = array('f', [match.last_loc[0], match.fail_loc[0]])
Y = array('f', [match.last_loc[1], match.fail_loc[1]])
Z = array('f', [match.last_loc[2], match.fail_loc[2]])
plot = ROOT.TPolyLine3D(2, X, Y, Z)
plot.SetLineWidth(3)
plot.SetLineColor(2)
self.plots_3D.append(plot)
self.PlotPoint3D(match.last_loc, 2)
def PlotFakes_MatchedRecos(self, event, iterative = 1, reconstructed = 0):
fakes = analysis.FindFakes(event)
if iterative:
# Plot fakes one by one
for fake in fakes:
self.Reset()
self.Plot3DHelixes([fake],2)
self.Plot3DHits(fake, 2)
# Plot real particle tracks which include fake tracks hits
icol = 0
particle_inds = []
particles = []
reco_inds = []
recos = []
for hit in fake.hits():
if hit.isValid() and hit.nSimHits() >= 0:
for simHit in hit.simHits():
particle = simHit.trackingParticle()
if particle.index() not in particle_inds:
particle_inds.append(particle.index())
particles.append(particle)
'''
self.Plot3DHelix(particle, ROOT.TColor().GetColor(0,255,0), 1) # kAzure color, maybe ;)
self.Plot3DHits(particle, 3+icol, 1)
icol += 1
print "Number of matched tracks to real particle: " + str(particle.nMatchedTracks())
'''
# Plot reconstructed tracks of these real particle tracks
if reconstructed and particle.nMatchedTracks() > 0:
for info in particle.matchedTrackInfos():
track = info.track()
if track.index() not in reco_inds:
reco_inds.append(track.index())
recos.append(track)
'''
if particle.nMatchedTracks() == 1:
self.Plot3DHelix(track,1,2)
self.Plot3DHits(track,1,2)
else:
self.Plot3DHelix(track,5,2)
self.Plot3DHits(track,5,2)
'''
icol = 0
for particle in particles:
self.Plot3DHelix(particle, self.colors_G[icol], 1) # kAzure color, maybe ;)
self.Plot3DHits(particle, self.colors_G[icol], 1)
icol += 1
for track in recos:
self.Plot3DHelix(track,1,2)
self.Plot3DHits(track,1,2)
'''
if track.trackingParticle().nMatchedTracks() == 1:
self.Plot3DHelix(track,1,2)
self.Plot3DHits(track,1,2)
else:
self.Plot3DHelix(track,5,2)
self.Plot3DHits(track,5,2)
'''
self.Draw()
return
# the following is obsolete / unused by now
# Plot all fakes at once (messy picture)
'''
self.Plot3DHelixes(fakes,2)
for fake in fakes:
self.Plot3DHits(fake, 2)
# Plot real particle tracks which include fake tracks hits
for hit in fake.hits():
if hit.isValid() and hit.nMatchedTrackingParticles() >= 0:
for info in hit.matchedTrackingParticleInfos():
particle = info.trackingParticle()
self.Plot3DHelix(particle,3,1)
self.Plot3DHits(particle,3,1)
print "Number of matched tracks to real particle: " + str(particle.nMatchedTracks())
# Plot reconstructed tracks of these real particle tracks
if reconstructed and particle.nMatchedTracks() > 0:
for info in particle.matchedTrackInfos():
track = info.track()
if particle.nMatchedTracks() == 1:
self.Plot3DHelix(track,1,2)
self.Plot3DHits(track,1,2)
else:
self.Plot3DHelix(track,5,2)
self.Plot3DHits(track,5,2)
| |
import warnings
import numpy as np
from math import fsum
from matplotlib import rcParams
import matplotlib.pyplot as plt
from MulensModel.fitdata import FitData
from MulensModel.mulensdata import MulensData
from MulensModel.model import Model
from MulensModel.coordinates import Coordinates
from MulensModel.utils import PlotUtils
class Event(object):
"""
Combines a microlensing model with data. Allows calculating chi^2 and
making a number of plots.
Arguments :
:py:obj:`~datasets` : :py:class:`~MulensModel.mulensdata.MulensData`
or *list* of :py:class:`~MulensModel.mulensdata.MulensData` objects,
Datasets that will be linked to the event. These datasets will
be used for chi^2 calculation, plotting etc.
:py:obj:`~model` : :py:class:`~MulensModel.model.Model`
Microlensing model that will be linked to the event. In order to
get chi^2 for different sets of model parameters you should
keep a single :py:class:`~MulensModel.model.Model` instance and
change parameters for this model (i.e., do not provide separate
:py:class:`~MulensModel.model.Model` instances).
:py:obj:`~coords` : *str*,
:py:class:`~MulensModel.coordinates.Coordinates`, or astropy.SkyCoord_
Coordinates of the event. If *str*, then needs format accepted by
astropy.SkyCoord_ e.g., ``'18:00:00 -30:00:00'``.
fix_blend_flux, fix_source_flux: *dict*
Used to fix the source flux(es) or blend flux
for a particular dataset. The dataset is
the key, and the value to be fixed is the value. For example, to
fix the blending of some dataset *my_data* to zero set
*fix_blend_flux={my_data: 0.}*. See also
:py:class:`~MulensModel.fitdata.FitData` .
fix_source_flux_ratio: *dict*
Used to fix the flux ratio for a given band or dataset. The keys
should be either :py:class:`~MulensModel.mulensdata.MulensData`
objects or *str*. If a
:py:class:`~MulensModel.mulensdata.MulensData` object is specified,
it will take precedence over a band.
fit: DEPRECATED
data_ref: *int* or :py:class:`~MulensModel.mulensdata.MulensData`
Reference dataset. If *int* then gives index of reference dataset
in :py:attr:`~datasets`. Default is the first dataset.
The datasets can be in magnitude or flux space. When we calculate chi^2
we do it in magnitude or flux space depending on the value of the
:py:attr:`~MulensModel.mulensdata.MulensData.chi2_fmt` attribute.
If a dataset is in magnitude space and the model results
in negative flux, then we calculate chi^2 in flux space, but only for the
epochs with negative model flux.
.. _astropy.SkyCoord:
http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html
"""
def __init__(
self, datasets=None, model=None, coords=None, fix_blend_flux=None,
fix_source_flux=None, fix_source_flux_ratio=None, data_ref=0):
self._model = None
self._coords = None
# Initialize self._model (and check that model is defined).
if isinstance(model, Model):
self._model = model
elif model is not None:
raise TypeError('incorrect argument model of class Event()')
# Initialize self._datasets (and check that datasets is defined).
if isinstance(datasets, (list, tuple, MulensData)) or datasets is None:
self._set_datasets(datasets)
else:
raise TypeError('incorrect argument datasets of class Event()')
self._data_ref = self._set_data_ref(data_ref)
# Set event coordinates
if coords is not None:
self._update_coords(coords=coords)
elif self._model is not None:
if self._model.coords is not None:
self._update_coords(coords=self._model.coords)
self.sum_function = 'numpy.sum'
# Properties related to FitData
self._fits = None # New property
self.chi2 = None
if fix_blend_flux is None:
self.fix_blend_flux = {}
else:
self.fix_blend_flux = fix_blend_flux
if fix_source_flux is None:
self.fix_source_flux = {}
else:
self.fix_source_flux = fix_source_flux
if fix_source_flux_ratio is None:
self.fix_source_flux_ratio = {}
else:
self.fix_source_flux_ratio = fix_source_flux_ratio
def plot_model(self, data_ref=None, **kwargs):
"""
Plot the model light curve in magnitudes. See
:py:func:`MulensModel.model.Model.plot_lc()` for details.
Keywords :
data_ref: *int* or *MulensData*
If data_ref is not specified, uses :py:obj:`~data_ref`.
"""
if data_ref is None:
data_ref = self.data_ref
(f_source_0, f_blend_0) = self.get_flux_for_dataset(data_ref)
self.model.plot_lc(
source_flux=f_source_0, blend_flux=f_blend_0, **kwargs)
def plot_data(
self, phot_fmt='mag', data_ref=None, show_errorbars=None,
show_bad=None,
subtract_2450000=False, subtract_2460000=False, **kwargs):
"""
Plot the data scaled to the model.
Keywords (all optional):
phot_fmt: *string* ('mag', 'flux')
Whether to plot the data in magnitudes or in flux. Default
is 'mag'.
data_ref: *int* or *MulensData*
If data_ref is not specified, uses :py:obj:`~data_ref`.
show_errorbars: *boolean* or *None*
Do you want errorbars to be shown for all datasets?
Default is *None*, which means the option is taken from each
dataset plotting properties (for which default is *True*).
If *True*, then data are plotted using matplotlib.errorbar().
If *False*, then data are plotted using matplotlib.scatter().
show_bad: *boolean* or *None*
Do you want data marked as bad to be shown?
Default is *None*, which means the option is taken from each
dataset plotting properties (for which default is *False*).
If bad data are shown, then they are plotted with 'x' marker.
subtract_2450000, subtract_2460000: *boolean*
If True, subtracts 2450000 or 2460000 from the time
axis to get more human-scale numbers. If using, make
sure to also set the same settings for all other
plotting calls (e.g. :py:func:`plot_lc()`).
``**kwargs``:
Passed to matplotlib plotting functions. Contrary to
previous behavior, ``**kwargs`` are no longer remembered.
"""
self._set_default_colors() # For each dataset
if self.fits is None:
self.get_chi2()
if data_ref is None:
data_ref = self.data_ref
# JCY wants to implement show_errorbars, show_bad as a list option, so it
# can be different for different datasets. DO LATER.
# Set plot limits
t_min = 3000000.
t_max = 0.
subtract = PlotUtils.find_subtract(subtract_2450000, subtract_2460000)
# Get fluxes for the reference dataset
(f_source_0, f_blend_0) = self.get_flux_for_dataset(data_ref)
for (i, data) in enumerate(self._datasets):
# Scale the data flux
(flux, err_flux) = self.fits[i].scale_fluxes(f_source_0, f_blend_0)
(y_value, y_err) = PlotUtils.get_y_value_y_err(
phot_fmt, flux, err_flux)
data._plot_datapoints(
(y_value, y_err), subtract_2450000=subtract_2450000,
subtract_2460000=subtract_2460000,
show_errorbars=show_errorbars, show_bad=show_bad, **kwargs)
t_min = min(t_min, np.min(data.time))
t_max = max(t_max, np.max(data.time))
# Plot properties
plt.ylabel('Magnitude')
plt.xlabel(
PlotUtils.find_subtract_xlabel(subtract_2450000, subtract_2460000))
plt.xlim(t_min-subtract, t_max-subtract)
(ymin, ymax) = plt.gca().get_ylim()
if ymax > ymin:
plt.gca().invert_yaxis()
def plot_residuals(
self, show_errorbars=None, data_ref=None, subtract_2450000=False,
subtract_2460000=False, show_bad=None, **kwargs):
"""
Plot the residuals (in magnitudes) to the model.
Keywords:
For an explanation of the keywords, see docstrings in
:py:func:`plot_data()`. Note the different order of keywords.
"""
self._set_default_colors()
if data_ref is None:
data_ref = self.data_ref
# Plot limit parameters
t_min = 3000000.
t_max = 0.
subtract = PlotUtils.find_subtract(subtract_2450000, subtract_2460000)
# Plot zeropoint line
plt.plot([0., 3000000.], [0., 0.], color='black')
# Plot residuals
(f_source_0, f_blend_0) = self.get_flux_for_dataset(data_ref)
for i, data in enumerate(self._datasets):
# Evaluate whether or not it is necessary to calculate the model
# for bad datapoints.
if show_bad:
bad = True
else:
bad = False
(residuals, errorbars) = self.fits[i].get_residuals(
phot_fmt='scaled', source_flux=f_source_0,
blend_flux=f_blend_0, bad=bad)
y_value = residuals
y_err = errorbars
data._plot_datapoints(
(y_value, y_err), subtract_2450000=subtract_2450000,
subtract_2460000=subtract_2460000,
show_errorbars=show_errorbars, show_bad=show_bad, **kwargs)
t_min = min(t_min, np.min(data.time))
t_max = max(t_max, np.max(data.time))
# Plot properties
y_lim = np.max([np.abs(y_lim) for y_lim in plt.gca().get_ylim()])
if y_lim > 1.:
y_lim = 0.5
plt.ylim(y_lim, -y_lim)
plt.xlim(t_min-subtract, t_max-subtract)
plt.ylabel('Residuals')
plt.xlabel(
PlotUtils.find_subtract_xlabel(subtract_2450000, subtract_2460000))
def plot_trajectory(self, **kwargs):
"""
Plot the trajectory of the source. See
:py:func:`MulensModel.model.Model.plot_trajectory()` for details.
"""
self.model.plot_trajectory(**kwargs)
def plot_source_for_datasets(self, **kwargs):
"""
Plot source positions for all linked datasets.
See :py:func:`MulensModel.model.Model.plot_source` for
details.
Note: plots all points in datasets (including ones flagged as bad)
using the same marker.
"""
self._set_default_colors()
for dataset in self.datasets:
properties = dataset._set_plot_properties()
self.model.plot_source(
times=dataset.time, color=properties['color'], **kwargs)
def _set_default_colors(self):
"""
If the user has not specified a color for a dataset, assign
one.
"""
colors = [cycle['color'] for cycle in rcParams['axes.prop_cycle']]
# Below we change the order of colors to most distinct first.
used_colors = []
for data in self._datasets:
if 'color' in data.plot_properties.keys():
used_colors.append(data.plot_properties['color'])
if len(used_colors) == len(self._datasets):
return
if len(used_colors) == 0:
differences = None
else:
diffs = np.array(
[np.min(
PlotUtils.get_color_differences(used_colors, c))
for c in colors])
indexes = np.argsort(diffs)[::-1]
colors = [colors[i] for i in indexes]
differences = diffs[indexes]
# Assign colors when needed.
color_index = 0
for data in self._datasets:
if 'color' not in data.plot_properties.keys():
if differences is not None:
if differences[color_index] < 0.35:
msg = ('The color automatically assigned to one of the ' +
'datasets (' + colors[color_index] +
') is very similar to an already used color')
warnings.warn(msg, UserWarning)
data.plot_properties['color'] = colors[color_index]
color_index += 1
if color_index == len(colors):
color_index = 0
msg = ('Too many datasets without colors assigned - ' +
'same color will be used for different datasets')
warnings.warn(msg, UserWarning)
def get_flux_for_dataset(self, dataset):
"""
Get the source and blend flux for a given dataset.
Parameters :
dataset: :py:class:`~MulensModel.mulensdata.MulensData` or *int*
If *int* should be the index (starting at 0) of the appropriate
dataset in the :py:obj:`~datasets` list.
Returns :
source_flux: *np.ndarray*
flux of sources. see
:py:obj:`~MulensModel.fitdata.FitData.source_fluxes`
blend_flux: *float*
blending flux. see
:py:obj:`~MulensModel.fitdata.FitData.blend_flux`
NOTE: This function does not recalculate fits or fluxes. If the data
haven't yet been fit to the model (i.e. self.fits = None),
it will run :py:func:`~fit_fluxes()`. Otherwise, it just accesses the
| |
i in range(size):
conv.append(float(np.mean(temp_gradient_x[i:])))
d_conv = []
for i in range(len(conv) - 1):
temp = np.abs(conv[i + 1] - conv[i])
d_conv.append(temp)
noise_floor_vals = d_conv[2:size // 2] # these are rough bounds that should work
plt.plot(d_conv)
noise_floor = np.mean(noise_floor_vals)
x_floor = list(range(2, 50))
y_floor = [noise_floor for number in range(2, 50)]
plt.plot(x_floor, y_floor)
plt.title("Convergence of $\\frac{dT(x)}{dx}$ calculation sweeping from x to the end\n"
"Noise floor: %.4E" % noise_floor)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('%s/temp_gradient_conv.pdf' % save_dir)
if not quiet:
plt.show()
plt.close()
# def plot_distribution_tube_lengths
# def plot_distribution_tube_angles
# def plot_distribution_tube_distances(centers or ends)
def plot_k_convergence(quantity, quiet, save_dir, x_list=None):
logging.info("Plotting k convergence")
if x_list is not None:
plt.plot(x_list, quantity)
plt.xlabel('Timesteps')
else:
plt.plot(quantity)
plt.xlabel('Total walkers/2')
plt.title("Convergence of conductivity k")
plt.ylabel('Conductivity k')
plt.savefig('%s/k_convergence.pdf' % save_dir)
if not quiet:
plt.show()
plt.close()
def plot_k_convergence_err(quantity, quiet, save_dir, begin_cov_check, x_list=None):
logging.info("Plotting k convergence error")
if x_list is not None:
plt.plot(x_list, quantity)
plt.xlabel('Timesteps')
else:
x = list(range(begin_cov_check, len(quantity) + begin_cov_check))
plt.plot(x, quantity)
plt.xlabel('Total walkers/2')
plt.title("Error in convergence of conductivity k")
plt.ylabel('Conductivity k error')
plt.savefig('%s/k_convergence_err.pdf' % save_dir)
if not quiet:
plt.show()
plt.close()
def plot_dt_dx(quantity, quiet, save_dir, x_list=None):
logging.info("Plotting dt/dx")
if x_list is not None:
plt.plot(x_list, quantity)
plt.xlabel('Timesteps')
else:
plt.plot(quantity)
plt.xlabel('Total walkers/2')
plt.title("dT(x)/dx")
plt.ylabel('dT(x)/dx')
plt.savefig('%s/dt_dx.pdf' % save_dir)
if not quiet:
plt.show()
plt.close()
def plot_heat_flux(quantity, quiet, save_dir, x_list=None):
logging.info("Plotting heat flux")
if x_list is not None:
plt.plot(x_list, quantity)
plt.xlabel('Timesteps')
else:
plt.plot(quantity)
plt.xlabel('Total walkers/2')
plt.title("Heat flux")
plt.ylabel('Heat flux')
plt.savefig('%s/heat_flux.pdf' % save_dir)
if not quiet:
plt.show()
plt.close()
def plot_k_vs_num_tubes(tube_length, num_configs, grid_size, dim, legend=True, exclude_vals='',
tunneling=False, max_tube_num=100000, force_y_int=False, y_max=None, dec_fill_fract=True,
w_err=True):
"""Plots REDUCED thermal conductivity k-k_0/k_0 vs. CNT filling fraction or percent
w_err - weighted linear fit based on k error bars from configurations"""
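# Editor's note: "reduced" conductivity here means k_red = (k - k_0) / k_0, with the
# baseline k_0 set below (0.5 in 2D, 1/300 in 3D), and the weighted fit (w_err=True)
# passes weights 1/sigma_k to np.polyfit.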
def fill_fraction_tubes(x, orientation, tunneling, grid_size, dim):
######
random_2d_15 = {'0': 0, '10': 2.04, '20': 4.04, '30': 6.15, '40': 8.22, '50': 10.06, '60': 12.21,
'70': 14.23,
'80': 16.25, '90': 17.97, '100': 20.2, '110': 22.49, '120': 24.37, '130': 26.08,
'140': 28.25, '150': 30.34}
######
h_v_2d_15 = {'0': 0, '10': 1.63, '20': 3.26, '30': 4.9, '40': 6.53, '50': 8.16, '60': 9.79, '70': 11.43,
'80': 13.06, '90': 14.69, '100': 16.32, '110': 17.96, '120': 19.59, '130': 21.22, '140': 22.85,
'150': 24.49}
###### CORRECTED 8/6/2017 DUE TO NEW RANDOM TUBE GENERATION ALGORITHM ######
random_3d_15 = {'0': 0, '1250': 2.91, '2500': 5.76, '3750': 8.63, '5000': 11.43, '6250': 14.15,
'7500': 16.93,
'8750': 19.6, '10000': 22.29, '11250': 24.92, '12500': 27.49, '13750': None}
######
h_v_3d_15 = {'0': 0, '1250': 2.06, '2500': 4.12, '3750': 6.18, '5000': 8.24, '6250': 10.3, '7500': 12.36,
'8750': 14.42, '10000': 16.48, '11250': 18.56, '12500': 20.62}
######
random_2d_10 = {'0': 0, '14': 1.95, '30': 4.2, '45': 6.29, '58': 8.23, '75': 10.58, '88': 12.32,
'102': 14.19,
'115': 16.05, '131': 18.48, '143': 19.99, '156': 21.81, '172': 24.14, '191': 26.47,
'201': 28.13, '220': 30.67}
######
h_v_2d_10 = {'0': 0, '15': 1.63, '29': 3.26, '44': 4.9, '58': 6.53, '73': 8.16, '87': 9.79, '102': 11.43,
'116': 13.06, '131': 14.69, '145': 16.32, '160': 17.96, '175': 19.59, '189': 21.22, '204': 22.85}
######
random_3d_10 = {'0': 0, '1794': 2.85, '3609': 5.7, '5403': 8.5, '7212': 11.3, '8992': 14.1, '10801': 16.8,
'12604': 19.6, '14429': 22.3, '16228': 24.97, '18060': 27.7, '19845': 30.25}
######
h_v_3d_10 = {'0': 0, '1813': 2.06, '3640': 4.12, '5453': 6.18, '7266': 8.24, '9080': 10.3, '10906': 12.36,
'12719': 14.42, '14533': 16.48, '16376': 18.56, '18190': 20.62}
######
random_2d_20 = {'0': 0, '8': 2.02, '15': 4.09, '23': 6.25, '30': 8.02, '39': 10.55, '46': 12.14,
'53': 14.28,
'60': 15.97, '70': 18.34, '75': 19.73, '82': 21.4, '89': 23.21, '99': 25.97,
'108': 28.15, '116': 30.64}
######
h_v_2d_20 = {'0': 0, '8': 1.63, '15': 3.26, '23': 4.9, '30': 6.53, '38': 8.16, '46': 9.79, '53': 11.43,
'61': 13.06, '69': 14.69, '76': 16.32, '84': 17.96, '91': 19.59, '99': 21.22, '107': 22.85}
######
random_3d_20 = {'0': 0, '951': 2.91, '1904': 5.73, '2863': 8.6, '3822': 11.35, '4757': 14.07, '5728': 16.8,
'6660': 19.35, '7627': 21.96, '8638': 24.65, '9562': 27.1, '10519': 29.5}
######
h_v_3d_20 = {'0': 0, '950': 2.06, '1904': 4.12, '2854': 6.18, '3808': 8.24, '4758': 10.3, '5712': 12.36,
'6662': 14.42, '7616': 16.48, '8575': 18.56, '9529': 20.62, '6919': 15.0, '7163': 15.5,
'7389': 16.0, '7856': 17.0, '8084': 17.5, '8318': 18, '8778': 19.0, '9011': 19.5, '9240': 20.0,
'9704': 21.0, '10166': 22.0, '11091': 24.0}
#####
tunnel = 2.0 * float(x) * 100.0 / grid_size ** dim
if grid_size != 100:
print("Assuming the default formula to get values, NOT for random")
fill_fract = int(round((float(tube_length) * float(x) * 100) / (grid_size ** dim)))
else:
if not tunneling:
if orientation == 'random':
search_str = 'random_%dd_%d[str(int(x))]' % (dim, tube_length)
fill_fract = eval(search_str)
elif (orientation == 'horizontal') or (orientation == 'vertical'):
search_str = 'h_v_%dd_%d[str(int(x))]' % (dim, tube_length)
fill_fract = eval(search_str)
else:
fill_fract = tunnel
return fill_fract
def lin_fit(x, y, dim):
'''Fits a linear fit of the form m*x (zero intercept) to the data'''
dim_dict = {2: 0.5, 3: 1.0 / 3.0}
fitfunc = lambda params, x: params[0] * x # create fitting function of form mx+no_tubes_const
errfunc = lambda p, x, y: fitfunc(p, x) - y # create error function for least squares fit
init_a = 0.5 # find initial value for a (gradient)
init_p = np.array((init_a)) # bundle initial values in initial parameters
# calculate best fitting parameters (i.e. m and b) using the error function
p1, success = sp.optimize.leastsq(errfunc, init_p.copy(), args=(x, y))
f = fitfunc(p1, x) # create a fit with those parameters
return p1, f
exclude_vals = list(map(str, exclude_vals)) # array of numbers
exclude_vals = [x + '_' for x in exclude_vals]
folds = [] # list of all folder name strings
zero_folds = []
orientations = [] # list of all orientations (not unique yet)
dim = int(dim)
tube_length = int(tube_length)
old_plot = 'k_num_tubes_%d_%dD.pdf' % (tube_length, dim) # let's get rid of the old one!
if os.path.isfile(old_plot):
os.remove(old_plot)
for file in glob.glob("*_*_%d_*" % tube_length):
checker = file.split('_')[0] + '_'
config_num = int(file.split('_')[3])
tube_val = int(file.split('_')[0])
if (checker not in exclude_vals) and (config_num <= num_configs) and (
tube_val <= max_tube_num): # throws out extra config
folds.append(file) # all files
orientations.append(file.split('_')[1])
for file in glob.glob("0_*_*_*"):
zero_folds.append(file)
uni_orientations = list(set(orientations))
sep_folds = []
# separate folds by orientation
for i in range(len(uni_orientations)):
sep_folds.append([x for x in folds if uni_orientations[i] in x])
sep_folds[i] = sorted(sep_folds[i])
sep_folds[i] += zero_folds
slopes = []
d_slopes = [] # error on the slope
y_ints = []
r_twos = []
for i in range(len(uni_orientations)):
uni_tubes = int(len(sep_folds[i]) / num_configs)
uni_num_tubes = []
for k in range(uni_tubes):
uni_num_tubes.append(sep_folds[i][k * num_configs].split('_')[0])
uni_num_tubes = [float(y) for y in uni_num_tubes]
all_k_vals = np.zeros(len(sep_folds[i]))
# all_kapitza_vals = np.zeros(len(sep_folds[i]))
for j in range(len(sep_folds[i])):
os.chdir(sep_folds[i][j])
# kapitza = np.loadtxt('prob_m_cn.txt')
all_k_vals[j] = np.loadtxt('k.txt')
os.chdir('..')
k_vals = []
k_err = []
k_0 = {2: 0.5, 3: 1.0 / 300.0}
for l in range(len(uni_num_tubes)):
k_vals_temp = np.mean(all_k_vals[l * num_configs:(l + 1) * num_configs])
k_vals.append((k_vals_temp - k_0[dim]) / k_0[dim])
k_err_temp = np.std(all_k_vals[l * num_configs:(l + 1) * num_configs], ddof=1) / np.sqrt(num_configs)
k_err.append(k_err_temp * k_0[dim])
fill_fract = []
for a in range(len(uni_num_tubes)):
temp_ff = fill_fraction_tubes(uni_num_tubes[a], uni_orientations[i], tunneling, grid_size, dim)
fill_fract.append(temp_ff)
# sort data ascending
fill_fract_temp = np.array(fill_fract)
k_err_temp = np.array(k_err)
k_vals_temp = np.array(k_vals)
idx = np.argsort(fill_fract)
fill_fract = fill_fract_temp[idx]
k_err = k_err_temp[idx]
k_vals = k_vals_temp[idx]
# sort should be working
# remove duplicate fill fractions
unq, unq_idx = np.unique(fill_fract, return_index=True)
fill_fract = fill_fract[unq_idx]
k_vals = k_vals[unq_idx]
k_err = k_err[unq_idx]
if dec_fill_fract:
fill_fract *= 0.01 # uses decimal for fill fraction values, more reasonable slopes
# apply linear fit
if force_y_int:
slope, _ = lin_fit(fill_fract, k_vals, dim)
print(slope)
raise SystemExit
x = np.array(fill_fract)
y = np.array(k_vals)
# x = x[:, np.newaxis] # for 0 y intercept
intercept = dim_dict[dim]
x = np.vstack([x, np.ones(len(x)) * intercept]).T # forces set y-int
a, _, _, _ = np.linalg.lstsq(x, y)
slope = a[0]
r_value = a[1]
x_fit = x
y_fit = slope * x
else:
if w_err:
k_err_weights = 1.0 / k_err
p, V = np.polyfit(fill_fract, k_vals, 1, cov=True, w=k_err_weights)
else:
p, V = np.polyfit(fill_fract, k_vals, 1, cov=True, w=k_err)
slope = p[0]
intercept = p[1]
d_slope = np.sqrt(V[0][0])
d_yint = np.sqrt(V[1][1])
notused_slope, notused_intercept, r_value, | |
&)
* * (matches 0 or more characters)
* ? (matches exactly 1 character)
- *(string) --*
- **HostHeaderConfig** *(dict) --*
- **Values** *(list) --*
- *(string) --*
- **PathPatternConfig** *(dict) --*
- **Values** *(list) --*
- *(string) --*
- **HttpHeaderConfig** *(dict) --*
- **HttpHeaderName** *(string) --*
- **Values** *(list) --*
- *(string) --*
- **QueryStringConfig** *(dict) --*
- **Values** *(list) --*
- *(dict) --*
- **Key** *(string) --*
- **Value** *(string) --*
- **HttpRequestMethodConfig** *(dict) --*
- **Values** *(list) --*
- *(string) --*
- **SourceIpConfig** *(dict) --*
- **Values** *(list) --*
- *(string) --*
- **Actions** *(list) --*
The actions.
- *(dict) --*
Information about an action.
- **Type** *(string) --*
The type of action. Each rule must include exactly one of the following types of actions: ``forward`` , ``fixed-response`` , or ``redirect`` .
- **TargetGroupArn** *(string) --*
The Amazon Resource Name (ARN) of the target group. Specify only when ``Type`` is ``forward`` .
- **AuthenticateOidcConfig** *(dict) --*
[HTTPS listeners] Information about an identity provider that is compliant with OpenID Connect (OIDC). Specify only when ``Type`` is ``authenticate-oidc`` .
- **Issuer** *(string) --*
The OIDC issuer identifier of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.
- **AuthorizationEndpoint** *(string) --*
The authorization endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.
- **TokenEndpoint** *(string) --*
The token endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.
- **UserInfoEndpoint** *(string) --*
The user info endpoint of the IdP. This must be a full URL, including the HTTPS protocol, the domain, and the path.
- **ClientId** *(string) --*
The OAuth 2.0 client identifier.
- **ClientSecret** *(string) --*
The OAuth 2.0 client secret. This parameter is required if you are creating a rule. If you are modifying a rule, you can omit this parameter if you set ``UseExistingClientSecret`` to true.
- **SessionCookieName** *(string) --*
The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.
- **Scope** *(string) --*
The set of user claims to be requested from the IdP. The default is ``openid`` .
To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.
- **SessionTimeout** *(integer) --*
The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).
- **AuthenticationRequestExtraParams** *(dict) --*
The query parameters (up to 10) to include in the redirect request to the authorization endpoint.
- *(string) --*
- *(string) --*
- **OnUnauthenticatedRequest** *(string) --*
The behavior if the user is not authenticated. The following are possible values:
* deny- Return an HTTP 401 Unauthorized error.
* allow- Allow the request to be forwarded to the target.
* authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.
- **UseExistingClientSecret** *(boolean) --*
Indicates whether to use the existing client secret when modifying a rule. If you are creating a rule, you can omit this parameter or set it to false.
- **AuthenticateCognitoConfig** *(dict) --*
[HTTPS listeners] Information for using Amazon Cognito to authenticate users. Specify only when ``Type`` is ``authenticate-cognito`` .
- **UserPoolArn** *(string) --*
The Amazon Resource Name (ARN) of the Amazon Cognito user pool.
- **UserPoolClientId** *(string) --*
The ID of the Amazon Cognito user pool client.
- **UserPoolDomain** *(string) --*
The domain prefix or fully-qualified domain name of the Amazon Cognito user pool.
- **SessionCookieName** *(string) --*
The name of the cookie used to maintain session information. The default is AWSELBAuthSessionCookie.
- **Scope** *(string) --*
The set of user claims to be requested from the IdP. The default is ``openid`` .
To verify which scope values your IdP supports and how to separate multiple values, see the documentation for your IdP.
- **SessionTimeout** *(integer) --*
The maximum duration of the authentication session, in seconds. The default is 604800 seconds (7 days).
- **AuthenticationRequestExtraParams** *(dict) --*
The query parameters (up to 10) to include in the redirect request to the authorization endpoint.
- *(string) --*
- *(string) --*
- **OnUnauthenticatedRequest** *(string) --*
The behavior if the user is not authenticated. The following are possible values:
* deny- Return an HTTP 401 Unauthorized error.
* allow- Allow the request to be forwarded to the target.
* authenticate- Redirect the request to the IdP authorization endpoint. This is the default value.
- **Order** *(integer) --*
The order for the action. This value is required for rules with multiple actions. The action with the lowest value for order is performed first. The final action to be performed must be a ``forward`` or a ``fixed-response`` action.
- **RedirectConfig** *(dict) --*
[Application Load Balancer] Information for creating a redirect action. Specify only when ``Type`` is ``redirect`` .
- **Protocol** *(string) --*
The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.
- **Port** *(string) --*
The port. You can specify a value from 1 to 65535 or #{port}.
- **Host** *(string) --*
The hostname. This component is not percent-encoded. The hostname can contain #{host}.
- **Path** *(string) --*
The absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}.
- **Query** *(string) --*
The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?", as it is automatically added. You can specify any of the reserved keywords.
- **StatusCode** *(string) --*
The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary (HTTP 302).
- **FixedResponseConfig** *(dict) --*
[Application Load Balancer] Information for creating an action that returns a custom HTTP response. Specify only when ``Type`` is ``fixed-response`` .
- **MessageBody** *(string) --*
The message.
- **StatusCode** *(string) --*
The HTTP response code (2XX, 4XX, or 5XX).
- **ContentType** *(string) --*
The content type.
Valid Values: text/plain | text/css | text/html | application/javascript | application/json
- **IsDefault** *(boolean) --*
Indicates whether this is the default rule.
:type ListenerArn: string
:param ListenerArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the listener.
:type Conditions: list
:param Conditions: **[REQUIRED]**
The conditions. Each condition specifies a field name and a single value.
If the field name is ``host-header`` , you can specify a single host name (for example, my.example.com). A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.
* A-Z, a-z, 0-9
* - .
* * (matches 0 or more characters)
* ? (matches exactly 1 character)
If the field name is ``path-pattern`` , you can specify a single path pattern. A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters. You can include up to three wildcard characters.
* A-Z, a-z, 0-9
* _ - . $ / ~ \" \' @ : +
* & (using &)
* * (matches 0 or more characters)
* ? (matches exactly 1 character)
- *(dict) --*
Information about a condition for a rule.
- **Field** *(string) --*
The name of the field. The | |
range(nbins[j]):
f.write('{:>6} '.format('%.3f'%(corr_young[k])))
f.write('\n')
f.write('{:<8} '.format('corr_old'))
for k in range(nbins[j]):
f.write('{:>6} '.format('%.3f'%(corr_old[k])))
f.write('\n')
f.write('{:<8} '.format('bins_gmc'))
for k in range(nbins[j]):
f.write('{:>6} '.format('%.3f'%(logbins_gmc[k])))
f.write('\n')
f.write('{:<8} '.format('corr_gmc'))
for k in range(nbins[j]):
f.write('{:>6} '.format('%.3f'%(corr_gmc[k])))
f.close()
def cross_correlate(sc_df, gmc_df, rand_sc_df, rand_gmc_df, dist, nbins=16, min_bin=1.1e-5):
""" calculate the cross-correlation between the given star clusters and GMC with the Landy & Szalay (1993) estimator
makes use of astropy search_around_sky which employs kdtrees to find separations b/w objects
follows the same methodology as astroML two point correlation function (https://www.astroml.org/user_guide/correlation_functions.html)
but modified to just use search_around_sky (so I didn't have to write the kdtree code since it's already implemented in search_around_sky)
and follows the math for doing cross-correlation rather than auto-correlation
Inputs:
sc_df pandas DataFrame dataframe which contains the star cluster catalog; needs to include the cluster ra, dec
gmc_df pandas DataFrame dataframe which contains the gmc catalog; needs to include the gmc cluster ra, dec
rand_sc_df pandas DataFrame dataframe which contains the random star cluster catalog over which the correlation will be compared to; needs the random cluster ra, dec
rand_gmc_df pandas DataFrame dataframe which contains the random gmc catalog over which the correlation will be compared to; needs the random gmc ra, dec
dist float distance to the galaxy in Mpc; needed for calculating the separations b/w clusters and gmcs in pc
nbins int the number of radial bins over which to do the correlation
min_bin float the angular location of the first/minimum bin
Outputs:
bins array array of the angular bin edges (degrees)
corr array correlation values --> 1 + omega(theta)
"""
bins = 10 ** np.linspace(np.log10(min_bin), np.log10(0.1), nbins+1)
# total numbers in the two catalogs
N_Dsc = len(sc_df)
N_Dgmc = len(gmc_df)
N_Rsc = len(rand_sc_df)
N_Rgmc = len(rand_gmc_df)
# make sky coords for all the cats
sc_coords = SkyCoord(ra=sc_df['ra']*u.deg, dec=sc_df['dec']*u.deg, frame='icrs', distance=dist*u.Mpc)
gmc_coords = SkyCoord(ra=gmc_df['XCTR_DEG']*u.deg, dec=gmc_df['YCTR_DEG']*u.deg, frame='fk5', distance=dist*u.Mpc)
rand_sc_coords = SkyCoord(ra=rand_sc_df['ra']*u.deg, dec=rand_sc_df['dec']*u.deg, frame='icrs', distance=dist*u.Mpc)
rand_gmc_coords = SkyCoord(ra=rand_gmc_df['ra']*u.deg, dec=rand_gmc_df['dec']*u.deg, frame='icrs', distance=dist*u.Mpc)
# get separations for all pairs
DscDgmc_sas = search_around_sky(sc_coords, gmc_coords, seplimit=5*u.deg)
DscRgmc_sas = search_around_sky(sc_coords, rand_gmc_coords, seplimit=5*u.deg)
RscDgmc_sas = search_around_sky(rand_sc_coords, gmc_coords, seplimit=5*u.deg)
RscRgmc_sas = search_around_sky(rand_sc_coords, rand_gmc_coords, seplimit=5*u.deg)
DscDgmc_sep = DscDgmc_sas[2]
DscRgmc_sep = DscRgmc_sas[2]
RscDgmc_sep = RscDgmc_sas[2]
RscRgmc_sep = RscRgmc_sas[2]
# loop through bins [drop first bin/min_bin]
lbins = bins[1:] * u.deg
corr = []
for j in range(len(lbins)):
if j == 0:
wDDr = np.where((DscDgmc_sep >= min_bin*u.deg) & (DscDgmc_sep < lbins[j]) )[0]
wDRr = np.where((DscRgmc_sep >= min_bin*u.deg) & (DscRgmc_sep < lbins[j]) )[0]
wRDr = np.where((RscDgmc_sep >= min_bin*u.deg) & (RscDgmc_sep < lbins[j]) )[0]
wRRr = np.where((RscRgmc_sep >= min_bin*u.deg) & (RscRgmc_sep < lbins[j]) )[0]
else:
wDDr = np.where((DscDgmc_sep >= lbins[j-1]) & (DscDgmc_sep < lbins[j]) )[0]
wDRr = np.where((DscRgmc_sep >= lbins[j-1]) & (DscRgmc_sep < lbins[j]) )[0]
wRDr = np.where((RscDgmc_sep >= lbins[j-1]) & (RscDgmc_sep < lbins[j]) )[0]
wRRr = np.where((RscRgmc_sep >= lbins[j-1]) & (RscRgmc_sep < lbins[j]) )[0]
# check for empty RR, and replace with nan if needed
if len(wRRr) == 0:
corr.append(np.nan)
else:
factor1 = (N_Rsc * N_Rgmc * len(wDDr))/(N_Dsc * N_Dgmc * len(wRRr))
factor2 = (N_Rsc * len(wDRr))/(N_Dsc * len(wRRr))
factor3 = (N_Rgmc * len(wRDr))/(N_Dgmc * len(wRRr))
xi_r = factor1 - factor2 - factor3 + 1
corr.append(xi_r)
corr = np.array(corr) + 1
return bins, corr
def cross_correlate_bootstrap(sc_df, gmc_df, wcs_hst, xmax_hst, ymax_hst, wcs_alma, xmax_alma, ymax_alma, mask, dist, nbootstraps=100, **kwargs):
""" run the cross-correlation with a non-parametric bootstrap estimation of the errors on the correlation values
calls on the cross_correlate function above but runs through the given number of bootstraps and returns an estimation of the error
bootstrap estimation is done by randomly resampling the star cluster and gmc catalogs and then recalculating the correlation over
a new random cluster and gmc catalog
the correlation value (1 + omega(theta)) is saved for each bin for each bootstrap run
and error is estimated as the standard deviation (with ddof = 1) of the correlation values in each bin
Inputs:
sc_df pandas DataFrame dataframe which contains the star cluster catalog; needs to include the cluster ra, dec
gmc_df pandas DataFrame dataframe which contains the gmc catalog; needs to include the gmc cluster ra, dec
wcs_hst astropy.wcs.wcs.WCS astropy wcs object of the hst image
xmax_hst int the maximum pixel location in the x-direction of the hst image; i.e., if hst image is 13000x14000 pixels, xmax_hst = 13000
ymax_hst int the maximum pixel location in the y-direction of the hst image; i.e., if hst image is 13000x14000 pixels, ymax_hst = 14000
wcs_alma astropy.wcs.wcs.WCS astropy wcs object of the alma image
xmax_alma int the maximum pixel location in the x-direction of the alma image; i.e., if alma image is 1599x1598 pixels, xmax_alma = 1599
ymax_alma int the maximum pixel location in the y-direction of the alma image; i.e., if alma image is 1599x1598 pixels, ymax_alma = 1598
mask array the data array of the hst-alma overlap mask image
dist float distance to the galaxy in Mpc; needed for calculating the separations b/w clusters and gmcs in pc
nbootstraps int number of bootstraps to run through
**kwargs dict keyword arguments to pass on to the cross_correlate function; really just nbins and min_bin
Outputs:
bins_centers_pc array centers of the bins in parsecs
corr array correlation values in each bin --> 1 + omega(theta)
corr_err array 1 sigma error on the correlation values; if correlation is nan, error will be 0
power_law_fits list the best-fit for powerlaws; [A_w (deg), error, A_w (pc), error, alpha, error ]
"""
bootstraps = []
for i in range(nbootstraps):
# generate random star cluster and gmc catalogs
rand_sc_df = generate_random_sc(sc_df, xmax_hst, ymax_hst, wcs_hst, mask)
rand_gmc_df = generate_random_gmc(gmc_df, xmax_alma, ymax_alma, wcs_alma, wcs_hst, mask)
# random resampling of the data (sc and gmcs) unless its the first time through
# then just run the original data
if i > 0:
rand_sc_ind = np.random.randint(0, len(sc_df), len(sc_df) )
rand_gmc_ind = np.random.randint(0, len(gmc_df), len(gmc_df) )
sc_boot = sc_df.iloc[rand_sc_ind]
gmc_boot = gmc_df.iloc[rand_gmc_ind]
else:
sc_boot = sc_df
gmc_boot = gmc_df
# run cross-correlation; bins won't change through out the bootstraps so it's ok to overwrite but we do want to return it later
bins, corr = cross_correlate(sc_boot, gmc_boot, rand_sc_df, rand_gmc_df, dist, **kwargs)
# save the correlation array from each bootstrap
bootstraps.append(corr)
# since the first one of the bootstraps was the original data, this is the correlation to return
corr = bootstraps[0]
# since there are nans in the correlation results, we mask them out and compute the standard deviations in each bin
# delta degree of freedom = 1 because the bootstraps are computed from a random sample of the population
# bins with nan correlation will be given 0 for the error on the correlation
corr_err = np.asarray(np.ma.masked_invalid(bootstraps).std(0, ddof=1))
# get centers of bins [degrees]
bin_centers = 0.5 * (bins[1:] + bins[:-1])
# bin centers in parsec
bin_centers_pc = dist*1e6 * bin_centers*u.deg.to(u.rad)
# will need to drop nans for power law fitting
wnnan = np.where(np.isnan(corr) == False)
try:
# power-law fit
popt_ang, pcov = curve_fit(powerlaw_func, bin_centers[wnnan], corr[wnnan])
perr_ang = np.sqrt(np.diag(pcov))
popt_pc, pcov = curve_fit(powerlaw_func, bin_centers_pc[wnnan], corr[wnnan])
perr_pc = np.sqrt(np.diag(pcov))
except:
print('\ncross correlation power law fit failed for nbins = %i'%(len(bin_centers)))
popt_ang = [0,0]
perr_ang = [0,0]
popt_pc = [0,0]
perr_pc = [0,0]
# sometimes the error doesn't converge so replace those with 0 (instead of inf)
winf = np.where(np.isinf(perr_ang))[0]
if len(winf) > 0:
perr_ang[winf] = 0
perr_pc[winf] = 0
return bin_centers_pc, corr, corr_err, [popt_ang[0], perr_ang[0], popt_pc[0], perr_pc[0], popt_ang[1], perr_ang[1]]
def generate_random_sc(sc_df, xmax, ymax, wcs_hst, mask, rseed=222):
""" generates a catalog of randomly placed 'star clusters'
Inputs:
sc_df pandas DataFrame dataframe which contains the star cluster catalog; really just to get the number of actual star clusters
xmax int the maximum pixel location in the x-direction of the hst image; i.e., if hst image is 13000x14000 pixels, xmax_hst = 13000
ymax int the maximum pixel location in the y-direction of the hst image; i.e., if hst image is 13000x14000 pixels, ymax_hst = 14000
wcs_hst astropy.wcs.wcs.WCS astropy wcs object of the hst image
mask array the data array of the hst-alma overlap mask image
rseed int the seed value which gets used for the numpy.random
Output:
rand_sc_df pandas DataFrame dataframe of the random star cluster catalog; includes x, y, ra, dec
"""
np.random.seed(rseed)
# generate random uniform distribution of star clusters over the entire HST image
rand_x_full = np.random.uniform(0, xmax, size=4*len(sc_df))
rand_y_full = np.random.uniform(0, ymax, size=4*len(sc_df))
# convert x,y values to integers b/c np.random.uniform can only produce floats
rand_x_full_int = np.array([int(np.round(x)) for x in rand_x_full ])
rand_y_full_int = np.array([int(np.round(y)) for y in rand_y_full ])
# limit to those in the hst-alma overlap mask
rand_in_mask = np.array([mask[y, x] == 1 for y, x in zip(rand_y_full_int, rand_x_full_int)])
# convert to ra, dec using the hst image wcs info
rand_ra, rand_dec = wcs_hst.wcs_pix2world(rand_x_full[rand_in_mask], rand_y_full[rand_in_mask], 1.0)
# create dictionary with the random clusters' x,y,ra,dec positions
rand_sc_data = {'x': rand_x_full[rand_in_mask], 'y': rand_y_full[rand_in_mask], 'ra': rand_ra, 'dec': rand_dec }
# create a dataframe from the dictionary
rand_sc_df = pd.DataFrame(rand_sc_data)
return rand_sc_df
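# Note (added comment): four times as many random positions as real clusters are drawn
# above, presumably so that enough points survive the hst-alma overlap-mask cut; only the
# masked-in subset is kept in the returned catalog.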
def
import __init__paths
import attr
import spacy
from nltk.corpus import wordnet as wn
from tools import str2seq, read_text_test_data
from config import config_device, config_pwws_use_NE, \
config_data, config_dataset, model_path, config_pww_NNE_attack, BertConfig
import numpy as np
from get_NE_list import NE_list
from functools import partial
from torch import nn
import torch
import time
from transformers import BertTokenizer
from baseline_model import Baseline_Bert, Baseline_LSTM, Baseline_TextCNN
from baseline_config import Baseline_Config, dataset_config
from baseline_data import IMDB_Dataset, AGNEWS_Dataset
'''
ATTENTION:
The three functions below (PWWS, evaluate_word_saliency, adversarial_paraphrase)
are a non-official PyTorch port of https://github.com/JHL-HUST/PWWS
'''
nlp = spacy.load('en_core_web_sm')
supported_pos_tags = [
'CC', # coordinating conjunction, like "and but neither versus whether yet so"
# 'CD', # Cardinal number, like "mid-1890 34 forty-two million dozen"
# 'DT', # Determiner, like all "an both those"
# 'EX', # Existential there, like "there"
# 'FW', # Foreign word
# 'IN', # Preposition or subordinating conjunction, like "among below into"
'JJ', # Adjective, like "second ill-mannered"
'JJR', # Adjective, comparative, like "colder"
'JJS', # Adjective, superlative, like "cheapest"
# 'LS', # List item marker, like "A B C D"
# 'MD', # Modal, like "can must shouldn't"
'NN', # Noun, singular or mass
'NNS', # Noun, plural
'NNP', # Proper noun, singular
'NNPS', # Proper noun, plural
# 'PDT', # Predeterminer, like "all both many"
# 'POS', # Possessive ending, like "'s"
# 'PRP', # Personal pronoun, like "hers herself ours they theirs"
# 'PRP$', # Possessive pronoun, like "hers his mine ours"
'RB', # Adverb
'RBR', # Adverb, comparative, like "lower heavier"
'RBS', # Adverb, superlative, like "best biggest"
# 'RP', # Particle, like "board about across around"
# 'SYM', # Symbol
# 'TO', # to
# 'UH', # Interjection, like "wow goody"
'VB', # Verb, base form
'VBD', # Verb, past tense
'VBG', # Verb, gerund or present participle
'VBN', # Verb, past participle
'VBP', # Verb, non-3rd person singular present
'VBZ', # Verb, 3rd person singular present
# 'WDT', # Wh-determiner, like "that what whatever which whichever"
# 'WP', # Wh-pronoun, like "that who"
# 'WP$', # Possessive wh-pronoun, like "whose"
# 'WRB', # Wh-adverb, like "however wherever whenever"
]
def PWWS(
doc,
true_y,
word_saliency_list=None,
rank_fn=None,
heuristic_fn=None, # Defined in adversarial_tools.py
halt_condition_fn=None, # Defined in adversarial_tools.py
verbose=True,
sub_rate_limit=None):
# defined in Eq.(8)
def softmax(x):
exp_x = np.exp(x)
softmax_x = exp_x / np.sum(exp_x)
return softmax_x
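# Hedged note (added comment, not part of the original code): this softmax can overflow
# for large saliency values; a numerically stable variant subtracts the maximum first:
#
#     exp_x = np.exp(x - np.max(x))
#     softmax_x = exp_x / np.sum(exp_x)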
heuristic_fn = heuristic_fn or (
lambda _, candidate: candidate.similarity_rank)
halt_condition_fn = halt_condition_fn or (lambda perturbed_text: False)
perturbed_doc = doc
perturbed_text = perturbed_doc.text
substitute_count = 0 # calculate how many substitutions used in a doc
substitute_tuple_list = [] # save the information of substitute word
word_saliency_array = np.array(
[word_tuple[2] for word_tuple in word_saliency_list])
word_saliency_array = softmax(word_saliency_array)
NE_candidates = NE_list.L[config_dataset][true_y]
NE_tags = list(NE_candidates.keys())
use_NE = config_pwws_use_NE # whether to use NEs as substitutes
NNE_attack = config_pww_NNE_attack
max_len = config_data[config_dataset].padding_maxlen
if sub_rate_limit:
sub_rate_limit = int(sub_rate_limit * len(doc))
else:
sub_rate_limit = len(doc)
# for each word w_i in x, use WordNet to build a synonym set L_i
for (position, token, word_saliency, tag) in word_saliency_list:
if position >= max_len:
break
candidates = []
if use_NE:
NER_tag = token.ent_type_
if NER_tag in NE_tags:
for idx, ne_word in enumerate(NE_candidates[NER_tag]):
if idx >= 250:
break
candidate = SubstitutionCandidate(position, 0, token, ne_word)
candidates.append(candidate)
else:
if NNE_attack:
candidates = _generate_synonym_candidates(
token=token, token_position=position, rank_fn=rank_fn)
else:
if NNE_attack:
candidates = _generate_synonym_candidates(
token=token, token_position=position, rank_fn=rank_fn)
if len(candidates) == 0:
continue
# The substitute word selection method R(w_i;L_i) defined in Eq.(4)
sorted_candidates = zip(
map(partial(heuristic_fn, doc.text), candidates), candidates)
# Sorted according to the return value of heuristic_fn function, that is, \Delta P defined in Eq.(4)
sorted_candidates = list(sorted(sorted_candidates, key=lambda t: t[0]))
# delta_p_star is defined in Eq.(5); substitute is w_i^*
delta_p_star, substitute = sorted_candidates.pop()
# delta_p_star * word_saliency_array[position] equals H(x, x_i^*, w_i) defined in Eq.(7)
substitute_tuple_list.append(
(position, token.text, substitute,
delta_p_star * word_saliency_array[position], token.tag_))
# sort all the words w_i in x in descending order based on H(x, x_i^*, w_i)
sorted_substitute_tuple_list = sorted(substitute_tuple_list,
key=lambda t: t[3],
reverse=True)
# replace w_i in x^(i-1) with w_i^* to craft x^(i)
NE_count = 0 # calculate how many NE used in a doc
change_tuple_list = []
for (position, token, substitute, score,
tag) in sorted_substitute_tuple_list:
if len(change_tuple_list) > sub_rate_limit:
break
# if score <= 0:
# break
if nlp(token)[0].ent_type_ in NE_tags:
NE_count += 1
change_tuple_list.append((position, token, substitute, score, tag))
perturbed_text = ' '.join(
_compile_perturbed_tokens(perturbed_doc, [substitute]))
perturbed_doc = nlp(perturbed_text)
substitute_count += 1
if halt_condition_fn(perturbed_text):
if verbose:
print("use", substitute_count, "substitution; use", NE_count,
'NE')
sub_rate = substitute_count / len(doc)
if substitute_count == 0:
NE_rate = 0.0
else:
NE_rate = NE_count / substitute_count
return perturbed_text, sub_rate, NE_rate, change_tuple_list
if verbose:
print("use", substitute_count, "substitution; use", NE_count, 'NE')
sub_rate = substitute_count / len(doc)
if substitute_count == 0:
NE_rate = 0.0
else:
NE_rate = NE_count / substitute_count
return perturbed_text, sub_rate, NE_rate, change_tuple_list
@attr.s
class SubstitutionCandidate:
token_position = attr.ib()
similarity_rank = attr.ib()
original_token = attr.ib()
candidate_word = attr.ib()
def vsm_similarity(doc, original, synonym):
window_size = 3
start = max(0, original.i - window_size)
try:
sim = doc[start:original.i + window_size].similarity(synonym)
except:
synonym = nlp(synonym.text)
sim = doc[start:original.i + window_size].similarity(synonym)
return sim
def _get_wordnet_pos(spacy_token):
'''Wordnet POS tag'''
pos = spacy_token.tag_[0].lower()
if pos in ['r', 'n', 'v']: # adv, noun, verb
return pos
elif pos == 'j':
return 'a' # adj
def _synonym_prefilter_fn(token, synonym):
'''
Similarity heuristics go here
'''
if (len(synonym.text.split()) > 2  # the synonym produced is a phrase
or synonym.lemma == token.lemma  # token and synonym are the same
or synonym.tag != token.tag  # the POS of the token and the synonym differ
or token.text.lower() == 'be'):  # the token is 'be'
return False
else:
return True
else:
return True
def _generate_synonym_candidates(token, token_position, rank_fn=None):
'''
Generate synonym candidates.
For each token in the doc, the list of WordNet synonyms is expanded.
:return candidates, a list, whose type of element is <class '__main__.SubstitutionCandidate'>
like SubstitutionCandidate(token_position=0, similarity_rank=10, original_token=Soft, candidate_word='subdued')
'''
if rank_fn is None:
rank_fn = vsm_similarity
candidates = []
if token.tag_ in supported_pos_tags:
wordnet_pos = _get_wordnet_pos(token) # 'r', 'a', 'n', 'v' or None
wordnet_synonyms = []
synsets = wn.synsets(token.text, pos=wordnet_pos)
for synset in synsets:
wordnet_synonyms.extend(synset.lemmas())
synonyms = []
for wordnet_synonym in wordnet_synonyms:
spacy_synonym = nlp(wordnet_synonym.name().replace('_', ' '))[0]
synonyms.append(spacy_synonym)
synonyms = filter(partial(_synonym_prefilter_fn, token), synonyms)
candidate_set = set()
for _, synonym in enumerate(synonyms):
candidate_word = synonym.text
if candidate_word in candidate_set: # avoid repetition
continue
candidate_set.add(candidate_word)
candidate = SubstitutionCandidate(token_position=token_position,
similarity_rank=None,
original_token=token,
candidate_word=candidate_word)
candidates.append(candidate)
return candidates
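# Hedged usage sketch (assumption, not part of the original file): what the WordNet lookup
# above boils down to for a single word, stripped of the spaCy/POS prefiltering.
def _wordnet_synonyms_sketch(word, pos='n'):
    # collect unique lemma names across all synsets of `word` with the given POS
    names = set()
    for synset in wn.synsets(word, pos=pos):
        for lemma in synset.lemmas():
            names.add(lemma.name().replace('_', ' '))
    return sorted(names)
# e.g. _wordnet_synonyms_sketch('film') would typically include 'movie' and 'picture'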
def _compile_perturbed_tokens(doc, accepted_candidates):
'''
Traverse the list of accepted candidates and do the token substitutions.
'''
candidate_by_position = {}
for candidate in accepted_candidates:
candidate_by_position[candidate.token_position] = candidate
final_tokens = []
for position, token in enumerate(doc):
word = token.text
if position in candidate_by_position:
candidate = candidate_by_position[position]
word = candidate.candidate_word.replace('_', ' ')
final_tokens.append(word)
return final_tokens
def evaluate_word_saliency(doc, origin_vector, input_y, net):
word_saliency_list = []
# zero out the encoding of the current word and calculate the change in the classification probability
max_len = config_data[config_dataset].padding_maxlen
origin_prob = net.predict_prob(origin_vector, input_y)[0]
for position in range(len(doc)):
if position >= max_len:
break
without_word_vector = origin_vector.clone().detach().to(config_device)
without_word_vector[position] = 0
prob_without_word = net.predict_prob(without_word_vector, input_y)[0]
# calculate S(x,w_i) defined in Eq.(6)
word_saliency = origin_prob - prob_without_word
word_saliency_list.append(
(position, doc[position], word_saliency, doc[position].tag_))
position_word_list = []
for word in word_saliency_list:
position_word_list.append((word[0], word[1]))
return position_word_list, word_saliency_list
def adversarial_paraphrase(input_text,
origin_vector,
true_y,
net: nn.Module,
tokenizer,
verbose=True,
sub_rate_limit=None):
'''
Compute a perturbation, greedily choosing the synonym if it causes the most
significant change in the classification probability after replacement
:return perturbed_text: generated adversarial example
:return perturbed_y: predicted class of perturbed_text
:return sub_rate: word replacement rate showed in Table 3
:return change_tuple_list: list of substitute words
'''
def halt_condition_fn(perturbed_text):
'''
Halt if model output is changed.
'''
maxlen = config_data[config_dataset].padding_maxlen
perturbed_vector = str2seq(perturbed_text, maxlen,
tokenizer).to(config_device)
predict = net.predict_class(perturbed_vector)[0]
return predict != true_y
def heuristic_fn(text, candidate):
'''
Return the difference between the classification probability of the original
word and the candidate substitute synonym, which is defined in Eq.(4) and Eq.(5).
'''
doc = nlp(text)
maxlen = config_data[config_dataset].padding_maxlen
perturbed_tokens = _compile_perturbed_tokens(doc, [candidate])
perturbed_doc = ' '.join(perturbed_tokens)
perturbed_vector = str2seq(perturbed_doc, maxlen,
tokenizer).to(config_device)
adv_y = net.predict_prob(perturbed_vector, true_y)[0]
ori_y =
# BSD 3-Clause License
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the psutil authors nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
SequenceClassifierOutput,
)
from transformers import BertLayer, BertPreTrainedModel
from transformers.models.bert.modeling_bert import BertEmbeddings, BertPooler
from patrickstar.core.checkpoint import checkpoint as ckp
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[BertLayer(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = (
() if output_attentions and self.config.add_cross_attention else None
)
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
layer_outputs = ckp(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
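# Hedged illustration (added, not part of the original file): when gradient checkpointing
# is enabled above, each BertLayer forward is wrapped by patrickstar's `ckp`, so activations
# are recomputed during the backward pass instead of being stored. The same idea with the
# standard torch.utils.checkpoint API looks roughly like this:
def _checkpoint_sketch():
    import torch
    from torch.utils.checkpoint import checkpoint

    layer = torch.nn.Linear(16, 16)      # stand-in for a transformer layer
    x = torch.randn(2, 16, requires_grad=True)
    y = checkpoint(layer, x)             # activations recomputed on backward
    y.sum().backward()
    return x.grad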
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME> and <NAME>.
To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:
`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers`
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2] if past_key_values is not None else 0
)
if attention_mask is None:
attention_mask = torch.ones(
((batch_size, seq_length + past_key_values_length)), device=device
)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
batch_size, seq_length
)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=device
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, device
)
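# Note (added comment): get_extended_attention_mask broadcasts the 2D padding mask to
# [batch_size, 1, 1, seq_length] and turns it into an additive mask (0 for kept tokens,
# a large negative value for masked ones) so it can simply be added to the raw attention scores.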
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
| |
import sys
import os
import subprocess
import shutil
import struct
import ctypes
import configparser
import locale
import fsb5
# PySide bindings
import PySide
from PySide.QtGui import *
from PySide.QtCore import *
from PySide.QtOpenGL import *
from PySide.QtMultimedia import *
from OpenGL.GL import *
import res_rc
langs = [
"en_US",
"ru_RU"
]
developers = [
"aspadm",
"volfin",
"<NAME>",
"erik945",
"Vindis",
"<NAME>"
]
version = "1.1 (17.09.2017)"
ext_preview = [
"pc_weightedprim",
"pc_tex",
"pc_prim",
"pc_linkedprim",
]
ext_non_preview = [
"pc_hair",
"pc_apex",
"pc_swf",
"pc_fsb"
]
all_types = [
"pc_animation", "pc_animationrig", "pc_animset", "pc_apex", "pc_binkvid",
"pc_bonerig", "pc_chartype", "pc_coll", "pc_collisionlayers",
"pc_coverdata", "pc_curvetex", "pc_decalpreset", "pc_entityblueprint",
"pc_entityresource", "pc_entitytemplate", "pc_entitytype", "pc_facefx",
"pc_fontdef", "pc_fsb", "pc_fsbm", "pc_hair", "pc_ies", "pc_irv",
"pc_kitsystem", "pc_linkedprim", "pc_localized-fontdef",
"pc_localized-swf", "pc_localized-textlist", "pc_localized-wavebank",
"pc_mate", "pc_mi", "pc_musiccomp", "pc_navmesh", "pc_platformspecific",
"pc_prim", "pc_rawentityblueprint", "pc_rawentitytype", "pc_rbs",
"pc_resourceidx", "pc_resourcelist", "pc_rigdataresource", "pc_rtet",
"pc_sdefs", "pc_socialresource", "pc_staticvisibility", "pc_swf", "pc_tex",
"pc_textline", "pc_textlist", "pc_umbra", "pc_volumetricnavgraph",
"pc_wavebank", "pc_wavebankfx", "pc_weightedprim"
]
ext_types = []
if os.path.isfile("unpack_ext.txt"):
for i in open("unpack_ext.txt", "r"):
if len(i) > 0:
if i[-1] == "\n":
i = i[:-1]
if i in all_types:
ext_types.append(i)
if ext_types == []:
ext_types = ext_preview[:] + ext_non_preview[:]
types_3d = [
"prim",
"weightedprim",
"linkedprim",
"staticscenecollisiondef",
"apx"
]
types_tex = [
"tex"
]
types_audio = [
"wavebank"
]
save_filter = [
"OBJ - Wavefront Object (*.obj);;\
FBX - Autodesk Filmbox (*.fbx);;\
3DS - 3D Studio Graphics (*.3ds);;\
STL - Stereolithography Interface Format (*.stl);;\
PRIM - DEMD native model (*.bin)",
"PNG - Portable Network Graphics (*.png);;\
TGA - Targa bitmap (*.tga);;\
JPG - JPEG (*.jpg);;\
TIF - Tagged Image Format File (*.tif);;\
DDS - Direct Draw Surface (*.dds);;\
TXET - DEMD native texture (*.tex)"
]
save_ext_tex = [
"png", "tga",
"jpg", "tif",
"dds", "tex"
]
save_ext_3d = [
"obj", "fbx",
"3ds", "stl",
"bin"
]
tree_list = [] # List of files by levels
folder_tree = {} # Pairs of filename: dirname
path = ""
lpath = os.getcwd() + "\\"
tpath = lpath+"temp_files\\"
last_dir = ""
last_filter = ["", ""]
# Toolset
dds_converter = ""
tex_converter = ""
unpacker = ""
blender = ""
# Fast export
epath = ""
ext_textures = ""
ext_models = ""
# Current element
file_name = ""
file_parent = ""
cur_hash = ""
cur_item = QTreeWidgetItem()
lang_name = ""
first_launch = False
icons = {"prim": ":/3d.png",
"tex": ":/tex.png",
"platform-tex": ":/tex.png",
"linkedprim": ":/3da.png",
"weightedprim": ":/3db.png",
"apx": ":/apx.png",
"hair": ":/hair.png",
"wavebank": ":/mus.png"}
#
##### 3D viewer class
class GLWidget(QGLWidget):
def __init__(self, parent=None, shareWidget=None):
super(GLWidget, self).__init__(parent, shareWidget)
self.clearColor = Qt.white
self.zoom_scale = 0.001 # wheel step
self.angle = 15 # arrows angle step
self.angle_scale = 1.0 # mouse rotation
self.move_scale = 0.001 # mouse position
self.size_scale = 0.02 # mouse scaling
self.size_hint = 0.7 # avoid clipping
# Model rotation
self.xRot = -90
self.yRot = 0
self.zRot = 180
# Model screen position and scale
self.xOff = 0.0
self.yOff = -0.5
self.zOff = 0.9
# Last mouse pos
self.lastPos = QPoint()
self.v_list = []
self.n_list = []
self.v_count = 0
self.can_show = False
def add_vertex(self, vertex):
vertex[0][0] *= -1
vertex[1][0] *= -1
vertex[2][0] *= -1
for i in range(3):
for j in range(3):
self.size_hint = max(self.size_hint, vertex[i][j])
for v in vertex[2]:
self.v_list.append(v)
for v in vertex[1]:
self.v_list.append(v)
for v in vertex[0]:
self.v_list.append(v)
Ux = vertex[1][0] - vertex[2][0]
Uy = vertex[1][1] - vertex[2][1]
Uz = vertex[1][2] - vertex[2][2]
Vx = vertex[0][0] - vertex[2][0]
Vy = vertex[0][1] - vertex[2][1]
Vz = vertex[0][2] - vertex[2][2]
normal = [Uy*Vz - Uz*Vy, Uz*Vx - Ux*Vz, Ux*Vy - Uy*Vx]
for k in range(3):
for v in normal:
self.n_list.append(v)
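# Note (added comment): the normal computed above is the cross product U x V of two
# triangle edges, appended once per vertex so the flat-shaded triangle gets a uniform normal.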
def read_model(self, name):
self.v_list = []
self.n_list = []
try:
model = open(name, "rb")
except:
return 1
vert_buf = [1, 1, 1]
model.read(80)
count = struct.unpack("I", model.read(4))[0]
for i in range(count):
model.read(12)
for j in range(3):
buf = model.read(12)
vert_buf[j] = list(struct.unpack("3f", buf))[:]
model.read(2)
self.add_vertex(vert_buf)
model.close()
self.v_count = count * 3
return 0
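# Note (added comment): read_model above parses the binary STL layout: an 80-byte header,
# a uint32 triangle count, then per triangle a 12-byte facet normal (skipped here),
# three 12-byte vertices, and a 2-byte attribute byte count.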
def load_model(self, name):
self.reset_view()
self.read_model(name)
self.size_hint = 1/self.size_hint
self.size_hint = max(min(0.7, self.size_hint), 0.0001)
self.initializeGL()
def unload_model(self):
self.can_show = False
posAttrib = glGetAttribLocation(self.shaderProgram, b"position")
glDisableVertexAttribArray(posAttrib)
glDeleteBuffers(1, [self.vbo])
glDeleteVertexArrays(1, [self.vao])
def initializeGL(self):
#glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glShadeModel(GL_FLAT)
glEnable(GL_DEPTH_TEST)
#glEnable(GL_CULL_FACE)
# get Vertex Array Object name
self.vao = glGenVertexArrays(1)
# set this new VAO to the active one
glBindVertexArray(self.vao)
# vertex data for one triangle
triangle_vertices = self.v_list[:] + self.n_list[:]
# convert to ctypes c_float array
triangle_array = ((ctypes.c_float * len(triangle_vertices))
(*triangle_vertices))
# get a VBO name from the graphics card
self.vbo = glGenBuffers(1)
# bind our vbo name to the GL_ARRAY_BUFFER target
glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
# move the vertex data to a new data store associated with our vbo
glBufferData(GL_ARRAY_BUFFER, ctypes.sizeof(triangle_array),
triangle_array, GL_STATIC_DRAW)
# vertex shader
vertexShaderProgram = r"""#version 130
in vec3 position;
in vec3 normal;
varying vec4 t_color;
uniform float scale_z;
void main() {
mat4 scale_m = mat4(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, scale_z, 0.0,
0.0, 0.0, 0.0, 1.0);
mat4 mvmatrix = scale_m * gl_ModelViewMatrix;
gl_Position = mvmatrix * vec4 (position, 1.0);
const vec3 l = vec3(0.0, 0.0, 1.0);
vec3 n = normalize(gl_NormalMatrix * normal);
float snormal = dot(n, l);
vec4 color;
if (snormal < 0.0)
color = vec4 (-0.55, -0.55, -0.55, 1.0) * snormal;
else
color = vec4 (0.25, 0.22, 0.0, 1.0) * snormal;
vec4 spec = vec4(0.6, 0.5, 0.0, 1.0) * pow(max(dot(n, normalize(vec3(0.0, 1.1, -1.0))), 0.0), 20.0);
vec4 spec2 = vec4(0.5, 0.4, 0.0, 0.0) * pow(max(dot(n, normalize(vec3(0.0, -1.2, -1.0))), 0.0), 15.0);
t_color = color + spec + spec2;
}"""
vertexShader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(vertexShader, vertexShaderProgram)
glCompileShader(vertexShader)
# fragment shader
fragmentShaderProgram = r"""#version 130
varying vec4 t_color;
out vec4 outColor;
void main() {
outColor = t_color;
}"""
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource(fragmentShader, fragmentShaderProgram)
glCompileShader(fragmentShader)
# shader program
self.shaderProgram = glCreateProgram()
glAttachShader(self.shaderProgram, vertexShader)
glAttachShader(self.shaderProgram, fragmentShader)
# color output buffer assignment
glBindFragDataLocation(self.shaderProgram, 0, b"outColor")
# link the program
glLinkProgram(self.shaderProgram)
# validate the program
glValidateProgram(self.shaderProgram)
# activate the program
glUseProgram(self.shaderProgram)
self.can_show = True
self.setupViewport(self.width(), self.height())
def mousePressEvent(self, event):
self.lastPos = event.pos()
def wheelEvent(self, event):
self.zOff += self.zoom_scale * event.delta()
if self.zOff <= 0:
self.zOff = 0.00001
self.updateGL()
def reload_model(self, name):
self.unload_model()
self.load_model(name)
self.updateGL()
def reset_view(self):
self.xOff = 0.0
self.yOff = -0.5
self.zOff = 0.9
self.setXRotation(-90)
self.setYRotation(0)
self.setZRotation(180)
self.updateGL()
def mouseDoubleClickEvent(self, event):
if event.buttons() & Qt.MiddleButton:
self.reset_view()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Up:
self.setXRotation(self.xRot - self.angle)
self.updateGL()
if event.key() == Qt.Key_Down:
self.setXRotation(self.xRot + self.angle)
self.updateGL()
if event.key() == Qt.Key_Right:
self.setZRotation(self.zRot + self.angle)
self.updateGL()
if event.key() == Qt.Key_Left:
self.setZRotation(self.zRot - self.angle)
self.updateGL()
def mouseMoveEvent(self, event):
dx = event.x() - self.lastPos.x()
dy = event.y() - self.lastPos.y()
if event.buttons() & Qt.LeftButton:
self.xOff += self.move_scale * dx
self.yOff -= self.move_scale * dy
elif event.buttons() & Qt.RightButton:
self.setXRotation(self.xRot - self.angle_scale * dy)
self.setZRotation(self.zRot - self.angle_scale * dx)
elif event.buttons() & Qt.MiddleButton:
self.zOff += self.size_scale * dy
if self.zOff <= 0:
self.zOff = 0.00001
self.lastPos = event.pos()
self.updateGL()
def normalizeAngle(self, angle):
# Python's % operator already maps negative angles into [0, 360)
if angle < 0 or angle > 359:
angle = angle % 360
return angle
def resizeGL(self, width, height):
self.setupViewport(width, height)
def setupViewport(self, width, height):
side = max(width, height)
glViewport((width - side) // 2, (height - side) // 2, side, side)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
glDisable(GL_CLIP_PLANE0)
glMatrixMode(GL_MODELVIEW)
def paintGL(self):
self.qglClearColor(self.clearColor)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
# Change scroll offset
#glScalef(self.zOff, self.zOff, self.zOff)
glTranslatef(self.xOff, self.yOff, 0.0)
glScalef(self.zOff, self.zOff, self.zOff)
glRotatef(self.xRot, 1.0, 0.0, 0.0)
glRotatef(self.yRot, 0.0, 1.0, 0.0)
glRotatef(self.zRot, 0.0, 0.0, 1.0)
loc = glGetUniformLocation(self.shaderProgram, b"scale_z")
glUniform1f(loc, self.size_hint / self.zOff)
if self.can_show:
# Choose buffer; triangles
posAttrib = glGetAttribLocation(self.shaderProgram, b"position")
glEnableVertexAttribArray(posAttrib)
glVertexAttribPointer(posAttrib,
3,
GL_FLOAT,
False,
0,
ctypes.c_void_p(0))
nAttrib = glGetAttribLocation(self.shaderProgram, b"normal")
glEnableVertexAttribArray(nAttrib)
glVertexAttribPointer(nAttrib,
3,
GL_FLOAT,
False,
0,
ctypes.c_void_p(
self.v_count * 3 * ctypes.sizeof(ctypes.c_float)))
# Draw triangles
glDrawArrays(GL_TRIANGLES, 0, self.v_count)
# Floor
glBegin(GL_LINES)
for i in range(21):
glVertex3f(-1.0 + 0.1 * i, -1.0, 0.0)
glVertex3f(-1.0 + 0.1 * i, +1.0, 0.0)
glVertex3f(-1.0, -1.0 + 0.1 * i, 0.0)
glVertex3f(+1.0, -1.0 + 0.1 * i, 0.0)
glEnd()
def setXRotation(self, angle):
angle = self.normalizeAngle(angle)
if angle != self.xRot:
self.xRot = angle
def setYRotation(self, angle):
angle = self.normalizeAngle(angle)
if angle != self.yRot:
self.yRot = angle
def setZRotation(self, angle):
angle = self.normalizeAngle(angle)
if angle != self.zRot:
self.zRot = angle
# End of 3D widget
def create_blank_config():
config = configparser.ConfigParser()
config.add_section("Main")
config.add_section("Tools")
config.add_section("Export")
config.set("Main", "last_base", "")
config.set("Main", "language", detect_locale())
if os.path.isdir(lpath+"tools\\Blender"):
bpath = lpath+"tools\\Blender"
elif os.path.isdir("C:\\Program Files (x86)\\Blender Foundation\\Blender"):
bpath = "C:\\Program Files (x86)\\Blender Foundation\\Blender"
else:
| |
self.assertEqual(self._callFUT(IException), True)
def test_is_IException_subinterface(self):
from pyramid.interfaces import IException
class ISubException(IException):
pass
self.assertEqual(self._callFUT(ISubException), True)
class TestMultiView(unittest.TestCase):
def _getTargetClass(self):
from pyramid.config.views import MultiView
return MultiView
def _makeOne(self, name='name'):
return self._getTargetClass()(name)
def test_class_implements_ISecuredView(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import ISecuredView
verifyClass(ISecuredView, self._getTargetClass())
def test_instance_implements_ISecuredView(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import ISecuredView
verifyObject(ISecuredView, self._makeOne())
def test_add(self):
mv = self._makeOne()
mv.add('view', 100)
self.assertEqual(mv.views, [(100, 'view', None)])
mv.add('view2', 99)
self.assertEqual(mv.views, [(99, 'view2', None), (100, 'view', None)])
mv.add('view3', 100, 'text/html')
self.assertEqual(mv.media_views['text/html'], [(100, 'view3', None)])
mv.add('view4', 99, 'text/html', 'abc')
self.assertEqual(mv.media_views['text/html'],
[(99, 'view4', 'abc'), (100, 'view3', None)])
mv.add('view5', 100, 'text/xml')
self.assertEqual(mv.media_views['text/xml'], [(100, 'view5', None)])
self.assertEqual(set(mv.accepts), set(['text/xml', 'text/html']))
self.assertEqual(mv.views, [(99, 'view2', None), (100, 'view', None)])
mv.add('view6', 98, 'text/*')
self.assertEqual(mv.views, [(98, 'view6', None),
(99, 'view2', None),
(100, 'view', None)])
def test_add_with_phash(self):
mv = self._makeOne()
mv.add('view', 100, phash='abc')
self.assertEqual(mv.views, [(100, 'view', 'abc')])
mv.add('view', 100, phash='abc')
self.assertEqual(mv.views, [(100, 'view', 'abc')])
mv.add('view', 100, phash='def')
self.assertEqual(mv.views, [(100, 'view', 'abc'),
(100, 'view', 'def')])
mv.add('view', 100, phash='abc')
self.assertEqual(mv.views, [(100, 'view', 'abc'),
(100, 'view', 'def')])
def test_add_with_phash_override_accept(self):
mv = self._makeOne()
def view1(): pass
def view2(): pass
def view3(): pass
mv.add(view1, 100, accept='text/html', phash='abc')
mv.add(view2, 100, accept='text/html', phash='abc')
mv.add(view3, 99, accept='text/html', phash='def')
self.assertEqual(mv.media_views['text/html'],
[(99, view3, 'def'), (100, view2, 'abc')])
def test_add_with_phash_override_accept2(self):
mv = self._makeOne()
def view1(): pass
def view2(): pass
def view3(): pass
mv.add(view1, 100, accept='text/html', phash='abc')
mv.add(view2, 100, accept='text/html', phash='def')
mv.add(view3, 99, accept='text/html', phash='ghi')
self.assertEqual(mv.media_views['text/html'],
[(99, view3, 'ghi'),
(100, view1, 'abc'),
(100, view2, 'def')]
)
def test_multiple_with_functions_as_views(self):
# this failed on py3 at one point, because functions aren't orderable
# and we were sorting the views via a plain sort() rather than
# sort(key=itemgetter(0)).
def view1(request): pass
def view2(request): pass
mv = self._makeOne()
mv.add(view1, 100, None)
self.assertEqual(mv.views, [(100, view1, None)])
mv.add(view2, 100, None)
self.assertEqual(mv.views, [(100, view1, None), (100, view2, None)])
def test_get_views_request_has_no_accept(self):
request = DummyRequest()
mv = self._makeOne()
mv.views = [(99, lambda *arg: None)]
self.assertEqual(mv.get_views(request), mv.views)
def test_get_views_no_self_accepts(self):
request = DummyRequest()
request.accept = True
mv = self._makeOne()
mv.accepts = []
mv.views = [(99, lambda *arg: None)]
self.assertEqual(mv.get_views(request), mv.views)
def test_get_views(self):
request = DummyRequest()
request.accept = DummyAccept('text/html')
mv = self._makeOne()
mv.accepts = ['text/html']
mv.views = [(99, lambda *arg: None)]
html_views = [(98, lambda *arg: None)]
mv.media_views['text/html'] = html_views
self.assertEqual(mv.get_views(request), html_views + mv.views)
def test_get_views_best_match_returns_None(self):
request = DummyRequest()
request.accept = DummyAccept(None)
mv = self._makeOne()
mv.accepts = ['text/html']
mv.views = [(99, lambda *arg: None)]
self.assertEqual(mv.get_views(request), mv.views)
def test_match_not_found(self):
from pyramid.httpexceptions import HTTPNotFound
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
self.assertRaises(HTTPNotFound, mv.match, context, request)
def test_match_predicate_fails(self):
from pyramid.httpexceptions import HTTPNotFound
mv = self._makeOne()
def view(context, request):
""" """
view.__predicated__ = lambda *arg: False
mv.views = [(100, view, None)]
context = DummyContext()
request = DummyRequest()
self.assertRaises(HTTPNotFound, mv.match, context, request)
def test_match_predicate_succeeds(self):
mv = self._makeOne()
def view(context, request):
""" """
view.__predicated__ = lambda *arg: True
mv.views = [(100, view, None)]
context = DummyContext()
request = DummyRequest()
result = mv.match(context, request)
self.assertEqual(result, view)
def test_permitted_no_views(self):
from pyramid.httpexceptions import HTTPNotFound
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
self.assertRaises(HTTPNotFound, mv.__permitted__, context, request)
def test_permitted_no_match_with__permitted__(self):
mv = self._makeOne()
def view(context, request):
""" """
mv.views = [(100, view, None)]
self.assertEqual(mv.__permitted__(None, None), True)
def test_permitted(self):
mv = self._makeOne()
def view(context, request):
""" """
def permitted(context, request):
return False
view.__permitted__ = permitted
mv.views = [(100, view, None)]
context = DummyContext()
request = DummyRequest()
result = mv.__permitted__(context, request)
self.assertEqual(result, False)
def test__call__not_found(self):
from pyramid.httpexceptions import HTTPNotFound
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
self.assertRaises(HTTPNotFound, mv, context, request)
def test___call__intermediate_not_found(self):
from pyramid.exceptions import PredicateMismatch
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
request.view_name = ''
expected_response = DummyResponse()
def view1(context, request):
raise PredicateMismatch
def view2(context, request):
return expected_response
mv.views = [(100, view1, None), (99, view2, None)]
response = mv(context, request)
self.assertEqual(response, expected_response)
def test___call__raise_not_found_isnt_interpreted_as_pred_mismatch(self):
from pyramid.httpexceptions import HTTPNotFound
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
request.view_name = ''
def view1(context, request):
raise HTTPNotFound
def view2(context, request):
""" """
mv.views = [(100, view1, None), (99, view2, None)]
self.assertRaises(HTTPNotFound, mv, context, request)
def test___call__(self):
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
request.view_name = ''
expected_response = DummyResponse()
def view(context, request):
return expected_response
mv.views = [(100, view, None)]
response = mv(context, request)
self.assertEqual(response, expected_response)
def test__call_permissive__not_found(self):
from pyramid.httpexceptions import HTTPNotFound
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
self.assertRaises(HTTPNotFound, mv, context, request)
def test___call_permissive_has_call_permissive(self):
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
request.view_name = ''
expected_response = DummyResponse()
def view(context, request):
""" """
def permissive(context, request):
return expected_response
view.__call_permissive__ = permissive
mv.views = [(100, view, None)]
response = mv.__call_permissive__(context, request)
self.assertEqual(response, expected_response)
def test___call_permissive_has_no_call_permissive(self):
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
request.view_name = ''
expected_response = DummyResponse()
def view(context, request):
return expected_response
mv.views = [(100, view, None)]
response = mv.__call_permissive__(context, request)
self.assertEqual(response, expected_response)
def test__call__with_accept_match(self):
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
request.accept = DummyAccept('text/html', 'text/xml')
expected_response = DummyResponse()
def view(context, request):
return expected_response
mv.views = [(100, None)]
mv.media_views['text/xml'] = [(100, view, None)]
mv.accepts = ['text/xml']
response = mv(context, request)
self.assertEqual(response, expected_response)
def test__call__with_accept_miss(self):
mv = self._makeOne()
context = DummyContext()
request = DummyRequest()
request.accept = DummyAccept('text/plain', 'text/html')
expected_response = DummyResponse()
def view(context, request):
return expected_response
mv.views = [(100, view, None)]
mv.media_views['text/xml'] = [(100, None, None)]
mv.accepts = ['text/xml']
response = mv(context, request)
self.assertEqual(response, expected_response)
class TestDefaultViewMapper(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.registry = self.config.registry
def tearDown(self):
del self.registry
testing.tearDown()
def _makeOne(self, **kw):
from pyramid.config.views import DefaultViewMapper
kw['registry'] = self.registry
return DefaultViewMapper(**kw)
def _makeRequest(self):
request = DummyRequest()
request.registry = self.registry
return request
def test_view_as_function_context_and_request(self):
def view(context, request):
return 'OK'
mapper = self._makeOne()
result = mapper(view)
self.assertTrue(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test__view_as_function_with_attr(self):
def view(context, request):
""" """
mapper = self._makeOne(attr='__name__')
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertRaises(TypeError, result, None, request)
def test_view_as_function_requestonly(self):
def view(request):
return 'OK'
mapper = self._makeOne()
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_function_requestonly_with_attr(self):
def view(request):
""" """
mapper = self._makeOne(attr='__name__')
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertRaises(TypeError, result, None, request)
def test_view_as_newstyle_class_context_and_request(self):
class view(object):
def __init__(self, context, request):
pass
def __call__(self):
return 'OK'
mapper = self._makeOne()
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_newstyle_class_context_and_request_with_attr(self):
class view(object):
def __init__(self, context, request):
pass
def index(self):
return 'OK'
mapper = self._makeOne(attr='index')
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_newstyle_class_requestonly(self):
class view(object):
def __init__(self, request):
pass
def __call__(self):
return 'OK'
mapper = self._makeOne()
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_newstyle_class_requestonly_with_attr(self):
class view(object):
def __init__(self, request):
pass
def index(self):
return 'OK'
mapper = self._makeOne(attr='index')
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_oldstyle_class_context_and_request(self):
class view:
def __init__(self, context, request):
pass
def __call__(self):
return 'OK'
mapper = self._makeOne()
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_oldstyle_class_context_and_request_with_attr(self):
class view:
def __init__(self, context, request):
pass
def index(self):
return 'OK'
mapper = self._makeOne(attr='index')
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_oldstyle_class_requestonly(self):
class view:
def __init__(self, request):
pass
def __call__(self):
return 'OK'
mapper = self._makeOne()
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_oldstyle_class_requestonly_with_attr(self):
class view:
def __init__(self, request):
pass
def index(self):
return 'OK'
mapper = self._makeOne(attr='index')
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_instance_context_and_request(self):
class View:
def __call__(self, context, request):
return 'OK'
view = View()
mapper = self._makeOne()
result = mapper(view)
self.assertTrue(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_instance_context_and_request_and_attr(self):
class View:
def index(self, context, request):
return 'OK'
view = View()
mapper = self._makeOne(attr='index')
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_instance_requestonly(self):
class View:
def __call__(self, request):
return 'OK'
view = View()
mapper = self._makeOne()
result = mapper(view)
self.assertFalse(result is view)
request = self._makeRequest()
self.assertEqual(result(None, request), 'OK')
def test_view_as_instance_requestonly_with_attr(self):
class View:
def index(self, request):
| |
be
sent along with the request as metadata.
Returns:
google.cloud.security.privateca_v1.types.CaPool:
A [CaPool][google.cloud.security.privateca.v1.CaPool] represents a group of
[CertificateAuthorities][google.cloud.security.privateca.v1.CertificateAuthority]
that form a trust anchor. A
[CaPool][google.cloud.security.privateca.v1.CaPool]
can be used to manage issuance policies for one or
more
[CertificateAuthority][google.cloud.security.privateca.v1.CertificateAuthority]
resources and to rotate CA certificates in and out of
the trust anchor.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a service.GetCaPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.GetCaPoolRequest):
request = service.GetCaPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_ca_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_ca_pools(self,
request: Union[service.ListCaPoolsRequest, dict] = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListCaPoolsPager:
r"""Lists [CaPools][google.cloud.security.privateca.v1.CaPool].
Args:
request (Union[google.cloud.security.privateca_v1.types.ListCaPoolsRequest, dict]):
The request object. Request message for
[CertificateAuthorityService.ListCaPools][google.cloud.security.privateca.v1.CertificateAuthorityService.ListCaPools].
parent (str):
Required. The resource name of the location associated
with the
[CaPools][google.cloud.security.privateca.v1.CaPool], in
the format ``projects/*/locations/*``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.security.privateca_v1.services.certificate_authority_service.pagers.ListCaPoolsPager:
Response message for
[CertificateAuthorityService.ListCaPools][google.cloud.security.privateca.v1.CertificateAuthorityService.ListCaPools].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a service.ListCaPoolsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.ListCaPoolsRequest):
request = service.ListCaPoolsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_ca_pools]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListCaPoolsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
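# Hedged usage sketch (added comment; the client class name and resource path below are
# assumptions, not shown in this snippet): iterating the returned pager fetches further
# pages transparently.
#
#     client = CertificateAuthorityServiceClient()
#     for ca_pool in client.list_ca_pools(parent="projects/my-project/locations/us-central1"):
#         print(ca_pool.name)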
def delete_ca_pool(self,
request: Union[service.DeleteCaPoolRequest, dict] = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Delete a [CaPool][google.cloud.security.privateca.v1.CaPool].
Args:
request (Union[google.cloud.security.privateca_v1.types.DeleteCaPoolRequest, dict]):
The request object. Request message for
[CertificateAuthorityService.DeleteCaPool][google.cloud.security.privateca.v1.CertificateAuthorityService.DeleteCaPool].
name (str):
Required. The resource name for this
[CaPool][google.cloud.security.privateca.v1.CaPool] in
the format ``projects/*/locations/*/caPools/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a service.DeleteCaPoolRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.DeleteCaPoolRequest):
request = service.DeleteCaPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_ca_pool]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
def fetch_ca_certs(self,
request: Union[service.FetchCaCertsRequest, dict] = None,
*,
ca_pool: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> service.FetchCaCertsResponse:
r"""FetchCaCerts returns the current trust anchor for the
[CaPool][google.cloud.security.privateca.v1.CaPool]. This will
include CA certificate chains for all ACTIVE
[CertificateAuthority][google.cloud.security.privateca.v1.CertificateAuthority]
resources in the
[CaPool][google.cloud.security.privateca.v1.CaPool].
Args:
request (Union[google.cloud.security.privateca_v1.types.FetchCaCertsRequest, dict]):
The request object. Request message for
[CertificateAuthorityService.FetchCaCerts][google.cloud.security.privateca.v1.CertificateAuthorityService.FetchCaCerts].
ca_pool (str):
Required. The resource name for the
[CaPool][google.cloud.security.privateca.v1.CaPool] in
the format ``projects/*/locations/*/caPools/*``.
This corresponds to the ``ca_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.security.privateca_v1.types.FetchCaCertsResponse:
Response message for
[CertificateAuthorityService.FetchCaCerts][google.cloud.security.privateca.v1.CertificateAuthorityService.FetchCaCerts].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([ca_pool])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a service.FetchCaCertsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.FetchCaCertsRequest):
request = service.FetchCaCertsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if ca_pool is not None:
request.ca_pool = ca_pool
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.fetch_ca_certs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("ca_pool", request.ca_pool),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
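    # Example (added sketch; the resource name is a placeholder and the field
    # access assumes FetchCaCertsResponse exposes the CA chains as repeated
    # `ca_certs` entries, each carrying a `certificates` list):
    #
    #   response = client.fetch_ca_certs(
    #       ca_pool="projects/my-project/locations/us-central1/caPools/my-pool")
    #   for cert_chain in response.ca_certs:
    #       print(cert_chain.certificates)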
def get_certificate_revocation_list(self,
request: Union[service.GetCertificateRevocationListRequest, dict] = None,
*,
import time
import pytest
from dvslib.dvs_common import wait_for_result
L3_TABLE_TYPE = "L3"
L3_TABLE_NAME = "L3_TEST"
L3_BIND_PORTS = ["Ethernet0"]
L3_RULE_NAME = "L3_TEST_RULE"
class TestNat(object):
def setup_db(self, dvs):
self.app_db = dvs.get_app_db()
self.asic_db = dvs.get_asic_db()
self.config_db = dvs.get_config_db()
def set_interfaces(self, dvs):
dvs.interface_ip_add("Ethernet0", "172.16.31.10/24")
dvs.interface_ip_add("Ethernet4", "172.16.17.32/24")
dvs.port_admin_set("Ethernet0", "up")
dvs.port_admin_set("Etherent4", "up")
dvs.servers[0].runcmd("ip link set down dev eth0")
dvs.servers[0].runcmd("ip link set up dev eth0")
dvs.servers[0].runcmd("ifconfig eth0 172.16.17.32/24")
dvs.servers[0].runcmd("ip route add default via 172.16.31.10")
dvs.servers[1].runcmd("ip link set down dev eth0")
dvs.servers[1].runcmd("ip link set up dev eth0")
dvs.servers[1].runcmd("ifconfig eth0 192.168.127.12/24")
dvs.servers[1].runcmd("ip route add default via 172.16.17.32")
dvs.set_nat_zone("Ethernet0", "1")
time.sleep(1)
def clear_interfaces(self, dvs):
dvs.servers[0].runcmd("ifconfig eth0 0.0.0.0")
dvs.servers[1].runcmd("ifconfig eth0 0.0.0.0")
time.sleep(1)
def test_NatGlobalTable(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# enable NAT feature
dvs.nat_mode_set("enabled")
dvs.nat_timeout_set("450")
dvs.nat_udp_timeout_set("360")
dvs.nat_tcp_timeout_set("900")
# check NAT global values in appdb
self.app_db.wait_for_n_keys("NAT_GLOBAL_TABLE", 1)
fvs = self.app_db.wait_for_entry("NAT_GLOBAL_TABLE", "Values")
assert fvs == {"admin_mode": "enabled", "nat_timeout": "450", "nat_udp_timeout": "360", "nat_tcp_timeout": "900"}
def test_NatInterfaceZone(self, dvs, testlog):
# initialize
self.setup_db(dvs)
self.set_interfaces(dvs)
# check NAT zone is set for interface in app db
fvs = self.app_db.wait_for_entry("INTF_TABLE", "Ethernet0")
zone = False
for f, v in fvs.items():
if f == "nat_zone" and v == '1':
zone = True
break
assert zone
def test_AddNatStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 192.168.127.12")
# add a static nat entry
dvs.add_nat_basic_entry("172.16.31.10", "192.168.127.12")
# check the entry in the config db
self.config_db.wait_for_n_keys("STATIC_NAT", 1)
fvs = self.config_db.wait_for_entry("STATIC_NAT", "172.16.31.10")
assert fvs == {"local_ip": "192.168.127.12"}
# check the entry in app db
self.app_db.wait_for_n_keys("NAT_TABLE", 2)
fvs = self.app_db.wait_for_entry("NAT_TABLE", "172.16.31.10")
assert fvs == {
"translated_ip": "192.168.127.12",
"nat_type": "dnat",
"entry_type": "static"
}
#check the entry in asic db, 3 keys = SNAT, DNAT and DNAT_Pool
keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 3)
for key in keys:
if (key.find("dst_ip:172.16.31.10")) or (key.find("src_ip:192.168.127.12")):
assert True
else:
assert False
def test_DelNatStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# delete a static nat entry
dvs.del_nat_basic_entry("172.16.31.10")
        # check the entry is not there in the config db
self.config_db.wait_for_n_keys("STATIC_NAT", 0)
# check the entry is not there in app db
self.app_db.wait_for_n_keys("NAT_TABLE", 0)
#check the entry is not there in asic db
self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0)
def test_AddNaPtStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 192.168.127.12")
# add a static nat entry
dvs.add_nat_udp_entry("67.66.65.1", "670", "18.18.18.2", "180")
# check the entry in the config db
self.config_db.wait_for_n_keys("STATIC_NAPT", 1)
fvs = self.config_db.wait_for_entry("STATIC_NAPT", "172.16.31.10|UDP|670")
assert fvs == {"local_ip": "18.18.18.2", "local_port": "180"}
# check the entry in app db
self.app_db.wait_for_n_keys("NAPT_TABLE:UDP", 2)
fvs = self.app_db.wait_for_entry("NAPT_TABLE:UDP", "172.16.31.10:670")
assert fvs == {"translated_ip": "18.18.18.2", "translated_l4_port": "180", "nat_type": "dnat", "entry_type": "static"}
#check the entry in asic db, 3 keys = SNAT, DNAT and DNAT_Pool
keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 3)
for key in keys:
if (key.find("dst_ip:172.16.31.10")) and (key.find("key.l4_dst_port:670")):
assert True
if (key.find("src_ip:18.18.18.2")) or (key.find("key.l4_src_port:180")):
assert True
else:
assert False
def test_DelNaPtStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# delete a static nat entry
dvs.del_nat_udp_entry("172.16.31.10", "670")
        # check the entry is not there in the config db
self.config_db.wait_for_n_keys("STATIC_NAPT", 0)
# check the entry is not there in app db
self.app_db.wait_for_n_keys("NAPT_TABLE:UDP", 0)
#check the entry is not there in asic db
self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0)
@pytest.mark.skip(reason="Failing. Under investigation")
def test_AddTwiceNatEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 192.168.127.12")
dvs.servers[1].runcmd("ping -c 1 172.16.17.32")
# add a twice nat entry
dvs.add_twice_nat_basic_entry("172.16.17.32", "172.16.17.32", "snat", "9")
dvs.add_twice_nat_basic_entry("172.16.31.10", "192.168.127.12", "dnat", "9")
# check the entry in the config db
self.config_db.wait_for_n_keys("STATIC_NAT", 2)
fvs = self.config_db.wait_for_entry("STATIC_NAT", "172.16.31.10")
assert fvs == {"nat_type": "dnat", "twice_nat_id": "9", "local_ip": "192.168.127.12"}
fvs = self.config_db.wait_for_entry("STATIC_NAT", "172.16.17.32")
assert fvs == {"nat_type": "snat", "twice_nat_id": "9", "local_ip": "172.16.17.32"}
# check the entry in app db
self.app_db.wait_for_n_keys("NAT_TWICE_TABLE", 2)
fvs = self.app_db.wait_for_entry("NAT_TWICE_TABLE", "172.16.17.32:172.16.31.10")
assert fvs == {"translated_src_ip": "18.18.18.1", "translated_dst_ip": "18.18.18.2", "entry_type": "static"}
fvs = self.app_db.wait_for_entry("NAT_TWICE_TABLE", "18.18.18.2:18.18.18.1")
assert fvs == {"translated_src_ip": "172.16.31.10", "translated_dst_ip": "172.16.17.32", "entry_type": "static"}
#check the entry in asic db, 4 keys = SNAT, DNAT and 2 DNAT_Pools
keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 4)
for key in keys:
if (key.find("dst_ip:18.18.18.1")) or (key.find("src_ip:18.18.18.2")):
assert True
else:
assert False
def test_DelTwiceNatStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# delete a static nat entry
dvs.del_twice_nat_basic_entry("172.16.17.32")
dvs.del_twice_nat_basic_entry("172.16.31.10")
        # check the entry is not there in the config db
self.config_db.wait_for_n_keys("STATIC_NAT", 0)
# check the entry is not there in app db
self.app_db.wait_for_n_keys("NAT_TWICE_TABLE", 0)
#check the entry is not there in asic db
self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0)
def test_AddTwiceNaPtEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 192.168.127.12")
dvs.servers[1].runcmd("ping -c 1 172.16.17.32")
# add a twice nat entry
dvs.add_twice_nat_udp_entry("172.16.17.32", "670", "172.16.17.32", "181", "snat", "7")
dvs.add_twice_nat_udp_entry("172.16.31.10", "660", "192.168.127.12", "182", "dnat", "7")
# check the entry in the config db
self.config_db.wait_for_n_keys("STATIC_NAPT", 2)
fvs = self.config_db.wait_for_entry("STATIC_NAPT", "172.16.31.10|UDP|660")
assert fvs == {"nat_type": "dnat", "local_ip": "192.168.127.12", "twice_nat_id": "7", "local_port": "182"}
fvs = self.config_db.wait_for_entry("STATIC_NAPT", "172.16.17.32|UDP|670")
assert fvs == {"nat_type": "snat", "local_ip": "172.16.17.32", "twice_nat_id": "7", "local_port": "181"}
# check the entry in app db
self.app_db.wait_for_n_keys("NAPT_TWICE_TABLE", 2)
fvs = self.app_db.wait_for_entry("NAPT_TWICE_TABLE", "UDP:172.16.17.32:670:67.66.65.1:660")
assert fvs == {"translated_src_ip": "18.18.18.1", "translated_src_l4_port": "181", "translated_dst_ip": "18.18.18.2", "translated_dst_l4_port": "182", "entry_type": "static"}
fvs = self.app_db.wait_for_entry("NAPT_TWICE_TABLE", "UDP:192.168.127.12:182:172.16.17.32:181")
assert fvs == {"translated_src_ip": "172.16.31.10", "translated_src_l4_port": "660", "translated_dst_ip": "172.16.17.32", "translated_dst_l4_port": "670", "entry_type": "static"}
#check the entry in asic db, 4 keys = SNAT, DNAT and 2 DNAT_Pools
keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 4)
for key in keys:
if (key.find("src_ip:18.18.18.2")) or (key.find("l4_src_port:182")):
assert True
if (key.find("dst_ip:18.18.18.1")) or (key.find("l4_dst_port:181")):
assert True
else:
assert False
def test_DelTwiceNaPtStaticEntry(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# delete a static nat entry
dvs.del_twice_nat_udp_entry("172.16.17.32", "670")
dvs.del_twice_nat_udp_entry("172.16.31.10", "660")
# check the entry is not there in the config db
self.config_db.wait_for_n_keys("STATIC_NAPT", 0)
# check the entry is not there in app db
self.app_db.wait_for_n_keys("NAPT_TWICE_TABLE", 0)
#check the entry is not there in asic db
self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 0)
def test_VerifyConntrackTimeoutForNatEntry(self, dvs, testlog):
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 18.18.18.2")
# add a static nat entry
dvs.add_nat_basic_entry("172.16.31.10", "18.18.18.2")
# check the conntrack timeout for static entry
def _check_conntrack_for_static_entry():
output = dvs.runcmd("conntrack -j -L -s 18.18.18.2 -p udp -q 172.16.31.10")
if len(output) != 2:
return (False, None)
conntrack_list = list(output[1].split(" "))
src_exists = "src=18.18.18.2" in conntrack_list
dst_exists = "dst=67.66.65.1" in conntrack_list
proto_exists = "udp" in conntrack_list
if not src_exists or not dst_exists or not proto_exists:
return (False, None)
proto_index = conntrack_list.index("udp")
if int(conntrack_list[proto_index + 7]) > 432000 or int(conntrack_list[proto_index + 7]) < 431900:
return (False, None)
return (True, None)
wait_for_result(_check_conntrack_for_static_entry)
# delete a static nat entry
dvs.del_nat_basic_entry("172.16.31.10")
def test_DoNotNatAclAction(self, dvs_acl, testlog):
# Creating the ACL Table
dvs_acl.create_acl_table(L3_TABLE_NAME, L3_TABLE_TYPE, L3_BIND_PORTS, stage="ingress")
acl_table_id = dvs_acl.get_acl_table_ids(1)[0]
acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(L3_BIND_PORTS))
dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1)
dvs_acl.verify_acl_table_port_binding(acl_table_id, L3_BIND_PORTS, 1)
# Create a ACL Rule with "do_not_nat" packet action
config_qualifiers = {"SRC_IP": "192.168.3.11/32"}
dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers, action="DO_NOT_NAT", priority="97")
expected_sai_qualifiers = {
"SAI_ACL_ENTRY_ATTR_FIELD_SRC_IP": dvs_acl.get_simple_qualifier_comparator("192.168.3.11&mask:255.255.255.255")
}
dvs_acl.verify_nat_acl_rule(expected_sai_qualifiers, priority="97")
# Deleting the ACL Rule
dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME)
dvs_acl.verify_no_acl_rules()
# Deleting the ACL Table
dvs_acl.remove_acl_table(L3_TABLE_NAME)
dvs_acl.verify_acl_table_count(0)
def test_CrmSnatAndDnatEntryUsedCount(self, dvs, testlog):
# initialize
self.setup_db(dvs)
# get neighbor and arp entry
dvs.servers[0].runcmd("ping -c 1 18.18.18.2")
        # set polling interval to 1
dvs.crm_poll_set("1")
dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY', '1000')
dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY', '1000')
time.sleep(2)
# get snat counters
used_snat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_snat_entry_used')
avail_snat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_snat_entry_available')
# get dnat counters
used_dnat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_dnat_entry_used')
avail_dnat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_dnat_entry_available')
# add a static nat entry
dvs.add_nat_basic_entry("172.16.31.10", "18.18.18.2")
#check the entry in asic db, 3 keys = SNAT, DNAT and DNAT_Pool
keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 3)
for key in keys:
if (key.find("dst_ip:172.16.31.10")) or (key.find("src_ip:18.18.18.2")):
assert True
else:
assert False
dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY', '999')
dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY', '999')
time.sleep(2)
# get snat counters
new_used_snat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_snat_entry_used')
new_avail_snat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_snat_entry_available')
# get dnat counters
new_used_dnat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_dnat_entry_used')
new_avail_dnat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_dnat_entry_available')
assert new_used_snat_counter - used_snat_counter == 1
assert avail_snat_counter - new_avail_snat_counter == 1
assert new_used_dnat_counter - used_dnat_counter == 1
assert avail_dnat_counter - new_avail_dnat_counter == 1
# delete a static nat entry
dvs.del_nat_basic_entry("172.16.31.10")
dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY', '1000')
dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY', '1000')
time.sleep(2)
# get snat counters
new_used_snat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_snat_entry_used')
new_avail_snat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_snat_entry_available')
# get dnat counters
new_used_dnat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_dnat_entry_used')
new_avail_dnat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_dnat_entry_available')
assert new_used_snat_counter == used_snat_counter
assert new_avail_snat_counter == avail_snat_counter
assert new_used_dnat_counter == used_dnat_counter
assert new_avail_dnat_counter == avail_dnat_counter
# clear interfaces
self.clear_interfaces(dvs)
    # Add Dummy always-pass test at end as workaround
    # for issue when
import numpy as np
from qtpy import QtWidgets, QtCore, QtGui
import flika
from flika import global_vars as g
from flika.window import Window
from flika.utils.io import tifffile
from flika.process.file_ import get_permutation_tuple
from flika.utils.misc import open_file_gui
import pyqtgraph as pg
import time
import os
from os import listdir
from os.path import expanduser, isfile, join
from distutils.version import StrictVersion
flika_version = flika.__version__
if StrictVersion(flika_version) < StrictVersion('0.2.23'):
from flika.process.BaseProcess import BaseProcess, SliderLabel, CheckBox, ComboBox
else:
from flika.utils.BaseProcess import BaseProcess, SliderLabel, CheckBox, ComboBox
class VolumeSlider(BaseProcess):
def __init__(self):
super().__init__()
self.numberOfTimeSlices = 0
self.displayedTimeSlice = 0
return
def startVolumeSlider(self):
#get volume arrays
self.getVolumes()
#update image
self.initiateImage()
#display image
self.displayWindow = Window(self.displayImage[self.displayedTimeSlice:(self.displayedTimeSlice+self.numberOfTimeSlices)],'Volume Slider Window')
#open gui
self.dialogbox = Form()
self.dialogbox.show()
return
def initiateImage(self):
self.displayImage = self.interleave(np.array(self.A_list))
#print(self.displayImage.shape)
return
def interleave(self, A):
self.nVols, self.nFrames, self.x, self.y = A.shape
#print(self.nVols, self.nFrames, self.x, self.y )
interleaved = np.zeros((self.nVols*self.nFrames,self.x,self.y))
#print(interleaved.shape)
z = 0
for i in np.arange(self.nFrames):
for j in np.arange(self.nVols):
#print(z, i, j)
interleaved[z] = A[j%self.nVols][i]
z = z +1
return interleaved
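    # Illustration (added; the values are made up): with nVols=2 volumes of
    # nFrames=3 slices each, interleave() orders the output frames as
    #   out[0]=A[0][0], out[1]=A[1][0], out[2]=A[0][1],
    #   out[3]=A[1][1], out[4]=A[0][2], out[5]=A[1][2]
    # i.e. the same slice index of every volume is grouped together, so a window
    # of numberOfTimeSlices consecutive frames shows one slice across all volumes.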
def updateImage(self):
self.displayWindow.imageview.setImage(self.displayImage[self.displayedTimeSlice:(self.displayedTimeSlice+self.numberOfTimeSlices)],autoLevels=False)
return
def getVolumes(self):
#clear volume list
self.A_list = []
#get path of volume folder
        volume_path = QtWidgets.QFileDialog.getExistingDirectory(g.m, "Select the folder containing the volume files.", expanduser("~"), QtWidgets.QFileDialog.ShowDirsOnly)
#get volume files in folder
vols = [f for f in listdir(volume_path) if isfile(join(volume_path, f))]
#add volumes to volume list
for i in range(len(vols)):
file = join(volume_path, vols[i])
self.A_list.append(self.openTiff(file))
self.numberOfTimeSlices = (len(self.A_list))
return
def openTiff(self, filename):
Tiff = tifffile.TiffFile(str(filename))
A = Tiff.asarray()
        axes = [tifffile.AXES_LABELS[ax] for ax in Tiff.series[0].axes]  # read axis labels before closing the file
        Tiff.close()
if set(axes) == set(['series', 'height', 'width']): # single channel, multi-volume
target_axes = ['series', 'width', 'height']
perm = get_permutation_tuple(axes, target_axes)
A = np.transpose(A, perm)
return A
def getNFrames(self):
return self.nFrames
def getFramesPerSlice(self):
return self.numberOfTimeSlices
def getDisplayFrame(self):
return self.displayWindow.imageview.currentIndex
def setDisplayFrame(self, value):
self.displayedFrame = value
self.displayWindow.imageview.setCurrentIndex(self.displayedFrame)
return
def updateSlice(self, value):
index = self.getDisplayFrame()
self.displayedTimeSlice = value * self.getFramesPerSlice()
#print(self.displayedTimeSlice)
self.updateImage()
self.setDisplayFrame(index)
return
volumeSlider = VolumeSlider()
class Form(QtWidgets.QDialog):
def __init__(self, parent = None):
super(Form, self).__init__(parent)
#window geometry
self.left = 300
self.top = 300
self.width = 600
self.height = 250
#spinboxes
self.spinLabel1 = QtWidgets.QLabel("Slice #")
self.SpinBox1 = QtWidgets.QSpinBox()
self.SpinBox1.setRange(0,volumeSlider.getNFrames())
self.SpinBox1.setValue(0)
#sliders
self.sliderLabel1 = QtWidgets.QLabel("Slice #")
self.slider1 = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.slider1.setFocusPolicy(QtCore.Qt.StrongFocus)
self.slider1.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.slider1.setMinimum(0)
self.slider1.setMaximum(volumeSlider.getNFrames())
self.slider1.setTickInterval(1)
self.slider1.setSingleStep(1)
#buttons
self.button1 = QtWidgets.QPushButton("Autolevel")
#grid layout
layout = QtWidgets.QGridLayout()
layout.setSpacing(10)
layout.addWidget(self.spinLabel1, 1, 0)
layout.addWidget(self.SpinBox1, 1, 1)
layout.addWidget(self.slider1, 2, 0,2,5)
layout.addWidget(self.button1, 4, 4,1,1)
self.setLayout(layout)
self.setGeometry(self.left, self.top, self.width, self.height)
#add window title
self.setWindowTitle("Volume Slider GUI")
#connect sliders & spinboxes
self.slider1.valueChanged.connect(self.slider1ValueChange)
self.SpinBox1.valueChanged.connect(self.spinBox1ValueChange)
#connect buttons
self.button1.clicked.connect(self.autoLevel)
return
#volume changes with slider & spinbox
def slider1ValueChange(self, value):
self.SpinBox1.setValue(value)
#volumeSlider.updateSlice(value)
return
def spinBox1ValueChange(self, value):
self.slider1.setValue(value)
volumeSlider.updateSlice(value)
return
def autoLevel(self):
volumeSlider.displayWindow.imageview.autoLevels()
return
#########################################################################################################
class CamVolumeSlider(BaseProcess):
def __init__(self):
super().__init__()
self.numberOfTimeSlices = 0
self.displayedTimeSlice = 0
self.nVols = 1
self.nChannels = 0
return
def startVolumeSlider(self):
#copy selected window
self.A = g.win.image
#update image
self.initiateImage()
#display image
self.displayWindow = Window(self.displayImage[self.displayedTimeSlice:(self.displayedTimeSlice+self.numberOfTimeSlices)],'Volume Slider Window')
#open gui
self.dialogbox = Form2()
self.dialogbox.show()
return
def initiateImage(self):
self.displayImage = self.interleave(self.A)
#print(self.displayImage.shape)
return
def interleave(self, A):
self.nFrames, self.x, self.y = A.shape
self.numberOfTimeSlices = self.nFrames
print(self.nVols, self.nFrames, self.x, self.y )
if self.nVols == 1:
return A
else:
self.numberOfTimeSlices = self.getslicesPerVolume()
interleaved = np.zeros((self.nFrames,self.x,self.y))
print(interleaved.shape)
print(A.shape)
z = 0
for i in np.arange(self.getNumberVols()):
for j in np.arange(self.getslicesPerVolume()):
#print(z, i, j)
interleaved[z] = A[(i*self.getslicesPerVolume()) + j%self.getslicesPerVolume()]
z = z +1
return interleaved
def updateImage(self):
self.displayWindow.imageview.setImage(self.displayImage[self.displayedTimeSlice:(self.displayedTimeSlice+self.numberOfTimeSlices)],autoLevels=False)
return
def getNFrames(self):
return self.nFrames
def getFramesPerSlice(self):
return self.numberOfTimeSlices
def getDisplayFrame(self):
return self.displayWindow.imageview.currentIndex
def setDisplayFrame(self, value):
self.displayedFrame = value
self.displayWindow.imageview.setCurrentIndex(self.displayedFrame)
return
def updateSlice(self, value):
index = self.getDisplayFrame()
self.displayedTimeSlice = value * self.getFramesPerSlice()
#print(self.displayedTimeSlice)
self.updateImage()
self.setDisplayFrame(index)
return
def setNumberVols(self,value):
self.nVols = value
return
def getslicesPerVolume(self):
return self.slicesPerVolume
def getNumberVols(self):
return self.nVols
def updateVolumeSize(self, value):
#set slices per volume
self.slicesPerVolume = value
#set number of volumes
self.setNumberVols(int(self.getNFrames()/value))
#update image
self.initiateImage()
self.updateImage()
return
camVolumeSlider = CamVolumeSlider()
class Form2(QtWidgets.QDialog):
def __init__(self, parent = None):
super(Form2, self).__init__(parent)
#window geometry
self.left = 300
self.top = 300
self.width = 600
self.height = 250
#spinboxes
self.spinLabel1 = QtWidgets.QLabel("Volume #")
self.SpinBox1 = QtWidgets.QSpinBox()
self.SpinBox1.setRange(0,camVolumeSlider.getNFrames())
self.SpinBox1.setValue(0)
self.spinLabel2 = QtWidgets.QLabel("# of slices per volume: ")
self.SpinBox2 = QtWidgets.QSpinBox()
self.SpinBox2.setRange(0,camVolumeSlider.getNFrames())
self.SpinBox2.setValue(camVolumeSlider.getNFrames())
#sliders
self.sliderLabel1 = QtWidgets.QLabel("Slice #")
self.slider1 = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.slider1.setFocusPolicy(QtCore.Qt.StrongFocus)
self.slider1.setTickPosition(QtWidgets.QSlider.TicksBothSides)
self.slider1.setMinimum(0)
self.slider1.setMaximum(camVolumeSlider.getNFrames())
self.slider1.setTickInterval(1)
self.slider1.setSingleStep(1)
#buttons
self.button1 = QtWidgets.QPushButton("Autolevel")
self.button2 = QtWidgets.QPushButton("Set Slices")
#grid layout
layout = QtWidgets.QGridLayout()
layout.setSpacing(10)
layout.addWidget(self.spinLabel1, 1, 0)
layout.addWidget(self.SpinBox1, 1, 1)
layout.addWidget(self.slider1, 2, 0, 2, 5)
layout.addWidget(self.spinLabel2, 3, 0)
layout.addWidget(self.SpinBox2, 3, 1)
layout.addWidget(self.button2, 3, 2)
layout.addWidget(self.button1, 4, 4, 1, 1)
self.setLayout(layout)
self.setGeometry(self.left, self.top, self.width, self.height)
#add window title
self.setWindowTitle("Volume Slider GUI")
#connect sliders & spinboxes
self.slider1.valueChanged.connect(self.slider1ValueChange)
self.SpinBox1.valueChanged.connect(self.spinBox1ValueChange)
#connect buttons
self.button1.clicked.connect(self.autoLevel)
self.button2.clicked.connect(self.updateVolumeValue)
return
#volume changes with slider & spinbox
def slider1ValueChange(self, value):
self.SpinBox1.setValue(value)
#camVolumeSlider.updateSlice(value)
return
def spinBox1ValueChange(self, value):
self.slider1.setValue(value)
camVolumeSlider.updateSlice(value)
return
def autoLevel(self):
camVolumeSlider.displayWindow.imageview.autoLevels()
return
def updateVolumeValue(self):
value = self.SpinBox2.value()
camVolumeSlider.updateVolumeSize(value)
        if camVolumeSlider.getNFrames() % value == 0:
            self.SpinBox1.setRange(0, camVolumeSlider.getNumberVols()-1)  # frames divide evenly, so the last volume is complete and can be displayed
            self.slider1.setMaximum(camVolumeSlider.getNumberVols()-1)
        else:
            self.SpinBox1.setRange(0, camVolumeSlider.getNumberVols()-2)  # incomplete last volume, don't display it
            self.slider1.setMaximum(camVolumeSlider.getNumberVols()-2)
return
#########################################################################################################
class Load_tiff (BaseProcess):
""" load_tiff()
This function loads tiff files from lightsheet experiments with multiple channels and volumes.
"""
def __init__(self):
self.nChannels = 0
self.nVolumes = 0
self.nFrames = 0
def gui(self):
filetypes = 'Image Files (*.tif *.tiff);;All Files (*.*)'
prompt = 'Open File'
filename = open_file_gui(prompt, filetypes=filetypes)
if filename is None:
return None
self.openTiff(filename)
        self.slicesPerVolume = int(self.nFrames*self.nChannels / self.nVolumes)
        return (self.nChannels, self.nVolumes, self.slicesPerVolume)
def openTiff(self, filename):
Tiff = tifffile.TiffFile(str(filename))
A = Tiff.asarray()
        axes = [tifffile.AXES_LABELS[ax] for ax in Tiff.series[0].axes]  # read axis labels before closing the file
        Tiff.close()
if set(axes) == set(['time', 'depth', 'height', 'width']): # single channel, multi-volume
target_axes = ['time', 'depth', 'width', 'height']
perm = get_permutation_tuple(axes, target_axes)
A = np.transpose(A, perm)
nScans, nFrames, x, y = A.shape
A = A.reshape(nScans*nFrames,x,y)
newWindow = Window(A,'Loaded Tiff')
self.nChannels = 1
self.nVolumes = nScans
self.nFrames = nFrames
return
elif set(axes) == set(['series', 'height', 'width']): # single channel, single-volume
target_axes = ['series', 'width', 'height']
perm = get_permutation_tuple(axes, target_axes)
A = np.transpose(A, perm)
nFrames, x, y = A.shape
A = A.reshape(nFrames,x,y)
newWindow = Window(A,'Loaded Tiff')
self.nChannels = 1
self.nVolumes = 1
self.nFrames = nFrames
return
elif set(axes) == set(['time', 'height', 'width']): # single channel, single-volume
target_axes = ['time', 'width', 'height']
perm = get_permutation_tuple(axes, target_axes)
A = np.transpose(A, perm)
nFrames, x, y = A.shape
A = A.reshape(nFrames,x,y)
newWindow = Window(A,'Loaded Tiff')
self.nChannels = 1
self.nVolumes = 1
self.nFrames = nFrames
return
elif set(axes) == set(['time', 'depth', 'channel', 'height', 'width']): # multi-channel, multi-volume
target_axes = ['channel','time','depth', 'width', 'height']
perm = get_permutation_tuple(axes, target_axes)
A = np.transpose(A, perm)
B = A[0]
C = A[1]
nChannels, nScans, nFrames, x, y = A.shape
n1Scans, n1Frames, x1, y1 = B.shape
n2Scans, n2Frames, x2, y2 = C.shape
B = B.reshape(n1Scans*n1Frames,x1,y1)
C = C.reshape(n2Scans*n2Frames,x2,y2)
self.channel_1 = Window(B,'Channel 1')
self.channel_2 = Window(C,'Channel 2')
#original shape before splitting channel
self.nChannels = nChannels
self.nVolumes = nScans
self.nFrames = nFrames
return
elif set(axes) == set(['depth', 'channel', 'height', 'width']): # multi-channel, single volume
target_axes = ['channel','depth', 'width', 'height']
perm = get_permutation_tuple(axes, target_axes)
A = np.transpose(A, perm)
B = A[0]
C = A[1]
nChannels, nFrames, x, y = A.shape
n1Frames, x1, y1 = B.shape
n2Frames, x2, y2 = C.shape
self.channel_1 = Window(B,'Channel 1')
self.channel_2 = Window(C,'Channel 2')
#original shape before splitting channel
self.nChannels = nChannels
self.nVolumes = 1
self.nFrames = nFrames
return
load_tiff = Load_tiff()
class CamVolumeSlider2(BaseProcess):
def __init__(self):
super().__init__()
self.numberOfTimeSlices = 0
self.displayedTimeSlice = 0
self.nVols = 1
self.nChannels = 0
self.overlayFlag = False
self.stackedFlag = False
return
def startVolumeSlider(self):
#open file
self.nChannels, self.nVols, self.slicesPerVolume = load_tiff.gui()
#print(self.nChannels, self.nVols, self.slicesPerVolume)
#copy selected window
self.A = g.win.image
text_1 = 'Volume Slider Window'
if self.nChannels == 2:
self.B = load_tiff.channel_1.imageview.getProcessedImage()
            text_1 = 'Volume
            # The auditor account cannot be the same as the creator account
            raise serializers.ValidationError("审核帐号不能与创建帐号相同")
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_semifinishedmanageplanmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class SemifinishedManagePlanSerialize_Partial(serializers.ModelSerializer):
"""
    Semi-finished product management plan -- partial
"""
class Meta:
model = SemifinishedManagePlanModel
fields = ("id", "state", "alter")
    # Object-level validation
def validate(self, attrs):
try:
            del attrs['alter']  # drop the alter field
except Exception:
pass
return attrs
    # State field validation
def validate_state(self, value):
validate_states3(self.instance.state, value)
if not self.instance.state == "使用中":
if self.instance.create_user == self.context['request'].user.username: # 如果当前用户为创建
if not (self.instance.state == "新建" and (value == "审核中" or value == "作废")):
raise serializers.ValidationError("创建者只能将[新建]信息更改成[审核中]或[作废]")
if (self.instance.state == "审核中" and value == "使用中"): # 如果是由审核状态转换成使用中状态
data = SemifinishedManagePlanModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # 遍历所有任务子项WAIT
try:
child = SemifinishedManagePlanItemModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前任务项下的子项不存在")
child.state = "等待"
child.save()
if ((self.instance.state == "挂起" and value == "使用中") or
(self.instance.state == "使用中" and value == "挂起")): # 如果是由挂起状态转与使用中状态互相转换
if not (self.context['request'].user.has_perm('plan.deal_semifinishedmanageplanmodel')):
raise serializers.ValidationError("当前用户不具备执行任务权限")
if value == "终止": # 如果新状态为终止状态
if not (self.context['request'].user.has_perm('plan.deal_semifinishedmanageplanmodel')):
raise serializers.ValidationError("当前用户不具备执行订单权限")
data = SemifinishedManagePlanModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # 遍历所有订单子项,并将[使用中]的子项转换成END
try:
child = SemifinishedManagePlanItemModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前任务项下的子项不存在")
if child.state == "等待":
child.state = "终止"
child.save()
return value
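    # Summary (added, inferred from the checks above): the creator may move a plan
    # from [新建] to [审核中] or [作废]; approving it ([审核中] -> [使用中]) puts every
    # child item into [等待]; toggling between [挂起] and [使用中], or moving to [终止],
    # additionally requires the deal_semifinishedmanageplanmodel permission, and
    # terminating a plan also terminates all of its still-waiting children.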
    # Review-record (alter) field validation
def validate_alter(self, value):
obj = SemifinishedManagePlanModel.objects.get(id=self.instance.id).alter
for data in value:
obj.add(data.id)
return value
# endregion
# region Product management plan item serializers
class ProductManagePlanItemSerialize_Create(serializers.ModelSerializer):
"""
    Product management plan item -- create
"""
state = serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = ProductManagePlanItemModel
fields = ("id", "type", "state", "product_id", "sum", "attribute1", "attribute2",
"attribute3", "attribute4", "attribute5", "desc", "create_user")
def validate(self, attrs):
try:
product = ProductInforDefinitionModel.objects.get(id=attrs["product_id"]) # 判断指定的产品是否存在
except Exception as e:
raise serializers.ValidationError("指定的产品不存在")
if product.state != "使用中":
raise serializers.ValidationError("指定的产品不在'使用中'状态")
attrs["productType_code"] = product.type.code # 获取产品类型编码
attrs["productType_name"] = product.type.name # 获取产品类型名称
attrs["product_code"] = product.code # 获取产品编码
attrs["product_name"] = product.name # 获取产品名称
return attrs
class ProductManagePlanItemSerialize_List(serializers.ModelSerializer):
"""
    Product management plan item -- list
"""
class Meta:
model = ProductManagePlanItemModel
fields = "__all__"
class ProductManagePlanItemSerialize_Partial(serializers.ModelSerializer):
"""
    Product management plan item -- partial
"""
class Meta:
model = ProductManagePlanItemModel
fields = ("id", "state", "completed")
    # State field validation
def validate_state(self, value):
parentState = ProductManagePlanItemModel.objects.filter(
id=self.instance.id).first().productManageItem_parent.all().values('state')
if (parentState[0]['state'] != "使用中"):
raise serializers.ValidationError("当前任务不处于[使用中状态],禁止更改子项任务状态")
if not (self.instance.state == "等待" and value == "终止"):
raise serializers.ValidationError("子任务只能从[等待状态]更改成[终止状态]")
if not (self.context['request'].user.has_perm('plan.deal_productmanageplanmodel')):
raise serializers.ValidationError("当前用户不具备执行任务权限")
        # Walk every plan that manages this item; if all of a plan's child items are terminated (END), mark the plan terminated as well
data1 = ProductManagePlanItemModel.objects.filter(
id=self.instance.id).first().productManageItem_parent.all().values('id')
for item1 in data1: # 遍历所有关联此子项的父项
count = 1
parentModel = ProductManagePlanModel.objects.filter(id=item1['id']).first()
data2 = parentModel.child.all().values('id')
for item2 in data2: # 遍历父项的所有子项
child = ProductManagePlanItemModel.objects.filter(id=item2['id']).first()
if child.state == "终止":
count += 1
if count == len(data2):
parentModel.state = "终止"
parentModel.save()
return value
    # Completed-quantity field validation
def validate_completed(self, value):
if not (self.instance.state == "等待"):
raise serializers.ValidationError("只有在[等待]状态下,才能更新计划完成数")
if value >= self.instance.sum:
self.instance.state = "完成"
            # Walk every plan that manages this item; if all of a plan's child items are completed (DONE) or terminated (END), mark the plan completed
value1 = ProductManagePlanItemModel.objects.filter(
id=self.instance.id).first().productManageItem_parent.all().values('id')
for item1 in value1: # 遍历所有关联此子项的父项
count = 1
parentModel = ProductManagePlanModel.objects.filter(id=item1['id']).first()
value2 = parentModel.child.all().values('id')
for item2 in value2: # 遍历父项的所有子项
child = ProductManagePlanItemModel.objects.filter(id=item2['id']).first()
if (child.state == "终止" or child.state == "完成"):
count += 1
if count == len(value2):
parentModel.state = "完成"
parentModel.save()
return value
# endregion
# region Product management plan serializers
class ProductManagePlanSerialize_Create(serializers.ModelSerializer):
"""
    Product management plan -- create
"""
state = serializers.HiddenField(default="新建")
priority = serializers.HiddenField(default="正常")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = ProductManagePlanModel
fields = (
"id", "name", "code", "state", "priority", "child", "dataTime", "file", "desc", "attribute1", "attribute2",
"attribute3", "attribute4", "attribute5", "auditor", "create_user")
    # Object-level validation
def validate(self, attrs):
if not attrs["create_user"].has_perm('plan.add_productmanageplanmodel'): # 如果当前用户没有创建权限
raise serializers.ValidationError("当前用户不具备创建权限'")
if settings.SAME_USER!=True:
if attrs["create_user"].username == attrs["auditor"]: # 审核帐号不能与创建帐号相同
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
return attrs
    # Auditor field validation
def validate_auditor(self, value):
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_productmanageplanmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class ProductManagePlanSerialize_List(serializers.ModelSerializer):
"""
    Product management plan -- list
"""
class Meta:
model = ProductManagePlanModel
fields = ("id", "name", "code", "state", "priority", "dataTime", "auditor", "create_user","create_time","update_time")
class ProductManagePlanSerialize_Retrieve(serializers.ModelSerializer):
"""
    Product management plan -- retrieve
"""
file = PlanFileSerialize_List(many=True)
child = ProductManagePlanItemSerialize_List(many=True)
alter = PlanAlterRecordSerialize_List(many=True)
class Meta:
model = ProductManagePlanModel
fields = "__all__"
class ProductManagePlanSerialize_Update(serializers.ModelSerializer):
"""
    Product management plan -- update
"""
class Meta:
model = ProductManagePlanModel
fields = ("id", "name", "code", "child", "priority", "dataTime", "file", "attribute1", "attribute2",
"attribute3", "attribute4", "attribute5", "desc", "auditor")
    # Object-level validation
def validate(self, attrs):
if self.instance.state != '新建': # 如果不是新建状态 不能更改信息
raise serializers.ValidationError("当前信息已提交,禁止更改")
return attrs
    # Auditor field validation
def validate_auditor(self, value):
if self.instance.state != '新建': # 如果不是新建状态 不能更改信息
raise serializers.ValidationError("当前信息已提交,禁止更改")
if settings.SAME_USER != True:
if self.instance.create_user == value: # 审核帐号不能与创建帐号相同
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_productmanageplanmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class ProductManagePlanSerialize_Partial(serializers.ModelSerializer):
"""
    Product management plan -- partial
"""
class Meta:
model = ProductManagePlanModel
fields = ("id", "state", "alter")
    # Object-level validation
def validate(self, attrs):
try:
            del attrs['alter']  # drop the alter field
except Exception:
pass
return attrs
    # State field validation
def validate_state(self, value):
validate_states3(self.instance.state, value)
if not self.instance.state == "使用中":
if self.instance.create_user == self.context['request'].user.username: # 如果当前用户为创建
if not (self.instance.state == "新建" and (value == "审核中" or value == "作废")):
raise serializers.ValidationError("创建者只能将[新建]信息更改成[审核中]或[作废]")
if (self.instance.state == "审核中" and value == "使用中"): # 如果是由审核状态转换成使用中状态
data = ProductManagePlanModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # 遍历所有任务子项WAIT
try:
child = ProductManagePlanItemModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前任务项下的子项不存在")
child.state = "等待"
child.save()
if ((self.instance.state == "挂起" and value == "使用中") or
(self.instance.state == "使用中" and value == "挂起")): # 如果是由挂起状态转与使用中状态互相转换
if not (self.context['request'].user.has_perm('plan.deal_productmanageplanmodel')):
raise serializers.ValidationError("当前用户不具备执行任务权限")
if value == "终止": # 如果新状态为终止状态
if not (self.context['request'].user.has_perm('plan.deal_productmanageplanmodel')):
raise serializers.ValidationError("当前用户不具备执行订单权限")
data = ProductManagePlanModel.objects.filter(id=self.instance.id).first().child.all().values('id')
for item in data: # 遍历所有订单子项,并将[使用中]的子项转换成END
try:
child = ProductManagePlanItemModel.objects.get(id=item['id'])
except Exception as e:
raise serializers.ValidationError("当前任务项下的子项不存在")
if child.state == "等待":
child.state = "终止"
child.save()
return value
    # Review-record (alter) field validation
def validate_alter(self, value):
obj = ProductManagePlanModel.objects.get(id=self.instance.id).alter
for data in value:
obj.add(data.id)
return value
# endregion
# region Equipment maintenance plan item serializers
class EquipmentMaintainPlanItemSerialize_Create(serializers.ModelSerializer):
"""
    Equipment maintenance plan item -- create
"""
state = serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = EquipmentMaintainPlanItemModel
fields = ("id","state","equipment_id", "handler","consuming_time","file", "attribute1", "attribute2",
"attribute3", "attribute4","attribute5","desc", "create_user")
def validate(self, attrs):
try:
equipment = EquipmentAccountModel.objects.get(id=attrs["equipment_id"]) # 判断指定的设备是否存在
except Exception as e:
raise serializers.ValidationError("指定的设备不存在")
if equipment.state!="使用中":
raise serializers.ValidationError("指定的设备不在'使用中'状态")
attrs["equipmentType_code"] = equipment.type.code # 获取设备类型编码
attrs["equipmentType_name"] = equipment.type.name # 获取设备类型名称
attrs["equipment_code"] = equipment.code # 获取设备编码
attrs["equipment_name"] = equipment.name # 获取设备名称
return attrs
class EquipmentMaintainPlanItemSerialize_List(serializers.ModelSerializer):
"""
    Equipment maintenance plan item -- list
"""
file = PlanFileSerialize_List(many=True)
class Meta:
model = EquipmentMaintainPlanItemModel
fields = "__all__"
class EquipmentMaintainPlanItemSerialize_Partial(serializers.ModelSerializer):
"""
    Equipment maintenance plan item -- partial
"""
class Meta:
model = EquipmentMaintainPlanItemModel
fields = ("id","state")
    # State field validation
def validate_state(self, value):
parentState = EquipmentMaintainPlanItemModel.objects.filter(
id=self.instance.id).first().equipmentMaintainPlanItem_parent.all().values('state')
if (parentState[0]['state'] != "使用中" ):
raise serializers.ValidationError("当前设备维护计划单不处于[使用中状态],禁止更改子项维护计划单状态")
if not (self.instance.state == "等待" and (value == "终止" or value == "完成")):
raise serializers.ValidationError("子维护计划单只能从[等待状态]更改成[终止状态]或[完成状态]")
if not (self.context['request'].user.has_perm('plan.deal_equipmentmaintainplanmodel')):
raise serializers.ValidationError("当前用户不具备执行终止维护计划单权限")
        # Walk every maintenance plan that manages this item; if all of a plan's child items are terminated (END), mark the plan terminated as well
data1 = EquipmentMaintainPlanItemModel.objects.filter(id=self.instance.id).first().equipmentMaintainPlanItem_parent.all().values('id')
if (value == "终止"):
for item1 in data1: # 遍历所有关联此子项的父项
count = 1
parentModel = EquipmentMaintainPlanModel.objects.filter(id=item1['id']).first()
data2=parentModel.child.all().values('id')
for item2 in data2: # 遍历父项的所有子项
child = EquipmentMaintainPlanItemModel.objects.filter(id=item2['id']).first()
if child.state == "终止":
count += 1
if count ==len(data2):
parentModel.state="终止"
parentModel.save()
if (value == "完成"):
for item1 in data1: # 遍历所有关联此子项的父项
count = 1
parentModel = EquipmentMaintainPlanModel.objects.filter(id=item1['id']).first()
data2=parentModel.child.all().values('id')
for item2 in data2: # 遍历父项的所有子项
child = EquipmentMaintainPlanItemModel.objects.filter(id=item2['id']).first()
if (child.state == "终止" or child.state == "完成"):
count += 1
if count ==len(data2):
parentModel.state="完成"
parentModel.save()
return value
# endregion
# region Equipment maintenance plan serializers
class EquipmentMaintainPlanSerialize_Create(serializers.ModelSerializer):
"""
    Equipment maintenance plan -- create
"""
state = serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = EquipmentMaintainPlanModel
fields = ("id", "name", "code", "state", "child", "dataTime","file", "desc","attribute1", "attribute2",
"attribute3", "attribute4","attribute5","auditor", "create_user")
    # Object-level validation
def validate(self, attrs):
if not attrs["create_user"].has_perm('plan.add_equipmentmaintainplanmodel'): # 如果当前用户没有创建权限
raise serializers.ValidationError("当前用户不具备创建权限'")
if settings.SAME_USER!=True:
if attrs["create_user"].username == attrs["auditor"]: # 审核帐号不能与创建帐号相同
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
return attrs
    # Auditor field validation
def validate_auditor(self, value):
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_equipmentmaintainplanmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class EquipmentMaintainPlanSerialize_List(serializers.ModelSerializer):
"""
    Equipment maintenance plan -- list
"""
class Meta:
model = EquipmentMaintainPlanModel
fields = ("id", "name", "code", "state", "dataTime","auditor", "create_user","create_time","update_time")
class EquipmentMaintainPlanSerialize_Retrieve(serializers.ModelSerializer):
"""
    Equipment maintenance plan -- retrieve
"""
file = PlanFileSerialize_List(many=True)
child = EquipmentMaintainPlanItemSerialize_List(many=True)
alter = PlanAlterRecordSerialize_List(many=True)
class Meta:
model = EquipmentMaintainPlanModel
fields = "__all__"
class EquipmentMaintainPlanSerialize_Update(serializers.ModelSerializer):
"""
    Equipment maintenance plan -- update
"""
class Meta:
model = EquipmentMaintainPlanModel
fields = ("id", "name", "code", "child", "dataTime","file", "desc","attribute1", "attribute2",
"attribute3", "attribute4","attribute5","auditor",)
    # Object-level validation
def validate(self, attrs):
if self.instance.state != '新建': # 如果不是新建状态 不能更改信息
raise serializers.ValidationError("当前信息已提交,禁止更改")
return attrs
    # Auditor field validation
def validate_auditor(self, value):
if self.instance.state != '新建': # 如果不是新建状态 不能更改信息
raise serializers.ValidationError("当前信息已提交,禁止更改")
if settings.SAME_USER != True:
if self.instance.create_user == value: # 审核帐号不能与创建帐号相同
raise serializers.ValidationError("审核帐号不能与创建帐号相同'")
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_equipmentmaintainplanmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class EquipmentMaintainPlanSerialize_Partial(serializers.ModelSerializer):
"""
    Equipment maintenance plan -- partial
"""
class Meta:
model = EquipmentMaintainPlanModel
fields = ("id", "state", "alter")
    # Object-level validation
def validate(self, attrs):
try:
            del attrs['alter']  # drop the alter field
except Exception:
pass
return attrs
    # State field validation
def validate_state(self, value):
validate_states4(self.instance.state, value)
if not self.instance.state | |
import numpy as np
import pandas as pd
import torch
import warnings
import numba
import rpy2.robjects as robjects
import scipy.integrate as integrate
from dataclasses import InitVar, dataclass, field
from Evaluations.custom_types import NumericArrayLike
def check_and_convert(*args):
""" Makes sure that the given inputs are numpy arrays, list,
tuple, panda Series, pandas DataFrames, or torch Tensors.
Also makes sure that the given inputs have the same shape.
Then convert the inputs to numpy array.
Parameters
----------
* args : tuple of objects
Input object to check / convert.
Returns
-------
* result : tuple of numpy arrays
The converted and validated arg.
If the input isn't numpy arrays, list or pandas DataFrames, it will
fail and ask to provide the valid format.
"""
result = ()
last_length = ()
for i, arg in enumerate(args):
if len(arg) == 0:
error = " The input is empty. "
error += "Please provide at least 1 element in the array."
raise IndexError(error)
else:
if isinstance(arg, np.ndarray):
x = (arg.astype(np.double),)
elif isinstance(arg, list):
x = (np.asarray(arg).astype(np.double),)
elif isinstance(arg, tuple):
x = (np.asarray(arg).astype(np.double),)
elif isinstance(arg, pd.Series):
x = (arg.values.astype(np.double),)
elif isinstance(arg, pd.DataFrame):
x = (arg.values.astype(np.double),)
elif isinstance(arg, torch.Tensor):
x = (arg.cpu().numpy().astype(np.double),)
else:
error = """{arg} is not a valid data format. Only use 'list', 'tuple', 'np.ndarray', 'torch.Tensor',
'pd.Series', 'pd.DataFrame'""".format(arg=type(arg))
raise TypeError(error)
if np.sum(np.isnan(x)) > 0.:
error = "The #{} argument contains null values"
error = error.format(i + 1)
raise ValueError(error)
if len(args) > 1:
if i > 0:
assert x[0].shape == last_length, """Shapes between {}-th input array and
{}-th input array are not consistent""".format(i - 1, i)
result += x
last_length = x[0].shape
else:
result = x[0]
return result
def predict_prob_from_curve(
survival_curve: np.ndarray,
times_coordinate: np.ndarray,
target_time: float
) -> float:
"""
Quote from ISDEvaluation/Evaluations/EvaluationHelperFunction.R
We need some type of predict function for survival curves - here we build a spline to fit the survival model curve.
This spline is the monotonic spline using the hyman filtering of the cubic Hermite spline method,
see https://en.wikipedia.org/wiki/Monotone_cubic_interpolation. Also see help(splinefun).
Note that we make an alteration to the method because if the last two time points
have the same probability (y value) then the spline is constant outside of the training data.
We need this to be a decreasing function outside the training data so instead we take the linear fit of (0,1)
and the last time point we have (p,t*) and then apply this linear function to all points outside of our fit.
"""
x = robjects.FloatVector(times_coordinate)
y = robjects.FloatVector(survival_curve)
r_splinefun = robjects.r['splinefun'] # extract splinefun method from R
spline = r_splinefun(x, y, method='hyman')
# predicting boundary
max_time = float(max(times_coordinate))
# simply calculate the slope by using the [0, 1] - [max_time, S(t|x)]
# Need to convert the R floatvector to numpy array and use .item() to obtain the single value
slope = (1 - np.array(spline(max_time)).item()) / (0 - max_time)
# If the true event time is out of predicting boundary, then use the linear fit mentioned above;
# Else if the true event time is in the boundary, then use the spline
if target_time > max_time:
# func: y = slope * x + 1, the minimum prob should be 0
predict_probability = max(slope * target_time + 1, 0)
else:
predict_probability = np.array(spline(float(target_time))).item()
return predict_probability
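# Example (added sketch; the toy curve and times below are illustrative only and
# assume rpy2/R with splinefun are available as this module already requires):
#
#   curve = np.array([1.0, 0.8, 0.5, 0.2])
#   times = np.array([0.0, 10.0, 20.0, 30.0])
#   predict_prob_from_curve(curve, times, 25.0)   # interpolated with the hyman spline
#   predict_prob_from_curve(curve, times, 40.0)   # beyond max(times): linear fit through (0, 1), floored at 0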
def predict_multi_probs_from_curve(
survival_curve: np.ndarray,
times_coordinate: np.ndarray,
target_times: NumericArrayLike
) -> np.ndarray:
"""
Quote from ISDEvaluation/Evaluations/EvaluationHelperFunction.R
We need some type of predict function for survival curves - here we build a spline to fit the survival model curve.
This spline is the monotonic spline using the hyman filtering of the cubic Hermite spline method,
see https://en.wikipedia.org/wiki/Monotone_cubic_interpolation. Also see help(splinefun).
Note that we make an alteration to the method because if the last two time points
have the same probability (y value) then the spline is constant outside of the training data.
We need this to be a decreasing function outside the training data so instead we take the linear fit of (0,1)
and the last time point we have (p,t*) and then apply this linear function to all points outside of our fit.
"""
target_times = check_and_convert(target_times).astype(float).tolist()
x = robjects.FloatVector(times_coordinate)
y = robjects.FloatVector(survival_curve)
r_splinefun = robjects.r['splinefun'] # extract splinefun method from R
spline = r_splinefun(x, y, method='hyman')
# predicting boundary
max_time = float(max(times_coordinate))
# simply calculate the slope by using the [0, 1] - [maxtime, S(t|x)]
# Need to convert the R floatvector to numpy array and use .item() to obtain the single value
slope = (1 - np.array(spline(max_time)).item()) / (0 - max_time)
# If the true event time is out of predicting boundary, then use the linear fit mentioned above;
# Else if the true event time is in the boundary, then use the spline
predict_probabilities = np.array(spline(target_times))
for i, target_time in enumerate(target_times):
if target_time > max_time:
predict_probabilities[i] = max(slope * target_time + 1, 0)
return predict_probabilities
def predict_mean_survival_time(survival_curve, times_coordinate: np.ndarray):
# If all the predicted probabilities are 1 the integral will be infinite.
if np.all(survival_curve == 1):
warnings.warn("All the predicted probabilities are 1, the integral will be infinite.")
return np.inf
x = robjects.FloatVector(times_coordinate)
y = robjects.FloatVector(survival_curve)
r_splinefun = robjects.r['splinefun'] # extract splinefun method from R
spline = r_splinefun(x, y, method='hyman')
# predicting boundary
max_time = max(times_coordinate.tolist())
# simply calculate the slope by using the [0, 1] - [max_time, S(t|x)]
slope = (1 - np.array(spline(max_time)).item()) / (0 - max_time)
# zero_probability_time = min(times_coordinate[np.where(survival_curve == 0)],
# max_time + (0 - np.array(spline(max_time)).item()) / slope)
if 0 in survival_curve:
zero_probability_time = min(times_coordinate[np.where(survival_curve == 0)])
else:
zero_probability_time = max_time + (0 - np.array(spline(max_time)).item()) / slope
def _func_to_integral(time, maximum_time, slope_rate):
return np.array(spline(time)).item() if time < maximum_time else (1 + time * slope_rate)
# _func_to_integral = lambda time: spline(time) if time < max_time else (1 + time * slope)
# limit controls the subdivision intervals used in the adaptive algorithm.
# Set it to 1000 is consistent with Haider's R code
mean_survival_time, *rest = integrate.quad(_func_to_integral, 0, zero_probability_time,
args=(max_time, slope), limit=1000)
return mean_survival_time
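# Note (added): the value returned above is the area under the (extrapolated)
# survival curve, i.e. mean survival time = integral from 0 to t0 of S(t) dt,
# where t0 is the first time the curve reaches probability zero.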
def predict_median_survival_time(survival_curve, times_coordinate: np.ndarray):
# If all the predicted probabilities are 1 the integral will be infinite.
if np.all(survival_curve == 1):
warnings.warn("All the predicted probabilities are 1, the median survival time will be infinite.")
return np.inf
x = robjects.FloatVector(times_coordinate)
y = robjects.FloatVector(survival_curve)
r_splinefun = robjects.r['splinefun'] # extract splinefun method from R
spline = r_splinefun(x, y, method='hyman')
min_prob = min(spline(times_coordinate.tolist()))
if 0.5 in survival_curve:
median_probability_time = times_coordinate[np.where(survival_curve == 0.5)[0][0]]
elif min_prob < 0.5:
min_time_before_median = times_coordinate[np.where(survival_curve > 0.5)[0][-1]]
max_time_after_median = times_coordinate[np.where(survival_curve < 0.5)[0][0]]
prob_range = robjects.FloatVector(
spline(np.linspace(min_time_before_median, max_time_after_median, num=1000).tolist()))
time_range = robjects.FloatVector(np.linspace(min_time_before_median, max_time_after_median, num=1000))
inverse_spline = r_splinefun(prob_range, time_range, method='hyman')
# Need to convert the R floatvector to numpy array and use .item() to obtain the single value
median_probability_time = np.array(inverse_spline(0.5)).item()
else:
max_time = max(times_coordinate.tolist())
slope = (1 - np.array(spline(max_time)).item()) / (0 - max_time)
median_probability_time = max_time + (0.5 - np.array(spline(max_time)).item()) / slope
return median_probability_time
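# Note (added): the median returned above is S^{-1}(0.5); it is read off the
# curve directly when 0.5 is observed, via an inverse hyman spline when the
# curve crosses 0.5 inside the observed times, or via the same linear
# extrapolation through (0, 1) otherwise.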
def stratified_folds_survival(dataset: pd.DataFrame,
event_times: np.ndarray,
event_indicators: np.ndarray,
number_folds: int = 5):
event_times, event_indicators = event_times.tolist(), event_indicators.tolist()
assert len(event_indicators) == len(event_times)
indicators_and_times = list(zip(event_indicators, event_times))
sorted_idx = [i[0] for i in sorted(enumerate(indicators_and_times), key=lambda v: (v[1][0], v[1][1]))]
    folds = [[sorted_idx[i]] for i in range(number_folds)]
    for i in range(number_folds, len(sorted_idx)):
fold_number = i % number_folds
folds[fold_number].append(sorted_idx[i])
training_sets = [dataset.drop(folds[i], axis=0) for i in range(number_folds)]
testing_sets = [dataset.iloc[folds[i], :] for i in range(number_folds)]
cross_validation_set = list(zip(training_sets, testing_sets))
return cross_validation_set
@dataclass
class KaplanMeier:
"""
This class is borrowed from survival_evaluation package.
"""
event_times: InitVar[np.array]
event_indicators: InitVar[np.array]
survival_times: np.array = field(init=False)
survival_probabilities: np.array = field(init=False)
def __post_init__(self, event_times, event_indicators):
index = np.lexsort((event_indicators, event_times))
unique_times = np.unique(event_times[index], return_counts=True)
self.survival_times = unique_times[0]
population_count = np.flip(np.flip(unique_times[1]).cumsum())
event_counter = np.append(0, unique_times[1].cumsum()[:-1])
event_ind = list()
for i in range(np.size(event_counter[:-1])):
event_ind.append(event_counter[i])
event_ind.append(event_counter[i + 1])
event_ind.append(event_counter[-1])
event_ind.append(len(event_indicators))
events = np.add.reduceat(np.append(event_indicators[index], 0), event_ind)[::2]
self.survival_probabilities = np.empty(population_count.size)
survival_probability = 1
counter = 0
for population, event_num in zip(population_count, events):
survival_probability *= 1 - event_num / population
self.survival_probabilities[counter] = survival_probability
counter += 1
def predict(self, prediction_times: np.array):
probability_index = np.digitize(prediction_times, self.survival_times)
probability_index = np.where(
probability_index == | |
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
#
# Copyright (C) 2014-2018 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: <NAME> (<EMAIL>)
# <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Deprecated ... restore or delete !
"""
__authors__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__date__ = "20/01/2021"
__copyright__ = "2014, ESRF, Grenoble"
__contact__ = "<EMAIL>"
import os
import logging
import threading
import numpy
from . import ocl, pyopencl
from ..ext.splitBBoxLUT import HistoBBox1d
if pyopencl:
mf = pyopencl.mem_flags
else:
raise ImportError("pyopencl is not installed")
from ..utils import crc32, get_cl_file
logger = logging.getLogger(__name__)
class OCLFullSplit1d(object):
def __init__(self,
pos,
bins=100,
pos0Range=None,
pos1Range=None,
mask=None,
mask_checksum=None,
allow_pos0_neg=False,
unit="undefined",
workgroup_size=256,
devicetype="all",
platformid=None,
deviceid=None,
profile=False):
self.bins = bins
self.lut_size = 0
self.allow_pos0_neg = allow_pos0_neg
if len(pos.shape) == 3:
assert pos.shape[1] == 4
assert pos.shape[2] == 2
elif len(pos.shape) == 4:
assert pos.shape[2] == 4
assert pos.shape[3] == 2
else:
raise ValueError("Pos array dimentions are wrong")
self.pos_size = pos.size
self.size = self.pos_size // 8  # 4 corners * 2 coordinates per pixel
self.pos = numpy.ascontiguousarray(pos.ravel(), dtype=numpy.float32)
self.pos0Range = numpy.empty(2, dtype=numpy.float32)
self.pos1Range = numpy.empty(2, dtype=numpy.float32)
if (pos0Range is not None) and (len(pos0Range) == 2):
self.pos0Range[0] = min(pos0Range) # do it on GPU?
self.pos0Range[1] = max(pos0Range)
if (not self.allow_pos0_neg) and (self.pos0Range[0] < 0):
self.pos0Range[0] = 0.0
if self.pos0Range[1] < 0:
print("Warning: Invalid 0-dim range! Using the data derived range instead")
self.pos0Range[1] = 0.0
# self.pos0Range[0] = pos0Range[0]
# self.pos0Range[1] = pos0Range[1]
else:
self.pos0Range[0] = 0.0
self.pos0Range[1] = 0.0
if (pos1Range is not None) and (len(pos1Range) == 2):
self.pos1Range[0] = min(pos1Range) # do it on GPU?
self.pos1Range[1] = max(pos1Range)
# self.pos1Range[0] = pos1Range[0]
# self.pos1Range[1] = pos1Range[1]
else:
self.pos1Range[0] = 0.0
self.pos1Range[1] = 0.0
if mask is not None:
assert mask.size == self.size
self.check_mask = True
self.cmask = numpy.ascontiguousarray(mask.ravel(), dtype=numpy.int8)
if mask_checksum:
self.mask_checksum = mask_checksum
else:
self.mask_checksum = crc32(mask)
else:
self.check_mask = False
self.mask_checksum = None
self._sem = threading.Semaphore()
self.profile = profile
self._cl_kernel_args = {}
self._cl_mem = {}
self.events = []
self.workgroup_size = workgroup_size
if self.size < self.workgroup_size:
raise RuntimeError("Fatal error in workgroup size selection. Size (%d) must be >= workgroup size (%d)\n", self.size, self.workgroup_size)
if (platformid is None) and (deviceid is None):
platformid, deviceid = ocl.select_device(devicetype)
elif platformid is None:
platformid = 0
elif deviceid is None:
deviceid = 0
self.platform = ocl.platforms[platformid]
self.device = self.platform.devices[deviceid]
self.device_type = self.device.type
if (self.device_type == "CPU") and (self.platform.vendor == "Apple"):
logger.warning("This is a workaround for Apple's OpenCL on CPU: enforce BLOCK_SIZE=1")
self.workgroup_size = 1
try:
self._ctx = pyopencl.Context(devices=[pyopencl.get_platforms()[platformid].get_devices()[deviceid]])
if self.profile:
self._queue = pyopencl.CommandQueue(self._ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)
else:
self._queue = pyopencl.CommandQueue(self._ctx)
self._compile_kernels()
self._calc_boundaries()
self._calc_LUT()
except pyopencl.MemoryError as error:
raise MemoryError(error)
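# Instantiation sketch (illustrative; `corner_pos` is a hypothetical
# (n_pixels, 4, 2) float array of pixel-corner coordinates, matching the shape
# checks in __init__ above):
#
#     integrator = OCLFullSplit1d(corner_pos, bins=1000, devicetype="gpu")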
def _compile_kernels(self, kernel_file=None):
"""
Call the OpenCL compiler
:param kernel_file: path to the OpenCL kernel source file
"""
kernel_name = "ocl_lut.cl"
if kernel_file is None:
if os.path.isfile(kernel_name):
kernel_file = os.path.abspath(kernel_name)
else:
kernel_file = get_cl_file("pyfai:openCL/" + kernel_name)
else:
kernel_file = str(kernel_file)
kernel_src = open(kernel_file).read()
compile_options = "-D BINS=%i -D POS_SIZE=%i -D SIZE=%i -D WORKGROUP_SIZE=%i -D EPS=%e" % \
(self.bins, self.pos_size, self.size, self.workgroup_size, numpy.finfo(numpy.float32).eps)
logger.info("Compiling file %s with options %s", kernel_file, compile_options)
try:
self._program = pyopencl.Program(self._ctx, kernel_src).build(options=compile_options)
except pyopencl.MemoryError as error:
raise MemoryError(error)
def _calc_boundaries(self):
"""
comments
"""
# # # # # # # # Check for memory# # # # # # # #
size_of_float = numpy.dtype(numpy.float32).itemsize
ualloc = (self.pos_size * size_of_float)
ualloc += (self.workgroup_size * 4 * size_of_float)
ualloc += (4 * size_of_float)
memory = self.device.memory
if ualloc >= memory:
raise MemoryError("Fatal error in _allocate_buffers. Not enough device memory for buffers (%lu requested, %lu available)" % (ualloc, memory))
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # allocate memory # # # # # # # #
try:
# No returned event for profiling
# self._cl_mem["pos"] = pyopencl.array.to_device(self._queue, self.pos)
# self._cl_mem["preresult"] = pyopencl.array.empty(self._queue, (4*self.workgroup_size,), dtype=numpy.float32)
# self._cl_mem["minmax"] = pyopencl.array.empty(self._queue, (4,), dtype=numpy.float32)
self._cl_mem["pos"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size_of_float * self.pos_size)
self._cl_mem["preresult"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * 4 * self.workgroup_size)
self._cl_mem["minmax"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * 4)
except pyopencl.MemoryError as error:
self._free_device_memory()
raise MemoryError(error)
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # move data # # # # # # # # # #
with self._sem:
copy_pos = pyopencl.enqueue_copy(self._queue, self._cl_mem["pos"], self.pos)
self.events += [("copy pos", copy_pos)]
# # # # # # # # set arguments # # # # # # # # #
self._cl_kernel_args["reduce_minmax_1"] = [self._cl_mem["pos"], self._cl_mem["preresult"]]
self._cl_kernel_args["reduce_minmax_2"] = [self._cl_mem["preresult"], self._cl_mem["minmax"]]
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # do the minmax reduction # # # # # #
with self._sem:
reduce_minmax_1 = self._program.reduce_minmax_1(self._queue, (self.workgroup_size * self.workgroup_size,), (self.workgroup_size,), *self._cl_kernel_args["reduce_minmax_1"])
self.events += [("reduce_minmax_1", reduce_minmax_1)]
reduce_minmax_2 = self._program.reduce_minmax_2(self._queue, (self.workgroup_size,), (self.workgroup_size,), *self._cl_kernel_args["reduce_minmax_2"])
self.events += [("reduce_minmax_2", reduce_minmax_2)]
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # release the redundant data # # # # #
self._cl_mem["preresult"].release()
self._cl_mem.pop("preresult")
# # # # # # # # # # # # # # # # # # # # # # # #
# check memory of d_pos + d_preresult + d_minmax
# load d_pos
# allocate d_preresult
# allocate d_minmax
# run reduce1
# run reduce2
# save reference to d_minMax
# free d_preresult
def _calc_LUT(self):
"""
first need to call lut_1 and lut_2 to find the size of the LUT and the lut_3 to create it
"""
# # # # # # # # Check for memory# # # # # # # #
size_of_float = numpy.dtype(numpy.float32).itemsize
size_of_int = numpy.dtype(numpy.int32).itemsize
ualloc = (self.pos_size * size_of_float) # pos
ualloc += (4 * size_of_float) # minmax
ualloc += (2 * size_of_float) * 2 # pos0Range, pos1Range
ualloc += (self.bins * size_of_int) # outMax
ualloc += (1 * size_of_int) # lutsize
ualloc += ((self.bins + 1) * size_of_int) # idx_ptr
memory = self.device.memory
if ualloc >= memory:
raise MemoryError("Fatal error in _allocate_buffers. Not enough device memory for buffers (%lu requested, %lu available)" % (ualloc, memory))
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # allocate memory # # # # # # # #
try:
# self._cl_mem["pos0Range"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size_of_float * 2)
# self._cl_mem["pos1Range"] = pyopencl.Buffer(self._ctx, mf.READ_ONLY, size_of_float * 2)
self._cl_mem["outMax"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * self.bins)
self._cl_mem["lutsize"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * 1)
self._cl_mem["idx_ptr"] = pyopencl.Buffer(self._ctx, mf.READ_WRITE, size_of_float * (self.bins + 1))
except pyopencl.MemoryError as error:
self._free_device_memory()
raise MemoryError(error)
# # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # move | |
#### PATTERN | DB ##################################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: <NAME> <<EMAIL>>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import os
import sys
import inspect
import re
import base64
import json
import csv as csvlib
from codecs import BOM_UTF8
from itertools import islice
from datetime import datetime, timedelta
from calendar import monthrange
from time import mktime, strftime
from math import sqrt
from functools import cmp_to_key
from io import open, StringIO, BytesIO
BOM_UTF8 = BOM_UTF8.decode("utf-8")
from html.entities import name2codepoint
from email.utils import parsedate_tz, mktime_tz
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
from pattern.helpers import encode_string, decode_string
decode_utf8 = decode_string
encode_utf8 = encode_string
ALL = "*"
_sum = sum # pattern.db.sum() is also a column aggregate function.
#### DATE FUNCTIONS ################################################################################
NOW, YEAR = "now", datetime.now().year
# Date formats can be found in the Python documentation:
# http://docs.python.org/library/time.html#time.strftime
DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
date_formats = [
DEFAULT_DATE_FORMAT, # 2010-09-21 09:27:01 => SQLite + MySQL
"%Y-%m-%dT%H:%M:%SZ", # 2010-09-20T09:27:01Z => Bing
"%a, %d %b %Y %H:%M:%S +0000", # Fri, 21 Sep 2010 09:27:01 +000 => Twitter
"%a %b %d %H:%M:%S +0000 %Y", # Fri Sep 21 09:21:01 +0000 2010 => Twitter
"%Y-%m-%dT%H:%M:%S+0000", # 2010-09-20T09:27:01+0000 => Facebook
"%Y-%m-%d %H:%M", # 2010-09-21 09:27
"%Y-%m-%d", # 2010-09-21
"%d/%m/%Y", # 21/09/2010
"%d %B %Y", # 21 September 2010
"%d %b %Y", # 21 Sep 2010
"%B %d %Y", # September 21 2010
"%B %d, %Y", # September 21, 2010
]
def _yyyywwd2yyyymmdd(year, week, weekday):
""" Returns (year, month, day) for given (year, week, weekday).
"""
d = datetime(year, month=1, day=4) # 1st week contains January 4th.
d = d - timedelta(d.isoweekday() - 1) + timedelta(days=weekday - 1, weeks=week - 1)
return (d.year, d.month, d.day)
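# Example: ISO week 38 of 2010 starts on Monday September 20th, so its Tuesday is
# September 21st:
#
#     _yyyywwd2yyyymmdd(2010, 38, 2)  # => (2010, 9, 21)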
def _strftime1900(d, format):
""" Returns the given date formatted as a string.
"""
if d.year < 1900: # Python's strftime() doesn't handle year < 1900.
return strftime(format, (1900,) + d.timetuple()[1:]).replace("1900", str(d.year), 1)
return datetime.strftime(d, format)
class DateError(Exception):
pass
class Date(datetime):
""" A convenience wrapper for datetime.datetime with a default string format.
"""
format = DEFAULT_DATE_FORMAT
# Date.year
# Date.month
# Date.day
# Date.minute
# Date.second
@property
def minutes(self):
return self.minute
@property
def seconds(self):
return self.second
@property
def microseconds(self):
return self.microsecond
@property
def week(self):
return self.isocalendar()[1]
@property
def weekday(self):
return self.isocalendar()[2]
@property
def timestamp(self):
# In Python 3, years before 1900 are accepted, while mktime() raises ValueError in Python 2. Let's stick to this.
if self.timetuple().tm_year < 1900:
raise ValueError("year out of range")
return int(mktime(self.timetuple())) # Seconds elapsed since 1/1/1970.
def strftime(self, format):
return _strftime1900(self, format)
def copy(self):
return date(self.timestamp)
def __str__(self):
return self.strftime(self.format)
def __repr__(self):
return "Date(%s)" % repr(self.__str__())
def __iadd__(self, t):
return self.__add__(t)
def __isub__(self, t):
return self.__sub__(t)
def __add__(self, t):
d = self
if getattr(t, "years", 0) \
or getattr(t, "months", 0):
# January 31 + 1 month = February 28.
y = (d.month + t.months - 1) // 12 + d.year + t.years
m = (d.month + t.months + 0) % 12 or 12
r = monthrange(y, m)
d = date(y, m, min(d.day, r[1]), d.hour, d.minute, d.second, d.microsecond)
d = datetime.__add__(d, t)
return date(d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond, self.format)
def __sub__(self, t):
if isinstance(t, (Date, datetime)):
# Subtracting two dates returns a Time.
t = datetime.__sub__(self, t)
return Time(+t.days, +t.seconds,
microseconds = +t.microseconds)
if isinstance(t, (Time, timedelta)):
return self + Time(-t.days, -t.seconds,
microseconds = -t.microseconds,
months = -getattr(t, "months", 0),
years = -getattr(t, "years", 0))
def date(*args, **kwargs):
""" Returns a Date from the given parameters:
- date(format=Date.format) => now
- date(int)
- date(string)
- date(string, format=Date.format)
- date(string, inputformat, format=Date.format)
- date(year, month, day, format=Date.format)
- date(year, month, day, hours, minutes, seconds, format=Date.format)
If a string is given without an explicit input format, all known formats will be tried.
"""
d = None
f = None
if len(args) == 0 \
and kwargs.get("year") is not None \
and kwargs.get("month") \
and kwargs.get("day"):
# Year, month, day.
d = Date(**kwargs)
elif kwargs.get("week"):
# Year, week, weekday.
f = kwargs.pop("format", None)
d = Date(*_yyyywwd2yyyymmdd(
kwargs.pop("year", args and args[0] or Date.now().year),
kwargs.pop("week"),
kwargs.pop("weekday", kwargs.pop("day", 1))), **kwargs)
elif len(args) == 0 or args[0] == NOW:
# No parameters or one parameter NOW.
d = Date.now()
elif len(args) == 1 \
and isinstance(args[0], (Date, datetime)):
# One parameter, a Date or datetime object.
d = Date.fromtimestamp(int(mktime(args[0].timetuple())))
d += time(microseconds=args[0].microsecond)
elif len(args) == 1 \
and (isinstance(args[0], int) \
or isinstance(args[0], (str, bytes)) and args[0].isdigit()):
# One parameter, an int or string timestamp.
if isinstance(args[0], bytes):
args = (args[0].decode("utf-8"),)
d = Date.fromtimestamp(int(args[0]))
elif len(args) == 1 \
and isinstance(args[0], (str, bytes)):
# One parameter, a date string for which we guess the input format (RFC2822 or known formats).
if isinstance(args[0], bytes):
args = (args[0].decode("utf-8"),)
try:
d = Date.fromtimestamp(mktime_tz(parsedate_tz(args[0])))
except:
for format in ("format" in kwargs and [kwargs["format"]] or []) + date_formats:
try:
d = Date.strptime(args[0], format)
break
except:
pass
if d is None:
raise DateError("unknown date format for %s" % repr(args[0]))
elif len(args) == 2 \
and isinstance(args[0], (str, bytes)):
# Two parameters, a date string and an explicit input format.
if isinstance(args[0], bytes):
args = (args[0].decode("utf-8"), args[1].decode("utf-8"))
d = Date.strptime(args[0], args[1])
elif len(args) >= 3:
# 3-6 parameters: year, month, day, hours, minutes, seconds.
f = kwargs.pop("format", None)
d = Date(*args[:7], **kwargs)
else:
raise DateError("unknown date format")
d.format = kwargs.get("format") or len(args) > 7 and args[7] or f or Date.format
return d
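# Usage sketch (illustrative; mirrors the call patterns listed in the docstring above):
#
#     date(2010, 9, 21)                                   # year, month, day
#     date("2010-09-21 09:27:01")                         # parsed with the known formats
#     date("21/09/2010", "%d/%m/%Y", format="%Y-%m-%d")   # explicit input format
#     date(1285054021)                                    # POSIX timestamp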
class Time(timedelta):
def __new__(cls, *args, **kwargs):
""" A convenience wrapper for datetime.timedelta that handles months and years.
"""
# Time.years
# Time.months
# Time.days
# Time.seconds
# Time.microseconds
y = kwargs.pop("years", 0)
m = kwargs.pop("months", 0)
t = timedelta.__new__(cls, *args, **kwargs)
setattr(t, "years", y)
setattr(t, "months", m)
return t
def time(days=0, seconds=0, minutes=0, hours=0, **kwargs):
""" Returns a Time that can be added to a Date object.
Other parameters: microseconds, milliseconds, weeks, months, years.
"""
return Time(days=days, seconds=seconds, minutes=minutes, hours=hours, **kwargs)
def string(value, default=""):
""" Returns the value cast to unicode, or default if it is None/empty.
"""
# Useful for HTML interfaces.
if value is None or value == "": # Don't do value != None because this includes 0.
return default
return decode_utf8(value)
class EncryptionError(Exception):
pass
class DecryptionError(Exception):
pass
def encrypt_string(s, key=""):
""" Returns the given string as an encrypted bytestring.
"""
key += " "
a = []
for i in range(len(s)):
try:
a.append(chr(ord(s[i]) + ord(key[i % len(key)]) % 256).encode("latin-1"))
except:
raise EncryptionError()
s = b"".join(a)
s = base64.urlsafe_b64encode(s)
return s
def decrypt_string(s, key=""):
""" Returns the given string as a decrypted Unicode string.
"""
key += " "
s = base64.urlsafe_b64decode(s)
s = s.decode("latin-1")
a = []
for i in range(len(s)):
try:
a.append(chr(ord(s[i]) - ord(key[i % len(key)]) % 256))
except:
raise DecryptionError()
s = "".join(a)
s = decode_utf8(s)
return s
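# Round-trip sketch (illustrative): encryption shifts each character by the key and
# base64-encodes the result, so decrypting with the same key restores the input.
# Characters whose shifted code point exceeds 255 raise EncryptionError.
#
#     token = encrypt_string("hello", key="k")
#     decrypt_string(token, key="k")  # => "hello"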
#### LIST FUNCTIONS ################################################################################
def order(list, cmp=None, key=None, reverse=False):
""" Returns a list of indices in the order as when the given list is sorted.
For example: ["c","a","b"] => [1, 2, 0]
This means that in the sorted list, "a" (index 1) comes first and "c" (index 0) last.
"""
if cmp and key:
f = lambda i, j: cmp(key(list[i]), key(list[j]))
elif cmp:
f = lambda i, j: cmp(list[i], list[j])
elif key:
f = lambda i, j: int(key(list[i]) >= key(list[j])) * 2 - 1
else:
f = lambda i, j: int(list[i] >= list[j]) * 2 - 1
return sorted(range(len(list)), key=cmp_to_key(f), reverse=reverse)
_order = order
def avg(list):
""" Returns the arithmetic mean of the given list of values.
For example: avg([1,2,3,4]) = 10/4 = 2.5.
"""
return float(_sum(list)) / (len(list) or 1)
def variance(list):
""" Returns the variance of the given list of values.
The variance is the average of squared deviations from the mean.
"""
a = avg(list)
return _sum([(x - a)**2 for x in list]) / (len(list) - 1 or 1)
def stdev(list):
""" Returns the standard deviation of the given list of values.
Low standard deviation => values are close to the mean.
High standard deviation => values are spread out over a large range.
"""
return sqrt(variance(list))
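# Worked example (note the sample variance, i.e. division by n-1):
#
#     avg([1, 2, 3, 4])       # => 2.5
#     variance([1, 2, 3, 4])  # => 5/3 ~ 1.667
#     stdev([1, 2, 3, 4])     # => sqrt(5/3) ~ 1.291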
#### FIELD #########################################################################################
class _String(str):
# The STRING constant can be called | |
# From https://github.com/snap-stanford/GraphRNN/blob/1ef475d957414d7c0bf8c778a1d44cb52dd7829b/utils.py
import networkx as nx
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
# import node2vec.src.main as nv
from sklearn.decomposition import PCA
import community
import pickle
import re
from . import graphrnn_data as data
def citeseer_ego():
_, _, G = data.Graph_load(dataset='citeseer')
G = max((G.subgraph(c) for c in nx.connected_components(G)), key=len)
G = nx.convert_node_labels_to_integers(G)
graphs = []
for i in range(G.number_of_nodes()):
G_ego = nx.ego_graph(G, i, radius=3)
if G_ego.number_of_nodes() >= 50 and (G_ego.number_of_nodes() <= 400):
graphs.append(G_ego)
return graphs
def caveman_special(c=2,k=20,p_path=0.1,p_edge=0.3):
p = p_path
path_count = max(int(np.ceil(p * k)),1)
G = nx.caveman_graph(c, k)
# remove 50% edges
p = 1-p_edge
for (u, v) in list(G.edges()):
if np.random.rand() < p and ((u < k and v < k) or (u >= k and v >= k)):
G.remove_edge(u, v)
# add path_count links
for i in range(path_count):
u = np.random.randint(0, k)
v = np.random.randint(k, k * 2)
G.add_edge(u, v)
G = max((G.subgraph(c) for c in nx.connected_components(G)), key=len)
return G
def n_community(c_sizes, p_inter=0.01):
graphs = [nx.gnp_random_graph(c_sizes[i], 0.7, seed=i) for i in range(len(c_sizes))]
G = nx.disjoint_union_all(graphs)
communities = list((G.subgraph(c) for c in nx.connected_components(G)))
for i in range(len(communities)):
subG1 = communities[i]
nodes1 = list(subG1.nodes())
for j in range(i+1, len(communities)):
subG2 = communities[j]
nodes2 = list(subG2.nodes())
has_inter_edge = False
for n1 in nodes1:
for n2 in nodes2:
if np.random.rand() < p_inter:
G.add_edge(n1, n2)
has_inter_edge = True
if not has_inter_edge:
G.add_edge(nodes1[0], nodes2[0])
#print('connected comp: ', len(list((G.subgraph(c) for c in nx.connected_components(G)))))
return G
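# Usage sketch (illustrative): build a two-community graph with 20 and 30 nodes and
# sparse random inter-community edges.
#
#     G = n_community([20, 30], p_inter=0.01)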
def perturb(graph_list, p_del, p_add=None):
''' Perturb the list of graphs by adding/removing edges.
Args:
p_add: probability of adding edges. If None, estimate it according to graph density,
such that the expected number of added edges is equal to that of deleted edges.
p_del: probability of removing edges
Returns:
A list of graphs that are perturbed from the original graphs
'''
perturbed_graph_list = []
for G_original in graph_list:
G = G_original.copy()
trials = np.random.binomial(1, p_del, size=G.number_of_edges())
edges = list(G.edges())
i = 0
for (u, v) in edges:
if trials[i] == 1:
G.remove_edge(u, v)
i += 1
if p_add is None:
num_nodes = G.number_of_nodes()
p_add_est = np.sum(trials) / (num_nodes * (num_nodes - 1) / 2 -
G.number_of_edges())
else:
p_add_est = p_add
nodes = list(G.nodes())
tmp = 0
for i in range(len(nodes)):
u = nodes[i]
trials = np.random.binomial(1, p_add_est, size=G.number_of_nodes())
j = 0
for j in range(i+1, len(nodes)):
v = nodes[j]
if trials[j] == 1:
tmp += 1
G.add_edge(u, v)
j += 1
perturbed_graph_list.append(G)
return perturbed_graph_list
def perturb_new(graph_list, p):
''' Perturb the list of graphs by removing a fraction of the edges and adding the
same number of random edges back, so each graph keeps its edge count.
Args:
p: probability of removing each edge
Returns:
A list of graphs that are perturbed from the original graphs
'''
perturbed_graph_list = []
for G_original in graph_list:
G = G_original.copy()
edge_remove_count = 0
for (u, v) in list(G.edges()):
if np.random.rand()<p:
G.remove_edge(u, v)
edge_remove_count += 1
# randomly add the edges back
for i in range(edge_remove_count):
while True:
u = np.random.randint(0, G.number_of_nodes())
v = np.random.randint(0, G.number_of_nodes())
if (not G.has_edge(u,v)) and (u!=v):
break
G.add_edge(u, v)
perturbed_graph_list.append(G)
return perturbed_graph_list
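# Usage sketch (illustrative): rewire roughly 10% of the edges of each graph while
# keeping the edge count of every graph unchanged.
#
#     graphs = [nx.erdos_renyi_graph(30, 0.2, seed=s) for s in range(3)]
#     rewired = perturb_new(graphs, p=0.1)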
def imsave(fname, arr, vmin=None, vmax=None, cmap=None, format=None, origin=None):
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
fig = Figure(figsize=arr.shape[::-1], dpi=1, frameon=False)
canvas = FigureCanvas(fig)
fig.figimage(arr, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin)
fig.savefig(fname, dpi=1, format=format)
def save_prediction_histogram(y_pred_data, fname_pred, max_num_node, bin_n=20):
bin_edge = np.linspace(1e-6, 1, bin_n + 1)
output_pred = np.zeros((bin_n, max_num_node))
for i in range(max_num_node):
output_pred[:, i], _ = np.histogram(y_pred_data[:, i, :], bins=bin_edge, density=False)
# normalize
output_pred[:, i] /= np.sum(output_pred[:, i])
imsave(fname=fname_pred, arr=output_pred, origin='upper', cmap='Greys_r', vmin=0.0, vmax=3.0 / bin_n)
# draw a single graph G
def draw_graph(G, prefix = 'test'):
parts = community.best_partition(G)
values = [parts.get(node) for node in G.nodes()]
colors = []
for i in range(len(values)):
if values[i] == 0:
colors.append('red')
if values[i] == 1:
colors.append('green')
if values[i] == 2:
colors.append('blue')
if values[i] == 3:
colors.append('yellow')
if values[i] == 4:
colors.append('orange')
if values[i] == 5:
colors.append('pink')
if values[i] == 6:
colors.append('black')
# spring_pos = nx.spring_layout(G)
plt.switch_backend('agg')
plt.axis("off")
pos = nx.spring_layout(G)
nx.draw_networkx(G, with_labels=True, node_size=35, node_color=colors,pos=pos)
# plt.switch_backend('agg')
# options = {
# 'node_color': 'black',
# 'node_size': 10,
# 'width': 1
# }
# plt.figure()
# plt.subplot()
# nx.draw_networkx(G, **options)
plt.savefig('figures/graph_view_'+prefix+'.png', dpi=200)
plt.close()
plt.switch_backend('agg')
G_deg = nx.degree_histogram(G)
G_deg = np.array(G_deg)
# plt.plot(range(len(G_deg)), G_deg, 'r', linewidth = 2)
plt.loglog(np.arange(len(G_deg))[G_deg>0], G_deg[G_deg>0], 'r', linewidth=2)
plt.savefig('figures/degree_view_' + prefix + '.png', dpi=200)
plt.close()
# degree_sequence = sorted(nx.degree(G).values(), reverse=True) # degree sequence
# plt.loglog(degree_sequence, 'b-', marker='o')
# plt.title("Degree rank plot")
# plt.ylabel("degree")
# plt.xlabel("rank")
# plt.savefig('figures/degree_view_' + prefix + '.png', dpi=200)
# plt.close()
# G = nx.grid_2d_graph(8,8)
# G = nx.karate_club_graph()
# draw_graph(G)
# draw a list of graphs [G]
def draw_graph_list(G_list, row, col, fname = 'figures/test', layout='spring', is_single=False,k=1,node_size=55,alpha=1,width=1.3):
# # draw graph view
# from pylab import rcParams
# rcParams['figure.figsize'] = 12,3
plt.switch_backend('agg')
for i,G in enumerate(G_list):
plt.subplot(row,col,i+1)
plt.subplots_adjust(left=0, bottom=0, right=1, top=1,
wspace=0, hspace=0)
# if i%2==0:
# plt.title('real nodes: '+str(G.number_of_nodes()), fontsize = 4)
# else:
# plt.title('pred nodes: '+str(G.number_of_nodes()), fontsize = 4)
# plt.title('num of nodes: '+str(G.number_of_nodes()), fontsize = 4)
# parts = community.best_partition(G)
# values = [parts.get(node) for node in G.nodes()]
# colors = []
# for i in range(len(values)):
# if values[i] == 0:
# colors.append('red')
# if values[i] == 1:
# colors.append('green')
# if values[i] == 2:
# colors.append('blue')
# if values[i] == 3:
# colors.append('yellow')
# if values[i] == 4:
# colors.append('orange')
# if values[i] == 5:
# colors.append('pink')
# if values[i] == 6:
# colors.append('black')
plt.axis("off")
if layout=='spring':
pos = nx.spring_layout(G,k=k/np.sqrt(G.number_of_nodes()),iterations=100)
# pos = nx.spring_layout(G)
elif layout=='spectral':
pos = nx.spectral_layout(G)
# # nx.draw_networkx(G, with_labels=True, node_size=2, width=0.15, font_size = 1.5, node_color=colors,pos=pos)
# nx.draw_networkx(G, with_labels=False, node_size=1.5, width=0.2, font_size = 1.5, linewidths=0.2, node_color = 'k',pos=pos,alpha=0.2)
if is_single:
# node_size default 60, edge_width default 1.5
nx.draw_networkx_nodes(G, pos, node_size=node_size, node_color='#336699', alpha=1, linewidths=0, font_size=0)
nx.draw_networkx_edges(G, pos, alpha=alpha, width=width)
else:
nx.draw_networkx_nodes(G, pos, node_size=1.5, node_color='#336699',alpha=1, linewidths=0.2, font_size = 1.5)
nx.draw_networkx_edges(G, pos, alpha=0.3,width=0.2)
# plt.axis('off')
# plt.title('Complete Graph of Odd-degree Nodes')
# plt.show()
plt.tight_layout()
plt.savefig(fname+'.png', dpi=600)
plt.close()
# # draw degree distribution
# plt.switch_backend('agg')
# for i, G in enumerate(G_list):
# plt.subplot(row, col, i + 1)
# G_deg = np.array(list(G.degree(G.nodes()).values()))
# bins = np.arange(20)
# plt.hist(np.array(G_deg), bins=bins, align='left')
# plt.xlabel('degree', fontsize = 3)
# plt.ylabel('count', fontsize = 3)
# G_deg_mean = 2*G.number_of_edges()/float(G.number_of_nodes())
# # if i % 2 == 0:
# # plt.title('real average degree: {:.2f}'.format(G_deg_mean), fontsize=4)
# # else:
# # plt.title('pred average degree: {:.2f}'.format(G_deg_mean), fontsize=4)
# plt.title('average degree: {:.2f}'.format(G_deg_mean), fontsize=4)
# plt.tick_params(axis='both', which='major', labelsize=3)
# plt.tick_params(axis='both', which='minor', labelsize=3)
# plt.tight_layout()
# plt.savefig(fname+'_degree.png', dpi=600)
# plt.close()
#
# # draw clustering distribution
# plt.switch_backend('agg')
# for i, G in enumerate(G_list):
# plt.subplot(row, col, i + 1)
# G_cluster = list(nx.clustering(G).values())
# bins = np.linspace(0,1,20)
# plt.hist(np.array(G_cluster), bins=bins, align='left')
# plt.xlabel('clustering coefficient', fontsize=3)
# plt.ylabel('count', fontsize=3)
# G_cluster_mean = sum(G_cluster) / len(G_cluster)
# # if i % 2 == 0:
# # plt.title('real average clustering: {:.4f}'.format(G_cluster_mean), fontsize=4)
# # else:
# # plt.title('pred average clustering: {:.4f}'.format(G_cluster_mean), fontsize=4)
# plt.title('average clustering: {:.4f}'.format(G_cluster_mean), fontsize=4)
# plt.tick_params(axis='both', which='major', labelsize=3)
# plt.tick_params(axis='both', which='minor', labelsize=3)
# plt.tight_layout()
# plt.savefig(fname+'_clustering.png', dpi=600)
# plt.close()
#
# # draw circle distribution
# plt.switch_backend('agg')
# for i, G in enumerate(G_list):
# plt.subplot(row, col, i + 1)
# cycle_len = []
# cycle_all = nx.cycle_basis(G)
# for item in cycle_all:
# cycle_len.append(len(item))
#
# bins = np.arange(20)
# plt.hist(np.array(cycle_len), bins=bins, align='left')
# plt.xlabel('cycle length', fontsize=3)
# plt.ylabel('count', fontsize=3)
# G_cycle_mean = 0
# if len(cycle_len)>0:
# G_cycle_mean = sum(cycle_len) / len(cycle_len)
# # if i % 2 == 0:
# # plt.title('real average cycle: {:.4f}'.format(G_cycle_mean), fontsize=4)
# # else:
# # plt.title('pred average cycle: {:.4f}'.format(G_cycle_mean), fontsize=4)
# plt.title('average cycle: {:.4f}'.format(G_cycle_mean), fontsize=4)
# plt.tick_params(axis='both', which='major', labelsize=3)
# plt.tick_params(axis='both', which='minor', labelsize=3)
# plt.tight_layout()
# plt.savefig(fname+'_cycle.png', dpi=600)
# plt.close()
#
# # draw community distribution
# plt.switch_backend('agg')
# for i, G in enumerate(G_list):
# plt.subplot(row, col, | |
#
# Copyright 2015 Naver Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import string
import random
import unittest
import default_cluster
import telnetlib
import config
import util
import time
import json
import subprocess
import crc16
import testbase
BASE32 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
BASE32HEX = '0123456789ABCDEFGHIJKLMNOPQRSTUV'
TRANS = string.maketrans(BASE32HEX, BASE32)
class TestDumpUtil(unittest.TestCase):
cluster = config.clusters[0]
def setUp(self):
util.set_process_logfile_prefix('TestDumpUtil_%s' % self._testMethodName)
self.conf_checker = default_cluster.initialize_starting_up_smr_before_redis(self.cluster)
self.assertIsNotNone(self.conf_checker, 'failed to initialize cluster')
def tearDown(self):
testbase.defaultTearDown(self)
def b32hexdecode(self, s):
s = s.encode('ascii')
s = s.upper()
base32 = string.translate(s, TRANS)
return base64.b32decode(base32)
#def string_gen(self, size=6, chars=string.printable):
def string_gen(self, size=6, chars=string.ascii_letters):
return ''.join(random.choice(chars) for x in range(size))
def bgsave(self, redis):
redis.write('time\r\n')
redis.read_until('\r\n', 1)
redis.read_until('\r\n', 1)
ret = redis.read_until('\r\n', 1)
before_save_time = int(ret.strip())
redis.read_until('\r\n', 1)
redis.read_until('\r\n', 1)
time.sleep(1.1)
redis.write('bgsave\r\n')
ret = redis.read_until('\r\n', 1)
self.assertEqual(ret, '+Background saving started\r\n')
# Wait finishing bgsave
while True:
redis.write('lastsave\r\n')
ret = redis.read_until('\r\n', 1)
lastsave_time = int(ret[1:].strip())
if lastsave_time > before_save_time: break
time.sleep(0.1)
def testbase32hex_conversion(self):
util.print_frame()
count = 100
dict = {}
server0 = self.cluster['servers'][0]
redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])
while count > 0:
count -= 1;
key = self.string_gen(random.randint(1,1000))
val = self.string_gen(random.randint(1,1000))
dict[key] = val
redis0.write('*3\r\n$3\r\nset\r\n')
redis0.write('$%d\r\n%s\r\n' % (len(key), key))
redis0.write('$%d\r\n%s\r\n' % (len(val), val))
ret = redis0.read_until('\r\n', 1)
self.assertEqual(ret, '+OK\r\n')
self.bgsave(redis0)
cmd = "./dump-util --dump-iterator dump.rdb ./dump2json_base32hex.so out.json"
p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
ret = p.wait()
self.assertTrue(ret == 0);
f = file("%s/out.json" % util.dump_util_dir(0), "r")
skip_line = 2
for line in f.readlines():
# skip first 2 lines (smr_seqnum, smr_mstime)
if skip_line > 0:
skip_line -= 1
continue
line = line.strip()
key = self.b32hexdecode(json.loads(line)['key'])
val = self.b32hexdecode(json.loads(line)['value'])
self.assertTrue(key in dict.keys(), 'key(%s) is not in json output' % key)
self.assertEqual(dict[key], val,
"val(%s) is not match with %s" % (dict[key], val))
f.close()
def test_data_type(self):
util.print_frame()
dict = {}
server0 = self.cluster['servers'][0]
redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])
# String
dict['string'] = {}
key = self.string_gen(random.randint(1,5)) + '_type_string'
val = self.string_gen(random.randint(1,5))
dict['string']['key'] = key
dict['string']['val'] = val
redis0.write('*3\r\n$3\r\nset\r\n')
redis0.write('$%d\r\n%s\r\n' % (len(key), key))
redis0.write('$%d\r\n%s\r\n' % (len(val), val))
ret = redis0.read_until('\r\n', 1)
self.assertEqual(ret, '+OK\r\n')
# List
dict['list'] = {}
key = self.string_gen(random.randint(1,5)) + '_type_list'
val1 = self.string_gen(random.randint(1,5))
val2 = self.string_gen(random.randint(1,5))
dict['list']['key'] = key
dict['list']['val1'] = val1
dict['list']['val2'] = val1 # Duplicate value
dict['list']['val3'] = val2
redis0.write('*5\r\n$5\r\nrpush\r\n')
redis0.write('$%d\r\n%s\r\n' % (len(key), key))
redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
redis0.write('$%d\r\n%s\r\n' % (len(val2), val2))
ret = redis0.read_until('\r\n', 1)
self.assertEqual(ret, ':3\r\n')
# Set
dict['set'] = {}
key = self.string_gen(random.randint(1,5)) + '_type_set'
val1 = self.string_gen(random.randint(1,5)) + '_v1'
val2 = self.string_gen(random.randint(1,5)) + '_v2'
dict['set']['key'] = key
dict['set']['val1'] = val1
dict['set']['val2'] = val2
redis0.write('*4\r\n$4\r\nsadd\r\n')
redis0.write('$%d\r\n%s\r\n' % (len(key), key))
redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
redis0.write('$%d\r\n%s\r\n' % (len(val2), val2))
ret = redis0.read_until('\r\n', 1)
self.assertEqual(ret, ':2\r\n')
# Sorted Set
dict['zset'] = {}
key = self.string_gen(random.randint(1,5)) + '_type_zset'
val1 = self.string_gen(random.randint(1,5)) + '_v1'
val2 = self.string_gen(random.randint(1,5)) + '_v2'
dict['zset']['key'] = key
dict['zset']['val1'] = val1
dict['zset']['score1'] = 20
dict['zset']['val2'] = val2
dict['zset']['score2'] = 10
redis0.write('*6\r\n$4\r\nzadd\r\n')
redis0.write('$%d\r\n%s\r\n' % (len(key), key))
redis0.write('$2\r\n20\r\n$%d\r\n%s\r\n' % (len(val1), val1))
redis0.write('$2\r\n10\r\n$%d\r\n%s\r\n' % (len(val2), val2))
ret = redis0.read_until('\r\n', 1)
self.assertEqual(ret, ':2\r\n')
# Hash
dict['hash'] = {}
key = self.string_gen(random.randint(1,5)) + '_type_hash'
key1 = self.string_gen(random.randint(1,5)) + '_k1'
val1 = self.string_gen(random.randint(1,5))
key2 = self.string_gen(random.randint(1,5)) + '_k2'
val2 = self.string_gen(random.randint(1,5))
dict['hash']['key'] = key
dict['hash'][key1] = val1
dict['hash'][key2] = val2
redis0.write('*6\r\n$5\r\nhmset\r\n')
redis0.write('$%d\r\n%s\r\n' % (len(key), key))
redis0.write('$%d\r\n%s\r\n' % (len(key1), key1))
redis0.write('$%d\r\n%s\r\n' % (len(val1), val1))
redis0.write('$%d\r\n%s\r\n' % (len(key2), key2))
redis0.write('$%d\r\n%s\r\n' % (len(val2), val2))
ret = redis0.read_until('\r\n', 1)
self.assertEqual(ret, '+OK\r\n')
self.bgsave(redis0)
cmd = "./dump-util --dump-iterator dump.rdb ./dump2json_base32hex.so out.json"
p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
ret = p.wait()
self.assertTrue(ret == 0)
f = file("%s/out.json" % util.dump_util_dir(0), "r")
skip_line = 2
for line in f.readlines():
# skip first 2 lines (smr_seqnum, smr_mstime)
if skip_line > 0:
skip_line -= 1
continue
data = json.loads(line.strip())
key = self.b32hexdecode(data['key'])
if data['type'] == 'string':
self.assertEqual(dict['string']['key'], key,
"key(%s) is not match with %s" % (dict['string']['key'], key))
val = self.b32hexdecode(data['value'])
self.assertEqual(dict['string']['val'], val,
"val(%s) is not match with %s" % (dict['string']['val'], val))
elif data['type'] == 'list':
self.assertEqual(dict['list']['key'], key,
"key(%s) is not match with %s" % (dict['list']['key'], key))
val1 = self.b32hexdecode(data['value'][0])
val2 = self.b32hexdecode(data['value'][1])
val3 = self.b32hexdecode(data['value'][2])
self.assertEqual(dict['list']['val1'], val1,
"val(%s) is not match with %s" % (dict['list']['val1'], val1))
self.assertEqual(dict['list']['val2'], val2,
"val(%s) is not match with %s" % (dict['list']['val2'], val2))
self.assertEqual(dict['list']['val3'], val3,
"val(%s) is not match with %s" % (dict['list']['val3'], val3))
elif data['type'] == 'set':
self.assertEqual(dict['set']['key'], key,
"key(%s) is not match with %s" % (dict['set']['key'], key))
val1 = self.b32hexdecode(data['value'][0])
val2 = self.b32hexdecode(data['value'][1])
if not (val1 == dict['set']['val1'] and val2 == dict['set']['val2']
or val1 == dict['set']['val2'] and val2 == dict['set']['val1']):
util.log("values(%s, %s) is not match with (%s, %s)" % (dict['set']['val1'],
dict['set']['val2'],
val1,
val2))
self.assertTrue(False)
elif data['type'] == 'zset':
self.assertEqual(dict['zset']['key'], key,
"key(%s) is not match with %s" % (dict['zset']['key'], key))
# Set variable as sort order
val2 = self.b32hexdecode(data['value'][0]['data'])
score2 = int(data['value'][0]['score'])
val1 = self.b32hexdecode(data['value'][1]['data'])
score1 = int(data['value'][1]['score'])
self.assertEqual(dict['zset']['val1'], val1,
"val(%s) is not match with %s" % (dict['zset']['val1'], val1))
self.assertEqual(dict['zset']['score1'], score1,
"score(%d) is not match with %d" % (dict['zset']['score1'], score1))
self.assertEqual(dict['zset']['val2'], val2,
"val(%s) is not match with %s" % (dict['zset']['val2'], val2))
self.assertEqual(dict['zset']['score2'], score2,
"score(%d) is not match with %d" % (dict['zset']['score2'], score2))
elif data['type'] == 'hash':
self.assertEqual(dict['hash']['key'], key,
"key(%s) is not match with %s" % (dict['zset']['key'], key))
key1 = self.b32hexdecode(data['value'][0]['hkey'])
val1 = self.b32hexdecode(data['value'][0]['hval'])
key2 = self.b32hexdecode(data['value'][1]['hkey'])
val2 = self.b32hexdecode(data['value'][1]['hval'])
self.assertTrue(key1 in dict['hash'].keys(), 'hkey(%s) is not in json output' % key1)
self.assertTrue(key2 in dict['hash'].keys(), 'hkey(%s) is not in json output' % key2)
self.assertEqual(dict['hash'][key1], val1,
"val(%s) is not match with %s" % (dict['hash'][key1], val1))
self.assertEqual(dict['hash'][key2], val2,
"val(%s) is not match with %s" % (dict['hash'][key2], val2))
else:
self.assertTrue(False, "Unknown type")
f.close()
def get_redis_curtime(self, redis):
redis.write('time\r\n')
redis.read_until('\r\n')
redis.read_until('\r\n')
cur_time = int(redis.read_until('\r\n')[:-2]) + 1
redis.read_until('\r\n')
redis.read_until('\r\n')
return cur_time
def timedump_and_make_json_output(self, target_time):
cmd = "./dump-util --dump %d ../smr0/log0 . out.rdb" % target_time
p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
ret = p.wait()
self.assertTrue(ret == 0, p.stdout.readlines())
cmd = "./dump-util --dump-iterator out.rdb ./dump2json_base32hex.so out.json"
p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
ret = p.wait()
self.assertTrue(ret == 0, p.stdout.readlines())
def is_key_exists_in_json_file(self, key, json_file):
found = False
f = open(json_file)
for line in f.readlines():
data = json.loads(line.strip())
if self.b32hexdecode(data['key']) == key:
found = True
f.close()
return found
def print_file(self, json_file):
f = open(json_file)
for line in f.readlines():
print line.strip()
f.close()
def test_timedump_with_expire(self):
util.print_frame()
server0 = self.cluster['servers'][0]
redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])
json_file = "%s/out.json" % util.dump_util_dir(0)
redis0.write('setex key1 5 value\r\n')
ret = redis0.read_until('\r\n')
self.bgsave(redis0)
curtime_1 = self.get_redis_curtime(redis0)
time.sleep(7)
redis0.write('setex key2 5 value\r\n')
ret = redis0.read_until('\r\n')
curtime_2 = self.get_redis_curtime(redis0)
time.sleep(7)
redis0.write('setex key3 5 value\r\n')
ret = redis0.read_until('\r\n')
curtime_3 = self.get_redis_curtime(redis0)
print 'currtime_1:%d currtime_2:%d currtime_3:%d ' % (curtime_1, curtime_2, curtime_3)
self.timedump_and_make_json_output(curtime_1)
self.print_file(json_file)
self.assertTrue(self.is_key_exists_in_json_file('key1', json_file))
self.assertFalse(self.is_key_exists_in_json_file('key2', json_file))
self.assertFalse(self.is_key_exists_in_json_file('key3', json_file))
self.timedump_and_make_json_output(curtime_2)
self.print_file(json_file)
self.assertFalse(self.is_key_exists_in_json_file('key1', json_file))
self.assertTrue(self.is_key_exists_in_json_file('key2', json_file))
self.assertFalse(self.is_key_exists_in_json_file('key3', json_file))
self.timedump_and_make_json_output(curtime_3)
self.print_file(json_file)
self.assertFalse(self.is_key_exists_in_json_file('key1', json_file))
self.assertFalse(self.is_key_exists_in_json_file('key2', json_file))
self.assertTrue(self.is_key_exists_in_json_file('key3', json_file))
def migstart(self, redis, range_from, range_to):
cmd = 'migconf migstart %s-%s\r\n' % (range_from, range_to)
redis.write(cmd)
ret = redis.read_until('\r\n', 1)
self.assertEqual(ret, '+OK\r\n')
def migend(self, redis):
cmd = 'migconf migend\r\n'
redis.write(cmd)
ret = redis.read_until('\r\n', 1)
self.assertEqual(ret, '+OK\r\n')
def clearstart(self, redis, range_from, range_to):
cmd = 'migconf clearstart %s-%s\r\n' % (range_from, range_to)
redis.write(cmd)
ret = redis.read_until('\r\n', 1)
self.assertEqual(ret, '+OK\r\n')
def clearend(self, redis):
cmd = 'migconf clearend\r\n'
redis.write(cmd)
ret = redis.read_until('\r\n', 1)
self.assertEqual(ret, '+OK\r\n')
def test_dump_iterator_with_mig_conf_migstart(self):
util.print_frame()
num_test = 100
dict = {}
server0 = self.cluster['servers'][0]
redis0 = telnetlib.Telnet(server0['ip'], server0['redis_port'])
for i in xrange(num_test):
key = self.string_gen(random.randint(1, 64))
val = self.string_gen(random.randint(1, 64))
dict[key] = val
redis0.write('*3\r\n$3\r\nset\r\n')
redis0.write('$%d\r\n%s\r\n' % (len(key), key))
redis0.write('$%d\r\n%s\r\n' % (len(val), val))
ret = redis0.read_until('\r\n', 1)
self.assertEqual(ret, '+OK\r\n')
self.migstart(redis0, 0, 4095)
self.bgsave(redis0)
cmd = "./dump-util --dump-iterator dump.rdb ./dump2json_base32hex.so out.json"
p = util.exec_proc_async(util.dump_util_dir(0), cmd, True, None, subprocess.PIPE, None)
ret = p.wait()
self.assertTrue(ret == 0)
count = 0
f = file("%s/out.json" % util.dump_util_dir(0), "r")
for line in | |
i in range(n - 1):
x_1 = pol_xy[i, 0]
y_1 = pol_xy[i, 1]
x_2 = pol_xy[i + 1, 0]
y_2 = pol_xy[i + 1, 1]
pol_updates.append((x_1, y_1))
dist = np.sqrt((x_1 - x_2) ** 2 + (y_1 - y_2) ** 2)
if dist > dist_param:
n_inter = int(dist // (dist_param / 2))
inter = addMiddlePoints((x_1, y_1), (x_2, y_2), n_inter)
for i in inter:
pol_updates.append(i)
else:
pol_updates.append((x_2, y_2))
pol_updates.append((pol_xy[-1, 0], pol_xy[-1, 1]))
exterior = pol_updates
if shapely:
# Interiors
interiors = []
for interior in pol.interiors:
pol_xy = interior.coords.xy
pol_xy = np.asarray(pol_xy)
pol_xy = np.rot90(pol_xy)
n = np.shape(pol_xy)[0]
pol_updates = []
for i in range(n-1):
x_1 = pol_xy[i, 0]
y_1 = pol_xy[i, 1]
x_2 = pol_xy[i+1, 0]
y_2 = pol_xy[i+1, 1]
pol_updates.append((x_1, y_1))
dist = np.sqrt((x_1-x_2)**2+(y_1-y_2)**2)
if dist > dist_param:
n_inter = int(dist//(dist_param/2))
inter = addMiddlePoints((x_1,y_1),(x_2,y_2), n_inter)
for i in inter:
pol_updates.append(i)
else:
pol_updates.append((x_2, y_2))
pol_updates.append((pol_xy[-1, 0], pol_xy[-1, 1]))
interiors.append(pol_updates)
pol_updated = shPol(shell = exterior, holes = interiors)
return pol_updated
else:
return exterior
def snapPolygonstoPolygon(Polygon2Snap, Polygon2Snap2, Snap_Sensitivity, n_blocks = 10, Progressbar = False):
""" Function to snap a polygon to another polygon
@ params:
Polygon2Snap - Required : Polygon which has to be edited to snap to a reference polygon
Polygon2Snap2 - Required : Reference polygon (or list of polygons) to snap to
Snap_Sensitivity - Required : Maximum distance at which a vertex is snapped
n_blocks - Optional : Number of blocks per row used to spatially index the reference polygon
Progressbar - Optional : If True, print progress information """
Polygon2Snap2_mult = []
Polygon2Snap2_xy = []
t = 0
# divide the polygon to snap to in blocks
Polygon2Snap2_mult = []
Polygon2Snap2_xy = []
Polygon2Snap2_x = []
Polygon2Snap2_y = []
for c in Polygon2Snap2:
if type(c) is shPol:
Polygon2Snap2_mult.append([c])
c = addVerticesToPolygon(c, Snap_Sensitivity / 4)
xy = c.exterior.xy
xy = np.rot90(xy)
for i in xy:
Polygon2Snap2_xy.append(i)
Polygon2Snap2_x.append(i[0])
Polygon2Snap2_y.append(i[1])
for int_pol in c.interiors:
xy = int_pol.xy
xy = np.rot90(xy)
for i in xy:
Polygon2Snap2_xy.append(i)
Polygon2Snap2_x.append(i[0])
Polygon2Snap2_y.append(i[1])
else:
for s in list(c):
Polygon2Snap2_mult.append([s])
s = addVerticesToPolygon(s, Snap_Sensitivity / 4)
xy = s.exterior.xy
xy = np.rot90(xy)
for i in xy:
Polygon2Snap2_xy.append(i)
Polygon2Snap2_x.append(i[0])
Polygon2Snap2_y.append(i[1])
ints = s.interiors
for int_pol in ints:
xy = int_pol.xy
xy = np.rot90(xy)
for i in xy:
Polygon2Snap2_xy.append(i)
Polygon2Snap2_x.append(i[0])
Polygon2Snap2_y.append(i[1])
Polygon2Snap2_x = np.asarray(Polygon2Snap2_x)
Polygon2Snap2_y = np.asarray(Polygon2Snap2_y)
Polygon2Snap2_xy = np.asarray(Polygon2Snap2_xy)
left = np.min(Polygon2Snap2_x)
right = np.max(Polygon2Snap2_x)
bottom = np.min(Polygon2Snap2_y)
top = np.max(Polygon2Snap2_y)
dx = (right - left) / n_blocks
dy = (top - bottom) / n_blocks
if dx < Snap_Sensitivity:
print('The number of blocks is too high!')
elif dy < Snap_Sensitivity:
print('The number of blocks is too high!')
Polygon2Snap2_Blocks = []
Blocks_lefts = np.zeros(n_blocks)
Blocks_tops = np.zeros(n_blocks)
if Progressbar: print('Creating the blocks...')
for j in range(n_blocks):
for i in range(n_blocks):
if Progressbar: printProgressBar(j, n_blocks)
Blocks_lefts[i] = left + i * dx
Blocks_tops[j] = top - j * dy
block = Polygon2Snap2_xy[
(Polygon2Snap2_x > left + i * dx) & (Polygon2Snap2_x <= left + (i + 1) * dx) & (Polygon2Snap2_y < top - j * dy) & (
Polygon2Snap2_y >= top - (j + 1) * dy), :]
Polygon2Snap2_Blocks.append(block)
Blocks_lefts = np.asarray(Blocks_lefts)
Blocks_tops = np.asarray(Blocks_tops)
# prepare the snapping
Polygon2Snap = addVerticesToPolygon(Polygon2Snap, Snap_Sensitivity/4)
Man_snapped = Polygon2Snap
Man_xy = Polygon2Snap.exterior.xy
Man_snapped_xy = Man_snapped.exterior.xy
Man_xy = np.rot90(Man_xy)
Man_snapped_xy_tuplelist = []
# carry out the snapping
if Progressbar: print('Snapping...')
for i in range(np.shape(Man_xy)[0]):
if Progressbar: printProgressBar(i,np.shape(Man_xy)[0])
x = Man_xy[i][0]
y = Man_xy[i][1]
# in what block is the point2snap located?
diff_x = Blocks_lefts - x
xn = np.where(diff_x < 0, diff_x, -np.inf).argmax()
diff_y = Blocks_tops - y
yn = np.where(diff_y > 0, diff_y, np.inf).argmin()
n = n_blocks * yn + xn
Sub2SnapTo = []
for nn in [-(n_blocks+1),-(n_blocks),-(n_blocks-1),-1,0,1,n_blocks-1,n_blocks,n_blocks+1]:
for xy in Polygon2Snap2_Blocks[n + nn]:
Sub2SnapTo.append(xy)
Dist = []
for j in range(len(Sub2SnapTo)):
x_ref = Sub2SnapTo[j][0]
y_ref = Sub2SnapTo[j][1]
d = np.sqrt((x-x_ref)**2+(y-y_ref)**2)
Dist.append(d)
if len(Dist)>0:
minDist = np.min(Dist)
minDist_arg = np.argmin(Dist)
if minDist < Snap_Sensitivity:
x_snap = Sub2SnapTo[minDist_arg][0]
y_snap = Sub2SnapTo[minDist_arg][1]
Man_snapped_xy_tuplelist.append((x_snap,y_snap))
#print(f'Point Snapped! from x: {round(x,4)} y: {round(y,4)} to from x: {round(x_snap,4)} y: {round(y_snap,4)}')
else:
Man_snapped_xy_tuplelist.append((x,y))
else:
Man_snapped_xy_tuplelist.append((x, y))
Man_snapped_xy_tuplelist.append(Man_snapped_xy_tuplelist[0])
Man_snapped = shPol(Man_snapped_xy_tuplelist)
return Man_snapped
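# Usage sketch (illustrative; assumes shapely Polygon objects, with the reference
# geometry `pol_b` wrapped in a list because the function iterates over Polygon2Snap2):
#
#     snapped = snapPolygonstoPolygon(pol_a, [pol_b], Snap_Sensitivity=0.5, n_blocks=10)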
def KML2Grid(Input_fn,Output_fn, Points_resolution = None):
""" Function to transform KML file into a meshed grid
@ params:
Input_fn - Required : Path to the input Google Earth KML file
Output_fn - Required : Path (name and location) of the output vtk file (readable by gmsh software)
Points_resolution - Optional : List of lists where each list contains Lat, Lon and resolution """
# read kml as multi string
kml_str = open(Input_fn, 'r', encoding='latin-1').read()
kml_splitted = np.asarray(kml_str.splitlines())
loc = np.flatnonzero(np.core.defchararray.find(kml_splitted, '<coordinates>') != -1)
# parse lat and lon into lists
t = 0
coor_int = []
for l in loc:
if t == 0:
coor = kml_splitted[l + 1]
coor = np.asarray(re.split(' |/t|,', coor))
else:
c_int = kml_splitted[l + 1]
c_int = np.asarray(re.split(' |/t|,', c_int))
coor_int.append(c_int)
t += 1
Coor_UTM = []
for i in range(0, len(coor) - 1, 3):
try:
x, y, utm_number, utm_letter = utm.from_latlon(float(coor[i + 1]), float(coor[i]))
Coor_UTM.append([x, y, 0])
except:
pass
Coor_UTM_int = []
for c_int in coor_int:
C_UTM_int = []
for i in range(0, len(c_int) - 1, 3):
try:
x, y, utm_number, utm_letter = utm.from_latlon(float(c_int[i + 1]), float(c_int[i]))
C_UTM_int.append([x, y, 0])
except:
pass
Coor_UTM_int.append(C_UTM_int)
ext = Coor_UTM
inter = Coor_UTM_int  # avoid shadowing the built-in name `int`
# create polygon
geom = pygmsh.built_in.Geometry()
geom_points = []
for p in ext:
geom_points.append(geom.add_point(p, lcar=10000))
geom_points.append(geom_points[0])
geom_spline = geom.add_bspline(geom_points)
geom_ll = geom.add_line_loop([geom_spline])
geom_surf = geom.add_plane_surface(geom_ll)
mesh = pygmsh.generate_mesh(geom, dim=2)
# store mesh by means of meshio
fn = Output_fn
if fn[-4:] == '.vtk':
pass
else:
fn = fn + '.vtk'
meshio.write(fn, mesh)
print(fn + ' is saved!')
return ext, inter
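# Usage sketch (illustrative; the file names are hypothetical):
#
#     exterior, interiors = KML2Grid('coastline.kml', 'coastline_mesh.vtk')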
def polPlot(XY, XY_inner = None, plottitle = None, showvertices = False, showverticeslabels = False, showverticeslabelsinterval = 1, plotonaxis = 0, empty = False, vertices_color = 'silver'):
""" Function to plot polygon based on list of coordinates
XY - Required : List of x and y coordinates (list of lists)
"""
if plotonaxis == 0:
f, a = plt.subplots()
else:
a = plotonaxis
XY_np = np.asarray(XY)
xmin = np.min(XY_np[:, 0])
xmax = np.max(XY_np[:, 0])
ymin = np.min(XY_np[:, 1])
ymax = np.max(XY_np[:, 1])
if plottitle:
a.set_title(plottitle, fontweight = 'bold')
a.set_xlim(xmin - 0.1*(xmax-xmin), xmax + 0.1*(xmax-xmin))
a.set_ylim(ymin - 0.1*(ymax-ymin), ymax + 0.1*(ymax-ymin))
if empty:
col = 'none'
colin = 'none'
else:
col = 'silver'
colin = 'white'
pol = Polygon(XY, facecolor = col, edgecolor = 'turquoise' ) # matplotlib.patches.Polygon
if XY_inner is not None:
pols_inner = []
for i in XY_inner:
pol_inner = Polygon(i, facecolor=colin, edgecolor='turquoise') # matplotlib.patches.Polygon
pols_inner.append(pol_inner)
a.add_patch(pol)
if XY_inner is not None:
for pol_inner in pols_inner:
a.add_patch(pol_inner)
if showvertices:
a.scatter(XY_np[:,0], XY_np[:,1], s = 4, edgecolor = vertices_color, facecolor = 'none', zorder = 10)
if XY_inner:
for i in XY_inner:
a.scatter(np.asarray(i)[:, 0], np.asarray(i)[:, 1], s=4, edgecolor= vertices_color, facecolor='none', zorder = 10)
t = 1
if showverticeslabels:
for x,y in XY:
if t%showverticeslabelsinterval == 0: a.annotate(str(t),(x,y))
t += 1
if XY_inner:
for i in XY_inner:
for x,y in i:
if t%showverticeslabelsinterval == 0: a.annotate(str(t), (x, y))
t += 1
a.set_aspect('equal')
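# Usage sketch (illustrative): plot a unit square and label its vertices.
#
#     polPlot([(0, 0), (1, 0), (1, 1), (0, 1)], plottitle='unit square',
#             showvertices=True, showverticeslabels=True)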
def loadPolygonFromShapefile(fn, Print_Coordinate_System = False):
""" Function to load a shapefile containing one polygon with outer and inner boundaries
fn - Required: File path directory
"""
# load shapefile with the ogr toolbox of osgeo
file = ogr.Open(fn)
shape = file.GetLayer(0)
if Print_Coordinate_System:
print(f'The Coordinate system is: {shape.GetSpatialRef()}')
n_features = shape.GetFeatureCount()
Coor = []
for i in range(n_features):
feature = shape.GetFeature(i)
feature_JSON = feature.ExportToJson()
feature_JSON = json.loads(feature_JSON)
coor = feature_JSON['geometry']['coordinates']
if n_features > 1:
Coor.append(coor)
if n_features == 1:
if len(coor[0][0]) > 2:
coor = coor[0]
exterior = coor[0]
interiors = coor[1:]
return exterior, interiors, feature_JSON
else:
exter, inter = [], []
for c in Coor:
ex = c
for e in ex:
exter.append(e[0])
if len(e)>1:
inter.append(e[1:])
return exter, inter, feature_JSON, Coor
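# Usage sketch (illustrative; 'area.shp' is a hypothetical shapefile containing a
# single polygon, which triggers the three-value return above):
#
#     exterior, interiors, feature = loadPolygonFromShapefile('area.shp')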
def simplifyPol(XY,tresh):
""" Function to remove nodes which are located nearby other nodes
XY - Required: coordinate list of nodes
tresh - Required: threshold distance
"""
def remove_below_tresh(XY,tresh):
n = len(XY)
def eucldist(p1,p2):
x1 = p1[0]
x2 = p2[0]
| |
completion
assert runner._pairs[0][0].jobs[0].start_workflow.workflow_type == "HelloWorkflow"
assert (
runner._pairs[-1][-1]
.successful.commands[0]
.complete_workflow_execution.result.data
== b'"Hello, Temporal!"'
)
@workflow.defn
class ContinueAsNewWorkflow:
@workflow.run
async def run(self, past_run_ids: List[str]) -> List[str]:
if len(past_run_ids) == 5:
return past_run_ids
info = workflow.info()
if info.continued_run_id:
past_run_ids.append(info.continued_run_id)
workflow.continue_as_new(past_run_ids)
async def test_workflow_continue_as_new(client: Client):
async with new_worker(client, ContinueAsNewWorkflow) as worker:
handle = await client.start_workflow(
ContinueAsNewWorkflow.run,
cast(List[str], []),
id=f"workflow-{uuid.uuid4()}",
task_queue=worker.task_queue,
)
result = await handle.result()
assert len(result) == 5
assert result[0] == handle.first_execution_run_id
sa_prefix = "python_test_"
def search_attrs_to_dict_with_type(attrs: SearchAttributes) -> Mapping[str, Any]:
return {
k: {
"type": type(vals[0]).__name__ if vals else "<unknown>",
"values": [str(v) if isinstance(v, datetime) else v for v in vals],
}
for k, vals in attrs.items()
}
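# Example (illustrative): the helper records each attribute's element type and values.
#
#     search_attrs_to_dict_with_type({"python_test_int": [123]})
#     # => {"python_test_int": {"type": "int", "values": [123]}}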
@workflow.defn
class SearchAttributeWorkflow:
@workflow.run
async def run(self) -> None:
# Wait forever
await asyncio.Future()
@workflow.query
def get_search_attributes(self) -> Mapping[str, Mapping[str, Any]]:
return search_attrs_to_dict_with_type(workflow.info().search_attributes or {})
@workflow.signal
def do_search_attribute_update(self) -> None:
workflow.upsert_search_attributes(
{
f"{sa_prefix}text": ["text3"],
# We intentionally leave keyword off to confirm it still comes back
f"{sa_prefix}int": [123, 456],
# Empty list to confirm removed
f"{sa_prefix}double": [],
f"{sa_prefix}bool": [False],
f"{sa_prefix}datetime": [
datetime(2003, 4, 5, 6, 7, 8, tzinfo=timezone(timedelta(hours=9)))
],
}
)
async def test_workflow_search_attributes(server: ExternalServer, client: Client):
if not server.supports_custom_search_attributes:
pytest.skip("Custom search attributes not supported")
async def search_attributes_present() -> bool:
resp = await client.service.get_search_attributes(GetSearchAttributesRequest())
return any(k for k in resp.keys.keys() if k.startswith(sa_prefix))
# Add search attributes if not already present
if not await search_attributes_present():
async with grpc.aio.insecure_channel(server.host_port) as channel:
stub = OperatorServiceStub(channel)
await stub.AddSearchAttributes(
AddSearchAttributesRequest(
search_attributes={
f"{sa_prefix}text": IndexedValueType.INDEXED_VALUE_TYPE_TEXT,
f"{sa_prefix}keyword": IndexedValueType.INDEXED_VALUE_TYPE_KEYWORD,
f"{sa_prefix}int": IndexedValueType.INDEXED_VALUE_TYPE_INT,
f"{sa_prefix}double": IndexedValueType.INDEXED_VALUE_TYPE_DOUBLE,
f"{sa_prefix}bool": IndexedValueType.INDEXED_VALUE_TYPE_BOOL,
f"{sa_prefix}datetime": IndexedValueType.INDEXED_VALUE_TYPE_DATETIME,
},
)
)
# TODO(cretz): Why is it required to issue this list call before it
# will appear in the other RPC list call?
await stub.ListSearchAttributes(ListSearchAttributesRequest())
# Confirm now present
assert await search_attributes_present()
async with new_worker(client, SearchAttributeWorkflow) as worker:
handle = await client.start_workflow(
SearchAttributeWorkflow.run,
id=f"workflow-{uuid.uuid4()}",
task_queue=worker.task_queue,
search_attributes={
f"{sa_prefix}text": ["text1", "text2", "text0"],
f"{sa_prefix}keyword": ["keyword1"],
f"{sa_prefix}int": [123],
f"{sa_prefix}double": [456.78],
f"{sa_prefix}bool": [True],
f"{sa_prefix}datetime": [
# With UTC
datetime(2001, 2, 3, 4, 5, 6, tzinfo=timezone.utc),
# With other offset
datetime(2002, 3, 4, 5, 6, 7, tzinfo=timezone(timedelta(hours=8))),
],
},
)
# Make sure it started with the right attributes
expected = {
f"{sa_prefix}text": {"type": "str", "values": ["text1", "text2", "text0"]},
f"{sa_prefix}keyword": {"type": "str", "values": ["keyword1"]},
f"{sa_prefix}int": {"type": "int", "values": [123]},
f"{sa_prefix}double": {"type": "float", "values": [456.78]},
f"{sa_prefix}bool": {"type": "bool", "values": [True]},
f"{sa_prefix}datetime": {
"type": "datetime",
"values": ["2001-02-03 04:05:06+00:00", "2002-03-04 05:06:07+08:00"],
},
}
assert expected == await handle.query(
SearchAttributeWorkflow.get_search_attributes
)
# Do an attribute update and check query
await handle.signal(SearchAttributeWorkflow.do_search_attribute_update)
expected = {
f"{sa_prefix}text": {"type": "str", "values": ["text3"]},
f"{sa_prefix}keyword": {"type": "str", "values": ["keyword1"]},
f"{sa_prefix}int": {"type": "int", "values": [123, 456]},
f"{sa_prefix}double": {"type": "<unknown>", "values": []},
f"{sa_prefix}bool": {"type": "bool", "values": [False]},
f"{sa_prefix}datetime": {
"type": "datetime",
"values": ["2003-04-05 06:07:08+09:00"],
},
}
assert expected == await handle.query(
SearchAttributeWorkflow.get_search_attributes
)
# Also confirm it matches describe from the server
desc = await handle.describe()
attrs = decode_search_attributes(
desc.raw_message.workflow_execution_info.search_attributes
)
# Remove attrs without our prefix
attrs = {k: v for k, v in attrs.items() if k.startswith(sa_prefix)}
assert expected == search_attrs_to_dict_with_type(attrs)
@workflow.defn
class LoggingWorkflow:
def __init__(self) -> None:
self._last_signal = "<none>"
@workflow.run
async def run(self) -> None:
await workflow.wait_condition(lambda: self._last_signal == "finish")
@workflow.signal
def my_signal(self, value: str) -> None:
self._last_signal = value
workflow.logger.info(f"Signal: {value}")
@workflow.query
def last_signal(self) -> str:
return self._last_signal
async def test_workflow_logging(client: Client):
# Use queue to capture log statements
log_queue: queue.Queue[logging.LogRecord] = queue.Queue()
handler = logging.handlers.QueueHandler(log_queue)
workflow.logger.base_logger.addHandler(handler)
prev_level = workflow.logger.base_logger.level
workflow.logger.base_logger.setLevel(logging.INFO)
def find_log(starts_with: str) -> Optional[logging.LogRecord]:
for record in cast(List[logging.LogRecord], log_queue.queue):
if record.message.startswith(starts_with):
return record
return None
try:
# Log two signals and kill worker before completing
async with new_worker(client, LoggingWorkflow) as worker:
handle = await client.start_workflow(
LoggingWorkflow.run,
id=f"workflow-{uuid.uuid4()}",
task_queue=worker.task_queue,
)
# Send a couple signals
await handle.signal(LoggingWorkflow.my_signal, "signal 1")
await handle.signal(LoggingWorkflow.my_signal, "signal 2")
assert "signal 2" == await handle.query(LoggingWorkflow.last_signal)
# Confirm two logs happened
assert find_log("Signal: signal 1 ({'attempt':")
assert find_log("Signal: signal 2")
assert not find_log("Signal: signal 3")
# Also make sure it has some workflow info
record = find_log("Signal: signal 1")
assert (
record
and record.__dict__["workflow_info"].workflow_type == "LoggingWorkflow"
)
# Clear queue and start a new one with more signals
log_queue.queue.clear()
async with new_worker(
client, LoggingWorkflow, task_queue=worker.task_queue
) as worker:
# Send a couple signals
await handle.signal(LoggingWorkflow.my_signal, "signal 3")
await handle.signal(LoggingWorkflow.my_signal, "finish")
await handle.result()
# Confirm replayed logs are not present but new ones are
assert not find_log("Signal: signal 1")
assert not find_log("Signal: signal 2")
assert find_log("Signal: signal 3")
assert find_log("Signal: finish")
finally:
workflow.logger.base_logger.removeHandler(handler)
workflow.logger.base_logger.setLevel(prev_level)
@workflow.defn
class StackTraceWorkflow:
def __init__(self) -> None:
self._status = "created"
@workflow.run
async def run(self) -> None:
# Start several tasks
awaitables = [
asyncio.sleep(1000),
workflow.execute_activity(
wait_cancel, schedule_to_close_timeout=timedelta(seconds=1000)
),
workflow.execute_child_workflow(
LongSleepWorkflow.run, id=f"{workflow.info().workflow_id}_child"
),
self.never_completing_coroutine(),
]
await asyncio.wait([asyncio.create_task(v) for v in awaitables])
async def never_completing_coroutine(self) -> None:
self._status = "waiting"
await workflow.wait_condition(lambda: False)
@workflow.query
def status(self) -> str:
return self._status
async def test_workflow_stack_trace(client: Client):
async with new_worker(
client, StackTraceWorkflow, LongSleepWorkflow, activities=[wait_cancel]
) as worker:
handle = await client.start_workflow(
StackTraceWorkflow.run,
id=f"workflow-{uuid.uuid4()}",
task_queue=worker.task_queue,
)
# Wait until waiting
async def status() -> str:
return await handle.query(StackTraceWorkflow.status)
await assert_eq_eventually("waiting", status)
# Send stack trace query
trace = await handle.query("__stack_trace")
# TODO(cretz): Do more specific checks once we clean up traces
assert "never_completing_coroutine" in trace
@dataclass
class MyDataClass:
field1: str
def assert_expected(self) -> None:
# Part of the assertion is that this is the right type, which is
# confirmed just by calling the method. We also check the field.
assert self.field1 == "some value"
@activity.defn
async def data_class_typed_activity(param: MyDataClass) -> MyDataClass:
param.assert_expected()
return param
@runtime_checkable
@workflow.defn(name="DataClassTypedWorkflow")
class DataClassTypedWorkflowProto(Protocol):
@workflow.run
async def run(self, arg: MyDataClass) -> MyDataClass:
...
@workflow.signal
def signal_sync(self, param: MyDataClass) -> None:
...
@workflow.query
def query_sync(self, param: MyDataClass) -> MyDataClass:
...
@workflow.signal
def complete(self) -> None:
...
@workflow.defn(name="DataClassTypedWorkflow")
class DataClassTypedWorkflowAbstract(ABC):
@workflow.run
@abstractmethod
async def run(self, arg: MyDataClass) -> MyDataClass:
...
@workflow.signal
@abstractmethod
def signal_sync(self, param: MyDataClass) -> None:
...
@workflow.query
@abstractmethod
def query_sync(self, param: MyDataClass) -> MyDataClass:
...
@workflow.signal
@abstractmethod
def complete(self) -> None:
...
@workflow.defn
class DataClassTypedWorkflow(DataClassTypedWorkflowAbstract):
def __init__(self) -> None:
self._should_complete = asyncio.Event()
@workflow.run
async def run(self, param: MyDataClass) -> MyDataClass:
param.assert_expected()
# Only do activities and child workflows on top level
if not workflow.info().parent:
param = await workflow.execute_activity(
data_class_typed_activity,
param,
start_to_close_timeout=timedelta(seconds=30),
)
param.assert_expected()
param = await workflow.execute_local_activity(
data_class_typed_activity,
param,
start_to_close_timeout=timedelta(seconds=30),
)
param.assert_expected()
child_handle = await workflow.start_child_workflow(
DataClassTypedWorkflow.run,
param,
id=f"{workflow.info().workflow_id}_child",
)
await child_handle.signal(DataClassTypedWorkflow.signal_sync, param)
await child_handle.signal(DataClassTypedWorkflow.signal_async, param)
await child_handle.signal(DataClassTypedWorkflow.complete)
param = await child_handle
param.assert_expected()
await self._should_complete.wait()
return param
@workflow.signal
def signal_sync(self, param: MyDataClass) -> None:
param.assert_expected()
@workflow.signal
async def signal_async(self, param: MyDataClass) -> None:
param.assert_expected()
@workflow.query
def query_sync(self, param: MyDataClass) -> MyDataClass:
param.assert_expected()
return param
@workflow.query
async def query_async(self, param: MyDataClass) -> MyDataClass:
return param
@workflow.signal
def complete(self) -> None:
self._should_complete.set()
async def test_workflow_dataclass_typed(client: Client):
async with new_worker(
client, DataClassTypedWorkflow, activities=[data_class_typed_activity]
) as worker:
val = MyDataClass(field1="some value")
handle = await client.start_workflow(
DataClassTypedWorkflow.run,
val,
id=f"workflow-{uuid.uuid4()}",
task_queue=worker.task_queue,
)
await handle.signal(DataClassTypedWorkflow.signal_sync, val)
await handle.signal(DataClassTypedWorkflow.signal_async, val)
(await handle.query(DataClassTypedWorkflow.query_sync, val)).assert_expected()
# TODO(cretz): Why does MyPy need this annotated?
query_result: MyDataClass = await handle.query(
DataClassTypedWorkflow.query_async, val
)
query_result.assert_expected()
await handle.signal(DataClassTypedWorkflow.complete)
(await handle.result()).assert_expected()
async def test_workflow_separate_protocol(client: Client):
# This test is to confirm that protocols can be used as "interfaces" for
# when the workflow impl is absent
async with new_worker(
client, DataClassTypedWorkflow, activities=[data_class_typed_activity]
) as worker:
# Our decorators add attributes on the class, but protocols don't allow
# you to use issubclass with any attributes other than their fixed ones.
# We are asserting that this invariant holds so we can document it and
# revisit in a later version if they change this.
# TODO(cretz): If we document how to use protocols as workflow
# interfaces/contracts, we should mention that they can't use
# @runtime_checkable with issubclass.
with pytest.raises(TypeError) as err:
assert issubclass(DataClassTypedWorkflow, DataClassTypedWorkflowProto)
assert "non-method members" in str(err.value)
assert isinstance(DataClassTypedWorkflow(), DataClassTypedWorkflowProto)
val = MyDataClass(field1="some value")
handle = await client.start_workflow(
DataClassTypedWorkflowProto.run,
val,
id=f"workflow-{uuid.uuid4()}",
task_queue=worker.task_queue,
)
await handle.signal(DataClassTypedWorkflowProto.signal_sync, val)
(
await handle.query(DataClassTypedWorkflowProto.query_sync, val)
).assert_expected()
await handle.signal(DataClassTypedWorkflowProto.complete)
(await handle.result()).assert_expected()
async def test_workflow_separate_abstract(client: Client):
# This test is to confirm that abstract classes can be used as "interfaces"
# for when the workflow impl is absent
async with new_worker(
client, DataClassTypedWorkflow, activities=[data_class_typed_activity]
) as worker:
"""
Author: <NAME>
Defines the ExportManager class
"""
import logging
from enum import Enum as PythonEnum
import ee
from gee_tools.datasources.generic_datasources import GenericSingleImageDatasource
from gee_tools.datasources.interface import SingleImageDatasource
from gee_tools.exports.task_scheduler import TaskScheduler
from gee_tools.exports.image_spec import ImageSpec, add_imagery
from gee_tools.assetsmanager import asset_exists
logger = logging.getLogger(__name__)
class ExportManagerError(RuntimeError):
pass
class ExportManager(object):
"""
The purpose of this class is to consume one
large configuration file describing a desired scene
and then export that scene or sample tiles under
different configurations.
"""
def __init__(self, datasources_config):
"""
Args:
datasources_config (Dict[str, Dict[str, Any]]):
A dictionary specifying a mapping from datasource names
to datasource configurations. Each data source configuration
must have the following format:
{
"class": (Union[MultiImageDatasource, GlobalImageDatasource, SingleImageDatasource]),
"args": (Optiona[Dict[str, Any]]) Will be passed to the class constructor as kwargs
after filterpoly, start_date, and end_date as appropriate,
"composite_fn": (Callable[[ee.ImageCollection], ee.Image]) A function to convert the image collection to an image.
If None and the class inherits from SingleImageDatasource then .first() will be used.,
"bands": (List[str]) The list of expected band names,
"tag": (Optional[Union[str, Enum, List[str], List[Enum]]]) An ID or set of IDs
used for filtering as described in public methods. Defaults to [].,
"cache_asset_id": (Optional[str]) If this is present and the class inherits from SingleImageDatasource
then the output will be saved to the asset ID represented by this string before exports occur. This
can be useful for jobs that would otherwise run out of memory.,
}
{
"landsat": {
"class": optx.LandsatSR,
"args": {},
"composite_fn": cmp_fns.select_median(LANDSATSR_COMMON_BANDS),
"bands": LANDSATSR_COMMON_BANDS,
"tag": 'CNN_TILES',
},
"lm_worldpop": {
"class": GenericSingleImageDatasource,
"args": { 'image_args': ee.Image("projects/atlasaipbc/clients/world_bank_et/inputs/worldpop_v2_adj/AFR_PPP_2015_adj_v2").select([0], ['WORLD_POP']) },
"composite_fn": None,
"bands": ['WORLD_POP'],
"tag": 'LM_FEATURES',
# caching does not make sense here, but, as an example, it may be useful if the composite function performs expensive featurization.
"cache_asset_id": 'projects/atlasaipbc/clients/world_bank_et/linear_model_features/lm_worldpop',
},
}
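Example usage (illustrative only; the dates, region and tag below are
placeholders, not real project data):
    manager = ExportManager(datasources_config)
    scene, bands = manager.get_scene({
        'start_date': '2015-01-01',
        'end_date': '2016-01-01',
        'filterpoly': ee.Geometry.Rectangle([38.0, 8.0, 39.0, 9.0]),
        'projection': 'EPSG:4326',
        'scale': 100,
    }, tags=['LM_FEATURES'])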
"""
self.datasources_config = dict(datasources_config)
for input_name in list(self.datasources_config.keys()):
input_config = self.datasources_config[input_name]
# Create shallow copy with defaults.
input_config = dict(input_config)
if 'tag' not in input_config:
input_config['tag'] = []
in_tag = input_config['tag']
if isinstance(in_tag, list):
input_config['tag'] = set(in_tag)
elif isinstance(in_tag, str) or isinstance(in_tag, PythonEnum):
input_config['tag'] = set([in_tag])
else:
raise ExportManagerError('Unrecognized tag type (must be str, Enum, or List): {}'.format(in_tag))
self.datasources_config[input_name] = input_config
def _get_datasources_by_tag(self, tags=None):
"""
Args:
tags (Optional[Iterable[Union[str, Enum]]]):
Returns:
(Dict[str, Dict[str, Any]]): self.datasources_config filtered by tags.
If tags is None, return all datasource configs.
"""
data_sources = self.datasources_config
if tags is None:
return data_sources
tags = set(tags)
def matches_tags(input_config):
in_tags = input_config['tag']
return len(in_tags.intersection(tags)) > 0
return {
input_name: input_config
for input_name, input_config in data_sources.items()
if matches_tags(input_config)
}
@staticmethod
def _convert_to_image_spec(image_spec):
"""
Convert a dict or an ImageSpec into an ImageSpec
"""
if isinstance(image_spec, ImageSpec):
return image_spec
else:
return ImageSpec(**image_spec)
@staticmethod
def _populate_cache(image_spec, datasources):
scheduler = TaskScheduler()
export_region = None
image_spec_kwargs = {
'start_date': image_spec.start_date,
'end_date': image_spec.end_date,
'filterpoly': image_spec.region,
'projection': image_spec.projection,
'scale': image_spec.scale,
}
for input_name, input_config in datasources.items():
output_asset_id = input_config.get('cache_asset_id', None)
if output_asset_id is None:
continue
if asset_exists(output_asset_id):
logger.warning('{} already exists. Will not precompute.'.format(output_asset_id))
continue
if not issubclass(input_config['class'], SingleImageDatasource):
raise ExportManagerError(
'Cannot cache {}. The provided class '
'is not a SingleImageDatasource.'.format(input_name)
)
image_spec = ImageSpec(**image_spec_kwargs)
image_spec.add_datasource(
datasource_class=input_config['class'],
composite_function=input_config['composite_fn'],
ds_kwargs=input_config['args'],
)
scene = image_spec.get_scene(add_latlon=False)
scene = scene.clip(image_spec_kwargs['filterpoly'])
if export_region is None:
export_region = image_spec.region.bounds().getInfo()['coordinates']
task = ee.batch.Export.image.toAsset(**{
'image': scene,
'description': input_name,
'assetId': output_asset_id,
'region': export_region,
'scale': image_spec_kwargs['scale'],
'crs': image_spec_kwargs['projection'],
'maxPixels': 1e13,
})
logger.info('Will precompute {}'.format(input_name))
scheduler.add_task(task, output_asset_id)
if len(scheduler) > 0:
scheduler.run(verbose=999, error_on_fail=True)
@staticmethod
def _populate_image_spec(image_spec, datasources):
output_bands = []
for _, input_config in datasources.items():
cache_asset_id = input_config.get('cache_asset_id', None)
if cache_asset_id is None:
image_spec.add_datasource(
datasource_class=input_config['class'],
composite_function=input_config['composite_fn'],
ds_kwargs=input_config['args'],
)
else:
image_spec.add_datasource(
datasource_class=GenericSingleImageDatasource,
composite_function=None,
ds_kwargs={'image_args': cache_asset_id},
)
output_bands.extend(input_config['bands'])
if 'LAT' in output_bands or 'LON' in output_bands:
raise ValueError(
'Output bands contained the key LAT or LON. '
'This is not allowed, LAT and LON will be added automatically.'
)
output_bands += [u'LAT', u'LON']
return output_bands
def _get_image_spec_helper(self, image_spec, tags=None):
datasources = self._get_datasources_by_tag(tags=tags)
image_spec = ExportManager._convert_to_image_spec(image_spec)
ExportManager._populate_cache(image_spec, datasources)
output_bands = ExportManager._populate_image_spec(image_spec, datasources)
return image_spec, output_bands
def get_scene(self, image_spec, tags=None, add_latlon=True):
"""
Build a single scene image by combining the configured datasources
according to the given image specification.
Args:
image_spec (Union[ImageSpec, Dict[str, Any]]): Either an ImageSpec instance or a dict formatted as follows:
{
'start_date': Union[ee.Date, str],
'end_date': Union[ee.Date, str],
'filterpoly': ee.Geometry,
'projection': str, # CRS
'scale': Union[int, float],
}
If an ImageSpec is passed, datasources already in the ImageSpec instance will be included in the output.
tags (Optional[Union[str, Enum, Collection[str, Enum]]]): A collection of tags matching those passed in
the 'datasources_config' constructor argument. Only tags contained in the tags argument
will be used when generating the return value. If None, all datasources are included.
Defaults to None.
add_latlon (Boolean): If True add a LAT and a LON band to the output scene. Defaults to True.
Returns:
(Tuple[ee.Image, List[str]]):
First element: The image represented by combining datasources according to
the specifications in the image_spec argument.
Second element: The list of output bands.
If a collection in the constructor argument is filtered in such a way that it becomes the empty
collection, its bands will be omitted from the output but will still be included in the second
return element.
"""
image_spec, output_bands = self._get_image_spec_helper(image_spec, tags)
return image_spec.get_scene(add_latlon=add_latlon), output_bands
def sample_tiles(self, fc, image_spec, export_radius, tags=None):
"""
Take a featureCollection (fc) where each row has point geometry and
return a featureCollection with image bands added.
Args:
fc (ee.FeatureCollection): A feature collection, all features must have point geometries.
image_spec (Union[ImageSpec, Dict[str, Any]]): Either an ImageSpec instance or a dict formatted as follows:
{
'start_date': Union[ee.Date, str],
'end_date': Union[ee.Date, str],
'filterpoly': ee.Geometry,
'projection': str, # CRS
'scale': Union[int, float],
}
If an ImageSpec is passed, datasources already in the ImageSpec instance will be included in the output.
tags (Optional[Union[str, Enum, Collection[str, Enum]]]): A collection of tags matching those passed in
the 'datasources_config' constructor argument. Only tags contained in the tags argument
will be used when generating the return value. If None, all datasources are included.
Defaults to None.
export_radius (int): The output size in pixels (the final output is a (2 * output_size + 1) by (2 * output_size + 1) square)
Returns:
(Tuple[ee.FeatureCollection, List[str]]):
First element: A new feature collection with an output_size by output_size tile added to each row.
The tile's bands are stored in separate columns.
Second element: The list of output bands.
If a collection in the constructor argument is filtered in such a way that it becomes the empty
collection, its bands will be omitted from the output but will still be included in the second
return element.
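Example (illustrative; the point coordinates and radius are placeholders):
    fc = ee.FeatureCollection([ee.Feature(ee.Geometry.Point([38.7, 9.0]))])
    tiles, bands = manager.sample_tiles(fc, image_spec, export_radius=16)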
"""
image_spec, output_bands = self._get_image_spec_helper(image_spec, tags)
fc = add_imagery(fc, image_spec, output_size=export_radius)
return fc, output_bands
def sample_tiles_unstacked(self, fc, image_spec, export_radius, tags=None):
"""
Take a featureCollection (fc) where each row has point geometry and
return a featureCollection with image bands added. Identical to sample_tiles
with a different implementation that may be more memory efficient.
Args:
fc (ee.FeatureCollection): A feature collection, all features must have point geometries.
image_spec (Union[ImageSpec, Dict[str, Any]]): Either an ImageSpec instance or a dict formated as follows:
{
'start_date': Union[ee.Date, str],
'end_date': Union[ee.Date, str],
'filterpoly': ee.Geometry,
'projection': str, # CRS
'scale': Union[int, float],
}
If an ImageSpec is passed, datasources already in the ImageSpec instance will be included in the output.
export_radius (int): The output size in pixels (the final output is a (2 * output_size + 1) by (2 * output_size + 1) square)
tags (Optional[Union[str, Enum, Collection[str, Enum]]]): A collection of tags matching those passed in
the 'datasources_config' constructor argument. Only tags contained in the tags argument
will be used when generating the return value. If None, all datasources are included.
Defaults to None.
Returns:
(Tuple[ee.FeatureCollection, List[str]]):
First element: A new feature collection with an output_size by output_size tile added to each row.
The tile's bands are stored in separate columns.
Second element: The list of output bands.
If a collection in the constructor argument is filtered in such a way that it becomes the empty
collection, its bands will be omitted from the output but will still be included in the second return element.
"""
PBC changed.
"""
if self.PBCYCheckBox.isChecked():
self.PBC[1] = 1
else:
self.PBC[1] = 0
def PBCZChanged(self, val):
"""
PBC changed.
"""
if self.PBCZCheckBox.isChecked():
self.PBC[2] = 1
else:
self.PBC[2] = 0
def getCurrentStateIndexes(self):
"""
Return indexes of states that are currently selected
"""
refIndex = self.refCombo.currentIndex()
inputIndex = self.inputCombo.currentIndex()
return refIndex, inputIndex
def changeStateDisplayName(self, index, displayName):
"""
Change display name of state
"""
self.refCombo.setItemText(index, displayName)
self.inputCombo.setItemText(index, displayName)
def addStateOption(self, filename):
"""
Add state option to combo boxes
"""
self.refCombo.addItem(filename)
self.inputCombo.addItem(filename)
if not self.mainToolbar.tabWidget.isTabEnabled(1):
# enable and switch to analysis tab after first file is loaded
self.mainToolbar.tabWidget.setTabEnabled(1, True)
self.mainToolbar.tabWidget.setCurrentIndex(1)
def removeStateOption(self, index):
"""
Remove state option from combo boxes
"""
self.refCombo.removeItem(index)
self.inputCombo.removeItem(index)
def checkStateChangeOk(self):
"""
Check it was ok to change the state.
"""
if self.inputState is None:
return
ref = self.refState
inp = self.inputState
diff = False
for i in range(3):
if math.fabs(inp.cellDims[i] - ref.cellDims[i]) > 1e-4:
diff = True
break
if diff:
self.logger.warning("Cell dims differ")
return diff
def postRefLoaded(self, oldRef):
"""
Do stuff after the ref has been loaded.
"""
self.logger.debug("Running postRefLoaded")
if oldRef is not None:
self.clearAllActors()
self.removeInfoWindows()
self.refreshAllFilters()
for rw in self.rendererWindows:
if rw.currentPipelineIndex == self.pipelineIndex:
rw.textSelector.refresh()
rw.outputDialog.plotTab.rdfForm.refresh()
self.mainWindow.readLBOMDIN()
for rw in self.rendererWindows:
if rw.currentPipelineIndex == self.pipelineIndex:
rw.renderer.postRefRender()
rw.textSelector.refresh()
self.logger.debug("Finished postRefLoaded")
def postInputLoaded(self):
"""
Do stuff after the input has been loaded
"""
self.logger.debug("Running postInputLoaded")
self.clearAllActors()
self.removeInfoWindows()
self.refreshAllFilters()
for rw in self.rendererWindows:
if rw.currentPipelineIndex == self.pipelineIndex:
rw.postInputChanged()
settings = self.mainWindow.preferences.renderingForm
if self.inputState.NAtoms <= settings.maxAtomsAutoRun:
self.logger.debug("Running all filter lists automatically")
self.runAllFilterLists()
self.logger.debug("Finished postInputLoaded")
def refChanged(self, index):
"""
Ref changed
"""
# item
item = self.mainWindow.systemsDialog.systems_list_widget.item(index)
# lattice
state = item.lattice
# check if has really changed
if self.refState is state:
return
old_ref = self.refState
self.refState = state
self.extension = item.extension
# post ref loaded
self.postRefLoaded(old_ref)
# check ok
status = self.checkStateChangeOk()
if status:
# must change input too
self.inputCombo.setCurrentIndex(index)
def inputChanged(self, index):
"""
Input changed
"""
self.logger.debug("Running inputChanged")
# item
item = self.mainWindow.systemsDialog.systems_list_widget.item(index)
# lattice
state = item.lattice
# check if has really changed
if self.inputState is state:
return
self.inputState = state
self.filename = item.displayName
self.extension = item.extension
self.abspath = item.abspath
self.fileFormat = item.fileFormat
self.linkedLattice = item.linkedLattice
self.fromSFTP = item.fromSFTP
self.PBC = state.PBC
self.setPBCChecks()
# check ok
status = self.checkStateChangeOk()
if status:
# must change ref too
self.refCombo.setCurrentIndex(index)
# post input loaded
self.postInputLoaded()
def setPBCChecks(self):
"""
Set the PBC checks
"""
self.PBCXCheckBox.setChecked(self.PBC[0])
self.PBCYCheckBox.setChecked(self.PBC[1])
self.PBCZCheckBox.setChecked(self.PBC[2])
def removeInfoWindows(self):
"""
Remove all info windows and associated highlighters
"""
self.logger.debug("Clearing all info windows/highlighters")
for filterList_ in self.filterLists:
filterList_.removeInfoWindows()
def removeOnScreenInfo(self):
"""
Remove on screen info.
"""
for rw in self.mainWindow.rendererWindows:
if rw.currentPipelineString == self.mainToolbar.currentPipelineString:
rw.removeOnScreenInfo()
def clearAllActors(self):
"""
Clear all actors.
"""
self.logger.debug("Clearing all actors")
for filterList_ in self.filterLists:
filterList_.clearActors()
def runAllFilterLists(self, sequencer=False):
"""
Run all the filter lists.
"""
self.logger.info("Running all filter lists")
# unique id (used for POV-Ray file naming)
self.currentRunID = uuid.uuid4()
# first remove all old povray files
oldpovfiles = glob.glob(os.path.join(self.mainWindow.tmpDirectory,
"pipeline%d_*.pov" % self.pipelineIndex))
for fn in oldpovfiles:
os.unlink(fn)
# remove old info windows
self.removeInfoWindows()
# set scalar bar false
self.scalarBarAdded = False
# progress dialog
sequencer = True
if not sequencer:
progDiag = utils.showProgressDialog("Applying lists", "Applying lists...", self)
try:
status = 0
for count, filterList_ in enumerate(self.filterLists):
self.logger.info("Running filter list %d", count)
filterList_.applyList(sequencer=sequencer)
except:
exctype, value = sys.exc_info()[:2]
self.logger.exception("Run all filter lists failed!")
self.mainWindow.displayError("Run all filter lists failed!\n\n%s: %s" % (exctype, value))
status = 1
finally:
if not sequencer:
utils.cancelProgressDialog(progDiag)
self.mainWindow.setStatus("Ready")
return status
def refreshOnScreenInfo(self):
"""
Refresh the on-screen information.
"""
for rw in self.mainWindow.rendererWindows:
if rw.currentPipelineString == self.mainToolbar.currentPipelineString:
rw.refreshOnScreenInfo()
def addFilterList(self):
"""
Add a new filter list
"""
# widget to hold filter list
filterListWidget = QtWidgets.QWidget()
filterListLayout = QtWidgets.QVBoxLayout(filterListWidget)
filterListLayout.setContentsMargins(0, 0, 0, 0)
# add list
list1 = filterList.FilterList(self, self.mainToolbar, self.mainWindow, self.filterListCount, self.toolbarWidth)
filterListLayout.addWidget(list1)
self.filterLists.append(list1)
self.visAtomsList.append([])
# add to tab bar
self.filterTabBar.addTab(filterListWidget, str(self.filterListCount))
# select new tab
self.filterTabBar.setCurrentIndex(self.filterListCount)
self.filterListCount += 1
def clearAllFilterLists(self):
"""
Clear all the filter lists
"""
self.logger.debug("Clearing all filter lists")
for filterList_ in self.filterLists:
filterList_.clearList()
self.removeFilterList()
def filterTabBarChanged(self, val):
# guess need to handle addition and removal of tabs here
pass
def tabCloseRequested(self, index):
"""
Tab close requested
"""
self.removeFilterList(index=index)
def removeFilterList(self, index=None):
"""
Remove a filter list
"""
if self.filterListCount <= 1:
return
if index is not None:
currentList = index
else:
currentList = self.filterTabBar.currentIndex()
self.filterLists[currentList].clearList()
for i in range(self.filterListCount):
if i > currentList:
self.filterTabBar.setTabText(i, str(i - 1))
self.filterLists[i].tab -= 1
self.filterTabBar.removeTab(currentList)
self.filterLists.pop(currentList)
self.visAtomsList.pop(currentList)
self.filterListCount -= 1
def refreshAllFilters(self):
"""
Refresh filter settings
"""
self.logger.debug("Refreshing filters")
for filterList_ in self.filterLists:
currentSettings = filterList_.getCurrentFilterSettings()
for filterSettings in currentSettings:
filterSettings.refresh()
filterList_.bondsOptions.refresh()
filterList_.vectorsOptions.refresh()
filterList_.colouringOptions.refreshScalarColourOption()
filterList_.refreshAvailableFilters()
def gatherVisibleAtoms(self):
"""
Builds an array containing all (unique) visible atoms.
"""
visibleAtomsFull = None
for filterList_ in self.filterLists:
visibleAtoms = filterList_.filterer.visibleAtoms
if visibleAtomsFull is None:
visibleAtomsFull = visibleAtoms
else:
visibleAtomsFull = np.append(visibleAtomsFull, visibleAtoms)
visibleAtomsFull = np.unique(visibleAtomsFull)
return visibleAtomsFull
def broadcastToRenderers(self, method, args=(), kwargs={}, globalBcast=False):
"""
Broadcast command to associated renderers.
"""
if globalBcast:
rwList = self.mainWindow.rendererWindows
else:
rwList = [rw for rw in self.mainWindow.rendererWindows if rw.currentPipelineString == self.pipelineString]
self.logger.debug("Broadcasting to renderers (%d/%d): %s", len(rwList), len(self.mainWindow.rendererWindows),
method)
for rw in rwList:
if hasattr(rw, method):
call = getattr(rw, method)
call(*args, **kwargs)
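# Example call (mirroring the highlighter broadcast used in pickObject below):
#     self.broadcastToRenderers("addHighlighters", (self.pickerContextMenuID, [highlighter]))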
def pickObject(self, pickPos, clickType):
"""
Pick object
"""
logger = self.logger
# loop over filter lists
filterLists = self.filterLists
# states
refState = self.refState
inputState = self.inputState
# we don't want PBCs when picking
pickPBC = np.zeros(3, np.int32)
# min/max pos for boxing
# we need the min/max of ref/input/pickPos
minPos = np.zeros(3, np.float64)
maxPos = np.zeros(3, np.float64)
for i in range(3):
# set to min ref pos
minPos[i] = min(refState.pos[i::3])
maxPos[i] = max(refState.pos[i::3])
# see if min input pos is less
minPos[i] = min(minPos[i], min(inputState.pos[i::3]))
maxPos[i] = max(maxPos[i], max(inputState.pos[i::3]))
# see if picked pos is less
minPos[i] = min(minPos[i], pickPos[i])
maxPos[i] = max(maxPos[i], pickPos[i])
logger.debug("Min pos for picker: %r", minPos)
logger.debug("Max pos for picker: %r", maxPos)
# loop over filter lists, looking for closest object to pick pos
minSepIndex = -1
minSep = 9999999.0
minSepType = None
minSepFilterList = None
for filterList_ in filterLists:
if not filterList_.visible:
continue
filterer = filterList_.filterer
visibleAtoms = filterer.visibleAtoms
interstitials = filterer.interstitials
vacancies = filterer.vacancies
antisites = filterer.antisites
onAntisites = filterer.onAntisites
splitInts = filterer.splitInterstitials
scalarsDict = filterer.scalarsDict
latticeScalarsDict = filterer.latticeScalarsDict
vectorsDict = filterer.vectorsDict
result = np.empty(3, np.float64)
status = picker.pickObject(visibleAtoms, vacancies, interstitials, onAntisites, splitInts, pickPos,
inputState.pos, refState.pos, pickPBC, inputState.cellDims,
inputState.specie, refState.specie, inputState.specieCovalentRadius,
refState.specieCovalentRadius, result)
if status:
raise RuntimeError("Picker exited with non zero status (%d)" % status)
tmp_type, tmp_index, tmp_sep = result
if tmp_index >= 0 and tmp_sep < minSep:
minSep = tmp_sep
minSepType = int(tmp_type)
minSepFilterList = filterList_
if minSepType == 0:
minSepIndex = visibleAtoms[int(tmp_index)]
defList = None
else:
minSepIndex = int(tmp_index)
if minSepType == 1:
defList = (vacancies,)
elif minSepType == 2:
defList = (interstitials,)
elif minSepType == 3:
defList = (antisites, onAntisites)
else:
defList = (splitInts,)
minSepScalars = {}
for scalarType, scalarArray in six.iteritems(scalarsDict):
minSepScalars[scalarType] = scalarArray[int(tmp_index)]
for scalarType, scalarArray in six.iteritems(latticeScalarsDict):
minSepScalars[scalarType] = scalarArray[int(tmp_index)]
minSepVectors = {}
for vectorType, vectorArray in six.iteritems(vectorsDict):
minSepVectors[vectorType] = vectorArray[int(tmp_index)]
logger.debug("Closest object to pick: %f (threshold: %f)", minSep, 0.1)
# check if close enough
if minSep < 0.1:
if clickType == "RightClick" and minSepType == 0:
logger.debug("Picked object (right click)")
viewAction = QtWidgets.QAction("View atom", self)
viewAction.setToolTip("View atom info")
viewAction.setStatusTip("View atom info")
viewAction.triggered.connect(functools.partial(self.viewAtomClicked, minSepIndex, minSepType,
minSepFilterList, minSepScalars, minSepVectors, defList))
editAction = QtWidgets.QAction("Edit atom", self)
editAction.setToolTip("Edit atom")
editAction.setStatusTip("Edit atom")
editAction.triggered.connect(functools.partial(self.editAtomClicked, minSepIndex))
removeAction = QtWidgets.QAction("Remove atom", self)
removeAction.setToolTip("Remove atom")
removeAction.setStatusTip("Remove atom")
removeAction.triggered.connect(functools.partial(self.removeAtomClicked, minSepIndex))
menu = self.pickerContextMenu
menu.clear()
menu.addAction(viewAction)
menu.addAction(editAction)
menu.addAction(removeAction)
# highlight atom
lattice = self.inputState
radius = lattice.specieCovalentRadius[lattice.specie[minSepIndex]] * minSepFilterList.displayOptions.atomScaleFactor
highlighter = highlight.AtomHighlighter(lattice.atomPos(minSepIndex), radius * 1.1)
self.broadcastToRenderers("addHighlighters", (self.pickerContextMenuID, [highlighter,]))
cursor = QtGui.QCursor()
menu.popup(cursor.pos())
else:
# show the info window
self.showInfoWindow(minSepIndex, minSepType, minSepFilterList, minSepScalars, minSepVectors, defList)
def checkIfAtomVisible(self, index):
"""
Check if the selected atom is visible in one of the filter lists.
"""
visible = False
visibleFilterList = None
for filterList_ in self.filterLists:
if index in filterList_.filterer.visibleAtoms:
visible = True
from thespian.actors import ActorAddress, ActorSystemConventionUpdate
from thespian.system.messages.convention import (ConventionRegister,
ConventionDeRegister,
ConventionInvite)
from thespian.system.admin.convention import (LocalConventionState, LostRemote,
HysteresisCancel,
CONVENTION_REREGISTRATION_PERIOD,
CONVENTION_REGISTRATION_MISS_MAX,
convention_reinvite_adjustment)
from thespian.system.utilis import fmap, StatsManager
from thespian.system.timing import timePeriodSeconds, ExpirationTimer
import thespian.system.timing
from thespian.system.logdirector import LogAggregator
from thespian.system.transport import SendStatus
try:
from unittest.mock import patch
except ImportError:
try:
from mock import patch
except ImportError:
patch = None
from datetime import timedelta
from pytest import fixture, mark
import inspect
from contextlib import contextmanager
@contextmanager
def update_elapsed_time(time_base, elapsed):
with patch('thespian.system.timing.currentTime') as p_ctime:
with patch('thespian.system.admin.convention.currentTime') as p_convtime:
p_ctime.return_value = time_base + (timePeriodSeconds(elapsed)
if isinstance(elapsed, timedelta)
else elapsed)
p_convtime.return_value = time_base + (timePeriodSeconds(elapsed)
if isinstance(elapsed, timedelta)
else elapsed)
yield p_ctime.return_value
@mark.skipif(not patch, reason='requires mock patch')
def test_time_control():
# This test ensures that the update_elapsed_time works properly to
# control the perception of time changes in the code. If this is
# not operational, many of the other tests will fail.
timer = ExpirationTimer(CONVENTION_REREGISTRATION_PERIOD)
assert not timer.view().expired()
ct = thespian.system.timing.currentTime()
with update_elapsed_time(ct,
CONVENTION_REREGISTRATION_PERIOD +
timedelta(seconds=1)) as later:
print(ct, thespian.system.timing.currentTime(), later,
CONVENTION_REREGISTRATION_PERIOD,
timePeriodSeconds(CONVENTION_REREGISTRATION_PERIOD))
assert ct < thespian.system.timing.currentTime()
assert later == thespian.system.timing.currentTime()
assert timer.view().expired()
@fixture
def lcs1():
ret = LocalConventionState(ActorAddress(1),
{'Admin Port': 1,
'Convention Address.IPv4': ActorAddress(1),
'popsicle': 'cold'},
StatsManager(),
lambda x: ActorAddress(1))
# Activate the system
verify_io(ret.setup_convention(activation=True), [])
return ret
@fixture
def lcs2():
ret = LocalConventionState(ActorAddress(2),
{'Admin Port': 2,
'Convention Address.IPv4': ActorAddress(1),
'apple pie': 'hot'},
StatsManager(),
lambda x: ActorAddress(1))
# Next message is similar to ~convreg2_first(ret)~
ret._expected_setup_convreg = ConventionRegister(ret.myAddress,
ret.capabilities,
firstTime=True,
preRegister=False)
# Activate the system
verify_io(ret.setup_convention(activation=True),
[ (ConventionRegister, Sends(ret._expected_setup_convreg) >= ActorAddress(1)),
])
return ret
@fixture
def solo_lcs1():
# Like lcs1, but does not specify a convention address; intended
# for use with pre-registration (e.g. to simulate TXOnly
# environments.
ret = LocalConventionState(ActorAddress(1),
{'Admin Port': 1, 'popsicle': 'cold'},
StatsManager(),
lambda x: None)
# Activate the system
assert [] == ret.setup_convention(activation=True)
return ret
@fixture
def solo_lcs2():
# Like lcs2, but does not specify a convention address; intended
# for use with pre-registration (e.g. to simulate TXOnly
# environments.
ret = LocalConventionState(ActorAddress(2),
{'Admin Port': 2, 'apple pie': 'hot'},
StatsManager(),
lambda x: None)
# Activate the system
assert [] == ret.setup_convention(activation=True)
return ret
@fixture
def convreg1(lcs1):
return ConventionRegister(lcs1.myAddress,
lcs1.capabilities,
firstTime=False,
preRegister=False)
@fixture
def convreg1_first(lcs1):
return ConventionRegister(lcs1.myAddress,
lcs1.capabilities,
firstTime=True,
preRegister=False)
@fixture
def convreg1_noadmin(solo_lcs1):
return ConventionRegister(solo_lcs1.myAddress,
solo_lcs1.capabilities,
firstTime=False,
preRegister=False)
@fixture
def convreg1_first_noadmin(solo_lcs1):
return ConventionRegister(solo_lcs1.myAddress,
solo_lcs1.capabilities,
firstTime=True,
preRegister=False)
@fixture
def convreg2(lcs2):
return ConventionRegister(lcs2.myAddress,
lcs2.capabilities,
firstTime=False,
preRegister=False)
@fixture
def convreg2_prereg(lcs2):
return ConventionRegister(lcs2.myAddress,
{'Admin Port': lcs2.capabilities['Admin Port']},
firstTime=False,
preRegister=True)
@fixture
def convreg2_first(lcs2):
return ConventionRegister(lcs2.myAddress,
lcs2.capabilities,
firstTime=True,
preRegister=False)
# solo relationships causing mincap? These are actual preregister reqs?
@fixture
def convreg2_noadmin(solo_lcs2):
return ConventionRegister(solo_lcs2.myAddress,
solo_lcs2.capabilities,
firstTime=False,
preRegister=False)
@fixture
def convreg2_first_noadmin(solo_lcs2):
return ConventionRegister(solo_lcs2.myAddress,
solo_lcs2.capabilities,
firstTime=True,
preRegister=False)
@fixture
def convdereg_lcs1(lcs1):
return ConventionDeRegister(lcs1.myAddress, preRegistered=False)
@fixture
def convdereg_lcs2(lcs2):
return ConventionDeRegister(lcs2.myAddress, preRegistered=False)
@fixture
def convdereg_lcs2_prereg(lcs2):
return ConventionDeRegister(lcs2.myAddress, preRegistered=True)
@fixture
def conv1_notifyAddr(lcs1):
notifyAddr = ActorAddress('notify')
lcs1.add_notification_handler(notifyAddr)
return notifyAddr
@fixture
def update_lcs2_added(lcs2):
return ActorSystemConventionUpdate(lcs2.myAddress,
lcs2.capabilities,
added=True)
@fixture
def update_lcs2_added_noadmin(solo_lcs2):
return ActorSystemConventionUpdate(solo_lcs2.myAddress,
solo_lcs2.capabilities,
added=True)
@fixture
def update_lcs2_removed(lcs2):
return ActorSystemConventionUpdate(lcs2.myAddress,
lcs2.capabilities,
added=False)
@fixture
def update_lcs2_removed_noadmin(solo_lcs2):
return ActorSystemConventionUpdate(solo_lcs2.myAddress,
solo_lcs2.capabilities,
added=False)
@fixture
def solo_conv1_notifyAddr(solo_lcs1):
notifyAddr = ActorAddress('notify')
solo_lcs1.add_notification_handler(notifyAddr)
return notifyAddr
## ############################################################
## Tests
## ############################################################
def test_prereg_reg(solo_lcs1, solo_lcs2,
convreg1_first_noadmin, convreg1_noadmin,
convreg2_first_noadmin, convreg2_prereg, convreg2_noadmin,
update_lcs2_added_noadmin):
lcs1, lcs2 = solo_lcs1, solo_lcs2
# This test sends a pre-registration to lcs1 for lcs2, which
# should cause lcs1 to actually register with lcs2 and lcs2 to
# retro-register with its actual data.
# Pre-register lcs2 with lcs1 and verify lcs1 sends its own info
# to lcs2. The registration indicated pre-registration but not an
# assertion of first time (which would cause all existing remote
# information to be dropped and all remote actors to be shutdown)
# because this system may already know about the remote. In this
# scenario, lcs1 does not know about lcs2, so it should set the
# first time indication on the info sent to lcs2.
ops = lcs1.got_convention_register(convreg2_prereg)
verify_io(ops,
[ (HysteresisCancel, None),
Sends(ConventionInvite) >= lcs2.myAddress,
])
ops[1].tx_done(SendStatus.Failed) # indicate failure
verify_io(lcs2.got_convention_invite(lcs1.myAddress),
[ (ConventionRegister, Sends(convreg2_first_noadmin) >= lcs1.myAddress),
])
# lcs1 gets full ConventionRegister from lcs2. This should also
# cause an update notification with the full specification.
verify_io(lcs1.got_convention_register(convreg2_first_noadmin),
[ Sends(convreg1_noadmin) >= lcs2.myAddress,
])
verify_io(lcs2.got_convention_register(convreg1_noadmin),
[ (LogAggregator, None),
])
verify_normal_notification_updates(lcs1, lcs2, convreg1_noadmin, convreg2_noadmin)
assert [] == lcs1.check_convention()
assert [] == lcs2.check_convention()
###
### Notification Tests
###
def test_notification_management(solo_lcs1, solo_lcs2):
lcs1, lcs2 = solo_lcs1, solo_lcs2
notifyAddr = ActorAddress('notify')
verify_io(lcs1.add_notification_handler(notifyAddr), [])
# Re-registration does nothing
verify_io(lcs1.add_notification_handler(notifyAddr), [])
# Registering another handler is fine
notifyAddr2 = ActorAddress('notify2')
verify_io(lcs1.add_notification_handler(notifyAddr2), [])
# Re-registration still does nothing
verify_io(lcs1.add_notification_handler(notifyAddr), [])
# De-registration
lcs1.remove_notification_handler(notifyAddr)
# Re-registration now adds it back
verify_io(lcs1.add_notification_handler(notifyAddr), [])
# Multiple de-registration is ok
lcs1.remove_notification_handler(notifyAddr)
lcs1.remove_notification_handler(notifyAddr)
# Re-registration now adds it back again
verify_io(lcs1.add_notification_handler(notifyAddr), [])
@mark.skipif(not patch, reason='requires mock patch')
def test_notification_management_with_registrations(lcs1, lcs2, convreg1,
convreg2, convreg2_first,
conv1_notifyAddr,
update_lcs2_added,
update_lcs2_removed):
# Setup both in the registered condition
test_reg_with_notifications(lcs1, lcs2, convreg1,
convreg2, convreg2_first,
conv1_notifyAddr,
update_lcs2_added, update_lcs2_removed)
# Re-registration does nothing
verify_io(lcs1.add_notification_handler(conv1_notifyAddr), [])
# Registering another handler is fine
notifyAddr2 = ActorAddress('notify2')
notify_of_lcs2 = ActorSystemConventionUpdate(lcs2.myAddress,
lcs2.capabilities,
added=True)
verify_io(lcs1.add_notification_handler(notifyAddr2),
[ Sends(update_lcs2_added) >= notifyAddr2,
])
# Re-registration still does nothing
verify_io(lcs1.add_notification_handler(conv1_notifyAddr), [])
# De-registration
lcs1.remove_notification_handler(conv1_notifyAddr)
# Re-registration now adds it back
verify_io(lcs1.add_notification_handler(conv1_notifyAddr),
[ Sends(update_lcs2_added) >= conv1_notifyAddr,
])
# Multiple de-registration is ok
lcs1.remove_notification_handler(conv1_notifyAddr)
lcs1.remove_notification_handler(conv1_notifyAddr)
# Re-registration now adds it back again
verify_io(lcs1.add_notification_handler(conv1_notifyAddr),
[ Sends(update_lcs2_added) >= conv1_notifyAddr,
])
def test_prereg_reg_with_notifications(solo_lcs1, solo_lcs2,
convreg1_noadmin, #convreg1_first_noadmin,
convreg2_noadmin, convreg2_first_noadmin,
convreg2_prereg,
solo_conv1_notifyAddr,
update_lcs2_added_noadmin):
lcs1, lcs2 = solo_lcs1, solo_lcs2
notifyAddr = solo_conv1_notifyAddr
ops = lcs1.got_convention_register(convreg2_prereg)
verify_io(ops,
[ (HysteresisCancel, None),
Sends(ConventionInvite) >= lcs2.myAddress,
])
ops[1].tx_done(SendStatus.Failed) # indicate failure
# lcs2 gets the ConventionRegister generated above, and responds
# with actual info of its own. If the other side is indicating
# firstTime, that means it has no previous knowledge; this side
# should not also set firstTime or that will bounce back and forth
# indefinitely. Note that this side will perform a transport
# reset (LostRemote and HysteresisCancel); the TCPTransport may
# ignore the transport reset for TXOnly addresses.
verify_io(lcs2.got_convention_invite(lcs1.myAddress),
[ Sends(convreg2_first_noadmin) >= lcs1.myAddress,
])
# lcs1 gets full ConventionRegister from lcs2. This should also
# cause an update notification with the full specification.
verify_io(lcs1.got_convention_register(convreg2_first_noadmin),
[ Sends(convreg1_noadmin) >= lcs2.myAddress,
Sends(update_lcs2_added_noadmin) >= notifyAddr,
])
verify_io(lcs2.got_convention_register(convreg1_noadmin),
[ (LogAggregator, None),
])
verify_normal_notification_updates(lcs1, lcs2, convreg1_noadmin, convreg2_noadmin)
def test_multi_prereg_reg_with_notifications(solo_lcs1, solo_lcs2,
convreg1_noadmin,
convreg2_prereg, convreg2_first_noadmin,
convreg2_noadmin,
conv1_notifyAddr,
update_lcs2_added_noadmin):
lcs1, lcs2 = solo_lcs1, solo_lcs2
ops = lcs1.got_convention_register(convreg2_prereg)
verify_io(ops,
[ (HysteresisCancel, None),
Sends(ConventionInvite) >= lcs2.myAddress,
])
ops[1].tx_done(SendStatus.Failed) # indicate failure
# Another prereg should just repeat the invitation but have no
# other effect because the previous is in progress
ops = lcs1.got_convention_register(convreg2_prereg)
verify_io(ops,
[ (HysteresisCancel, None),
Sends(ConventionInvite) >= lcs2.myAddress,
])
ops[1].tx_done(SendStatus.Failed) # indicate failure
# lcs2 gets the ConventionRegister generated above, and responds
# with actual info of its own. If the other side is indicating
# firstTime, that means it has no previous knowledge; this side
# should not also set firstTime or that will bounce back and forth
# indefinitely. Note that this side will perform a transport
# reset (LostRemote and HysteresisCancel); the TCPTransport may
# ignore the transport reset for TXOnly addresses.
verify_io(lcs2.got_convention_invite(lcs1.myAddress),
[ Sends(convreg2_first_noadmin) >= lcs1.myAddress,
])
# lcs1 gets full ConventionRegister from lcs2. This should also
# cause an update notification with the full specification.
verify_io(lcs1.got_convention_register(convreg2_first_noadmin),
[ Sends(convreg1_noadmin) >= lcs2.myAddress,
])
verify_io(lcs2.got_convention_register(convreg1_noadmin),
[ (LogAggregator, None),
])
verify_normal_notification_updates(lcs1, lcs2, convreg1_noadmin, convreg2_noadmin)
def test_prereg_reg_prereg_with_notifications(solo_lcs1, solo_lcs2,
convreg1_noadmin, #convreg1_first_noadmin,
convreg2_noadmin, convreg2_first_noadmin,
convreg2_prereg,
update_lcs2_added_noadmin):
#, update_lcs2_removed):
lcs1, lcs2 = solo_lcs1, solo_lcs2
notifyAddr = ActorAddress('notify')
lcs1.add_notification_handler(notifyAddr)
ops = lcs1.got_convention_register(convreg2_prereg)
verify_io(ops,
[ (HysteresisCancel, None),
Sends(ConventionInvite) >= lcs2.myAddress,
])
ops[1].tx_done(SendStatus.Failed) # indicate failure
# Lcs2 gets the ConventionInvite generated above, and responds
# with actual info of its own. If the other side is indicating
# firstTime, that means it has no previous knowledge; this side
# should not also set firstTime or that will bounce back and forth
# indefinitely. Note that this side will perform a transport
# reset (LostRemote and HysteresisCancel); the TCPTransport may
# ignore the transport reset for TXOnly addresses.
verify_io(lcs2.got_convention_invite(lcs1.myAddress),
[ Sends(convreg2_first_noadmin) >= lcs1.myAddress,
])
# lcs1 gets full ConventionRegister from lcs2. lcs1 as
# ConventionLeader sends back its registration (not a first-time
# registration) as it normally would, and also generates an update
# notification with the full specification.
verify_io(lcs1.got_convention_register(convreg2_first_noadmin),
[ Sends(convreg1_noadmin) >= lcs2.myAddress,
Sends(update_lcs2_added_noadmin) >= notifyAddr,
])
verify_io(lcs2.got_convention_register(convreg1_noadmin),
[ (LogAggregator, None),
])
verify_normal_notification_updates(lcs1, lcs2, convreg1_noadmin, convreg2_noadmin)
# Another prereg has no effect other than causing a new invite
'''bam2stats.py - compute stats from a bam-file
===============================================
:Tags: Genomics NGS Summary BAM
Purpose
-------
This script takes a bam file as input and computes a few metrics by
iterating over the file. The metrics output are:
+------------------------+------------------------------------------+
|*Category* |*Content* |
+------------------------+------------------------------------------+
|total |total number of alignments in bam file |
+------------------------+------------------------------------------+
|alignments_mapped |alignments mapped to a chromosome (bam |
| |flag) |
+------------------------+------------------------------------------+
|alignments_unmapped |alignments unmapped (bam flag) |
+------------------------+------------------------------------------+
|qc_fail |alignments failing QC (bam flag) |
+------------------------+------------------------------------------+
|mate_unmapped |alignments in which the mate is unmapped |
| |(bam flag) |
+------------------------+------------------------------------------+
|reverse |alignments in which read maps to reverse |
| |strand (bam flag) |
+------------------------+------------------------------------------+
|mate_reverse |alignments in which mate maps to reverse |
| |strand (bam flag) |
+------------------------+------------------------------------------+
|proper_pair |alignments in which both pairs have been |
| |mapped properly (according to the mapper) |
| |(bam flag) |
+------------------------+------------------------------------------+
|read1 |alignments for 1st read of pair (bam flag)|
+------------------------+------------------------------------------+
|paired |alignments of reads that are paired (bam |
| |flag) |
+------------------------+------------------------------------------+
|duplicate |read is PCR or optical duplicate (bam |
| |flag) |
+------------------------+------------------------------------------+
|read2 |alignment is for 2nd read of pair (bam |
| |flag) |
+------------------------+------------------------------------------+
|secondary |alignment is not primary alignment |
+------------------------+------------------------------------------+
|alignments_rna |alignments mapping to regions specified in|
| |a .gff file |
+------------------------+------------------------------------------+
|alignments_no_rna |alignments not mapping to regions in a |
| |.gff file (if --ignore-masked-reads has |
| |been set, otherwise equal to mapped) |
+------------------------+------------------------------------------+
|alignments_duplicates |number of alignments mapping to the same |
| |location |
+------------------------+------------------------------------------+
|alignments_unique |number of alignments mapping to unique |
| |locations |
+------------------------+------------------------------------------+
|reads_total |number of reads in file. Either given via |
| |--num-reads or deduced as the sum of |
| |mapped and unmapped reads |
+------------------------+------------------------------------------+
|reads_mapped |number of reads mapping in file. Derived |
| |from the total number of alignments and |
| |removing counts for multiple |
| |matches. Requires the NH flag to be set |
| |correctly. |
+------------------------+------------------------------------------+
|reads_unmapped |number of reads unmapped in file. Assumes |
| |that there is only one |
| |entry per unmapped read. |
+------------------------+------------------------------------------+
|reads_missing |number of reads missing, if number of |
| |reads given by --input-reads. Otherwise |
| |0. |
+------------------------+------------------------------------------+
|reads_norna |reads not mapping to repetitive RNA |
| |regions. |
+------------------------+------------------------------------------+
|pairs_total |number of total pairs - this is the number|
| |of reads_total divided by two. If there |
| |were no pairs, pairs_total will be 0. |
+------------------------+------------------------------------------+
|pairs_mapped |number of mapped pairs - this is the same |
| |as the number of proper pairs. |
+------------------------+------------------------------------------+
Additionally, the script outputs histograms for the following tags and
scores.
* NM: number of mismatches in alignments.
* NH: number of hits of reads.
* mapq: mapping quality of alignments.
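
For orientation, the flag-based categories in the table above can be
reproduced with a few lines of pysam; this is a minimal sketch assuming a
BAM file called ``in.bam``, not the script's actual implementation::

    import pysam
    counts = {"mapped": 0, "unmapped": 0, "proper_pair": 0, "duplicate": 0}
    with pysam.AlignmentFile("in.bam", "rb") as bam:
        for read in bam:
            if read.is_unmapped:
                counts["unmapped"] += 1
            else:
                counts["mapped"] += 1
            if read.is_proper_pair:
                counts["proper_pair"] += 1
            if read.is_duplicate:
                counts["duplicate"] += 1
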
Supplying a fastq file
++++++++++++++++++++++
If a fastq file is supplied (``--fastq-file``), the script will
compute some additional summary statistics. However, as it builds a dictionary
of all sequences, it will also require a good amount of memory. The additional
metrics output are:
+-----------------------------+----------------------------------------+
|*Category* |*Content* |
+-----------------------------+----------------------------------------+
|pairs_total |total number of pairs in input data |
+-----------------------------+----------------------------------------+
|pairs_mapped |pairs in which both reads map |
+-----------------------------+----------------------------------------+
|pairs_unmapped |pairs in which neither read maps |
+-----------------------------+----------------------------------------+
|pairs_proper_unique |pairs which are proper and map uniquely.|
+-----------------------------+----------------------------------------+
|pairs_incomplete_unique |pairs in which one of the reads maps |
| |uniquely, but the other does not map. |
+-----------------------------+----------------------------------------+
|pairs_incomplete_multimapping|pairs in which one of the reads maps |
| |uniquely, but the other maps to multiple|
| |locations. |
+-----------------------------+----------------------------------------+
|pairs_proper_duplicate |pairs which are proper and unique, but |
| |marked as duplicates. |
+-----------------------------+----------------------------------------+
|pairs_proper_multimapping |pairs which are proper, but map to |
| |multiple locations. |
+-----------------------------+----------------------------------------+
|pairs_not_proper_unique |pairs mapping uniquely, but not flagged |
| |as proper |
+-----------------------------+----------------------------------------+
|pairs_other |pairs not in any of the above categories|
+-----------------------------+----------------------------------------+
Note that for paired-end data, any ``/1`` or ``/2`` suffixes will be
removed from the read name on the assumption that these have been
removed in the bam file as well.
Usage
-----
Example::
python bam2stats.py in.bam
This command will generate various statistics based on the supplied
BAM file, such as percentage reads mapped and percentage reads mapped
in pairs. The output looks like this:
+-----------------------------+------+-------+-----------------+
|category |counts|percent|of |
+-----------------------------+------+-------+-----------------+
|alignments_total |32018 |100.00 |alignments_total |
+-----------------------------+------+-------+-----------------+
|alignments_mapped |32018 |100.00 |alignments_total |
+-----------------------------+------+-------+-----------------+
|alignments_unmapped |0 | 0.00 |alignments_total |
+-----------------------------+------+-------+-----------------+
|alignments_qc_fail |0 | 0.00 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_mate_unmapped |241 | 0.75 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_reverse |16016 |50.02 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_mate_reverse |15893 |49.64 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_proper_pair |30865 |96.40 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_read1 |16057 |50.15 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_paired |32018 |100.00 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_duplicate |0 | 0.00 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_read2 |15961 |49.85 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_secondary |0 | 0.00 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_rna |68 | 0.21 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_no_rna |31950 |99.79 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|alignments_filtered |31950 |99.79 |alignments_mapped|
+-----------------------------+------+-------+-----------------+
|reads_total |34250 |100.00 |reads_total |
+-----------------------------+------+-------+-----------------+
|reads_unmapped |0 | 0.00 |reads_total |
+-----------------------------+------+-------+-----------------+
|reads_mapped |32018 |93.48 |reads_total |
+-----------------------------+------+-------+-----------------+
|reads_missing |2232 | 6.52 |reads_total |
+-----------------------------+------+-------+-----------------+
|reads_mapped_unique |32018 |100.00 |reads_mapped |
+-----------------------------+------+-------+-----------------+
|reads_multimapping |0 | 0.00 |reads_mapped |
+-----------------------------+------+-------+-----------------+
|pairs_total |17125 |100.00 |pairs_total |
+-----------------------------+------+-------+-----------------+
|pairs_mapped |17125 |100.00 |pairs_total |
+-----------------------------+------+-------+-----------------+
|pairs_unmapped |0 | 0.00 |pairs_total |
+-----------------------------+------+-------+-----------------+
|pairs_proper_unique |14880 |86.89 |pairs_total |
+-----------------------------+------+-------+-----------------+
|pairs_incomplete_unique |2232 |13.03 |pairs_total |
+-----------------------------+------+-------+-----------------+
|pairs_incomplete_multimapping|0 | 0.00 |pairs_total |
+-----------------------------+------+-------+-----------------+
|pairs_proper_duplicate |0 | 0.00 |pairs_total |
+-----------------------------+------+-------+-----------------+
|pairs_proper_multimapping |0 | 0.00 |pairs_total |
+-----------------------------+------+-------+-----------------+
|pairs_not_proper_unique |13 | 0.08 |pairs_total |
+-----------------------------+------+-------+-----------------+
|pairs_other |0 | 0.00 |pairs_total |
+-----------------------------+------+-------+-----------------+
|read1_total |17125 |100.00 |read1_total |
+-----------------------------+------+-------+-----------------+
|read1_unmapped |0 | 0.00 |read1_total |
+-----------------------------+------+-------+-----------------+
|read1_mapped |16057 |93.76 |read1_total |
+-----------------------------+------+-------+-----------------+
|read1_mapped_unique |16057 |100.00 |read1_mapped |
+-----------------------------+------+-------+-----------------+
|reads_multimapping |0 | 0.00 |read1_mapped |
+-----------------------------+------+-------+-----------------+
|read1_missing |1068 | 6.65 |read1_total |
+-----------------------------+------+-------+-----------------+
|read2_total |17125 |100.00 |read2_total |
+-----------------------------+------+-------+-----------------+
|read2_unmapped |0 | 0.00 |read2_total |
+-----------------------------+------+-------+-----------------+
|read2_mapped |15961 |93.20 |read2_total |
+-----------------------------+------+-------+-----------------+
|read2_mapped_unique |15961 |100.00 |read2_mapped |
+-----------------------------+------+-------+-----------------+
|reads_multimapping |0 | 0.00 |read2_mapped |
+-----------------------------+------+-------+-----------------+
|read2_missing |1164 | 7.29 |read2_total |
+-----------------------------+------+-------+-----------------+
The first column contains the category, the second the number of
counts and the third a percentage. The fourth column denotes the
denominator that was used to compute the percentage. In the table
above, we see that 16,057 first reads in a pair map and 15,961
second reads in a pair map, resulting in 14,880 proper uniquely mapped
pairs.
Type::
cgat bam2stats --help
for command line help.
Bam2stats can read from standard input::
cat in.bam | python bam2stats.py -
Documentation
-------------
Reads are not counted via read name, but by making use of the NH and HI flags
when present. To recap, NH is the number of reported alignments that
contain the query in the current record, while HI is the hit index and
ranges from 0 to NH-1.
Unfortunately, not all aligners follow this convention. For example,
gsnap seems to set NH to the number of reportable alignments, while
the actual number of reported alignments in the file is less. Thus, if
the HI flag is present, the maximum HI is used to correct the NH
flag. The assumption is that the same reporting threshold has been
used for all alignments.
If no NH flag is present, it is assumed that all reads have only been
reported once.
Multi-matching counts after filtering are really guesswork. Basically,
the assumption is that filtering is consistent and will tend to remove
all alignments of a query.
Command line options
--------------------
'''
import os
import sys
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.GTF as GTF
import pysam
import CGAT.scripts._bam2stats as _bam2stats
FLAGS = {
1: 'paired',
2: 'proper_pair',
4: 'unmapped',
8: 'mate_unmapped',
16: 'reverse',
32: 'mate_reverse',
64: 'read1',
128: 'read2',
256: 'secondary',
512: 'qc_fail',
1024: 'duplicate',
}
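# Illustrative sketch (not part of the original script): the FLAGS mapping can
# be used to decode the bitwise SAM flag of an alignment into readable names;
# for example, a flag of 99 decodes to paired, proper_pair, mate_reverse and
# read1.
def _decode_flag(flag):
    '''return the names of the bits set in *flag* (illustration only).'''
    return [name for bit, name in sorted(FLAGS.items()) if flag & bit]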
def computeMappedReadsFromAlignments(total_alignments, nh, max_hi):
    '''compute the number of mapped reads from the total number of alignments.
'''
nreads_mapped = total_alignments
if len(nh) > 0:
max_nh = max(nh.keys())
if max_hi > 0:
for x in range(2, min(max_nh + 1, max_hi)):
nreads_mapped -= (nh[x] / x) * (x - 1)
for x in range(max_hi, max_nh + 1):
nreads_mapped -= (nh[x] / max_hi) * (max_hi - 1)
else:
for x in range(2, max(nh.keys()) + 1):
nreads_mapped -= (nh[x] / x) * (x - 1)
return nreads_mapped
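# Hedged example with hypothetical numbers: for 120 alignments, an NH histogram
# of {1: 100, 2: 20} (20 alignments carry NH=2 and therefore come from 10
# reads) and no HI information (max_hi == 0), the call
#
#     computeMappedReadsFromAlignments(120, {1: 100, 2: 20}, 0)
#
# removes (20 / 2) * (2 - 1) = 10 double-counted alignments and returns 110
# mapped reads (a float under Python 3, since ``/`` is true division).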
def writeNH(outfile, nh, max_hi):
    '''output the nh histogram, correcting counts by max_hi where it is smaller than NH'''
    # need to remove double counting:
    # one read matching 2 positions contributes 2 alignments, but only 1 read
max_nh = max(nh.keys())
if max_hi > 0:
for x in range(1, min(max_nh + 1, max_hi)):
if nh[x] == 0:
continue
outfile.write("%i\t%i\n" % (x, nh[x] / x))
for x in range(max_hi, max_nh + 1):
if nh[x] == 0:
continue
outfile.write("%i\t%i\n" % (x, nh[x] / max_hi))
else:
for x in range(1, max_nh + 1):
if nh[x] == 0:
continue
outfile.write("%i\t%i\n" % (x, nh[x] / x))
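# For instance (hypothetical values): with nh = {1: 100, 2: 10} and max_hi == 0
# the function writes the rows "1<tab>100" and "2<tab>5"; the ten alignments
# with NH=2 collapse to five distinct reads.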
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option(
"-r", "--mask-bed-file", dest="filename_rna", type="string",
metavar='GFF',
help="gff formatted file with masking locations. The number of "
"reads overlapping the intervals in the given file will be "
"computed. Note that the computation currently does not take "
"into account indels, so it is an approximate count only. "
"[%default]")
parser.add_option(
"-f", "--ignore-masked-reads", dest="remove_rna", action="store_true",
help="as well as counting reads in the file given by --mask-bed-file, "
"also remove these | |
and outputs is
a Tensor, and another is a Tensor list/tuple, then the Jacobian will
be a tuple of Tensors. If both of inputs and outputs are Tensor
list/tuple, then the Jacobian will be a tuple of tuple of Tensors
where ``Jacobian[i][j]`` will contain the Jacobian matrix of the
linearized ``i``th output and ``j``th input and will have same
dtype and device as the corresponding input. ``Jacobian[i][j]`` will
            have size ``m * n``, where ``m`` and ``n`` denote the numbers of
elements of ``i``th output and ``j``th input respectively.
Examples 1:
.. code-block:: python
import paddle
def func(x):
return paddle.matmul(x, x)
x = paddle.ones(shape=[2, 2], dtype='float32')
x.stop_gradient = False
jacobian = paddle.autograd.jacobian(func, x)
print(jacobian)
# Tensor(shape=[4, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[2., 1., 1., 0.],
# [1., 2., 0., 1.],
# [1., 0., 2., 1.],
# [0., 1., 1., 2.]])
Examples 2:
.. code-block:: python
import paddle
def func(x, y):
return paddle.matmul(x, y)
x = paddle.ones(shape=[2, 2], dtype='float32')
y = paddle.ones(shape=[2, 2], dtype='float32') * 2
x.stop_gradient = False
y.stop_gradient = False
jacobian = paddle.autograd.jacobian(func, [x, y], create_graph=True)
print(jacobian)
# (Tensor(shape=[4, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
# [[2., 2., 0., 0.],
# [2., 2., 0., 0.],
# [0., 0., 2., 2.],
# [0., 0., 2., 2.]]),
# Tensor(shape=[4, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
# [[1., 0., 1., 0.],
# [0., 1., 0., 1.],
# [1., 0., 1., 0.],
# [0., 1., 0., 1.]]))
Examples 3:
.. code-block:: python
import paddle
def func(x, y):
return paddle.matmul(x, y), x * x
x = paddle.ones(shape=[2, 2], dtype='float32')
y = paddle.ones(shape=[2, 2], dtype='float32') * 2
x.stop_gradient = False
y.stop_gradient = False
jacobian = paddle.autograd.jacobian(func, [x, y], allow_unused=True)
print(jacobian)
# ((Tensor(shape=[4, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[2., 2., 0., 0.],
# [2., 2., 0., 0.],
# [0., 0., 2., 2.],
# [0., 0., 2., 2.]]),
# Tensor(shape=[4, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[1., 0., 1., 0.],
# [0., 1., 0., 1.],
# [1., 0., 1., 0.],
# [0., 1., 0., 1.]])),
# (Tensor(shape=[4, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[2., 0., 0., 0.],
# [0., 2., 0., 0.],
# [0., 0., 2., 0.],
# [0., 0., 0., 2.]]), None))
'''
inputs = _tensors(inputs, "inputs")
outputs = _tensors(func(*inputs), "outputs")
fin_size = len(inputs)
fout_size = len(outputs)
flat_outputs = tuple(reshape(output, shape=[-1]) for output in outputs)
jacobian = tuple()
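    # The Jacobian is assembled row by row: each element k of the flattened
    # i-th output is differentiated with respect to all inputs, every gradient
    # is flattened into a single row, and the rows are then stacked into the
    # [m, n] Jacobian block for the corresponding output/input pair.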
for i, flat_output in enumerate(flat_outputs):
jac_i = list([] for _ in range(fin_size))
for k in range(len(flat_output)):
row_k = grad(
flat_output[k],
inputs,
create_graph=create_graph,
retain_graph=True,
allow_unused=allow_unused)
for j in range(fin_size):
jac_i[j].append(
reshape(
row_k[j], shape=[-1])
if isinstance(row_k[j], paddle.Tensor) else None)
jacobian += (tuple(
_stack_tensor_or_return_none(jac_i_j) for jac_i_j in jac_i), )
if fin_size == 1 and fout_size == 1:
return jacobian[0][0]
elif fin_size == 1 and fout_size != 1:
return tuple(jacobian[i][0] for i in range(fout_size))
elif fin_size != 1 and fout_size == 1:
return jacobian[0]
else:
return jacobian
@framework.dygraph_only
def batch_jacobian(func, inputs, create_graph=False, allow_unused=False):
'''
.. note::
**This API is ONLY available in the imperative mode.**
This function computes the batch Jacobian matrix of `func` with respect to `inputs`.
    Note that the first dimension of the inputs is the batch size.
Parameters:
func (function): a Python function that takes a Tensor or a Tensor
            list/tuple as inputs (where the first dimension is the batch size) and
returns a Tensor or a Tensor tuple.
inputs (Tensor|list(Tensor)|tuple(Tensor)): the input Tensor or
            Tensor list/tuple of the function ``func``. Note that
            the first dimension of the inputs is the batch size.
create_graph (bool, optional): whether to create the gradient graphs
of the computing process. When it is True, higher order derivatives
are supported to compute; when it is False, the gradient graphs of
the computing process would be discarded. Defaults to ``False``.
allow_unused (bool, optional): whether to raise error or return None if
some Tensors of `inputs` are unreachable in the graph. Error would
be raised if allow_unused=False, and None would be returned as
their gradients if allow_unused=True. Default False.
Returns:
Jacobian (Tensor or nested tuple of Tensors): if function ``func``
takes a Tensor as inputs and returns a Tensor as outputs, Jacobian
will be a single Tensor containing the Jacobian matrix for the
linearized inputs and outputs. If one of the inputs and outputs is
a Tensor, and another is a Tensor list/tuple, then the Jacobian will
be a tuple of Tensors. If both of inputs and outputs are Tensor
list/tuple, then the Jacobian will be a tuple of tuple of Tensors.
        Note that the first dimension of the inputs is the batch size.
For example,
        if the input and output shapes of the function ``func`` are [batch_size, num]
and [batch_size, num] respectively, then the Jacobian will be a Tensor with
a shape of [num, batch_size * num], where ``Jacobian[i][j]`` will contain
the Jacobian matrix of the ``i``th column output and the ``j``th input and
will have same dtype and device as the corresponding input.
Other situations can be deduced by analogy.
Examples 1:
.. code-block:: python
import paddle
x = paddle.ones(shape=(4, 2), dtype='float64')
weight = paddle.ones(shape=(2, 4), dtype='float64')
y = paddle.ones(shape=(4, 2), dtype='float64')
def func(x):
return paddle.matmul(paddle.matmul(x, weight), y)
x.stop_gradient = False
batch_jacobian = paddle.autograd.batch_jacobian(func, x)
print(batch_jacobian)
# Tensor(shape=[2, 8], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[4., 4., 4., 4., 4., 4., 4., 4.],
# [4., 4., 4., 4., 4., 4., 4., 4.]])
Examples 2:
.. code-block:: python
import paddle
x = paddle.ones(shape=(4, 2), dtype='float64')
weight = paddle.ones(shape=(2, 4), dtype='float64')
y = paddle.ones(shape=(4, 2), dtype='float64')
def func(x):
return paddle.matmul(paddle.matmul(x, weight), y), x * x
x.stop_gradient = False
batch_jacobian = paddle.autograd.batch_jacobian(func, x)
print(batch_jacobian)
# (Tensor(shape=[2, 8], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[4., 4., 4., 4., 4., 4., 4., 4.],
# [4., 4., 4., 4., 4., 4., 4., 4.]]), Tensor(shape=[2, 8], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[2., 0., 2., 0., 2., 0., 2., 0.],
# [0., 2., 0., 2., 0., 2., 0., 2.]]))
Examples 3:
.. code-block:: python
import paddle
x = paddle.ones(shape=(4, 2), dtype='float64')
weight = paddle.ones(shape=(2, 4), dtype='float64')
y = paddle.ones(shape=(4, 2), dtype='float64')
def func(x, y):
return x * y
x.stop_gradient = False
y.stop_gradient = False
batch_jacobian = paddle.autograd.batch_jacobian(func, [x, y])
print(batch_jacobian)
# (Tensor(shape=[2, 8], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[1., 0., 1., 0., 1., 0., 1., 0.],
# [0., 1., 0., 1., 0., 1., 0., 1.]]), Tensor(shape=[2, 8], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[1., 0., 1., 0., 1., 0., 1., 0.],
# [0., 1., 0., 1., 0., 1., 0., 1.]]))
'''
inputs = _tensors(inputs, "inputs")
outputs = _tensors(func(*inputs), "outputs")
batch_size = inputs[0].shape[0]
    for input in inputs:
        assert input.shape[
            0] == batch_size, "The first dimension of each input must equal the batch size!"
    for output in outputs:
        assert output.shape[
            0] == batch_size, "The first dimension of each output must equal the batch size!"
fin_size = len(inputs)
fout_size = len(outputs)
flat_outputs = tuple(
reshape(
output, shape=[batch_size, -1]) for output in outputs)
jacobian = tuple()
for i, flat_output in enumerate(flat_outputs):
jac_i = list([] for _ in range(fin_size))
for k in range(flat_output.shape[1]):
row_k = grad(
flat_output[:, k],
inputs,
create_graph=create_graph,
retain_graph=True,
allow_unused=allow_unused)
for j in range(fin_size):
jac_i[j].append(
reshape(
row_k[j], shape=[-1])
if isinstance(row_k[j], paddle.Tensor) else None)
jacobian += (tuple(
_stack_tensor_or_return_none(jac_i_j) for jac_i_j in jac_i), )
if fin_size == 1 and fout_size == 1:
return jacobian[0][0]
elif fin_size == 1 and fout_size != 1:
return tuple(jacobian[i][0] for i in range(fout_size))
elif fin_size != 1 and fout_size == 1:
return jacobian[0]
else:
return jacobian
@framework.dygraph_only
def batch_hessian(func, inputs, create_graph=False, allow_unused=False):
'''
.. note::
**This API is ONLY available in the imperative mode.**
This function computes the batch Hessian matrix of `func` with respect to `inputs`.
    Note that the first dimension of the inputs is the batch size.
Parameters:
func (function): a Python function that takes a Tensor or a Tensor
            list/tuple as inputs (where the first dimension is the batch size) and
returns a Tensor with shape [batch_size, 1].
inputs (Tensor|list(Tensor)|tuple(Tensor)): the input Tensor or
Tensor list/tuple of the function ``func``.
            Note that the first dimension of
`image_shape` is required.
raise ValueError("Argument `image_shape` must be provided for NUFFT.")
image_shape = tf.TensorShape(image_shape)
image_shape.assert_has_rank(rank)
if multicoil is None:
# `multicoil` defaults to True if sensitivities were passed; False
# otherwise.
multicoil = sensitivities is not None
# Compensate non-uniform sampling density.
if density is None:
density = traj_ops.estimate_density(trajectory, image_shape)
kspace = tf.math.divide_no_nan(kspace, tensor_util.cast_to_complex(density))
# Do NUFFT.
image = tfft.nufft(kspace, trajectory,
grid_shape=image_shape,
transform_type='type_1',
fft_direction='backward')
# Do coil combination.
if multicoil and combine_coils:
image = coil_ops.combine_coils(image, maps=sensitivities, coil_axis=-rank-1)
return image
def _inufft(kspace,
trajectory,
sensitivities=None,
image_shape=None,
tol=1e-5,
max_iter=10,
return_cg_state=False,
multicoil=None,
combine_coils=True):
"""MR image reconstruction using iterative inverse NUFFT.
For the parameters, see `tfmr.reconstruct`.
"""
kspace = tf.convert_to_tensor(kspace)
trajectory = tf.convert_to_tensor(trajectory)
if sensitivities is not None:
sensitivities = tf.convert_to_tensor(sensitivities)
# Infer rank from number of dimensions in trajectory.
rank = trajectory.shape[-1]
if rank > 3:
raise ValueError(
f"Can only reconstruct images up to rank 3, but `trajectory` implies "
f"rank {rank}.")
# Check inputs and set defaults.
if image_shape is None:
# `image_shape` is required.
raise ValueError("Argument `image_shape` must be provided for NUFFT.")
image_shape = tf.TensorShape(image_shape)
image_shape.assert_has_rank(rank)
if multicoil is None:
# `multicoil` defaults to True if sensitivities were passed; False
# otherwise.
multicoil = sensitivities is not None
batch_shape = tf.shape(kspace)[:-1]
# Set up system operator and right hand side.
linop_nufft = linalg_ops.LinearOperatorNUFFT(image_shape, trajectory)
operator = tf.linalg.LinearOperatorComposition(
[linop_nufft.H, linop_nufft],
is_self_adjoint=True, is_positive_definite=True)
# Compute right hand side.
rhs = tf.linalg.matvec(linop_nufft.H, kspace)
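  # In other words, with A the NUFFT operator and y the measured k-space data,
  # the code below solves the normal equations (A^H A) x = A^H y by conjugate
  # gradient, which yields the least-squares estimate of the image x.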
# Solve linear system using conjugate gradient iteration.
result = linalg_ops.conjugate_gradient(operator, rhs, x=None,
tol=tol, max_iter=max_iter)
# Restore image shape.
image = tf.reshape(result.x, tf.concat([batch_shape, image_shape], 0))
# Do coil combination.
if multicoil and combine_coils:
image = coil_ops.combine_coils(image, maps=sensitivities, coil_axis=-rank-1)
return (image, result) if return_cg_state else image
def _sense(kspace,
sensitivities,
reduction_axis,
reduction_factor,
rank=None,
l2_regularizer=0.0,
fast=True):
"""MR image reconstruction using SENSitivity Encoding (SENSE).
For the parameters, see `tfmr.reconstruct`.
"""
# Parse inputs.
kspace = tf.convert_to_tensor(kspace)
sensitivities = tf.convert_to_tensor(sensitivities)
# Rank or spatial dimensionality.
rank = rank or kspace.shape.rank - 1
reduced_shape = kspace.shape[-rank:]
reduction_axis = check_util.validate_list(
reduction_axis, element_type=int, name='reduction_axis')
reduction_factor = check_util.validate_list(
reduction_factor, element_type=int, length=len(reduction_axis),
name='reduction_factor')
reduction_axis = [ax + rank if ax < 0 else ax for ax in reduction_axis]
canonical_reduction = [1] * rank
for ax, r in zip(reduction_axis, reduction_factor):
canonical_reduction[ax] = r
image_shape = tf.TensorShape(
[s * r for s, r in zip(reduced_shape.as_list(), canonical_reduction)])
# Compute batch shapes. `batch_shape` is the output batch shape.
kspace_rank = kspace.shape.rank
kspace_batch_shape = kspace.shape[:-rank-1]
sens_rank = sensitivities.shape.rank
sens_batch_shape = sensitivities.shape[:-rank-1]
batch_shape = tf.broadcast_static_shape(kspace_batch_shape, sens_batch_shape)
# We do not broadcast the k-space, by design.
if batch_shape != kspace_batch_shape:
raise ValueError(
f"`kspace` and `sensitivities` have incompatible batch shapes: "
f"{kspace_batch_shape}, {sens_batch_shape}")
# Rearrange dimensions. Put spatial dimensions first, then coil dimension,
# then batch dimensions.
kspace_perm = list(range(-rank, 0)) + [-rank-1]
kspace_perm = [ax + kspace_rank for ax in kspace_perm]
kspace_perm += list(range(0, kspace_rank - rank - 1))
sens_perm = list(range(-rank, 0)) + [-rank-1]
sens_perm = [ax + sens_rank for ax in sens_perm]
sens_perm += list(range(0, sens_rank - rank - 1))
kspace = tf.transpose(kspace, kspace_perm)
sensitivities = tf.transpose(sensitivities, sens_perm)
# Compute aliased images and shift along the reduced dimensions.
aliased_images = fft_ops.ifftn(kspace, axes=list(range(rank)), shift=True)
aliased_images = tf.signal.ifftshift(aliased_images, axes=reduction_axis)
# Create a grid of indices into the reduced FOV image.
reduced_indices = tf.stack(tf.meshgrid(*[tf.range(s) for s in reduced_shape]))
reduced_indices = tf.transpose(tf.reshape(reduced_indices, [rank, -1]))
# Compute corresponding indices into the full FOV image.
offsets = [tf.range(r) * s for s, r in zip(
reduced_shape.as_list(), canonical_reduction)]
offsets = tf.transpose(tf.reshape(
tf.stack(tf.meshgrid(*offsets)), [rank, -1]))
indices = tf.expand_dims(reduced_indices, -2) + offsets
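  # As a concrete illustration (hypothetical sizes): with a reduced size of 64
  # and a reduction factor of 2 along one axis, the reduced-FOV pixel at index
  # i aliases the full-FOV pixels at indices i and i + 64; these are exactly
  # the indices gathered below for each pixel-wise SENSE system.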
# Compute the system matrices, ie, pixel-wise sensitivity matrices folding the
# full FOV image into a reduced FOV image.
sens_matrix = tf.gather_nd(sensitivities, indices)
sens_matrix = tf.transpose(
sens_matrix, [0, 2, 1] + list(range(3, 3 + sens_batch_shape.rank)))
# Compute the right hand sides for the set of linear systems.
rhs = tf.gather_nd(aliased_images, reduced_indices)
# Remove any pixels known to have zero signal, with no contributions from any
# of the aliases. Currently we can't do this for batched sensitivities, so it
# is disabled in that case.
if sens_batch_shape.rank == 0:
mask = tf.reduce_sum(tf.math.square(tf.math.abs(sens_matrix)), -2) > 0
mask = tf.math.reduce_any(mask, axis=-1)
sens_matrix = tf.boolean_mask(sens_matrix, mask, axis=0)
rhs = tf.boolean_mask(rhs, mask, axis=0)
indices = tf.boolean_mask(indices, mask, axis=0)
# Move batch dimensions to the beginning.
sens_matrix = tf.transpose(
sens_matrix, list(range(3, sens_matrix.shape.rank)) + [0, 1, 2])
rhs = tf.transpose(rhs, list(range(2, rhs.shape.rank)) + [0, 1])
rhs = tf.expand_dims(rhs, -1)
# Broadcast the sensitivity matrix as necessary.
sens_matrix = tf.broadcast_to(
sens_matrix, batch_shape + sens_matrix.shape[-3:])
# Solve the pixel-wise linear least-squares problems.
unfolded_values = tf.linalg.lstsq(sens_matrix, rhs,
l2_regularizer=l2_regularizer,
fast=fast)
unfolded_values = tf.reshape(unfolded_values, [-1])
output_indices = tf.reshape(indices, [-1, rank])
# For batch mode we need to do some additional indexing calculations.
if batch_shape.rank > 0:
batch_size = batch_shape.num_elements()
element_size = unfolded_values.shape[0] // batch_size
batch_indices = tf.stack(tf.meshgrid(*[tf.range(s) for s in batch_shape]))
batch_indices = tf.transpose(
tf.reshape(batch_indices, [batch_shape.rank, -1]))
batch_indices = tf.expand_dims(batch_indices, -2)
batch_indices = tf.tile(
batch_indices, [1] * batch_shape.rank + [element_size, 1])
batch_indices = tf.reshape(batch_indices, [-1, batch_shape.rank])
output_indices = tf.tile(output_indices, [batch_size, 1])
output_indices = tf.concat([batch_indices, output_indices], -1)
# Scatter the unfolded values into the reconstructed image.
image = tf.scatter_nd(output_indices, unfolded_values,
batch_shape + image_shape)
return image
def _cg_sense(kspace,
trajectory,
density=None,
sensitivities=None,
tol=1e-5,
max_iter=10,
return_cg_state=False):
"""MR image reconstruction using conjugate gradient SENSE (CG-SENSE).
For the parameters, see `tfmr.reconstruct`.
"""
if sensitivities is None:
raise ValueError("Argument `sensitivities` must be specified for CG-SENSE.")
# Inputs.
kspace = tf.convert_to_tensor(kspace)
sensitivities = tf.convert_to_tensor(sensitivities)
trajectory = tf.convert_to_tensor(trajectory)
rank = trajectory.shape[-1]
num_points = kspace.shape[-1]
num_coils = kspace.shape[-2]
batch_shape = kspace.shape[:-2]
image_shape = sensitivities.shape[-rank:]
# Check some inputs.
tf.debugging.assert_equal(
tf.shape(kspace)[-1], tf.shape(trajectory)[-2], message=(
f"The number of samples in `kspace` (axis -1) and `trajectory` "
f"(axis -2) must match, but got: {tf.shape(kspace)[-1]}, "
f"{tf.shape(trajectory)[-2]}"))
tf.debugging.assert_equal(
tf.shape(kspace)[-2], tf.shape(sensitivities)[-rank-1], message=(
f"The number of coils in `kspace` (axis -2) and `sensitivities` "
f"(axis {-rank-1}) must match, but got: {tf.shape(kspace)[-1]}, "
f"{tf.shape(sensitivities)[-rank-1]}"))
# Check batch shapes.
kspace_batch_shape = kspace.shape[:-2]
sens_batch_shape = sensitivities.shape[:-rank-1]
traj_batch_shape = trajectory.shape[:-2]
batch_shape = tf.broadcast_static_shape(kspace_batch_shape, sens_batch_shape)
# We do not broadcast the k-space input, by design.
if batch_shape != kspace_batch_shape:
raise ValueError(
f"`kspace` and `sensitivities` have incompatible batch shapes: "
f"{kspace_batch_shape}, {sens_batch_shape}")
batch_shape = tf.broadcast_static_shape(kspace_batch_shape, traj_batch_shape)
if batch_shape != kspace_batch_shape:
raise ValueError(
f"`kspace` and `trajectory` have incompatible batch shapes: "
f"{kspace_batch_shape}, {traj_batch_shape}")
# For sampling density correction.
if density is None:
# Sampling density not provided, so estimate from trajectory.
density = traj_ops.estimate_density(trajectory, image_shape)
else:
# Use the provided sampling density.
density = tf.convert_to_tensor(density)
density = tf.expand_dims(density, -2) # Add coil dimension.
# For intensity correction.
intensity = tf.math.reduce_sum(tf.math.square(tf.math.abs(sensitivities)),
axis=-rank-1)
# Prepare intensity correction linear operator.
intensity_weights = tf.math.reciprocal_no_nan(intensity)
linop_intensity = linalg_ops.LinearOperatorRealWeighting(
tf.math.sqrt(intensity_weights),
arg_shape=intensity_weights.shape[-rank:],
dtype=kspace.dtype)
# Prepare density compensation linear operator.
density_weights = tf.math.reciprocal_no_nan(density)
linop_density = linalg_ops.LinearOperatorRealWeighting(
tf.math.sqrt(density_weights),
arg_shape=[num_coils, num_points],
dtype=kspace.dtype)
# Get non-Cartesian parallel MRI operator.
linop_parallel_mri = linalg_ops.LinearOperatorParallelMRI(
sensitivities, trajectory=trajectory)
# Calculate the right half of the system operator. Then, the left half is the
# adjoint of the right half.
linop_right = tf.linalg.LinearOperatorComposition(
[linop_density, linop_parallel_mri, linop_intensity])
linop_left = linop_right.H
# Finally, make system operator. We know this to be self-adjoint and positive
# definite, as required for CG.
operator = tf.linalg.LinearOperatorComposition(
[linop_left, linop_right],
is_self_adjoint=True, is_positive_definite=True)
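  # Putting it together: with E the parallel MRI encoding operator, D the
  # density weights and I the intensity weights defined above, the right half
  # is R = sqrt(D) E sqrt(I), the system solved by CG below is
  # (R^H R) x' = R^H sqrt(D) y, and the final image is recovered as
  # x = sqrt(I) x'.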
# Step 1. Compute the right hand side of the linear system.
kspace_vec = tf.reshape(kspace, batch_shape.as_list() + [-1])
rhs = tf.linalg.matvec(linop_left,
tf.linalg.matvec(linop_density, kspace_vec))
# Step 2. Perform CG iteration to solve modified system.
result = linalg_ops.conjugate_gradient(operator, rhs,
tol=tol, max_iter=max_iter)
# Step 3. Correct intensity to obtain solution to original system.
image_vec = tf.linalg.matvec(linop_intensity, result.x)
# Restore image shape.
image = tf.reshape(image_vec, batch_shape.as_list() + image_shape)
return (image, result) if return_cg_state else image
def _grappa(kspace,
mask=None,
calib=None,
sensitivities=None,
kernel_size=5,
weights_l2_regularizer=0.0,
combine_coils=True,
return_kspace=False):
"""MR image reconstruction using GRAPPA.
For the parameters, see `tfmr.reconstruct`.
"""
if mask is None:
raise ValueError("Argument `mask` must be provided.")
if calib is None:
raise ValueError("Argument `calib` must be provided.")
kspace = tf.convert_to_tensor(kspace)
calib = tf.convert_to_tensor(calib)
mask = tf.convert_to_tensor(mask)
# If mask has no holes, there is nothing to do.
  if tf.math.count_nonzero(tf.math.logical_not(mask))
projects
variable = PY.Word(PY.alphanums + "_", PY.alphanums + "_")
message_key = PY.Literal("MessageId") + PY.Literal("=") + PY.Optional(PY.Optional(PY.Literal("+")) + variable)
severity = PY.Literal("Severity") + PY.Literal("=") + variable
facility = PY.Literal("Facility") + PY.Literal("=") + variable
symbolic_name = (PY.Literal("SymbolicName") + PY.Literal("=")).suppress() + variable
output_base = PY.Literal("OutputBase") + PY.Literal("=") + PY.Optional(PY.Literal("{")) + variable + PY.Optional(PY.Literal("}"))
language = PY.Literal("Language") + PY.Literal("=") + variable
message_value = PY.SkipTo(PY.lineStart + PY.Literal(".")).setParseAction(lambda s, l, t: t[0].strip())
        # the pattern below is commented out because the Severity/Facility/SymbolicName items can appear in any order in practice, not in the fixed order the MSDN documentation describes
#key_value_pair = message_key.suppress() + PY.Optional(severity).suppress() + PY.Optional(facility).suppress() + symbolic_name + PY.Optional(output_base).suppress() + PY.Optional(language).suppress() + message_value
careless_item = language | severity | facility | output_base
key_value_pair = message_key.suppress() + PY.ZeroOrMore(careless_item).suppress() + symbolic_name + PY.ZeroOrMore(careless_item).suppress() + message_value
return key_value_pair.ignore(comment).parseWithTabs()
def get_placeholder_pattern(self):
        # reference: http://msdn.microsoft.com/en-us/library/windows/desktop/dd996906(v=vs.85).aspx and the links on that page
positive_integer = PY.Word("123456789", PY.nums)
integer = PY.Literal("0") | positive_integer
flags = PY.Word("-#0")
width = integer
precision = PY.Literal(".") + PY.Optional(integer)
type_flag = PY.Word("h", "cCdsSu", exact = 2) | PY.Word("l", "cCdisSuxX", exact = 2) | PY.Word("cCdipsSu", exact = 1)
format_string_body = PY.Optional(flags) + PY.Optional(width) + PY.Optional(precision) + type_flag
special_characters = PY.Combine(PY.Literal("%") + PY.Word("0.!%nbr", exact = 1))
numbered_format_string = PY.Combine(PY.Literal("%") + positive_integer + PY.Optional(PY.Literal("!") + format_string_body + PY.Literal("!")))
placeholder_pattern = PY.originalTextFor(numbered_format_string | special_characters)
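        # For illustration (hypothetical message text): this picks up numbered
        # inserts such as "%1" or "%2!s!" as well as escape sequences such as
        # "%n", "%%" and "%0" used by the Windows message compiler, e.g.
        # placeholder_pattern.searchString("Cannot open %1!s!.%n") should yield
        # the tokens "%1!s!" and "%n".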
return placeholder_pattern
class ResxResFile(BaseResFile):
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
root = ET.fromstring(data)
#escape_pattern = None # need to add whether there is an escape error, no need for now since parseError will be thrown in current implementation
for elem in root.findall("data"):
key = elem.get("name")
if key is None:
continue
#filter strings from all values parsed
if ("." in key) and (not key.endswith(".Text")):
continue
                #if there is no child named "value" under "data", the actual value in the C# project is null; we set it to "" to save the effort of handling that case separately
                #if there is no text in the "value" node, the actual value in the C# project is ""
value = ""
sub_elem = elem.find("value")
                if sub_elem is not None:
value = "".join(sub_elem.itertext())
if key in self.keys:
self.duplicate_keys.append(key)
#if escape_pattern.match(value):
# self.escape_error_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class ResxResDetector(BaseResDetector):
def is_translation_necessary(self, value):
return (BaseResDetector.is_translation_necessary(self, value) and (not "PublicKeyToken" in value))
def get_placeholder_pattern(self):
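        # Matches .NET composite-format placeholders such as "{0}" or "{12}";
        # the braces are suppressed, so only the index digits are returned.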
return PY.Literal("{").suppress() + PY.Word(PY.nums) + PY.Literal("}").suppress()
class ReswResFile(ResxResFile):
pass
class ReswResDetector(ResxResDetector):
pass
class WxlResFile(BaseResFile):
    # Maybe the most efficient way is to take the last five characters of the bare file name when determining the language from the file name
def get_language(self):
sub_names = self.file.lower().split(".")
try:
sub_name = sub_names[-2]
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
except IndexError:
pass
for sub_name in sub_names:
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
#sometimes the file name is like agee_zh-CN.wxl
sub_names = self.file.lower().replace("." + self.extension, "").split("_")
try:
sub_name = sub_names[-1]
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
except IndexError:
pass
for sub_name in sub_names:
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
#sometimes the file name is like Dmc-de-de.wxl
try:
sub_name = self.file.lower()[-9:-4]
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
except Exception:
pass
sub_dirs = self.directory.lower().split(os.sep)
try:
sub_dir = sub_dirs[-1]
if sub_dir in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_dir]
except IndexError:
pass
#Is the following necessary? Do we need to decide whether the other sub directory is language id besides the last sub directory?
for sub_dir in sub_dirs:
if sub_dir in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_dir]
return BASE_LANGUAGE
def get_group_id(self):
        #Maybe the most efficient way to get the adjusted file name is self.file[0:-9]
sub_names = self.file.split(".")
file_adjusted = ""
for sub_name in sub_names:
if not sub_name.lower() in STANDARDIZED_LANGUAGES.keys():
file_adjusted += sub_name
#sometimes the file name is like agee_zh-CN.wxl
if "".join(sub_names) == file_adjusted:
file_adjusted = ""
sub_names = self.file.replace("." + self.extension, "").split("_")
for sub_name in sub_names:
if not sub_name.lower() in STANDARDIZED_LANGUAGES.keys():
file_adjusted += sub_name
file_adjusted = file_adjusted + "." + self.extension
#sometimes the file name is like Dmc-de-de.wxl
if ("_".join(sub_names) + "." + self.extension) == file_adjusted:
file_adjusted = self.file[0:-9]
sub_dirs = self.directory.split(os.sep)
dir_adjusted = sub_dirs
index = 0
for sub_dir in sub_dirs:
if sub_dir.lower() in STANDARDIZED_LANGUAGES.keys():
dir_adjusted.remove(sub_dir)
break
index += 1
return file_adjusted, os.sep.join(dir_adjusted), index
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
root = ET.fromstring(data)
#escape_pattern = None # need to add whether there is an escape error, no need for now since parseError will be thrown in current implementation
for elem in root.iter():
if elem.tag.endswith("String"):
key = elem.get("Id")
if key is None:
continue
value = "".join(elem.itertext())
if key in self.keys:
self.duplicate_keys.append(key)
#if escape_pattern.match(value):
# self.escape_error_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class WxlResDetector(BaseResDetector):
def get_placeholder_pattern(self):
variable = PY.Word(PY.alphas + "_", PY.alphanums + "_")
number = PY.Literal("0") | PY.Word("123456789", PY.nums)
placeholder_pattern = PY.originalTextFor((PY.Literal("[") + (variable | number) + PY.Literal("]")) | (PY.Literal("{") + PY.Literal("\\") + variable + PY.Literal("}")))
return placeholder_pattern
class StrResFile(BaseResFile):
def get_language(self):
sub_names = os.path.basename(self.directory).lower().split(".")
if len(sub_names) > 1:
language = sub_names[-2]
if language in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[language]
else:
for language in sub_names:
if language in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[language]
return BASE_LANGUAGE
else:
return BASE_LANGUAGE
def get_group_id(self):
return self.file, os.path.dirname(self.directory)
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
variable = PY.Word(PY.alphas + "_", PY.alphanums + "_")
key_pattern = variable | PY.dblQuotedString
value_pattern = PY.dblQuotedString
key_value_pair = key_pattern + PY.Literal("=").suppress() + value_pattern + PY.Literal(";").suppress()
escape_pattern = re.compile(".*(?<!\\\)\".*")
for token, start_location, end_location in key_value_pair.ignore(PY.cppStyleComment).scanString(data):
key = token[0]
value = token[1]
pure_value = value[1:-1]
if key in self.keys:
self.duplicate_keys.append(key)
if escape_pattern.match(pure_value):
self.escape_error_keys.append(key)
self.keys.add(key)
self.values.append(pure_value)
self.key_value_pairs[key] = pure_value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class StrResDetector(BaseResDetector):
def get_placeholder_pattern(self):
#reference: http://pubs.opengroup.org/onlinepubs/009695399/functions/printf.html, https://developer.apple.com/library/mac/documentation/Cocoa/Conceptual/Strings/Articles/formatSpecifiers.html
#can only detect placeholders, do not make sure they are legal
positive_integer = PY.Word("123456789", PY.nums)
index = positive_integer + PY.Literal("$")
flags = PY.Word("'-+ #0")
width = positive_integer | (PY.Literal("*") + PY.Optional(positive_integer + PY.Literal("$")))
precision = PY.Literal(".") + width
length_modifier = PY.Literal("hh") | PY.Literal("ll") | PY.Word("hljztqL", exact = 1)
conversion_specifier = PY.Word("@sdiouUxXfFeEgGaAcpnCS%", exact = 1)
placeholder_pattern = PY.originalTextFor(PY.Combine(PY.Literal("%") + PY.Optional(index) + PY.Optional(flags) + PY.Optional(width) + PY.Optional(precision) + PY.Optional(length_modifier) + conversion_specifier))
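        # Hypothetical examples of strings this accepts: "%@", "%1$@", "%ld",
        # "%.2f" and "%%"; as noted above it only detects candidate
        # placeholders and does not guarantee that they are well formed.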
return placeholder_pattern
class XibResFile(StrResFile):
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
self.keys.add("KeyPlaceholder")
self.values.append("ValuePlaceholder")
self.key_value_pairs["KeyPlaceholder"] = "ValuePlaceholder"
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class XibResDetector(BaseResDetector):
def get_detect_issues(self):
if self.config.use_user_config:
LOG.info("Reading issue types to be detected from configuration file...")
self.detect_issues = getattr(self.config.config_module, self.config.detect_issues_attrs[self.res_file_type], [])
else:
LOG.info("Getting default issue types to be detected...")
self.detect_issues = []
class XmlResFile(BaseResFile):
def get_language(self):
sub_names = os.path.basename(self.directory).lower().split("-")
count = len(sub_names)
if count == 1:
return BASE_LANGUAGE
elif count > 1:
for i in range(1, count):
language = sub_names[i]
if language in STANDARDIZED_LANGUAGES.keys():
result = STANDARDIZED_LANGUAGES[language]
if i + 1 < count and sub_names[i + 1].startswith("r"):
language = sub_names[i] + "-" + sub_names[i + 1]
if language in STANDARDIZED_LANGUAGES.keys():
result = STANDARDIZED_LANGUAGES[language]
return result
return BASE_LANGUAGE
else:
LOG.critical("A fatal error occurred when determining the language of file '{path}'".format(path = self.path))
quit_application(-1)
def get_group_id(self):
sub_names = os.path.basename(self.directory).lower().split("-")
base_name_adjusted = ""
count = len(sub_names)
region_flag = False
for i in range(count):
sub_name = sub_names[i]
if (not sub_name in STANDARDIZED_LANGUAGES.keys()) and (not region_flag):
base_name_adjusted += sub_name
elif not region_flag:
if i + 1 < count and sub_names[i + 1].startswith("r"):
language = sub_name + "-" + sub_names[i + 1]
if language in STANDARDIZED_LANGUAGES.keys():
region_flag = True
else:
region_flag = False
return self.file, base_name_adjusted, os.path.dirname(self.directory)
    def parse(self, parsing_patterns
#
# This file is part of pyperplan.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""
Implementation of LM-cut heuristic.
"""
from heapq import *
import logging
from heuristics.heuristic_base import Heuristic
def _compare(op):
"""General compare function for objects containing hmax values."""
def comp(self, x):
m = getattr(self.hmax_value, op)
return m(x.hmax_value)
return comp
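# For example, the RelaxedFact.__lt__ produced below simply delegates to
# float.__lt__ on the two hmax values, which is what allows facts and
# operators to be pushed onto the heapq priority queues used further down.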
class RelaxedFact:
def __init__(self, name):
self.name = name
self.hmax_value = float('inf')
self.precondition_of = list() # list of RelaxedOp
self.effect_of = list() # list of RelaxedOp
# We want to be able to insert RelaxedFact into a heap.
# We thus use a general compare function here
# and instantiate the __lt__, __gt__ etc. class methods with this function.
    (__lt__, __le__, __gt__, __ge__) = map(_compare, ['__lt__', '__le__',
                                                      '__gt__', '__ge__'])
def clear(self):
self.hmax_value = float('inf')
def dump(self):
return ('< FACT name: %s, hmax: %f, precond_of: %s, effect_of: %s >' %
(self.name, self.hmax_value,
[str(p) for p in self.precondition_of],
[str(e) for e in self.effect_of]))
def __str__(self):
return self.name
__repr__ = dump
class RelaxedOp:
def __init__(self, name, cost_zero=False):
self.name = name
# list of RelaxedFact
self.precondition = list()
# list of RelaxedFact
self.effects = list()
# the most expensive predecessor (a RelaxedFact)
self.hmax_supporter = None
self.hmax_value = float('inf')
self.cost_zero = cost_zero
# used to check whether an operator can be applied
self.preconditions_unsat = 0
if self.cost_zero:
self.cost = 0.
else:
self.cost = 1.
# We want to be able to insert RelaxedOp into a heap.
# We thus use a general compare function for Operators here
# and instantiate the __lt__, __gt__ etc. class methods with this function.
    (__lt__, __le__, __gt__, __ge__) = map(_compare, ['__lt__', '__le__',
                                                      '__gt__', '__ge__'])
def clear(self, clear_op_cost):
"""This method resets the operator values to its defaults.
It is called during the hmax computation on each operator.
Effect:
-------
clears preconditions_unsat
sets cost to 1
"""
self.preconditions_unsat = len(self.precondition)
if clear_op_cost and not self.cost_zero:
self.cost = 1.
self.hmax_supporter = None
self.hmax_value = float('inf')
def dump(self):
return ('< OPERATOR name: %s, '
'hmax_supp: %s, precond: %s, effects: %s, cost: %d >' %
(self.name, str(self.hmax_supporter),
[str(p) for p in self.precondition],
[str(e) for e in self.effects], self.cost))
def __str__(self):
return self.name
__repr__ = dump
class LmCutHeuristic(Heuristic):
"""Class and methods for computing the LM-cut heuristic value.
We define some constant names for special facts and operators.
NOTE: we use upper case names here as the PDDL tasks generally do not
    contain any upper case names. This way it is ensured that the names
'ALWAYSTRUE', 'GOAL' and 'GOALOP' are always unique.
"""
# operators without precondition get ALWAYSTRUE as precondition
always_true = 'ALWAYSTRUE'
# we use this to have a single goal instead of multiple goals
explicit_goal = 'GOAL'
goal_operator_name = 'GOALOP'
def __init__(self, task):
self.relaxed_facts = dict() # fact name -> RelaxedFact
self.relaxed_ops = dict()
self.reachable = set()
self.goal_plateau = set()
self.dead_end = True
self._compute_relaxed_facts_and_operators(task)
def _compute_relaxed_facts_and_operators(self, task):
"""Store all facts from the task as relaxed facts into our dict."""
# little helper functions that build the relaxed operator graph
def link_op_to_precondition(relaxed_op, factname):
relaxed_op.precondition.append(self.relaxed_facts[factname])
self.relaxed_facts[factname].precondition_of.append(relaxed_op)
def link_op_to_effect(relaxed_op, factname):
relaxed_op.effects.append(self.relaxed_facts[factname])
self.relaxed_facts[factname].effect_of.append(relaxed_op)
for fact in task.facts:
self.relaxed_facts[fact] = RelaxedFact(fact)
for op in task.operators:
assert(not op.name in self.relaxed_ops)
# build new relaxed operator from the task operator
relaxed_op = RelaxedOp(op.name)
# insert all preconditions into relaxed_op and
# mark all preconditions in the relaxed_facts
if not op.preconditions:
# insert one fact that is always true if not already defined
# --> this fact will be used for all operators with empty
# preconditions
if not self.always_true in self.relaxed_facts:
self.relaxed_facts[self.always_true] = \
RelaxedFact(self.always_true)
link_op_to_precondition(relaxed_op, self.always_true)
else:
for fact in op.preconditions:
assert(fact in self.relaxed_facts)
link_op_to_precondition(relaxed_op, fact)
# insert all effects into relaxed_op and
# mark all effects in relaxed_facts
for fact in op.add_effects:
assert(fact in self.relaxed_facts)
link_op_to_effect(relaxed_op, fact)
# insert relaxed_op into hash
self.relaxed_ops[op.name] = relaxed_op
# insert explicit goal and goal operator
goalfact = RelaxedFact(self.explicit_goal)
goalop = RelaxedOp(self.goal_operator_name, True)
self.relaxed_facts[self.explicit_goal] = goalfact
self.relaxed_ops[self.goal_operator_name] = goalop
link_op_to_effect(goalop, self.explicit_goal)
# link all goals to the explicit goal
for fact in task.goals:
assert(fact in self.relaxed_facts)
link_op_to_precondition(goalop, fact)
def compute_hmax(self, state, clear_op_cost=True):
"""Compute hmax values with a Dijkstra like procedure."""
self.reachable.clear()
facts_seen = set()
unexpanded = []
op_cleared = set()
fact_cleared = set()
start_state = {x for x in state}
if self.always_true in self.relaxed_facts:
start_state.add(self.always_true)
for fact in start_state:
self.reachable.add(fact)
fact_obj = self.relaxed_facts[fact]
fact_obj.hmax_value = 0.
# mark all initial facts such that they are not cleared again!
fact_cleared.add(fact_obj)
facts_seen.add(fact_obj)
heappush(unexpanded, fact_obj)
while unexpanded:
fact_obj = heappop(unexpanded)
if fact_obj == self.relaxed_facts[self.explicit_goal]:
self.dead_end = False
# store fact as reachable
self.reachable.add(fact_obj)
hmax_value = fact_obj.hmax_value
# update all operators that have this fact
# as their precondition
for op in fact_obj.precondition_of:
# check if we have explored this operator in this iteration
                # --> if this is not the case then preconditions_unsat might
# still contain facts from a previous heuristic computation
# hence we need to clear it first!
if not op in op_cleared:
op.clear(clear_op_cost)
op_cleared.add(op)
op.preconditions_unsat -= 1
                # first check if all preconditions are fulfilled
if op.preconditions_unsat == 0:
# update hmax_supporter if necessary
if (op.hmax_supporter is None or
hmax_value > op.hmax_supporter.hmax_value):
op.hmax_supporter = fact_obj
# store for next hmax iteration
op.hmax_value = hmax_value + op.cost
hmax_next = op.hmax_supporter.hmax_value + op.cost
for eff in op.effects:
if not eff in fact_cleared:
# clear fact if necessary
eff.clear()
fact_cleared.add(eff)
if hmax_next < eff.hmax_value:
eff.hmax_value = hmax_next
if not eff in facts_seen:
# enqueue effect if not already explored
facts_seen.add(eff)
heappush(unexpanded, eff)
def compute_hmax_from_last_cut(self, state, last_cut):
"""This computes hmax values starting from the last cut.
This saves us from recomputing the hmax values of all facts/operators
that have not changed anyway.
NOTE: a complete cut procedure needs to be finished (i.e. one cut must
be computed) for this to work!
"""
unexpanded = []
# add all operators from the last cut
# to the queue of operators for which the hmax value needs to be
        # recomputed
for op in last_cut:
op.hmax_value = op.hmax_supporter.hmax_value + op.cost
heappush(unexpanded, op)
while unexpanded:
# iterate over all operators whose effects might need updating
op = heappop(unexpanded)
next_hmax = op.hmax_value
#op_seen.add(op)
for fact_obj in op.effects:
# if hmax value of this fact is outdated
fact_hmax = fact_obj.hmax_value
if fact_hmax > next_hmax:
# update hmax value
#logging.debug('updating %s' % fact_obj)
fact_obj.hmax_value = next_hmax
# enqueue all ops of which fact_obj is a hmax supporter
for next_op in fact_obj.precondition_of:
if next_op.hmax_supporter == fact_obj:
next_op.hmax_value = next_hmax + next_op.cost
for supp in next_op.precondition:
if (supp.hmax_value + next_op.cost >
next_op.hmax_value):
next_op.hmax_supporter = supp
next_op.hmax_value = (supp.hmax_value +
next_op.cost)
heappush(unexpanded, next_op)
def compute_goal_plateau(self, fact_name):
"""Recursively mark a goal plateau."""
# assure the fact itself is not in an unreachable region
fact_in_plateau = self.relaxed_facts[fact_name]
if (fact_in_plateau in self.reachable and
not fact_in_plateau in self.goal_plateau):
# add this fact to the goal plateau
self.goal_plateau.add(fact_in_plateau)
for op in fact_in_plateau.effect_of:
# recursive call to mark hmax_supporters of all operators
if op.cost == 0:
self.compute_goal_plateau(op.hmax_supporter.name)
def find_cut(self, state):
"""This returns the set of relaxed operators which are in the cut."""
unexpanded = []
facts_seen = set()
op_cleared = set()
cut = set()
start_state = {x for x in state}
if self.always_true in self.relaxed_facts:
start_state.add(self.always_true)
for fact in start_state:
assert(fact in self.relaxed_facts)
fact_obj = self.relaxed_facts[fact]
facts_seen.add(fact_obj)
heappush(unexpanded, fact_obj)
while unexpanded:
fact_obj = heappop(unexpanded)
for relaxed_op in fact_obj.precondition_of:
if not relaxed_op in op_cleared:
relaxed_op.precond_unsat = len(relaxed_op.precondition)
op_cleared.add(relaxed_op)
                relaxed_op.precond_unsat -=
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import six.moves.configparser
import logging
import fileinput
from experimental_framework.constants import conf_file_sections as cf
from experimental_framework.constants import framework_parameters as fp
# ------------------------------------------------------
# List of common variables
# ------------------------------------------------------
allowed_releases = ['liberty', 'kilo', 'juno']
LOG = None
CONF_FILE = None
DEPLOYMENT_UNIT = None
ITERATIONS = None
RELEASE = None
BASE_DIR = None
RESULT_DIR = None
TEMPLATE_DIR = None
TEMPLATE_NAME = None
TEMPLATE_FILE_EXTENSION = None
PKTGEN = None
PKTGEN_DIR = None
PKTGEN_DPDK_DIRECTORY = None
PKTGEN_PROGRAM = None
PKTGEN_COREMASK = None
PKTGEN_MEMCHANNEL = None
PKTGEN_BUS_SLOT_NIC_1 = None
PKTGEN_BUS_SLOT_NIC_2 = None
PKTGEN_NAME_NIC_1 = None
PKTGEN_NAME_NIC_2 = None
INFLUXDB_IP = None
INFLUXDB_PORT = None
INFLUXDB_DB_NAME = None
# ------------------------------------------------------
# Initialization and Input validation
# ------------------------------------------------------
def init(api=False):
global BASE_DIR
# BASE_DIR = os.getcwd()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = BASE_DIR.replace('/experimental_framework', '')
BASE_DIR = InputValidation.validate_directory_exist_and_format(
BASE_DIR, "Error 000001")
init_conf_file(api)
init_log()
init_general_vars(api)
if CONF_FILE.get_variable_list(cf.CFS_PKTGEN):
init_pktgen()
def init_conf_file(api=False):
global CONF_FILE
if api:
CONF_FILE = ConfigurationFile(cf.get_sections_api(),
'/tmp/apexlake/apexlake.conf')
else:
CONF_FILE = ConfigurationFile(cf.get_sections(),
'/tmp/apexlake/apexlake.conf')
def init_general_vars(api=False):
global TEMPLATE_FILE_EXTENSION
global TEMPLATE_NAME
global TEMPLATE_DIR
global RESULT_DIR
global ITERATIONS
global RELEASE
TEMPLATE_FILE_EXTENSION = '.yaml'
# Check Section in Configuration File
InputValidation.\
validate_configuration_file_section(
cf.CFS_GENERAL,
"Section " + cf.CFS_GENERAL +
"is not present in configuration file")
InputValidation.\
validate_configuration_file_section(
cf.CFS_OPENSTACK,
"Section " + cf.CFS_OPENSTACK +
"is not present in configuration file")
TEMPLATE_DIR = '/tmp/apexlake/heat_templates/'
# if not os.path.exists(TEMPLATE_DIR):
# os.makedirs(TEMPLATE_DIR)
# cmd = "cp /tmp/apexlake/heat_templates/*.yaml {}".format(TEMPLATE_DIR)
# run_command(cmd)
if not api:
# Validate template name
InputValidation.\
validate_configuration_file_parameter(
cf.CFS_GENERAL,
cf.CFSG_TEMPLATE_NAME,
"Parameter " + cf.CFSG_TEMPLATE_NAME +
"is not present in configuration file")
TEMPLATE_NAME = CONF_FILE.get_variable(cf.CFS_GENERAL,
cf.CFSG_TEMPLATE_NAME)
InputValidation.validate_file_exist(
TEMPLATE_DIR + TEMPLATE_NAME,
"The provided template file does not exist")
RESULT_DIR = "/tmp/apexlake/results/"
if not os.path.isdir(RESULT_DIR):
os.makedirs(RESULT_DIR)
if cf.CFSO_RELEASE in CONF_FILE.get_variable_list(cf.CFS_OPENSTACK):
RELEASE = CONF_FILE.get_variable(cf.CFS_OPENSTACK, cf.CFSO_RELEASE)
if RELEASE not in allowed_releases:
raise ValueError("Release {} is not supported".format(RELEASE))
# Validate and assign Iterations
if cf.CFSG_ITERATIONS in CONF_FILE.get_variable_list(cf.CFS_GENERAL):
ITERATIONS = int(CONF_FILE.get_variable(cf.CFS_GENERAL,
cf.CFSG_ITERATIONS))
else:
ITERATIONS = 1
def init_log():
global LOG
LOG = logging.getLogger()
debug = CONF_FILE.get_variable(cf.CFS_GENERAL, cf.CFSG_DEBUG)
if debug == 'true' or debug == 'True':
LOG.setLevel(level=logging.DEBUG)
else:
LOG.setLevel(level=logging.INFO)
log_formatter = logging.Formatter("%(asctime)s --- %(message)s")
file_handler = logging.FileHandler("{0}/{1}.log".format("./", "benchmark"))
file_handler.setFormatter(log_formatter)
file_handler.setLevel(logging.DEBUG)
LOG.addHandler(file_handler)
# ------------------------------------------------------
# InfluxDB conf variables
# ------------------------------------------------------
def init_influxdb():
global INFLUXDB_IP
global INFLUXDB_PORT
global INFLUXDB_DB_NAME
INFLUXDB_IP = CONF_FILE.get_variable(cf.CFS_INFLUXDB, cf.CFSI_IDB_IP)
INFLUXDB_PORT = CONF_FILE.get_variable(cf.CFS_INFLUXDB, cf.CFSI_IDB_PORT)
INFLUXDB_DB_NAME = CONF_FILE.get_variable(cf.CFS_INFLUXDB,
cf.CFSI_IDB_DB_NAME)
# ------------------------------------------------------
# Packet Generator conf variables
# ------------------------------------------------------
def init_pktgen():
global PKTGEN
global PKTGEN_DIR
global PKTGEN_PROGRAM
global PKTGEN_COREMASK
global PKTGEN_MEMCHANNEL
global PKTGEN_BUS_SLOT_NIC_1
global PKTGEN_BUS_SLOT_NIC_2
global PKTGEN_DPDK_DIRECTORY
global PKTGEN_NAME_NIC_1
global PKTGEN_NAME_NIC_2
msg = "Section {} is not present in the configuration file".\
format(cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_section(cf.CFS_PKTGEN, msg)
pktgen_var_list = CONF_FILE.get_variable_list(cf.CFS_PKTGEN)
PKTGEN = 'dpdk_pktgen' # default value
if cf.CFSP_PACKET_GENERATOR in pktgen_var_list:
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_PACKET_GENERATOR, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_PACKET_GENERATOR, msg)
PKTGEN = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_PACKET_GENERATOR)
if PKTGEN not in fp.get_supported_packet_generators():
raise ValueError('The specified packet generator is not supported '
'by the framework')
# Check if the packet gen is dpdk_pktgen
if PKTGEN == cf.CFSP_PG_DPDK:
# Validation of DPDK pktgen directory
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_DPDK_PKTGEN_DIRECTORY, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_DPDK_PKTGEN_DIRECTORY, msg)
PKTGEN_DIR = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_DPDK_PKTGEN_DIRECTORY)
msg = "The directory {} does not exist.".format(PKTGEN_DIR)
PKTGEN_DIR = InputValidation.validate_directory_exist_and_format(
PKTGEN_DIR, msg)
# Validation of the DPDK program name
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_DPDK_PROGRAM_NAME, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_DPDK_PROGRAM_NAME, msg)
PKTGEN_PROGRAM = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_DPDK_PROGRAM_NAME)
# Validation of the DPDK Coremask parameter
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_DPDK_COREMASK, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_DPDK_COREMASK, msg)
PKTGEN_COREMASK = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_DPDK_COREMASK)
# Validation of the DPDK Memory Channel parameter
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_DPDK_MEMORY_CHANNEL, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_DPDK_MEMORY_CHANNEL, msg)
PKTGEN_MEMCHANNEL = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_DPDK_MEMORY_CHANNEL)
# Validation of the DPDK Bus Slot 1
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_DPDK_BUS_SLOT_NIC_1, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_DPDK_BUS_SLOT_NIC_1, msg)
PKTGEN_BUS_SLOT_NIC_1 = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_DPDK_BUS_SLOT_NIC_1)
# Validation of the DPDK Bus Slot 2
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_DPDK_BUS_SLOT_NIC_2, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_DPDK_BUS_SLOT_NIC_2, msg)
PKTGEN_BUS_SLOT_NIC_2 = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_DPDK_BUS_SLOT_NIC_2)
# Validation of the DPDK NIC 1
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_DPDK_NAME_IF_1, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_DPDK_NAME_IF_1, msg)
PKTGEN_NAME_NIC_1 = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_DPDK_NAME_IF_1)
# Validation of the DPDK NIC 2
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_DPDK_NAME_IF_2, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_DPDK_NAME_IF_2, msg)
PKTGEN_NAME_NIC_2 = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_DPDK_NAME_IF_2)
# Validation of DPDK directory parameter
msg = "Parameter {} is not present in section {}".format(
cf.CFSP_DPDK_DPDK_DIRECTORY, cf.CFS_PKTGEN)
InputValidation.validate_configuration_file_parameter(
cf.CFS_PKTGEN, cf.CFSP_DPDK_DPDK_DIRECTORY, msg)
PKTGEN_DPDK_DIRECTORY = CONF_FILE.get_variable(
cf.CFS_PKTGEN, cf.CFSP_DPDK_DPDK_DIRECTORY)
msg = "Directory {} does not exist".format(
PKTGEN_DPDK_DIRECTORY)
PKTGEN_DPDK_DIRECTORY = InputValidation.\
validate_directory_exist_and_format(PKTGEN_DPDK_DIRECTORY, msg)
# ------------------------------------------------------
# Configuration file access
# ------------------------------------------------------
class ConfigurationFile:
"""
Used to extract data from the configuration file
"""
def __init__(self, sections, config_file='conf.cfg'):
"""
Reads configuration file sections
:param sections: list of strings representing the sections to be
loaded
:param config_file: name of the configuration file (string)
:return: None
"""
InputValidation.validate_string(
config_file, "The configuration file name must be a string")
# config_file = BASE_DIR + config_file
InputValidation.validate_file_exist(
config_file, 'The provided configuration file does not exist')
self.config = six.moves.configparser.ConfigParser()
self.config.read(config_file)
for section in sections:
setattr(
self, section, ConfigurationFile.
_config_section_map(section, self.config))
@staticmethod
def _config_section_map(section, config_file):
"""
Returns a dictionary with the configuration values for the specific
section
:param section: section to be loaded (string)
:param config_file: name of the configuration file (string)
:return: dict
"""
dict1 = dict()
options = config_file.options(section)
for option in options:
dict1[option] = config_file.get(section, option)
return dict1
def get_variable(self, section, variable_name):
"""
Returns the value correspondent to a variable
:param section: section to be loaded (string)
:param variable_name: name of the variable (string)
:return: string
"""
message = "The variable name must be a string"
InputValidation.validate_string(variable_name, message)
if variable_name in self.get_variable_list(section):
sect = getattr(self, section)
return sect[variable_name]
else:
exc_msg = 'Parameter {} is not in the {} section of the ' \
'conf file'.format(variable_name, section)
raise ValueError(exc_msg)
def get_variable_list(self, section):
"""
Returns the list of the available variables in a section
:param section: section to be loaded (string)
:return: list
"""
try:
return getattr(self, section)
except AttributeError:
msg = 'Section {} not found in the configuration file'.\
format(section)
raise ValueError(msg)
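# Illustrative usage sketch (not part of the original module); the section
# and option names below are placeholders, not keys guaranteed to exist in a
# real apexlake.conf:
#
#   conf = ConfigurationFile(['General'], '/tmp/apexlake/apexlake.conf')
#   options = conf.get_variable_list('General')    # dict of option -> value
#   debug = conf.get_variable('General', 'debug')  # single value as a string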
# ------------------------------------------------------
# Get OpenStack Credentials
# ------------------------------------------------------
def get_credentials():
"""
Returns the credentials for OpenStack access from the configuration file
:return: dictionary
"""
credentials = dict()
credentials[cf.CFSO_IP_CONTROLLER] = CONF_FILE.get_variable(
cf.CFS_OPENSTACK, cf.CFSO_IP_CONTROLLER)
credentials[cf.CFSO_HEAT_URL] = CONF_FILE.get_variable(
cf.CFS_OPENSTACK, cf.CFSO_HEAT_URL)
credentials[cf.CFSO_USER] = CONF_FILE.get_variable(
cf.CFS_OPENSTACK, cf.CFSO_USER)
credentials[cf.CFSO_PASSWORD] = CONF_FILE.get_variable(
cf.CFS_OPENSTACK, cf.CFSO_PASSWORD)
credentials[cf.CFSO_AUTH_URI] = CONF_FILE.get_variable(
cf.CFS_OPENSTACK, cf.CFSO_AUTH_URI)
credentials[cf.CFSO_PROJECT] = CONF_FILE.get_variable(
cf.CFS_OPENSTACK, cf.CFSO_PROJECT)
return credentials
# ------------------------------------------------------
# Manage files
# ------------------------------------------------------
def get_heat_template_params():
"""
Returns the list of deployment parameters from the configuration file
for the heat template
:return: dict
"""
heat_parameters_list = CONF_FILE.get_variable_list(
cf.CFS_DEPLOYMENT_PARAMETERS)
testcase_parameters = dict()
for param in heat_parameters_list:
testcase_parameters[param] = CONF_FILE.get_variable(
cf.CFS_DEPLOYMENT_PARAMETERS, param)
return testcase_parameters
def get_testcase_params():
"""
Returns the list of testcase parameters from the configuration file
:return: dict
"""
testcase_parameters = dict()
parameters = CONF_FILE.get_variable_list(cf.CFS_TESTCASE_PARAMETERS)
for param in parameters:
testcase_parameters[param] = CONF_FILE.get_variable(
cf.CFS_TESTCASE_PARAMETERS, param)
return testcase_parameters
def get_file_first_line(file_name):
"""
Returns the first line of a file
:param file_name: name of the file to be read (str)
:return: str
"""
message = "The name of the file must be a string"
InputValidation.validate_string(file_name, message)
message = 'The file {} does not exist'.format(file_name)
InputValidation.validate_file_exist(file_name, message)
with open(file_name, 'r') as res:
    return res.readline()
def replace_in_file(file, text_to_search, text_to_replace):
"""
Replaces a string within a file
:param file: name of the file (str)
:param text_to_search: text to be replaced
:param text_to_replace: new text that will replace the previous
:return: None
"""
message = 'The text to be replaced in the file must be a string'
InputValidation.validate_string(text_to_search, message)
message = 'The text
"""
Model Classes
<NAME>, January 21, 2020
"""
import golly as g
import model_parameters as mparam
import random as rand
import numpy as np
import copy
"""
Make a class for seeds.
"""
#
# Note: Golly locates cells by x (horizontal) and y (vertical) coordinates,
# usually given in the format (x, y). On the other hand, we are storing
# these cells in matrices, where the coordinates are usually given in the
# format [row][column], where row is a vertical coordinate and column
# is a horizontal coordinate. Although it may be somewhat confusing, we
# use [x][y] for our matrices (x = row index, y = column index). That is:
#
# self.xspan = self.cells.shape[0]
# self.yspan = self.cells.shape[1]
#
class Seed:
"""
A class for seeds.
"""
#
# __init__(self, xspan, yspan, pop_size) -- returns NULL
#
def __init__(self, xspan, yspan, pop_size):
"""
Make an empty seed (all zeros).
"""
# width of seed on the x-axis
self.xspan = xspan
# height of seed on the y-axis
self.yspan = yspan
# initial seed of zeros, to be modified later
self.cells = np.zeros((xspan, yspan), dtype=np.int)
# initial history of zeros
self.history = np.zeros(pop_size, dtype=np.float)
# initial similarities of zeros
self.similarities = np.zeros(pop_size, dtype=np.float)
# position of seed in the population array, to be modified later
self.address = 0
# count of living cells (ones) in the seed, to be modified later
self.num_living = 0
#
# randomize(self, seed_density) -- returns NULL
#
def randomize(self, seed_density):
"""
Randomly set some cells to state 1. It is assumed that the
cells in the given seed are initially all in state 0. The
result is a seed in which the fraction of cells in state 1
is approximately equal to seed_density (with some random
variation). Strictly speaking, seed_density is the
expected value of the fraction of cells in state 1.
"""
for x in range(self.xspan):
for y in range(self.yspan):
if (rand.random() <= seed_density):
self.cells[x][y] = 1
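# Expository note (not in the original source): since each cell is set to 1
# independently with probability seed_density, the expected number of live
# cells is seed_density * xspan * yspan; e.g. a 20 x 10 seed with
# seed_density = 0.4 contains about 80 ones on average.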
#
# shuffle(self) -- returns a shuffled copy of the given seed
#
def shuffle(self):
"""
Make a copy of the given seed and then shuffle the cells in
the seed. The new shuffled seed will have the same dimensions
and the same density of 1s and 0s as the given seed, but the
locations of the 1s and 0s will be different. (There is a very
small probability that shuffling might not result in any change,
just as shuffling a deck of cards might not change the deck.)
The density of shuffled_seed is exactly the same as the density
of the given seed.
"""
#
shuffled_seed = copy.deepcopy(self)
#
# for each location [x0][y0], randomly choose another location
# [x1][y1] and swap the values of the cells in the two locations.
#
for x0 in range(self.xspan):
for y0 in range(self.yspan):
x1 = rand.randrange(self.xspan)
y1 = rand.randrange(self.yspan)
temp = shuffled_seed.cells[x0][y0]
shuffled_seed.cells[x0][y0] = shuffled_seed.cells[x1][y1]
shuffled_seed.cells[x1][y1] = temp
#
return shuffled_seed
#
#
# red2blue(self) -- returns NULL
#
def red2blue(self):
"""
Switch cells from state 1 (red) to state 2 (blue).
"""
for x in range(self.xspan):
for y in range(self.yspan):
if (self.cells[x][y] == 1):
self.cells[x][y] = 2
#
# insert(self, g, g_xmin, g_xmax, g_ymin, g_ymax) -- returns NULL
#
def insert(self, g, g_xmin, g_xmax, g_ymin, g_ymax):
"""
Write the seed into the Golly grid at a random location
within the given bounds.
g = the Golly universe
s = a seed
"""
step = 1
g_xstart = rand.randrange(g_xmin, g_xmax - self.xspan, step)
g_ystart = rand.randrange(g_ymin, g_ymax - self.yspan, step)
for s_x in range(self.xspan):
for s_y in range(self.yspan):
g_x = g_xstart + s_x
g_y = g_ystart + s_y
s_state = self.cells[s_x][s_y]
g.setcell(g_x, g_y, s_state)
#
# random_rotate(self) -- returns new_seed
#
def random_rotate(self):
"""
Randomly rotate and flip the given seed and return a new seed.
"""
rotation = rand.randrange(0, 4, 1) # 0, 1, 2, 3
flip = rand.randrange(0, 2, 1) # 0, 1
new_seed = copy.deepcopy(self)
# rotate by 90 degrees * rotation (0, 90, 180, 270)
new_seed.cells = np.rot90(new_seed.cells, rotation)
if (flip == 1):
# flip upside down
new_seed.cells = np.flipud(new_seed.cells)
new_seed.xspan = new_seed.cells.shape[0]
new_seed.yspan = new_seed.cells.shape[1]
return new_seed
#
# fitness(self) -- returns fitness
#
def fitness(self):
"""
Calculate a seed's fitness from its history.
"""
history = self.history
return sum(history) / len(history)
#
# mutate(self, prob_grow, prob_flip, prob_shrink, seed_density, mutation_rate)
# -- returns mutant
#
def mutate(self, prob_grow, prob_flip, prob_shrink, seed_density, mutation_rate):
"""
Make a copy of self and return a mutated version of the copy.
"""
#
mutant = copy.deepcopy(self)
#
# prob_grow = probability of invoking grow()
# prob_flip = probability of invoking flip_bits()
# prob_shrink = probability of invoking shrink()
# seed_density = target density of ones in an initial random seed
# mutation_rate = probability of flipping an individual bit
#
assert prob_grow + prob_flip + prob_shrink == 1.0
#
uniform_random = rand.uniform(0, 1)
#
if (uniform_random < prob_grow):
# this will be invoked with a probability of prob_grow
mutant.grow(seed_density)
elif (uniform_random < (prob_grow + prob_flip)):
# this will be invoked with a probability of prob_flip
mutant.flip_bits(mutation_rate)
else:
# this will be invoked with a probability of prob_shrink
mutant.shrink()
# erase the parent's history from the child
pop_size = len(self.history)
mutant.history = np.zeros(pop_size, dtype=np.float)
return mutant
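# Expository note (not in the original source): the single uniform draw above
# partitions [0, 1) into three bands whose widths are the three probabilities.
# With the assumed example values prob_grow = 0.2, prob_flip = 0.7,
# prob_shrink = 0.1:
#   u in [0.0, 0.2)  -> grow(seed_density)
#   u in [0.2, 0.9)  -> flip_bits(mutation_rate)
#   u in [0.9, 1.0)  -> shrink()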
#
# flip_bits(self, mutation_rate) -- returns NULL
#
def flip_bits(self, mutation_rate):
"""
Mutate a seed by randomly flipping bits. Assumes the seed
contains 0s and 1s.
"""
num_mutations = 0
for s_x in range(self.xspan):
for s_y in range(self.yspan):
if (rand.uniform(0, 1) < mutation_rate):
# flip cell value: 0 becomes 1 and 1 becomes 0
self.cells[s_x][s_y] = 1 - self.cells[s_x][s_y]
# count the number of mutations so far
num_mutations = num_mutations + 1
# force a minimum of one mutation -- there is no value
# in having duplicates in the population
if (num_mutations == 0):
s_x = rand.randrange(self.xspan)
s_y = rand.randrange(self.yspan)
self.cells[s_x][s_y] = 1 - self.cells[s_x][s_y]
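# Expository note (not in the original source): each cell flips independently
# with probability mutation_rate, so a w x h seed flips about
# mutation_rate * w * h cells on average; the check above forces at least one
# flip so a mutant is never an exact copy of its parent.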
#
# shrink(self) -- returns NULL
#
def shrink(self):
"""
Randomly remove rows or columns from a seed.
"""
# first we need to decide how to shrink
choice = rand.choice([0, 1, 2, 3])
# now do it
if ((choice == 0) and (self.xspan > mparam.min_s_xspan)):
# delete first row
self.cells = np.delete(self.cells, (0), axis=0)
elif ((choice == 1) and (self.xspan > mparam.min_s_xspan)):
# delete last row
self.cells = np.delete(self.cells, (-1), axis=0)
elif ((choice == 2) and (self.yspan > mparam.min_s_yspan)):
# delete first column
self.cells = np.delete(self.cells, (0), axis=1)
elif ((choice == 3) and (self.yspan > mparam.min_s_yspan)):
# delete last column
self.cells = np.delete(self.cells, (-1), axis=1)
# now let's update xspan and yspan to the new size
self.xspan = self.cells.shape[0]
self.yspan = self.cells.shape[1]
#
#
# grow(self, seed_density) -- returns NULL
#
def grow(self, seed_density):
"""
Randomly add rows or columns to a seed. Assumes
the seed contains 0s and 1s.
"""
# - first we need to decide how to grow
choice = rand.choice([0, 1, 2, 3])
# - now do it
if (choice == 0):
# add a new row before the first row
self.cells = np.vstack([np.zeros(self.yspan, dtype=np.int), self.cells])
# initialize the new row with a density of approximately seed_density
for s_y in range(self.yspan):
if (rand.uniform(0, 1) < seed_density):
self.cells[0][s_y] = 1
#
elif (choice == 1):
# add a new row after the last row
self.cells = np.vstack([self.cells, np.zeros(self.yspan, dtype=np.int)])
# initialize the new row with a density of approximately seed_density
for s_y in range(self.yspan):
if (rand.uniform(0, 1) < seed_density):
self.cells[-1][s_y] = 1
#
elif (choice == 2):
# add a new column before the first column
self.cells = np.hstack([np.zeros((self.xspan, 1), dtype=np.int), self.cells])
# initialize the new column with a density of approximately seed_density
for s_x in range(self.xspan):
if (rand.uniform(0, 1) < seed_density):
self.cells[s_x][0] = 1
#
elif (choice == 3):
# add a new column after the last column
)
o0oo0 . print_notify ( )
if 32 - 32: OoO0O00 / I1Ii111 / I1Ii111
if 45 - 45: iII111i + O0 % i11iIiiIii * I1ii11iIi11i + I1Ii111 / OOooOOo
if 55 - 55: OoooooooOO % iIii1I11I1II1 . ooOoO0o
if 10 - 10: O0 * iIii1I11I1II1 . OOooOOo
i1IIiI1iII = o0oo0 . nonce_key
if ( lisp_map_notify_queue . has_key ( i1IIiI1iII ) ) :
iii = lisp_map_notify_queue [ i1IIiI1iII ]
iii . retransmit_timer . cancel ( )
del ( iii )
if 10 - 10: oO0o - i11iIiiIii + I1IiiI / Oo0Ooo - II111iiii * i11iIiiIii
lisp_map_notify_queue [ i1IIiI1iII ] = o0oo0
if 57 - 57: I1Ii111 * II111iiii * Oo0Ooo . O0
if 90 - 90: iIii1I11I1II1 % iIii1I11I1II1 / IiII
if 21 - 21: ooOoO0o / iII111i % II111iiii * I1IiiI * II111iiii
if 40 - 40: Ii1I / i1IIi . iII111i
lprint ( "Send merged Map-Notify to ETR {}" . format ( red ( iiIi1I . print_address ( ) , False ) ) )
if 65 - 65: iIii1I11I1II1 * O0 . II111iiii * o0oOOo0O0Ooo . I1ii11iIi11i * I1IiiI
lisp_send ( lisp_sockets , iiIi1I , LISP_CTRL_PORT , iI1IIII1ii1 )
if 63 - 63: II111iiii . Oo0Ooo % iIii1I11I1II1
parent . site . map_notifies_sent += 1
if 85 - 85: I1IiiI + i1IIi % I1Ii111
if 76 - 76: i11iIiiIii % i11iIiiIii
if 33 - 33: OOooOOo . ooOoO0o / iIii1I11I1II1 * OOooOOo / oO0o
if 75 - 75: Ii1I - OoOoOO00 . OOooOOo - o0oOOo0O0Ooo - I1ii11iIi11i
o0oo0 . retransmit_timer = threading . Timer ( LISP_MAP_NOTIFY_INTERVAL ,
lisp_retransmit_map_notify , [ o0oo0 ] )
o0oo0 . retransmit_timer . start ( )
if 69 - 69: O0 % I1ii11iIi11i
return
if 77 - 77: iIii1I11I1II1 . OOooOOo
if 64 - 64: OoOoOO00 - i1IIi * i1IIi / iII111i * OoOoOO00 * OoO0O00
if 61 - 61: OOooOOo
if 51 - 51: Oo0Ooo * OOooOOo / iII111i
if 49 - 49: ooOoO0o . i1IIi % I1Ii111 . I1IiiI . I1ii11iIi11i + OoO0O00
if 65 - 65: I1ii11iIi11i + Ii1I / i11iIiiIii * I1Ii111 + OoooooooOO
if 7 - 7: Oo0Ooo % o0oOOo0O0Ooo
def lisp_build_map_notify ( lisp_sockets , eid_records , eid_list , record_count ,
source , port , nonce , key_id , alg_id , auth_len , site , map_register_ack ) :
if 40 - 40: oO0o * IiII
i1IIiI1iII = lisp_hex_string ( nonce ) + source . print_address ( )
if 29 - 29: O0 - II111iiii + iII111i
if 73 - 73: I1Ii111 - I11i + IiII - o0oOOo0O0Ooo - I11i - OOooOOo
if 40 - 40: iIii1I11I1II1 . iII111i * I1ii11iIi11i + IiII - iIii1I11I1II1
if 83 - 83: i1IIi
if 9 - 9: iIii1I11I1II1 + i11iIiiIii
if 70 - 70: I1IiiI - OoO0O00 % OOooOOo + ooOoO0o % II111iiii
lisp_remove_eid_from_map_notify_queue ( eid_list )
if ( lisp_map_notify_queue . has_key ( i1IIiI1iII ) ) :
o0oo0 = lisp_map_notify_queue [ i1IIiI1iII ]
i1I1iIi1IiI = red ( source . print_address_no_iid ( ) , False )
lprint ( "Map-Notify with nonce 0x{} pending for xTR {}" . format ( lisp_hex_string ( o0oo0 . nonce ) , i1I1iIi1IiI ) )
if 19 - 19: I11i + i1IIi / i1IIi - II111iiii + I1Ii111
return
if 11 - 11: i11iIiiIii % i11iIiiIii / IiII - Oo0Ooo / O0 - I11i
if 29 - 29: OOooOOo * iIii1I11I1II1 * ooOoO0o
o0oo0 = lisp_map_notify ( lisp_sockets )
o0oo0 . record_count = record_count
key_id = key_id
o0oo0 . key_id = key_id
o0oo0 . alg_id = alg_id
o0oo0 . auth_len = auth_len
o0oo0 . nonce = nonce
o0oo0 . nonce_key = lisp_hex_string ( nonce )
o0oo0 . etr . copy_address ( source )
o0oo0 . etr_port = port
o0oo0 . site = site
o0oo0 . eid_list = eid_list
if 80 - 80: oO0o * I1Ii111
if 87 - 87: iII111i + OoOoOO00 % ooOoO0o - oO0o
if 40 - 40: i1IIi / OoOoOO00 - I11i / ooOoO0o . Ii1I
if 8 - 8: I1IiiI . IiII . OOooOOo . O0
if ( map_register_ack == False ) :
i1IIiI1iII = o0oo0 . nonce_key
lisp_map_notify_queue [ i1IIiI1iII ] = o0oo0
if 3 - 3: Ii1I + i11iIiiIii
if 87 - 87: ooOoO0o - iII111i % I11i
if ( map_register_ack ) :
lprint ( "Send Map-Notify to ack Map-Register" )
else :
lprint ( "Send Map-Notify for RLOC-set change" )
if 88 - 88: I11i . OoooooooOO
if 86 - 86: Ii1I - I1IiiI - iII111i % Ii1I . I1ii11iIi11i % i1IIi
if 84 - 84: OoOoOO00
if 99 - 99: OoO0O00 - OoOoOO00 - i1IIi / OoO0O00 * I1ii11iIi11i * iIii1I11I1II1
if 65 - 65: iII111i - O0 / i1IIi . I1Ii111
iI1IIII1ii1 = o0oo0 . encode ( eid_records , site . auth_key [ key_id ] )
o0oo0 . print_notify ( )
if 85 - 85: o0oOOo0O0Ooo % Ii1I
if ( map_register_ack == False ) :
I111IoOo0oOOO0o = lisp_eid_record ( )
I111IoOo0oOOO0o . decode ( eid_records )
I111IoOo0oOOO0o . print_record ( " " , False )
if 81 - 81: oO0o / OoO0O00 * i1IIi % iIii1I11I1II1
if 23 - 23: II111iiii . II111iiii
if 17 - 17: i11iIiiIii / IiII * I1IiiI . Oo0Ooo / o0oOOo0O0Ooo - iIii1I11I1II1
if 21 - 21: OOooOOo % Ii1I
if 3 - 3: OOooOOo / ooOoO0o / I1Ii111 . I11i
lisp_send_map_notify ( lisp_sockets , iI1IIII1ii1 , o0oo0 . etr , port )
site . map_notifies_sent += 1
if 54 - 54: I1ii11iIi11i - I1IiiI . OoOoOO00
if ( map_register_ack ) : return
if 36 - 36: OoO0O00 * I1IiiI / iII111i
if 95 - 95: Ii1I . Oo0Ooo
if 42 - 42: IiII . i1IIi % O0 * ooOoO0o - OOooOOo % ooOoO0o
if 99 - 99: i1IIi + OoOoOO00 - iII111i % II111iiii
if 6 - 6: ooOoO0o - I1Ii111 . OoOoOO00
if 64 - 64: iII111i + I1ii11iIi11i
o0oo0 . retransmit_timer = threading . Timer ( LISP_MAP_NOTIFY_INTERVAL ,
lisp_retransmit_map_notify , [ o0oo0 ] )
o0oo0 . retransmit_timer . start ( )
return
if 88 - 88: I1Ii111 / i11iIiiIii - O0 . II111iiii / II111iiii * II111iiii
if 56 - 56: Oo0Ooo / I1IiiI % I1Ii111 % I1ii11iIi11i * I1IiiI - IiII
if 39 - 39: oO0o + iII111i . I1Ii111 * i11iIiiIii % o0oOOo0O0Ooo + OOooOOo
if 61 - 61: ooOoO0o / I1Ii111 / I1ii11iIi11i - Ii1I % o0oOOo0O0Ooo * iII111i
if 94 - 94: I1IiiI / I11i
if 100 - 100: Ii1I % OoO0O00 % OoooooooOO / II111iiii * I1Ii111
if 64 - 64: I1Ii111 * OOooOOo * Ii1I + I1ii11iIi11i / iIii1I11I1II1 / Oo0Ooo
if 50 - 50: OOooOOo % i11iIiiIii
def lisp_send_map_notify_ack ( lisp_sockets , eid_records , map_notify , ms ) :
map_notify . map_notify_ack = True
if 99 - 99: IiII
if 87 - 87: IiII
if 35 - 35: oO0o . O0 . Ii1I / ooOoO0o
if 36 - 36: i11iIiiIii . II111iiii . I11i . II111iiii
iI1IIII1ii1 = map_notify . encode ( eid_records , ms . password )
map_notify . print_notify ( )
if 36 - 36: Ii1I + ooOoO0o / Oo0Ooo % Oo0Ooo
if 2 - 2: oO0o - Oo0Ooo * OoO0O00 . ooOoO0o . OOooOOo - oO0o
if 74 - 74: o0oOOo0O0Ooo
if 18 - 18: Oo0Ooo % OOooOOo / OOooOOo . I1IiiI + i1IIi . I1IiiI
iiIi1I = ms . map_server
lprint ( "Send Map-Notify-Ack to {}" . format (
red ( iiIi1I . print_address ( ) , False ) ) )
lisp_send ( lisp_sockets , iiIi1I , LISP_CTRL_PORT , iI1IIII1ii1 )
return
if 3 - 3: O0 * O0 + II111iiii + OoOoOO00 * I11i % Oo0Ooo
if 19 - 19: oO0o % IiII % OoooooooOO % I1ii11iIi11i / OoO0O00
if 6 - 6: O0 * I1Ii111 - II111iiii
if 60 - 60: oO0o
axis=1))) == 1
assert len(np.where(np.all(patch_indices == np.asarray([0, 0, 3]), axis=1))) == 1
# Make sure we skipped over this one, we want maximum spaced patches
# So this one should not be in there
assert 3 not in patch_indices[:, 2]
assert len(np.where(np.all(patch_indices == np.asarray([0, 5, 0]), axis=1))) == 1
def test_overlap_parameters_2D():
image_size = [10, 10]
# Need to flip for correct orientation
sample = np.ones(np.flip(image_size))
sample = sitk.GetImageFromArray(sample)
patch_size = np.asarray([4, 4])
(
patch_indices,
left_padding,
right_padding,
) = SingleSamplePreprocessor._get_overlap_patching_parameters(patch_size, 0.5, 0, sample)
assert isinstance(patch_indices, np.ndarray)
assert len(patch_indices) == 25
assert np.unique(patch_indices[:, 0]) == pytest.approx(np.asarray([0, 2, 4, 6, 8]))
assert np.unique(patch_indices[:, 1]) == pytest.approx(np.asarray([0, 2, 4, 6, 8]))
assert left_padding == pytest.approx(np.asarray([1, 1]))
assert right_padding == pytest.approx(np.asarray([1, 1]))
image_size = [10, 10]
# Need to flip for correct orientation
sample = np.ones(np.flip(image_size))
sample = sitk.GetImageFromArray(sample)
patch_size = np.asarray([3, 5])
(
patch_indices,
left_padding,
right_padding,
) = SingleSamplePreprocessor._get_overlap_patching_parameters(patch_size, 0.5, 0, sample)
assert isinstance(patch_indices, np.ndarray)
assert len(patch_indices) == 36
assert np.unique(patch_indices[:, 0]) == pytest.approx(np.asarray([0, 1, 2, 3, 4, 5, 6, 7, 8]))
assert np.unique(patch_indices[:, 1]) == pytest.approx(np.asarray([0, 2, 4, 6]))
assert left_padding == pytest.approx(np.asarray([1, 1]))
assert right_padding == pytest.approx(np.asarray([0, 0]))
image_size = [80, 35]
# Need to flip for correct orientation
sample = np.ones(np.flip(image_size))
sample = sitk.GetImageFromArray(sample)
patch_size = np.asarray([10, 4])
(
patch_indices,
left_padding,
right_padding,
) = SingleSamplePreprocessor._get_overlap_patching_parameters(patch_size, [0.3, 0.5], 0, sample)
assert isinstance(patch_indices, np.ndarray)
assert len(patch_indices) == 204
assert left_padding == pytest.approx(np.asarray([4, 1]))
assert right_padding == pytest.approx(np.asarray([3, 0]))
assert np.unique(patch_indices[:, 0]) == pytest.approx(
np.asarray([0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77])
)
assert np.unique(patch_indices[:, 1]) == pytest.approx(
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32])
)
# Test with set number of overlap voxels
image_size = [80, 35]
# Need to flip for correct orientation
sample = np.ones(np.flip(image_size))
sample = sitk.GetImageFromArray(sample)
patch_size = np.asarray([10, 4])
(
patch_indices,
left_padding,
right_padding,
) = SingleSamplePreprocessor._get_overlap_patching_parameters(patch_size, [3, 2], 0, sample)
assert isinstance(patch_indices, np.ndarray)
assert len(patch_indices) == 204
assert left_padding == pytest.approx(np.asarray([4, 1]))
assert right_padding == pytest.approx(np.asarray([3, 0]))
assert np.unique(patch_indices[:, 0]) == pytest.approx(
np.asarray([0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77])
)
assert np.unique(patch_indices[:, 1]) == pytest.approx(
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32])
)
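def _overlap_indices_sketch():
    # Illustrative helper added for exposition; it is NOT part of the original
    # test-suite and does NOT reimplement SingleSamplePreprocessor internals.
    # It only spells out the arithmetic encoded by the first block of
    # assertions in test_overlap_parameters_2D: an image of length 10, a patch
    # of length 4 and 50% overlap give a stride of 2; with one padding voxel
    # on each side (padded length 12) the patch start indices are 0, 2, 4, 6,
    # 8. How the preprocessor rounds fractional overlaps or splits odd padding
    # amounts is not reproduced here.
    image_len, patch_len, overlap_fraction = 10, 4, 0.5
    stride = int(round(patch_len * (1.0 - overlap_fraction)))  # 4 * 0.5 = 2
    padded_len = image_len + 1 + 1                             # asserted paddings: 1 left, 1 right
    starts = np.arange(0, padded_len - patch_len + 1, stride)  # array([0, 2, 4, 6, 8])
    return stride, starts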
def test_overlap_parameters_3D():
image_size = [80, 35, 21]
# Need to flip for correct orientation
sample = np.ones(np.flip(image_size))
sample = sitk.GetImageFromArray(sample)
patch_size = np.asarray([10, 4, 3])
(
patch_indices,
left_padding,
right_padding,
) = SingleSamplePreprocessor._get_overlap_patching_parameters(
patch_size, [0.3, 0.5, 0.33], 0, sample
)
assert isinstance(patch_indices, np.ndarray)
assert len(patch_indices) == 2244
assert left_padding == pytest.approx(np.asarray([4, 1, 1]))
assert right_padding == pytest.approx(np.asarray([3, 0, 1]))
assert np.unique(patch_indices[:, 0]) == pytest.approx(
np.asarray([0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77])
)
assert np.unique(patch_indices[:, 1]) == pytest.approx(
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32])
)
assert np.unique(patch_indices[:, 2]) == pytest.approx(
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
)
# Test with set number of overlap voxels
image_size = [80, 35, 21]
# Need to flip for correct orientation
sample = np.ones(np.flip(image_size))
sample = sitk.GetImageFromArray(sample)
patch_size = np.asarray([10, 4, 3])
(
patch_indices,
left_padding,
right_padding,
) = SingleSamplePreprocessor._get_overlap_patching_parameters(patch_size, [3, 2, 1], 0, sample)
assert isinstance(patch_indices, np.ndarray)
assert len(patch_indices) == 2244
assert left_padding == pytest.approx(np.asarray([4, 1, 1]))
assert right_padding == pytest.approx(np.asarray([3, 0, 1]))
assert np.unique(patch_indices[:, 0]) == pytest.approx(
np.asarray([0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77])
)
assert np.unique(patch_indices[:, 1]) == pytest.approx(
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32])
)
assert np.unique(patch_indices[:, 2]) == pytest.approx(
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
)
@NIFTI_FILES
def test_get_patch_parameters(datafiles):
samples = get_samples(datafiles)
for i_sample in samples:
# Overlap
preprocessor = SingleSamplePreprocessor(
i_sample,
{
"patching": {
"patch_size": [5, 5, 5],
"pad_if_needed": True,
"pad_constant": 0,
"extraction_type": "overlap",
"overlap_fraction": 0.5,
}
},
)
patch_parameters = preprocessor._get_patch_parameters()
assert isinstance(patch_parameters, dict)
assert isinstance(patch_parameters["patch_indices"], np.ndarray)
assert patch_parameters["left_padding"] == pytest.approx(np.asarray([1, 1, 1]))
assert patch_parameters["right_padding"] == pytest.approx(np.asarray([0, 0, 0]))
assert len(patch_parameters["patch_indices"]) == 2744
assert np.unique(patch_parameters["patch_indices"][:, 0]) == pytest.approx(
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26])
)
assert np.unique(patch_parameters["patch_indices"][:, 1]) == pytest.approx(
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26])
)
assert np.unique(patch_parameters["patch_indices"][:, 2]) == pytest.approx(
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26])
)
# Random
preprocessor = SingleSamplePreprocessor(
i_sample,
{
"patching": {
"patch_size": [5, 5, 5],
"pad_if_needed": True,
"pad_constant": 0,
"extraction_type": "random",
"max_number_of_patches": 15,
}
},
)
patch_parameters = preprocessor._get_patch_parameters()
assert isinstance(patch_parameters, dict)
assert isinstance(patch_parameters["patch_indices"], np.ndarray)
assert patch_parameters["left_padding"] == pytest.approx(np.asarray([0, 0, 0]))
assert patch_parameters["right_padding"] == pytest.approx(np.asarray([0, 0, 0]))
assert len(patch_parameters["patch_indices"]) == 15
# fitting
preprocessor = SingleSamplePreprocessor(
i_sample,
{
"patching": {
"patch_size": [5, 5, 5],
"pad_if_needed": True,
"pad_constant": 0,
"extraction_type": "fitting",
}
},
)
patch_parameters = preprocessor._get_patch_parameters()
assert isinstance(patch_parameters, dict)
assert isinstance(patch_parameters["patch_indices"], np.ndarray)
assert patch_parameters["left_padding"] == pytest.approx(np.asarray([0, 0, 0]))
assert patch_parameters["right_padding"] == pytest.approx(np.asarray([0, 0, 0]))
def test_patch_making():
image_size = [80, 35, 21]
# Need to flip for correct orientation
sample = np.ones(np.flip(image_size))
sample[0:3, 0:4, 0:10] = 50
sample[18:, 31:, 70:] = 25
sample[15:18, 12:18, 40:60] = 105.5
sample = sitk.GetImageFromArray(sample)
patch_size = np.asarray([10, 4, 3])
patch_indices = np.asarray(
list(
itertools.product(
np.asarray([0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77]),
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32]),
np.asarray([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]),
)
)
)
patch_parameters = {
"left_padding": np.asarray([4, 1, 1]),
"right_padding": np.asarray([3, 0, 1]),
"patch_indices": patch_indices,
}
patches = SingleSamplePreprocessor._make_patches(sample, patch_parameters, -15.0, patch_size)
assert len(patches) == 2244
# Find indices and check whether correct patches were extracted
origin_patch_index = np.squeeze(np.argwhere(np.all(patch_indices == np.asarray([0, 0, 0]), 1)))
assert patches[origin_patch_index].GetSize() == (10, 4, 3)
obtained_origin_patch = sitk.GetArrayFromImage(patches[origin_patch_index])
assert obtained_origin_patch[0:1, 0:1, 0:4] == pytest.approx(-15)
assert obtained_origin_patch[1:3, 1:4, 4:10] == pytest.approx(50)
last_patch_index = np.squeeze(np.argwhere(np.all(patch_indices == np.asarray([77, 32, 20]), 1)))
last_patch = sitk.GetArrayFromImage(patches[last_patch_index])
assert last_patch[0:2, :, 0:7] == pytest.approx(25)
assert last_patch[2:, :, 7:] == pytest.approx(-15)
@NIFTI_FILES
def test_patching(datafiles):
samples = get_samples(datafiles)
for i_sample in samples:
# Overlap
preprocessor = SingleSamplePreprocessor(
i_sample,
{
"patching": {
"patch_size": [5, 5, 5],
"pad_if_needed": True,
"pad_constant": 0,
"extraction_type": "overlap",
"overlap_fraction": 0.5,
}
},
)
preprocessor.patching()
assert preprocessor.sample.has_patches
assert preprocessor.sample.number_of_patches == 2744
# Make sure that the patches for all the channels are the same
patches = preprocessor.sample.get_grouped_channels()
for i_patch in patches:
first_channel = sitk.GetArrayFromImage(i_patch[0])
for i_i_patch_channel, i_patch_channel in enumerate(i_patch):
i_patch_channel = sitk.GetArrayFromImage(i_patch_channel)
assert (
i_patch_channel[i_i_patch_channel, i_i_patch_channel, i_i_patch_channel]
== first_channel[0, 0, 0]
)
assert (
i_patch_channel[
i_i_patch_channel + 1, i_i_patch_channel + 1, i_i_patch_channel + 1,
]
== first_channel[1, 1, 1]
)
preprocessor = SingleSamplePreprocessor(
i_sample,
{
"patching": {
"patch_size": [5, 5, 5],
"extraction_type": "random",
"max_number_of_patches": 10,
}
},
)
preprocessor.patching()
assert preprocessor.sample.has_patches
assert preprocessor.sample.number_of_patches == 10
@NIFTI_FILES
def test_patching_2D(datafiles):
samples = get_samples(datafiles)
for i_sample in samples:
# Overlap
original_sample_channel = sitk.GetArrayFromImage(i_sample.get_example_channel())
preprocessor = SingleSamplePreprocessor(
i_sample,
{
"patching": {
"patch_size": [30, 30, 1],
"pad_if_needed": True,
"pad_constant": 0,
"extraction_type": "fitting",
}
},
)
preprocessor.patching()
assert preprocessor.sample.has_patches
assert preprocessor.sample.number_of_patches == 30
# Make sure that the patches are indeed the slices
sample_patches = preprocessor.sample.get_example_channel_patches()
for i_i_patch, i_patch in enumerate(sample_patches):
i_patch = sitk.GetArrayFromImage(i_patch)
assert i_patch == pytest.approx(original_sample_channel[i_i_patch, :, :])
# ===============================================================
# Rejecting
# ===============================================================
def test_get_rejecting_patches():
image_size = [10, 10, 10]
# Need to flip for correct orientation
mask = np.zeros(np.flip(image_size))
mask = sitk.Cast(sitk.GetImageFromArray(mask), sitk.sitkUInt8)
to_reject = SingleSamplePreprocessor._get_to_reject_patches(mask, 0.5)
assert isinstance(to_reject, list)
assert len(to_reject) == 1
assert to_reject[0]
to_reject = SingleSamplePreprocessor._get_to_reject_patches(mask, 0)
assert isinstance(to_reject, list)
assert len(to_reject) == 1
assert not to_reject[0]
mask = np.ones(np.flip(image_size))
mask = sitk.Cast(sitk.GetImageFromArray(mask), sitk.sitkUInt8)
to_reject = SingleSamplePreprocessor._get_to_reject_patches(mask, 0.5)
assert isinstance(to_reject, list)
assert len(to_reject) == 1
assert not to_reject[0]
N_patches = 50
total_voxels = 10 * 10 * 10
rejection_limit = 0.3
rejection_limit_voxels = total_voxels * rejection_limit
patches = []
true_output = []
for i_patch in range(N_patches):
patch_mask
#!/bin/python
'''
script to plot ABC
'''
import os
import sys
import h5py
import getpass
import numpy as np
import corner as DFM
# -- abcpmc --
import abcpmc
# -- galpopfm --
from galpopfm import dustfm as dustFM
from galpopfm import dust_infer as dustInfer
from galpopfm import measure_obs as measureObs
# -- plotting --
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
def get_abc(T, name, pwd, abc_dir=None):
''' scp ABC files from sirocco
'''
if not os.path.isdir(abc_dir):
os.system('mkdir -p %s' % abc_dir)
f = open('scp_abc.expect', 'w')
cntnt = '\n'.join([
'#!/usr/bin/expect',
'spawn scp sirocco:/home/users/hahn/data/galpopfm/abc/%s/*t%i.dat %s/' % (name, T, abc_dir),
'',
'expect "yes/no" {',
' send "yes\r"'
' expect "*?assword" { send "%s\r" }' % pwd,
' } "*?assword" { send "%s\r" }' % pwd,
'',
'expect "yes/no" {',
' send "yes\r"'
' expect "*?assword" { send "%s\r" }' % pwd,
' } "*?assword" { send "%s\r" }' % pwd,
'',
'interact'])
f.write(cntnt)
f.close()
cmd = 'expect scp_abc.expect'
os.system(cmd)
os.system('rm scp_abc.expect')
return None
def plot_pool(T, prior=None, dem='slab_calzetti', abc_dir=None):
''' plot ABC pool
'''
# read pool
theta_T = np.loadtxt(os.path.join(abc_dir, 'theta.t%i.dat' % T))
rho_T = np.loadtxt(os.path.join(abc_dir, 'rho.t%i.dat' % T))
w_T = np.loadtxt(os.path.join(abc_dir, 'w.t%i.dat' % T))
pool = abcpmc.PoolSpec(T, None, None, theta_T, rho_T, w_T)
dustInfer.plotABC(pool, prior=prior, dem=dem, abc_dir=abc_dir)
plt.close()
return None
def abc_sumstat(T, sim='simba', dem='slab_calzetti', sfr0_prescription='adhoc', abc_dir=None):
''' compare ABC summary statistics to data
'''
####################################################################################
# read in SDSS measurements
####################################################################################
r_edges, gr_edges, fn_edges, x_obs = dustInfer.sumstat_obs(statistic='2d', return_bins=True)
dr = r_edges[1] - r_edges[0]
dgr = gr_edges[1] - gr_edges[0]
dfn = fn_edges[1] - fn_edges[0]
ranges = [(r_edges[0], r_edges[-1]), (-1., 3.), (-1., 10.)]
nbar_obs, x_obs_gr, x_obs_fn = x_obs
####################################################################################
# read pool
####################################################################################
theta_T = np.loadtxt(os.path.join(abc_dir, 'theta.t%i.dat' % T))
rho_T = np.loadtxt(os.path.join(abc_dir, 'rho.t%i.dat' % T))
w_T = np.loadtxt(os.path.join(abc_dir, 'w.t%i.dat' % T))
theta_med = np.median(theta_T, axis=0)
####################################################################################
# read simulations
####################################################################################
_sim_sed = dustInfer._read_sed(sim)
wlim = (_sim_sed['wave'] > 1e3) & (_sim_sed['wave'] < 8e3)
cuts = (_sim_sed['logmstar'] > 9.4)
sim_sed = {}
sim_sed['sim'] = sim
sim_sed['logmstar'] = _sim_sed['logmstar'][cuts].copy()
sim_sed['logsfr.inst'] = _sim_sed['logsfr.inst'][cuts].copy()
sim_sed['wave'] = _sim_sed['wave'][wlim].copy()
sim_sed['sed_noneb'] = _sim_sed['sed_noneb'][cuts,:][:,wlim].copy()
sim_sed['sed_onlyneb'] = _sim_sed['sed_onlyneb'][cuts,:][:,wlim].copy()
nbar_mod, x_mod_gr, x_mod_fn = dustInfer.sumstat_model(theta_med, sed=sim_sed, dem=dem,
statistic='2d', sfr0_prescription=sfr0_prescription)
########################################################################
print('obs nbar = %.4e' % nbar_obs)
print('mod nbar = %.4e' % nbar_mod)
########################################################################
fig = plt.figure(figsize=(10,10))
sub = fig.add_subplot(221)
sub.pcolormesh(r_edges, gr_edges, x_obs_gr.T,
vmin=1e-5, vmax=1e-2, norm=mpl.colors.LogNorm(), cmap='Greys')
sub.text(0.95, 0.95, r'SDSS', ha='right', va='top', transform=sub.transAxes, fontsize=25)
sub.set_xlim(20., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([])
sub.set_ylabel(r'$G-R$', fontsize=20)
sub.set_ylim(ranges[1])
sub = fig.add_subplot(222)
sub.pcolormesh(r_edges, gr_edges, x_mod_gr.T,
vmin=1e-5, vmax=1e-2, norm=mpl.colors.LogNorm(), cmap='Oranges')
sub.text(0.95, 0.95, sim_sed['sim'].upper(), ha='right', va='top', transform=sub.transAxes, fontsize=25)
sub.set_xlim(20., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([])
sub.set_ylim(ranges[1])
sub.set_yticklabels([])
sub = fig.add_subplot(223)
h = sub.pcolormesh(r_edges, fn_edges, x_obs_fn.T,
vmin=1e-5, vmax=1e-2, norm=mpl.colors.LogNorm(), cmap='Greys')
sub.set_xlabel(r'$M_r$', fontsize=20)
sub.set_xlim(20., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([-20, -21, -22, -23])
sub.set_ylabel(r'$FUV - NUV$', fontsize=20)
sub.set_ylim(ranges[2])
sub = fig.add_subplot(224)
sub.pcolormesh(r_edges, fn_edges, x_mod_fn.T,
vmin=1e-5, vmax=1e-2, norm=mpl.colors.LogNorm(), cmap='Oranges')
sub.set_xlabel(r'$M_r$', fontsize=20)
sub.set_xlim(20., 23)
sub.set_xticks([20., 21., 22., 23])
sub.set_xticklabels([-20, -21, -22, -23])
sub.set_ylim(ranges[2])
sub.set_yticklabels([])
fig.subplots_adjust(wspace=0.1, hspace=0.1, right=0.85)
cbar_ax = fig.add_axes([0.875, 0.15, 0.02, 0.7])
fig.colorbar(h, cax=cbar_ax)
try:
fig.savefig(os.path.join(abc_dir, 'abc_sumstat.t%i.png' % T), bbox_inches='tight')
except RuntimeError:
fig.savefig(os.path.join(abc_dir, 'abc_sumstat.t%i.pdf' % T), bbox_inches='tight')
plt.close()
return None
def abc_attenuationt(T, sim='simba', dem='slab_calzetti', abc_dir=None):
''' plot the attenuation curves A_lambda implied by the median ABC posterior parameters
'''
# read pool
theta_T = np.loadtxt(os.path.join(abc_dir, 'theta.t%i.dat' % T))
rho_T = np.loadtxt(os.path.join(abc_dir, 'rho.t%i.dat' % T))
w_T = np.loadtxt(os.path.join(abc_dir, 'w.t%i.dat' % T))
theta_med = np.median(theta_T, axis=0)
wave = np.linspace(1e3, 1e4, 101)
flux = np.ones(len(wave))
i3000 = (np.abs(wave - 3000.)).argmin() # index at 3000A
# read simulations
_sim_sed = dustInfer._read_sed(sim)
cuts = _sim_sed['logmstar'] > 9.4
logms = _sim_sed['logmstar'][cuts].copy()
#logsfr = _sim_sed['logsfr.100'][cens].copy()
logsfr = _sim_sed['logsfr.inst'][cuts].copy()
A_lambdas, highmass, sfing = [], [], []
for i in np.arange(np.sum(cuts))[::100]:
if dem == 'slab_calzetti':
A_lambda = -2.5 * np.log10(dustFM.DEM_slabcalzetti(theta_med, wave,
flux, logms[i], logsfr[i], nebular=False))
elif dem == 'slab_noll_m':
A_lambda = -2.5 * np.log10(dustFM.DEM_slab_noll_m(theta_med, wave,
flux, logms[i], logsfr[i], nebular=False))
elif dem == 'slab_noll_msfr':
A_lambda = -2.5 * np.log10(dustFM.DEM_slab_noll_msfr(theta_med, wave,
flux, logms[i], logsfr[i], nebular=False))
elif dem == 'slab_noll_simple':
A_lambda = -2.5 * np.log10(dustFM.DEM_slab_noll_simple(theta_med, wave,
flux, logms[i], logsfr[i], nebular=False))
elif dem == 'tnorm_noll_msfr':
A_lambda = -2.5 * np.log10(dustFM.DEM_tnorm_noll_msfr(theta_med, wave,
flux, logms[i], logsfr[i], nebular=False))
elif dem == 'slab_noll_msfr_fixbump':
A_lambda = -2.5 * np.log10(dustFM.DEM_slab_noll_msfr_fixbump(theta_med, wave,
flux, logms[i], logsfr[i], nebular=False))
elif dem == 'tnorm_noll_msfr_fixbump':
A_lambda = -2.5 * np.log10(dustFM.DEM_tnorm_noll_msfr_fixbump(theta_med, wave,
flux, logms[i], logsfr[i], nebular=False))
elif dem == 'slab_noll_msfr_kink_fixbump':
A_lambda = -2.5 * np.log10(dustFM.DEM_slab_noll_msfr_kink_fixbump(theta_med, wave,
flux, logms[i], logsfr[i], nebular=False))
else:
raise NotImplementedError
A_lambdas.append(A_lambda)
if logms[i] > 10.5:
highmass.append(True)
else: highmass.append(False)
if logsfr[i] - logms[i] > -11.: sfing.append(True)
else: sfing.append(False)
fig = plt.figure(figsize=(10,10))
sub = fig.add_subplot(311)
for hm, A_lambda in zip(highmass, A_lambdas):
if hm:
sub.plot(wave, A_lambda, c='C1', lw=0.1)
else:
sub.plot(wave, A_lambda, c='C0', lw=0.1)
sub.set_xlim(1.5e3, 1e4)
sub.set_xticklabels([])
sub.set_ylabel(r'$A_\lambda$', fontsize=25)
sub.set_yscale('log')
sub.set_ylim(1e-4, 20.)
sub = fig.add_subplot(312)
for sf, A_lambda in zip(sfing, A_lambdas):
if sf:
sub.plot(wave, A_lambda, c='C0', lw=0.1)
else:
sub.plot(wave, A_lambda, c='C1', lw=0.1)
sub.set_xlim(1.5e3, 1e4)
sub.set_xticklabels([])
sub.set_ylabel(r'$A_\lambda$', fontsize=25)
sub.set_yscale('log')
sub.set_ylim(0.1, 20.)
sub = fig.add_subplot(313)
for A_lambda in A_lambdas:
sub.plot(wave, A_lambda/A_lambda[i3000], c='k', lw=0.1)
sub.set_xlabel('Wavelength [$A$]', fontsize=25)
sub.set_xlim(1.5e3, 1e4)
sub.set_ylim(0., 10.)
sub.set_ylabel(r'$A_\lambda/A_{3000}$', fontsize=25)
fig.savefig(os.path.join(abc_dir, 'abc_attenuation.t%i.png' % T), bbox_inches='tight')
return None
def run_params(name):
''' parameters for abc set up given name
'''
params = {}
if name == 'test' :
params['sim'] = 'simba'
params['dem'] = 'slab_calzetti'
params['prior_min'] = np.array([0., 0., 2.])
params['prior_max'] = np.array([5., 4., 4.])
return params
params['sim'] = name.split('.')[0]
params['dem'] = name.split('.')[1]
params['distance'] = name.split('.')[2]
params['statistic'] = name.split('.')[3]
if params['dem'] == 'slab_noll_m':
#m_tau c_tau m_delta c_delta m_E c_E fneb
params['prior_min'] = np.array([-5., 0., -5., -4., -4., 0., 1.])
params['prior_max'] = np.array([5.0, 6., 5.0, 4.0, 0.0, 4., 4.])
elif params['dem'] == 'slab_noll_msfr':
#m_tau1 m_tau2 c_tau m_delta1 m_delta2 c_delta m_E c_E fneb
params['prior_min'] = np.array([-5., -5., 0., -4., -4., -4., -4., 0., 1.])
params['prior_max'] = np.array([5.0, 5.0, 6., 4.0, 4.0, 4.0, 0.0, 4., 4.])
elif params['dem'] == 'slab_noll_simple':
params['prior_min'] = np.array([0., -4])
params['prior_max'] = np.array([10., 4.])
elif params['dem'] == 'tnorm_noll_msfr':
params['prior_min'] = np.array([-5., -5., 0., -5., -5., 0.1, -4., -4., -4., -4., 0., 1.])
params['prior_max'] = np.array([5.0, 5.0, 6., 5.0, 5.0, 3., 4.0, 4.0, 4.0, 0.0, 4., 4.])
elif params['dem'] == 'slab_noll_msfr_fixbump':
params['prior_min'] = np.array([-5., -5., 0., -4., -4., -4.])
params['prior_max'] = np.array([5.0, 5.0, 6., 4.0, 4.0, 4.0])
elif params['dem'] == 'tnorm_noll_msfr_fixbump':
params['prior_min'] = np.array([-5., -5., 0., -5., -5., 0.1, -4., -4., -4., 1.])
params['prior_max'] = np.array([5.0, 5.0, 6., 5.0, 5.0, 3., 4.0, 4.0, 4.0, 4.])
elif params['dem'] == 'slab_noll_msfr_kink_fixbump':
#m_tau,M*0 m_tau,M*1 m_tau,SFR0 m_tau,SFR1 c_tau m_delta1 m_delta2 c_delta fneb
params['prior_min'] = np.array([-5., -5., -5., -5., 0., -4., -4., -4., 1.])
params['prior_max'] = np.array([5.0, 5.0, 5.0, 5.0, 6., 4.0, 4.0, 4.0, 4.])
elif params['dem'] == 'slab_noll_mssfr_fixbump':
params['prior_min'] = np.array([-5., -5., 0., -4., -4., -4.])
params['prior_max'] = np.array([5.0, 5.0, 6., 4.0, 4.0, 4.0])
else:
raise NotImplementedError
return params
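# Expository note (not in the original script): apart from the special case
# 'test', run_params expects the ABC run name to be dot-separated as
# '<sim>.<dem>.<distance>.<statistic>', e.g. a name shaped like
# 'simba.slab_noll_msfr.L2.3d' (the distance/statistic tokens here are
# illustrative placeholders; only sim and dem select behaviour above).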
if __name__=="__main__":
####################### inputs #######################
fetch = sys.argv[1] == 'True'
name = sys.argv[2] # name of ABC run
i0 = int(sys.argv[3])
i1 = int(sys.argv[4])
sfr0 = sys.argv[5]
if fetch:
pwd = getpass.getpass('<PASSWORD>: ')
######################################################
dat_dir = os.environ['GALPOPFM_DIR']
abc_dir = os.path.join(dat_dir, 'abc', name)
params = run_params(name)
sim = params['sim']
dem = params['dem']
# -*- coding: utf-8 -*-
from wakatime.main import execute
from wakatime.packages import requests
import logging
import os
import time
import shutil
import sys
import uuid
from testfixtures import log_capture
from wakatime.compat import u, is_py3
from wakatime.constants import (
API_ERROR,
AUTH_ERROR,
MAX_FILE_SIZE_SUPPORTED,
SUCCESS,
)
from wakatime.packages import tzlocal
from wakatime.packages.requests.exceptions import RequestException
from wakatime.packages.requests.models import Response
from . import utils
from .utils import ANY, CustomResponse
class MainTestCase(utils.TestCase):
patch_these = [
'time.sleep',
'wakatime.packages.requests.adapters.HTTPAdapter.send',
'wakatime.offlinequeue.Queue.push',
['wakatime.offlinequeue.Queue.pop', None],
['wakatime.offlinequeue.Queue.connect', None],
'wakatime.session_cache.SessionCache.save',
'wakatime.session_cache.SessionCache.delete',
['wakatime.session_cache.SessionCache.get', requests.session],
['wakatime.session_cache.SessionCache.connect', None],
]
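# Expository note (not in the original tests): patch_these is consumed by the
# utils.TestCase base class, which appears to patch each dotted path for every
# test in this class; a two-element list entry also pins the mock's return
# value (e.g. Queue.pop is patched to return None). This is inferred from how
# the entries are written, not from the helper's implementation.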
def test_500_response(self):
response = Response()
response.status_code = 500
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
now = u(int(time.time()))
key = str(<KEY>
heartbeat = {
'language': 'Text only',
'entity': 'HIDDEN.txt',
'project': None,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
}
args = ['--file', entity, '--key', key,
'--config', 'tests/samples/configs/paranoid.cfg', '--time', now]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertNothingPrinted()
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatSavedOffline()
self.assertOfflineHeartbeatsNotSynced()
self.assertSessionCacheDeleted()
def test_400_response(self):
response = Response()
response.status_code = 400
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
now = u(int(time.time()))
key = str(<KEY>())
heartbeat = {
'language': 'Text only',
'entity': 'HIDDEN.txt',
'project': None,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
}
args = ['--file', entity, '--key', key,
'--config', 'tests/samples/configs/paranoid.cfg', '--time', now]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertNothingPrinted()
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsNotSynced()
self.assertSessionCacheDeleted()
def test_401_response(self):
response = Response()
response.status_code = 401
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
now = u(int(time.time()))
key = str(<KEY>())
heartbeat = {
'language': 'Text only',
'lines': None,
'entity': 'HIDDEN.txt',
'project': None,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
}
args = ['--file', entity, '--key', key,
'--config', 'tests/samples/configs/paranoid.cfg', '--time', now]
retval = execute(args)
self.assertEquals(retval, AUTH_ERROR)
self.assertNothingPrinted()
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatSavedOffline()
self.assertOfflineHeartbeatsNotSynced()
self.assertSessionCacheDeleted()
@log_capture()
def test_500_response_without_offline_logging(self, logs):
logging.disable(logging.NOTSET)
response = Response()
response.status_code = 500
response._content = 'fake content'
if is_py3:
response._content = 'fake content'.encode('utf8')
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
now = u(int(time.time()))
key = str(<KEY>())
heartbeat = {
'language': 'Text only',
'lines': 2,
'entity': entity,
'project': None,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
'dependencies': [],
}
args = ['--file', entity, '--key', key, '--disable-offline',
'--config', 'tests/samples/configs/good_config.cfg', '--time', now]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertNothingPrinted()
actual = self.getLogOutput(logs)
expected = "WakaTime ERROR {'response_code': 500, 'response_content': u'fake content'}"
if actual[-2] == '0':
expected = "WakaTime ERROR {'response_content': u'fake content', 'response_code': 500}"
if is_py3:
expected = "WakaTime ERROR {'response_code': 500, 'response_content': 'fake content'}"
if actual[-2] == '0':
expected = "WakaTime ERROR {'response_content': 'fake content', 'response_code': 500}"
self.assertEquals(expected, actual)
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsNotSynced()
self.assertSessionCacheDeleted()
@log_capture()
def test_requests_exception(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].side_effect = RequestException('requests exception')
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
now = u(int(time.time()))
key = str(<KEY>
heartbeat = {
'language': 'Text only',
'lines': 2,
'entity': entity,
'project': None,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
'dependencies': [],
}
args = ['--file', entity, '--key', key, '--verbose',
'--config', 'tests/samples/configs/good_config.cfg', '--time', now]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertNothingPrinted()
actual = self.getLogOutput(logs)
expected = 'Parsing dependencies not supported for special.TextParser'
self.assertIn(expected, actual)
expected = 'WakaTime DEBUG Sending heartbeats to api at https://api.wakatime.com/api/v1/users/current/heartbeats.bulk'
self.assertIn(expected, actual)
expected = "RequestException': u'requests exception'"
if is_py3:
expected = "RequestException': 'requests exception'"
self.assertIn(expected, actual)
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatSavedOffline()
self.assertOfflineHeartbeatsNotSynced()
self.assertSessionCacheDeleted()
@log_capture()
def test_requests_exception_without_offline_logging(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].side_effect = RequestException('requests exception')
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
now = u(int(time.time()))
key = str(<KEY>())
args = ['--file', entity, '--key', key, '--disable-offline',
'--config', 'tests/samples/configs/good_config.cfg', '--time', now]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertNothingPrinted()
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
expected = "WakaTime ERROR {'RequestException': u'requests exception'}"
if is_py3:
expected = "WakaTime ERROR {'RequestException': 'requests exception'}"
self.assertEquals(expected, log_output)
self.assertHeartbeatSent()
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsNotSynced()
self.assertSessionCacheDeleted()
@log_capture()
def test_invalid_api_key(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
config = 'tests/samples/configs/missing_api_key.cfg'
args = ['--config', config, '--key', 'invalid-api-key']
retval = execute(args)
self.assertEquals(retval, AUTH_ERROR)
self.assertEquals(sys.stdout.getvalue(), '')
expected = 'error: Invalid api key. Find your api key from wakatime.com/settings/api-key.'
self.assertIn(expected, sys.stderr.getvalue())
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
expected = ''
self.assertEquals(log_output, expected)
self.assertHeartbeatNotSent()
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsNotSynced()
self.assertSessionCacheUntouched()
def test_nonascii_hostname(self):
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
hostname = 'test汉语' if is_py3 else 'test\xe6\xb1\x89\xe8\xaf\xad'
with utils.mock.patch('socket.gethostname') as mock_gethostname:
mock_gethostname.return_value = hostname
self.assertEquals(type(hostname).__name__, 'str')
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
headers = {
'X-Machine-Name': hostname.encode('utf-8') if is_py3 else hostname,
}
self.assertHeartbeatSent(headers=headers)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
def test_nonascii_timezone(self):
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
class TZ(object):
@property
def zone(self):
return 'tz汉语' if is_py3 else 'tz\xe6\xb1\x89\xe8\xaf\xad'
timezone = TZ()
with utils.mock.patch('wakatime.packages.tzlocal.get_localzone') as mock_getlocalzone:
mock_getlocalzone.return_value = timezone
config = 'tests/samples/configs/has_everything.cfg'
timeout = 15
args = ['--file', entity, '--config', config, '--timeout', u(timeout)]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
headers = {
'TimeZone': u(timezone.zone).encode('utf-8') if is_py3 else timezone.zone,
}
self.assertHeartbeatSent(headers=headers, proxies=ANY, timeout=timeout)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
def test_timezone_with_invalid_encoding(self):
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
class TZ(object):
@property
def zone(self):
return bytes('\xab', 'utf-16') if is_py3 else '\xab'
timezone = TZ()
with self.assertRaises(UnicodeDecodeError):
timezone.zone.decode('utf8')
with utils.mock.patch('wakatime.packages.tzlocal.get_localzone') as mock_getlocalzone:
mock_getlocalzone.return_value = timezone
timeout = 15
config = 'tests/samples/configs/has_everything.cfg'
args = ['--file', entity, '--config', config, '--timeout', u(timeout)]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
headers = {
'TimeZone': u(bytes('\xab', 'utf-16') if is_py3 else '\xab').encode('utf-8'),
}
self.assertHeartbeatSent(headers=headers, proxies=ANY, timeout=timeout)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
def test_tzlocal_exception(self):
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
with utils.mock.patch('wakatime.packages.tzlocal.get_localzone') as mock_getlocalzone:
mock_getlocalzone.side_effect = Exception('tzlocal exception')
timeout = 15
config = 'tests/samples/configs/has_everything.cfg'
args = ['--file', entity, '--config', config, '--timeout', u(timeout)]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
headers = {
'TimeZone': None,
}
self.assertHeartbeatSent(headers=headers, proxies=ANY, timeout=timeout)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
def test_timezone_header(self):
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with utils.TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
timezone = tzlocal.get_localzone()
headers = {
'TimeZone': u(timezone.zone).encode('utf-8') if is_py3 else timezone.zone,
}
self.assertHeartbeatSent(headers=headers)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_nonascii_filename(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with utils.TemporaryDirectory() as tempdir:
filename = list(filter(lambda x: x.endswith('.txt'), os.listdir(u('tests/samples/codefiles/unicode'))))[0]
entity = os.path.join('tests/samples/codefiles/unicode', filename)
shutil.copy(entity, os.path.join(tempdir, filename))
entity = os.path.realpath(os.path.join(tempdir, filename))
now = u(int(time.time()))
config = 'tests/samples/configs/good_config.cfg'
key = <KEY>
heartbeat = {
'language': 'Text only',
'lines': 0,
'entity': os.path.realpath(entity),
'project': None,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
'dependencies': [],
}
args = ['--file', entity, '--key', key, '--config', config, '--time', now]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_nonascii_filename_saved_when_offline(self, logs):
logging.disable(logging.NOTSET)
response = Response()
response.status_code = 500
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with utils.TemporaryDirectory() as tempdir:
filename = list(filter(lambda x: x.endswith('.txt'), os.listdir(u('tests/samples/codefiles/unicode'))))[0]
entity = os.path.join('tests/samples/codefiles/unicode', filename)
shutil.copy(entity, os.path.join(tempdir, filename))
entity = os.path.realpath(os.path.join(tempdir, filename))
now = u(int(time.time()))
config = 'tests/samples/configs/good_config.cfg'
key = str(uuid.uuid4())
heartbeat = {
'language': 'Text only',
'lines': 0,
'entity': os.path.realpath(entity),
'project': None,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
'dependencies': [],
}
args = ['--file', entity, '--key', key, '--config', config, '--time', now]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatSavedOffline()
self.assertOfflineHeartbeatsNotSynced()
self.assertSessionCacheDeleted()
@log_capture()
def test_unhandled_exception(self, logs):
logging.disable(logging.NOTSET)
with utils.mock.patch('wakatime.main.send_heartbeats') as mock_send:
ex_msg = 'testing unhandled exception'
mock_send.side_effect = RuntimeError(ex_msg)
entity = 'tests/samples/codefiles/twolinefile.txt'
config = 'tests/samples/configs/good_config.cfg'
key = <KEY>
args = ['--entity', entity, '--key', key, '--config', config]
execute(args)
self.assertIn(ex_msg, sys.stdout.getvalue())
self.assertEquals(sys.stderr.getvalue(), '')
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
self.assertIn(ex_msg, log_output)
self.assertHeartbeatNotSent()
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsNotSynced()
self.assertSessionCacheUntouched()
def test_large_file_skips_lines_count(self):
response = Response()
response.status_code = 0
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
entity = 'tests/samples/codefiles/twolinefile.txt'
config = 'tests/samples/configs/good_config.cfg'
now = u(int(time.time()))
heartbeat = {
'language': 'Text only',
'lines': None,
'entity': os.path.realpath(entity),
'project': os.path.basename(os.path.abspath('.')),
'cursorpos': None,
'lineno': None,
'branch': ANY,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
'dependencies': [],
}
args = ['--entity', entity, '--config', config, '--time', now]
with utils.mock.patch('os.path.getsize') as mock_getsize:
mock_getsize.return_value = MAX_FILE_SIZE_SUPPORTED + 1
retval | |
" \
f"{set(self._successor)}, left adjacency {self._adj_left} with " \
f"{'same' if self._adj_left_same_direction else 'opposite'} direction, and " \
f"right adjacency with {'same' if self._adj_right_same_direction else 'opposite'} direction"
def __repr__(self):
return f"Lanelet(left_vertices={self._left_vertices.tolist()}, " \
f"center_vertices={self._center_vertices.tolist()}, " \
f"right_vertices={self._right_vertices.tolist()}, lanelet_id={self._lanelet_id}, " \
f"predecessor={self._predecessor}, successor={self._successor}, adjacent_left={self._adj_left}, " \
f"adjacent_left_same_direction={self._adj_left_same_direction}, adjacent_right={self._adj_right}, " \
f"adjacent_right_same_direction={self._adj_right_same_direction}, " \
f"line_marking_left_vertices={self._line_marking_left_vertices}, " \
f"line_marking_right_vertices={self._line_marking_right_vertices}), " \
f"stop_line={repr(self._stop_line)}, lanelet_type={self._lanelet_type}, " \
f"user_one_way={self._user_one_way}, " \
f"user_bidirectional={self._user_bidirectional}, traffic_signs={self._traffic_signs}, " \
f"traffic_lights={self._traffic_lights}"
@property
def distance(self) -> np.ndarray:
"""
:returns cumulative distance along center vertices
"""
if self._distance is None:
self._distance = self._compute_polyline_cumsum_dist([self.center_vertices])
return self._distance
@distance.setter
def distance(self, _):
warnings.warn('<Lanelet/distance> distance of lanelet is immutable')
@property
def inner_distance(self) -> np.ndarray:
"""
:returns minimum cumulative distance along left and right vertices, i.e., along the inner curve:
"""
if self._inner_distance is None:
self._inner_distance = self._compute_polyline_cumsum_dist([self.left_vertices, self.right_vertices])
return self._inner_distance
@property
def lanelet_id(self) -> int:
return self._lanelet_id
@lanelet_id.setter
def lanelet_id(self, l_id: int):
if self._lanelet_id is None:
assert is_natural_number(l_id), '<Lanelet/lanelet_id>: Provided lanelet_id is not valid! id={}'.format(l_id)
self._lanelet_id = l_id
else:
warnings.warn('<Lanelet/lanelet_id>: lanelet_id of lanelet is immutable')
@property
def left_vertices(self) -> np.ndarray:
return self._left_vertices
@left_vertices.setter
def left_vertices(self, polyline: np.ndarray):
if self._left_vertices is None:
            assert is_valid_polyline(polyline), '<Lanelet/left_vertices>: The provided polyline ' \
                                                'is not valid! id = {}, polyline = {}'.format(self._lanelet_id,
                                                                                              polyline)
            self._left_vertices = polyline
else:
warnings.warn('<Lanelet/left_vertices>: left_vertices of lanelet are immutable!')
@property
def right_vertices(self) -> np.ndarray:
return self._right_vertices
@right_vertices.setter
def right_vertices(self, polyline: np.ndarray):
if self._right_vertices is None:
assert is_valid_polyline(polyline), '<Lanelet/right_vertices>: The provided polyline ' \
'is not valid! id = {}, polyline = {}'.format(self._lanelet_id,
polyline)
self._right_vertices = polyline
else:
warnings.warn('<Lanelet/right_vertices>: right_vertices of lanelet are immutable!')
@staticmethod
def _compute_polyline_cumsum_dist(polylines: List[np.ndarray], comparator=np.amin):
d = []
for polyline in polylines:
d.append(np.diff(polyline, axis=0))
segment_distances = np.empty((len(polylines[0]), len(polylines)))
for i, d_tmp in enumerate(d):
segment_distances[:, i] = np.append([0], np.sqrt((np.square(d_tmp)).sum(axis=1)))
return np.cumsum(comparator(segment_distances, axis=1))
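    # A minimal sketch of what _compute_polyline_cumsum_dist computes (values are illustrative):
    # for a single polyline [[0, 0], [3, 4], [6, 8]] the per-segment lengths are [5, 5],
    # a leading 0 is prepended per polyline, and the cumulative sum yields [0, 5, 10].
    # With several polylines, `comparator` (np.amin by default) is applied segment-wise across
    # them before the cumulative sum, which is how inner_distance follows the shorter of the
    # left/right boundaries.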
@property
def center_vertices(self) -> np.ndarray:
return self._center_vertices
@center_vertices.setter
def center_vertices(self, polyline: np.ndarray):
if self._center_vertices is None:
assert is_valid_polyline(
polyline), '<Lanelet/center_vertices>: The provided polyline is not valid! polyline = {}'.format(
polyline)
self._center_vertices = polyline
else:
warnings.warn('<Lanelet/center_vertices>: center_vertices of lanelet are immutable!')
@property
def line_marking_left_vertices(self) -> LineMarking:
return self._line_marking_left_vertices
@line_marking_left_vertices.setter
def line_marking_left_vertices(self, line_marking_left_vertices: LineMarking):
if self._line_marking_left_vertices is None:
assert isinstance(line_marking_left_vertices,
LineMarking), '<Lanelet/line_marking_left_vertices>: Provided lane marking type of ' \
'left boundary is not valid! type = {}'.format(
type(line_marking_left_vertices))
            self._line_marking_left_vertices = line_marking_left_vertices
else:
warnings.warn('<Lanelet/line_marking_left_vertices>: line_marking_left_vertices of lanelet is immutable!')
@property
def line_marking_right_vertices(self) -> LineMarking:
return self._line_marking_right_vertices
@line_marking_right_vertices.setter
def line_marking_right_vertices(self, line_marking_right_vertices: LineMarking):
if self._line_marking_right_vertices is None:
assert isinstance(line_marking_right_vertices,
LineMarking), '<Lanelet/line_marking_right_vertices>: Provided lane marking type of ' \
'right boundary is not valid! type = {}'.format(
type(line_marking_right_vertices))
            self._line_marking_right_vertices = line_marking_right_vertices
else:
warnings.warn('<Lanelet/line_marking_right_vertices>: line_marking_right_vertices of lanelet is immutable!')
@property
def predecessor(self) -> list:
return self._predecessor
@predecessor.setter
def predecessor(self, predecessor: list):
if self._predecessor is None:
assert (is_list_of_natural_numbers(predecessor) and len(predecessor) >= 0), '<Lanelet/predecessor>: ' \
'Provided list ' \
'of predecessors is not ' \
'valid!' \
'predecessors = {}'.format(
predecessor)
self._predecessor = predecessor
else:
warnings.warn('<Lanelet/predecessor>: predecessor of lanelet is immutable!')
@property
def successor(self) -> list:
return self._successor
@successor.setter
def successor(self, successor: list):
if self._successor is None:
            assert (is_list_of_natural_numbers(successor) and len(successor) >= 0), '<Lanelet/successor>: Provided ' \
                                                                                    'list of successors is not valid! ' \
                                                                                    'successors = {}'.format(successor)
self._successor = successor
else:
warnings.warn('<Lanelet/successor>: successor of lanelet is immutable!')
@property
def adj_left(self) -> int:
return self._adj_left
@adj_left.setter
def adj_left(self, l_id: int):
if self._adj_left is None:
assert is_natural_number(l_id), '<Lanelet/adj_left>: provided id is not valid! id={}'.format(l_id)
self._adj_left = l_id
else:
warnings.warn('<Lanelet/adj_left>: adj_left of lanelet is immutable')
@property
def adj_left_same_direction(self) -> bool:
return self._adj_left_same_direction
@adj_left_same_direction.setter
def adj_left_same_direction(self, same: bool):
if self._adj_left_same_direction is None:
assert isinstance(same, bool), '<Lanelet/adj_left_same_direction>: provided direction ' \
'is not of type bool! type = {}'.format(type(same))
self._adj_left_same_direction = same
else:
warnings.warn('<Lanelet/adj_left_same_direction>: adj_left_same_direction of lanelet is immutable')
@property
def adj_right(self) -> int:
return self._adj_right
@adj_right.setter
def adj_right(self, l_id: int):
if self._adj_right is None:
assert is_natural_number(l_id), '<Lanelet/adj_right>: provided id is not valid! id={}'.format(l_id)
self._adj_right = l_id
else:
warnings.warn('<Lanelet/adj_right>: adj_right of lanelet is immutable')
@property
def adj_right_same_direction(self) -> bool:
return self._adj_right_same_direction
@adj_right_same_direction.setter
def adj_right_same_direction(self, same: bool):
if self._adj_right_same_direction is None:
assert isinstance(same, bool), '<Lanelet/adj_right_same_direction>: provided direction ' \
'is not of type bool! type = {}'.format(type(same))
self._adj_right_same_direction = same
else:
warnings.warn('<Lanelet/adj_right_same_direction>: adj_right_same_direction of lanelet is immutable')
@property
def dynamic_obstacles_on_lanelet(self) -> Dict[int, Set[int]]:
return self._dynamic_obstacles_on_lanelet
@dynamic_obstacles_on_lanelet.setter
def dynamic_obstacles_on_lanelet(self, obstacle_ids: Dict[int, Set[int]]):
assert isinstance(obstacle_ids, dict), '<Lanelet/obstacles_on_lanelet>: provided dictionary of ids is not a ' \
'dictionary! type = {}'.format(type(obstacle_ids))
self._dynamic_obstacles_on_lanelet = obstacle_ids
@property
def static_obstacles_on_lanelet(self) -> Union[None, Set[int]]:
return self._static_obstacles_on_lanelet
@static_obstacles_on_lanelet.setter
def static_obstacles_on_lanelet(self, obstacle_ids: Set[int]):
assert isinstance(obstacle_ids, set), '<Lanelet/obstacles_on_lanelet>: provided list of ids is not a ' \
'set! type = {}'.format(type(obstacle_ids))
self._static_obstacles_on_lanelet = obstacle_ids
@property
def stop_line(self) -> StopLine:
return self._stop_line
@stop_line.setter
def stop_line(self, stop_line: StopLine):
if self._stop_line is None:
assert isinstance(stop_line,
StopLine), '<Lanelet/stop_line>: ''Provided type is not valid! type = {}'.format(
type(stop_line))
self._stop_line = stop_line
else:
warnings.warn('<Lanelet/stop_line>: stop_line of lanelet is immutable!', stacklevel=1)
@property
def lanelet_type(self) -> Set[LaneletType]:
return self._lanelet_type
@lanelet_type.setter
def lanelet_type(self, lanelet_type: Set[LaneletType]):
if self._lanelet_type is None or len(self._lanelet_type) == 0:
assert isinstance(lanelet_type, set) and all(isinstance(elem, LaneletType) for elem in
lanelet_type), '<Lanelet/lanelet_type>: ''Provided type is ' \
'not valid! type = {}, ' \
'expected = Set[LaneletType]'.format(
type(lanelet_type))
self._lanelet_type = lanelet_type
else:
warnings.warn('<Lanelet/lanelet_type>: type of lanelet is immutable!')
@property
def user_one_way(self) -> Set[RoadUser]:
return self._user_one_way
@user_one_way.setter
def user_one_way(self, user_one_way: Set[RoadUser]):
if self._user_one_way is None:
assert isinstance(user_one_way, set) and all(
isinstance(elem, RoadUser) for elem in user_one_way), '<Lanelet/user_one_way>: ' \
'Provided type is ' \
'not valid! type = {}'.format(
type(user_one_way))
self._user_one_way = user_one_way
else:
warnings.warn('<Lanelet/user_one_way>: user_one_way of lanelet is immutable!')
@property
def user_bidirectional(self) -> Set[RoadUser]:
return self._user_bidirectional
@user_bidirectional.setter
def user_bidirectional(self, user_bidirectional: Set[RoadUser]):
if self._user_bidirectional is None:
assert isinstance(user_bidirectional, set) and all(
isinstance(elem, RoadUser) for elem in user_bidirectional), '<Lanelet/user_bidirectional>: ' \
'Provided type is not valid! type' \
' = {}'.format(type(user_bidirectional))
self._user_bidirectional = user_bidirectional
else:
warnings.warn('<Lanelet/user_bidirectional>: user_bidirectional of lanelet is immutable!')
@property
def traffic_signs(self) -> Set[int]:
return self._traffic_signs
@traffic_signs.setter
def traffic_signs(self, traffic_sign_ids: Set[int]):
if self._traffic_signs is None:
assert isinstance(traffic_sign_ids, set), '<Lanelet/traffic_signs>: provided list of ids is not a ' \
'set! type = {}'.format(type(traffic_sign_ids))
self._traffic_signs = traffic_sign_ids
else:
warnings.warn('<Lanelet/traffic_signs>: traffic_signs of lanelet is immutable!')
@property
def traffic_lights(self) -> Set[int]:
return self._traffic_lights
@traffic_lights.setter
def traffic_lights(self, traffic_light_ids: Set[int]):
if self._traffic_lights is None:
assert isinstance(traffic_light_ids, set), '<Lanelet/traffic_lights>: provided list of ids is not a ' \
'set! type = {}'.format(type(traffic_light_ids))
self._traffic_lights = traffic_light_ids
else:
warnings.warn('<Lanelet/traffic_lights>: traffic_lights of lanelet is immutable!')
@property
def polygon(self) -> Polygon:
return self._polygon
def add_predecessor(self, lanelet: int):
"""
Adds the ID of a predecessor lanelet to the list of predecessors.
:param lanelet: Predecessor lanelet ID.
"""
if lanelet not in self.predecessor:
self.predecessor.append(lanelet)
def remove_predecessor(self, lanelet: int):
"""
Removes the ID of a predecessor lanelet from the list of predecessors.
:param lanelet: Predecessor lanelet ID.
"""
if lanelet in self.predecessor:
self.predecessor.remove(lanelet)
def add_successor(self, lanelet: int):
"""
Adds the ID of a successor lanelet to the list of successors.
:param lanelet: Successor lanelet ID.
"""
if lanelet not in self.successor:
self.successor.append(lanelet)
def remove_successor(self, lanelet: int):
"""
Removes the ID of a successor lanelet from the list of successors.
:param lanelet: Successor lanelet ID.
"""
if lanelet in self.successor:
self.successor.remove(lanelet)
def translate_rotate(self, translation: np.ndarray, angle: float):
"""
This method translates and rotates a lanelet
:param translation: The translation given as [x_off,y_off] for the x and y translation
:param angle: The rotation angle in radian (counter-clockwise defined)
"""
assert is_real_number_vector(translation, 2), '<Lanelet/translate_rotate>: provided translation ' \
'is not valid! translation = {}'.format(translation)
assert is_valid_orientation(
angle), '<Lanelet/translate_rotate>: provided angle is not valid! angle = {}'.format(angle)
# create transformation matrix
t_m = commonroad.geometry.transform.translation_rotation_matrix(translation, angle)
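        # Assumption: translation_rotation_matrix returns a 3x3 homogeneous matrix of the form
        # [[R, t], [0, 1]], which is why the vertices are stacked with a row of ones below and
        # only the first two rows of the product are kept.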
# transform center vertices
tmp = t_m.dot(np.vstack((self.center_vertices.transpose(), np.ones((1, self.center_vertices.shape[0])))))
tmp = tmp[0:2, :]
self._center_vertices = tmp.transpose()
# transform left vertices
tmp = t_m.dot(np.vstack((self.left_vertices.transpose(), np.ones((1, self.left_vertices.shape[0])))))
tmp = tmp[0:2, :]
self._left_vertices = tmp.transpose()
# transform right vertices
tmp = t_m.dot(np.vstack((self.right_vertices.transpose(), np.ones((1, self.right_vertices.shape[0])))))
tmp = tmp[0:2, :]
self._right_vertices = tmp.transpose()
# transform the stop line
if self._stop_line is not None:
self._stop_line.translate_rotate(translation, angle)
# recreate polygon in case it existed
| |
no fix is yet
available.
packageType: The type of package (e.g. OS, MAVEN, GO).
"""
class EffectiveSeverityValueValuesEnum(_messages.Enum):
r"""Output only. The distro or language system assigned severity for this
vulnerability when that is available and note provider assigned severity
when it is not available.
Values:
SEVERITY_UNSPECIFIED: Unknown.
MINIMAL: Minimal severity.
LOW: Low severity.
MEDIUM: Medium severity.
HIGH: High severity.
CRITICAL: Critical severity.
"""
SEVERITY_UNSPECIFIED = 0
MINIMAL = 1
LOW = 2
MEDIUM = 3
HIGH = 4
CRITICAL = 5
affectedCpeUri = _messages.StringField(1)
affectedPackage = _messages.StringField(2)
affectedVersion = _messages.MessageField('Version', 3)
effectiveSeverity = _messages.EnumField('EffectiveSeverityValueValuesEnum', 4)
fixAvailable = _messages.BooleanField(5)
fixedCpeUri = _messages.StringField(6)
fixedPackage = _messages.StringField(7)
fixedVersion = _messages.MessageField('Version', 8)
packageType = _messages.StringField(9)
class PackageOccurrence(_messages.Message):
r"""Details on how a particular software package was installed on a system.
Fields:
location: Required. All of the places within the filesystem versions of
this package have been found.
name: Output only. The name of the installed package.
"""
location = _messages.MessageField('Location', 1, repeated=True)
name = _messages.StringField(2)
class ProjectRepoId(_messages.Message):
r"""Selects a repo using a Google Cloud Platform project ID (e.g., winged-
cargo-31) and a repo name within that project.
Fields:
projectId: The ID of the project.
repoName: The name of the repo. Leave empty for the default repo.
"""
projectId = _messages.StringField(1)
repoName = _messages.StringField(2)
class Recipe(_messages.Message):
r"""Steps taken to build the artifact. For a TaskRun, typically each
container corresponds to one step in the recipe.
Messages:
ArgumentsValueListEntry: A ArgumentsValueListEntry object.
EnvironmentValueListEntry: A EnvironmentValueListEntry object.
Fields:
arguments: Collection of all external inputs that influenced the build on
top of recipe.definedInMaterial and recipe.entryPoint. For example, if
the recipe type were "make", then this might be the flags passed to make
aside from the target, which is captured in recipe.entryPoint. Since the
arguments field can greatly vary in structure, depending on the builder
and recipe type, this is of form "Any".
definedInMaterial: Index in materials containing the recipe steps that are
not implied by recipe.type. For example, if the recipe type were "make",
then this would point to the source containing the Makefile, not the
make program itself. Set to -1 if the recipe doesn't come from a
material, as zero is default unset value for int64.
entryPoint: String identifying the entry point into the build. This is
often a path to a configuration file and/or a target label within that
file. The syntax and meaning are defined by recipe.type. For example, if
the recipe type were "make", then this would reference the directory in
which to run make as well as which target to use.
environment: Any other builder-controlled inputs necessary for correctly
evaluating the recipe. Usually only needed for reproducing the build but
not evaluated as part of policy. Since the environment field can greatly
vary in structure, depending on the builder and recipe type, this is of
form "Any".
type: URI indicating what type of recipe was performed. It determines the
meaning of recipe.entryPoint, recipe.arguments, recipe.environment, and
materials.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ArgumentsValueListEntry(_messages.Message):
r"""A ArgumentsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a ArgumentsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ArgumentsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class EnvironmentValueListEntry(_messages.Message):
r"""A EnvironmentValueListEntry object.
Messages:
AdditionalProperty: An additional property for a
EnvironmentValueListEntry object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a EnvironmentValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
arguments = _messages.MessageField('ArgumentsValueListEntry', 1, repeated=True)
definedInMaterial = _messages.IntegerField(2)
entryPoint = _messages.StringField(3)
environment = _messages.MessageField('EnvironmentValueListEntry', 4, repeated=True)
type = _messages.StringField(5)
class RelatedUrl(_messages.Message):
r"""Metadata for any related URL information.
Fields:
label: Label to describe usage of the URL.
url: Specific URL associated with the resource.
"""
label = _messages.StringField(1)
url = _messages.StringField(2)
class RepoId(_messages.Message):
r"""A unique identifier for a Cloud Repo.
Fields:
projectRepoId: A combination of a project ID and a repo name.
uid: A server-assigned, globally unique identifier.
"""
projectRepoId = _messages.MessageField('ProjectRepoId', 1)
uid = _messages.StringField(2)
class Signature(_messages.Message):
r"""Verifiers (e.g. Kritis implementations) MUST verify signatures with
respect to the trust anchors defined in policy (e.g. a Kritis policy).
Typically this means that the verifier has been configured with a map from
`public_key_id` to public key material (and any required parameters, e.g.
signing algorithm). In particular, verification implementations MUST NOT
treat the signature `public_key_id` as anything more than a key lookup hint.
The `public_key_id` DOES NOT validate or authenticate a public key; it only
provides a mechanism for quickly selecting a public key ALREADY CONFIGURED
on the verifier through a trusted channel. Verification implementations MUST
reject signatures in any of the following circumstances: * The
`public_key_id` is not recognized by the verifier. * The public key that
`public_key_id` refers to does not verify the signature with respect to the
payload. The `signature` contents SHOULD NOT be "attached" (where the
payload is included with the serialized `signature` bytes). Verifiers MUST
ignore any "attached" payload and only verify signatures with respect to
explicitly provided payload (e.g. a `payload` field on the proto message
that holds this Signature, or the canonical serialization of the proto
message that holds this signature).
Fields:
publicKeyId: The identifier for the public key that verifies this
signature. * The `public_key_id` is required. * The `public_key_id`
SHOULD be an RFC3986 conformant URI. * When possible, the
`public_key_id` SHOULD be an immutable reference, such as a
cryptographic digest. Examples of valid `public_key_id`s: OpenPGP V4
public key fingerprint: *
"openpgp4fpr:74FAF3B861BDA0870C7B6DEF607E48D2A663AEEA" See
https://www.iana.org/assignments/uri-schemes/prov/openpgp4fpr for more
details on this scheme. RFC6920 digest-named SubjectPublicKeyInfo
(digest of the DER serialization): *
"ni:///sha-256;cD9o9Cq6LG3jD0iKXqEi_vdjJGecm_iXkbqVoScViaU" * "nih:///sh
a-256;703f68f42aba2c6de30f488a5ea122fef76324679c9bf89791ba95a1271589a5"
signature: The content of the signature, an opaque bytestring. The payload
that this signature verifies MUST be unambiguously provided with the
Signature during verification. A wrapper message might provide the
payload explicitly. Alternatively, a message might have a canonical
serialization that can always be unambiguously computed to derive the
payload.
"""
publicKeyId = _messages.StringField(1)
signature = _messages.BytesField(2)
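# A minimal usage sketch (not part of the generated API surface): apitools message classes
# accept their fields as keyword arguments, so a detached signature could be represented as
#   Signature(publicKeyId='openpgp4fpr:74FAF3B861BDA0870C7B6DEF607E48D2A663AEEA',
#             signature=b'...opaque signature bytes...')
# where the payload being verified is supplied separately by the wrapper message.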
class SlsaBuilder(_messages.Message):
r"""A SlsaBuilder object.
Fields:
id: A string attribute.
"""
id = _messages.StringField(1)
class SlsaCompleteness(_messages.Message):
r"""Indicates that the builder claims certain fields in this message to be
complete.
Fields:
arguments: If true, the builder claims that recipe.arguments is complete,
meaning that all external inputs are properly captured in the recipe.
environment: If true, the builder claims that recipe.environment is
claimed to be complete.
materials: If true, the builder claims that materials are complete,
usually through some controls to prevent network access. Sometimes
called "hermetic".
"""
arguments = _messages.BooleanField(1)
environment = _messages.BooleanField(2)
materials = _messages.BooleanField(3)
class SlsaMetadata(_messages.Message):
r"""Other properties of the build.
Fields:
buildFinishedOn: The timestamp of when the build completed.
buildInvocationId: Identifies the particular build invocation, which can
be useful for finding associated logs or other ad-hoc analysis. The
value SHOULD be globally unique, per in-toto Provenance spec.
buildStartedOn: The timestamp of when the build started.
completeness: Indicates that the builder claims certain fields in this
message to be complete.
reproducible: If true, the builder claims that running the recipe on
materials will produce bit-for-bit identical output.
"""
buildFinishedOn = _messages.StringField(1)
buildInvocationId = _messages.StringField(2)
buildStartedOn = _messages.StringField(3)
completeness = _messages.MessageField('SlsaCompleteness', 4)
reproducible = _messages.BooleanField(5)
class SlsaProvenance(_messages.Message):
r"""A SlsaProvenance object.
Fields:
builder: required
materials: The collection of artifacts that influenced the build including
sources, dependencies, build tools, base images, and so on. This is
considered to be incomplete unless metadata.completeness.materials is
true. Unset or null is equivalent to empty.
metadata: A SlsaMetadata attribute.
recipe: Identifies the configuration used for the build. When combined
with materials, this SHOULD fully describe the build, such that re-
running this recipe results in bit-for-bit identical output (if the
build is reproducible). required
"""
builder = | |
<filename>utils/map_features_utils.py
"""This module is used for computing map features for motion forecasting baselines."""
from typing import Any, Dict, List, Tuple, Sequence
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from shapely.geometry import LineString, Point, Polygon
from shapely.ops import cascaded_union
from argoverse.map_representation.map_api import ArgoverseMap
from argoverse.utils.centerline_utils import (
get_nt_distance,
remove_overlapping_lane_seq,
get_normal_and_tangential_distance_point,
)
from argoverse.utils.mpl_plotting_utils import visualize_centerline
from argoverse.utils.line_projection import project_to_line_seq
from utils.baseline_config import (
_MANHATTAN_THRESHOLD,
_DFS_THRESHOLD_FRONT_SCALE,
_DFS_THRESHOLD_BACK_SCALE,
_MAX_SEARCH_RADIUS_CENTERLINES,
_MAX_CENTERLINE_CANDIDATES_TEST,
NEARBY_DISTANCE_THRESHOLD, # this already exists so following the convention in social utils
FRONT_OR_BACK_OFFSET_THRESHOLD,
)
class MapFeaturesUtils:
"""Utils for computation of map-based features."""
def __init__(self):
"""Initialize class."""
self._MANHATTAN_THRESHOLD = _MANHATTAN_THRESHOLD
self._DFS_THRESHOLD_FRONT_SCALE = _DFS_THRESHOLD_FRONT_SCALE
self._DFS_THRESHOLD_BACK_SCALE = _DFS_THRESHOLD_BACK_SCALE
self._MAX_SEARCH_RADIUS_CENTERLINES = _MAX_SEARCH_RADIUS_CENTERLINES
self._MAX_CENTERLINE_CANDIDATES_TEST = _MAX_CENTERLINE_CANDIDATES_TEST
self.NEARBY_DISTANCE_THRESHOLD = NEARBY_DISTANCE_THRESHOLD
def get_point_in_polygon_score(self, lane_seq: List[int],
xy_seq: np.ndarray, city_name: str,
avm: ArgoverseMap) -> int:
"""Get the number of coordinates that lie insde the lane seq polygon.
Args:
lane_seq: Sequence of lane ids
xy_seq: Trajectory coordinates
city_name: City name (PITT/MIA)
avm: Argoverse map_api instance
Returns:
point_in_polygon_score: Number of coordinates in the trajectory that lie within the lane sequence
"""
lane_seq_polygon = cascaded_union([
Polygon(avm.get_lane_segment_polygon(lane, city_name)).buffer(0)
for lane in lane_seq
])
point_in_polygon_score = 0
for xy in xy_seq:
point_in_polygon_score += lane_seq_polygon.contains(Point(xy))
return point_in_polygon_score
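    # Illustrative sketch (hypothetical values): for a lane sequence whose merged polygon covers
    # the unit square, xy_seq = np.array([[0.5, 0.5], [2.0, 2.0]]) yields a score of 1, since only
    # the first coordinate lies inside the cascaded-union polygon (shapely Point containment).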
def sort_lanes_based_on_point_in_polygon_score(
self,
lane_seqs: List[List[int]],
xy_seq: np.ndarray,
city_name: str,
avm: ArgoverseMap,
    ) -> Tuple[List[List[int]], List[int]]:
"""Filter lane_seqs based on the number of coordinates inside the bounding polygon of lanes.
Args:
lane_seqs: Sequence of lane sequences
xy_seq: Trajectory coordinates
city_name: City name (PITT/MIA)
avm: Argoverse map_api instance
Returns:
            sorted_lane_seqs: Lane sequences sorted in decreasing order of the point_in_polygon score
            sorted_scores: Corresponding point_in_polygon scores
"""
point_in_polygon_scores = []
for lane_seq in lane_seqs:
point_in_polygon_scores.append(
self.get_point_in_polygon_score(lane_seq, xy_seq, city_name,
avm))
randomized_tiebreaker = np.random.random(len(point_in_polygon_scores))
sorted_point_in_polygon_scores_idx = np.lexsort(
(randomized_tiebreaker, np.array(point_in_polygon_scores)))[::-1]
sorted_lane_seqs = [
lane_seqs[i] for i in sorted_point_in_polygon_scores_idx
]
sorted_scores = [
point_in_polygon_scores[i]
for i in sorted_point_in_polygon_scores_idx
]
return sorted_lane_seqs, sorted_scores
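    # Note on the sorting trick above: np.lexsort sorts by its last key first, so lane sequences
    # are ordered primarily by point-in-polygon score (ascending) with a random tiebreaker, and
    # the [::-1] reversal makes it descending, e.g. scores [2, 5, 5] may come back as indices
    # [1, 2, 0] or [2, 1, 0] depending on the random tiebreaker.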
def get_heuristic_centerlines_for_test_set(
self,
lane_seqs: List[List[int]],
xy_seq: np.ndarray,
city_name: str,
avm: ArgoverseMap,
max_candidates: int,
scores: List[int],
    ) -> Tuple[List[np.ndarray], List[List[int]]]:
"""Sort based on distance along centerline and return the centerlines.
Args:
lane_seqs: Sequence of lane sequences
xy_seq: Trajectory coordinates
city_name: City name (PITT/MIA)
avm: Argoverse map_api instance
            max_candidates: Maximum number of centerlines to return
            scores: Point-in-polygon scores corresponding to lane_seqs
        Returns:
sorted_candidate_centerlines: Centerlines in the order of their score
"""
aligned_centerlines = []
aligned_lane_seq = []
diverse_centerlines = []
diverse_lane_seq = []
diverse_scores = []
num_candidates = 0
# Get first half as aligned centerlines
aligned_cl_count = 0
for i in range(len(lane_seqs)):
lane_seq = lane_seqs[i]
score = scores[i]
diverse = True
centerline = avm.get_cl_from_lane_seq([lane_seq], city_name)[0]
if aligned_cl_count < int(max_candidates / 2):
start_dist = LineString(centerline).project(Point(xy_seq[0]))
end_dist = LineString(centerline).project(Point(xy_seq[-1]))
if end_dist > start_dist:
aligned_cl_count += 1
aligned_centerlines.append(centerline)
aligned_lane_seq.append(lane_seq)
diverse = False
if diverse:
diverse_centerlines.append(centerline)
diverse_lane_seq.append(lane_seq)
diverse_scores.append(score)
num_diverse_centerlines = min(len(diverse_centerlines),
max_candidates - aligned_cl_count)
test_centerlines = aligned_centerlines
test_lane_seq = aligned_lane_seq
if num_diverse_centerlines > 0:
probabilities = ([
float(score + 1) / (sum(diverse_scores) + len(diverse_scores))
for score in diverse_scores
] if sum(diverse_scores) > 0 else [1.0 / len(diverse_scores)] *
len(diverse_scores))
diverse_centerlines_idx = np.random.choice(
range(len(probabilities)),
num_diverse_centerlines,
replace=False,
p=probabilities,
)
diverse_centerlines = [
diverse_centerlines[i] for i in diverse_centerlines_idx
]
diverse_lane_seq = [
diverse_lane_seq[i] for i in diverse_centerlines_idx
]
test_centerlines += diverse_centerlines
test_lane_seq += diverse_lane_seq
return test_centerlines, test_lane_seq
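    # The sampling probabilities above use add-one (Laplace) smoothing so that zero-score
    # candidates can still be drawn: e.g. diverse_scores = [3, 1, 0] gives probabilities
    # [(3+1)/7, (1+1)/7, (0+1)/7] = [4/7, 2/7, 1/7]; if all scores are zero, a uniform
    # distribution is used instead.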
def get_candidate_centerlines_for_trajectory(
self,
xy: np.ndarray,
city_name: str,
avm: ArgoverseMap,
viz: bool = False,
max_search_radius: float = 50.0,
seq_len: int = 50,
max_candidates: int = 10,
mode: str = "test",
) -> List[np.ndarray]:
"""Get centerline candidates upto a threshold.
Algorithm:
1. Take the lanes in the bubble of last observed coordinate
2. Extend before and after considering all possible candidates
3. Get centerlines based on point in polygon score.
Args:
xy: Trajectory coordinates,
city_name: City name,
avm: Argoverse map_api instance,
viz: Visualize candidate centerlines,
max_search_radius: Max search radius for finding nearby lanes in meters,
seq_len: Sequence length,
max_candidates: Maximum number of centerlines to return,
mode: train/val/test mode
Returns:
candidate_centerlines: List of candidate centerlines
"""
candidate_lane_segments = None
candidate_centerlines = None
# Get all lane candidates within a bubble
curr_lane_candidates = avm.get_lane_ids_in_xy_bbox(
xy[-1, 0], xy[-1, 1], city_name, self._MANHATTAN_THRESHOLD)
# Keep expanding the bubble until at least 1 lane is found
while (len(curr_lane_candidates) < 1
and self._MANHATTAN_THRESHOLD < max_search_radius):
self._MANHATTAN_THRESHOLD *= 2
curr_lane_candidates = avm.get_lane_ids_in_xy_bbox(
xy[-1, 0], xy[-1, 1], city_name, self._MANHATTAN_THRESHOLD)
assert len(curr_lane_candidates) > 0, "No nearby lanes found!!"
# Set dfs threshold
traj_len = xy.shape[0]
# Assuming a speed of 50 mps, set threshold for traversing in the front and back
dfs_threshold_front = (self._DFS_THRESHOLD_FRONT_SCALE *
(seq_len + 1 - traj_len) / 10)
dfs_threshold_back = self._DFS_THRESHOLD_BACK_SCALE * (traj_len +
1) / 10
# DFS to get all successor and predecessor candidates
obs_pred_lanes: List[Sequence[int]] = []
for lane in curr_lane_candidates:
candidates_future = avm.dfs(lane, city_name, 0,
dfs_threshold_front)
candidates_past = avm.dfs(lane, city_name, 0, dfs_threshold_back,
True)
# Merge past and future
for past_lane_seq in candidates_past:
for future_lane_seq in candidates_future:
assert (
past_lane_seq[-1] == future_lane_seq[0]
), "Incorrect DFS for candidate lanes past and future"
obs_pred_lanes.append(past_lane_seq + future_lane_seq[1:])
# Removing overlapping lanes
obs_pred_lanes = remove_overlapping_lane_seq(obs_pred_lanes)
# Sort lanes based on point in polygon score
obs_pred_lanes, scores = self.sort_lanes_based_on_point_in_polygon_score(
obs_pred_lanes, xy, city_name, avm)
# If the best centerline is not along the direction of travel, re-sort
if mode == "test" or mode == "lanes_only":
# Sort based on alignment with candidate lane
candidate_centerlines, candidate_lane_segments = self.get_heuristic_centerlines_for_test_set(
obs_pred_lanes, xy, city_name, avm, max_candidates, scores)
else:
# Pick oracle centerline
candidate_centerlines = avm.get_cl_from_lane_seq(
[obs_pred_lanes[0]], city_name)
if viz:
plt.figure(0, figsize=(8, 7))
for centerline_coords in candidate_centerlines:
visualize_centerline(centerline_coords)
plt.plot(
xy[:, 0],
xy[:, 1],
"-",
color="#d33e4c",
alpha=1,
linewidth=3,
zorder=15,
)
final_x = xy[-1, 0]
final_y = xy[-1, 1]
plt.plot(
final_x,
final_y,
"o",
color="#d33e4c",
alpha=1,
markersize=10,
zorder=15,
)
plt.xlabel("Map X")
plt.ylabel("Map Y")
plt.axis("off")
plt.title(f"Number of candidates = {len(candidate_centerlines)}")
plt.show()
if mode == "lanes_only":
return candidate_centerlines, candidate_lane_segments
return candidate_centerlines
def compute_map_features(
self,
agent_track: np.ndarray,
obs_len: int,
seq_len: int,
raw_data_format: Dict[str, int],
mode: str,
avm: ArgoverseMap
) -> Tuple[np.ndarray, Dict[str, Any]]:
"""Compute map based features for the given sequence.
If the mode is test, oracle_nt_dist will be empty, candidate_nt_dist will be populated.
If the mode is train/val, oracle_nt_dist will be populated, candidate_nt_dist will be empty.
Args:
agent_track : Data for the agent track
obs_len : Length of observed trajectory
seq_len : Length of the sequence
raw_data_format : Format of the sequence
mode: train/val/test mode
Returns:
oracle_nt_dist (numpy array): normal and tangential distances for oracle centerline
map_feature_helpers (dict): Dictionary containing helpers for map features
"""
obs_pred_lanes = []
unique_segments_future = []
unique_segments_past = []
curr_lane_candidates = []
# Get observed 2 secs of the agent
agent_xy = agent_track[:, [raw_data_format["X"], raw_data_format["Y"]
]].astype("float")
agent_track_obs = agent_track[:obs_len]
agent_xy_obs = agent_track_obs[:, [
raw_data_format["X"], raw_data_format["Y"]
]].astype("float")
# Get API for Argo Dataset map
city_name = agent_track[0, raw_data_format["CITY_NAME"]]
# Get candidate centerlines using observed trajectory
if mode == "test":
oracle_centerline = np.full((seq_len, 2), None)
oracle_nt_dist = np.full((seq_len, 2), None)
candidate_centerlines = self.get_candidate_centerlines_for_trajectory(
agent_xy_obs,
city_name,
avm,
viz=False,
max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,
seq_len=seq_len,
max_candidates=self._MAX_CENTERLINE_CANDIDATES_TEST,
)
# Get nt distance for the entire trajectory using candidate centerlines
candidate_nt_distances = []
for candidate_centerline in candidate_centerlines:
candidate_nt_distance = np.full((seq_len, 2), None)
candidate_nt_distance[:obs_len] = get_nt_distance(
agent_xy_obs, candidate_centerline)
candidate_nt_distances.append(candidate_nt_distance)
elif mode == "compute_all":
# Get oracle centerline
oracle_centerline = self.get_candidate_centerlines_for_trajectory(
agent_xy,
city_name,
avm,
viz=False,
max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,
seq_len=seq_len,
mode="train",
)[0]
# Get NT distance for oracle centerline
oracle_nt_dist = get_nt_distance(agent_xy,
oracle_centerline,
viz=False)
            # Get candidate centerlines
candidate_centerlines, obs_pred_lanes, unique_segments_future, unique_segments_past, curr_lane_candidates = self.get_candidate_centerlines_for_trajectory(
agent_xy_obs,
city_name,
avm,
viz=False,
max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,
seq_len=seq_len,
max_candidates=self._MAX_CENTERLINE_CANDIDATES_TEST,
mode=mode
)
# Get nt distance for the entire trajectory using candidate centerlines
candidate_nt_distances = []
for candidate_centerline in candidate_centerlines:
candidate_nt_distance = np.full((seq_len, 2), None)
candidate_nt_distance[:obs_len] = get_nt_distance(
agent_xy_obs, candidate_centerline)
candidate_nt_distances.append(candidate_nt_distance)
elif mode == "lanes_only":
# Get oracle centerline
oracle_centerline = self.get_candidate_centerlines_for_trajectory(
agent_xy,
city_name,
avm,
viz=False,
max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,
seq_len=seq_len,
mode="train",
)[0]
# Not computing oracle nt_distances
oracle_nt_dist = np.full((seq_len, 2), None)
            # Get candidate centerlines
candidate_centerlines, obs_pred_lanes, unique_segments_future, unique_segments_past, curr_lane_candidates = self.get_candidate_centerlines_for_trajectory(
agent_xy_obs,
city_name,
avm,
viz=False,
max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,
seq_len=seq_len,
max_candidates=self._MAX_CENTERLINE_CANDIDATES_TEST,
mode=mode
)
# Not computing candidate nt_distances
candidate_nt_distances = []
else:
oracle_centerline = self.get_candidate_centerlines_for_trajectory(
agent_xy,
city_name,
avm,
viz=False,
max_search_radius=self._MAX_SEARCH_RADIUS_CENTERLINES,
seq_len=seq_len,
mode=mode,
)[0]
candidate_centerlines | |
# -*- coding: utf-8 -*-
import os
import re
import tempfile
# Form implementation generated from reading ui file 'frontend_1.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from difflib import get_close_matches
from pikepdf import _cpphelpers
import webbrowser
import cv2
import fitz
import pikepdf
import pytesseract
import requests
from PIL import Image, ImageFont, ImageDraw
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import Qt
from PySide2.QtGui import QPixmap, QImage, QPainter, QFont, QTransform, QIcon
from PySide2.QtPrintSupport import QPrinter, QPrintPreviewDialog, QPrintDialog
from PySide2.QtWidgets import QMessageBox, QFileDialog, QDialog, QFontDialog, QLineEdit
from dialogboxes.resize import Ui_MainWindow9
from dialogboxes.settingsbox import Ui_MainWindow1
from dialogboxes.trouble1 import Ui_MainWindow3
BINARY_THREHOLD = 180
IMAGE_SIZE = 1800
BASE_URL = 'https://techidentity.herokuapp.com'
pytesseract.pytesseract.tesseract_cmd = r'Tesseract-OCR/tesseract.exe'
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1024, 590)
MainWindow.setMinimumSize(QtCore.QSize(1024, 590))
MainWindow.setMaximumSize(QtCore.QSize(1024, 590))
MainWindow.setStyleSheet("background-color: rgb(220, 220, 220);")
self.switch = 0
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 10, 166, 41))
self.label.setStyleSheet("border-image: url(:/newPrefix/WhatsApp Image 2020-09-11 at 12.46.25 AM.jpeg);")
pixmap = QPixmap(r"images\applogo.jpeg")
self.label.setPixmap(pixmap)
self.label.setScaledContents(True)
self.label.setText("")
self.label.setObjectName("label")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(950, 10, 31, 31))
self.pushButton.setStyleSheet("Background-color:rgb(255,251,549)")
self.pushButton.setIcon(QIcon('images\help.png'))
self.pushButton.setText("")
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(910, 10, 31, 31))
self.pushButton_2.setStyleSheet('Background-color:rgb(0,10,5)')
self.pushButton_2.setText("")
self.pushButton_2.setIcon(QIcon('images\contact.png'))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(self.contact)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(180, 50, 341, 91))
self.label_2.setStyleSheet("border: 2px solid black;\n"
"background-color: rgb(255, 255, 255);")
self.label_2.setText("")
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(540, 50, 341, 91))
self.label_3.setStyleSheet("border: 2px solid black;\n"
"background-color: rgb(255, 255, 255);")
self.label_3.setText("")
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(200, 70, 81, 16))
self.label_4.setStyleSheet("font: 87 11pt \"Arial Black\";\n"
"background-color: rgb(255, 255, 255);")
self.label_4.setScaledContents(True)
self.label_4.setIndent(-1)
self.label_4.setObjectName("label_4")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(200, 100, 91, 16))
self.label_6.setStyleSheet("font: 87 11pt \"Arial Black\";\n"
"background-color: rgb(255, 255, 255);")
self.label_6.setScaledContents(True)
self.label_6.setIndent(-1)
self.label_6.setObjectName("label_6")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(300, 70, 161, 21))
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(300, 100, 161, 21))
self.lineEdit_2.setObjectName("lineEdit_2")
self.lineEdit_2.setEchoMode(QLineEdit.Password)
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_4.setGeometry(QtCore.QRect(470, 100, 31, 21))
self.pushButton_4.setStyleSheet("")
icon1 = QtGui.QPixmap(r'images\submit.png')
self.pushButton_4.setIcon(icon1)
self.pushButton_4.setIconSize(QtCore.QSize(11, 11))
self.pushButton_4.setText("")
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_4.clicked.connect(self.password)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(470, 70, 31, 21))
self.pushButton_3.setAutoFillBackground(True)
icon = QtGui.QPixmap(r'images\file.jpg')
self.pushButton_3.setStyleSheet("")
self.pushButton_3.setText("")
self.pushButton_3.setIcon(icon)
self.pushButton_3.setIconSize(QtCore.QSize(11, 11))
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(self.browseImage)
self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_5.setGeometry(QtCore.QRect(550, 60, 101, 31))
self.pushButton_5.setStyleSheet("font: 87 8pt \"Arial Black\";")
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_5.clicked.connect(self.open_settings)
self.pushButton_6 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_6.setGeometry(QtCore.QRect(770, 60, 101, 31))
self.pushButton_6.setStyleSheet("font: 87 8pt \"Arial Black\";")
self.pushButton_6.setObjectName("pushButton_6")
self.pushButton_6.clicked.connect(self.openFontDialog)
self.pushButton_7 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_7.setGeometry(QtCore.QRect(660, 60, 101, 31))
self.pushButton_7.setStyleSheet("font: 87 8pt \"Arial Black\";")
self.pushButton_7.setObjectName("pushButton_7")
self.pushButton_7.clicked.connect(self.printpreviewDialog)
self.pushButton_8 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_8.setGeometry(QtCore.QRect(660, 100, 101, 31))
self.pushButton_8.setStyleSheet("font: 87 8pt \"Arial Black\";")
self.pushButton_8.setObjectName("pushButton_8")
self.pushButton_8.clicked.connect(self.resizeimage)
self.pushButton_9 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_9.setGeometry(QtCore.QRect(550, 100, 101, 31))
self.pushButton_9.setStyleSheet("font: 87 8pt \"Arial Black\";")
self.pushButton_9.setObjectName("pushButton_9")
self.pushButton_9.clicked.connect(self.trouble)
self.pushButton_10 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_10.setGeometry(QtCore.QRect(770, 100, 101, 31))
self.pushButton_10.setStyleSheet("font: 87 8pt \"Arial Black\";")
self.pushButton_10.setObjectName("pushButton_10")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(0, 160, 1021, 31))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.checkBox = QtWidgets.QRadioButton(self.centralwidget)
self.checkBox.setGeometry(QtCore.QRect(20, 540, 121, 21))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.checkBox.setFont(font)
self.checkBox.setObjectName("checkBox")
self.checkBox.toggled.connect(self.clickBox)
self.checkBox_2 = QtWidgets.QRadioButton(self.centralwidget)
self.checkBox_2.setGeometry(QtCore.QRect(880, 540, 121, 21))
self.checkBox_2.toggled.connect(self.clickBox2)
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.checkBox_2.setFont(font)
self.checkBox_2.setObjectName("checkBox_2")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(10, 210, 1001, 311))
self.label_5.setStyleSheet("background-color: rgb(239, 239, 239);")
self.label_5.setText("")
self.label_5.setObjectName("label_5")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setGeometry(QtCore.QRect(18, 215, 490, 300))
self.groupBox.setTitle("")
self.groupBox.setObjectName("groupBox")
self.label_7 = QtWidgets.QLabel(self.groupBox)
self.label_7.setGeometry(QtCore.QRect(30, 0, 451, 61))
self.label_7.setStyleSheet("")
self.label_7.setText("")
self.label_7.setObjectName("label_7")
self.label_9 = QtWidgets.QLabel(self.groupBox)
self.label_9.setGeometry(QtCore.QRect(40, 50, 120, 152))
self.label_9.setStyleSheet("")
self.label_9.setText("")
self.label_9.setObjectName("label_9")
self.label_10 = QtWidgets.QLabel(self.groupBox)
self.label_10.setGeometry(QtCore.QRect(170, 63, 251, 25))
self.label_10.setStyleSheet("")
self.label_10.setText("")
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(self.groupBox)
self.label_11.setGeometry(QtCore.QRect(170, 89, 251, 25))
self.label_11.setStyleSheet("")
self.label_11.setText("")
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(self.groupBox)
self.label_12.setGeometry(QtCore.QRect(170, 115, 251, 25))
self.label_12.setStyleSheet("")
self.label_12.setText("")
self.label_12.setObjectName("label_12")
self.label_13 = QtWidgets.QLabel(self.groupBox)
self.label_13.setGeometry(QtCore.QRect(170, 140, 251, 21))
self.label_13.setStyleSheet("")
self.label_13.setText("")
self.label_13.setObjectName("label_13")
self.label_14 = QtWidgets.QLabel(self.groupBox)
self.label_14.setGeometry(QtCore.QRect(170, 170, 251, 25))
self.label_14.setStyleSheet("")
self.label_14.setText("")
self.label_14.setObjectName("label_14")
self.label_15 = QtWidgets.QLabel(self.groupBox)
self.label_15.setGeometry(QtCore.QRect(0, 270, 491, 32))
self.label_15.setStyleSheet("")
self.label_15.setText("")
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(self.groupBox)
self.label_16.setGeometry(QtCore.QRect(0, 265, 491, 5))
self.label_16.setStyleSheet("")
self.label_16.setText("")
self.label_16.setObjectName("label_16")
self.label_20 = QtWidgets.QLabel(self.groupBox)
self.label_20.setGeometry(QtCore.QRect(10, 60, 16, 141))
self.label_20.setText("")
self.label_20.setObjectName("label_20")
self.label_21 = QtWidgets.QLabel(self.groupBox)
self.label_21.setGeometry(QtCore.QRect(460, 70, 16, 171))
self.label_21.setText("")
self.label_21.setObjectName("label_21")
self.label_23 = QtWidgets.QLabel(self.groupBox)
self.label_23.setGeometry(QtCore.QRect(87, 242, 311, 23))
self.label_23.setStyleSheet("font-size:12px")
self.label_23.setFont(QFont('Arial', 12, weight=QtGui.QFont.Bold))
self.label_23.setObjectName("label_23")
self.label_23.setAlignment(Qt.AlignAbsolute)
self.label_24 = QtWidgets.QLabel(self.groupBox)
self.label_24.setGeometry(QtCore.QRect(107, 219, 311, 23))
self.label_24.setStyleSheet("font-size:15px")
self.label_24.setFont(QFont('Arial', 12, weight=QtGui.QFont.Bold))
self.label_24.setObjectName("label_24")
self.label_24.setAlignment(Qt.AlignBottom)
self.label_31 = QtWidgets.QLabel(self.groupBox)
self.label_31.setGeometry(QtCore.QRect(10, 60, 21, 141))
self.label_31.setStyleSheet("")
self.label_31.setText("")
self.label_31.setObjectName("label_31")
self.label_32 = QtWidgets.QLabel(self.groupBox)
self.label_32.setGeometry(QtCore.QRect(450, 70, 31, 171))
self.label_32.setStyleSheet("")
self.label_32.setText("")
self.label_32.setObjectName("label_32")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setGeometry(QtCore.QRect(515, 215, 490, 300))
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.label_8 = QtWidgets.QLabel(self.groupBox_2)
self.label_8.setGeometry(QtCore.QRect(0, 0, 491, 51))
self.label_8.setText("")
self.label_8.setObjectName("label_8")
self.label_17 = QtWidgets.QLabel(self.groupBox_2)
self.label_17.setGeometry(QtCore.QRect(0, 0, 491, 51))
self.label_17.setStyleSheet("")
self.label_17.setText("")
self.label_17.setObjectName("label_17")
self.label_19 = QtWidgets.QLabel(self.groupBox_2)
self.label_19.setGeometry(QtCore.QRect(10, 269, 471, 31))
self.label_19.setStyleSheet("")
self.label_19.setText("")
self.label_19.setObjectName("label_19")
self.label_18 = QtWidgets.QLabel(self.groupBox_2)
self.label_18.setGeometry(QtCore.QRect(0, 262, 491, 5))
self.label_18.setStyleSheet("")
self.label_18.setText("")
self.label_18.setObjectName("label_18")
self.label_22 = QtWidgets.QLabel(self.groupBox_2)
self.label_22.setGeometry(QtCore.QRect(330, 70, 143, 143))
self.label_22.setText("")
self.label_22.setObjectName("label_22")
self.label_25 = QtWidgets.QLabel(self.groupBox_2)
self.label_25.setGeometry(QtCore.QRect(330, 60, 143, 143))
self.label_25.setStyleSheet("")
self.label_25.setText("")
self.label_25.setObjectName("label_25")
self.label_26 = QtWidgets.QLabel(self.groupBox_2)
self.label_26.setGeometry(QtCore.QRect(330, 220, 161, 31))
self.label_26.setStyleSheet("")
self.label_26.setText("")
self.label_26.setObjectName("label_26")
self.label_27 = QtWidgets.QLabel(self.groupBox_2)
self.label_27.setGeometry(QtCore.QRect(70, 220, 257, 21))
self.label_27.setStyleSheet("font-size:15px")
self.label_27.setText("")
self.label_27.setFont(QFont('Arial', 12, weight=QtGui.QFont.Bold))
self.label_27.setObjectName("label_27")
self.label_27.setAlignment(Qt.AlignAbsolute)
self.label_28 = QtWidgets.QLabel(self.groupBox_2)
self.label_28.setGeometry(QtCore.QRect(30, 240, 277, 22))
self.label_28.setStyleSheet("")
self.label_28.setStyleSheet("font-size:12")
self.label_28.setText("")
self.label_28.setFont(QFont('Arial', 12, weight=QtGui.QFont.Bold))
self.label_28.setObjectName("label_28")
self.label_28.setAlignment(Qt.AlignAbsolute | Qt.AlignJustify)
self.label_29 = QtWidgets.QLabel(self.groupBox_2)
self.label_29.setGeometry(QtCore.QRect(10, 60, 321, 75))
self.label_29.setStyleSheet("")
self.label_29.setText("")
self.label_29.setObjectName("label_29")
self.label_30 = QtWidgets.QLabel(self.groupBox_2)
self.label_30.setGeometry(QtCore.QRect(10, 140, 341, 75))
self.label_30.setStyleSheet("")
self.label_30.setText("")
self.label_30.setObjectName("label_30")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# create temp folder
self.path = tempfile.mkdtemp()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Aapki Pehchaan"))
self.label_4.setText(_translate("MainWindow", "Filename:"))
self.label_4.adjustSize()
self.label_6.setText(_translate("MainWindow", "Password:"))
self.label_6.adjustSize()
self.pushButton_5.setText(_translate("MainWindow", "SETTINGS"))
self.pushButton_6.setText(_translate("MainWindow", "FONT"))
self.pushButton_7.setText(_translate("MainWindow", "PRINT"))
self.pushButton_8.setText(_translate("MainWindow", "RESIZE"))
self.pushButton_9.setText(_translate("MainWindow", "REPORT"))
self.pushButton_10.setText(_translate("MainWindow", "LICENSE"))
self.checkBox.setText(_translate("MainWindow", "PRINT FRONT"))
self.checkBox.adjustSize()
self.checkBox_2.setText(_translate("MainWindow", "PRINT BACK"))
self.checkBox_2.adjustSize()
def clickBox(self, state):
if state:
print('Checked')
self.take_screenshot()
else:
print('Unchecked')
def clickBox2(self, state):
if state:
print('Checked')
self.take_screenshot2()
else:
print('Unchecked')
def contact(self):
webbrowser.open("https://mltool.in/contact")
def topimage(self):
pixmap = QPixmap(r'images\Ashok Stambh Front.png')
self.label_7.setPixmap(pixmap)
self.label_7.setScaledContents(True)
def nextpagebottomimage(self):
pixmap = QPixmap(r'images\Back side Symbol.png')
self.label_19.setPixmap(pixmap)
self.label_19.setScaledContents(True)
def photoextraction(self, doc):
for i in range(len(doc)):
for img in doc.getPageImageList(i):
xref = img[0]
pix = fitz.Pixmap(doc, xref)
if pix.n < 5:  # GRAY/RGB pixmaps can be written directly; others are converted below
pix.writePNG(os.path.join(self.path, "p%s-%s.png" % (i, xref)))
else:
pix1 = fitz.Pixmap(fitz.csRGB, pix)
pix1.writePNG(os.path.join(self.path, "p%s-%s.png" % (i, xref)))
try:
# HUMAN IMAGE IN ADHAR
if pix.width == 0.8 * pix.height or pix.width == 0.75 * pix.height:
self.human_image = os.path.join(self.path, "p%s-%s.png" % (i, xref))
pixmap = QPixmap(self.human_image)
self.label_9.setPixmap(pixmap)
self.label_9.setScaledContents(True)
# SCANNER CODE IN ADHAR
elif pix.width == pix.height:
pixmap = QPixmap(os.path.join(self.path, "p%s-%s.png" % (i, xref)))
self.label_25.setPixmap(pixmap)
self.label_25.setScaledContents(True)
except Exception as e:
print(e)
print("fault in human and scanner image")
def setText_to_elements(self, a):
self.label_13.setText(self.text_ex['DOB'])
self.label_10.setText(self.text_ex['namehindi'])
self.label_11.setText(self.text_ex['englishname'])
self.label_12.setText(self.text_ex['gender string'])
self.label_29.setText(self.text_ex['hindiAddress'])
self.label_30.setText("Address: " + "\n" + self.text_ex['engAddress'])
self.label_30.adjustSize()
self.label_27.setText(self.text_ex['Adhaar no'])
if self.text_ex['VID'] is not None:
self.label_23.setText("VID: " + self.text_ex['VID'])
self.label_23.adjustSize()
self.label_23.setStyleSheet("border-top:0.5px solid rgb(220, 220, 220);")
self.label_28.setText("VID: " + self.text_ex['VID'])
self.label_28.adjustSize()
self.label_28.setStyleSheet("border-top:0.5px solid rgb(220, 220, 220);")
self.label_24.setText(self.text_ex['Adhaar no'])
def password(self):
self.switch = 1
r = self.lineEdit.text()
pwd = self.lineEdit_2.text()
if pwd != "":
print(pwd)
try:
mypdf = pikepdf.open(r, pwd)
r = os.path.join(self.path, "unlocked.pdf")
mypdf.save(r)
except Exception as e:
print(str(e))
self.showdialog2()
# print('cannot decrypt %s with password %s' % (r, pwd))
else:
pikepdf.open(r)
print(r)
# Hit fast api endpoint with PDF file
doc = fitz.open(r)
# # try:
res = requests.post(BASE_URL + '/uploadfile/', files={
'pdf': open(r, 'rb'),
}, timeout=40)
print(res)
if res.status_code == 200:
data = res.json()
self.text_ex = data["text"]
name = self.name_ex(doc)
self.text_ex["englishname"] = name
else:
print('error in server')
return
try:
self.groupBox.setStyleSheet("background-color:rgb(255,255,255)")
self.groupBox_2.setStyleSheet("background-color:rgb(255,255,255)")
self.photoextraction(doc)
self.setText_to_elements(self.text_ex)
self.nextpagetop()
self.topimage()
self.redline()
self.nextpagetop()
self.defaultfooter()
self.nextpagebottomimage()
self.label_26.clear()
except Exception as e:
print(e)
print("Sorry!response invalid")
def image_smoothening(self, img):
ret1, th1 = cv2.threshold(img, BINARY_THREHOLD, 255, cv2.THRESH_BINARY)
ret2, th2 = cv2.threshold(th1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
blur = cv2.GaussianBlur(th2, (1, 1), 0)
ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return th3
def remove_noise_and_smooth(self, file_name):
img = cv2.imread(file_name, 0)
img = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
gaus = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 25)
img = self.image_smoothening(gaus)
return img
def name_ex(self, doc):
page = doc.loadPage(0)
mat = fitz.Matrix(2, 2)
pix = page.getPixmap(matrix=mat)
outfile = os.path.join(self.path, "outfile.png")
output = pix.writePNG(outfile)
image_full = cv2.imread(outfile)
path = os.path.join(self.path, "image_full.png")
cv2.imwrite(path, image_full)
image_front = image_full[1140:1475, 120:500]
a = os.path.join(self.path, "image_front.png")
cv2.imwrite(a, image_front)
image_front1 = self.remove_noise_and_smooth(a)
pytesseract.pytesseract.tesseract_cmd = r'Tesseract-OCR/tesseract.exe'
text = pytesseract.image_to_string(image_front1, lang="eng")
try:
newlist1 = []
for xx in text.split('\n'):
newlist1.append(xx)
newlist1 = list(filter(lambda x: len(x) > 1, newlist1))
a = 0
str2 = "Government"
str1 = "of"
for no in newlist1:
if str2 in no or str1 in no:
b = a
a = a + 1
name = newlist1[b + 2]
name = name.split(" ")
print(name)
if len(name) == 2:
print(name)
name = " ".join(name)
print(name)
return name
else:
name = " ".join(name)
name = re.sub(r'[()]', '', name)
name = re.sub(r'[0-9]+', '', name)
# name = re.sub(r'[;'';;!@#!@#!#!$!=()|:><~~' '__-]+','',name)
wordlist = self.text_ex['raw'].split("\n")
name = get_close_matches(name, wordlist)
print(name)
return name[0]
except Exception as e:
print(e)
pass
def showdialog(self):
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("file extension not | |
# Driver for Rhode & Schwartz VNA ZNB40
# <NAME> <<EMAIL>>, 2014
# <NAME> <<EMAIL>>, 2015
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from instrument import Instrument
import visa
import types
import logging
import numpy as np
import struct
import qt
class RhodeSchwartz_ZNB40(Instrument):
'''
This is the driver for the Rohde & Schwarz ZNB40 vector network analyzer.
Note that, for simplicity, the set functions set the specified
parameter for all channels.
The get functions return the value for the currently active channel.
Usage:
Initialize with
<name> = qt.instruments.create('<name>', 'RhodeSchwartz_ZNB40',
address='TCPIP::<IP-address>::INSTR',
reset=<bool>,)
For GPIB the address is: 'GPIB<interface_number>::<gpib-address>'
'''
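# Hedged usage sketch (not from the original driver; the instrument name and IP
# address below are placeholders): once created as in the docstring above, a single
# sweep can be triggered and the complex S21 trace read back roughly like this:
#
#   znb = qt.instruments.create('znb', 'RhodeSchwartz_ZNB40',
#                               address='TCPIP::<IP-address>::INSTR')
#   znb.set_sweep_mode('single')
#   znb.trigger_n_times(1, block_until_done=True)
#   freqs = znb.get_frequency_data()   # stimulus axis in Hz
#   s21 = znb.get_data('S21')          # complex-valued trace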
def __init__(self, name, address, reset=False):
'''
Initializes a R&S ZNB40, and communicates with the wrapper.
Input:
name (string) : name of the instrument
address (string) : GPIB address
reset (bool) : resets to default values
'''
# Initialize wrapper functions
logging.info('Initializing instrument Rohde & Schwarz ZNB40 vector network analyzer')
Instrument.__init__(self, name, tags=['physical'])
# Add some global constants
self._address = address
self._default_timeout = 120000. # ms
self._visainstrument = visa.ResourceManager().open_resource(self._address,
timeout=self._default_timeout)
self._freq_unit = 1
self._freq_unit_symbol = 'Hz'
# Add parameters to wrapper
self.add_parameter('start_frequency', type=types.FloatType, format='%.6e',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units=self._freq_unit_symbol, minval=20e3/self._freq_unit, maxval=40e9/self._freq_unit)
self.add_parameter('stop_frequency', type=types.FloatType, format='%.6e',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units=self._freq_unit_symbol, minval=20e3/self._freq_unit, maxval=40e9/self._freq_unit)
self.add_parameter('center_frequency', type=types.FloatType, format='%.06e',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units=self._freq_unit_symbol, minval=0.020, maxval=40e9)
self.add_parameter('span', type=types.FloatType, format='%.06e',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units=self._freq_unit_symbol, minval=0.020)
self.add_parameter('numpoints', type=types.IntType, format='%g',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units='', minval=1)
self.add_parameter('average_mode', type=types.StringType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
format_map={'AUTO': 'automatic',
'FLAT': 'cumulative average of magnitude and phase',
'RED': 'cumulative average of quadratures',
'MOV': 'simple average of quadratures'})
self.add_parameter('averages', type=types.IntType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
minval=1, maxval=1000)
self.add_parameter('if_bandwidth', type=types.FloatType, format='%.0e',
minval=1., maxval=1e6,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units=self._freq_unit_symbol)
self.add_parameter('if_selectivity', type=types.StringType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
format_map={'NORM': 'normal',
'MED': 'medium',
'HIGH': 'high'})
self.add_parameter('sweeptime', type=types.FloatType, format='%g',
flags=Instrument.FLAG_GET,
units='s')
self.add_parameter('sweeptime_auto', type=types.BooleanType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET)
self.add_parameter('source_power', type=types.FloatType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units='dBm',minval=-30., maxval=10.)
self.add_parameter('trigger_source', type=types.StringType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
format_map = {
"imm" : "immediate (continuous)",
"ext" : "external",
"line" : "line",
"tim" : "periodic timer",
"rtcl" : "real time clock",
"man" : "manual"
})
self.add_parameter('sweep_mode', type=types.StringType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
format_map = {
"single" : "single",
"cont" : "continuous"
})
self.add_parameter('external_reference', type=types.BooleanType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET)
self.add_parameter('external_reference_frequency', type=types.IntType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units='MHz')
self.add_function('reset')
self.add_function('send_trigger')
self.add_function('get_all')
# for backwards compatibility with the old parameter name ("span_frequency")
self.add_function('get_span_frequency')
self.add_function('set_span_frequency')
if reset:
self.reset()
else:
# self.set_default_window_config()
# self.set_default_channel_config()
self.get_all()
# --------------------------------------
# functions
# --------------------------------------
# for backwards compatibility with the old parameter name "span_frequency"
def get_span_frequency(self): return self.get_span()
def set_span_frequency(self, s): self.set_span(s)
def reset(self):
self._visainstrument.write('*RST') #reset to default settings
# self.set_default_channel_config()
# self.set_default_window_config()
self.set_sweep_mode('single')
self.get_all()
def clear_status_reg(self):
self._visainstrument.write('*CLS')
def send_trigger(self):
s = self.get_trigger_source()
if s == 'imm':
self._visainstrument.write('INIT:IMM:ALL')
elif s == 'man':
self._visainstrument.write('*TRG')
else:
raise Exception('Not sure how to trigger manually when trigger source is set to "%s"' % s)
def trigger_n_times(self, n, block_until_done=False):
''' Trigger exactly n sweeps. Waits until done before returning iff block_until_done==True. '''
self._visainstrument.write('SENSE:SWEEP:COUNT:ALL %u' % n)
cmd = 'INIT:IMM:ALL%s' % ('; *OPC?' if block_until_done else '')
min_wait_time = n*self.get_sweeptime()
if block_until_done and min_wait_time < 10.:
r = self._visainstrument.ask(cmd)
assert r.strip() == '1', r
else:
self._visainstrument.write(cmd)
if block_until_done:
qt.msleep(min_wait_time - 5.)
r = self._visainstrument.read()
assert r.strip() == '1', r
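# Illustrative call (assumed instrument handle "znb"): run 5 sweeps and block
# until the analyzer reports completion via *OPC?:
#   znb.trigger_n_times(5, block_until_done=True)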
def clear_averages(self):
''' Restart averaging (on all channels). '''
ch_numbers, ch_names = self.ch_catalog()
for chan in ch_numbers:
self._visainstrument.write('SENSE%s:AVER:CLE' % (chan))
def autoscale_once(self):
ch_numbers, ch_names = self.ch_catalog()
for chan in ch_numbers:
self._visainstrument.write('DISPlay:WINDow%u:TRAC%u:Y:SCALe:AUTO ONCE' % (chan,chan))
def set_S21_only_channel_config(self):
self._visainstrument.write('*RST')
self._visainstrument.write('SYSTEM:DISPLAY:UPDATE ON')
self.set_sweep_mode('single')
self.autoscale_once()
self.get_all()
def set_default_channel_config(self):
default_channel_to_s = { 1: 'S11', 2: 'S21', 3: 'S12', 4: 'S22' }
self._visainstrument.write('*RST')
self.set_sweep_mode('single')
for chan in range(1,5):
self._visainstrument.write(':CALCULATE%d:PARAMETER:SDEFINE "Trc%u", "%s"' % (chan,chan, default_channel_to_s[chan]))
self._visainstrument.write(':DISPLAY:WINDOW%u:STATE ON' % chan)
self._visainstrument.write(':DISPLAY:WINDOW%u:TRACE%u:FEED "Trc%u"' % (chan, chan, chan))
self._visainstrument.write('SYSTEM:DISPLAY:UPDATE ON')
self.get_all()
def get_all(self):
self.get_sweep_mode()
self.get_trigger_source()
self.get_start_frequency()
self.get_stop_frequency()
self.get_numpoints()
self.get_average_mode()
self.get_averages()
self.get_if_bandwidth()
self.get_if_selectivity()
self.get_external_reference()
self.get_external_reference_frequency()
self.get_source_power()
self.get_sweeptime()
self.get_sweeptime_auto()
self.get_center_frequency()
self.get_span()
def get_data(self, s_parameter):
'''
Get the measured S parameter.
s_parameter --- must be one of ['S11', 'S21', 'S12', 'S22']
'''
s = s_parameter.upper().strip()
assert s in ['S11', 'S21', 'S12', 'S22'], 'Invalid S-parameter: %s' % s_parameter
logging.debug(__name__ + ' : Get %s data.' % s)
# check that the requested S parameter is being measured on some channel
try:
s2chan = self.s_to_channel_dict()[s_parameter]
except KeyError:
logging.warn('%s is not currently being measured.', s_parameter)
raise
# Verify the function (again <-- seems unnecessary but doesn't hurt either)
r = self._visainstrument.ask('SENSe%u:FUNCtion?' % s2chan)
assert r.strip().strip("'").upper().endswith(s), 'Channel configuration has been changed! (%s)' % r
self._visainstrument.write('FORM REAL,32')
raw = self._visainstrument.query_binary_values('TRAC? CH%uDATA' % s2chan,
datatype='f',
is_big_endian=False,
container=np.array,
header_fmt='ieee')
return np.array([ r + 1j*i for r,i in raw.reshape((-1,2)) ])
def channel_to_s_dict(self):
channel_numbers, channel_names = self.ch_catalog()
meas = []
for chan in channel_numbers:
sparam = self._visainstrument.ask('SENSE%u:FUNCTION?' % chan).strip().strip("'").upper().split(":")[2]
meas.append(sparam)
return dict(zip(channel_numbers, meas))
def s_to_channel_dict(self):
return dict([(v,k) for k,v in self.channel_to_s_dict().iteritems()])
def start_single_sweep(self):
'''
Same as restart sweep in manual operation.
'''
logging.debug(__name__ + 'start a single sweep')
def get_function(self,chan):
r = self._visainstrument.ask('SENSe%u:FUNCtion?' % chan)
return r
def get_frequency_data(self, channel_number=None):
'''
Get the current x-axis frequency values.
If channel_number == None, use the first channel in the channel catalog.
'''
logging.debug(__name__ + 'Get the current x-axis values.')
ch = self.ch_catalog()[0][0] if channel_number == None else channel_number
assert int(ch) == ch, 'channel_number must be an integer'
self._visainstrument.write('FORM REAL,32')
return self._visainstrument.query_binary_values('TRAC:STIM? CH%dDATA' % ch,
datatype='f',
is_big_endian=False,
container=np.array,
header_fmt='ieee')
# return eval('[' + self._visainstrument.ask('TRAC:STIM? CH1DATA') + ']')
def do_get_start_frequency(self):
'''
Start of sweep (Hz)
'''
logging.debug('Reading start frequency')
return float(self._visainstrument.ask('SENS:FREQ:STAR?'))/self._freq_unit
def do_set_start_frequency(self, start): #in Hz
logging.debug('Setting start freq to %s' % start)
ch_numbers, ch_names = self.ch_catalog()
for chan in ch_numbers:
self._visainstrument.write('SENSE%s:FREQ:STAR %E' % (chan, start*self._freq_unit))
self.get_center_frequency()
self.get_span()
def do_get_stop_frequency(self):
'''
End of sweep (Hz)
'''
logging.debug('Reading stop frequency')
return float(self._visainstrument.ask('SENS:FREQ:STOP?'))/self._freq_unit
def do_set_stop_frequency(self, stop): #in Hz
logging.debug('Setting stop freq to %s' % stop)
ch_numbers, ch_names = self.ch_catalog()
for chan in ch_numbers:
self._visainstrument.write('SENSE%s:FREQ:STOP %E' % (chan, stop*self._freq_unit))
self.get_center_frequency()
self.get_span()
def do_get_center_frequency(self):
'''
End of sweep (Hz)
'''
logging.debug('Reading the center frequency')
return float(self._visainstrument.ask('SENS:FREQ:CENT?'))/self._freq_unit
def do_set_center_frequency(self, s): #in Hz
logging.debug('Setting center freq to %s' % s)
ch_numbers, ch_names = self.ch_catalog()
for chan in ch_numbers:
self._visainstrument.write('SENSE%s:FREQ:CENT %s Hz' % (chan,s))
self.get_start_frequency()
self.get_stop_frequency()
def do_get_span(self):
'''
End of sweep (Hz)
'''
logging.debug('Reading the span')
return float(self._visainstrument.ask('SENS:FREQ:SPAN?'))/self._freq_unit
def do_set_span(self, s):
logging.debug('Setting span to %s' % s)
ch_numbers, ch_names = self.ch_catalog()
for chan in ch_numbers:
self._visainstrument.write('SENSE%s:FREQ:SPAN %s Hz' % (chan,s))
self.get_start_frequency()
self.get_stop_frequency()
def do_get_numpoints(self):
'''
Number of points in frequency
'''
logging.debug('Reading sweep points')
return int(self._visainstrument.ask('SENS1:SWE:POIN?'))
def do_set_numpoints(self,numpoints):
logging.debug('Setting sweep points to %f' % numpoints)
ch_numbers, ch_names = self.ch_catalog()
for chan in ch_numbers:
self._visainstrument.write('SENSE%s:SWE:POIN %f' % (chan, numpoints))
return self._visainstrument.ask('SWE:POIN?')
def do_get_if_bandwidth(self):
logging.debug('Reading resolution bandwidth')
r = self._visainstrument.ask('BAND?')
if r.strip().lower().startswith('max'): r = 1e6
return float(r)/self._freq_unit
def do_get_if_selectivity(self):
logging.debug('Reading IF filter selectivity')
r = self._visainstrument.ask('BAND:SEL?')
return r.strip().upper()
def ch_catalog(self):
logging.debug('return numbers and names of all channels')
catalog = self._visainstrument.ask('CONFIGURE:CHANNEL:CATALOG?').strip().strip("'").upper().split(",")
ch_names = catalog[1::2]
ch_numbers = map(int, catalog[0::2])
return ch_numbers, ch_names
def trace_catalog(self):
logging.debug('return numbers and names of all traces')
return self._visainstrument.ask('CONFIGURE:TRACE:CATALOG?').strip().strip("'").upper().split(",")
def window_catalog(self,wnd,wndtr):
logging.debug('return the traces assigned to the given window')
return self._visainstrument.ask('DISPLAY:WINDOW%u:TRACE%u:CATALOG?' % (wnd,wndtr))
def list_traces_in_chan(self,chan):
logging.debug('return the traces assigned to the given channel')
return self._visainstrument.ask('CONFIGURE%u:TRACE:CATALOG?' % chan)
def paramter_select_query(self,chan):
logging.debug('return the currently selected parameter of the given channel')
return self._visainstrument.ask(':CALCULATE%u:PARAMETER:SELECT?' % chan)
def do_set_if_bandwidth(self,if_bandwidth):
logging.debug('Setting Resolution BW to %s' % if_bandwidth)
ch_numbers, ch_names = self.ch_catalog()
for chan in ch_numbers:
self._visainstrument.write('SENSE%s:BWIDTH:RESOLUTION %s' % (chan,if_bandwidth))
self.get_sweeptime()
def do_set_if_selectivity(self, if_sel):
logging.debug('Setting IF filter selectivity to %s' % if_sel)
ch_numbers, ch_names = self.ch_catalog()
for chan in ch_numbers:
self._visainstrument.write('SENSE%s:BWIDTH:SEL %s' % (chan, if_sel))
self.get_sweeptime()
def do_get_sweeptime(self):
logging.debug('reading sweeptime')
return float(self._visainstrument.ask('SWE:TIME?'))
def do_get_sweeptime_auto(self):
logging.debug('reading sweeptime')
r = self._visainstrument.ask('SWE:TIME:AUTO?').lower().strip()
return r.startswith('1') or r.startswith('on')
def do_set_sweeptime_auto(self, val): #in seconds
logging.debug('Setting sweeptime auto to %s' % val)
# thunlp/OHRE
import numpy as np
from copy import deepcopy
import networkx as nx
import community
def find_close(M):
s_index, l_index = 0, 0
min_list = np.zeros([len(M)], dtype=np.float32)
min_index_list = np.zeros([len(M)], dtype=np.int32)
for i, item in enumerate(M):
if len(item):
temp_min = min(item)
min_list[i] = temp_min
min_index_list[i] = item.index(temp_min)
else:
min_list[i] = 10000
l_index = int(np.where(min_list == np.min(min_list))[0][0])
s_index = min_index_list[l_index]
return s_index, l_index # s_index < l_index
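# Worked example (not from the original code): for the lower-triangular structure
# M = [[], [3.0], [1.0, 2.0]] (where M[i][j] is the distance between clusters i and j, j < i),
# the smallest entry is M[2][0] = 1.0, so find_close(M) returns (0, 2).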
# model
def complete_HAC(dataset, HAC_dist, k, datatype=np.int32):
# initialize C and M, C is a list of clusters, M is a list as dist_matrix
print('the len of dataset to cluster is:' + str(len(dataset)))
print('initializing...')
idx_C, M, idxM = [], [], []
for i, item in enumerate(dataset):
idx_Ci = [i]
idx_C.append(idx_Ci)
print('initializing dist_matrix...')
print('preparing idx_list...')
idx_list = []
for i in range(len(idx_C)):
for j in range(len(idx_C)):
if j == i:
break
idx_list.append([i, j])
print('calculating dist_list...')
batch_count = 0
batch_size = 10000
left_data = np.zeros(list((batch_size,) + dataset[0].shape), dtype=datatype)
right_data = np.zeros(list((batch_size,) + dataset[0].shape), dtype=datatype)
dist_list = []
for count, idx_pair in enumerate(idx_list):
left_data[batch_count] = dataset[idx_pair[0]]
right_data[batch_count] = dataset[idx_pair[1]]
batch_count += 1
if batch_count == batch_size:
print('predicting', str(round(count / len(idx_list) * 100, 2)) + '%')
temp_dist_list = HAC_dist(left_data, right_data)
dist_list = dist_list + temp_dist_list.reshape(batch_size).tolist()
batch_count = 0
if batch_count != 0:
print('predicting...')
temp_dist_list = HAC_dist(left_data[:batch_count], right_data[:batch_count])
dist_list = dist_list + temp_dist_list.reshape(batch_count).tolist()
print('preparing dist_matrix...')
count = 0
for i in range(len(idx_C)):
Mi = []
for j in range(len(idx_C)):
if j == i:
break
Mi.append(dist_list[count])
count += 1
M.append(Mi)
# combine two classes
q = len(idx_C)
while q > k:
s_index, l_index = find_close(M)
idx_C[s_index].extend(idx_C[l_index])
del idx_C[l_index]
M_next = deepcopy(M[:-1])
for i in range(len(idx_C)):
for j in range(len(idx_C)):
if j == i:
break
i_old, j_old = i, j
if i >= l_index:
i_old = i + 1
if j >= l_index:
j_old = j + 1
if i != s_index and j != s_index:
M_next[i][j] = M[i_old][j_old]
elif i == s_index:
M_next[i][j] = max(M[s_index][j_old], M[l_index][j_old])
elif j == s_index:
if i_old < l_index:
M_next[i][j] = max(M[i_old][s_index], M[l_index][i_old])
elif i_old > l_index:
M_next[i][j] = max(M[i_old][s_index], M[i_old][l_index])
q -= 1
print('temp cluster num is:', q, ',', s_index, 'and', l_index, 'are combined, metric is:', M[l_index][s_index])
M = M_next
# decode to get label_list
label_list = [0] * len(dataset)
for label, temp_cluster in enumerate(idx_C):
for idx in temp_cluster:
label_list[idx] = label
return label_list, create_msg(label_list)
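# Hedged usage sketch for complete_HAC. The real HAC_dist is expected to be a batched
# model prediction returning a numpy array of pairwise distances; the toy L1 distance
# below only illustrates the calling convention (create_msg is assumed to be defined
# elsewhere in this module).
#
#   def l1_dist(left, right):
#       # left/right: (batch, *instance_shape) -> (batch,) distances
#       return np.abs(left - right).reshape(len(left), -1).sum(axis=1)
#
#   data = [np.array([0]), np.array([1]), np.array([10])]
#   labels, msg = complete_HAC(data, l1_dist, k=2, datatype=np.float32)
#   # labels -> [0, 0, 1]: the two close points are merged, [10] stays separate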
def Louvain(dataset, edge_measure, datatype=np.int32):
print('initializing the graph...')
g = nx.Graph()
g.add_nodes_from(np.arange(len(dataset)).tolist())
print('preparing idx_list...')
idx_list = []
for i in range(len(dataset)):
for j in range(len(dataset)):
if j == i:
break
idx_list.append((i, j))
print('calculating edges...')
batch_count = 0
batch_size = 10000
left_data = np.zeros(list((batch_size,) + dataset[0].shape), dtype=datatype)
right_data = np.zeros(list((batch_size,) + dataset[0].shape), dtype=datatype)
edge_list = []
for count, idx_pair in enumerate(idx_list):
left_data[batch_count] = dataset[idx_pair[0]]
right_data[batch_count] = dataset[idx_pair[1]]
batch_count += 1
if batch_count == batch_size:
print('predicting...', str(round(count / len(idx_list) * 100, 2)) + '%')
temp_edge_list = edge_measure(left_data, right_data)
edge_list = edge_list + temp_edge_list.reshape(batch_size).tolist()
batch_count = 0
if batch_count != 0:
print('predicting...')
temp_edge_list = edge_measure(left_data[:batch_count], right_data[:batch_count])
edge_list = edge_list + temp_edge_list.reshape(batch_count).tolist()
edge_list = np.int32(np.round(edge_list))
print('adding edges...')
true_edge_list = []
for i in range(len(idx_list)):
if edge_list[i] == 0:
true_edge_list.append(idx_list[i])
g.add_edges_from(true_edge_list)
print('Clustering...')
partition = community.best_partition(g)
# decode to get label_list
print('decoding to get label_list...')
label_list = [0] * len(dataset)
for key in partition:
label_list[key] = partition[key]
return label_list, create_msg(label_list)
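# Hedged usage sketch for Louvain. edge_measure is expected to return, for two batches
# of instances, an array whose rounded value is 0 when the pair should be linked and
# 1 otherwise; the toy measure below only illustrates the calling convention.
#
#   def same_sign_measure(left, right):
#       # 0 -> connect the pair, 1 -> leave unconnected
#       return (np.sign(left) != np.sign(right)).astype(np.float32).reshape(-1)
#
#   data = [np.array([1]), np.array([2]), np.array([-3])]
#   labels, msg = Louvain(data, same_sign_measure)
#   # the two positive instances end up in one community, [-3] in another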
def Louvain_no_isolation(dataset, edge_measure, datatype=np.int32, iso_thres=5, weighted=False):
# print('initializing the graph...')
g = nx.Graph()
g.add_nodes_from(np.arange(len(dataset)).tolist())
# print('preparing idx_list...')
idx_list = []
for i in range(len(dataset)):
for j in range(len(dataset)):
if j == i:
break
idx_list.append((i, j))
# print('calculating edges...')
batch_count = 0
print_count = 0
batch_size = 12000
left_data = np.zeros(list((batch_size,) + dataset[0].shape), dtype=datatype)
right_data = np.zeros(list((batch_size,) + dataset[0].shape), dtype=datatype)
edge_list = []
for count, idx_pair in enumerate(idx_list):
left_data[batch_count] = dataset[idx_pair[0]]
right_data[batch_count] = dataset[idx_pair[1]]
batch_count += 1
if batch_count == batch_size:
temp_edge_list, a, b = edge_measure(left_data, right_data)
# print(temp_edge_list)
edge_list = edge_list + temp_edge_list.reshape(batch_size).tolist()
batch_count = 0
print_count += 1
# if print_count % 100 == 0:
# print('predicting...', str(round(count / len(idx_list) * 100, 2)) + '%')
if batch_count != 0:
# print('predicting...')
temp_edge_list, a, b = edge_measure(left_data[:batch_count], right_data[:batch_count])
edge_list = edge_list + temp_edge_list.reshape(batch_count).tolist()
simi_list, edge_list = np.array(edge_list), np.array(edge_list)
simi_list[simi_list > 1] = 1
edge_list[edge_list > 1] = 1
if not weighted:
edge_list[edge_list >= 0.5] = 1
edge_list[edge_list < 0.5] = 0
edge_list = edge_list.tolist()
simi_list = simi_list.tolist()
# ------------------
# print('forming simi_matrix...')
simi_matrix = np.zeros([len(dataset), len(dataset)])
for count, idx_pair in enumerate(idx_list):
simi_matrix[idx_pair[0], idx_pair[1]] = simi_list[count]
simi_matrix[idx_pair[1], idx_pair[0]] = simi_list[count]
# ------------------
# print('adding edges...')
true_edge_list = []
for i in range(len(idx_list)):
if not weighted:
if edge_list[i] == 0:
true_edge_list.append(idx_list[i])
else:
if edge_list[i] < 0.5:
true_edge_list.append((idx_list[i][0], idx_list[i][1], {'weight': 1 - edge_list[i]}))
g.add_edges_from(true_edge_list)
# print('Clustering...')
partition = community.best_partition(g)
# decode to get label_list
# print('decoding to get label_list...')
label_list = [0] * len(dataset)
for key in partition:
label_list[key] = partition[key]
# ------------------
# print('solving isolation...')
cluster_datanum_dict = {}
for reltype in label_list:
if reltype in cluster_datanum_dict.keys():
cluster_datanum_dict[reltype] += 1
else:
cluster_datanum_dict[reltype] = 1
iso_reltype_list = []
for reltype in cluster_datanum_dict:
if cluster_datanum_dict[reltype] <= iso_thres:
iso_reltype_list.append(reltype)
for point_idx, reltype in enumerate(label_list):
if reltype in iso_reltype_list:
search_idx_list = np.argsort(simi_matrix[point_idx]) # from small to big
for idx in search_idx_list:
if label_list[idx] not in iso_reltype_list:
label_list[point_idx] = label_list[idx]
break
# ------------------
return label_list, create_msg(label_list)
def Hierarchical_Louvain(train_dataset: list, test_dataset: list, trainset_relids, edge_measure,
get_two_relation_distance, datatype=np.int32, iso_thres=5, weighted=True):
"""
:param train_dataset: all data instances in train set.
:param test_dataset: all data instances in test set.
:param trainset_relids: the relation ids in trainset. the length is equal to train_dataset.
:param edge_measure: function to calculate similarity between two instances.
:param get_two_relation_distance: function to get distance of two relation.
:param datatype:
:param iso_thres: least num of instances to support a cluster
:param weighted: if similarity is only 0 or 1, or weighted.
:return: hierarchy info
"""
print('initializing the graph...')
g = nx.Graph()
train_and_test_dataset = train_dataset.copy() + test_dataset.copy()
# train set instance similarities are the shortest path related metric.
# test set instance similarities are calculated based on the RSN model.
g.add_nodes_from(np.arange(len(train_and_test_dataset)).tolist())
print('preparing idx_list...')
idx_list = []
for i in range(len(train_and_test_dataset)):
# the similarity of trainset is certain.
left_boundary = max(len(train_dataset), i + 1)
for j in range(left_boundary, len(train_and_test_dataset)):
idx_list.append((i, j))
train_idx_list = []
for i in range(len(train_dataset)):
for j in range(i + 1, len(train_dataset)):
train_idx_list.append((i, j))
print("Forming distance of trainset...")
train_edge_list = []
for count, trainset_pairs in enumerate(train_idx_list):
instance1, instance2 = trainset_pairs
rel1, rel2 = trainset_relids[instance1], trainset_relids[instance2]
train_edge_list.append(get_two_relation_distance(rel1, rel2))
print('calculating edges...')
batch_count = 0
batch_size = 5000
left_data = np.zeros(list((batch_size,) + train_and_test_dataset[0].shape), dtype=datatype)
right_data = np.zeros(list((batch_size,) + train_and_test_dataset[0].shape), dtype=datatype)
# only for test
# left_data = np.zeros(list((batch_size,) + (1,)), dtype=datatype)
# right_data = np.zeros(list((batch_size,) + (1,)), dtype=datatype)
edge_list = []
for count, idx_pair in enumerate(idx_list): # note this is the similarity need to calculated.
left_data[batch_count] = train_and_test_dataset[idx_pair[0]]
right_data[batch_count] = train_and_test_dataset[idx_pair[1]]
batch_count += 1
if batch_count == batch_size:
print('predicting...', str(round(count / len(idx_list) * 100, 2)) + '%')
temp_edge_list, a, b = edge_measure(left_data, right_data)
# print(temp_edge_list)
edge_list = edge_list + temp_edge_list.reshape(batch_size).tolist()
batch_count = 0
if batch_count != 0:
print('predicting...')
temp_edge_list, a, b = edge_measure(left_data[:batch_count], right_data[:batch_count])
edge_list = edge_list + temp_edge_list.reshape(batch_count).tolist()
distance_list, edge_list = np.array(edge_list), np.array(edge_list)
distance_list[distance_list > 1] = 1
edge_list[edge_list > 1] = 1
if not weighted:
edge_list[edge_list >= 0.5] = 1
edge_list[edge_list < 0.5] = 0
edge_list = edge_list.tolist()
distance_list = distance_list.tolist()
# ------------------
print('forming simi_matrix...')
simi_matrix = np.zeros([len(train_and_test_dataset), len(train_and_test_dataset)])
for count, idx_pair in enumerate(idx_list):
simi_matrix[idx_pair[0], idx_pair[1]] = distance_list[count]
simi_matrix[idx_pair[1], idx_pair[0]] = distance_list[count]
for count, idx_pair in enumerate(train_idx_list):
simi_matrix[idx_pair[0], idx_pair[1]] = train_edge_list[count]
simi_matrix[idx_pair[1], idx_pair[0]] = train_edge_list[count]
# ------------------
print('adding edges...')
true_edge_list = []
for i in range(len(idx_list)):
if not weighted:
if edge_list[i] == 0:
true_edge_list.append(idx_list[i])
else:
if edge_list[i] < 0.5:
true_edge_list.append((idx_list[i][0], idx_list[i][1], {'weight': 1 - edge_list[i]}))
# add train edges.
for i in range(len(train_idx_list)):
if train_edge_list[i] < 1:
true_edge_list.append((train_idx_list[i][0], train_idx_list[i][1], {'weight': 1 - train_edge_list[i]}))
g.add_edges_from(true_edge_list)
print('Clustering...')
# list of clustering hierarchy.
dendrogram = community.generate_dendrogram(g)
new_dendrogram = []
# the lowest level, do the isolation thing, etc.
# lowest_partition =
# This changes hyphen to + to match width of math mode minus sign.
if param:
text = text.replace('-', '+')
f = 0 if fontsize == self._style['fs'] else 1
sum_text = 0.0
for c in text:
try:
sum_text += self._char_list[c][f]
except KeyError:
# if non-ASCII char, use width of 'c', an average size
sum_text += self._char_list['c'][f]
if f == 1:
sum_text *= self._subfont_factor
return sum_text
def _param_parse(self, v):
param_parts = [None] * len(v)
for i, e in enumerate(v):
try:
param_parts[i] = pi_check(e, output='mpl', ndigits=3)
except TypeError:
param_parts[i] = str(e)
param_parts = ', '.join(param_parts).replace('-', '$-$')
return param_parts
def _get_gate_ctrl_text(self, op):
op_label = getattr(op.op, 'label', None)
base_name = None if not hasattr(op.op, 'base_gate') else op.op.base_gate.name
base_label = None if not hasattr(op.op, 'base_gate') else op.op.base_gate.label
ctrl_text = None
if base_label:
gate_text = base_label
ctrl_text = op_label
elif op_label and isinstance(op.op, ControlledGate):
gate_text = base_name
ctrl_text = op_label
elif op_label:
gate_text = op_label
elif base_name:
gate_text = base_name
else:
gate_text = op.name
if gate_text in self._style['disptex']:
gate_text = "{}".format(self._style['disptex'][gate_text])
else:
gate_text = "{}".format(gate_text[0].upper() + gate_text[1:])
if ctrl_text:
ctrl_text = "{}".format(ctrl_text[0].upper() + ctrl_text[1:])
return gate_text, ctrl_text
def _get_colors(self, op):
base_name = None if not hasattr(op.op, 'base_gate') else op.op.base_gate.name
if op.name in self._style['dispcol']:
color = self._style['dispcol'][op.name]
# Backward compatibility for style dict using 'displaycolor' with
# gate color and no text color, so test for str first
if isinstance(color, str):
fc = color
gt = self._style['gt']
else:
fc = color[0]
gt = color[1]
# Treat special case of classical gates in iqx style by making all
# controlled gates of x, dcx, and swap the classical gate color
elif self._style['name'] == 'iqx' and base_name in ['x', 'dcx', 'swap']:
color = self._style['dispcol'][base_name]
if isinstance(color, str):
fc = color
gt = self._style['gt']
else:
fc = color[0]
gt = color[1]
else:
fc = self._style['gc']
gt = self._style['gt']
if self._style['name'] == 'bw':
ec = self._style['ec']
lc = self._style['lc']
else:
ec = fc
lc = fc
# Subtext needs to be same color as gate text
sc = gt
return fc, ec, gt, self._style['tc'], sc, lc
def _multiqubit_gate(self, xy, fc=None, ec=None, gt=None, sc=None, text='', subtext=''):
xpos = min([x[0] for x in xy])
ypos = min([y[1] for y in xy])
ypos_max = max([y[1] for y in xy])
fs = self._style['fs']
sfs = self._style['sfs']
# added .21 is for qubit numbers on the left side
text_width = self._get_text_width(text, fs) + .21
sub_width = self._get_text_width(subtext, sfs, param=True) + .21
wid = max((text_width, sub_width, WID))
qubit_span = abs(ypos) - abs(ypos_max) + 1
height = HIG + (qubit_span - 1)
box = patches.Rectangle(
xy=(xpos - 0.5 * wid, ypos - .5 * HIG), width=wid, height=height,
fc=fc, ec=ec, linewidth=self._lwidth15, zorder=PORDER_GATE)
self._ax.add_patch(box)
# annotate inputs
for bit, y in enumerate([x[1] for x in xy]):
self._ax.text(xpos + .07 - 0.5 * wid, y, str(bit), ha='left', va='center',
fontsize=fs, color=gt,
clip_on=True, zorder=PORDER_TEXT)
if text:
if subtext:
self._ax.text(xpos + .11, ypos + 0.4 * height, text, ha='center',
va='center', fontsize=fs,
color=gt, clip_on=True,
zorder=PORDER_TEXT)
self._ax.text(xpos + .11, ypos + 0.2 * height, subtext, ha='center',
va='center', fontsize=sfs,
color=sc, clip_on=True,
zorder=PORDER_TEXT)
else:
self._ax.text(xpos + .11, ypos + .5 * (qubit_span - 1), text,
ha='center', va='center', fontsize=fs,
color=gt, clip_on=True,
zorder=PORDER_TEXT, wrap=True)
def _gate(self, xy, fc=None, ec=None, gt=None, sc=None, text='', subtext=''):
xpos, ypos = xy
fs = self._style['fs']
sfs = self._style['sfs']
text_width = self._get_text_width(text, fs)
sub_width = self._get_text_width(subtext, sfs, param=True)
wid = max((text_width, sub_width, WID))
box = patches.Rectangle(xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG),
width=wid, height=HIG, fc=fc, ec=ec,
linewidth=self._lwidth15, zorder=PORDER_GATE)
self._ax.add_patch(box)
if text:
if subtext:
self._ax.text(xpos, ypos + 0.15 * HIG, text, ha='center',
va='center', fontsize=fs, color=gt,
clip_on=True, zorder=PORDER_TEXT)
self._ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center',
va='center', fontsize=sfs, color=sc,
clip_on=True, zorder=PORDER_TEXT)
else:
self._ax.text(xpos, ypos, text, ha='center', va='center',
fontsize=fs, color=gt,
clip_on=True, zorder=PORDER_TEXT)
def _sidetext(self, xy, tc=None, text=''):
xpos, ypos = xy
# 0.08 = the initial gap, add 1/2 text width to place on the right
text_width = self._get_text_width(text, self._style['sfs'])
xp = xpos + 0.08 + text_width / 2
self._ax.text(xp, ypos + HIG, text, ha='center', va='top',
fontsize=self._style['sfs'], color=tc,
clip_on=True, zorder=PORDER_TEXT)
def _line(self, xy0, xy1, lc=None, ls=None, zorder=PORDER_LINE):
x0, y0 = xy0
x1, y1 = xy1
linecolor = self._style['lc'] if lc is None else lc
linestyle = 'solid' if ls is None else ls
if linestyle == 'doublet':
theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0))
dx = 0.05 * WID * np.cos(theta)
dy = 0.05 * WID * np.sin(theta)
self._ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy],
color=linecolor, linewidth=self._lwidth2,
linestyle='solid', zorder=zorder)
self._ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy],
color=linecolor, linewidth=self._lwidth2,
linestyle='solid', zorder=zorder)
else:
self._ax.plot([x0, x1], [y0, y1],
color=linecolor, linewidth=self._lwidth2,
linestyle=linestyle, zorder=zorder)
def _measure(self, qxy, cxy, cid, fc=None, ec=None, gt=None, sc=None):
qx, qy = qxy
cx, cy = cxy
# draw gate box
self._gate(qxy, fc=fc, ec=ec, gt=gt, sc=sc)
# add measure symbol
arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7,
height=HIG * 0.7, theta1=0, theta2=180, fill=False,
ec=gt, linewidth=self._lwidth2, zorder=PORDER_GATE)
self._ax.add_patch(arc)
self._ax.plot([qx, qx + 0.35 * WID], [qy - 0.15 * HIG, qy + 0.20 * HIG],
color=gt, linewidth=self._lwidth2, zorder=PORDER_GATE)
# arrow
self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style['cc'], ls=self._style['cline'])
arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID),
(cx + 0.20 * WID, cy + 0.35 * WID),
(cx, cy + 0.04)), fc=self._style['cc'], ec=None)
self._ax.add_artist(arrowhead)
# target
if self._cregbundle:
self._ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom',
fontsize=0.8 * self._style['fs'], color=self._style['tc'],
clip_on=True, zorder=PORDER_TEXT)
def _conditional(self, xy, istrue=False):
xpos, ypos = xy
fc = self._style['lc'] if istrue else self._style['bg']
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15, fc=fc,
ec=self._style['lc'], linewidth=self._lwidth15, zorder=PORDER_GATE)
self._ax.add_patch(box)
def _ctrl_qubit(self, xy, fc=None, ec=None, tc=None, text='', text_top=None):
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=fc, ec=ec, linewidth=self._lwidth15, zorder=PORDER_GATE)
self._ax.add_patch(box)
# display the control label at the top or bottom if there is one
if text_top is True:
self._ax.text(xpos, ypos + 0.7 * HIG, text, ha='center', va='top',
fontsize=self._style['sfs'], color=tc,
clip_on=True, zorder=PORDER_TEXT)
elif text_top is False:
self._ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top',
fontsize=self._style['sfs'], color=tc,
clip_on=True, zorder=PORDER_TEXT)
def _set_ctrl_bits(self, ctrl_state, num_ctrl_qubits, qbit, ec=None, tc=None,
text='', qargs=None):
# place the control label at the top or bottom of controls
if text:
qlist = [qubit.index for qubit in qargs]
ctbits = qlist[:num_ctrl_qubits]
qubits = qlist[num_ctrl_qubits:]
max_ctbit = max(ctbits)
min_ctbit = min(ctbits)
top = min(qubits) > min_ctbit
# display the control qubits as open or closed based on ctrl_state
cstate = "{:b}".format(ctrl_state).rjust(num_ctrl_qubits, '0')[::-1]
for i in range(num_ctrl_qubits):
fc_open_close = ec if cstate[i] == '1' else self._style['bg']
text_top = None
if text:
if top and qlist[i] == min_ctbit:
text_top = True
elif not top and qlist[i] == max_ctbit:
text_top = False
self._ctrl_qubit(qbit[i], fc=fc_open_close, ec=ec, tc=tc,
text=text, text_top=text_top)
def _x_tgt_qubit(self, xy, ec=None, ac=None):
linewidth = self._lwidth2
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35,
fc=ec, ec=ec, linewidth=linewidth,
zorder=PORDER_GATE)
self._ax.add_patch(box)
# add '+' symbol
self._ax.plot([xpos, xpos], [ypos - 0.2 * HIG, ypos + 0.2 * HIG],
color=ac, linewidth=linewidth, zorder=PORDER_GATE + 1)
self._ax.plot([xpos - 0.2 * HIG, xpos + 0.2 * HIG], [ypos, ypos],
color=ac, linewidth=linewidth, zorder=PORDER_GATE + 1)
def _swap(self, xy, color=None):
xpos, ypos = xy
self._ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos - 0.20 * WID, ypos + 0.20 * WID],
color=color, linewidth=self._lwidth2, zorder=PORDER_LINE + 1)
self._ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos + 0.20 * WID, ypos - 0.20 * WID],
color=color, linewidth=self._lwidth2, zorder=PORDER_LINE + 1)
def _barrier(self, config):
xys = config['coord']
for xy in xys:
xpos, ypos = xy
self._ax.plot([xpos, xpos], [ypos + 0.5, ypos - 0.5],
linewidth=self._scale, linestyle="dashed",
color=self._style['lc'], zorder=PORDER_TEXT)
box = patches.Rectangle(xy=(xpos - (0.3 * WID), ypos - 0.5),
width=0.6 * WID, height=1,
fc=self._style['bc'], ec=None, alpha=0.6,
linewidth=self._lwidth15, zorder=PORDER_GRAY)
self._ax.add_patch(box)
def draw(self, filename=None, verbose=False):
"""Draw method called from circuit_drawer"""
self._draw_regs()
self._draw_ops(verbose)
_xl = - self._style['margin'][0]
_xr = self._xmax + self._style['margin'][1]
_yb = - self._ymax - self._style['margin'][2] +
import numpy as np
import scipy
import scipy.io
import pickle
from scipy.special import lpmv, spherical_jn, spherical_yn
class Directivity:
def __init__(self, data_path, rho0, c0, freq_vec, simulated_ir_duration, measurement_radius, sh_order, type, sample_rate=44100, **kwargs):
'''
This script encodes the measured impulse responses, representing the
directivity of a source, into spherical harmonic coefficients
data_path -> path that leads to the .mat file that contains the source data
file_name (argument of encode_directivity) -> string used as the name of the file where the solution is saved
rho0 -> air density
c0 -> speed of sound
simulated_ir_duration -> length of simulation [s]
measurement_radius -> distance from source to measurement positions [m]
existing_pre_delay -> delay before direct sound arrives as provided in GRAS dataset [samples]
'''
self.data_path = data_path
self.rho0 = rho0
self.c0 = c0
self.freq_vec = freq_vec
self.simulated_ir_duration = simulated_ir_duration
self.measurement_radius = measurement_radius
self.sh_order = sh_order
self.type = type
self.sample_rate = sample_rate
try:
self.existing_pre_delay = kwargs["existing_pre_delay"]
except:
pass
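# Hedged construction sketch (paths and numeric values are placeholders, not taken
# from the original project): the GRAS-style source data requires the
# existing_pre_delay keyword, and encode_directivity pickles the result.
#
#   freqs = np.array([125., 250., 500., 1000.])
#   src = Directivity(data_path='data/source_IRs.mat', rho0=1.21, c0=343.,
#                     freq_vec=freqs, simulated_ir_duration=1,
#                     measurement_radius=2.0, sh_order=4, type="source",
#                     existing_pre_delay=100)
#   src.encode_directivity('source_sh_coefficients')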
def encode_directivity (self, file_name):
self.file_name = file_name
# Derived parameters:
nfft = self.sample_rate*self.simulated_ir_duration # Number of FFT points
f_list = self.sample_rate*np.arange(nfft)/nfft # List of FFT frequencies
fi_lim_lo = np.argmin(np.abs(f_list - self.freq_vec[0])) # FFT bins above which to encode
fi_lim_hi = np.argmin(np.abs(f_list - self.freq_vec[-1])) # FFT bins below which to encode (Hz)
f_list = f_list[fi_lim_lo:fi_lim_hi + 1] # Only retain frequencies to be encoded
if self.type == "source":
## Load and adjust meaured impulse responses:
# Load measured impulse responses:
print ('Loading source data. This may be computationally costly...')
source_data = scipy.io.loadmat(self.data_path) # loads variables IR, Phi, Theta
ir = np.array (source_data['IR'])
# Convert measurement angles from degrees to radians & ensure are column vectors
beta = np.array(source_data['Theta']) * np.pi/180
beta = beta.reshape((np.size(beta), 1))
alpha = np.array(source_data['Phi']) * np.pi/180
alpha = alpha.reshape((np.size(alpha), 1))
del source_data
# Correct initial time delay for measurement distance and window:
desired_pre_delay = round(self.sample_rate * self.measurement_radius / self.c0) # Delay before direct sound arrives based on measurement radius (#samples)
half_window = np.concatenate((np.array(0.5-0.5*np.cos(np.pi*np.linspace(0, 1, self.existing_pre_delay))).conj().T, np.array(np.ones((np.size(ir,0) - self.existing_pre_delay))))) # Rising window
half_window = half_window.reshape((np.size(half_window), 1))
ir = np.concatenate((np.zeros((desired_pre_delay - self.existing_pre_delay, np.size(ir,1))), np.multiply(ir, half_window)))
half_window = np.concatenate((np.ones((np.ceil(np.size(ir,0)/2).astype(int),1)), 0.5+0.5*np.cos(np.pi*np.linspace(0, 1, np.floor(np.size(ir,0)/2).astype(int)).conj().T.reshape(np.floor(np.size(ir,0)/2).astype(int),1)))) # Falling window
ir = np.multiply(ir, half_window);
# Derived parameters:
num_meas = np.size(ir,1); # Number of measurement points
## Fourier transform the impulse responses:
# Loop over measurement points and FFT:
phi_meas = np.zeros((fi_lim_hi-fi_lim_lo+1, num_meas), dtype = np.complex128)
print ('Computing FFTs')
for iMeas in range(num_meas):
fft_ir = np.conj(np.fft.fft(ir[:,iMeas], n = nfft)) # conj used because project uses exp(-1i*w*t) Fourier Transform
phi_meas[:,iMeas] = np.array([fft_ir[fi_lim_lo:fi_lim_hi + 1]]) # Only retain frequencies to be encoded
del ir, fft_ir, iMeas, fi_lim_lo, fi_lim_hi
# Transpose to optimise memory access for encoding step:
print ('Transposing transfer function array...')
phi_meas = np.transpose(phi_meas)
print ('Complete.')
## Encoding:
# Create weighting vector:
w = np.pi/180*(np.cos(beta-np.pi/360)-np.cos(beta+np.pi/360));
w[0] = 2*np.pi*(1-np.cos(np.pi/360));
w[-1] = w[0];
w = w.reshape((np.size(w), ))
print ('Weight addition error = %s.' % abs(np.sum(w) - (4*np.pi)))
# Pre-calculate spherical harmonic functions:
y_nm, dy_dbeta, dy_dalpha = spherical_harmonic_all(self.sh_order, alpha,beta);
# Loop over frequency:
self.sh_coefficients = []
i = 0
for fi, f in enumerate (f_list):
if f == self.freq_vec[i]:
# Calculate spherical Hankel functions:
hnOut = np.zeros(((self.sh_order+1)**2, 1), dtype = np.complex128);
for n in np.arange(self.sh_order+1):
for m in np.arange(-n, n + 1):
hnOut[sub2indSH(m,n),0] = spherical_hankel_out(n, self.measurement_radius*2*np.pi*f/self.c0)
# Calculate b_nm coefficients via a mode-matching approach (Eq. 9 in paper):
sh_coefficients_f = np.matmul(y_nm.conj().T, np.transpose(np.divide(np.multiply(w, phi_meas[:,fi]), hnOut)))
sh_coefficients_f = np.diagonal(sh_coefficients_f)
self.sh_coefficients.append(sh_coefficients_f)
i+=1
elif self.type == "receiver":
# Load measured impulse responses:
print ('Loading receiver directionality data. This may be computationally costly...')
receiver_data = scipy.io.loadmat(self.data_path) # loads variables HRIR_R,HRIR_L, Phi, Theta
hrir_l = np.array(receiver_data['HRIR_L'])
hrir_r = np.array(receiver_data['HRIR_R'])
azimuth = np.array(receiver_data['azimuth'])
azimuth = azimuth.reshape((np.size(azimuth), 1))
elevation = np.array(receiver_data['elevation'])
elevation = elevation.reshape((np.size(elevation), 1))
# Convert measurement angles from degrees to radians, and from elevation to polar
alpha = np.multiply(np.divide(azimuth, 360), (2*np.pi))
beta = np.multiply(np.divide(np.subtract(90, elevation), 360), (2*np.pi))
del receiver_data, azimuth, elevation
# Derived parameters:
ir_length = np.size(hrir_l, 0) # Length of recorded impulse response (#samples)
num_meas = np.size(hrir_l, 1) # Number of measurement points
## Fourier transform the impulse responses - left:
# The IR is windowed with a half-Hanning window applied to its last 25%, to
# avoid a wrap-around discontinuity, and is then zero-padded to achieve the
# required frequency resolution.
half_window = np.concatenate((np.ones((np.ceil(ir_length/2).astype(int),1)), np.array(0.5+0.5*np.cos(np.pi*np.linspace(0, 1, np.floor(ir_length/2).astype(int)).conj().T)).reshape((np.size(np.linspace(0, 1, np.floor(ir_length/2).astype(int))), 1))))
half_window = half_window.reshape((np.size(half_window), ))
## Encoding:
# Pre-calculate spherical harmonic functions:
y_nm, dy_dbeta, dy_dalpha = spherical_harmonic_all(self.sh_order, alpha, beta)
# Pre-calculate spherical Hankel functions:
hnOut = np.zeros(((self.sh_order+1)**2, np.size(f_list)), dtype = np.complex128)
for fi, f in enumerate(f_list):
for n in np.arange(self.sh_order + 1):
for m in np.arange(-n, n + 1):
hnOut[sub2indSH(m,n),fi] = spherical_hankel_out(n, self.measurement_radius*2*np.pi*f/self.c0)
# Loop over measurement points and FFT - left:
hrtf = np.zeros((fi_lim_hi-fi_lim_lo+1, num_meas), dtype = np.complex128)
for i_meas in range(num_meas):
fft_hrir = np.conj(np.fft.fft(np.multiply(half_window, hrir_l[:,i_meas]), n = nfft)) # conj used because project uses exp(-1i*w*t) Fourier Transform
hrtf[:,i_meas] = np.array([fft_hrir[fi_lim_lo:fi_lim_hi + 1]]) # Only retain frequencies to be encoded
del hrir_l, fft_hrir, i_meas
# Transpose to optimise memory access for encoding step:
print('\tTransposing transfer function array...')
hrtf = np.transpose(hrtf)
print('Complete.\n')
# Loop over frequency - left:
self.sh_coefficients_left = []
i = 0
for fi, f in enumerate (f_list):
if f == self.freq_vec[i]:
# Calculate Lnm coefficients by a least-squares fit approach:
A = np.multiply(4*np.pi*np.transpose(hnOut[:,fi])/hnOut[0,fi], np.conj(y_nm))
sh_coefficients_left_f = np.linalg.lstsq (A, hrtf[:,fi])
self.sh_coefficients_left.append(sh_coefficients_left_f[0])
i+=1
# Loop over measurement points and FFT - right:
hrtf = np.zeros((fi_lim_hi-fi_lim_lo+1, num_meas), dtype = np.complex128)
for i_meas in range(num_meas):
fft_hrir = np.conj(np.fft.fft(np.multiply(half_window, hrir_r[:,i_meas]), n = nfft)) # conj used because project uses exp(-1i*w*t) Fourier Transform
hrtf[:,i_meas] = np.array([fft_hrir[fi_lim_lo:fi_lim_hi + 1]]) # Only retain frequencies to be encoded
del hrir_r, fft_hrir, i_meas
# Transpose to optimise memory access for encoding right:
print('\tTransposing transfer function array...')
hrtf = np.transpose(hrtf)
print('Complete.\n')
# Loop over frequency - right:
self.sh_coefficients_right = []
i = 0
for fi, f in enumerate (f_list):
if f == self.freq_vec[i]:
# Calculate Lnm coefficients by a least-squares fit approach:
A = np.multiply(4*np.pi*np.transpose(hnOut[:,fi])/hnOut[0,fi], np.conj(y_nm))
sh_coefficients_right_f = np.linalg.lstsq (A, hrtf[:,fi])
self.sh_coefficients_right.append(sh_coefficients_right_f[0])
i+=1
else:
raise ValueError("Type is not valid. It must be source or receiver.")
save_name = "%s.pickle" % self.file_name
pickle_obj = open(save_name, "wb")
pickle.dump(self, pickle_obj)
pickle_obj.close()
print ("Saved results to %s.pickle" % self.file_name)
#### Functions #####
def sub2indSH (m,n):
"""
i = sub2indSH(m,n)
Convert Spherical Harmonic (m,n) indices to array index i
Assumes that i iterates from 0 (Python style)
"""
i = n**2 + n + m
return i
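# Worked example: for n = 1, the orders m = -1, 0, +1 map to indices
# 1**2 + 1 + m = 1, 2 and 3; the single n = 0 term sits at index 0.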
def spherical_harmonic_all (max_order, alpha, beta):
"""
(y, dy_dbeta, dy_dalpha) = spherical_harmonic_all(max_order, alpha, beta)
Computes Spherical Harmonic functions and their angular derivatives for
all (m,n) up to the given maximum order. The algorithm is equivalent to that
implemented in SphericalHarmonic, but this version avoids repeated calls
to lpmv, since that is very time consuming.
Arguments:
alpha is azimuth angle (angle in radians from the positive x axis, with
rotation around the positive z axis according to the right-hand screw rule)
beta is polar angle in radians
max_order is the maximum Spherical Harmonic order and should be a non-negative integer scalar
Returned arrays have shape (np.size(alpha), (max_order+1)**2).
"""
cosbeta = np.cos(beta)
sinbeta = np.sin(beta)
# Preallocate output arrays:
y = np.zeros((np.size(alpha),(max_order+1)**2), np.complex128)
dy_dbeta = np.zeros((np.size(alpha),(max_order+1)**2), np.complex128)
dy_dalpha = np.zeros((np.size(alpha),(max_order+1)**2), np.complex128)
#% Loop over n and | |
#!/usr/bin/env python3
#
# Script to manage S3-stored backups
#
# Copyright (c) 2009-2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import optparse
import os
import pwd
import secrets
import sys
import time
from boto.s3.connection import S3Connection
from collections import defaultdict
from math import log10
from subprocess import Popen
class BackupManager:
def __init__(self, accesskey, sharedkey):
self._accesskey = accesskey
self._connection = S3Connection(accesskey, sharedkey)
self._buckets = None
self._bucketbackups = {}
self._backups = None
def _generate_backup_buckets(self):
bucket_prefix = self._accesskey.lower() + '-bkup-'
buckets = self._connection.get_all_buckets()
self._buckets = []
for bucket in buckets:
if bucket.name.startswith(bucket_prefix):
self._buckets.append(bucket)
@property
def backup_buckets(self): # property
if self._buckets is None:
self._generate_backup_buckets()
return self._buckets
def _list_backups(self, bucket):
"""Returns a dict of backups in a bucket, with dicts of:
{hostname (str):
{Backup number (int):
{'date': Timestamp of backup (int),
'keys': A list of keys comprising the backup,
'hostname': Hostname (str),
'backupnum': Backup number (int),
'finalized': 0, or the timestamp the backup was finalized
}
}
}
"""
backups = {}
for key in bucket.list():
keyparts = key.key.split('.')
final = False
if keyparts[-1] == 'COMPLETE':
final = True
keyparts.pop() # back to tar
keyparts.pop() # back to backup number
else:
if keyparts[-1] == 'gpg':
keyparts.pop()
if keyparts[-1] != 'tar' and len(keyparts[-1]) == 2:
keyparts.pop()
if keyparts[-1] == 'tar':
keyparts.pop()
nextpart = keyparts.pop()
if nextpart == 'COMPLETE':
print(("Stray file: %s" % key.key))
continue
backupnum = int(nextpart)
hostname = '.'.join(keyparts)
lastmod = time.strptime(key.last_modified,
'%Y-%m-%dT%H:%M:%S.000Z')
if hostname in list(backups.keys()):
if not backupnum in list(backups[hostname].keys()):
backups[hostname][backupnum] = {
'date': lastmod,
'hostname': hostname,
'backupnum': backupnum,
'finalized': 0,
'keys': [],
'finalkey': None,
'finalized_age': -1,
}
else:
backups[hostname] = {
backupnum: {
'date': lastmod,
'hostname': hostname,
'backupnum': backupnum,
'finalized': 0,
'keys': [],
'finalkey': None,
'finalized_age': -1,
}
}
if final:
backups[hostname][backupnum]['finalized'] = lastmod
backups[hostname][backupnum]['finalkey'] = key
timestamp = time.mktime(lastmod)
delta = int(time.time() - timestamp + time.timezone)
backups[hostname][backupnum]['finalized_age'] = delta
else:
if lastmod < backups[hostname][backupnum]['date']:
backups[hostname][backupnum]['date'] = lastmod
backups[hostname][backupnum]['keys'].append(key)
return backups
def get_backups_by_bucket(self, bucket):
if bucket.name not in self._bucketbackups:
self._bucketbackups[bucket.name] = self._list_backups(bucket)
return self._bucketbackups[bucket.name]
@property
def all_backups(self): # property
if self._backups is None:
sys.stderr.write("Enumerating backups")
self._backups = {}
for bucket in self.backup_buckets:
backups_dict = self.get_backups_by_bucket(bucket)
for hostname, backups in list(backups_dict.items()):
sys.stderr.write('.')
sys.stderr.flush()
if hostname not in self._backups:
self._backups[hostname] = {}
self._backups[hostname].update(backups)
sys.stderr.write("\n")
return self._backups
def invalidate_host_cache(self, hostname):
nuke = []
for bucket in self._bucketbackups:
if hostname in self._bucketbackups[bucket]:
nuke.append(bucket)
for bucket in nuke:
if bucket in self._bucketbackups:
del self._bucketbackups[bucket]
self._backups = None
@property
def backups_by_age(self): # property
"Returns a dict of {hostname: [(backupnum, age), ...]}"
results = defaultdict(list)
for hostname, backups in list(self.all_backups.items()):
for backupnum, statusdict in list(backups.items()):
results[hostname].append((backupnum,
statusdict['finalized_age']))
return results
def choose_host_to_backup(agedict, target_count=2):
"Takes a dict from backups_by_age, returns a hostname to back up."
host_scores = defaultdict(int)
for hostname, backuplist in list(agedict.items()):
bl = sorted(backuplist, key=lambda x: x[1])
if len(bl) > 0 and bl[0][1] == -1:
# unfinalized backup alert
host_scores[hostname] += 200
bl.pop(0)
if len(bl) >= target_count:
host_scores[hostname] -= 100
host_scores[hostname] -= len(bl)
if len(bl) > 0:
# age of oldest backup helps score
oldest = bl[0]
host_scores[hostname] += log10(oldest[1])
# recency of newest backup hurts score
newest = bl[-1]
host_scores[hostname] -= log10(max(1, (oldest[1] - newest[1])))
for candidate, score in sorted(list(host_scores.items()),
key=lambda x: x[1], reverse=True):
yield (candidate, score)
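# Illustrative usage (hypothetical; the real credentials come from the secrets module, as in main()):
#   bmgr = BackupManager(secrets.accesskey, secrets.sharedkey)
#   for hostname, score in choose_host_to_backup(bmgr.backups_by_age):
#       print(hostname, score)   # best backup candidates are yielded first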
def choose_backups_to_delete(agedict, target_count=2, max_age=30):
"Takes a dict from backups_by_age, returns a list of backups to delete"
decimate = defaultdict(list)
for hostname, backuplist in list(agedict.items()):
bl = []
for backup in sorted(backuplist, key=lambda x: x[1]):
if backup[1] > 0:
bl.append(backup)
while len(bl) > target_count:
backup = bl.pop()
if backup[1] > (max_age * 24 * 60 * 60):
decimate[hostname].append(backup)
return decimate
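# Illustrative usage (hypothetical; shows the shape of the returned dict only):
#   doomed = choose_backups_to_delete(bmgr.backups_by_age, target_count=2, max_age=30)
#   for hostname, backups in doomed.items():
#       print(hostname, [b[0] for b in backups])   # backup numbers eligible for deletion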
def iter_urls(keyset, expire=86400):
"""Given a list of keys and an optional expiration time (in seconds),
returns an iterator of URLs to fetch to reassemble the backup."""
for key in keyset:
yield key.generate_url(expires_in=expire)
def make_restore_script(backup, expire=86400):
"""Returns a quick and easy restoration script to restore the given system,
requires a backup, and perhaps expire"""
myhostname = backup['hostname']
mybackupnum = backup['backupnum']
myfriendlytime = time.strftime('%Y-%m-%d at %H:%M GMT', backup['date'])
myexpiretime = time.strftime('%Y-%m-%d at %H:%M GMT',
time.gmtime(time.time() + expire))
myexpiretimestamp = time.time() + expire
output = []
output.append('#!/bin/sh\n')
output.append('# Restoration script for %s backup %s,\n' % (
myhostname, mybackupnum))
output.append('# a backup created on %s.\n' % (myfriendlytime))
output.append('# To use: bash scriptname /path/to/put/the/files\n\n')
output.append('# WARNING: THIS FILE EXPIRES AFTER %s\n' % (myexpiretime))
output.append('if [ "`date +%%s`" -gt "%i" ];\n' % (myexpiretimestamp))
output.append(' then echo "Sorry, this restore script is too old.";\n')
output.append(' exit 1;\n')
output.append('fi\n\n')
output.append('if [ -z "$1" ];\n')
output.append(' then echo "Usage: ./scriptname /path/to/restore/to";\n')
output.append(' exit 1;\n')
output.append('fi\n\n')
output.append('# Check the destination\n')
output.append('if [ ! -d $1 ];\n')
output.append(' then echo "Target $1 does not exist!";\n')
output.append(' exit 1;\n')
output.append('fi\n\n')
output.append('if [ -n "`ls --almost-all $1`" ];\n')
output.append(' then echo "Target $1 is not empty!";\n')
output.append(' exit 1;\n')
output.append('fi\n\n')
output.append('# cd to the destination, create a temporary workspace\n')
output.append('cd $1\n')
output.append('mkdir .restorescript-scratch\n\n')
output.append('# retrieve files\n')
mysortedfilelist = []
for key in backup['keys']:
output.append('wget -O $1/.restorescript-scratch/%s "%s"\n' % (
key.name, key.generate_url(expires_in=expire)))
mysortedfilelist.append('.restorescript-scratch/' + key.name)
mysortedfilelist.sort()
output.append('\n# decrypt files\n')
output.append('gpg --decrypt-files << EOF\n')
output.append('\n'.join(mysortedfilelist))
output.append('\nEOF\n')
output.append('\n# join and untar files\n')
output.append('cat .restorescript-scratch/*.tar.?? | tar -xf -\n\n')
output.append('echo "DONE! Have a nice day."\n##\n')
return output
def start_archive(hosts):
"Starts an archive operation for a list of hosts."
if 'LOGNAME' in os.environ:
username = os.environ['LOGNAME']
else:
try:
username = pwd.getpwuid(os.getuid()).pw_name
except KeyError:
username = 'nobody'
scriptdir = os.path.dirname(sys.argv[0])
cmd = [os.path.join(scriptdir, 'BackupPC_archiveStart'), 'archives3',
username]
cmd.extend(hosts)
proc = Popen(cmd)
proc.communicate()
def main():
# check command line options
parser = optparse.OptionParser(
usage="usage: %prog [options] [list|delete|script]",
description="" +
"Companion maintenance script for BackupPC_archiveHost_s3. " +
"By default, it assumes the 'list' command, which displays all " +
"of the backups currently archived on S3. The 'delete' command " +
"is used to delete backups. The 'script' command produces a " +
"script that can be used to download and restore a backup.")
parser.add_option("-H", "--host", dest="host",
help="Name of backed-up host")
parser.add_option("-b", "--backup-number", dest="backupnum",
help="Backup number")
parser.add_option("-a", "--age", dest="age",
help="Delete backups older than AGE days")
parser.add_option("-k", "--keep", dest="keep",
help="When used with --age, keep this many recent " +
"backups (default=1)", default=1)
parser.add_option("-f", "--filename", dest="filename",
help="Output filename for script")
parser.add_option("-x", "--expire", dest="expire",
help="Maximum age of script, default 86400 seconds")
parser.add_option("-t", "--test", dest="test", action="store_true",
help="Test mode; don't actually delete")
parser.add_option("-u", "--unfinalized", dest="unfinalized",
action="store_true", help="Consider unfinalized backups")
parser.add_option("-s", "--start-backups", dest="start",
action="store_true",
help="When used with --age, start backups for hosts " +
"with fewer than keep+1 backups")
parser.add_option("-l", "--list", dest="list", action="store_true",
help="List stored backups after completing operations")
(options, args) = parser.parse_args()
bmgr = BackupManager(secrets.accesskey, secrets.sharedkey)
if options.backupnum and not options.host:
parser.error('Must specify --host when specifying --backup-number')
if options.backupnum:
options.backupnum = int(options.backupnum)
if len(args) == 0:
args.append('list')
if len(args) > 1:
parser.error('Too many arguments.')
if args[0] != 'delete' and options.age:
parser.error('--age only makes sense with delete')
if options.start and not (args[0] == 'delete' and options.age):
parser.error('--start-backups only makes sense with delete and --age')
if args[0] != 'script' and (options.expire or options.filename):
parser.error('--expire and --filename only make sense with script')
if args[0] in ['list', 'script', 'delete']:
if options.host:
if options.host not in bmgr.all_backups:
parser.error('No | |
the mote has a typical RF output power of -2 dBm when the PA is disabled, then set the txPower parameter to -2 to turn off the PA. This command may be issued at any time and takes effect at the next mote boot. To change the transmit power immediately, use the write RAM option of this command, which can also be used at any time.
#
# \param txPower 1-byte field formatted as an ints (signed integer).<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setNVParameter_txPower named tuple.
#
def dn_setNVParameter_txPower(self, txPower) :
res = HartMoteConnectorInternal.send(self, ['setNVParameter', 'txPower'], {"txPower" : txPower})
return HartMoteConnector.Tuple_dn_setNVParameter_txPower(**res)
##
# The named tuple returned by the dn_setNVParameter_powerInfo() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 3: RC_BUSY
# - 4: RC_INVALID_LEN
# - 5: RC_INVALID_STATE
# - 6: RC_UNSUPPORTED
# - 7: RC_UNKNOWN_PARAM
# - 8: RC_UNKNOWN_CMD
# - 9: RC_WRITE_FAIL
# - 10: RC_READ_FAIL
# - 11: RC_LOW_VOLTAGE
# - 12: RC_NO_RESOURCES
# - 13: RC_INCOMPLETE_JOIN_INFO
# - 14: RC_NOT_FOUND
# - 15: RC_INVALID_VALUE
# - 19: RC_ERASE_FAIL
#
Tuple_dn_setNVParameter_powerInfo = collections.namedtuple("Tuple_dn_setNVParameter_powerInfo", ['RC'])
##
# The setNVParameter<powerInfo> command specifies the average current that is available to the mote. Using the write RAM option will only have an effect if the command is called while the mote is in Idle state. Otherwise, the new value will be used after the next mote boot.
#
# \param powerSource 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: Line
# - 1: Battery
# - 2: Rechargeable/Scavenging
# \param dischargeCur 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param dischargeTime 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# \param recoverTime 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setNVParameter_powerInfo named tuple.
#
def dn_setNVParameter_powerInfo(self, powerSource, dischargeCur, dischargeTime, recoverTime) :
res = HartMoteConnectorInternal.send(self, ['setNVParameter', 'powerInfo'], {"powerSource" : powerSource, "dischargeCur" : dischargeCur, "dischargeTime" : dischargeTime, "recoverTime" : recoverTime})
return HartMoteConnector.Tuple_dn_setNVParameter_powerInfo(**res)
##
# The named tuple returned by the dn_setNVParameter_ttl() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 3: RC_BUSY
# - 4: RC_INVALID_LEN
# - 5: RC_INVALID_STATE
# - 6: RC_UNSUPPORTED
# - 7: RC_UNKNOWN_PARAM
# - 8: RC_UNKNOWN_CMD
# - 9: RC_WRITE_FAIL
# - 10: RC_READ_FAIL
# - 11: RC_LOW_VOLTAGE
# - 12: RC_NO_RESOURCES
# - 13: RC_INCOMPLETE_JOIN_INFO
# - 14: RC_NOT_FOUND
# - 15: RC_INVALID_VALUE
# - 19: RC_ERASE_FAIL
#
Tuple_dn_setNVParameter_ttl = collections.namedtuple("Tuple_dn_setNVParameter_ttl", ['RC'])
##
# The setNVParameter<ttl> command sets the mote'spersistentpacket Time To Live (TTL) value.TTL specifies the maximum number of hops a packet may traverse before it is discarded from the network. A mote sets the initial value of the TTL field in the packets it generates to this value.The mote reads the value from persistent storage at boot time.To change the TTL used currently, this command may be issued with the RAM option.
#
# The mote defaults TTL to 127. For compliant devices, the HART specification currently defaults to 32, but this will change to 249 in spec version 7.4, as will the mote default. We suggest not changing the mote default unless HART specifically raises it as a compliance issue when you submit your device for testing.
#
# \param timeToLive 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setNVParameter_ttl named tuple.
#
def dn_setNVParameter_ttl(self, timeToLive) :
res = HartMoteConnectorInternal.send(self, ['setNVParameter', 'ttl'], {"timeToLive" : timeToLive})
return HartMoteConnector.Tuple_dn_setNVParameter_ttl(**res)
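##
# Illustrative usage (assumes 'connector' is an already-connected HartMoteConnector instance):
#   rc = connector.dn_setNVParameter_ttl(timeToLive=127).RC   # 127 is the mote default TTL
#   assert rc == 0   # 0 == RC_OK
#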
##
# The named tuple returned by the dn_setNVParameter_HARTantennaGain() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 3: RC_BUSY
# - 4: RC_INVALID_LEN
# - 5: RC_INVALID_STATE
# - 6: RC_UNSUPPORTED
# - 7: RC_UNKNOWN_PARAM
# - 8: RC_UNKNOWN_CMD
# - 9: RC_WRITE_FAIL
# - 10: RC_READ_FAIL
# - 11: RC_LOW_VOLTAGE
# - 12: RC_NO_RESOURCES
# - 13: RC_INCOMPLETE_JOIN_INFO
# - 14: RC_NOT_FOUND
# - 15: RC_INVALID_VALUE
# - 19: RC_ERASE_FAIL
#
Tuple_dn_setNVParameter_HARTantennaGain = collections.namedtuple("Tuple_dn_setNVParameter_HARTantennaGain", ['RC'])
##
# The setNVParameter<HARTantennaGain> command stores value of the antenna gain in the mote's persistent storage.This value is added to the conducted output power of the mote when replying to HART command 797 (Write Radio Power Output) and to HART command 798 (Read Radio Output Power). The antenna gain should take into account both the gain of the antenna and any loss (for example, attenuation from a long coax cable) between the mote and the antenna. By default, this value is 2, assuming a +2 dBi antenna gain.To change the transmit power immediately, use the write RAM option of this command.
#
# \param antennaGain 1-byte field formatted as an ints (signed integer).<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_setNVParameter_HARTantennaGain named tuple.
#
def dn_setNVParameter_HARTantennaGain(self, antennaGain) :
res = HartMoteConnectorInternal.send(self, ['setNVParameter', 'HARTantennaGain'], {"antennaGain" : antennaGain})
return HartMoteConnector.Tuple_dn_setNVParameter_HARTantennaGain(**res)
##
# The named tuple returned by the dn_setNVParameter_OTAPlockout() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 3: RC_BUSY
# - 4: RC_INVALID_LEN
# - 5: RC_INVALID_STATE
# - 6: RC_UNSUPPORTED
# - 7: RC_UNKNOWN_PARAM
# - 8: RC_UNKNOWN_CMD
# - 9: RC_WRITE_FAIL
# - 10: RC_READ_FAIL
# - 11: RC_LOW_VOLTAGE
# - 12: RC_NO_RESOURCES
# - 13: RC_INCOMPLETE_JOIN_INFO
# - 14: RC_NOT_FOUND
# - 15: RC_INVALID_VALUE
# - 19: RC_ERASE_FAIL
#
Tuple_dn_setNVParameter_OTAPlockout = collections.namedtuple("Tuple_dn_setNVParameter_OTAPlockout", ['RC'])
##
# The setNVParameter<OTAPlockout> command specifies whether the mote's firmware can be updated over the air. Over-The-Air-Programming (OTAP) is allowed by default. The mote reads the OTAPlockout value from persistent storage at boot time. To change the value used currently, this command may be issued with RAM option.
#
# Dust Networks recommends that OEMs allow their devices to receive firmware updates, either by leaving the OTAPlockout parameter at its default value, or by making OTAPlockout settable using a WirelessHART command that is available both over the air and through its wired maintenance port. OEMs have the option of making such a command password protected.
#
# \param otapLockout 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: OTAP allowed (default)
# - 1: OTAP disabled
#
# \returns The response to the command, formatted as a #Tuple_dn_setNVParameter_OTAPlockout named tuple.
#
def dn_setNVParameter_OTAPlockout(self, otapLockout) :
res = HartMoteConnectorInternal.send(self, ['setNVParameter', 'OTAPlockout'], {"otapLockout" : otapLockout})
return HartMoteConnector.Tuple_dn_setNVParameter_OTAPlockout(**res)
##
# The named tuple returned by the dn_setNVParameter_hrCounterMode() function.
#
# - <tt>RC</tt>: 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: RC_OK
# - 3: RC_BUSY
# - 4: RC_INVALID_LEN
# - 5: RC_INVALID_STATE
# - 6: RC_UNSUPPORTED
# - 7: RC_UNKNOWN_PARAM
# - 8: RC_UNKNOWN_CMD
# - 9: RC_WRITE_FAIL
# - 10: RC_READ_FAIL
# - 11: RC_LOW_VOLTAGE
# - 12: RC_NO_RESOURCES
# | |
the outfit of the client to Checkered Renegade.",
help="Sets the outfit of the client to Checkered Renegade.\n"
"Example: !checkeredrenegade"
)
async def checkeredrenegade(self, ctx: fortnitepy.ext.commands.Context) -> None:
skin_variants = self.bot.party.me.create_variants(
material=2
)
await self.bot.party.me.set_outfit(
asset='CID_028_Athena_Commando_F',
variants=skin_variants
)
await ctx.send('Skin set to Checkered Renegade!')
print(self.bot.message % f'Skin set to Checkered Renegade.')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Sets the outfit of the client to Minty Elf.",
help="Sets the outfit of the client to Minty Elf.\n"
"Example: !mintyelf"
)
async def mintyelf(self, ctx: fortnitepy.ext.commands.Context) -> None:
skin_variants = self.bot.party.me.create_variants(
material=2
)
await self.bot.party.me.set_outfit(
asset='CID_051_Athena_Commando_M_HolidayElf',
variants=skin_variants
)
await ctx.send('Skin set to Minty Elf!')
print(self.bot.message % f'Skin set to Minty Elf.')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Sets the emote of the client using EID.",
help="Sets the emote of the client using EID.\n"
"Example: !eid EID_Floss"
)
async def eid(self, ctx: fortnitepy.ext.commands.Context, emote_id: str) -> None:
await self.bot.party.me.clear_emote()
await self.bot.party.me.set_emote(
asset=emote_id
)
await ctx.send(f'Emote set to {emote_id}!')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Clears/stops the emote currently playing.",
help="Clears/stops the emote currently playing.\n"
"Example: !stop"
)
async def stop(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.bot.party.me.clear_emote()
await ctx.send('Stopped emoting.')
print(self.bot.message % f'Stopped emoting.')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Sets the backpack of the client using BID.",
help="Sets the backpack of the client using BID.\n"
"Example: !bid BID_023_Pinkbear"
)
async def bid(self, ctx: fortnitepy.ext.commands.Context, backpack_id: str) -> None:
await self.bot.party.me.set_backpack(
asset=backpack_id
)
await ctx.send(f'Backbling set to {backpack_id}!')
print(self.bot.message % f'Backbling set to {backpack_id}!')
@commands.dm_only()
@commands.command(
aliases=['legacypickaxe'],
description="[Cosmetic] Sets the pickaxe of the client using PICKAXE_ID",
help="Sets the pickaxe of the client using PICKAXE_ID\n"
"Example: !pickaxe_id Pickaxe_ID_073_Balloon"
)
async def pickaxe_id(self, ctx: fortnitepy.ext.commands.Context, pickaxe_id_: str) -> None:
await self.bot.party.me.set_pickaxe(
asset=pickaxe_id_
)
await ctx.send(f'Pickaxe set to {pickaxe_id_}')
print(self.bot.message % f'Pickaxe set to {pickaxe_id_}')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Sets the pet of the client using PetCarrier_.",
help="Sets the pet of the client using PetCarrier_.\n"
"Example: !pet_carrier PetCarrier_002_Chameleon"
)
async def pet_carrier(self, ctx: fortnitepy.ext.commands.Context, pet_carrier_id: str) -> None:
await self.bot.party.me.set_pet(
asset=pet_carrier_id
)
await ctx.send(f'Pet set to {pet_carrier_id}!')
print(self.bot.message % f'Pet set to {pet_carrier_id}!')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Sets the emoji of the client using Emoji_.",
help="Sets the emoji of the client using Emoji_.\n"
"Example: !emoji_id Emoji_PeaceSign"
)
async def emoji_id(self, ctx: fortnitepy.ext.commands.Context, emoji_: str) -> None:
await self.bot.party.me.clear_emote()
await self.bot.party.me.set_emoji(
asset=emoji_
)
await ctx.send(f'Emoji set to {emoji_}!')
print(self.bot.message % f'Emoji set to {emoji_}!')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Sets the contrail of the client using Trails_.",
help="Sets the contrail of the client using Trails_.\n"
"Example: !trails Trails_ID_075_Celestial"
)
async def trails(self, ctx: fortnitepy.ext.commands.Context, trails_: str) -> None:
await self.bot.party.me.set_contrail(
asset=trails_
)
await ctx.send(f'Contrail set to {trails_}!')
print(self.bot.message % f'Contrail set to {trails_}!')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Sets pickaxe using PICKAXE_ID or display name & does 'Point it Out'. If no pickaxe is "
"specified, only the emote will be played.",
help="Sets pickaxe using PICKAXE_ID or display name & does 'Point it Out'. If no pickaxe is "
"specified, only the emote will be played.\n"
"Example: !point Pickaxe_ID_029_Assassin"
)
async def point(self, ctx: fortnitepy.ext.commands.Context, *, content: Optional[str] = None) -> None:
if content is None:
await self.bot.party.me.set_emote(asset='EID_IceKing')
await ctx.send(f'Point it Out played.')
elif 'pickaxe_id' in content.lower():
await self.bot.party.me.set_pickaxe(asset=content)
await self.bot.party.me.set_emote(asset='EID_IceKing')
await ctx.send(f'Pickaxe set to {content} & Point it Out played.')
else:
try:
cosmetic = await self.bot.fortnite_api.cosmetics.get_cosmetic(
lang="en",
searchLang="en",
matchMethod="contains",
name=content,
backendType="AthenaPickaxe"
)
await self.bot.party.me.set_pickaxe(asset=cosmetic.id)
await self.bot.party.me.clear_emote()
await self.bot.party.me.set_emote(asset='EID_IceKing')
await ctx.send(f'Pickaxe set to {content} & Point it Out played.')
except FortniteAPIAsync.exceptions.NotFound:
await ctx.send(f"Failed to find a pickaxe with the name: {content}")
@commands.dm_only()
@commands.command(
description="[Cosmetic] Copies the cosmetic loadout of the defined user. If user is left blank, "
"the message author will be used.",
help="Copies the cosmetic loadout of the defined user. If user is left blank, "
"the message author will be used.\n"
"Example: !copy Terbau"
)
async def copy(self, ctx: fortnitepy.ext.commands.Context, *, epic_username: Optional[str] = None) -> None:
if epic_username is None:
member = [m for m in self.bot.party.members if m.id == ctx.author.id][0]
else:
user = await self.bot.fetch_user(epic_username)
member = [m for m in self.bot.party.members if m.id == user.id][0]
await self.bot.party.me.edit(
functools.partial(
fortnitepy.ClientPartyMember.set_outfit,
asset=member.outfit,
variants=member.outfit_variants
),
functools.partial(
fortnitepy.ClientPartyMember.set_backpack,
asset=member.backpack,
variants=member.backpack_variants
),
functools.partial(
fortnitepy.ClientPartyMember.set_pickaxe,
asset=member.pickaxe,
variants=member.pickaxe_variants
),
functools.partial(
fortnitepy.ClientPartyMember.set_banner,
icon=member.banner[0],
color=member.banner[1],
season_level=member.banner[2]
),
functools.partial(
fortnitepy.ClientPartyMember.set_battlepass_info,
has_purchased=True,
level=member.battlepass_info[1]
)
)
if member.emote is not None:
await self.bot.party.me.set_emote(asset=member.emote)
await ctx.send(f'Copied the loadout of {member.display_name}.')
print(self.bot.message % f'Copied the loadout of {member.display_name}.')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.",
help="Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.\n"
"Example: !hologram"
)
async def hologram(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.bot.party.me.set_outfit(
asset='CID_VIP_Athena_Commando_M_GalileoGondola_SG'
)
await ctx.send('Skin set to Star Wars Hologram!')
print(self.bot.message % f'Skin set to Star Wars Hologram.')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.",
help="Shortcut for equipping the skin CID_VIP_Athena_Commando_M_GalileoGondola_SG.\n"
"Example: !gift is a joke command."
)
async def gift(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.bot.party.me.clear_emote()
await self.bot.party.me.set_emote(
asset='EID_NeverGonna'
)
await ctx.send('What did you think would happen?')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Shortcut for equipping the emote EID_TourBus.",
help="Shortcut for equipping the emote EID_TourBus.\n"
"Example: !ponpon"
)
async def ponpon(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.bot.party.me.set_emote(
asset='EID_TourBus'
)
await ctx.send('Emote set to Ninja Style!')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Sets the enlightened value of a skin "
"(used for skins such as glitched Scratch or Golden Peely).",
help="Sets the enlightened value of a skin.\n"
"Example: !enlightened CID_701_Athena_Commando_M_BananaAgent 2 350"
)
async def enlightened(self, ctx: fortnitepy.ext.commands.Context, cosmetic_id: str, br_season: int,
skin_level: int) -> None:
variant_types = {
1: self.bot.party.me.create_variants(progressive=4),
2: self.bot.party.me.create_variants(progressive=4),
3: self.bot.party.me.create_variants(material=2)
}
if 'cid' in cosmetic_id.lower():
await self.bot.party.me.set_outfit(
asset=cosmetic_id,
variants=variant_types[br_season] if br_season in variant_types else variant_types[2],
enlightenment=(br_season, skin_level)
)
await ctx.send(f'Skin set to {cosmetic_id} at level {skin_level} (for Season 1{br_season}).')
elif 'bid' in cosmetic_id.lower():
await self.bot.party.me.set_backpack(
asset=cosmetic_id,
variants=self.bot.party.me.create_variants(progressive=2),
enlightenment=(br_season, skin_level)
)
await ctx.send(f'Backpack set to {cosmetic_id} at level {skin_level} (for Season 1{br_season}).')
print(
self.bot.message % f'Enlightenment for {cosmetic_id} set to level {skin_level} '
f'(for Season 1{br_season}).')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Shortcut for equipping the skin CID_605_Athena_Commando_M_TourBus.",
help="Shortcut for equipping the skin CID_605_Athena_Commando_M_TourBus.\n"
"Example: !ninja"
)
async def ninja(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.bot.party.me.set_outfit(
asset='CID_605_Athena_Commando_M_TourBus'
)
await ctx.send('Skin set to Ninja!')
print(self.bot.message % f'Skin set to Ninja.')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Equips all very rare skins.",
help="Equips all very rare skins.\n"
"Example: !rareskins"
)
async def rareskins(self, ctx: fortnitepy.ext.commands.Context) -> None:
await ctx.send('Showing all rare skins now.')
await self.bot.party.me.set_outfit(
asset='CID_030_Athena_Commando_M_Halloween',
variants=self.bot.party.me.create_variants(clothing_color=1)
)
await ctx.send('Skin set to Purple Skull Trooper!')
print(self.bot.message % f"Skin set to Purple Skull Trooper.")
await asyncio.sleep(2)
await self.bot.party.me.set_outfit(
asset='CID_029_Athena_Commando_F_Halloween',
variants=self.bot.party.me.create_variants(material=3)
)
await ctx.send('Skin set to Pink Ghoul Trooper!')
print(self.bot.message % f"Skin set to Pink Ghoul Trooper.")
await asyncio.sleep(2)
for rare_skin in ('CID_028_Athena_Commando_F', 'CID_017_Athena_Commando_M'):
await self.bot.party.me.set_outfit(
asset=rare_skin
)
await ctx.send(f'Skin set to {rare_skin}!')
print(self.bot.message % f"Skin set to: {rare_skin}!")
await asyncio.sleep(2)
@commands.dm_only()
@commands.command(
description="[Cosmetic] Sets the outfit of the client to Golden Peely "
"(shortcut for !enlightened CID_701_Athena_Commando_M_BananaAgent 2 350).",
help="Sets the outfit of the client to Golden Peely.\n"
"Example: !goldenpeely"
)
async def goldenpeely(self, ctx: fortnitepy.ext.commands.Context) -> None:
await self.bot.party.me.set_outfit(
asset='CID_701_Athena_Commando_M_BananaAgent',
variants=self.bot.party.me.create_variants(progressive=4),
enlightenment=(2, 350)
)
await ctx.send(f'Skin set to Golden Peely.')
@commands.dm_only()
@commands.command(
description="[Cosmetic] Randomly finds & equips a skin. Types currently include skin, backpack, emote & all. "
"If type is left blank, a random skin will be equipped.",
help="Randomly finds & equips a skin.\n"
"Example: !random skin"
)
async def random(self, ctx: fortnitepy.ext.commands.Context, cosmetic_type: str = 'skin') -> None:
if cosmetic_type == 'skin':
all_outfits = await self.bot.fortnite_api.cosmetics.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaCharacter"
)
random_skin = py_random.choice(all_outfits).id
await self.bot.party.me.set_outfit(
asset=random_skin,
variants=self.bot.party.me.create_variants(profile_banner='ProfileBanner')
)
await ctx.send(f'Skin randomly set to {random_skin}.')
print(self.bot.message % f"Set skin randomly to: {random_skin}.")
elif cosmetic_type == 'backpack':
all_backpacks = await self.bot.fortnite_api.cosmetics.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaBackpack"
)
random_backpack = py_random.choice(all_backpacks).id
await self.bot.party.me.set_backpack(
asset=random_backpack,
variants=self.bot.party.me.create_variants(profile_banner='ProfileBanner')
)
await ctx.send(f'Backpack randomly set to {random_backpack}.')
print(self.bot.message % f"Set backpack randomly to: {random_backpack}.")
elif cosmetic_type == 'emote':
all_emotes = await self.bot.fortnite_api.cosmetics.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaDance"
)
random_emote = py_random.choice(all_emotes).id
await self.bot.party.me.set_emote(
asset=random_emote
)
await ctx.send(f'Emote randomly set to {random_emote}.')
print(self.bot.message % f"Set emote randomly to: {random_emote}.")
elif cosmetic_type == 'all':
all_outfits = await self.bot.fortnite_api.cosmetics.get_cosmetics(
lang="en",
searchLang="en",
backendType="AthenaCharacter"
)
| |
#!/usr/local/bin/python3.4
#
# fetch.py
#
# <NAME> and <NAME>, Climate Code Foundation.
# Copyright (C) 2008-2010 Ravenbrook Limited.
# Copyright (C) 2011 Climate Code Foundation.
# <NAME>, Revision 2017-07-28
"""
fetch.py [--help] [--list] [--force] [--store <dir>] [--config <file>] [pattern] ...
Script to fetch (download from the internet) the inputs required for
the cccgistemp program.
The groups, bundles, bundle members, and individual files fetchable
are defined by the configuration file specified by --config <file>
(default 'config/sources').
Everything fetched ends up in the directory specified by --store <dir>
(default 'input').
If no such arguments are given, the default is to fetch those files in
the default group. In the config file provided with cccgistemp, that
means the files required for normal cccgistemp operation.
Any arguments are treated as regular expressions and matched against
groups, bundles, files, or bundle members (in that order). The first
matching item for each argument is fetched.
Unless --force is set, no file that already exists is created.
--list lists all things that can be fetched.
The config file syntax is as follows:
Comments begin '#' and run to the end of the line.
Every line begins with a keyword, followed by a colon.
Keywords are not case-sensitive.
A fetchable item is either:
file: <url> [<local filename>]
which denotes a fetchable item which is also a source dataset, or
bundle: <url> [<local filename>]
which denotes a 'bundle': a file which can be unpacked into
a number of files, one or more of which may be source datasets,
identified thusly:
member: <pattern> [<local filename>]
<pattern> is a regular expression matching the tail of a pathname
within the most-recently described bundle.
The system works out for itself how to unpack a bundle. These may
be based on the bundle's name or contents: you shouldn't have to
worry about it.
<local filename> in each of the above is an optional name to give
the fetched item or extracted member. If absent, the system uses
a filename derived from the fetched item or extracted member.
<url> may be any ftp:// or http:// URL. It may also be of this
form:
ftpmatch://<site>/<path>/<pattern>
In which case the directory <path> on the FTP site <site> is
searched for filenames matching <pattern> and the last such file
is fetched. This 'feature' was developed for USHCN version 2
datasets.
All the contents of this file may be divided into disjoint
'groups'. Each group is named. Groups are introduced with group
lines:
group: <group name>
The default group name is the empty string.
"""
# http://www.python.org/doc/2.4.4/lib/module-getopt.html
import getopt
# http://docs.python.org/release/2.4.4/lib/module-os.html
# http://www.python.org/doc/2.4.4/lib/module-sys.html
import sys
# https://docs.python.org/2.6/library/urllib2.html
import urllib.request
import ssl
import itertools
import re
from settings import *
import tarfile
import zipfile
class Fetcher(object):
def __init__(self, **kwargs):
self.force = kwargs.pop('force', False)
self.output = kwargs.pop('output', sys.stdout)
self.output.write("Fetching Input Files:\n")
self.prefix = kwargs.pop('prefix', INPUT_DIR)
self.config_file = kwargs.pop('config_file', SOURCES_DIR + 'sources.txt')
self.requests = kwargs.pop('requests', None)
def fetch(self):
(bundles, files) = self.find_requests(self.requests)
for url, local in files:
# first, check if ghcn file exists
if "ghcnm.tavg.qcf.dat" in url:
if not os.path.exists(INPUT_DIR + "ghcnm.tavg.qcf.dat"):
self.get_ghcn_file(url)
else:
self.fetch_one(url, local)
for ((url, local), members) in bundles.items():
self.fetch_one(url, local, members=members)
sys.stdout.flush()
# Get most recent GHCN data file
def get_ghcn_file(self, url):
public_dir = url.replace("ghcnm.tavg.qcf.dat", "")
import datetime
import urllib.request as urllib
response = urllib.urlopen(public_dir)
html = response.read().decode()
filenames = re.findall(r'href=[\'"]?([^\'" >]+)', html)
ghcn_filenames = [x for x in filenames if x[-4:] == ".dat" and "v4" in x]
last_modifieds = []
for file in ghcn_filenames:
conn = urllib.urlopen(public_dir + file)
last_modified = conn.headers["last-modified"]
last_modified = datetime.datetime.strptime(last_modified[5:-4], '%d %b %Y %H:%M:%S')
last_modifieds.append((file, last_modified))
last_modifieds.sort(key=lambda x: x[1])
recent_ghcn = [x[0] for x in last_modifieds[-2:]]
if "qcf" in recent_ghcn[0]:
qcf_file = recent_ghcn[0]
else:
qcf_file = recent_ghcn[1]
print(qcf_file)
urllib.urlretrieve(public_dir + qcf_file, INPUT_DIR + "ghcnm.tavg.qcf.dat")
def make_prefix(self):
try:
os.makedirs(self.prefix)
except OSError:
# Expected if the directories already exist.
pass
def key_lines(self):
comment_re = re.compile(r'((.*?[^\\])??)#')
key_re = re.compile(r'^([a-zA-Z_]+)\s*:\s*(.*)$')
for (no, l) in zip(itertools.count(1), open(self.config_file)):
m = comment_re.match(l)
if m:
bare = m.group(1)
else:
bare = l
bare = bare.strip()
# ignore blank lines
if len(bare) == 0:
continue
m = key_re.match(bare)
if m:
yield (no, m.groups())
else:
raise Error("%s:%d: malformed line '%s'" % (self.config_file, no, l.strip()))
def read_config(self):
valid_keys = dict(group=re.compile(r'^\s*(.*?)\s*$'),
file=re.compile(r'^([^\s]+)(\s+.*)?\s*$'),
bundle=re.compile(r'^([^\s]+)(\s+.*)?\s*$'),
member=re.compile(r'^([^\s]+)(\s+.*)?\s*$'))
group = ''
config = {'': dict(files=[], bundles={})}
for (no, (k, v)) in self.key_lines():
k = k.lower()
if k not in valid_keys:
raise Error("%s:%d: unknown key '%s'" % (self.config_file, no, k))
m = valid_keys[k].match(v)
if not m:
raise Error("%s:%d: malformed '%s' line" % (self.config_file, no, k))
# 'bundle' only persists over 'member' lines.
if k != 'member':
bundle = None
if k == 'group':
group = m.group(1)
config[group] = dict(files=[], bundles={})
elif k == 'file':
config[group]['files'].append(m.groups())
pattern = m.group(1)
local = m.group(2)
elif k == 'bundle':
bundle = m.groups()
members = []
config[group]['bundles'][bundle] = members
pattern = m.group(1)
local = m.group(2)
elif k == 'member':
if bundle is None:
raise Error("%s:%d: 'member' line with no bundle." % (self.config_file, no))
config[group]['bundles'][bundle].append(m.groups())
pattern = m.group(1)
local = m.group(2)
return config
def list_things(self):
"""List the things that we know how to fetch."""
config = self.read_config()
group_names = config.keys()
group_names = sorted(group_names)
for g in group_names:
if g == '':
self.output.write("Default group: \n")
else:
self.output.write("Group '%s':\n" % g)
bs = config[g]['bundles'].items()
bs = sorted(bs)
for ((pattern, local), members) in bs:
self.output.write(" bundle '%s':\n" % pattern)
if local:
self.output.write(" (read to '%s')\n" % local)
for (p, l) in members:
self.output.write(" member '%s'\n" % p)
if l:
self.output.write(" (read to '%s')\n" % l)
fs = config[g]['files']
fs = sorted(fs)
for (pattern, local) in fs:
self.output.write(" file '%s'\n" % pattern)
if local:
self.output.write(" (read to '%s')\n" % local)
def find_requests(self, requests):
config = self.read_config()
bundles = {}
files = []
def add(fs, bs):
for f in fs:
files.append(f)
for (b, ms) in bs.items():
bundles[b] = bundles.get(b, []) + ms
if not requests:
requests = ['']
for request in list(requests):
if request in config:
add(config[request]['files'], config[request]['bundles'])
requests.remove(request)
for request in list(requests):
for group_name in config.keys():
if re.search(request, group_name):
self.output.write("No group named '%s', using '%s' instead.\n"
% (request, group_name))
add(config[group_name]['files'], config[group_name]['bundles'])
try:
requests.remove(request)
except ValueError:
# Happens when request matches several groups.
pass
for request in list(requests):
for dict in config.values():
for (b, ms) in dict['bundles'].items():
(pattern, local) = b
if re.search(request, pattern) or (local is not None and re.search(request, local)):
self.output.write("No group matching '%s',\n"
" using bundle '%s:%s' instead.\n"
% (request, pattern, local))
add([], {(pattern, local): ms})
try:
requests.remove(request)
except ValueError:
# Happens when request matches several bundles.
pass
for request in list(requests):
for dict in config.values():
for (pattern, local) in dict['files']:
if re.search(request, pattern) or (local is not None and re.search(request, local)):
self.output.write("No group or bundle matching '%s',\n"
" using file '%s:%s' instead.\n"
% (request, pattern, local))
add([(pattern, local)], {})
try:
requests.remove(request)
except ValueError:
# Happens when request matches several files.
pass
for request in list(requests):
for dict in config.values():
for (b, ms) in dict['bundles'].items():
for (pattern, local) in ms:
if re.search(request, pattern) or (local is not None and re.search(request, local)):
self.output.write("No group or bundle matching '%s',\n"
" using member '%s:%s'\n"
" of bundle '%s:%s' instead.\n"
% (request, pattern, local, b[0], b[1]))
add([], {b: [(pattern, local)]})
try:
requests.remove(request)
except ValueError:
# Happens when request matches several members.
pass
if requests:
raise Error("Don't know how to fetch these items: %s" % requests)
return bundles, files
def fetch_one(self, url, local, members=None):
if members is None:
members = []
m = re.match('([a-z]+)://([^/]+)/(.*/)([^/]+)$', url)
if m is None:
raise Error("Malformed URL '%s'" % url)
protocol = m.group(1)
if protocol in 'https http ftp'.split():
self.fetch_url(url, local, members)
elif protocol == 'ftpmatch':
host = m.group(2)
path = m.group(3)
pattern = m.group(4)
self.ftpmatch(host, path, pattern, local, members)
else:
raise Error("Unknown protocol '%s' in URL '%s'" % (protocol, url))
def fetch_url(self, url, local, members=None):
import os
if local is None:
local = url.split('/')[-1]
name = os.path.join(self.prefix, local.strip())
if os.path.exists(name) and os.path.getsize(name) == 0:
self.output.write("%s is empty; removing it.\n" % name)
os.remove(name)
if os.path.exists(name) and not self.force:
self.output.write("%s already exists.\n" % name)
else:
self.make_prefix()
self.output.write("Fetching %s to %s\n" % (url, name))
# We have to | |
<reponame>marioantao/Text_Analytics_Project-_SA
from src.features.process_text.patterns import get_contraction_dict, get_special_characters_pattern,\
get_end_characters_pattern, get_hyperlink_pattern, get_apostrophe_pattern, get_whitespace_pattern, get_number_pattern, \
get_abbreviation_dict, get_emoticon_dict
from src.features.process_text.correct_spelling import correct_word
from src.features.process_text.tokenize import is_tokenized, merge_tokens, word_tokenize
from re import IGNORECASE, DOTALL, sub, compile, escape
from nltk.corpus import stopwords
def expand_abbreviations(text):
"""Expands contractions in text."""
# If text is empty, return None.
if not text: return None
# If texts is tokenized, merge tokens.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
# If last character is not space, add space.
try:
if normalized_text[-1] != ' ':
normalized_text += ' '
except IndexError:
print(1)
# Creates abbreviations pattern.
abbreviations_pattern = compile('({})'.format(r'\.?\s|'.join(get_abbreviation_dict().keys())), flags=IGNORECASE | DOTALL)
def expand_match(abbreviation):
"""Expands matched contraction."""
# Retrieves matched contraction from string.
match = abbreviation.group(0)
# If last character is space, remove space.
if match[-1] == " ":
match = match[:-1]
remove_space = True
else:
remove_space = False
# If last character is dot, remove dot.
if match[-1] == r'.':
match = match[:-1]
# Find expanded contraction in dictionary, based on contraction key.
expanded_contraction = get_abbreviation_dict().get(match.lower())
if not expanded_contraction:
return abbreviation.group(0)
if remove_space:
expanded_contraction += " "
# Return the expanded abbreviation.
return expanded_contraction
# Replaces contractions with expanded contractions in text.
normalized_text = abbreviations_pattern.sub(expand_match, normalized_text)
# Strip text.
normalized_text = normalized_text.strip()
# If text was tokenized, re-tokenize text.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return expanded text.
return normalized_text
def remove_numbers(text):
"""Remove numbers from text."""
# If text is empty, return None.
if not text: return None
# If texts is tokenized, merge tokens.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
# Remove all tailing white spaces.
normalized_text = normalized_text.strip()
# Replace all special characters with spaces.
normalized_text = sub(get_number_pattern(), r' ', normalized_text)
# Then remove multiple adjacent spaces.
normalized_text = sub(' +', ' ', normalized_text)
# Strip text.
normalized_text = normalized_text.strip()
# If text was tokenized, re-tokenize text.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return normalized text.
return normalized_text
def _get_single_match(match):
"""Returns single match of multiple match."""
word = match.group()
return word[0]
def replace_multiple_stopwords(text):
"""Replaces multiple stopwords with single stopwords."""
# If text is empty, return None.
if not text: return None
# If texts is tokenized, merge tokens.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
# Collapse runs of '.', '!' or '?' to their first character.
normalized_text = sub('[.!?]+', _get_single_match, normalized_text)
# Strip text.
normalized_text = normalized_text.strip()
# If was tokenized, re-tokenize text.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return normalized text.
return normalized_text
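# Illustrative example (plain string input assumed):
#   replace_multiple_stopwords("Great movie!!! Loved it???")  ->  "Great movie! Loved it?"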
def replace_whitespaces(text):
"""Replaces all whitespaces with one space."""
# If text is empty, return None.
if not text: return None
# If texts is tokenized, merge tokens.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
# Replaces all whitespaces with ' '.
normalized_text = sub(get_whitespace_pattern(), ' ', normalized_text)
# Strip text.
normalized_text = normalized_text.strip()
# If was tokenized, re-tokenize text.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return normalized text.
return normalized_text
def replace_apostrophes(text):
"""Replaces apostrophe pattern with '."""
# If text is empty, return None.
if not text: return None
# If texts is tokenized, merge tokens.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
# Replaces apostrophe pattern with '.
normalized_text = sub(get_apostrophe_pattern(), "'", normalized_text)
# Strip text.
normalized_text = normalized_text.strip()
# If was tokenized, re-tokenize text.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return normalized text.
return normalized_text
def expand_contractions(text):
"""Expands contractions in text."""
# If text is empty, return None.
if not text: return None
# If texts is tokenized, merge tokens.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
# Creates contractions pattern.
contractions_pattern = compile('({})'.format('|'.join(get_contraction_dict().keys())), flags=IGNORECASE | DOTALL)
def expand_match(contraction):
"""Expands matched contraction."""
# Retrieves matched contraction from string.
match = contraction.group(0)
# Stores first character for case sensitivity.
first_char = match[0]
# Find expanded contraction in dictionary, based on contraction key.
expanded_contraction = get_contraction_dict().get(match)
# If the contraction could not be found, try again with lower case.
if not expanded_contraction:
expanded_contraction = get_contraction_dict().get(match.lower())
# Add first character to expanded contraction.
expanded_contraction = first_char + expanded_contraction[1:]
return expanded_contraction
# Replaces contractions with expanded contractions in text.
normalized_text = contractions_pattern.sub(expand_match, normalized_text)
# Strip text.
normalized_text = normalized_text.strip()
# If text was tokenized, re-tokenize text.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return expanded text.
return normalized_text
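# Illustrative example (assumes get_contraction_dict() maps "can't" to "cannot"
# and "isn't" to "is not"; exact output depends on the dictionary contents):
#   expand_contractions("I can't believe it isn't butter")
#   ->  "I cannot believe it is not butter"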
def convert_case(text, to_lower=True):
"""Converts text to defined case."""
# If text is empty, return None.
if not text: return None
# If texts is tokenized, merge tokens.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
# If to lower, convert to lower case. Else, convert to upper case.
if to_lower:
normalized_text = normalized_text.lower()
else:
normalized_text = normalized_text.upper()
# If text was tokenized, re-tokenize text.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return normalized text.
return normalized_text
def emoticon_convert(text):
if not text: return None
normalized_text = []
emoticon_dict = get_emoticon_dict()
for word in text.split():
if word in emoticon_dict:
normalized_text.append(emoticon_dict[word])
else:
normalized_text.append(word)
return ' '.join(normalized_text)
def emoticon_expand(text):
"""Expands emoticon in text."""
# If text is empty, return None.
if not text: return None
# If texts is tokenized, merge tokens.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
# Creates emoticon pattern.
emoticon_pattern = compile('({})'.format('|'.join(escape(emoticon) for emoticon in get_emoticon_dict().keys())), flags=IGNORECASE | DOTALL)
def expand_match(emoticon):
"""Expands matched emoticon."""
# Retrieves matched emoticon from string.
match = emoticon.group(0)
# Stores first character for case sensitivity.
first_char = match[0]
# Find expanded emoticon in dictionary, based on emoticon key.
expanded_emoticon = get_emoticon_dict().get(match)
# If the emoticon could not be found, try again with lower case.
if not expanded_emoticon:
expanded_emoticon = get_emoticon_dict().get(match.lower())
# Add first character to expanded emoticon.
expanded_emoticon = first_char + expanded_emoticon[1:]
return expanded_emoticon
# Replaces emoticon with expanded emoticon in text.
normalized_text = emoticon_pattern.sub(expand_match, normalized_text)
# Strip text.
normalized_text = normalized_text.strip()
# If text was tokenized, re-tokenize text.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return expanded text.
return normalized_text
def remove_special_characters(text):
"""Removes special characters from text."""
# If text is empty, return None.
if not text: return None
# If texts is tokenized, merge tokens.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
# Retrieve special characters pattern.
special_characters_pattern = get_special_characters_pattern()
# Remove all tailing white spaces.
normalized_text = normalized_text.strip()
# Replace all special characters with spaces.
normalized_text = sub(special_characters_pattern, r' ', normalized_text)
# Then remove multiple adjacent spaces.
normalized_text = sub(' +', ' ', normalized_text)
# Strip text.
normalized_text = normalized_text.strip()
# If text was tokenized, re-tokenize text.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return normalized text.
return normalized_text
def remove_end_characters(text):
"""Removes end characters from word token list."""
# If text is empty, return None.
if not text: return None
# If text is not tokenize, tokenize text.
if is_tokenized(text):
was_tokenized = True
normalized_text = merge_tokens(text)
else:
was_tokenized = False
normalized_text = text
normalized_text += ' '
# Replace stopwords with spaces.
normalized_text = sub(get_end_characters_pattern(), r' ', normalized_text)
# Then remove multiple adjacent spaces.
normalized_text = sub(' +', ' ', normalized_text)
# Then strip text.
normalized_text = normalized_text.strip()
if normalized_text[-1] == r'.':
normalized_text = normalized_text[:-1]
# If text was tokenized, then re-tokenize.
if was_tokenized:
normalized_text = word_tokenize(normalized_text)
# Return normalized text.
return normalized_text
def remove_stopwords(text):
"""Remove stopwords from word token list"""
# If text is empty, return None.
if not text: return None
# If text is not tokenize, tokenize text.
if is_tokenized(text):
was_tokenized = True
normalized_text = text
else:
was_tokenized = False
normalized_text = word_tokenize(text, 'whitespace')
# Create stopwords list.
stop_set = set(stopwords.words('english'))
rem_word = ['very', 'not', 'nor', 'no', 'same', 'more']
for word in rem_word:
stop_set.remove(word)
stop_set.update(['amp','would','one','get','make','buy','time','use',
'go','think','first','old','put','two','even','look','come','year','also',
'time','way','give','quotation','work','say','could','take','back','want','find','new','try',
'money','fill','hear','know','thing', 'see','seem', 'day', 'another', 'month'])
# Filter stopwords from text.
normalized_text = [token for token in normalized_text if token not in stop_set]
# If text was not tokenize, | |
+ m.b69 - m.b90 <= 0)
m.c2851 = Constraint(expr= - m.b49 + m.b50 - m.b91 <= 0)
m.c2852 = Constraint(expr= - m.b49 + m.b51 - m.b92 <= 0)
m.c2853 = Constraint(expr= - m.b49 + m.b52 - m.b93 <= 0)
m.c2854 = Constraint(expr= - m.b49 + m.b53 - m.b94 <= 0)
m.c2855 = Constraint(expr= - m.b49 + m.b54 - m.b95 <= 0)
m.c2856 = Constraint(expr= - m.b49 + m.b55 - m.b96 <= 0)
m.c2857 = Constraint(expr= - m.b49 + m.b56 - m.b97 <= 0)
m.c2858 = Constraint(expr= - m.b49 + m.b57 - m.b98 <= 0)
m.c2859 = Constraint(expr= - m.b49 + m.b58 - m.b99 <= 0)
m.c2860 = Constraint(expr= - m.b49 + m.b59 - m.b100 <= 0)
m.c2861 = Constraint(expr= - m.b49 + m.b60 - m.b101 <= 0)
m.c2862 = Constraint(expr= - m.b49 + m.b61 - m.b102 <= 0)
m.c2863 = Constraint(expr= - m.b49 + m.b62 - m.b103 <= 0)
m.c2864 = Constraint(expr= - m.b49 + m.b63 - m.b104 <= 0)
m.c2865 = Constraint(expr= - m.b49 + m.b64 - m.b105 <= 0)
m.c2866 = Constraint(expr= - m.b49 + m.b65 - m.b106 <= 0)
m.c2867 = Constraint(expr= - m.b49 + m.b66 - m.b107 <= 0)
m.c2868 = Constraint(expr= - m.b49 + m.b67 - m.b108 <= 0)
m.c2869 = Constraint(expr= - m.b49 + m.b68 - m.b109 <= 0)
m.c2870 = Constraint(expr= - m.b49 + m.b69 - m.b110 <= 0)
m.c2871 = Constraint(expr= - m.b50 + m.b51 - m.b111 <= 0)
m.c2872 = Constraint(expr= - m.b50 + m.b52 - m.b112 <= 0)
m.c2873 = Constraint(expr= - m.b50 + m.b53 - m.b113 <= 0)
m.c2874 = Constraint(expr= - m.b50 + m.b54 - m.b114 <= 0)
m.c2875 = Constraint(expr= - m.b50 + m.b55 - m.b115 <= 0)
m.c2876 = Constraint(expr= - m.b50 + m.b56 - m.b116 <= 0)
m.c2877 = Constraint(expr= - m.b50 + m.b57 - m.b117 <= 0)
m.c2878 = Constraint(expr= - m.b50 + m.b58 - m.b118 <= 0)
m.c2879 = Constraint(expr= - m.b50 + m.b59 - m.b119 <= 0)
m.c2880 = Constraint(expr= - m.b50 + m.b60 - m.b120 <= 0)
m.c2881 = Constraint(expr= - m.b50 + m.b61 - m.b121 <= 0)
m.c2882 = Constraint(expr= - m.b50 + m.b62 - m.b122 <= 0)
m.c2883 = Constraint(expr= - m.b50 + m.b63 - m.b123 <= 0)
m.c2884 = Constraint(expr= - m.b50 + m.b64 - m.b124 <= 0)
m.c2885 = Constraint(expr= - m.b50 + m.b65 - m.b125 <= 0)
m.c2886 = Constraint(expr= - m.b50 + m.b66 - m.b126 <= 0)
m.c2887 = Constraint(expr= - m.b50 + m.b67 - m.b127 <= 0)
m.c2888 = Constraint(expr= - m.b50 + m.b68 - m.b128 <= 0)
m.c2889 = Constraint(expr= - m.b50 + m.b69 - m.b129 <= 0)
m.c2890 = Constraint(expr= - m.b51 + m.b52 - m.b130 <= 0)
m.c2891 = Constraint(expr= - m.b51 + m.b53 - m.b131 <= 0)
m.c2892 = Constraint(expr= - m.b51 + m.b54 - m.b132 <= 0)
m.c2893 = Constraint(expr= - m.b51 + m.b55 - m.b133 <= 0)
m.c2894 = Constraint(expr= - m.b51 + m.b56 - m.b134 <= 0)
m.c2895 = Constraint(expr= - m.b51 + m.b57 - m.b135 <= 0)
m.c2896 = Constraint(expr= - m.b51 + m.b58 - m.b136 <= 0)
m.c2897 = Constraint(expr= - m.b51 + m.b59 - m.b137 <= 0)
m.c2898 = Constraint(expr= - m.b51 + m.b60 - m.b138 <= 0)
m.c2899 = Constraint(expr= - m.b51 + m.b61 - m.b139 <= 0)
m.c2900 = Constraint(expr= - m.b51 + m.b62 - m.b140 <= 0)
m.c2901 = Constraint(expr= - m.b51 + m.b63 - m.b141 <= 0)
m.c2902 = Constraint(expr= - m.b51 + m.b64 - m.b142 <= 0)
m.c2903 = Constraint(expr= - m.b51 + m.b65 - m.b143 <= 0)
m.c2904 = Constraint(expr= - m.b51 + m.b66 - m.b144 <= 0)
m.c2905 = Constraint(expr= - m.b51 + m.b67 - m.b145 <= 0)
m.c2906 = Constraint(expr= - m.b51 + m.b68 - m.b146 <= 0)
m.c2907 = Constraint(expr= - m.b51 + m.b69 - m.b147 <= 0)
m.c2908 = Constraint(expr= - m.b52 + m.b53 - m.b148 <= 0)
m.c2909 = Constraint(expr= - m.b52 + m.b54 - m.b149 <= 0)
m.c2910 = Constraint(expr= - m.b52 + m.b55 - m.b150 <= 0)
m.c2911 = Constraint(expr= - m.b52 + m.b56 - m.b151 <= 0)
m.c2912 = Constraint(expr= - m.b52 + m.b57 - m.b152 <= 0)
m.c2913 = Constraint(expr= - m.b52 + m.b58 - m.b153 <= 0)
m.c2914 = Constraint(expr= - m.b52 + m.b59 - m.b154 <= 0)
m.c2915 = Constraint(expr= - m.b52 + m.b60 - m.b155 <= 0)
m.c2916 = Constraint(expr= - m.b52 + m.b61 - m.b156 <= 0)
m.c2917 = Constraint(expr= - m.b52 + m.b62 - m.b157 <= 0)
m.c2918 = Constraint(expr= - m.b52 + m.b63 - m.b158 <= 0)
m.c2919 = Constraint(expr= - m.b52 + m.b64 - m.b159 <= 0)
m.c2920 = Constraint(expr= - m.b52 + m.b65 - m.b160 <= 0)
m.c2921 = Constraint(expr= - m.b52 + m.b66 - m.b161 <= 0)
m.c2922 = Constraint(expr= - m.b52 + m.b67 - m.b162 <= 0)
m.c2923 = Constraint(expr= - m.b52 + m.b68 - m.b163 <= 0)
m.c2924 = Constraint(expr= - m.b52 + m.b69 - m.b164 <= 0)
m.c2925 = Constraint(expr= - m.b53 + m.b54 - m.b165 <= 0)
m.c2926 = Constraint(expr= - m.b53 + m.b55 - m.b166 <= 0)
m.c2927 = Constraint(expr= - m.b53 + m.b56 - m.b167 <= 0)
m.c2928 = Constraint(expr= - m.b53 + m.b57 - m.b168 <= 0)
m.c2929 = Constraint(expr= - m.b53 + m.b58 - m.b169 <= 0)
m.c2930 = Constraint(expr= - m.b53 + m.b59 - m.b170 <= 0)
m.c2931 = Constraint(expr= - m.b53 + m.b60 - m.b171 <= 0)
m.c2932 = Constraint(expr= - m.b53 + m.b61 - m.b172 <= 0)
m.c2933 = Constraint(expr= - m.b53 + m.b62 - m.b173 <= 0)
m.c2934 = Constraint(expr= - m.b53 + m.b63 - m.b174 <= 0)
m.c2935 = Constraint(expr= - m.b53 + m.b64 - m.b175 <= 0)
m.c2936 = Constraint(expr= - m.b53 + m.b65 - m.b176 <= 0)
m.c2937 = Constraint(expr= - m.b53 + m.b66 - m.b177 <= 0)
m.c2938 = Constraint(expr= - m.b53 + m.b67 - m.b178 <= 0)
m.c2939 = Constraint(expr= - m.b53 + m.b68 - m.b179 <= 0)
m.c2940 = Constraint(expr= - m.b53 + m.b69 - m.b180 <= 0)
m.c2941 = Constraint(expr= - m.b54 + m.b55 - m.b181 <= 0)
m.c2942 = Constraint(expr= - m.b54 + m.b56 - m.b182 <= 0)
m.c2943 = Constraint(expr= - m.b54 + m.b57 - m.b183 <= 0)
m.c2944 = Constraint(expr= - m.b54 + m.b58 - m.b184 <= 0)
m.c2945 = Constraint(expr= - m.b54 + m.b59 - m.b185 <= 0)
m.c2946 = Constraint(expr= - m.b54 + m.b60 - m.b186 <= 0)
m.c2947 = Constraint(expr= - m.b54 + m.b61 - m.b187 <= 0)
m.c2948 = Constraint(expr= - m.b54 + m.b62 - m.b188 <= 0)
m.c2949 = Constraint(expr= - m.b54 + m.b63 - m.b189 <= 0)
m.c2950 = Constraint(expr= - m.b54 + m.b64 - m.b190 <= 0)
m.c2951 = Constraint(expr= - m.b54 + m.b65 - m.b191 <= 0)
m.c2952 = Constraint(expr= - m.b54 + m.b66 - m.b192 <= 0)
m.c2953 = Constraint(expr= - m.b54 + m.b67 - m.b193 <= 0)
m.c2954 = Constraint(expr= - m.b54 + m.b68 - m.b194 <= 0)
m.c2955 = Constraint(expr= - m.b54 + m.b69 - m.b195 <= 0)
m.c2956 = Constraint(expr= - m.b55 + m.b56 - m.b196 <= 0)
m.c2957 = Constraint(expr= - m.b55 + m.b57 - m.b197 <= 0)
m.c2958 = Constraint(expr= - m.b55 + m.b58 - m.b198 <= 0)
m.c2959 = Constraint(expr= - m.b55 + m.b59 - m.b199 <= 0)
m.c2960 = Constraint(expr= - m.b55 + m.b60 - m.b200 <= 0)
m.c2961 = Constraint(expr= - m.b55 + m.b61 - m.b201 <= 0)
m.c2962 = Constraint(expr= - m.b55 + m.b62 - m.b202 <= 0)
m.c2963 = Constraint(expr= - m.b55 + m.b63 - m.b203 <= 0)
m.c2964 = Constraint(expr= - m.b55 + m.b64 - m.b204 <= 0)
m.c2965 = Constraint(expr= - m.b55 + m.b65 - m.b205 <= 0)
m.c2966 = Constraint(expr= - m.b55 + m.b66 - m.b206 <= 0)
m.c2967 = Constraint(expr= - m.b55 + m.b67 - m.b207 <= 0)
m.c2968 = Constraint(expr= - m.b55 + m.b68 - m.b208 <= 0)
m.c2969 = Constraint(expr= - m.b55 + m.b69 - m.b209 <= 0)
m.c2970 = Constraint(expr= - m.b56 + m.b57 - m.b210 <= 0)
m.c2971 = Constraint(expr= - m.b56 + m.b58 - m.b211 <= 0)
m.c2972 = Constraint(expr= - m.b56 + m.b59 - m.b212 <= 0)
m.c2973 = Constraint(expr= - m.b56 + m.b60 - m.b213 <= 0)
m.c2974 = Constraint(expr= - m.b56 + m.b61 - m.b214 <= 0)
m.c2975 = Constraint(expr= - m.b56 + m.b62 - m.b215 <= 0)
m.c2976 = Constraint(expr= - m.b56 + m.b63 - m.b216 <= 0)
m.c2977 = Constraint(expr= - m.b56 + m.b64 - m.b217 <= 0)
m.c2978 = Constraint(expr= - m.b56 + m.b65 - m.b218 <= 0)
# ###########################################################################
# Author: <NAME>
# Github: ...
# Email: <EMAIL>, <EMAIL>
# This is a machine learning project able to detect where notes are played
# in audio tracks.
# If you are interested in it, please contact me before using this code
# ###########################################################################
# This file contains the classes used to apply classification and prediction
# python modules
import os
import csv
import numpy as np
# librosa module
import librosa
# Weka related modules
from weka.classifiers import Classifier
from weka.core.converters import Loader
# Parent class of Classification and Prediction
class Extract():
# mean: an empty list filled with the mean of each sub-sample
# var: an empty list filled with the variance of each sub-sample
# mean_der_1: an empty list filled with the 1st degree derivative of the mean
# mean_der_2: an empty list filled with the 2nd degree derivative of the mean
# audio_ext: are the accepted audio formats
def __init__(self):
self._mean = []
self._var = []
self._mean_der_1 = []
self._mean_der_2 = []
self._note_class = []
self._audio_ext = [".wav", ".aiff", ".aif"]
# def __init__
    # Calculating the needed audio features given a source
# smean, svar, s1der, s2der: bool values that specify what to calculate
# smean is standing for "settings mean"
    # *args is used to append a fixed char to self._note_class
    # e.g. if features are calculated to do a prediction
    # the note_class value (1, 2, 3, 4, 5) will be unknown so must be "?"
def calc_features(self, source: str, smean: bool, svar: bool, s1der: bool, s2der: bool, win_len: int, overlap: int, *args):
if source == "":
raise Exception ("Please fill all the fields")
# if
        if not (os.path.isfile(source) and source.endswith(tuple(self._audio_ext))):
raise Exception("The audio source path must be a file ending with .wav .aif .aiff ")
# if
# importing audio data and samplerate given path
# sr=None to preserve original sample rate
# if the sample rate is higher than 44100 resample!
data, samplerate = librosa.load(source, sr=None)
if samplerate > 44100:
data = librosa.resample(data, samplerate, 44100)
# if
# min_data_len = int(2*samplerate/10) # is the minimum sub-sample and accepted length
min_data_len = win_len # is the minimum sub-sample and accepted length
        # The sample does not have enough points of measurement
# return the filepath string to print a warning or log info
if len(data) < min_data_len:
return source
# if
# defining sub sample starting and ending point
sub_sample_start = 0
sub_sample_end = min_data_len
# sub sample will be shifted 0.5 times the min_data_len
# If sub_sample_start = 0 and sub_sample_end = min_data_len = 8820
# then sub_sample_shift = 4410, new sub_sample will start from 0+4410, end in 8820+4410
# sub_sample_shift = int(min_data_len/2)
sub_sample_shift = overlap
        # Every sub sample will be classified with a value in range(1, 6)
        # value 5 means this is certainly the beginning of a note
        # value 1 is not at all the beginning of a note
sub_sample_value = 5
        # While the sample has enough points of measurement it'll loop
while sub_sample_end < len(data):
            # Selecting all samples from the 1st to the 8820th
sub_sample = data[sub_sample_start:sub_sample_end]
# Calculating and writing mean
if smean is True:
self._mean.append(np.mean(sub_sample))
            # Calculating and writing variance
if svar is True:
self._var.append(np.var(sub_sample))
# Calculating and writing 1st degree mean derivative
if s1der is True:
self._mean_der_1.append(np.mean(np.polyder(sub_sample, 1)))
# Calculating and writing 2nd degree mean derivative
if s2der is True:
self._mean_der_2.append(np.mean(np.polyder(sub_sample, 2)))
# If a value is specified (as it can be specified in prediction)
# this value will be added in the note_class list
if len(args):
if args[0] == "?":
self._note_class.append(args[0])
elif sub_sample_value < 1:
self._note_class.append("1")
else:
self._note_class.append(str(sub_sample_value))
sub_sample_value-=1
# if
            # Selecting all samples from the i-th to the (i+8820)-th
sub_sample_start+=sub_sample_shift
sub_sample_end+=sub_sample_shift
# while
# def calc_features
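    # A small worked example of the windowing above (illustrative only, numbers
    # assumed): with a 44.1 kHz file, win_len=8820 (~0.2 s) and overlap=4410,
    # sub-samples cover [0, 8820), [4410, 13230), [8820, 17640), ... so each
    # window overlaps the previous one by half, and one feature row is appended
    # per window until sub_sample_end reaches the end of the data.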
# Normalizing features to be able to compare them before applying classification and prediction
def norm_features(self, smean: bool, svar: bool, s1der: bool, s2der: bool):
normalize = lambda current, feature: (current - min(feature)) / (max(feature) - min(feature))
# norm_mean = []
# norm_var = []
# norm_mean_der_1 = []
# norm_mean_der_2 = []
# for i, (mean, var, mean_der_1, mean_der_2) in enumerate(itertools.zip_longest(self._mean, self._var, self._mean_der_1, self._mean_der_2)):
# # normalizing mean
# if smean is True:
# norm_mean.append(normalize(mean, self._mean))
# # normalizing variance
# if svar is True:
# norm_var.append(normalize(var, self._var))
# # normalizing the mean 1st degree derivative
# if s1der is True:
# norm_mean_der_1.append(normalize(mean_der_1, self._mean_der_1))
# # normalizing the mean 2nd degree derivative
# if s2der is True:
# norm_mean_der_2.append(normalize(mean_der_2, self._mean_der_2))
# print(i)
# # for
if smean is True:
norm_mean = []
for i in range(len(self._mean)):
print(i)
norm_mean.append(normalize(self._mean[i], self._mean))
# for
self._mean = norm_mean
# if
if svar is True:
norm_var = []
for i in range(len(self._var)):
print(i)
norm_var.append(normalize(self._var[i], self._var))
# for
self._var = norm_var
# if
if s1der is True:
norm_mean_der_1 = []
for i in range(len(self._mean_der_1)):
print(i)
norm_mean_der_1.append(normalize(self._mean_der_1[i], self._mean_der_1))
# for
self._mean_der_1 = norm_mean_der_1
# if
if s2der is True:
norm_mean_der_2 = []
for i in range(len(self._mean_der_2)):
print(i)
norm_mean_der_2.append(normalize(self._mean_der_2[i], self._mean_der_2))
# for
self._mean_der_2 = norm_mean_der_2
# if
# def norm_features
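    # Illustration of the min-max normalization used above (values assumed):
    # for feature = [2.0, 4.0, 6.0], normalize(4.0, feature) gives
    # (4.0 - 2.0) / (6.0 - 2.0) = 0.5, so every feature list is rescaled to the
    # [0, 1] range before classification. Note this would divide by zero if all
    # values in a feature list were identical.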
# Prints arff file with calculated features
    # rname: is the relation name, also the file name
# destination: is the folder where to store the rname.arff file
def save_arff(self, rname: str, destination: str, smean: bool, svar: bool, s1der: bool, s2der: bool):
if rname == "" or destination == "":
raise Exception ("Please fill all the fields")
# if
if not os.path.isdir(destination):
raise Exception("The destination field must be a valid direcotry")
# if
try:
# Generating *.arff path to store extracted features
extension = "arff"
arff_file = os.path.join(destination, rname + "." + extension)
# Writing arff_file
with open(arff_file, "w") as fp:
fp.write('@relation ' + rname + '\n')
if smean is True: fp.write('@attribute mean numeric\n')
if svar is True: fp.write('@attribute variance numeric\n')
if s1der is True: fp.write('@attribute mean_der_1 numeric\n')
if s2der is True: fp.write('@attribute mean_der_2 numeric\n')
fp.write('@attribute note {1, 2, 3, 4, 5}\n\n')
fp.write('@data\n')
max_i = max(len(self._mean), len(self._var), len(self._mean_der_1), len(self._mean_der_2))
for i in range(max_i):
if smean is True:
fp.write("{0:1.4f}".format(self._mean[i]))
if svar is True:
fp.write(",{0:1.4f}".format(self._var[i]))
if s1der is True:
fp.write(",{0:1.4f}".format(self._mean_der_1[i]))
if s2der is True:
fp.write(",{0:1.4f}".format(self._mean_der_2[i]))
fp.write(",{0:s}\n".format(self._note_class[i]))
# for mean, var, mean_der_1, mean_der_2, note in zip(self._mean, self._var, self._mean_der_1, self._mean_der_2, self._note_class):
# fp.write("{0:1.4f},{1:1.4f},{2:1.4f},{3:1.4f},{4:s}\n".format(mean, var, mean_der_1, mean_der_2, note))
# for
# with
except:
raise Exception("Unexpected error during the creation of the arff file")
# except
return arff_file
# def save_arff
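    # Sketch of the file save_arff produces when all four features are enabled
    # (data values hypothetical, layout follows the writes above):
    #
    #   @relation myrelation
    #   @attribute mean numeric
    #   @attribute variance numeric
    #   @attribute mean_der_1 numeric
    #   @attribute mean_der_2 numeric
    #   @attribute note {1, 2, 3, 4, 5}
    #
    #   @data
    #   0.5123,0.0042,0.4981,0.5004,5
    #   0.4870,0.0051,0.5012,0.4996,4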
# class Extract
# Child class of Extract
# used to classify samples to use in prediction
class Classification(Extract):
# wav_files: an empty list filled with audio paths
# wav_files_qty: the number of found audio files
def __init__(self):
self._wav_files = []
self._wav_files_qty = 0
Extract.__init__(self)
# def __init__
# Property methods
@property
def wav_files(self):
return self._wav_files
# def wav_files
# End property methods
# Find_audio finds all wav files in the current directory
# append them in "wav_files"
# source: is the folder where to search the files
def find_audio(self, source: str):
if source == "":
raise Exception ("Please fill all the fields")
# if
if not os.path.isdir(source):
raise Exception("The source field must be a valid direcotry")
# if
try:
for wav_file in os.listdir(source):
if wav_file.endswith(tuple(self._audio_ext)):
self._wav_files.append(os.path.join(source, wav_file))
self._wav_files_qty += 1
# if
# for
except:
raise OSError("Unexpected error during the reading of audio files")
# except
if self._wav_files_qty == 0:
raise Exception("No audio files were found in the specified directory")
# if
self._wav_files.sort()
# def find_audio
    # NOT NEEDED AT THE MOMENT
# # Create the model to use during predictions
    # # rname: is the relation name, also the file name
# # destination: is the folder where to store the rname.arff file
# def save_model(self, rname: str, destination: str):
# # Generate the path to save *.model file
    #     extension
* best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
return best
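# Note on the pattern repeated in the functions below (explanatory, not from the
# original source): `totalsum` is a running prefix sum, totalsum[i] = A[0] + ... + A[i],
# built in O(N) by the loop `totalsum[i] += totalsum[i - 1]`. For A = [3, 1, 4]
# this gives totalsum = [3, 4, 8]. `getsum` (defined elsewhere in this file) is
# presumably evaluated from these prefix sums so each window query is O(1),
# letting the two-pointer sweep over (a, b) run in linear time overall.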
def func_1691ea258a804e008ed80b0b27bfe0b0(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
return q
def func_684fcc825d8c47bcad7082f202f2183f(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
return i
def func_c1bd00b0cf194523b77e76a0ff6c7b63(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
return b
def func_cedb8992c5f34bb4a21e2acfb535ab9a(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
return T
def func_f25afc94d1bf4c3cb31d44c394340e06(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
return N
def func_754e6e270ffc43aea9841c9db716860a(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
return total
def func_ad489c77b0394a6d991496c4c1d96e9a(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
return totalsum
def func_d2f5cfd435914ff79a6b5ec165eb991c(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1,
total, totalsum):
b += 1
return a
def func_a834e99521a342968ed049cd9018765f(infile):
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
        A = [((i * p + q) % r + s) for i in xrange(N)]
import logging
import random
import asyncio
import time
from typing import Union
import discord
from discord.ext import commands
from discord.ext.commands import Cog, Bot, Context, BucketType
from tools import embeds, record
from tools.bank import Bank
import constants
log = logging.getLogger(__name__)
class Chance(Cog):
"""Chance"""
def __init__(self, bot: Bot):
self.bot = bot
@commands.before_invoke(record.record_usage)
@commands.bot_has_permissions(embed_links=True, read_message_history=True)
@commands.command(name="roll")
async def roll(
self, ctx: Context, number_of_dice: int = 1, number_of_sides: int = 6
):
"""Simulates rolling dice."""
number_of_dice = int(number_of_dice)
number_of_sides = int(number_of_sides)
if number_of_dice > 500:
await embeds.error_message(
ctx=ctx, description="Too many dice, try again in smaller batches."
)
return
dice = [
int(random.choice(range(1, number_of_sides + 1)))
for _ in range(number_of_dice)
]
embed = embeds.make_embed(
ctx=ctx,
description=(", ".join(str(x) for x in dice)),
title="Rolling Results",
)
if number_of_dice > 1:
embed.set_footer(text=f"Total sum: {sum(dice)}")
await ctx.reply(embed=embed)
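    # Example (illustrative; the command prefix depends on the bot's
    # configuration and is assumed here): ".roll 3 20" rolls three 20-sided
    # dice, each drawn uniformly from range(1, 21), and the footer shows the
    # total sum whenever more than one die is rolled.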
@commands.before_invoke(record.record_usage)
@commands.bot_has_permissions(embed_links=True, read_message_history=True)
@commands.command(name="flip")
async def flip(self, ctx: Context):
"""Simulates flipping a coin."""
coin = [str(random.choice(["Heads", "Tails"]))]
embed = embeds.make_embed(
ctx=ctx, description=coin[0], title="Coin Flip Results"
)
await ctx.reply(embed=embed)
@commands.before_invoke(record.record_usage)
@commands.bot_has_permissions(
manage_messages=True,
add_reactions=True,
embed_links=True,
external_emojis=True,
use_external_emojis=True,
read_message_history=True,
)
@commands.guild_only()
@commands.max_concurrency(number=1, per=BucketType.user, wait=False)
@commands.max_concurrency(number=5, per=BucketType.default, wait=False)
@commands.command(name="cups", aliases=["cup"])
async def cups(self, ctx: Context, bet: int = 10):
"""
There are 3 cups and only one has the prize, you have to guess which one has it
"""
# start_bet = bet
if bet < 1:
await embeds.error_message(
ctx=ctx, description="Bet must be higher or equal to 1"
)
return
if (bank := Bank(ctx.author)) < bet:
await embeds.error_message(
ctx=ctx,
description=f"You can't bet more than you have\n{str(bank)}",
)
return
CUP_EMOJI = constants.Emojis.cup # [:cup:]
COIN_EMOJI = constants.Emojis.coin # [:coin:]
ONE_EMOJI = constants.Emojis.number_one # [:one:]
TWO_EMOJI = constants.Emojis.number_two # [:two:]
THREE_EMOJI = constants.Emojis.number_three # [:three:]
CASH_EMOJI = constants.Emojis.cash_out # [:money_with_wings:]
CROSS_EMOJI = constants.Emojis.cross_mark # [:x:]
def check(
reaction: discord.Reaction, user: Union[discord.Member, discord.User]
) -> bool:
"""Checks if reaction is from author & is applicable to game"""
foo = all(
(
# Checking if user who used a reaction, was the same user who issued the command.
user == bank.user,
# Checking the reaction was to the same message as the slot machine embed.
reaction.message.id == message.id,
# Checking if the reaction emoji is applicable to the slot machine commands.
str(reaction.emoji)
in [
ONE_EMOJI,
TWO_EMOJI,
THREE_EMOJI,
CASH_EMOJI,
],
)
)
if foo:
# Logging the action in case something breaks in the future.
log.debug(f"{ctx.author=} reacted with {reaction.emoji=} in cups")
return foo
async def default_embed(message: discord.Message, bet: int) -> None:
embed = embeds.make_embed(
ctx=ctx, title="Cups", description="Where's the coin? :coin:"
)
embed.add_field(name=f"{CUP_EMOJI} {CUP_EMOJI} {CUP_EMOJI}", value="")
embed.set_footer(
text=f"Bet: {bet}\nChoices: {ONE_EMOJI} {TWO_EMOJI} {THREE_EMOJI}"
)
if message is None:
return await ctx.reply(embed=embed)
else:
await message.edit(embed=embed)
def check_win(choice: int, elements: list) -> bool:
return elements[choice - 1] == COIN_EMOJI
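        # Illustrative check (values assumed): with choice=2 and
        # elements=[CROSS_EMOJI, COIN_EMOJI, CROSS_EMOJI], check_win returns
        # True because elements[choice - 1] is the coin; any other choice loses.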
async def spin(bet: int, choice: int) -> int:
elements = random.choices(choices_config)[0] # picking random option
log.debug(f"{ctx.author}, options: {elements}")
if check_win(choice, elements):
log.trace(f"{ctx.author=} wins in cups with {choice=} {elements=}")
bet *= 2
embed = embeds.make_embed(
ctx=ctx, title="Cups", description="**WINNER WINNER!**"
)
embed.add_field(name=" ".join(elements), value=f"You won {bet} coin!")
else:
log.trace(f"{ctx.author=} looses it all with {choice=} {elements=}")
embed = embeds.make_embed(
ctx=ctx, title="Cups", description="Better luck next time."
)
embed.add_field(
name=(" ".join(elements)), value=f"You lost {bet} coin."
)
bet = 0
embed.set_footer(
text=f"Risking: {bet} \nChoices: {ONE_EMOJI} {TWO_EMOJI} {THREE_EMOJI}"
)
log.trace("cups, Sending embed")
await message.edit(embed=embed)
if bet != 0:
                await asyncio.sleep(2)
await default_embed(message, bet)
return bet
async def cash_out(message: discord.Message, bet: int, bal: int):
await message.clear_reactions()
log.debug("clearing reactions")
if bet != 0:
emb = embeds.make_embed(
ctx=ctx,
color="green",
title="Cups",
description=f"Awarded {bet} :coin:",
)
bank.add(bet, "Cups game")
await message.edit(embed=emb)
return
choices_config = [
[COIN_EMOJI, CROSS_EMOJI, CROSS_EMOJI],
[CROSS_EMOJI, COIN_EMOJI, CROSS_EMOJI],
[CROSS_EMOJI, CROSS_EMOJI, COIN_EMOJI],
]
bal = float(bank.subtract(bet, "Cups game"))
message = await default_embed(None, bet)
# getting the message object for editing and reacting
# Adding reactions to act like buttons
for emoji in [ONE_EMOJI, TWO_EMOJI, THREE_EMOJI, CASH_EMOJI]:
await message.add_reaction(emoji)
while True:
try:
reaction, user = await self.bot.wait_for(
"reaction_add", timeout=60, check=check
)
# waiting for a reaction to be added - times out after x seconds, 60 in this example
if str(reaction.emoji) == ONE_EMOJI and bet > 0:
bet = await spin(bet, choice=1)
await message.remove_reaction(reaction, user)
elif str(reaction.emoji) == TWO_EMOJI and bet > 0:
bet = await spin(bet, choice=2)
await message.remove_reaction(reaction, user)
elif str(reaction.emoji) == THREE_EMOJI and bet > 0:
bet = await spin(bet, choice=3)
await message.remove_reaction(reaction, user)
elif str(reaction.emoji) == CASH_EMOJI:
await cash_out(message=message, bet=bet, bal=bal)
return
else:
await message.remove_reaction(reaction, user)
if bet == 0:
await cash_out(message=message, bet=bet, bal=bal)
return
except asyncio.TimeoutError: # ending the loop if user doesn't react after x seconds
await cash_out(message=message, bet=bet, bal=bal)
return
@commands.before_invoke(record.record_usage)
@commands.bot_has_permissions(
manage_messages=True,
add_reactions=True,
embed_links=True,
external_emojis=True,
use_external_emojis=True,
read_message_history=True,
)
@commands.guild_only()
@commands.max_concurrency(number=1, per=BucketType.user, wait=False)
@commands.max_concurrency(number=4, per=BucketType.default, wait=False)
@commands.command(name="connect4", aliases=["4", "connect"])
async def connect(self, ctx: Context, bet: int = 0):
"""
Connect four checkers in a row, pillar, or diagonal first to win.
"""
# setting global emoji's that will be used in the program
ONE_EMOJI = constants.Emojis.number_one
TWO_EMOJI = constants.Emojis.number_two
THREE_EMOJI = constants.Emojis.number_three
FOUR_EMOJI = constants.Emojis.number_four
FIVE_EMOJI = constants.Emojis.number_five
SIX_EMOJI = constants.Emojis.number_six
SEVEN_EMOJI = constants.Emojis.number_seven
RED_CIRCLE = constants.Emojis.red_circle
YELLOW_CIRCLE = constants.Emojis.yellow_circle
BLACK_CIRCLE = constants.Emojis.black_circle
REACTIONS = [
ONE_EMOJI,
TWO_EMOJI,
THREE_EMOJI,
FOUR_EMOJI,
FIVE_EMOJI,
SIX_EMOJI,
SEVEN_EMOJI,
]
BOTTOM = (
f":black_large_square:{ONE_EMOJI}{TWO_EMOJI}{THREE_EMOJI}{FOUR_EMOJI}"
f"{FIVE_EMOJI}{SIX_EMOJI}{SEVEN_EMOJI}"
)
if bet and bet > Bank(ctx.author):
await embeds.error_message(
ctx=ctx, description="You do not have enough coin to bet that much"
)
return
Bank(ctx.author).subtract(bet)
"""Need to find another player to play against. Polling the server"""
embed = embeds.make_embed(
ctx=ctx,
title=f"{ctx.author.display_name} wants to play Connect Four",
description="Press play to accept the challenge",
)
embed.set_author(icon_url=ctx.author.avatar_url, name=str(ctx.author))
embed.set_footer(text=f"Bet Amount: **{bet:,}**")
message = await ctx.reply(embed=embed)
await message.add_reaction("▶️")
await message.add_reaction("🛑")
def check_start(reaction, user) -> bool:
"""Checks if reaction is from author & is applicable to command"""
# Checking if user who used a reaction, was the same user who issued the command
author_check = not user.bot
# Checking the reaction was to the same message as the command embed
message_check = reaction.message.id == message.id
# Checking if the reaction emoji is applicable to the command embed
reaction_check = str(reaction.emoji) in ["▶️", "🛑"]
if x := (author_check and message_check and reaction_check):
# logging the action in case something breaks in the future
log.trace(f"{user.name=} accepted to play connect 4 {reaction.emoji=}")
return x
while True:
try:
reaction, user = await self.bot.wait_for(
"reaction_add", timeout=60, check=check_start
)
# waiting for a reaction to be added - times out after x seconds, 60 in this example
if (
bet == 0
or bet < (bal := Bank(user))
and str(reaction.emoji) == "▶️"
):
Bank(user).subtract(bet)
players = {RED_CIRCLE: ctx.author, YELLOW_CIRCLE: user}
await message.clear_reactions()
break
elif str(reaction.emoji) == "🛑" and ctx.author == user:
Bank(ctx.author).add(bet)
try:
await message.delete()
except discord.NotFound:
pass
return
elif bet > bal:
await embeds.warning_message(
ctx,
f"Sorry, {user.display_name}, you do not have enough coin to join in on the bet.",
False,
)
else:
await message.remove_reaction(reaction, user)
                    # remove the reaction if it did not validly start or stop
                    # the game
except asyncio.TimeoutError:
Bank(ctx.author).add(bet)
try:
await message.clear_reactions()
except discord.NotFound:
await embeds.warning_message(
ctx,
"Request message was deleted unexpectedly or can no longer be found\n"
f"{ctx.author.mention}'s money has been returned",
)
log.warning(
msg="Connect4, message to play was deleted unexpectedly"
)
return
# ending the loop if user doesn't react after x seconds
"""Setting up the game"""
def check_win(board: list, player) -> bool:
# Check horizontal locations for win
for column in range(6 - 3):
for row in range(7):
try:
if (
board[row][column] == player
and board[row][column + 1] == player
and board[row][column + 2] == player
and board[row][column + 3] == player
):
return True
except IndexError:
continue
# Check vertical locations for win
for column in range(6):
for row in range(7 - 3):
try:
if (
board[row][column] == player
and board[row + 1][column] == player
and board[row + 2][column] == player
and board[row + 3][column] == player
):
return True
except IndexError:
continue
# Check positively sloped diagonals
for column in range(6 - 3):
for row in range(7 - 3):
try:
if (
board[row][column] == player
                            and board[row + 1][column + 1] == player
                            and board[row + 2][column + 2] == player
                            and board[row + 3][column + 3] == player
                        ):
                            return True
                    except IndexError:
                        continue
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import importlib
import json
import logging
import os
import requests
import signal
import shutil
import sys
import tempfile
import time
import traceback
import zipfile
from os.path import join
# all challenge and submission will be stored in temp directory
BASE_TEMP_DIR = tempfile.mkdtemp()
COMPUTE_DIRECTORY_PATH = join(BASE_TEMP_DIR, "compute")
logger = logging.getLogger(__name__)
AUTH_TOKEN = os.environ.get("AUTH_TOKEN")
DJANGO_SERVER = os.environ.get("DJANGO_SERVER", "localhost")
DJANGO_SERVER_PORT = os.environ.get("DJANGO_SERVER_PORT", "8000")
QUEUE_NAME = os.environ.get("QUEUE_NAME", "evalai_submission_queue")
CHALLENGE_DATA_BASE_DIR = join(COMPUTE_DIRECTORY_PATH, "challenge_data")
SUBMISSION_DATA_BASE_DIR = join(COMPUTE_DIRECTORY_PATH, "submission_files")
CHALLENGE_DATA_DIR = join(CHALLENGE_DATA_BASE_DIR, "challenge_{challenge_id}")
PHASE_DATA_BASE_DIR = join(CHALLENGE_DATA_DIR, "phase_data")
PHASE_DATA_DIR = join(PHASE_DATA_BASE_DIR, "phase_{phase_id}")
PHASE_ANNOTATION_FILE_PATH = join(PHASE_DATA_DIR, "{annotation_file}")
SUBMISSION_DATA_DIR = join(
SUBMISSION_DATA_BASE_DIR, "submission_{submission_id}"
)
SUBMISSION_INPUT_FILE_PATH = join(SUBMISSION_DATA_DIR, "{input_file}")
CHALLENGE_IMPORT_STRING = "challenge_data.challenge_{challenge_id}"
EVALUATION_SCRIPTS = {}
URLS = {
"get_message_from_sqs_queue": "/api/jobs/challenge/queues/{}/",
"delete_message_from_sqs_queue": "/api/jobs/queues/{}/",
"get_submission_by_pk": "/api/jobs/submission/{}",
"get_challenge_phases_by_challenge_pk": "/api/challenges/{}/phases/",
"get_challenge_by_queue_name": "/api/challenges/challenge/queues/{}/",
"get_challenge_phase_by_pk": "/api/challenges/challenge/{}/challenge_phase/{}",
"update_submission_data": "/api/jobs/challenge/{}/update_submission/",
}
EVALAI_ERROR_CODES = [400, 401, 406]
# map of challenge id : phase id : phase annotation file name
# Use: On arrival of submission message, lookup here to fetch phase file name
# this saves db query just to fetch phase annotation file name
PHASE_ANNOTATION_FILE_NAME_MAP = {}
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
class ExecutionTimeLimitExceeded(Exception):
pass
@contextlib.contextmanager
def stdout_redirect(where):
sys.stdout = where
try:
yield where
finally:
sys.stdout = sys.__stdout__
@contextlib.contextmanager
def stderr_redirect(where):
sys.stderr = where
try:
yield where
finally:
sys.stderr = sys.__stderr__
def alarm_handler(signum, frame):
raise ExecutionTimeLimitExceeded
def download_and_extract_file(url, download_location):
"""
    * Function to download a file.
* `download_location` should include name of file as well.
"""
try:
response = requests.get(url)
except Exception as e:
logger.error("Failed to fetch file from {}, error {}".format(url, e))
traceback.print_exc()
response = None
if response and response.status_code == 200:
with open(download_location, "wb") as f:
f.write(response.content)
def download_and_extract_zip_file(url, download_location, extract_location):
"""
    * Function to download a zip file, extract it and then remove the zip file.
* `download_location` should include name of file as well.
"""
try:
response = requests.get(url)
except Exception as e:
logger.error("Failed to fetch file from {}, error {}".format(url, e))
response = None
if response and response.status_code == 200:
with open(download_location, "wb") as f:
f.write(response.content)
# extract zip file
zip_ref = zipfile.ZipFile(download_location, "r")
zip_ref.extractall(extract_location)
zip_ref.close()
# delete zip file
try:
os.remove(download_location)
except Exception as e:
logger.error(
"Failed to remove zip file {}, error {}".format(
download_location, e
)
)
traceback.print_exc()
def create_dir(directory):
"""
    Creates a directory if it does not exist
"""
if not os.path.exists(directory):
os.makedirs(directory)
def create_dir_as_python_package(directory):
"""
    Creates a directory and then makes it a Python
    package by creating an `__init__.py` file.
"""
create_dir(directory)
init_file_path = join(directory, "__init__.py")
with open(init_file_path, "w") as init_file: # noqa
# to create empty file
pass
def return_url_per_environment(url):
base_url = "http://{0}:{1}".format(DJANGO_SERVER, DJANGO_SERVER_PORT)
url = "{0}{1}".format(base_url, url)
return url
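# Illustration of how the URL templates above are resolved (values assumed):
#
#   url = URLS.get("get_submission_by_pk").format(42)   # "/api/jobs/submission/42"
#   url = return_url_per_environment(url)
#   # -> "http://localhost:8000/api/jobs/submission/42" with the default
#   #    DJANGO_SERVER and DJANGO_SERVER_PORT environment values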
def load_challenge():
"""
Creates python package for a challenge and extracts relevant data
"""
# make sure that the challenge base directory exists
create_dir_as_python_package(CHALLENGE_DATA_BASE_DIR)
try:
challenge = get_challenge_by_queue_name()
except Exception:
logger.exception(
"Challenge with queue name %s does not exists" % (QUEUE_NAME)
)
raise
challenge_pk = challenge.get("id")
phases = get_challenge_phases_by_challenge_pk(challenge_pk)
extract_challenge_data(challenge, phases)
def extract_challenge_data(challenge, phases):
"""
* Expects a challenge object and an array of phase object
* Extracts `evaluation_script` for challenge and `annotation_file` for each phase
"""
challenge_data_directory = CHALLENGE_DATA_DIR.format(
challenge_id=challenge.get("id")
)
evaluation_script_url = challenge.get("evaluation_script")
create_dir_as_python_package(challenge_data_directory)
# set entry in map
PHASE_ANNOTATION_FILE_NAME_MAP[challenge.get("id")] = {}
challenge_zip_file = join(
challenge_data_directory,
"challenge_{}.zip".format(challenge.get("id")),
)
download_and_extract_zip_file(
evaluation_script_url, challenge_zip_file, challenge_data_directory
)
phase_data_base_directory = PHASE_DATA_BASE_DIR.format(
challenge_id=challenge.get("id")
)
create_dir(phase_data_base_directory)
for phase in phases:
phase_data_directory = PHASE_DATA_DIR.format(
challenge_id=challenge.get("id"), phase_id=phase.get("id")
)
# create phase directory
create_dir(phase_data_directory)
annotation_file_url = phase.get("test_annotation")
annotation_file_name = os.path.basename(phase.get("test_annotation"))
PHASE_ANNOTATION_FILE_NAME_MAP[challenge.get("id")][
phase.get("id")
] = annotation_file_name
annotation_file_path = PHASE_ANNOTATION_FILE_PATH.format(
challenge_id=challenge.get("id"),
phase_id=phase.get("id"),
annotation_file=annotation_file_name,
)
download_and_extract_file(annotation_file_url, annotation_file_path)
try:
# import the challenge after everything is finished
challenge_module = importlib.import_module(
CHALLENGE_IMPORT_STRING.format(challenge_id=challenge.get("id"))
)
EVALUATION_SCRIPTS[challenge.get("id")] = challenge_module
except Exception:
logger.exception(
"Exception raised while creating Python module for challenge_id: %s"
% (challenge.get("id"))
)
raise
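# Resulting on-disk layout after extract_challenge_data (paths follow the
# templates defined at the top of this file; ids shown are hypothetical):
#
#   <BASE_TEMP_DIR>/compute/challenge_data/challenge_1/      (python package)
#       challenge_1.zip  (evaluation script bundle, removed after extraction)
#       phase_data/phase_2/<annotation_file>
#
# The extracted package is then imported as challenge_data.challenge_1 and
# stored in EVALUATION_SCRIPTS keyed by the challenge id.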
def process_submission_callback(body):
try:
logger.info("[x] Received submission message %s" % body)
process_submission_message(body)
except Exception as e:
logger.exception(
"Exception while processing message from submission queue with error {}".format(
e
)
)
def process_submission_message(message):
"""
Extracts the submission related metadata from the message
and send the submission object for evaluation
"""
challenge_pk = int(message.get("challenge_pk"))
phase_pk = message.get("phase_pk")
submission_pk = message.get("submission_pk")
submission_instance = extract_submission_data(submission_pk)
# so that the further execution does not happen
if not submission_instance:
return
challenge = get_challenge_by_queue_name()
remote_evaluation = challenge.get("remote_evaluation")
challenge_phase = get_challenge_phase_by_pk(challenge_pk, phase_pk)
if not challenge_phase:
logger.exception(
"Challenge Phase {} does not exist for queue {}".format(
phase_pk, QUEUE_NAME
)
)
raise
user_annotation_file_path = join(
SUBMISSION_DATA_DIR.format(submission_id=submission_pk),
os.path.basename(submission_instance.get("input_file")),
)
run_submission(
challenge_pk,
challenge_phase,
submission_instance,
user_annotation_file_path,
remote_evaluation,
)
def extract_submission_data(submission_pk):
"""
* Expects submission id and extracts input file for it.
"""
submission = get_submission_by_pk(submission_pk)
if not submission:
logger.critical("Submission {} does not exist".format(submission_pk))
traceback.print_exc()
# return from here so that the message can be acked
# This also indicates that we don't want to take action
# for message corresponding to which submission entry
# does not exist
return
submission_input_file = submission.get("input_file")
submission_data_directory = SUBMISSION_DATA_DIR.format(
submission_id=submission.get("id")
)
submission_input_file_name = os.path.basename(submission_input_file)
submission_input_file_path = SUBMISSION_INPUT_FILE_PATH.format(
submission_id=submission.get("id"),
input_file=submission_input_file_name,
)
create_dir_as_python_package(submission_data_directory)
download_and_extract_file(
submission_input_file, submission_input_file_path
)
return submission
def get_request_headers():
headers = {"Authorization": "Token {}".format(AUTH_TOKEN)}
return headers
def make_request(url, method, data=None):
headers = get_request_headers()
if method == "GET":
try:
response = requests.get(url=url, headers=headers)
response.raise_for_status()
except requests.exceptions.RequestException:
logger.info(
"The worker is not able to establish connection with EvalAI"
)
raise
return response.json()
elif method == "PUT":
try:
response = requests.put(url=url, headers=headers, data=data)
response.raise_for_status()
except requests.exceptions.RequestException:
logger.exception(
"The worker is not able to establish connection with EvalAI due to {}"
% (response.json())
)
raise
except requests.exceptions.HTTPError:
logger.exception(
"The request to URL {} is failed due to {}"
% (url, response.json())
)
raise
return response.json()
elif method == "PATCH":
try:
response = requests.patch(url=url, headers=headers, data=data)
response.raise_for_status()
except requests.exceptions.RequestException:
logger.info(
"The worker is not able to establish connection with EvalAI"
)
raise
except requests.exceptions.HTTPError:
logger.info(
"The request to URL {} is failed due to {}"
% (url, response.json())
)
raise
return response.json()
elif method == "POST":
try:
response = requests.post(url=url, headers=headers, data=data)
response.raise_for_status()
except requests.exceptions.RequestException:
logger.info(
"The worker is not able to establish connection with EvalAI"
)
raise
except requests.exceptions.HTTPError:
logger.info(
"The request to URL {} is failed due to {}"
% (url, response.json())
)
raise
return response.json()
def get_message_from_sqs_queue():
url = URLS.get("get_message_from_sqs_queue").format(QUEUE_NAME)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def delete_message_from_sqs_queue(receipt_handle):
url = URLS.get("delete_message_from_sqs_queue").format(
QUEUE_NAME
)
url = return_url_per_environment(url)
response = make_request(url, "POST", data={
"receipt_handle": receipt_handle
}) # noqa
return response
def get_submission_by_pk(submission_pk):
url = URLS.get("get_submission_by_pk").format(submission_pk)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def get_challenge_phases_by_challenge_pk(challenge_pk):
url = URLS.get("get_challenge_phases_by_challenge_pk").format(challenge_pk)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def get_challenge_by_queue_name():
url = URLS.get("get_challenge_by_queue_name").format(QUEUE_NAME)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def get_challenge_phase_by_pk(challenge_pk, challenge_phase_pk):
url = URLS.get("get_challenge_phase_by_pk").format(
challenge_pk, challenge_phase_pk
)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def update_submission_data(data, challenge_pk, submission_pk):
url = URLS.get("update_submission_data").format(challenge_pk)
url = return_url_per_environment(url)
response = make_request(url, "PUT", data=data)
return response
def update_submission_status(data, challenge_pk):
url = "/api/jobs/challenge/{}/update_submission/".format(challenge_pk)
url = return_url_per_environment(url)
response = make_request(url, "PATCH", data=data)
return response
def read_file_content(file_path):
with open(file_path, "r") as obj:
file_content = obj.read()
if not file_content:
file_content = " "
return file_content
def run_submission(
challenge_pk,
challenge_phase,
submission,
user_annotation_file_path,
remote_evaluation,
):
"""
    * Checks whether the corresponding evaluation script and the annotation file for the challenge exist or not
* Calls evaluation script to evaluate the particular submission
Arguments:
challenge_pk -- challenge Id in which the submission is created
challenge_phase -- challenge phase JSON object in which the submission is created
    submission -- JSON object for the submission
user_annotation_file_path -- File submitted by user as a submission
"""
# Send the submission data to the evaluation script
# so that challenge hosts can use data for webhooks or any other service.
submission_output = None
phase_pk = challenge_phase.get("id")
submission_pk = submission.get("id")
annotation_file_name = PHASE_ANNOTATION_FILE_NAME_MAP[challenge_pk][
phase_pk
]
annotation_file_path = PHASE_ANNOTATION_FILE_PATH.format(
challenge_id=challenge_pk,
phase_id=phase_pk,
annotation_file=annotation_file_name,
)
submission_data_dir = SUBMISSION_DATA_DIR.format(
submission_id=submission.get("id")
)
submission_data = {
"submission_status": "running",
"submission": submission_pk,
}
update_submission_status(submission_data, challenge_pk)
status = "running"
# create a temporary run directory under submission directory, so that
    # main directory does not get polluted
temp_run_dir = join(submission_data_dir, "run")
create_dir(temp_run_dir)
stdout_file = join(temp_run_dir, "temp_stdout.txt")
stderr_file = join(temp_run_dir, "temp_stderr.txt")
stdout = open(stdout_file, "a+")
stderr = open(stderr_file, "a+")
try:
logger.info(
"Sending submission {} for evaluation".format(submission_pk)
)
with stdout_redirect(stdout), stderr_redirect(stderr):
# -----------------------------------------------------------------------------
# (c) 2005 by Basler Vision Technologies
# Section: Vision Components
# Project: GenApiTest
# Author:
# $Header:
# -----------------------------------------------------------------------------
from genicam import *
import unittest
from genicamtestcase import GenicamTestCase
from testport import CTestPort, cast_data, CStructTestPort, cast_buffer, sizeof
from callbackhelper import CallbackObject
import sys
import genicam
class NodeTestSuite(GenicamTestCase):
def test_AccessMode(self):
"""[ GenApiTest@NodeTestSuite_TestAccessMode_1.xml|gxml
<Category Name="Node0">
<pFeature>Node00</pFeature>
<pFeature>Node01</pFeature>
</Category>
<Category Name="Cat">
<pIsImplemented>CatImplemented</pIsImplemented>
<pIsAvailable>CatAvailable</pIsAvailable>
<pFeature>CatNode</pFeature>
</Category>
<Node Name="CatNode">
</Node>
<Category Name="CatFtrNI">
<pFeature>FtrNI</pFeature>
</Category>
<Integer Name="FtrNI">
<pIsImplemented>AMCtrl</pIsImplemented>
<Value>0</Value>
</Integer>
<Category Name="CatFtrNA">
<pFeature>FtrNA</pFeature>
</Category>
<Integer Name="FtrNA">
<pIsAvailable>AMCtrl</pIsAvailable>
<Value>0</Value>
</Integer>
<Integer Name="AMCtrl">
<Value>0</Value>
</Integer>
<Node Name="Node00">
<pIsImplemented>Implemented0</pIsImplemented>
<pIsAvailable>Available</pIsAvailable>
<pIsLocked>Locked</pIsLocked>
</Node>
<Category Name="Node01">
<pFeature>Node010</pFeature>
</Category>
<Node Name="Node010">
<pIsImplemented>Implemented010</pIsImplemented>
</Node>
<Integer Name="Implemented0">
<pIsAvailable>Implemented0Available</pIsAvailable>
<Value>1</Value>
</Integer>
<Integer Name="Implemented010">
<Value>1</Value>
</Integer>
<Integer Name="Available">
<pIsAvailable>AvailableAvailable</pIsAvailable>
<Value>1</Value>
</Integer>
<Integer Name="Locked">
<pIsAvailable>LockedAvailable</pIsAvailable>
<Value>0</Value>
</Integer>
<Integer Name="Implemented0Available">
<Value>1</Value>
</Integer>
<Integer Name="AvailableAvailable">
<Value>1</Value>
</Integer>
<Integer Name="LockedAvailable">
<Value>1</Value>
</Integer>
<Integer Name="CatImplemented">
<pIsAvailable>CatImplementedAvailable</pIsAvailable>
<Value>1</Value>
</Integer>
<Integer Name="CatAvailable">
<pIsAvailable>CatAvailableAvailable</pIsAvailable>
<Value>1</Value>
</Integer>
<Integer Name="CatImplementedAvailable">
<Value>1</Value>
</Integer>
<Integer Name="CatAvailableAvailable">
<Value>1</Value>
</Integer>
<Node Name="NodeImplWO">
<pIsImplemented>WONode</pIsImplemented>
</Node>
<Node Name="NodeAvailWO">
<pIsAvailable>WONode</pIsAvailable>
</Node>
<Node Name="NodeLockedWO">
<pIsLocked>WONode</pIsLocked>
</Node>
<Integer Name="WONode">
<ImposedAccessMode>WO</ImposedAccessMode>
<Value>0</Value>
</Integer>
<Integer Name="WONode2">
<pIsLocked>LockedAvailable</pIsLocked>
<ImposedAccessMode>WO</ImposedAccessMode>
<Value>0</Value>
</Integer>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestAccessMode_1")
Node0 = Camera.GetNode("Node0")
Cat = Camera.GetNode("Cat")
CatFtrNI = Camera.GetNode("CatFtrNI")
CatFtrNA = Camera.GetNode("CatFtrNA")
Node00 = Camera.GetNode("Node00")
Node01 = Camera.GetNode("Node01")
Node010 = Camera.GetNode("Node010")
Implemented0 = Camera.GetNode("Implemented0")
Implemented010 = Camera.GetNode("Implemented010")
Available = Camera.GetNode("Available")
Locked = Camera.GetNode("Locked")
CatImplemented = Camera.GetNode("CatImplemented")
CatAvailable = Camera.GetNode("CatAvailable")
# CatLocked = Camera.GetNode("CatLocked")
CatNode = Camera.GetNode("CatNode")
NodeImplWO = Camera.GetNode("NodeImplWO")
NodeAvailWO = Camera.GetNode("NodeAvailWO")
NodeLockedWO = Camera.GetNode("NodeLockedWO")
WONode = Camera.GetNode("WONode")
WONode2 = Camera.GetNode("WONode2")
######## test to GetParents
# NodeList_t mParents
# NodeImplWO.GetParents(mParents)
self.assertEqual(intfIValue, CatNode.Node.GetPrincipalInterfaceType())
self.assertEqual(intfIValue, Node00.Node.GetPrincipalInterfaceType())
self.assertEqual(intfICategory, Node0.Node.GetPrincipalInterfaceType())
MyNode = Camera.GetNode("Node00")
self.assertEqual(intfIValue, MyNode.Node.GetPrincipalInterfaceType())
# Check default access mode
with self.assertRaises(LogicalErrorException):
Node0.Node.GetAlias()
self.assertEqual(RO, Node0.Node.GetAccessMode())
self.assertEqual(RW, Node00.Node.GetAccessMode())
self.assertEqual(RO, Node01.Node.GetAccessMode())
self.assertEqual(RW, Node010.Node.GetAccessMode())
# Use cache
self.assertEqual(RO, Node0.GetAccessMode())
# Check Implemented
Implemented0.SetValue(0)
self.assertEqual(NI, Node00.GetAccessMode())
Implemented0.SetValue(1)
# Check Available
Available.SetValue(0)
self.assertEqual(NA, Node00.GetAccessMode())
Available.SetValue(1)
# Check Locked
Locked.SetValue(1)
self.assertEqual(RO, Node00.GetAccessMode())
Locked.SetValue(0)
# Check Implemented of a <pFeature> node
Implemented010.SetValue(0)
self.assertEqual(RO, Node0.GetAccessMode())
        # note that the access mode is either NI or RO; since NI never changes,
        # the access mode is always(!) cacheable
# as a consequence this test case is not valid for categories
self.assertEqual(RO, Node01.GetAccessMode())
Implemented010.SetValue(1)
# Check Category access mode implied by pFeature
self.assertEqual(NI, CatFtrNI.GetAccessMode())
self.assertEqual(RO, CatFtrNA.GetAccessMode())
# Check flag access
Implemented0.SetValue(0)
Available.SetValue(0)
Locked.SetValue(1)
self.assertEqual(False, IsImplemented(Node00))
self.assertEqual(False, IsAvailable(Node00))
Implemented0.SetValue(1)
Available.SetValue(0)
self.assertEqual(True, IsImplemented(Node00))
self.assertEqual(False, IsAvailable(Node00))
self.assertEqual(False, IsReadable(Node00))
self.assertEqual(False, IsWritable(Node00))
Available.SetValue(1)
self.assertEqual(True, IsAvailable(Node00))
Locked.SetValue(1)
self.assertEqual(RO, Node00.GetAccessMode())
self.assertEqual(True, IsReadable(Node00))
self.assertEqual(False, IsWritable(Node00))
Locked.SetValue(0)
self.assertEqual(True, IsWritable(Node00))
# and again with the flags made "not available"
Implemented0Available = Camera.GetNode("Implemented0Available")
AvailableAvailable = Camera.GetNode("AvailableAvailable")
LockedAvailable = Camera.GetNode("LockedAvailable")
Implemented0Available.SetValue(0)
self.assertEqual(True, IsImplemented(Node00))
self.assertEqual(False, IsAvailable(Node00))
Implemented0Available.SetValue(1)
self.assertEqual(True, IsImplemented(Node00))
AvailableAvailable.SetValue(0)
self.assertEqual(True, IsImplemented(Node00))
self.assertEqual(False, IsAvailable(Node00))
self.assertEqual(False, IsReadable(Node00))
self.assertEqual(False, IsWritable(Node00))
AvailableAvailable.SetValue(1)
self.assertEqual(True, IsWritable(Node00))
# WO flags
self.assertEqual(False, IsReadable(WONode))
self.assertEqual(NA, NodeImplWO.GetAccessMode())
self.assertEqual(NA, NodeAvailWO.GetAccessMode())
self.assertEqual(NA, NodeLockedWO.GetAccessMode())
LockedAvailable.SetValue(0)
self.assertEqual(NA, Node00.GetAccessMode())
self.assertEqual(False, IsReadable(Node00))
self.assertEqual(False, IsWritable(Node00))
LockedAvailable.SetValue(1)
self.assertEqual(True, IsWritable(Node00))
# Do similar checks for category
CatImplemented.SetValue(0)
self.assertEqual(NI, Cat.GetAccessMode())
CatImplemented.SetValue(1)
CatAvailable.SetValue(0)
self.assertEqual(NI, Cat.GetAccessMode())
CatAvailable.SetValue(1)
self.assertEqual(False, IsAvailable(Cat))
CatImplementedAvailable = Camera.GetNode("CatImplementedAvailable")
CatAvailableAvailable = Camera.GetNode("CatAvailableAvailable")
CatImplementedAvailable.SetValue(0)
self.assertEqual(False, IsImplemented(Cat))
self.assertEqual(False, IsAvailable(Cat))
CatImplementedAvailable.SetValue(1)
CatAvailableAvailable.SetValue(0)
IsImplemented(Cat)
IsAvailable(Cat)
self.assertEqual(False, IsImplemented(Cat))
self.assertEqual(False, IsAvailable(Cat))
self.assertEqual(False, IsReadable(Cat))
self.assertEqual(False, IsWritable(Cat))
CatAvailableAvailable.SetValue(1)
self.assertEqual(False, IsAvailable(Cat))
# Check visibility
self.assertEqual(True, IsVisible(Beginner, Guru))
self.assertEqual(False, IsVisible(Invisible, Expert))
# Check flag combinations
self.assertEqual(NI, Combine(RO, NI))
self.assertEqual(NA, Combine(RO, NA))
self.assertEqual(NA, Combine(WO, RO))
# check what happens if a WO node is locked
LockedAvailable.SetValue(0)
self.assertTrue(IsWritable(WONode2))
self.assertTrue(not IsReadable(WONode2))
LockedAvailable.SetValue(1)
self.assertTrue(not IsWritable(WONode2))
self.assertTrue(not IsReadable(WONode2))
# ticket #693
self.assertTrue(not IsReadable(None))
self.assertTrue(not IsWritable(None))
self.assertTrue(not IsImplemented(None))
self.assertTrue(not IsAvailable(None))
self.assertTrue(not IsReadable(None))
self.assertTrue(not IsWritable(None))
self.assertTrue(not IsImplemented(None))
self.assertTrue(not IsAvailable(None))
        # to complete the happy path, also check a locked node
        # whose value has an access mode other than RW
"""[ GenApiTest@NodeTestSuite_TestAccessMode_2.xml|gxml
<Integer Name="Value">
<pIsLocked>Locked</pIsLocked>
<pValue>ValueReg</pValue>
<Min>0</Min>
<Max>4294967296</Max>
<Inc>1</Inc>
</Integer>
<IntReg Name="ValueReg">
<Address>0x0104</Address>
<Length>4</Length>
<AccessMode>WO</AccessMode>
<pPort>Port</pPort>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
<Representation>Linear</Representation>
</IntReg>
<Integer Name="Locked">
<Value>1</Value>
</Integer>
<Port Name="Port"/>
"""
Camera2 = CNodeMapRef()
Camera2._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestAccessMode_2")
Port2 = CTestPort()
Port2.CreateEntry(0x0104, "uint32_t", 1024, RW, LittleEndian)
Camera2._Connect(Port2, "Port")
Value = Camera2.GetNode("Value")
with self.assertRaises(AccessException):
Value.GetValue()
self.assertEqual(False, IsAvailable(Value))
def test_NameSpace(self):
"""[ GenApiTest@NodeTestSuite_TestNameSpace.xml|gxml
<Category Name="Root">
<pFeature>MyDefault</pFeature>
<pFeature>MyStandard</pFeature>
<pFeature>MyCustom</pFeature>
</Category>
<Node Name="MyDefault"/>
<Node Name="MyStandard" NameSpace="Standard"/>
<Node Name="MyCustom" NameSpace="Custom"/>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestNameSpace")
with self.assertRaises(LogicalErrorException):
Camera.GetNode("Std::") # check an invalid name. This must work but has to return a None-pointer!
Node = Camera.GetNode("Cust::MyDefault")
with self.assertRaises(LogicalErrorException):
Node = Camera.GetNode("Std::MyDefault")
Node = Camera.GetNode("MyDefault").Node
self.assertEqual(Custom, Node.GetNameSpace())
self.assertEqual("MyDefault", Node.GetName())
self.assertEqual("Cust::MyDefault", Node.GetName(True))
with self.assertRaises(LogicalErrorException):
Node = Camera.GetNode("Cust::MyStandard")
Node = Camera.GetNode("Std::MyStandard")
Node = Camera.GetNode("MyStandard").Node
self.assertEqual(Standard, Node.GetNameSpace())
self.assertEqual("MyStandard", Node.GetName())
self.assertEqual("Std::MyStandard", Node.GetName(True))
Node = Camera.GetNode("Cust::MyCustom")
with self.assertRaises(LogicalErrorException):
Node = Camera.GetNode("Std::MyCustom")
Node = Camera.GetNode("MyCustom").Node
self.assertEqual(Custom, Node.GetNameSpace())
self.assertEqual("MyCustom", Node.GetName())
self.assertEqual("Cust::MyCustom", Node.GetName(True))
with self.assertRaises(LogicalErrorException):
Node = Camera.GetNode("Trallala::MyDefault")
with self.assertRaises(LogicalErrorException):
Node = Camera.GetNode("Std::Trallala")
with self.assertRaises(LogicalErrorException):
Node = Camera.GetNode("Trallala")
def test_PropertyAccess(self):
# Note that DisplayName is handled in a slightly different way for EnumEntry,
# where Symbolic is also considered
"""[ GenApiTest@NodeTestSuite_TestPropertyAccess.xml|gxml
<Node Name="MyName" NameSpace="Standard">
<ToolTip>MyToolTip</ToolTip>
<Description>MyDescription</Description>
<DisplayName>MyDisplayName</DisplayName>
<Visibility>Guru</Visibility>
</Node>
<Enumeration Name="Enum0">
<EnumEntry Name="EnumEntry0">
<DisplayName>EnumDisplayName</DisplayName>
<Value>0</Value>
</EnumEntry>
<EnumEntry Name="EnumEntry1">
<Value>1</Value>
<Symbolic>EnumSymbolic</Symbolic>
</EnumEntry>
<EnumEntry Name="EnumEntry2">
<Value>2</Value>
</EnumEntry>
<Value>0</Value>
</Enumeration>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestPropertyAccess")
MyName = Camera.GetNode("MyName")
self.assertEqual(RW, MyName.GetAccessMode())
self.assertEqual("MyName", MyName.Node.GetName())
self.assertEqual("Std::MyName", MyName.Node.GetName(True))
self.assertEqual(Standard, MyName.Node.GetNameSpace())
self.assertEqual("MyDisplayName", MyName.Node.GetDisplayName())
self.assertEqual(Guru, MyName.Node.GetVisibility())
self.assertEqual("MyToolTip", MyName.Node.GetToolTip())
self.assertEqual("MyDescription", MyName.Node.GetDescription())
def test_CachingMode(self):
"""[ GenApiTest@NodeTestSuite_TestCachingMode.xml|gxml
<IntReg Name="Int01" >
<Address>0x10</Address>
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Cachable>NoCache</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int02" >
<Address>0x12</Address>
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Cachable>WriteThrough</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int03" >
<Address>0x14</Address>
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Cachable>WriteAround</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int04" >
<pAddress>Int01</pAddress> <!-- note: a connection to a node with NoCache -->
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Cachable>WriteAround</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int05" >
<pAddress>Int02</pAddress> <!-- note: a connection to a node with WriteThrough -->
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Cachable>WriteAround</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int06" >
<pAddress>Int03</pAddress> <!-- note: a connection to a node with WriteAround -->
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Cachable>WriteAround</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int07" >
<pAddress>Int01</pAddress> <!-- note: a connection to a node with NoCache -->
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Cachable>WriteThrough</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int08" >
<pAddress>Int02</pAddress> <!-- note: a connection to a node with WriteThrough -->
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Cachable>WriteThrough</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int09" >
<pAddress>Int03</pAddress> <!-- note: a connection to a node with WriteAround -->
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Cachable>WriteThrough</Cachable>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int10" > <!-- note : no explicite CachingMode given -->
<Address>0</Address>
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<IntReg Name="Int11" >
<pAddress>Int10</pAddress> <!-- note: a connection to a node with implicit WriteThrough -->
<Length>2</Length>
<AccessMode>RW</AccessMode>
<pPort>MyPort</pPort>
<Sign>Unsigned</Sign>
<Endianess>LittleEndian</Endianess>
</IntReg>
<Integer Name="IntNoCache">
<pValue>Int01</pValue>
</Integer>
<Integer Name="IntThrough">
<pValue>Int02</pValue>
</Integer>
<Integer Name="IntAround">
<pValue>Int03</pValue>
</Integer>
<Converter Name="ConvAround">
<FormulaTo> FROM </FormulaTo>
<FormulaFrom> TO </FormulaFrom>
<pValue>Int03</pValue>
<Slope>Increasing</Slope>
</Converter>
<Float Name="FloatAround">
<pValue>ConvAround</pValue>
</Float>
<Port Name="MyPort"/>
"""
Camera = CNodeMapRef()
Camera._LoadXMLFromFile("GenApiTest", "NodeTestSuite_TestCachingMode")
Int01 = Camera.GetNode("Int01")
Int02 = Camera.GetNode("Int02")
Int03 = Camera.GetNode("Int03")
Int04 = Camera.GetNode("Int04")
Int05 = Camera.GetNode("Int05")
Int06 = Camera.GetNode("Int06")
Int07 = Camera.GetNode("Int07")
Int08 = Camera.GetNode("Int08")
Int09 = Camera.GetNode("Int09")
Int10 = Camera.GetNode("Int10")
Int11 = Camera.GetNode("Int11")
self.assertEqual(genicam.NoCache, Int01.Node.GetCachingMode())
self.assertEqual(genicam.WriteThrough, Int02.Node.GetCachingMode())
self.assertEqual(genicam.WriteAround, Int03.Node.GetCachingMode())
self.assertEqual(genicam.NoCache, Int04.Node.GetCachingMode())
self.assertEqual(genicam.WriteAround, Int05.Node.GetCachingMode())
self.assertEqual(genicam.WriteAround, Int06.Node.GetCachingMode())
self.assertEqual(genicam.NoCache, Int07.Node.GetCachingMode())
self.assertEqual(genicam.WriteThrough, Int08.Node.GetCachingMode())
self.assertEqual(genicam.WriteAround, Int09.Node.GetCachingMode())
# ensure that the default caching mode is WriteThrough (single node)
self.assertEqual(genicam.WriteThrough, Int10.Node.GetCachingMode())
# ensure that the default caching mode is WriteThrough (neither the child nor the parent node specifies a caching mode)
self.assertEqual(genicam.WriteThrough, Int11.Node.GetCachingMode())
# create and initialize a test port
Port = CTestPort()
# short s = (short)0xfdfd # *JS* removed warning
s = 0xfdfd
Port.CreateEntry(0x0010, "uint16_t", s, RW, LittleEndian)
Port.CreateEntry(0x0012, "uint16_t", s, RW, LittleEndian)
Port.CreateEntry(0x0014, "uint16_t", s, RW, LittleEndian)
# connect the node map to the port
Camera._Connect(Port, "MyPort")
# No cache
Int = Int01
Int.SetValue(42)
self.assertEqual(42, Int.GetValue())
# Write through
Int = Int02
n1 = 4711
Int.SetValue(n1)
self.assertEqual(n1, Int.GetValue())
# Write around
Int = Int03
n2 = 123
Int.SetValue(n2)
self.assertEqual(n2, Int.GetValue())
# Let's play directly with the values within the port
NoCache = Camera.GetNode("Int01")
Through = Camera.GetNode("Int02")
Around = Camera.GetNode("Int03")
self.assertEqual(genicam.NoCache, NoCache.GetNode().GetCachingMode())
self.assertEqual(genicam.WriteThrough, Through.GetNode().GetCachingMode())
self.assertEqual(genicam.WriteAround, Around.GetNode().GetCachingMode())
IntNoCache = Camera.GetNode("IntNoCache")
IntThrough = Camera.GetNode("IntThrough")
IntAround = Camera.GetNode("IntAround")
NoCache.SetValue(0)
Through.SetValue(0)
Around.SetValue(0)
self.assertEqual(0, NoCache.GetValue())
self.assertEqual(0, Around.GetValue())
self.assertEqual(0, Through.GetValue())
self.assertEqual(0, IntNoCache.GetValue())
"""Tests for the presentation module."""
import logging
import sys
import pytest
from pydicom import __version__
from pydicom._uid_dict import UID_dictionary
from pydicom.uid import UID
from pynetdicom import AE, _config
from pynetdicom._globals import DEFAULT_TRANSFER_SYNTAXES
from pynetdicom.pdu_primitives import SCP_SCU_RoleSelectionNegotiation
from pynetdicom.presentation import (
build_context,
build_role,
PresentationContext,
negotiate_as_acceptor,
negotiate_as_requestor,
negotiate_unrestricted,
ApplicationEventLoggingPresentationContexts,
BasicWorklistManagementPresentationContexts,
ColorPalettePresentationContexts,
DefinedProcedureProtocolPresentationContexts,
DisplaySystemPresentationContexts,
HangingProtocolPresentationContexts,
ImplantTemplatePresentationContexts,
InstanceAvailabilityPresentationContexts,
MediaCreationManagementPresentationContexts,
MediaStoragePresentationContexts,
NonPatientObjectPresentationContexts,
PrintManagementPresentationContexts,
ProcedureStepPresentationContexts,
ProtocolApprovalPresentationContexts,
QueryRetrievePresentationContexts,
RelevantPatientInformationPresentationContexts,
RTMachineVerificationPresentationContexts,
StoragePresentationContexts,
StorageCommitmentPresentationContexts,
SubstanceAdministrationPresentationContexts,
UnifiedProcedurePresentationContexts,
VerificationPresentationContexts,
)
from pynetdicom.sop_class import (
Verification,
CompositeInstanceRetrieveWithoutBulkDataGet,
CTImageStorage,
)
PYDICOM_VERSION = __version__.split(".")[:2]
@pytest.fixture(
params=[
(1, "1.1.1", ["1.2.840.10008.1.2"]),
(1, "1.1.1", ["1.2.840.10008.1.2", "1.2.840.10008.1.2.1"]),
(1, "1.1.1", ["1.2.840.10008.1.2", "1.2.840.10008.1.2.1", "1.2.3"]),
(1, "1.1.1", []),
(1, "1.2.840.10008.1.2.1.1.3", ["1.2.3"]),
(1, "1.2.840.10008.1.2.1.1.3", ["1.2.3", "1.2.840.10008.1.2"]),
]
)
def good_init(request):
"""Good init values."""
return request.param
class TestPresentationContext:
"""Test the PresentationContext class"""
def setup(self):
self.default_conformance = _config.ENFORCE_UID_CONFORMANCE
def teardown(self):
_config.ENFORCE_UID_CONFORMANCE = self.default_conformance
def test_setter(self, good_init):
"""Test the presentation context class init"""
(context_id, abs_syn, tran_syn) = good_init
pc = PresentationContext()
pc.context_id = context_id
pc.abstract_syntax = abs_syn
pc.transfer_syntax = tran_syn
assert pc.context_id == context_id
assert pc.abstract_syntax == abs_syn
assert pc.transfer_syntax == tran_syn
assert pc._scu_role is None
assert pc._scp_role is None
assert pc._as_scu is None
assert pc._as_scp is None
assert pc.result is None
def test_add_transfer_syntax(self):
"""Test adding transfer syntaxes"""
pc = PresentationContext()
pc.context_id = 1
pc.add_transfer_syntax("1.2.840.10008.1.2")
pc.add_transfer_syntax(b"1.2.840.10008.1.2.1")
pc.add_transfer_syntax(UID("1.2.840.10008.1.2.2"))
# Test adding an invalid value
pc.add_transfer_syntax(1234)
assert 1234 not in pc.transfer_syntax
# Test adding None does nothing
pc.add_transfer_syntax(None)
assert None not in pc.transfer_syntax
def test_add_transfer_syntax_nonconformant(self, caplog):
"""Test adding non-conformant transfer syntaxes"""
_config.ENFORCE_UID_CONFORMANCE = True
caplog.set_level(logging.DEBUG, logger="pynetdicom.presentation")
pc = PresentationContext()
pc.context_id = 1
msg = r"'transfer_syntax' contains an invalid UID"
with pytest.raises(ValueError, match=msg):
pc.add_transfer_syntax("1.2.3.")
assert msg in caplog.text
pc.add_transfer_syntax("1.2.840.10008.1.1")
assert (
"A UID has been added to 'transfer_syntax' that is not a "
"transfer syntax" in caplog.text
)
_config.ENFORCE_UID_CONFORMANCE = False
pc.add_transfer_syntax("1.2.3.")
assert "1.2.3." in pc.transfer_syntax
def test_add_private_transfer_syntax(self):
"""Test adding private transfer syntaxes"""
_config.ENFORCE_UID_CONFORMANCE = False
pc = PresentationContext()
pc.context_id = 1
pc.add_transfer_syntax("2.16.840.1.113709.1.2.2")
assert "2.16.840.1.113709.1.2.2" in pc._transfer_syntax
pc.transfer_syntax = ["2.16.840.1.113709.1.2.1"]
assert "2.16.840.1.113709.1.2.1" in pc._transfer_syntax
_config.ENFORCE_UID_CONFORMANCE = True
pc = PresentationContext()
pc.context_id = 1
pc.add_transfer_syntax("2.16.840.1.113709.1.2.2")
assert "2.16.840.1.113709.1.2.2" in pc._transfer_syntax
pc.transfer_syntax = ["2.16.840.1.113709.1.2.1"]
assert "2.16.840.1.113709.1.2.1" in pc._transfer_syntax
def test_add_transfer_syntax_duplicate(self):
"""Test add_transfer_syntax with a duplicate UID"""
pc = PresentationContext()
pc.add_transfer_syntax("1.3")
pc.add_transfer_syntax("1.3")
assert pc.transfer_syntax == ["1.3"]
def test_equality(self):
"""Test presentation context equality"""
pc_a = PresentationContext()
pc_a.context_id = 1
pc_a.abstract_syntax = "1.1.1"
pc_a.transfer_syntax = ["1.2.840.10008.1.2"]
pc_b = PresentationContext()
pc_b.context_id = 1
pc_b.abstract_syntax = "1.1.1"
pc_b.transfer_syntax = ["1.2.840.10008.1.2"]
assert pc_a == pc_a
assert pc_a == pc_b
assert not pc_a != pc_b
assert not pc_a != pc_a
# scp/scu role start off as None
pc_a._scp_role = False
assert not pc_a == pc_b
pc_a._scp_role = True
assert not pc_a == pc_b
pc_b._scu_role = False
assert not pc_a == pc_b
pc_b._scu_role = True
pc_a._scu_role = True
pc_b._scp_role = True
assert pc_a == pc_b
assert not "a" == pc_b
def test_hash(self):
"""Test hashing the context"""
cx_a = build_context("1.2.3", "1.2.3.4")
cx_b = build_context("1.2.3", "1.2.3.4")
assert hash(cx_a) == hash(cx_b)
cx_a.transfer_syntax = ["1.2.3.4"]
assert hash(cx_a) == hash(cx_b)
cx_a.transfer_syntax[0] = "1.2.3.4"
assert hash(cx_a) == hash(cx_b)
cx_a.transfer_syntax[0] = "1.2.3.5"
assert hash(cx_a) != hash(cx_b)
cx_a.transfer_syntax = ["1.2.3.5"]
assert hash(cx_a) != hash(cx_b)
cx_c = build_context("1.2.3", ["1.1", "1.2"])
assert hash(cx_c) != hash(cx_b)
cx_d = build_context("1.2.3", ["1.1", "1.2"])
assert hash(cx_c) == hash(cx_d)
cx_c.transfer_syntax[1] = "1.2.3.5"
cx_d.transfer_syntax[1] = "1.2.3.5"
assert hash(cx_c) == hash(cx_d)
def test_string_output(self):
"""Test string output"""
pc = PresentationContext()
pc.context_id = 1
pc.abstract_syntax = "1.1.1"
pc.transfer_syntax = ["1.2.840.10008.1.2", "1.2.3"]
pc._scp_role = True
pc._scu_role = False
pc.result = 0x02
out = [
"ID: 1",
"Abstract Syntax: 1.1.1",
"Transfer Syntax(es):",
" =Implicit VR Little Endian",
" =1.2.3",
"Result: Provider Rejected",
]
assert "\n".join(out) == str(pc)
pc._as_scu = True
pc._as_scp = False
assert "Role: SCU only" in str(pc)
pc._as_scp = True
assert "Role: SCU and SCP" in str(pc)
pc._as_scu = False
assert "Role: SCP only" in str(pc)
pc._as_scp = False
assert "Role: (none)" in str(pc)
pc._transfer_syntax = []
out = [
"ID: 1",
"Abstract Syntax: 1.1.1",
"Transfer Syntax(es):",
" (none)",
"Result: Provider Rejected",
"Role: (none)",
]
assert "\n".join(out) == str(pc)
def test_context_id(self):
"""Test setting context_id."""
pc = PresentationContext()
pc.context_id = 1
assert pc.context_id == 1
pc.context_id = 255
assert pc.context_id == 255
with pytest.raises(ValueError):
pc.context_id = 0
with pytest.raises(ValueError):
pc.context_id = 256
with pytest.raises(ValueError):
pc.context_id = 12
def test_abstract_syntax(self):
"""Test abstract syntax setter"""
pc = PresentationContext()
pc.context_id = 1
pc.abstract_syntax = "1.1.1"
assert pc.abstract_syntax == UID("1.1.1")
assert isinstance(pc.abstract_syntax, UID)
pc.abstract_syntax = b"1.2.1"
assert pc.abstract_syntax == UID("1.2.1")
assert isinstance(pc.abstract_syntax, UID)
pc.abstract_syntax = UID("1.3.1")
assert pc.abstract_syntax == UID("1.3.1")
assert isinstance(pc.abstract_syntax, UID)
def test_abstract_syntax_raises(self):
"""Test exception raised if invalid abstract syntax"""
pc = PresentationContext()
with pytest.raises(TypeError):
pc.abstract_syntax = 1234
def test_abstract_syntax_nonconformant(self, caplog):
"""Test adding non-conformant abstract syntaxes"""
_config.ENFORCE_UID_CONFORMANCE = True
caplog.set_level(logging.DEBUG, logger="pynetdicom.presentation")
pc = PresentationContext()
pc.context_id = 1
msg = "Invalid 'abstract_syntax' value '1.4.1.' - UID is non-conformant"
with pytest.raises(ValueError, match=msg):
pc.abstract_syntax = UID("1.4.1.")
assert pc.abstract_syntax is None
_config.ENFORCE_UID_CONFORMANCE = False
pc.abstract_syntax = UID("1.4.1.")
assert pc.abstract_syntax == UID("1.4.1.")
assert isinstance(pc.abstract_syntax, UID)
assert msg in caplog.text
def test_transfer_syntax(self):
"""Test transfer syntax setter"""
pc = PresentationContext()
pc.context_id = 1
pc.transfer_syntax = ["1.2.840.10008.1.2"]
assert pc.transfer_syntax[0] == UID("1.2.840.10008.1.2")
assert isinstance(pc.transfer_syntax[0], UID)
pc.transfer_syntax = [b"1.2.840.10008.1.2.1"]
assert pc.transfer_syntax[0] == UID("1.2.840.10008.1.2.1")
assert isinstance(pc.transfer_syntax[0], UID)
pc.transfer_syntax = [UID("1.2.840.10008.1.2.2")]
assert pc.transfer_syntax[0] == UID("1.2.840.10008.1.2.2")
assert isinstance(pc.transfer_syntax[0], UID)
with pytest.raises(TypeError):
pc.transfer_syntax = UID("1.4.1")
pc.transfer_syntax = [1234, UID("1.4.1")]
assert pc.transfer_syntax == [UID("1.4.1")]
def test_transfer_syntax_duplicate(self):
"""Test the transfer_syntax setter with duplicate UIDs."""
pc = PresentationContext()
pc.transfer_syntax = ["1.3", "1.3"]
assert pc.transfer_syntax == ["1.3"]
def test_transfer_syntax_nonconformant(self, caplog):
"""Test setting non-conformant transfer syntaxes"""
caplog.set_level(logging.DEBUG, logger="pynetdicom.presentation")
pc = PresentationContext()
pc.context_id = 1
pc.transfer_syntax = ["1.4.1.", "1.2.840.10008.1.2"]
assert pc.transfer_syntax == ["1.4.1.", "1.2.840.10008.1.2"]
assert "A non-conformant UID has been added to 'transfer_syntax'" in caplog.text
def test_status(self):
"""Test presentation context status"""
pc = PresentationContext()
pc.context_id = 1
statuses = [None, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05]
results = [
"Pending",
"Accepted",
"User Rejected",
"Provider Rejected",
"Abstract Syntax Not Supported",
"Transfer Syntax(es) Not Supported",
"Unknown",
]
for status, result in zip(statuses, results):
pc.result = status
assert pc.status == result
def test_tuple(self):
"""Test the .as_tuple"""
context = PresentationContext()
context.context_id = 3
context.abstract_syntax = "1.2.840.10008.1.1"
context.transfer_syntax = ["1.2.840.10008.1.2"]
out = context.as_tuple
assert out.context_id == 3
assert out.abstract_syntax == "1.2.840.10008.1.1"
assert out.transfer_syntax == "1.2.840.10008.1.2"
def test_as_scp(self):
"""Test the Presentation.as_scp property."""
context = build_context("1.2.3")
assert context.as_scp is None
with pytest.raises(AttributeError, match=r"can't set attribute"):
context.as_scp = True
context._as_scp = True
assert context.as_scp
context._as_scp = False
assert not context.as_scp
def test_as_scu(self):
"""Test the Presentation.as_scu property."""
context = build_context("1.2.3")
assert context.as_scu is None
with pytest.raises(AttributeError, match=r"can't set attribute"):
context.as_scu = True
context._as_scu = True
assert context.as_scu
context._as_scu = False
assert not context.as_scu
def test_scu_role(self):
"""Test Presentation.scu_role setter/getter."""
context = build_context("1.2.3")
assert context.scu_role is None
context.scu_role = True
assert context.scu_role
context.scu_role = False
assert not context.scu_role
context.scu_role = None
assert context.scu_role is None
with pytest.raises(TypeError, match=r"'scu_role' must be a bool"):
context.scu_role = 1
def test_scp_role(self):
"""Test Presentation.scp_role setter/getter."""
context = build_context("1.2.3")
assert context.scp_role is None
context.scp_role = True
assert context.scp_role
context.scp_role = False
assert not context.scp_role
context.scp_role = None
assert context.scp_role is None
with pytest.raises(TypeError, match=r"'scp_role' must be a bool"):
context.scp_role = 1
def test_repr(self):
"""Test PresentationContext.__repr__"""
cx = build_context("1.2.3")
assert "1.2.3" == repr(cx)
cx = build_context("1.2.840.10008.1.1")
assert "Verification SOP Class" == repr(cx)
class TestNegotiateAsAcceptor:
"""Tests negotiation_as_acceptor."""
def setup(self):
self.test_func = negotiate_as_acceptor
def test_no_req_no_acc(self):
"""Test negotiation with no contexts."""
result = self.test_func([], [], [])
assert result == ([], [])
def test_one_req_no_acc(self):
"""Test negotiation with one requestor, no acceptor contexts."""
context = PresentationContext()
context.context_id = 1
context.abstract_syntax = "1.2.840.10008.5.1.4.1.1.2"
context.transfer_syntax = ["1.2.840.10008.1.2"]
result, roles = self.test_func([context], [])
assert len(result) == 1
assert roles == []
context = result[0]
assert context.context_id == 1
assert context.abstract_syntax == "1.2.840.10008.5.1.4.1.1.2"
assert context.transfer_syntax == ["1.2.840.10008.1.2"]
assert context.result == 0x03
def test_no_req_one_acc(self):
"""Test negotiation with no requestor, one acceptor contexts."""
context = PresentationContext()
context.context_id = 1
context.abstract_syntax = "1.2.840.10008.5.1.4.1.1.2"
context.transfer_syntax = ["1.2.840.10008.1.2"]
result, roles = self.test_func([], [context])
assert result == []
assert roles == []
def test_dupe_abs_req_no_acc(self):
"""Test negotiation with duplicate requestor, no acceptor contexts."""
context_a = PresentationContext()
context_a.context_id = 1
context_a.abstract_syntax = "1.2.840.10008.5.1.4.1.1.2"
context_a.transfer_syntax = ["1.2.840.10008.1.2"]
context_b = PresentationContext()
context_b.context_id = 3
context_b.abstract_syntax = "1.2.840.10008.5.1.4.1.1.2"
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import logging
import pickle
import numpy as np
import ray
from zoo.common.utils import enable_multi_fs_load, enable_multi_fs_save
from zoo.orca.data.ray_xshards import RayXShards
from zoo.orca.learn.dl_cluster import RayDLCluster
from zoo.orca.learn.tf2.tf_runner import TFRunner
from zoo.orca.learn.ray_estimator import Estimator as OrcaRayEstimator
from zoo.orca.learn.utils import maybe_dataframe_to_xshards, dataframe_to_xshards, \
convert_predict_xshards_to_dataframe, update_predict_xshards, \
process_xshards_of_pandas_dataframe
from zoo.orca.data.utils import process_spark_xshards
from zoo.ray import RayContext
logger = logging.getLogger(__name__)
class Estimator(object):
@staticmethod
def from_keras(*,
model_creator,
config=None,
verbose=False,
workers_per_node=1,
compile_args_creator=None,
backend="tf2",
cpu_binding=False,
):
"""
Create an Estimator for TensorFlow 2.
:param model_creator: (dict -> Model) This function takes in the `config`
dict and returns a compiled TF model.
:param config: (dict) configuration passed to 'model_creator',
'data_creator'. Also contains `fit_config`, which is passed
into `model.fit(data, **fit_config)` and
`evaluate_config` which is passed into `model.evaluate`.
:param verbose: (bool) Prints output of one model if true.
:param workers_per_node: (Int) worker number on each node. default: 1.
:param compile_args_creator: (dict -> dict of loss, optimizer and metrics) Only used when
the backend="horovod". This function takes in the `config` dict and returns a
dictionary like {"optimizer": tf.keras.optimizers.SGD(lr), "loss":
"mean_squared_error", "metrics": ["mean_squared_error"]}
:param backend: (string) You can choose "horovod" or "tf2" as backend. Default: `tf2`.
:param cpu_binding: (bool) Whether to bind threads to specific CPUs. Default: False
"""
return TensorFlow2Estimator(model_creator=model_creator, config=config,
verbose=verbose, workers_per_node=workers_per_node,
backend=backend, compile_args_creator=compile_args_creator,
cpu_binding=cpu_binding)
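# A minimal usage sketch (hypothetical model and config; assumes an initialized
# RayContext, since the estimator spins up Ray workers on construction):
#
#   import tensorflow as tf
#
#   def model_creator(config):
#       model = tf.keras.Sequential([
#           tf.keras.layers.Dense(1, input_shape=(10,))
#       ])
#       model.compile(optimizer="sgd", loss="mse", metrics=["mse"])
#       return model
#
#   est = Estimator.from_keras(model_creator=model_creator,
#                              config={"lr": 0.01},
#                              workers_per_node=2,
#                              backend="tf2")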
def make_data_creator(refs):
def data_creator(config, batch_size):
return refs
return data_creator
def data_length(data):
x = data["x"]
if isinstance(x, np.ndarray):
return x.shape[0]
else:
return x[0].shape[0]
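# For illustration, data_length returns the sample count of a shard, e.g.
# data_length({"x": np.zeros((8, 3)), "y": np.zeros(8)}) == 8, and for multi-input
# shards data_length({"x": (np.zeros((8, 3)), np.zeros((8, 5))), "y": np.zeros(8)}) == 8.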
class TensorFlow2Estimator(OrcaRayEstimator):
def __init__(self,
model_creator,
compile_args_creator=None,
config=None,
verbose=False,
backend="tf2",
workers_per_node=1,
cpu_binding=False):
self.model_creator = model_creator
self.compile_args_creator = compile_args_creator
self.config = {} if config is None else config
self.verbose = verbose
ray_ctx = RayContext.get()
if "batch_size" in self.config:
raise Exception("Please do not specify batch_size in config. Input batch_size in the"
" fit/evaluate function of the estimator instead.")
if "inter_op_parallelism" not in self.config:
self.config["inter_op_parallelism"] = 1
if "intra_op_parallelism" not in self.config:
self.config["intra_op_parallelism"] = ray_ctx.ray_node_cpu_cores // workers_per_node
if backend == "horovod":
assert compile_args_creator is not None, "compile_args_creator should not be None," \
" when backend is set to horovod"
params = {
"model_creator": model_creator,
"compile_args_creator": compile_args_creator,
"config": self.config,
"verbose": self.verbose,
}
if backend == "tf2":
cores_per_node = ray_ctx.ray_node_cpu_cores // workers_per_node
num_nodes = ray_ctx.num_ray_nodes * workers_per_node
self.cluster = RayDLCluster(
num_workers=num_nodes,
worker_cores=cores_per_node,
worker_cls=TFRunner,
worker_param=params,
cpu_binding=cpu_binding
)
self.remote_workers = self.cluster.get_workers()
ips = ray.get(
[worker.get_node_ip.remote() for worker in self.remote_workers])
ports = ray.get(
[worker.find_free_port.remote() for worker in self.remote_workers])
urls = ["{ip}:{port}".format(ip=ips[i], port=ports[i])
for i in range(len(self.remote_workers))]
ray.get([worker.setup.remote() for worker in self.remote_workers])
# Get setup tasks in order to throw errors on failure
ray.get([
worker.setup_distributed.remote(urls, i, len(self.remote_workers))
for i, worker in enumerate(self.remote_workers)])
elif backend == "horovod":
# it is necessary to call self.run first to set horovod environment
from zoo.orca.learn.horovod.horovod_ray_runner import HorovodRayRunner
horovod_runner = HorovodRayRunner(ray_ctx,
worker_cls=TFRunner,
worker_param=params,
workers_per_node=workers_per_node)
horovod_runner.run(lambda: print("worker initialized"))
self.remote_workers = horovod_runner.remote_workers
ray.get([worker.setup.remote() for worker in self.remote_workers])
ray.get([
worker.setup_horovod.remote()
for i, worker in enumerate(self.remote_workers)])
else:
raise Exception("Only \"tf2\" and \"horovod\" are legal "
"values of backend, but got {}".format(backend))
self.num_workers = len(self.remote_workers)
def fit(self, data, epochs=1, batch_size=32, verbose=1,
callbacks=None, validation_data=None, class_weight=None,
steps_per_epoch=None, validation_steps=None, validation_freq=1,
data_config=None, feature_cols=None,
label_cols=None):
"""
Train this tensorflow model with train data.
:param data: train data. It can be XShards, Spark DataFrame or creator function which
returns Iter or DataLoader.
If data is XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature, 'y': label}, where feature(label) is a numpy array or a tuple of
numpy arrays.
:param epochs: Number of epochs to train the model. Default: 1.
:param batch_size: Batch size used for training. Default: 32.
:param verbose: Prints output of one model if true.
:param callbacks: List of Keras compatible callbacks to apply during training.
:param validation_data: validation data. Validation data type should be the same
as train data.
:param class_weight: Optional dictionary mapping class indices (integers) to a weight
(float) value, used for weighting the loss function. This can be useful to tell
the model to "pay more attention" to samples from an under-represented class.
:param steps_per_epoch: Total number of steps (batches of samples) before declaring one
epoch finished and starting the next epoch. If `steps_per_epoch` is `None`, the
epoch will run until the input dataset is exhausted. When passing an infinitely
repeating dataset, you must specify the `steps_per_epoch` argument.
:param validation_steps: Total number of steps (batches of samples) to draw before stopping
when performing validation at the end of every epoch. Default: None.
:param validation_freq: Only relevant if validation data is provided. Integer or
`collections.abc.Container` instance (e.g. list, tuple, etc.). If an integer,
specifies how many training epochs to run before a new validation run is performed,
e.g. `validation_freq=2` runs validation every 2 epochs. If a Container, specifies
the epochs on which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
:param data_config: An optional dictionary that can be passed to data creator function.
:param feature_cols: Feature column name(s) of data. Only used when data is a Spark
DataFrame or an XShards of Pandas DataFrame. Default: None.
:param label_cols: Label column name(s) of data. Only used when data is a Spark DataFrame or
an XShards of Pandas DataFrame.
Default: None.
:return:
"""
params = dict(
epochs=epochs,
batch_size=batch_size,
verbose=verbose,
callbacks=callbacks,
class_weight=class_weight,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq,
data_config=data_config
)
from zoo.orca.data import SparkXShards
data, validation_data = maybe_dataframe_to_xshards(data, validation_data,
feature_cols, label_cols,
mode="fit",
num_workers=self.num_workers,
accept_str_col=True)
if isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data, validation_data = process_xshards_of_pandas_dataframe(data, feature_cols,
label_cols,
validation_data, "fit")
ray_xshards = process_spark_xshards(data, self.num_workers)
if validation_data is None:
def transform_func(worker, partition_refs):
params["data_creator"] = make_data_creator(partition_refs)
return worker.step.remote(**params)
worker_stats = ray_xshards.reduce_partitions_for_actors(self.remote_workers,
transform_func)
else:
val_ray_xshards = process_spark_xshards(validation_data, self.num_workers)
def zip_func(worker, this_partition_refs, that_partition_refs):
params["data_creator"] = make_data_creator(this_partition_refs)
params["validation_data_creator"] = \
make_data_creator(that_partition_refs)
return worker.step.remote(**params)
worker_stats = ray_xshards.zip_reduce_shards_with_actors(val_ray_xshards,
self.remote_workers,
zip_func)
else:
params["data_creator"] = data
params["validation_data_creator"] = validation_data
params_list = [params] * self.num_workers
worker_stats = ray.get([self.remote_workers[i].step.remote(**params_list[i])
for i in range(self.num_workers)])
worker_stats = list(itertools.chain.from_iterable(worker_stats))
stats = worker_stats[0].copy()
return stats
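# A minimal usage sketch of fit() with a creator function (illustrative data only;
# `est` stands for a constructed TensorFlow2Estimator):
#
#   import numpy as np
#   import tensorflow as tf
#
#   def train_data_creator(config, batch_size):
#       x = np.random.rand(1000, 10).astype(np.float32)
#       y = np.random.rand(1000, 1).astype(np.float32)
#       return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
#
#   stats = est.fit(train_data_creator, epochs=2, batch_size=32)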
def evaluate(self, data, batch_size=32, num_steps=None, verbose=1,
sample_weight=None, callbacks=None, data_config=None,
feature_cols=None, label_cols=None):
"""
Evaluates the model on the validation data set.
:param data: evaluate data. It can be XShards, Spark DataFrame or creator function which
returns Iter or DataLoader.
If data is XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature, 'y': label}, where feature(label) is a numpy array or a tuple of
numpy arrays.
:param batch_size: Batch size used for evaluation. Default: 32.
:param num_steps: Total number of steps (batches of samples) before declaring the evaluation
round finished. Ignored with the default value of `None`.
:param verbose: Prints output of one model if true.
:param sample_weight: Optional Numpy array of weights for the training samples, used for
weighting the loss function. You can either pass a flat (1D) Numpy array with the
same length as the input samples (1:1 mapping between weights and samples), or in
the case of temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of every sample.
:param callbacks: List of Keras compatible callbacks to apply during evaluation.
:param data_config: An optional dictionary that can be passed to data creator function.
:param feature_cols: Feature column name(s) of data. Only used when data is a Spark
DataFrame or an XShards of Pandas DataFrame. Default: None.
:param label_cols: Label column name(s) of data. Only used when data is a Spark DataFrame or
an XShards of Pandas DataFrame.
Default: None.
:return: validation result
"""
logger.info("Starting validation step.")
params = dict(
batch_size=batch_size,
verbose=verbose,
sample_weight=sample_weight,
steps=num_steps,
callbacks=callbacks,
data_config=data_config,
)
from zoo.orca.data import SparkXShards
data, _
#
# # * SegmentIO no longer takes an ordinary IO to access the physical file.
# # Instead, it takes a LineReadIO instance. LineReadIO is an interface.
# # To implement it, one needs to provide a function that reads lines.
# #
# # * SegmentIO implements LineReadIO, providing a function to read lines
# # of a virtual file. This makes it possible to base a SegmentIO on
# # another SegmentIO.
# #
# # * FileLineReadIO implements LineReadIO, providing a function to read lines
# # of a physical file. It can be given an index to find the lines efficiently.
# # NOTE: The index for each file is generated only once, for the initial revision
# # when the file is added to the GitFileSystem.
# # To avoid having to re-generate the index after every commit, a VirtualIndex
# # is used as the index for the head revision. It takes the index for the
# # initial revision and shifts it using the OffsetTable of the changelog
# # from the initial revision to the head revision.
# #diff_from_head = GitFileSystem().diff_from_head(self._file_path, self._revision)
# #changelog_from_head_to_my_base = segmentio.changelog_from_patch_file_content(diff_from_head)
# file_io = io.open(physical_file, 'r', encoding='utf-8')
# index = segmentio.LineIndex(physical_file + '.index')
# headrev_io = segmentio.FileLineReadIO(file_io, index)
# #my_base_io = segmentio.SegmentIO(headrev_io, changelog_from_head_to_my_base)
# #my_io = segmentio.SegmentIO(my_base_io, self._window_changelog)
# ###total_changelog = self.total_changelog()
# ###my_io = segmentio.SegmentIO(headrev_io, total_changelog)
# my_io = segmentio.SegmentIO(headrev_io, self._window_changelog)
my_io = self._my_io()
seg = my_io.get_segment(block_begin_line=self._viewport_line, block_num_lines=self._viewport_nlines)
#dbg(seg._old_segment_data)
#dbg(seg._segment_data)
#dbg(seg.changelog(lineNumbersFromDest=True))
#dbg(self._window_changelog)
#dbg("before:", my_io.file_changelog())
seg.set_block_data(new_content)
# TODO before saving the segment, check for collision with any other window of any user
# idea: first back up the things that are changed by save_viewport_content(); then save and check
# if file_changelog has any intersecting lines with any other window's file_changelog;
# if it does, restore the previous state from backup and raise an exception
my_io.save_segment(seg)
self._window_changelog = my_io.file_changelog()
#dbg("after:", my_io.file_changelog())
#file_io.close()
#my_io.close()
#my_base_io.close()
#headrev_io.close()
if not self._basic_mode:
GitFileSystem().release_file(self._file_path)
def commit(self):
assert not self._basic_mode
GitFileSystem().lock_file(self._file_path)
#diff_from_head = GitFileSystem().diff_from_head(self._file_path, self._revision)
#changelog_from_head = segmentio.changelog_from_patch_file_content(diff_from_head)
#changelog = segmentio.compose_changelogs(changelog_from_head, self._window_changelog)
###changelog = self.total_changelog()
assert self._revision == GitFileSystem().head_revision(self._file_path)
changelog = self._window_changelog
patchfile = u'--- \n+++ \n' + changelog
GitFileSystem().patch_and_commit_file(self._file_path, patchfile)
self._window_changelog = u''
GitFileSystem().release_file(self._file_path)
def lock(self):
assert not self._basic_mode
success = GitFileSystem().cmtlock_file(self._file_path)
assert success
def unlock(self):
assert not self._basic_mode
success = GitFileSystem().cmtunlock_file(self._file_path)
assert success
class GitFileSystem(Singleton): # (see termdemo.sh)
def __init__(self, main_dir=None, username=None):
# TODO: fix this in singleton.py
# (hand-testing revealed it doesn't actually raise the exception when there are no instances)
# otherwise the singleton works:
# >>> from model import GitFileSystem
# >>> gfs = GitFileSystem(main_dir='kak')
# >>> gfs._main_dir
# 'kak'
# >>> g = GitFileSystem()
# >>> gfs._main_dir
# 'kak'
# >>> g._main_dir
# 'kak'
if main_dir is None:
# constructor called without args (just getting an instance; see Singleton)
if self.__class__._instances is None:
# getting an instance but the singleton doesn't exist yet -> error:
raise Exception('error: cannot get an instance of GitFileSystem: the instance does not exist yet')
else:
# getting an instance, the singleton instance exists already -> OK,
# __init__() doesn't do anything in this case
return
self._main_dir = main_dir
if username is None:
self._username = 'GFS_USER'
else:
self._username = username
def storage_dir(self):
return self._main_dir
def has_file(self, file_path):
repo_dir = self._repo_dir(file_path)
if os.path.isdir(repo_dir):
return True
else:
return False
def all_files(self):
files = []
items = os.listdir(self._main_dir)
for item in items:
repo_dir = os.path.join(self._main_dir, item)
if os.path.isdir(repo_dir):
with io.open(os.path.join(repo_dir, 'origpath')) as f:
file_path = f.readline().rstrip()
if os.path.isfile(os.path.join(repo_dir, 'corpus')):
with io.open(os.path.join(repo_dir, 'corpus')) as f:
corpus_name = f.readline().rstrip()
else:
# no corpus name assigned yet
corpus_name = None
files.append({'file_path': file_path, 'corpus_name': corpus_name})
return files
def assign_corpus_name(self, file_path, corpus_name):
'''Assigns the corpus name to the file to let it be known that
the file can be accessed through that corpus name.
Call this method when someone accesses the file by corpus name.
'''
assert self.has_file(file_path)
with io.open(os.path.join(self._repo_dir(file_path), 'corpus'), 'w') as f:
f.write(unicode(corpus_name + os.linesep))
def add_file(self, file_path):
#assert os.stat(file_path).st_nlink == 1
# NOTE: These link count checks are made on the premise that each file is only repoed once.
# When there are multiple users editing the same file, each user has the file in their
# repo so the link count is more than 2.
#
# Even running all the tests in model_test.ApplicationTestCase means there are multiple
# users per file.
#
# So uncomment these checks only when you need to debug with just one user (such as
# when debugging a single test).
assert not self.has_file(file_path)
# make the repo directory for the file
repo_dir = self._repo_dir(file_path)
os.mkdir(repo_dir)
# link the file into the repo directory
repoed_file_path = self.file_fspath(file_path)
self._create_link(file_path, repoed_file_path)
# put a .gitignore file in the repo directory to prevent git from
# tracking the index file, the origpath file, and the corpus name file
with io.open(os.path.join(repo_dir, '.gitignore'), 'w', encoding='utf-8') as f:
index_file_name = os.path.basename(file_path) + '.index'
f.write(unicode(index_file_name + os.linesep))
f.write(unicode('origpath' + os.linesep))
f.write(unicode('corpus' + os.linesep))
# set up (init; add -A; commit) a git repository in the repo directory
self._set_up_git_repo(repo_dir)
# build the index
segmentio.LineIndex.build(repoed_file_path, repoed_file_path + '.index')
# create the origpath file: that is a file containing file_path
# (it is not really necessary to have such file but good for clarity;
# it allows to identify which file this repository belongs to and
# restore the link any time later if it is broken for some reason)
with io.open(os.path.join(repo_dir, 'origpath'), 'w', encoding='utf-8') as f:
f.write(unicode(file_path + os.linesep))
assert self.has_file(file_path)
#dbg(["LINKCOUNT_ORIG", os.stat(file_path).st_nlink])
#dbg(["LINKCOUNT_REPOED", os.stat(repoed_file_path).st_nlink])
#assert os.stat(file_path).st_nlink == 2
#assert os.stat(repoed_file_path).st_nlink == 2
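# A minimal usage sketch (hypothetical paths; the singleton must be constructed
# once with main_dir before files can be added):
#
#   gfs = GitFileSystem(main_dir='/srv/gfs', username='alice')
#   gfs.add_file('/data/corpus/a.txt')
#   repoed_path = gfs.file_fspath('/data/corpus/a.txt')  # the hardlinked copy inside its repo dir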
def file_fspath(self, file_path):
'''returns the path of the file in the system (that is, in its git repository)'''
repo_dir = self._repo_dir(file_path)
file_basename = os.path.basename(file_path)
return os.path.join(repo_dir, file_basename)
def remove_file(self, file_path):
assert self.has_file(file_path)
repo_dir = self._repo_dir(file_path)
shutil.rmtree(repo_dir)
assert not self.has_file(file_path)
def cmtlock_file(self, file_path):
'''locks file against commits'''
file_id = self._file_id(file_path)
lockdir = os.path.join(self._main_dir, file_id + '_cmtlock')
# TODO use lockdir.py
try:
os.mkdir(lockdir)
# locked successfully
return True
except OSError:
# couldn't lock
return False
def cmtunlock_file(self, file_path):
'''unlocks file so it can be committed to again'''
file_id = self._file_id(file_path)
lockdir = os.path.join(self._main_dir, file_id + '_cmtlock')
try:
os.rmdir(lockdir)
# unlocked successfully
return True
except OSError:
# couldn't unlock
return False
def file_is_cmtlocked(self, file_path):
'''checks if the file is locked against commits'''
file_id = self._file_id(file_path)
lockdir = os.path.join(self._main_dir, file_id + '_cmtlock')
return os.path.isdir(lockdir)
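# A minimal usage sketch of the commit lock (hypothetical path; relies on os.mkdir
# being atomic, so only one process at a time can hold the lock):
#
#   gfs = GitFileSystem()
#   if gfs.cmtlock_file('/data/corpus/a.txt'):
#       try:
#           pass  # commit-sensitive work goes here
#       finally:
#           gfs.cmtunlock_file('/data/corpus/a.txt')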
def lock_file(self, file_path):
repo_dir = self._repo_dir(file_path)
lockdir.lock(repo_dir, lockdir.RWLOCK)
assert lockdir.is_locked(repo_dir, lockdir.RWLOCK)
assert lockdir.pid_of_lock(repo_dir, lockdir.RWLOCK) == os.getpid()
def release_file(self, file_path):
repo_dir = self._repo_dir(file_path)
lockdir.release(repo_dir, lockdir.RWLOCK)
def head_revision(self, file_path):
assert self.has_file(file_path)
repo_dir = self._repo_dir(file_path)
repoed_file_path = self.file_fspath(file_path)
# NOTE: This function is always called before reading or writing the file.
# Since it is such a bottleneck, it executes steps (1) and (2) to get the
# file in the user's personal repository in sync with the state of the
# physical file, before the repoed file in it is read from or written to
# in any way.
#
# TODO: The whole process of accessing the file, including these steps,
# should be an atomic transaction. Optionally (would be nice), distinguish
# read-only access (can be non-exclusive) and read/write access (must be exclusive).
# (1)
# the physical file and the repoed file must be hardlinked,
# that is: be two hardlinks to the same file
# (if they are not, _fix_broken_link will take the physical file and
None, date_range_end: str = None, csv_delimiter: str = 'comma',
decimal_char: str = None, return_type: str = 'flat', data_format: str = 'json',
error_format: str = 'json', *args, **kwargs) -> dict:
"""This method allows you to export a set of records for a project.
:param records: An array of record names specifying specific records you wish to pull. If 'None', all records will
be pulled. Default: 'None'.
:type records: list
:param fields: An array of field names specifying specific fields you wish to pull. If 'None', all fields will be
pulled. Default: 'None'.
:type fields: list
:param forms: An array of form names you wish to pull records for. If 'None', then all forms will be pulled.
Default: 'None'
:type forms: list
:param events: An array of unique event names that you wish to pull records for. If 'None', all events will be
pulled. Default: 'None'. *Note: only for longitudinal projects.
:type events: list
:param raw_or_label: Export the raw coded values or labels for the options of multiple choice fields. Options:
'raw', 'label'. Default: 'raw'.
:type raw_or_label: str
:param raw_or_label_headers: For the CSV headers, export the variable/field names (raw) or the field labels (label).
Options: 'raw', 'label'. Default: 'raw'. *Note: for 'csv' format 'flat' type only.
:type raw_or_label_headers: str
:param export_checkbox_label: Specifies the format of checkbox field values specifically when exporting the data as
labels (i.e., when raw_or_label=label) in flat format (i.e., when return_type=flat). When exporting labels, by
default (if export_checkbox_label=false), all checkboxes will either have a value 'Checked' if they are checked
or 'Unchecked' if not checked. But if export_checkbox_label is set to true, it will instead export the
checkbox value as the checkbox option's label (e.g., 'Choice 1') if checked or it will be blank/empty (no value)
if not checked. If raw_or_label='raw' or if return_type='eav', then the export_checkbox_label flag is ignored.
(The export_checkbox_label parameter is ignored for return_type=eav because 'eav' type always exports checkboxes
differently anyway, in which checkboxes are exported with their true variable name (whereas the 'flat' type
exports them as variable___code format), and another difference is that 'eav' type *always* exports checkbox
values as the choice label for labels export, or as 0 or 1 (if unchecked or checked, respectively) for raw
export.). Default: False.
:type export_checkbox_label: bool
:param export_survey_fields: Specifies whether or not to export the survey identifier field
(e.g., 'redcap_survey_identifier') or survey timestamp fields (e.g., instrument+'_timestamp') when surveys are
utilized in the project. If set to 'true', it will return the redcap_survey_identifier field and also the survey
timestamp field for a particular survey when at least one field from that survey is being exported. *NOTE: If
the survey identifier field or survey timestamp fields are imported via API data import, they will simply be
ignored since they are not real fields in the project but rather are pseudo-fields. Default: False.
:type export_survey_fields: bool
:param export_dags: Specifies whether or not to export the 'redcap_data_access_group' field when data access groups
are utilized in the project. *NOTE: This flag is only viable if the user whose token is being used to make the
API request is *not* in a data access group. If the user is in a group, then this flag will revert to its
default value. Default: False.
:type export_dags: bool
:param filter_logic: String of logic text (e.g., [age] > 30) for filtering the data to be returned by this API
method, in which the API will only return the records (or record-events, if a longitudinal project) where the
logic evaluates as TRUE. *NOTE: if the filter logic contains any incorrect syntax, the API will respond with an
error message.
:type filter_logic: str
:param date_range_begin: A timestamp to return only records that have been created or modified after a given
date/time. Format is YYYY-MM-DD HH:MM:SS (e.g., '2017-01-01 00:00:00' for January 1, 2017 at
midnight server time). If 'None', it will assume no begin time. Default: 'None'.
:type date_range_begin: str
:param date_range_end: A timestamp to return only records that have been created or modified before a given
date/time. Format is YYYY-MM-DD HH:MM:SS (e.g., '2017-01-01 00:00:00' for January 1, 2017 at
midnight server time). If 'None', it will assume no end time. Default: 'None'.
:type date_range_end: str
:param csv_delimiter: Set the delimiter used to separate values in the CSV data file. Options: 'comma', 'tab',
'semicolon', 'pipe', or 'caret'. Default: 'comma'. *NOTE: for CSV format only.
:type csv_delimiter: str
:param decimal_char: If specified, force all numbers into same decimal format. You may choose to force all data
values containing a decimal to have the same decimal character, which will be applied to all calc fields and
number-validated text fields. Options: 'comma', 'dot'. If 'None', numbers will be exported using the fields'
native decimal format.
:type decimal_char: str
:param return_type: Specifies the method in which returned data is modeled. 'flat' option outputs one record per
row. 'eav' option outputs one data point per row. *Note: For non-longitudinal studies, the fields will be
record, field_name, value. For longitudinal studies, the fields will be record, field_name, redcap_event_name.
Options: 'flat', 'eav'. Default: 'flat'.
:type return_type: str
:param data_format: Specifies the format that data are returned. Options: 'csv', 'json', 'xml'. Default: 'json'.
:type data_format: str
:param error_format: Specifies the format of error messages. If you do not pass in this flag, the default will be
selected based on the 'data_format' flag you passed in. Options: 'csv', 'json', 'xml'.
Default: 'json'.
:type error_format: str
"""
post = {
'content': 'record',
'rawOrLabel': raw_or_label,
'rawOrLabelHeaders': raw_or_label_headers,
'exportCheckboxLabel': str(export_checkbox_label),
'exportSurveyFields': str(export_survey_fields),
'exportDataAccessGroups': str(export_dags),
'type': return_type,
'returnFormat': error_format,
'format': data_format
}
# add filter logic, if passed
if filter_logic is not None:
post['filterLogic'] = filter_logic
# add date begin, if passed
if date_range_begin is not None:
post['dateRangeBegin'] = date_range_begin
# add date end, if passed
if date_range_end is not None:
post['dateRangeEnd'] = date_range_end
# translate the csv_delimiter arg to the appropriate string for the API call
# note: don't need to translate 'tab'
if csv_delimiter.lower() == 'comma':
csv_delimiter = ','
elif csv_delimiter.lower() == 'semicolon':
csv_delimiter = ';'
elif csv_delimiter.lower() == 'pipe':
csv_delimiter = '|'
elif csv_delimiter.lower() == 'caret':
csv_delimiter = '^'
post['csvDelimiter'] = csv_delimiter
# translate the decimal_char arg to the appropriate string for the API call
if decimal_char is not None:
if decimal_char == 'comma':
decimal_char = ','
elif decimal_char == 'dot':
decimal_char = '.'
post['decimalCharacter'] = decimal_char
# add specific records, if passed
if records is not None:
post = _helper_to_add_key_values_to_post_dict('records', records, post)
# add specific fields, if passed
if fields is not None:
post = _helper_to_add_key_values_to_post_dict('fields', fields, post)
# add specific forms, if passed
if forms is not None:
post = _helper_to_add_key_values_to_post_dict('forms', forms, post)
# add specific events, if passed
if events is not None:
post = _helper_to_add_key_values_to_post_dict('events', events, post)
return post
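# For illustration, a call such as export_records(records=['101', '102'], fields=['age'],
# raw_or_label='label', data_format='csv') assembles a payload along these lines; the
# @PostAPI decorator (defined elsewhere) is assumed to add the token/URL and send the request:
#
#   {'content': 'record', 'rawOrLabel': 'label', 'rawOrLabelHeaders': 'raw',
#    'exportCheckboxLabel': 'False', 'exportSurveyFields': 'False',
#    'exportDataAccessGroups': 'False', 'type': 'flat', 'returnFormat': 'json',
#    'format': 'csv', 'csvDelimiter': ',',
#    'records[0]': '101', 'records[1]': '102', 'fields[0]': 'age'}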
@PostAPI
def import_records(data: dict, records: list = None, overwrite: bool = False, auto_number: bool = False,
data_type: str = 'flat', date_format: str = 'YMD', return_content: str = 'count',
csv_delimiter: str = 'comma', data_format: str = 'json', error_format: str = 'json', *args,
**kwargs):
"""This method allows you to import a set of records for a project.

The parameters map onto the REDCap 'Import Records' API: 'overwrite' selects the
overwriteBehavior ('normal' or 'overwrite'), 'auto_number' maps to forceAutoNumber,
'return_content' to returnContent (e.g. 'count' or 'ids'), 'date_format' to dateFormat
('YMD', 'MDY' or 'DMY'), and 'data' holds the records to import in the format given by
'data_format'.
"""
post = {
'content': 'record',
'data': data,
'type': data_type,
'forceAutoNumber': str(auto_number),
'returnContent': return_content,
'format': data_format,
'returnFormat': error_format,
'dateFormat': date_format
}
# translate overwrite argument to the appropriate string for the API call
if overwrite is False:
overwrite_str = 'normal'
else:
overwrite_str = 'overwrite'
post['overwriteBehavior'] = overwrite_str
# translate the csv_delimiter arg to the appropriate string for the API call
# note: don't need to translate 'tab'
if csv_delimiter.lower() == 'comma':
csv_delimiter = ','
elif csv_delimiter.lower() == 'semicolon':
csv_delimiter = ';'
elif csv_delimiter.lower() == 'pipe':
csv_delimiter = '|'
elif csv_delimiter.lower() == 'caret':
csv_delimiter = '^'
post['csvDelimiter'] = csv_delimiter
# add specific records, if passed
if records is not None:
post = _helper_to_add_key_values_to_post_dict('records', records, post)
return post
def _helper_to_add_key_values_to_post_dict(key_basename: str, values_list: list, post_dict: dict) -> dict:
    for idx, value in enumerate(values_list):
        post_dict['{}[{}]'.format(key_basename, idx)] = value
    return post_dict
brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.legFront_R0_ankle_ctl.setPalette(palette)
self.legFront_R0_ankle_ctl.setAutoFillBackground(True)
self.legFront_R0_ankle_ctl.setObjectName("legFront_R0_ankle_ctl")
self.legBack_L0_upv_ctl = SelectBtn_RIkBox(biped_body)
self.legBack_L0_upv_ctl.setGeometry(QtCore.QRect(274, 339, 21, 21))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.legBack_L0_upv_ctl.setPalette(palette)
self.legBack_L0_upv_ctl.setAutoFillBackground(True)
self.legBack_L0_upv_ctl.setObjectName("legBack_L0_upv_ctl")
self.legBack_L0_roll_ctl = SelectBtn_RIkCircle(biped_body)
self.legBack_L0_roll_ctl.setGeometry(QtCore.QRect(306, 336, 21, 20))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.legBack_L0_roll_ctl.setPalette(palette)
self.legBack_L0_roll_ctl.setAutoFillBackground(True)
self.legBack_L0_roll_ctl.setObjectName("legBack_L0_roll_ctl")
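# NOTE (editorial sketch, not part of the pyuic-generated output): every select
# button above rebuilds the same palette brush-by-brush. If this setup were
# written by hand, a small helper could derive each QPalette from a table of
# colour-group/role pairs; make_select_palette and role_colors below are
# assumed names, not symbols defined elsewhere in this file.
#
#   def make_select_palette(role_colors):
#       palette = QtGui.QPalette()
#       for group, roles in role_colors.items():
#           for role, rgba in roles.items():
#               brush = QtGui.QBrush(QtGui.QColor(*rgba))
#               brush.setStyle(QtCore.Qt.SolidPattern)
#               palette.setBrush(group, role, brush)
#       return palette
#
# Generated .py files should not be edited by hand, so the helper is left as a
# comment only.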
self.legFront_R0_upv_ctl = SelectBtn_greenBox(biped_body)
self.legFront_R0_upv_ctl.setGeometry(QtCore.QRect(54, 230, 21, 21))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, | |
self.count = 0
self.log = log
# NOTE: For some reason tree items have to have a data object in
# order to be sorted. Since our compare just uses the labels
# we don't need any real data, so we'll just use None below for
# the item data.
if self.MainFrame.isPremium:
self.RootParentID = "Artifact classification"
self.LoadData("Artifact classification")
else:
self.RootParentID = "Artifact classification"
self.LoadData("UserDefine(Favorite)")
self.PreSelectedItem = self.root
self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
self.Bind(wx.EVT_IDLE, self.OnIdle)
self.eventdict = {'EVT_TREE_BEGIN_DRAG': self.OnBeginDrag, 'EVT_TREE_BEGIN_LABEL_EDIT': self.OnBeginEdit,
'EVT_TREE_BEGIN_RDRAG': self.OnBeginRDrag, 'EVT_TREE_DELETE_ITEM': self.OnDeleteItem,
'EVT_TREE_END_DRAG': self.OnEndDrag, 'EVT_TREE_END_LABEL_EDIT': self.OnEndEdit,
'EVT_TREE_ITEM_ACTIVATED': self.OnActivate, 'EVT_TREE_ITEM_CHECKED': self.OnItemCheck,
'EVT_TREE_ITEM_CHECKING': self.OnItemChecking, 'EVT_TREE_ITEM_COLLAPSED': self.OnItemCollapsed,
'EVT_TREE_ITEM_COLLAPSING': self.OnItemCollapsing, 'EVT_TREE_ITEM_EXPANDED': self.OnItemExpanded,
'EVT_TREE_ITEM_EXPANDING': self.OnItemExpanding, 'EVT_TREE_ITEM_GETTOOLTIP': self.OnToolTip,
'EVT_TREE_ITEM_MENU': self.OnItemMenu, 'EVT_TREE_ITEM_RIGHT_CLICK': self.OnRightDown,
'EVT_TREE_KEY_DOWN': self.OnKey, 'EVT_TREE_SEL_CHANGED': self.OnSelChanged,
'EVT_TREE_SEL_CHANGING': self.OnSelChanging, "EVT_TREE_ITEM_HYPERLINK": self.OnHyperLink}
mainframe = wx.GetTopLevelParent(self)
if not hasattr(mainframe, "leftpanel"):
self.Bind(CT.EVT_TREE_ITEM_EXPANDED, self.OnItemExpanded)
self.Bind(CT.EVT_TREE_ITEM_COLLAPSED, self.OnItemCollapsed)
self.Bind(CT.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
self.Bind(CT.EVT_TREE_SEL_CHANGING, self.OnSelChanging)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
else:
for combos in mainframe.treeevents:
self.BindEvents(combos)
if hasattr(mainframe, "leftpanel"):
self.ChangeStyle(mainframe.treestyles)
if not(self.GetAGWWindowStyleFlag() & CT.TR_HIDE_ROOT):
self.SelectItem(self.root)
self.Expand(self.root)
self.DoSelectItem(self.root)
def LoadData(self, ParentID, ParentNode=None):
if ParentNode is not None:
con = sqlite3.connect( self.PublicDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + ParentID + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
PublicResultRows = cursor.fetchall()
con = sqlite3.connect( self.UserDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + ParentID + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
UserResultRows = cursor.fetchall()
ResultRows = []
for UserRow in UserResultRows:
if "top" in UserRow[5] and UserRow[0] == "ProcessGroup":
ResultRows.append(UserRow)
for PublicRow in PublicResultRows:
if PublicRow[0] == "ProcessGroup":
ResultRows.append(PublicRow)
for UserRow in UserResultRows:
if UserRow[5] == PublicRow[4] and UserRow[0] == "ProcessGroup":
ResultRows.append(UserRow)
for ResultRow in ResultRows:
child = self.AppendItem(ParentNode, ResultRow[1])
self.SetPyData(child, ResultRow[4])
self.SetItemImage(child, self.folder_close_idx, wx.TreeItemIcon_Normal)
self.SetItemImage(child, self.folder_open_idx, wx.TreeItemIcon_Expanded)
try:
if int(ResultRow[4]) < 100000:
self.SetItemBackgroundColour(child, '#e6f1f5')
except:
print ""
con = sqlite3.connect( self.PublicDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + ResultRow[4] + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
SubPublicResultRows = cursor.fetchall()
con = sqlite3.connect( self.UserDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + ResultRow[4] + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
SubUserResultRows = cursor.fetchall()
SubResultRows = []
for UserRow in SubUserResultRows:
if "top" in UserRow[5] and UserRow[0] == "ProcessGroup":
SubResultRows.append(UserRow)
for PublicRow in SubPublicResultRows:
if PublicRow[0] == "ProcessGroup":
SubResultRows.append(PublicRow)
for UserRow in SubUserResultRows:
if UserRow[5] == PublicRow[4] and UserRow[0] == "ProcessGroup":
SubResultRows.append(UserRow)
if len(SubResultRows) > 0:
self.LoadData( ResultRow[4], child)
else:
self.DeleteAllItems()
self.UserDBPath = self.User_Process_SQLite
self.PublicDBPath = self.Public_Process_SQLite
#print "\n\n\n\\ this is !!!" + self.DBPath + "\n\n\n\\"
con = sqlite3.connect( self.PublicDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where Location = 'ProcessRoot' and ParentID = '" + ParentID + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
ResultRows = cursor.fetchall()
con.close()
for ResultRow in ResultRows:
self.root = self.AddRoot(ResultRow[1])
if not(self.GetAGWWindowStyleFlag() & CT.TR_HIDE_ROOT):
self.SetPyData(self.root, ResultRow[4])
self.RootParentID = ResultRow[4]
self.SetItemImage(self.root, self.folder_close_idx, wx.TreeItemIcon_Normal)
self.SetItemImage(self.root, self.folder_open_idx, wx.TreeItemIcon_Expanded)
con = sqlite3.connect( self.PublicDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + ResultRow[4] + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
SubPublicResultRows = cursor.fetchall()
con = sqlite3.connect( self.UserDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + ResultRow[4] + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
SubUserResultRows = cursor.fetchall()
SubResultRows = []
for UserRow in SubUserResultRows:
if "top" in UserRow[5] and UserRow[0] == "ProcessGroup":
SubResultRows.append(UserRow)
for PublicRow in SubPublicResultRows:
if PublicRow[0] == "ProcessGroup":
SubResultRows.append(PublicRow)
for UserRow in SubUserResultRows:
if UserRow[5] == PublicRow[4] and UserRow[0] == "ProcessGroup":
SubResultRows.append(UserRow)
if len(SubResultRows) > 0:
self.LoadData( ResultRow[4], self.root)
con.close()
#Load Selected members
RelatedContentsWindow = self.GetParent().FindWindowByName('RelatedContents')
RelatedContentsWindow.DeleteAllItems()
con = sqlite3.connect( self.PublicDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + self.RootParentID + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
PublicResultRows = cursor.fetchall()
con = sqlite3.connect( self.UserDBPath )
cursor = con.cursor()
cursor.execute("Select Location, Text, ContentsPath, Description, ContentsID, UserContentsLocation from ProcessContentsTable where ParentID = '" + self.RootParentID + "' and isDeleted = 'n' order by cast(Sequence as decimal)")
UserResultRows = cursor.fetchall()
ResultRows = []
for UserRow in UserResultRows:
if "top" in UserRow[5]:
ResultRows.append(UserRow)
for PublicRow in PublicResultRows:
ResultRows.append(PublicRow)
for UserRow in UserResultRows:
if UserRow[5] == PublicRow[4]:
ResultRows.append(UserRow)
idx = 0
for ResultRow in ResultRows:
RelatedContentsWindow.InsertStringItem(idx, ResultRow[0])
if ResultRow[0] == "ProcessGroup":
RelatedContentsWindow.SetItemColumnImage(idx, 0, 0)
elif ResultRow[0] == "Category":
RelatedContentsWindow.SetItemColumnImage(idx, 0, 1)
elif ResultRow[0] == "Analysis Point":
RelatedContentsWindow.SetItemColumnImage(idx, 0, 2)
elif ResultRow[0] == "Target":
RelatedContentsWindow.SetItemColumnImage(idx, 0, 3)
RelatedContentsWindow.SetStringItem(idx, 1, ResultRow[1])
RelatedContentsWindow.SetStringItem(idx, 2, ResultRow[2])
RelatedContentsWindow.SetStringItem(idx, 3, ResultRow[4])
#try:
if int(ResultRow[4]) < 100000 :
RelatedContentsWindow.SetItemBackgroundColour(idx, '#e6f1f5')
idx += 1
con.close()
self.PreSelectedID = self.GetPyData(self.root)
self.PreSelectedItem = self.root
"""
textctrl = wx.TextCtrl(self, -1, "I Am A Simple\nMultiline wx.TexCtrl", style=wx.TE_MULTILINE)
self.gauge = wx.Gauge(self, -1, 50, style=wx.GA_HORIZONTAL|wx.GA_SMOOTH)
self.gauge.SetValue(0)
combobox = wx.ComboBox(self, -1, choices=["That", "Was", "A", "Nice", "Holyday!"], style=wx.CB_READONLY|wx.CB_DROPDOWN)
textctrl.Bind(wx.EVT_CHAR, self.OnTextCtrl)
combobox.Bind(wx.EVT_COMBOBOX, self.OnComboBox)
lenArtIds = len(ArtIDs) - 2
"""
"""
for x in range(15):
if x == 1:
child = self.AppendItem(self.root, "Item %d" % x)# + "\nHello World\nHappy wxPython-ing!")
self.SetItemBold(child, True)
else:
child = self.AppendItem(self.root, "Item %d" % x)
self.SetPyData(child, None)
self.SetItemImage(child, folder_close_idx, wx.TreeItemIcon_Normal)
self.SetItemImage(child, folder_open_idx, wx.TreeItemIcon_Expanded)
if random.randint(0, 3) == 0:
self.SetItemLeftImage(child, random.randint(0, lenArtIds))
for y in range(5):
if y == 0 and x == 1:
last = self.AppendItem(child, "item %d-%s" % (x, chr(ord("a")+y)), ct_type=2, wnd=self.gauge)
elif y == 1 and x == 2:
last = self.AppendItem(child, "Item %d-%s" % (x, chr(ord("a")+y)), ct_type=1, wnd=textctrl)
if random.randint(0, 3) == 1:
self.SetItem3State(last, True)
elif 2 < y < 4:
last = self.AppendItem(child, "item %d-%s" % (x, chr(ord("a")+y)))
elif y == 4 and x == 1:
last = self.AppendItem(child, "item %d-%s" % (x, chr(ord("a")+y)), wnd=combobox)
else:
last = self.AppendItem(child, "item %d-%s" % (x, chr(ord("a")+y)), ct_type=2)
self.SetPyData(last, None)
self.SetItemImage(last, folder_close_idx, wx.TreeItemIcon_Normal)
self.SetItemImage(last, folder_open_idx, wx.TreeItemIcon_Expanded)
if random.randint(0, 3) == 0:
self.SetItemLeftImage(last, random.randint(0, lenArtIds))
for z in range(5):
if z > 2:
item = self.AppendItem(last, "item %d-%s-%d" % (x, chr(ord("a")+y), z), ct_type=1)
if random.randint(0, 3) == 1:
self.SetItem3State(item, True)
elif 0 < z <= 2:
item = self.AppendItem(last, "item %d-%s-%d" % (x, chr(ord("a")+y), z), ct_type=2)
elif z == 0:
item = self.AppendItem(last, "item %d-%s-%d" % (x, chr(ord("a")+y), z))
self.SetItemHyperText(item, True)
self.SetPyData(item, None)
self.SetItemImage(item, folder_close_idx, wx.TreeItemIcon_Normal)
self.SetItemImage(item, folder_open_idx, wx.TreeItemIcon_Expanded)
if random.randint(0, 3) == 0:
self.SetItemLeftImage(item, random.randint(0, lenArtIds))
"""
return
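# NOTE (editorial sketch): LoadData above repeats the same merge four times:
# user rows whose UserContentsLocation contains "top" come first, then each
# public row followed by the user rows that point at it (UserRow[5] equal to
# PublicRow[4]). A helper along these lines could remove the duplication;
# merge_rows is an assumed name and is not used by the methods below.
#
#   def merge_rows(public_rows, user_rows, location=None):
#       keep = lambda row: location is None or row[0] == location
#       rows = [u for u in user_rows if "top" in u[5] and keep(u)]
#       for p in public_rows:
#           if keep(p):
#               rows.append(p)
#               rows.extend(u for u in user_rows if u[5] == p[4] and keep(u))
#       return rows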
def BindEvents(self, choice, recreate=False):
value = choice.GetValue()
text = choice.GetLabel()
evt = "CT." + text
binder = self.eventdict[text]
if value == 1:
if evt == "CT.EVT_TREE_BEGIN_RDRAG":
self.Bind(wx.EVT_RIGHT_DOWN, None)
self.Bind(wx.EVT_RIGHT_UP, None)
self.Bind(eval(evt), binder)
else:
self.Bind(eval(evt), None)
if evt == "CT.EVT_TREE_BEGIN_RDRAG":
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
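# NOTE (editorial sketch): BindEvents above resolves the binder with
# eval("CT." + text). A getattr lookup does the same job without eval and
# raises AttributeError on an unknown label; shown here only as an assumed
# alternative, it is not used elsewhere in this class:
#
#   event_obj = getattr(CT, text)      # e.g. CT.EVT_TREE_SEL_CHANGED
#   self.Bind(event_obj, binder if value == 1 else None)
#
# ChangeStyle below could replace its eval("CT." + combo.GetLabel()) the same way.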
def ChangeStyle(self, combos):
style = 0
for combo in combos:
if combo.GetValue() == 1:
style = style | eval("CT." + combo.GetLabel())
if self.GetAGWWindowStyleFlag() != style:
self.SetAGWWindowStyleFlag(style)
def OnCompareItems(self, item1, item2):
t1 = self.GetItemText(item1)
t2 = self.GetItemText(item2)
self.log.write('compare: ' + t1 + ' <> ' + t2 + "\n")
if t1 < t2:
return -1
if t1 == t2:
return 0
return 1
def OnIdle(self, event):
"""
if self.gauge:
try:
if self.gauge.IsEnabled() and self.gauge.IsShown():
| |
"""
- EMOS 'computer.py' Source Code -
(C) Cubeflix 2021 (EMOS)
"""
# Imports
from .misc import *
from .memory import *
class FileSystem:
"""A hard drive managed by the OS."""
def __init__(self, computer, outfile):
"""Create the hard drive.
Args: computer -> the computer the hard drive is attached to
outfile -> the name of the output file/virtual hard disk"""
self.computer = computer
self.outfile = outfile
self.filesystem = {}
def read_file(self, path):
"""Read a file from the file system.
Args: path -> the path to the file"""
# Split the path
split_path = os.path.normpath(path).split(os.path.sep)
# Find the file by iterating through the path into the file system
traversal_history = [self.filesystem]
for item in split_path:
if item in ('', '.'):
continue
elif item == '..':
if len(traversal_history) == 1:
return (31, "Cannot traverse back from root directory.")
# Move back
del traversal_history[-1]
continue
# Traverse to the next section
try:
traversal_history.append(traversal_history[-1][item])
except (KeyError, TypeError):
return (32, "Path is invalid.")
# Get the final file
if not type(traversal_history[-1]) in (bytes, bytearray):
return (32, "Path is invalid.")
return (0, traversal_history[-1])
def write_file(self, path, data):
"""Write to a new or existing file on the file system.
Args: path -> the path to the file
data -> the data to write to the file"""
# Split the path
split_path = os.path.normpath(path).split(os.path.sep)
final_name = split_path.pop()
if any([char in final_name for char in INVALID_FILENAME_CHARS]):
return (34, "Invalid filename.")
# Find the file by iterating through the path into the file system
traversal_history = [self.filesystem]
for item in split_path:
if item in ('', '.'):
continue
elif item == '..':
if len(traversal_history) == 1:
return (31, "Cannot traverse back from root directory.")
# Move back
del traversal_history[-1]
continue
# Traverse to the next section
try:
traversal_history.append(traversal_history[-1][item])
except (KeyError, TypeError):
return (32, "Path is invalid.")
# Check for a folder
if final_name in traversal_history[-1] and type(traversal_history[-1][final_name]) == dict:
return (32, "Path is invalid.")
# Write to the final file
traversal_history[-1][final_name] = data
# Update
self._backend_update()
return (0, None)
def delete_file(self, path):
"""Delete a file from the file system.
Args: path -> the path to the file"""
# Split the path
split_path = os.path.normpath(path).split(os.path.sep)
# Find the file by iterating through the path into the file system
traversal_history = [self.filesystem]
for item in split_path:
if item in ('', '.'):
continue
elif item == '..':
if len(traversal_history) == 1:
return (31, "Cannot traverse back from root directory.")
# Move back
del traversal_history[-1]
continue
# Traverse to the next section
try:
traversal_history.append(traversal_history[-1][item])
except (KeyError, TypeError):
return (32, "Path is invalid.")
# Check the final file
if not type(traversal_history[-1]) in (bytes, bytearray):
return (32, "Path is invalid.")
# Check for environment file
if split_path[-1] == '__enviro':
return (41, "Cannot delete environment file.")
# Delete the file using the second to last reference in the traversal history
del traversal_history[-2][split_path[-1]]
# Update
self._backend_update()
return (0, None)
def rename_file(self, path, new_name):
"""Rename a file within the file system.
Args: path -> the path to the file
new_name -> new file name"""
if any([char in new_name for char in INVALID_FILENAME_CHARS]):
return (34, "Invalid filename.")
# Split the path
split_path = os.path.normpath(path).split(os.path.sep)
# Find the file by iterating through the path into the file system
traversal_history = [self.filesystem]
for item in split_path:
if item in ('', '.'):
continue
elif item == '..':
if len(traversal_history) == 1:
return (31, "Cannot traverse back from root directory.")
# Move back
del traversal_history[-1]
continue
# Traverse to the next section
try:
traversal_history.append(traversal_history[-1][item])
except (KeyError, TypeError):
return (32, "Path is invalid.")
# Allow folders and files
# Rename the file using the second to last reference in the traversal history
# Check for environment file
if split_path[-1] == '__enviro':
return (41, "Cannot delete environment file.")
traversal_history[-2][new_name] = traversal_history[-2].pop(split_path[-1])
# Update
self._backend_update()
return (0, None)
def create_directory(self, path):
"""Create a directory in the file system.
Args: path -> the path to the folder"""
# Split the path
split_path = os.path.normpath(path).split(os.path.sep)
final_name = split_path.pop()
if any([char in final_name for char in INVALID_FILENAME_CHARS]):
return (34, "Invalid directory name.")
# Find the folder by iterating through the path into the file system
traversal_history = [self.filesystem]
for item in split_path:
if item in ('', '.'):
continue
elif item == '..':
if len(traversal_history) == 1:
return (31, "Cannot traverse back from root directory.")
# Move back
del traversal_history[-1]
continue
# Traverse to the next section
try:
traversal_history.append(traversal_history[-1][item])
except (KeyError, TypeError):
return (32, "Path is invalid.")
# Check for an existing folder
if final_name in traversal_history[-1] and type(traversal_history[-1][final_name]) == dict:
return (33, "Folder already exists.")
# Check for an existing file
if final_name in traversal_history[-1] and type(traversal_history[-1][final_name]) in (bytes, bytearray):
return (32, "Path is invalid.")
# Create the directory
traversal_history[-1][final_name] = {}
# Update
self._backend_update()
return (0, None)
def delete_directory(self, path):
"""Delete a directory from the file system.
Args: path -> the path to the folder"""
# Split the path
split_path = os.path.normpath(path).split(os.path.sep)
# Find the folder by iterating through the path into the file system
traversal_history = [self.filesystem]
for item in split_path:
if item in ('', '.'):
continue
elif item == '..':
if len(traversal_history) == 1:
return (31, "Cannot traverse back from root directory.")
# Move back
del traversal_history[-1]
continue
# Traverse to the next section
try:
traversal_history.append(traversal_history[-1][item])
except (KeyError, TypeError):
return (32, "Path is invalid.")
# Check the final directory
if not type(traversal_history[-1]) == dict:
return (32, "Path is invalid.")
# Delete the folder using the second to last reference in the traversal history
try:
del traversal_history[-2][split_path[-1]]
except Exception as e:
return (32, "Path is invalid.")
# Update
self._backend_update()
return (0, None)
def list_directory(self, path):
"""List a directory path, seperated by newlines.
Args: path -> the path to the directory"""
# Split the path
split_path = os.path.normpath(path).split(os.path.sep)
# Find the folder by iterating through the path into the file system
traversal_history = [self.filesystem]
for item in split_path:
if item in ('', '.'):
continue
elif item == '..':
if len(traversal_history) == 1:
return (31, "Cannot traverse back from root directory.")
# Move back
del traversal_history[-1]
continue
# Traverse to the next section
try:
traversal_history.append(traversal_history[-1][item])
except (KeyError, TypeError):
return (32, "Path is invalid.")
# Check the final directory
if not type(traversal_history[-1]) == dict:
return (32, "Path is invalid.")
# List the folder
data = '\n'.join(traversal_history[-1])
# Update
self._backend_update()
return (0, data)
def get_full_buffer(self):
"""Get the full file system buffer."""
return pickle.dumps([self.filesystem, self.password])
def _backend_load(self):
"""Load the file system from the output file."""
try:
f = open(self.outfile, 'rb')
except Exception as e:
raise SysError("Output file for FileSystem does not exist.")
self.filesystem, self.password = pickle.loads(f.read())
f.close()
if not '__enviro' in self.filesystem:
self.filesystem['__enviro'] = b'{}'
self._backend_update()
def _backend_update(self):
"""Update the virtual hard drive file."""
f = open(self.outfile, 'wb')
f.write(self.get_full_buffer())
f.close()
def _format(self, password=None):
"""Format the hard drive."""
self.filesystem = {'__enviro' : b'{}'}
self.password = hashlib.sha256(bytes(password, ENCODING)).digest()
self._backend_update()
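# Editorial note: every FileSystem method above repeats the same path walk
# (skip '' and '.', pop one level on '..', refuse to go above the root, and
# return the module's (31, ...) / (32, "Path is invalid.") error tuples on
# failure). A minimal sketch of a shared helper is given below; _traverse_path
# is an assumed name and is not called by the class above.
def _traverse_path(filesystem, split_path):
    """Walk split_path inside the nested dict `filesystem`.

    Returns (0, traversal_history) on success, where traversal_history[-1] is
    the reached node, or an (error_code, message) tuple mirroring the ones
    used by the FileSystem methods.
    """
    traversal_history = [filesystem]
    for item in split_path:
        if item in ('', '.'):
            continue
        if item == '..':
            if len(traversal_history) == 1:
                return (31, "Cannot traverse back from root directory.")
            del traversal_history[-1]
            continue
        try:
            traversal_history.append(traversal_history[-1][item])
        except (KeyError, TypeError):
            return (32, "Path is invalid.")
    return (0, traversal_history)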
class Computer:
"""The main computer object."""
def __init__(self):
"""Create the computer."""
self.peripherals = {}
self.peripheral_ids = []
def set_memory(self, memory):
"""Set the memory for the computer.
Args: memory -> the memory to set"""
self.memory = memory
def set_cpu(self, cpu):
"""Set the CPU for the computer.
Args: cpu -> the CPU to set"""
self.cpu = cpu
def set_os(self, os):
"""Set the operating system for the computer.
Args: os -> the operating system to set"""
self.operatingsystem = os
def set_filesystem(self, filesystem):
"""Set the file system/hard drive for the computer.
Args: filesystem -> the file system to set"""
self.filesystem = filesystem
def start(self):
"""Start up the computer."""
self.operatingsystem.start_os()
def shutdown(self):
"""Shut down the computer."""
# NOTE: To avoid errors with user-side shutdown, terminals and other peripherals should not be used after the shutdown,
# and processes should not be accessed either, apart from debugging purposes.
self.operatingsystem.stop_os()
def add_peripheral(self, peripheral):
"""Add a peripheral to the computer.
Args: peripheral -> the peripheral to add"""
# Find a valid peripheral ID
for i in range(max(self.peripheral_ids) if self.peripheral_ids else 0):
    if i not in self.peripheral_ids:
        # This peripheral ID is free
        current_pid = i
        break
else:
    # No holes were found, so allocate a new PID past the current maximum
    current_pid = (max(self.peripheral_ids) if self.peripheral_ids else -1) + 1
# Add the peripheral
self.peripheral_ids.append(current_pid)
self.peripherals[current_pid] = peripheral
return current_pid
def remove_peripheral(self, pid):
"""Remove a peripheral from the computer.
Args: pid -> peripheral ID to remove"""
self.peripheral_ids.remove(pid)
del self.peripherals[pid]
def interrupt(self, iid, pid, tid):
"""Call an interrupt.
Args: iid -> interrupt ID to call
pid -> process ID
tid -> thread ID"""
iid = int.from_bytes(iid, byteorder='little')
for peripheral_id, peripheral in self.peripherals.items():
# Check if the peripheral supports the interrupt ID
if iid in peripheral.defined_interrupts:
# Call the interrupt
return peripheral.handle(iid, pid, tid)
return (22, "Not a supported interrupt.")
def __repr__(self):
"""Get the string representation of the computer."""
return "<Computer>"
def __str__(self):
"""Get the string representation of the computer."""
return self.__repr__()
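# Editorial note: add_peripheral above reuses the lowest free slot before
# falling back to max + 1. The same allocation policy in isolation, handy for
# a quick test; next_free_id is an assumed helper name and is not used by the
# Computer class.
def next_free_id(used_ids):
    """Return the smallest non-negative integer not present in used_ids."""
    for i in range(len(used_ids) + 1):
        if i not in used_ids:
            return i

# e.g. next_free_id([0, 1, 3]) == 2 and next_free_id([]) == 0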
class Peripheral:
"""The base class for all peripherals."""
defined_interrupts = []
def __init__(self, computer):
"""Create the peripheral.
Args: computer -> computer the peripheral is attached to"""
self.computer = computer
def start(self, pid):
"""Run starting or initialization protocols.
Args: pid -> peripheral ID"""
self.pid = pid
def end(self):
"""Run ending protocols."""
del self.pid
def handle(self, iid, pid, tid):
"""Handle the interrupt.
Args: iid -> the interrupt ID
pid -> the process ID
tid -> the thread ID"""
...
def __repr__(self):
"""Get the string representation of the peripheral."""
return "<Peripheral>"
def __str__(self):
"""Get the string representation of the peripheral."""
return self.__repr__()
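# Editorial note: Computer.interrupt forwards an interrupt to the first
# peripheral whose defined_interrupts list contains the decoded IID. A minimal
# sketch of a custom peripheral using that contract follows; the class name
# and the interrupt number 0xf0 are illustrative assumptions only.
class NullDevice(Peripheral):
    """A sketch peripheral that acknowledges interrupt 0xf0 and does nothing."""
    defined_interrupts = [0xf0]

    def handle(self, iid, pid, tid):
        # Keep the (code, value) return convention used across this module.
        return (0, None)

# Sketched wiring, assuming a fully configured Computer instance `computer`:
#   dev_id = computer.add_peripheral(NullDevice(computer))
#   computer.interrupt((0xf0).to_bytes(1, 'little'), pid=0, tid=0)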
class TerminalScreen(Peripheral):
"""The terminal screen class."""
defined_interrupts = [0xe0, 0xe1, 0xe2, 0xe3]
def __init__(self, computer):
"""Create the terminal."""
self.computer = computer
def start(self, pid):
"""Start the terminal.
Args: pid -> peripheral ID"""
self.pid = pid
# Get screen size
size = os.get_terminal_size()
self.rows = size.lines
self.cols = size.columns
# Create the designated memory for the terminal's printout
self.computer.memory.add_memory_partition(('perp', self.pid), MemorySection('terminal_perp_' + str(self.pid), self.rows * self.cols + self.rows, bytes(self.rows * self.cols + self.rows)))
# Set the window title and clear
clear()
os.system("title [EMOS] TERMINAL_SCREEN_" + str(self.pid))
def end(self):
"""Run ending protocols."""
# Remove the designated memory for the terminal's printout
self.computer.memory.delete_memory_partition(('perp', self.pid))
del self.pid
del | |
<filename>madgraph/fks/fks_helas_objects.py
################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""Definitions of the Helas objects needed for the implementation of MadFKS
from born"""
import madgraph.core.base_objects as MG
import madgraph.core.helas_objects as helas_objects
import madgraph.core.diagram_generation as diagram_generation
import madgraph.core.color_amp as color_amp
import madgraph.core.color_algebra as color_algebra
import madgraph.fks.fks_base as fks_base
import madgraph.fks.fks_common as fks_common
import madgraph.loop.loop_helas_objects as loop_helas_objects
import copy
import logging
import array
logger = logging.getLogger('madgraph.fks_helas_objects')
class FKSHelasMultiProcess(helas_objects.HelasMultiProcess):
"""class to generate the helas calls for a FKSMultiProcess"""
def get_sorted_keys(self):
"""Return particle property names as a nicely sorted list."""
keys = super(FKSHelasMultiProcess, self).get_sorted_keys()
keys += ['real_matrix_elements', ['has_isr'], ['has_fsr']]
return keys
def filter(self, name, value):
"""Filter for valid leg property values."""
if name == 'real_matrix_elements':
if not isinstance(value, helas_objects.HelasMultiProcess):
raise self.PhysicsObjectError, \
"%s is not a valid list for real_matrix_element " % str(value)
def __init__(self, fksmulti, loop_optimized = False, gen_color =True, decay_ids =[]):
"""Initialization from a FKSMultiProcess"""
# switch the other loggers off
loggers_off = [logging.getLogger('madgraph.diagram_generation'),
logging.getLogger('madgraph.helas_objects')]
old_levels = [logg.level for logg in loggers_off]
for logg in loggers_off:
logg.setLevel(logging.WARNING)
self.loop_optimized = loop_optimized
# generate the real ME's if they are needed.
# note that it may not always be the case, e.g. if the NLO_mode is LOonly
if fksmulti['real_amplitudes']:
logger.info('Generating real emission matrix-elements...')
self['real_matrix_elements'] = self.generate_matrix_elements(
copy.copy(fksmulti['real_amplitudes']), combine_matrix_elements = False)
else:
self['real_matrix_elements'] = helas_objects.HelasMatrixElementList()
self['matrix_elements'] = self.generate_matrix_elements_fks(
fksmulti,
gen_color, decay_ids)
self['initial_states']=[]
self['has_isr'] = fksmulti['has_isr']
self['has_fsr'] = fksmulti['has_fsr']
self['has_loops'] = len(self.get_virt_matrix_elements()) > 0
for i, logg in enumerate(loggers_off):
logg.setLevel(old_levels[i])
def get_used_lorentz(self):
"""Return a list of (lorentz_name, conjugate, outgoing) with
all lorentz structures used by this HelasMultiProcess."""
helas_list = []
for me in self.get('matrix_elements'):
helas_list.extend(me.get_used_lorentz())
return list(set(helas_list))
def get_used_couplings(self):
"""Return a list with all couplings used by this
HelasMatrixElement."""
coupling_list = []
for me in self.get('matrix_elements'):
coupling_list.extend([c for l in me.get_used_couplings() for c in l])
return list(set(coupling_list))
def get_matrix_elements(self):
"""Extract the list of matrix elements"""
return self.get('matrix_elements')
def get_virt_matrix_elements(self):
"""Extract the list of virtuals matrix elements"""
return [me.virt_matrix_element for me in self.get('matrix_elements') \
if me.virt_matrix_element]
def generate_matrix_elements_fks(self, fksmulti, gen_color = True,
decay_ids = []):
"""Generate the HelasMatrixElements for the amplitudes,
identifying processes with identical matrix elements, as
defined by HelasMatrixElement.__eq__. Returns a
HelasMatrixElementList and an amplitude map (used by the
SubprocessGroup functionality). decay_ids is a list of decayed
particle ids, since those should not be combined even if
matrix element is identical."""
fksprocs = fksmulti['born_processes']
assert isinstance(fksprocs, fks_base.FKSProcessList), \
"%s is not valid FKSProcessList" % \
repr(fksprocs)
# Keep track of already generated color objects, to reuse as
# much as possible
list_colorize = []
list_color_links =[]
list_color_basis = []
list_color_matrices = []
real_me_list = []
me_id_list = []
matrix_elements = FKSHelasProcessList()
for i, proc in enumerate(fksprocs):
logger.info("Generating Helas calls for FKS %s (%d / %d)" % \
(proc.born_amp.get('process').nice_string(print_weighted = False).\
replace('Process', 'process'),
i + 1, len(fksprocs)))
matrix_element_list = [FKSHelasProcess(proc, self['real_matrix_elements'],
fksmulti['real_amplitudes'],
loop_optimized = self.loop_optimized,
decay_ids=decay_ids,
gen_color=False)]
for matrix_element in matrix_element_list:
assert isinstance(matrix_element, FKSHelasProcess), \
"Not a FKSHelasProcess: %s" % matrix_element
try:
# If an identical matrix element is already in the list,
# then simply add this process to the list of
# processes for that matrix element
other = \
matrix_elements[matrix_elements.index(matrix_element)]
except ValueError:
# Otherwise, if the matrix element has any diagrams,
# add this matrix element.
if matrix_element.born_matrix_element.get('processes') and \
matrix_element.born_matrix_element.get('diagrams'):
matrix_elements.append(matrix_element)
if not gen_color:
continue
# Always create an empty color basis, and the
# list of raw colorize objects (before
# simplification) associated with amplitude
col_basis = color_amp.ColorBasis()
new_amp = matrix_element.born_matrix_element.get_base_amplitude()
matrix_element.born_matrix_element.set('base_amplitude', new_amp)
colorize_obj = col_basis.create_color_dict_list(new_amp)
try:
# If the color configuration of the ME has
# already been considered before, recycle
# the information
col_index = list_colorize.index(colorize_obj)
logger.info(\
"Reusing existing color information for %s" % \
matrix_element.born_matrix_element.get('processes')\
[0].nice_string(print_weighted=False).\
replace('Process', 'process'))
except ValueError:
# If not, create color basis and color
# matrix accordingly
list_colorize.append(colorize_obj)
col_basis.build()
list_color_basis.append(col_basis)
col_matrix = color_amp.ColorMatrix(col_basis)
list_color_matrices.append(col_matrix)
col_index = -1
logger.info(\
"Processing color information for %s" % \
matrix_element.born_matrix_element.\
get('processes')[0].nice_string(print_weighted=False).\
replace('Process', 'process'))
matrix_element.born_matrix_element.set('color_basis',
list_color_basis[col_index])
matrix_element.born_matrix_element.set('color_matrix',
list_color_matrices[col_index])
else:
# this is in order not to handle ValueErrors coming from other places,
# e.g. from the add_process function
other.add_process(matrix_element)
for me in matrix_elements:
me.set_color_links()
return matrix_elements
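# Editorial note: generate_matrix_elements_fks above merges identical processes
# by going through FKSHelasProcess.__eq__ via list.index(): when an equal
# matrix element is already stored the new one is folded in with add_process(),
# otherwise it is appended. The bare pattern, for reference (merge_equal and
# .merge() are illustrative names only, not part of this module):
#
#   def merge_equal(items, new_item):
#       try:
#           items[items.index(new_item)].merge(new_item)  # an equal element exists
#       except ValueError:
#           items.append(new_item)                        # first of its kind
#       return items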
class FKSHelasProcessList(MG.PhysicsObjectList):
"""class to handle lists of FKSHelasProcesses"""
def is_valid_element(self, obj):
"""Test if object obj is a valid FKSProcess for the list."""
return isinstance(obj, FKSHelasProcess)
class FKSHelasProcess(object):
"""class to generate the Helas calls for a FKSProcess. Contains:
-- born ME
-- list of FKSHelasRealProcesses
-- color links"""
def __init__(self, fksproc=None, real_me_list =[], real_amp_list=[],
loop_optimized = False, **opts):#test written
""" constructor, starts from a FKSProcess,
sets reals and color links. Real_me_list and real_amp_list are the lists of pre-generated
matrix elements in 1-1 correspondence with the amplitudes"""
if fksproc != None:
self.born_matrix_element = helas_objects.HelasMatrixElement(
fksproc.born_amp, **opts)
self.real_processes = []
self.orders = fksproc.born_proc.get('orders')
self.perturbation = fksproc.perturbation
real_amps_new = []
# combine for example u u~ > t t~ and d d~ > t t~
for proc in fksproc.real_amps:
fksreal_me = FKSHelasRealProcess(proc, real_me_list, real_amp_list, **opts)
try:
other = self.real_processes[self.real_processes.index(fksreal_me)]
other.matrix_element.get('processes').extend(\
fksreal_me.matrix_element.get('processes') )
except ValueError:
if fksreal_me.matrix_element.get('processes') and \
fksreal_me.matrix_element.get('diagrams'):
self.real_processes.append(fksreal_me)
real_amps_new.append(proc)
fksproc.real_amps = real_amps_new
if fksproc.virt_amp:
self.virt_matrix_element = \
loop_helas_objects.LoopHelasMatrixElement(fksproc.virt_amp,
optimized_output = loop_optimized)
else:
self.virt_matrix_element = None
# self.color_links_info = fksproc.find_color_links()
self.color_links = []
def set_color_links(self):
"""this function computes and returns the color links, it should be called
after the initialization and the setting of the color basis"""
if not self.color_links:
legs = self.born_matrix_element.get('base_amplitude').get('process').get('legs')
model = self.born_matrix_element.get('base_amplitude').get('process').get('model')
color_links_info = fks_common.find_color_links(fks_common.to_fks_legs(legs, model),
symm = True,pert = self.perturbation)
col_basis = self.born_matrix_element.get('color_basis')
self.color_links = fks_common.insert_color_links(col_basis,
col_basis.create_color_dict_list(
self.born_matrix_element.get('base_amplitude')),
color_links_info)
def get_fks_info_list(self):
"""Returns the list of the fks infos for all processes in the format
{n_me, pdgs, fks_info}, where n_me is the number of real_matrix_element the configuration
belongs to"""
info_list = []
for n, real in enumerate(self.real_processes):
pdgs = [l['id'] for l in real.matrix_element.get_base_amplitude()['process']['legs']]
for info in real.fks_infos:
info_list.append({'n_me' : n + 1,'pdgs' : pdgs, 'fks_info' : info})
return info_list
def get_lh_pdg_string(self):
"""Returns the pdgs of the legs in the form "i1 i2 -> f1 f2 ...", which may
be useful (eg. to be written in a B-LH order file)"""
initial = ''
final = ''
for leg in self.born_matrix_element.get('processes')[0].get('legs'):
if leg.get('state'):
final += '%d ' % leg.get('id')
else:
initial += '%d ' % leg.get('id')
return initial + '-> ' + final
def get(self, key):
"""the get function references to born_matrix_element"""
return self.born_matrix_element.get(key)
def get_used_lorentz(self):
"""the get_used_lorentz function references to born, reals
and virtual matrix elements"""
lorentz_list = self.born_matrix_element.get_used_lorentz()
for real in self.real_processes:
lorentz_list.extend(real.matrix_element.get_used_lorentz())
if self.virt_matrix_element:
lorentz_list.extend(self.virt_matrix_element.get_used_lorentz())
return list(set(lorentz_list))
def get_used_couplings(self):
"""the get_used_couplings function references to born, reals
and virtual matrix elements"""
coupl_list = self.born_matrix_element.get_used_couplings()
for real in self.real_processes:
coupl_list.extend([c for c in\
real.matrix_element.get_used_couplings()])
if self.virt_matrix_element:
coupl_list.extend(self.virt_matrix_element.get_used_couplings())
return coupl_list
def get_nexternal_ninitial(self):
"""the nexternal_ninitial function references to the real emissions if they have been
generated, otherwise to the born"""
if self.real_processes:
(nexternal, ninitial) = self.real_processes[0].matrix_element.get_nexternal_ninitial()
else:
(nexternal, ninitial) = self.born_matrix_element.get_nexternal_ninitial()
nexternal += 1
return (nexternal, ninitial)
def __eq__(self, other):
"""the equality between two FKSHelasProcesses is defined up to the
color links"""
selftag = helas_objects.IdentifyMETag.create_tag(self.born_matrix_element.get('base_amplitude'))
othertag = helas_objects.IdentifyMETag.create_tag(other.born_matrix_element.get('base_amplitude'))
if self.born_matrix_element != other.born_matrix_element or \
selftag != othertag:
return False
reals2 = copy.copy(other.real_processes)
for real in self.real_processes:
try:
reals2.remove(real)
except ValueError:
return False
if not reals2:
return True
else:
return False
def add_process(self, other): #test written, ppwj
"""adds processes from born and reals of other to itself. Note that
corresponding real processes may not be in the same order. This is
taken care of by constructing the list of self_reals."""
self.born_matrix_element.get('processes').extend(
other.born_matrix_element.get('processes'))
if self.virt_matrix_element and other.virt_matrix_element:
self.virt_matrix_element.get('processes').extend(
other.virt_matrix_element.get('processes'))
self_reals = | |
is preety amazing i like the way she move i mean work',
]
word = lst[random.randint(0, len(lst) - 1)]
speak(word)
elif exist(['my friends', 'my friend']):
speak("I know one of your friend. Her name is Elva. Oh wow Its me.")
elif exist(['weather', 'mosam', 'outside', 'city']):
weather.weather("indore")
bool_weather = False
else:
bool_wiki = False
wiki(query)
# YOU ARE
elif exist(['you are', 'your']) and exist(['awesome', 'crazy', 'wonderful', 'amazing', 'insane', 'good', 'nice',
'kind', 'cool', 'better']):
speak(Reply.thank())
bool_good = False
# ENTERTAIN
elif exist(['draw', 'entertain me', 'star']):
speak("Should i draw cool star for you ?")
ask = take_command().lower()
lst = ['ok', 'sure', 'definitely', 'yup', "yep", "respectable", "of course", "green light", "confirm",
"okeydokey", "surely", "satisfactory", "tolerable", "correct", "good", "not bad", "up to scratch",
"accurate", "no problem", "alright", "yes", "Here we go", "sounds good", "alright then", "absolutely",
"as you say"]
for word in lst:
if word in ask:
a = turtle.Turtle()
a.getscreen().bgcolor("black")
a.penup()
a.goto(-200, 100)
a.pendown()
a.color("yellow")
a.speed(25)
star(a, 360)
turtle.done()
else:
speak("Hopefully you liked it")
# STONE PAPER SCISSORS
elif bool_sps and exist(["game"]) and exist(['stone', 'paper', 'scissors']):
stone_paper_scissors.stone_paper_scissor()
print("Middle respond")
print("Query : ", query)
# OPEN THINGS ------------------------------------------------------------------------------------------------------
if exist(['open', 'run']):
if exist(['your mouth', 'you are mouth']):
speak("Ha Ha Ha")
elif ('new' in query) and ('tab' in query):
try:
keyboard.press_and_release('ctrl + t')
speak(Reply.okay())
speak("New and Fresh tab is Opening")
except:
speak("Unable to Open New Tab for You")
elif bool_sps and exist(['stone', 'paper', 'scissors']):
stone_paper_scissors.stone_paper_scissor()
# SUBLIME TEXT EDITOR
elif exist(['sublime', 'code editor']):
speak(Reply.okay())
speak(f"{user.name}. I am opening Sublime text editor")
try:
subprocess.Popen("C:\\Program Files\\Sublime Text 3\\sublime_text.exe")
except:
speak(f"I am unable to open Sublime text editor. Please make a contact with {user.name}")
# VISUAL STUDIO
elif exist(['vs ', 'visual studio']):
speak(Reply.okay())
speak(f"{user.name}. I am opening Visual Studio")
try:
subprocess.Popen("C:\\Users\\dhruv\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe")
except:
speak(f"I am unable to open Visual studio code. Please make a contact with {user.name}")
# PYCHARM
elif exist(['pycharm', 'python pro']):
speak(Reply.okay())
speak(f"{user.name}. I am opening pycharm")
try:
subprocess.Popen("C:\\Program Files\\JetBrains\\PyCharm Community Edition 2021.1.1\\bin\\pycharm64.exe")
except:
speak(f"I am unable to open Pycharm. Please make a contact with {user.name}")
# NOTEPAD
elif exist(['notepad', 'note pad']):
speak(Reply.okay())
speak(f"{user.name}. I am opening notepad only for you")
try:
subprocess.Popen('C:\\Windows\\System32\\notepad.exe')
except:
speak(f"I am unable to open notepad. Please make a contact with {user.name}")
# CALCULATOR
elif exist(['calculator']):
speak(Reply.okay())
speak(f"{user.name}. I am opening calculator only for you")
try:
subprocess.Popen('C:\\Windows\\System32\\calc.exe')
except:
speak(f"I am unable to open Calculator. Please make a contact with {user.name}")
# WORDPAD
elif exist(['wordpad', 'word pad']):
speak(Reply.okay())
speak(f"{user.name}. I am opening calculator only for you")
try:
subprocess.Popen('C:\\Windows\\System32\\write.exe')
except:
speak(f"I am unable to open word pad. Please make a contact with {user.name}")
# BROWSER
elif exist(['browser', 'microsoft edge', 'edge', ]):
speak(Reply.okay())
speak(f"{user.name}. I am Opening Microsoft edge.")
try:
webbrowser.open("https://google.com")
except:
speak(f"I am unable to open Microsoft edge . Please make a contact with {user.name}")
speak("Here you can browse whatever you want.")
# WEBSITES -----------------------------------------------------------------------------------------------------
elif exist(['youtube', 'YouTube']):
speak(Reply.okay())
webbrowser.open("https://youtube.com/")
elif exist(['spotify']):
speak(Reply.okay())
webbrowser.open("https://spotify.com/")
elif exist(['twitter']):
speak(Reply.okay())
webbrowser.open("https://twitter.com/")
elif exist(['instagram']):
speak(Reply.okay())
webbrowser.open("https://instagram.com/")
elif exist(['google', 'Google']):
speak(Reply.okay())
webbrowser.open("https://google.com/")
elif exist(['amazon']):
speak(Reply.okay())
webbrowser.open("https://amazon.com/")
elif exist(['flipkart']):
speak(Reply.okay())
webbrowser.open("https://flipkart.com/")
elif exist(['linkedin']):
speak(Reply.okay())
webbrowser.open("https://linkedin.com/")
else:
try:
speak(Reply.okay())
name = query.split('open')[-1]
webbrowser.open("https://" + name + ".com/")
speak("I am opening" + name)
except:
speak(Reply.okay())
name = query.split('open')[-1]
webbrowser.open("https://google.com/search?q=" + name)
speak("I am opening" + name)
# CLOSE THINGS -----------------------------------------------------------------------------------------------------
if bool_close and exist(['close', 'kill']):
speak("Are you sure?")
speak("say yes to confirm")
confirm = take_command().lower()
if ('yes' in confirm) or ('sure' in confirm):
bool_close = False
if exist(['your mouth', 'you are mouth']):
speak("Na Na Na")
# CURRENT TAB
elif ('tap' in query) or ('tab' in query) or ('time' in query):
try:
keyboard.press_and_release('ctrl + w')
speak(Reply.okay())
speak("Your Current tab is closed")
except:
speak("Unable to Open New Tab for You")
elif exist(['sublime', 'code editor']):
speak(Reply.okay())
speak(f"{user.name}. I am Closing Sublime text editor")
try:
os.system("TASKKILL /F /im sublime_text.exe")
except:
speak(f"I am unable to Close Sublime text editor. Please make a contact with {user.name}")
# VISUAL STUDIO
elif exist(['vs ', 'visual studio']):
speak(Reply.okay())
speak(f"{user.name}. I am closing Visual Studio")
try:
os.system("TASKKILL /F /im Code.exe")
except:
speak(f"I am unable to Close Visual studio code. Please make a contact with {user.name}")
# PYCHARM
elif exist(['pycharm', 'python pro']):
speak(Reply.okay())
speak(f"{user.name}. I am closing pycharm")
try:
os.system("TASKKILL /F /im pycharm64.exe")
except:
speak(f"I am unable to Close Pycharm. Please make a contact with {user.name}")
# NOTEPAD
elif exist(['notepad', 'note pad']):
speak(Reply.okay())
speak(f"{user.name}. I am closing notepad for you")
try:
os.system("TASKKILL /F /im notepad.exe")
except:
speak(f"I am unable to Close notepad. Please make a contact with {user.name}")
# CALCULATOR
elif exist(['calculator']):
speak(Reply.okay())
speak(f"{user.name}. I am closing calculator for you")
try:
os.system("TASKKILL /F /im calc.exe")
except:
speak(f"I am unable to close Calculator. Please make a contact with {user.name}")
# WORDPAD
elif exist(['wordpad', 'word pad']):
speak(Reply.okay())
speak(f"{user.name}. I am closing wordpad for you")
try:
os.system("TASKKILL /F /im write.exe")
except:
speak(f"I am unable to Close word pad. Please make a contact with {user.name}")
# BROWSER
elif exist(['browser', 'microsoft edge', 'edge', ]):
speak(Reply.okay())
speak(f"{user.name}. I am Opening Microsoft edge.")
try:
os.system("TASKKILL /F /im google.exe")
except:
speak(f"I am unable to open Microsoft edge . Please make a contact with {user.name}")
speak("Here you can browse whatever you want.")
# WEBSITES -----------------------------------------------------------------------------------------------------
elif exist(['spotify']):
speak(Reply.okay())
webbrowser.open("https://spotify.com/")
elif exist(['twitter']):
speak(Reply.okay())
webbrowser.open("https://twitter.com/")
elif exist(['instagram']):
speak(Reply.okay())
webbrowser.open("https://instagram.com/")
elif exist(['google', 'Google']):
speak(Reply.okay())
webbrowser.open("https://google.com/")
elif exist(['amazon']):
speak(Reply.okay())
webbrowser.open("https://amazon.com/")
elif exist(['flipkart']):
speak(Reply.okay())
webbrowser.open("https://flipkart.com/")
elif exist(['linkedin']):
speak(Reply.okay())
webbrowser.open("https://linkedin.com/")
else:
try:
speak(Reply.okay())
name = query.split('open')[-1]
webbrowser.open("https://" + name + ".com/")
speak("I am opening" + name)
except:
speak(Reply.okay())
name = query.split('open')[-1]
webbrowser.open("https://google.com/search?q=" + name)
speak("I am opening" + name)
# TEXT READER
if bool_read_write and exist(['read text', 'text to speech', 'read for me', 'open text reader', 'text reader']):
try:
speak(Reply.okay())
speak(f"{user.name}. please select text file")
import text_reader
speak("Hopefully you liked it")
except:
speak(f"Something went wrong {user.name}")
finally:
bool_read_write = False
# NOTE WRITER
# NOTE MAKER
elif bool_read_write and exist(['write it down', 'make a note', 'write note', 'write it', 'right it', 'note it']):
try:
note = ""
lst = ['done', 'save it', 'save']
lst1 = ['delete', 'remove', 'discard it']
speak(Reply.okay())
speak(f"{user.name}. please say Clear and loud because i am writing")
while True:
speak("I am writing now")
raw = take_command()
for word in lst:
if word in raw:
speak("I am saving your note ")
file = open('note.txt', 'w')
file.write(note)
file.close()
exit()
for word in lst1:
if word in raw:
speak(f"Done {user.name}. By By")
exit()
note += " " + raw
finally:
bool_read_write = False
# BASIC STUFF ------------------------------------------------------------------------------------------------------
# SCREENSHOT
if bool_shot and exist(['take', 'capture']) and exist(['screenshot', 'snapshot', 'screen']):
try:
speak("What should i name that?")
name = take_command().lower()
myScreenshot = pyautogui.screenshot()
myScreenshot.save("E:\\" + name + ".png")
speak("Your ScreenShot is saved in E Drive as a png format")
except:
speak("I think, The location to save ScreenShot is not Correct")
speak("This feature is comming soon")
finally:
bool_shot = False
# EXCITING
if bool_good and exist(['awesome', 'wonderful', 'amazing', 'exciting', 'wow', 'cool', 'nice', 'good', 'kind', 'crazy']):
speak("Yaah, It is " + query)
bool_good = False
# BLESSED
elif exist(['thanks', 'thank you']):
speak(Reply.pleasure())
# NO
elif exist(['no', 'nope']):
speak("okay")
# BYE
elif exist(['by', 'bye', 'then by', 'okay by']):
by()
# OKAY, GOT IT ETC....................
elif bool_good and exist(['ok', 'sure', 'definitely', 'yup', "yep", "respectable", "of course", "green light", "confirm",
"okeydokey", "surely", "satisfactory", "tolerable", "correct", "good", "not bad", "up to scratch",
"accurate", "no problem", "alright", "yes", "Here we go", "sounds good", "alright then", "absolutely",
"as you say"]):
speak(Reply.okay())
bool_good = False
if exist(['I am', "I'm"]):
if exist(["excited", "amazing", "awesome", "great", "creative", "nice", "good", "best", "happy", "insane",
"hero", "wonderful"]):
speak("Your are Looking happy Today. Do You wanna Share Something with me")
elif exist(['know', 'am awesome', 'am amazing', 'am great']):
lst = | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from models import utils
class Model(object):
def __init__(self,
cfg,
model_arch,
restore_vars_dict=None):
self.cfg = cfg
self.batch_size = cfg.BATCH_SIZE
self.model_arch = model_arch
self.restore_vars_dict = restore_vars_dict
self.model_arch_info = None
def _get_inputs(self,
input_size,
num_class,
image_size):
"""Get input tensors.
Args:
input_size: the size of input tensor
num_class: number of class of label
image_size: the size of ground truth images, should be 3 dimensional
Returns:
input tensors
"""
_inputs = tf.placeholder(
tf.float32, shape=[self.cfg.BATCH_SIZE, *input_size], name='inputs')
_labels = tf.placeholder(
tf.float32, shape=[self.cfg.BATCH_SIZE, num_class], name='labels')
_input_imgs = tf.placeholder(
tf.float32, shape=[self.cfg.BATCH_SIZE, *image_size], name='input_imgs')
_is_training = tf.placeholder(tf.bool, name='is_training')
return _inputs, _labels, _input_imgs, _is_training
def _optimizer(self,
opt_name='adam',
n_train_samples=None,
global_step=None):
"""Optimizer."""
# Learning rate with exponential decay
if self.cfg.LR_DECAY:
learning_rate_ = tf.train.exponential_decay(
learning_rate=self.cfg.LEARNING_RATE,
global_step=global_step,
decay_steps=self.cfg.LR_DECAY_STEPS,
decay_rate=self.cfg.LR_DECAY_RATE)
learning_rate_ = tf.maximum(learning_rate_, 1e-6)
else:
learning_rate_ = self.cfg.LEARNING_RATE
if opt_name == 'adam':
return tf.train.AdamOptimizer(learning_rate_)
elif opt_name == 'momentum':
n_batches_per_epoch = \
n_train_samples // self.cfg.GPU_BATCH_SIZE * self.cfg.GPU_NUMBER
boundaries = [
n_batches_per_epoch * x
for x in np.array(self.cfg.LR_BOUNDARIES, dtype=np.int64)]
staged_lr = [self.cfg.LEARNING_RATE * x
for x in self.cfg.LR_STAGE]
learning_rate = tf.train.piecewise_constant(
global_step,
boundaries, staged_lr)
return tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=self.cfg.MOMENTUM)
elif opt_name == 'gd':
return tf.train.GradientDescentOptimizer(learning_rate_)
else:
raise ValueError('Wrong optimizer name!')
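# Hedged illustration (hypothetical config values, not from the source): with
# LEARNING_RATE=0.1, LR_BOUNDARIES=[80, 120], LR_STAGE=[1.0, 0.1, 0.01] and
# n_batches_per_epoch=500, the 'momentum' branch builds
#   boundaries = [40000, 60000]        # 500 * 80, 500 * 120
#   staged_lr  = [0.1, 0.01, 0.001]    # 0.1 * each stage factor
# so tf.train.piecewise_constant yields 0.1 before step 40000, 0.01 until
# step 60000, and 0.001 afterwards.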
def _inference(self,
inputs,
labels,
input_imgs,
num_class,
is_training=None):
"""Build inference graph.
Args:
inputs: input tensor.
- shape (batch_size, *input_size)
labels: labels tensor.
num_class: number of class of label.
input_imgs: ground truth images.
is_training: Whether or not the model is in training mode.
Return:
logits: output tensor of models
- shape: (batch_size, num_caps, vec_dim)
"""
loss, clf_loss, clf_preds, rec_loss, rec_imgs, self.model_arch_info = \
self.model_arch(self.cfg, inputs, labels, input_imgs,
num_class=num_class, is_training=is_training,
restore_vars_dict=self.restore_vars_dict)
# Accuracy
correct_pred = tf.equal(
tf.argmax(clf_preds, axis=1), tf.argmax(labels, axis=1))
accuracy = tf.reduce_mean(tf.cast(
correct_pred, tf.float32), name='accuracy')
return loss, accuracy, clf_loss, clf_preds, rec_loss, rec_imgs
def build_graph(self,
input_size=(None, None, None),
image_size=(None, None, None),
num_class=None,
n_train_samples=None):
"""Build the graph of CapsNet.
Args:
input_size: size of input tensor
image_size: the size of ground truth images, should be 3 dimensional
num_class: number of class of label
n_train_samples: number of train samples
Returns:
tuple of (global_step, train_graph, inputs, labels, train_op,
saver, summary_op, loss, accuracy, classifier_loss,
reconstruct_loss, reconstructed_images)
"""
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
# Get input placeholders
inputs, labels, input_imgs, is_training = \
self._get_inputs(input_size, num_class, image_size=image_size)
# Global step
global_step = tf.placeholder(tf.int16, name='global_step')
# Optimizer
optimizer = self._optimizer(global_step=global_step,
opt_name=self.cfg.OPTIMIZER,
n_train_samples=n_train_samples)
# Build inference Graph
loss, accuracy, clf_loss, clf_preds, rec_loss, rec_imgs = self._inference(
inputs, labels, input_imgs, num_class=num_class, is_training=is_training)
# Optimizer
train_op = optimizer.minimize(loss)
# Create a saver.
saver = tf.train.Saver(tf.global_variables(),
max_to_keep=self.cfg.MAX_TO_KEEP_CKP)
# Build the summary operation from the last tower summaries.
tf.summary.scalar('accuracy', accuracy)
tf.summary.scalar('loss', loss)
if self.cfg.WITH_REC:
tf.summary.scalar('clf_loss', clf_loss)
tf.summary.scalar('rec_loss', rec_loss)
summary_op = tf.summary.merge_all()
return global_step, train_graph, inputs, labels, input_imgs, \
is_training, train_op, saver, summary_op, loss, accuracy, \
clf_loss, clf_preds, rec_loss, rec_imgs
class ModelDistribute(Model):
def __init__(self,
cfg,
model_arch,
restore_vars_dict=None):
super(ModelDistribute, self).__init__(cfg, model_arch, restore_vars_dict)
self.batch_size = cfg.BATCH_SIZE // cfg.GPU_NUMBER
@staticmethod
def _average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
This function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the
gradient calculation for each tower.
- shape: [[(grad0_gpu0, var0_gpu0), ..., (gradM_gpu0, varM_gpu0)],
...,
[(grad0_gpuN, var0_gpuN), ..., (gradM_gpuN, varM_gpuN)]]
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Each grad_and_vars looks like:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for grad, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_grad = tf.expand_dims(grad, 0)
# Append on a 'tower' dimension which we will average over.
grads.append(expanded_grad)
# grads: [[grad0_gpu0], [grad0_gpu1], ..., [grad0_gpuN]]
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# The Variables are redundant because they are shared across towers.
# So we will just return the first tower's pointer to the Variable.
v = grad_and_vars[0][1] # varI_gpu0
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
# average_grads: [(grad0, var0), (grad1, var1), ..., (gradM, varM)]
return average_grads
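# Minimal sketch of the averaging above for one shared variable across two
# hypothetical towers (names are illustrative only):
#   tower_grads = [[(g_gpu0, v)], [(g_gpu1, v)]]
#   -> each gradient is expanded to shape (1, ...), concatenated on axis 0,
#      reduced with tf.reduce_mean, and returned as ((g_gpu0 + g_gpu1) / 2, v).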
def _average_metrics(self, loss_all, acc_all, clf_loss_all,
clf_preds_all, rec_loss_all, rec_imgs_all):
"""Calculate average of metrics.
Args:
loss_all: final losses of each tower, list
acc_all: accuracies of each tower, list
clf_loss_all: classifier losses of each tower, list
clf_preds_all: predictions of each tower, list
rec_loss_all: reconstruction losses of each tower, list
rec_imgs_all: reconstructed images of each tower, list of 4D tensor
Returns:
tuple of metrics
"""
n_tower = float(len(loss_all))
loss = tf.divide(
tf.add_n(loss_all), n_tower, name='total_loss')
accuracy = tf.divide(
tf.add_n(acc_all), n_tower, name='total_acc')
clf_preds = tf.concat(clf_preds_all, axis=0, name='total_clf_preds')
if self.cfg.WITH_REC:
clf_loss = tf.divide(
tf.add_n(clf_loss_all), n_tower, name='total_clf_loss')
rec_loss = tf.divide(
tf.add_n(rec_loss_all), n_tower, name='total_rec_loss')
rec_imgs = tf.concat(rec_imgs_all, axis=0, name='total_rec_imgs')
else:
clf_loss, rec_loss, rec_imgs = None, None, None
return loss, accuracy, clf_loss, clf_preds, rec_loss, rec_imgs
def _calc_on_gpu(self, gpu_idx, x_tower, y_tower,
imgs_tower, num_class, is_training, optimizer):
# Calculate the loss for one tower.
loss_tower, acc_tower, clf_loss_tower, clf_preds_tower, \
rec_loss_tower, rec_imgs_tower = self._inference(
x_tower, y_tower, imgs_tower,
num_class=num_class, is_training=is_training)
# Calculate the gradients on this tower.
grads_tower = optimizer.compute_gradients(loss_tower)
return grads_tower, loss_tower, acc_tower, clf_loss_tower, \
clf_preds_tower, rec_loss_tower, rec_imgs_tower
def build_graph(self,
input_size=(None, None, None),
image_size=(None, None, None),
num_class=None,
n_train_samples=None):
"""Build the graph of CapsNet.
Args:
input_size: size of input tensor
image_size: the size of ground truth images, should be 3 dimensional
num_class: number of class of label
n_train_samples: number of train samples
Returns:
tuple of (global_step, train_graph, inputs, labels, train_op,
saver, summary_op, loss, accuracy, classifier_loss,
reconstruct_loss, reconstructed_images)
"""
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default(), tf.device('/cpu:0'):
# Get inputs tensor
inputs, labels, input_imgs, is_training = \
self._get_inputs(input_size, num_class, image_size=image_size)
# Global step
global_step = tf.placeholder(tf.int16, name='global_step')
# Optimizer
optimizer = self._optimizer(opt_name=self.cfg.OPTIMIZER,
n_train_samples=n_train_samples,
global_step=global_step)
# Split data for each tower
x_splits_tower = tf.split(
axis=0, num_or_size_splits=self.cfg.GPU_NUMBER, value=inputs)
y_splits_tower = tf.split(
axis=0, num_or_size_splits=self.cfg.GPU_NUMBER, value=labels)
imgs_splits_tower = tf.split(
axis=0, num_or_size_splits=self.cfg.GPU_NUMBER, value=input_imgs)
# Calculate the gradients for each models tower.
grads_all, loss_all, acc_all, clf_loss_all, clf_preds_all, \
rec_loss_all, rec_imgs_all = [], [], [], [], [], [], []
for i in range(self.cfg.GPU_NUMBER):
utils.thin_line()
print('Building tower: ', i)
# Dequeues one batch for the GPU
x_tower, y_tower, imgs_tower = \
x_splits_tower[i], y_splits_tower[i], imgs_splits_tower[i]
with tf.variable_scope(tf.get_variable_scope(), reuse=bool(i != 0)):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i):
grads_tower, loss_tower, acc_tower, clf_loss_tower, \
clf_preds_tower, rec_loss_tower, rec_imgs_tower = \
self._calc_on_gpu(i, x_tower, y_tower, imgs_tower,
num_class, is_training, optimizer)
# Keep track of the gradients across all towers.
grads_all.append(grads_tower)
# Collect metrics of each tower
loss_all.append(loss_tower)
acc_all.append(acc_tower)
clf_loss_all.append(clf_loss_tower)
clf_preds_all.append(clf_preds_tower)
rec_loss_all.append(rec_loss_tower)
rec_imgs_all.append(rec_imgs_tower)
# Calculate the mean of each gradient.
grads = self._average_gradients(grads_all)
# Calculate means of metrics
loss, accuracy, clf_loss, clf_preds, rec_loss, rec_imgs = \
self._average_metrics(loss_all, acc_all, clf_loss_all,
clf_preds_all, rec_loss_all, rec_imgs_all)
# Show variables
utils.thick_line()
print('Variables: ')
for v in tf.global_variables():
print(v)
# Apply the gradients to adjust the shared variables.
apply_gradient_op = optimizer.apply_gradients(grads)
# Track the moving averages of all trainable variables.
if self.cfg.MOVING_AVERAGE_DECAY:
variable_averages = tf.train.ExponentialMovingAverage(
self.cfg.MOVING_AVERAGE_DECAY)
variables_averages_op = variable_averages.apply(
tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
else:
train_op = apply_gradient_op
# Create a saver.
saver = tf.train.Saver(tf.global_variables(),
max_to_keep=self.cfg.MAX_TO_KEEP_CKP)
# Build the summary operation from the last tower summaries.
tf.summary.scalar('accuracy', accuracy)
tf.summary.scalar('loss', loss)
if self.cfg.WITH_REC:
tf.summary.scalar('clf_loss', clf_loss)
tf.summary.scalar('rec_loss', rec_loss)
summary_op = tf.summary.merge_all()
return global_step, train_graph, inputs, labels, input_imgs, \
is_training, train_op, saver, summary_op, loss, accuracy, \
clf_loss, clf_preds, rec_loss, rec_imgs
class ModelMultiTasks(ModelDistribute):
def __init__(self,
cfg,
model_arch,
restore_vars_dict=None):
super(ModelMultiTasks, self).__init__(cfg, model_arch, restore_vars_dict)
self.batch_size = cfg.BATCH_SIZE // cfg.GPU_NUMBER // cfg.TASK_NUMBER
@staticmethod
def _sum_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
This function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the
gradient calculation for each tower.
- shape: [[(grad0_gpu0, var0_gpu0), ..., (gradM_gpu0, varM_gpu0)],
...,
[(grad0_gpuN, var0_gpuN), ..., (gradM_gpuN, varM_gpuN)]]
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
| |
0., np.inf)
validate(companion+'_rsuma', None, 0., np.inf)
validate(companion+'_cosi', 0., 0., 1.)
validate(companion+'_epoch', 0., -np.inf, np.inf)
validate(companion+'_period', 0., 0., np.inf)
validate(companion+'_sbratio_'+inst, 0., 0., np.inf)
validate(companion+'_K', 0., 0., np.inf)
validate(companion+'_f_s', 0., -1, 1)
validate(companion+'_f_c', 0., -1, 1)
validate('dil_'+inst, 0., -np.inf, np.inf)
#::: limb darkenings, u-space
validate('host_ldc_u1_'+inst, None, 0, 1)
validate('host_ldc_u2_'+inst, None, 0, 1)
validate('host_ldc_u3_'+inst, None, 0, 1)
validate('host_ldc_u4_'+inst, None, 0, 1)
validate(companion+'_ldc_u1_'+inst, None, 0, 1)
validate(companion+'_ldc_u2_'+inst, None, 0, 1)
validate(companion+'_ldc_u3_'+inst, None, 0, 1)
validate(companion+'_ldc_u4_'+inst, None, 0, 1)
#::: limb darkenings, q-space
validate('host_ldc_q1_'+inst, None, 0, 1)
validate('host_ldc_q2_'+inst, None, 0, 1)
validate('host_ldc_q3_'+inst, None, 0, 1)
validate('host_ldc_q4_'+inst, None, 0, 1)
validate(companion+'_ldc_q1_'+inst, None, 0, 1)
validate(companion+'_ldc_q2_'+inst, None, 0, 1)
validate(companion+'_ldc_q3_'+inst, None, 0, 1)
validate(companion+'_ldc_q4_'+inst, None, 0, 1)
#::: catch exceptions
if self.params[companion+'_period'] is None:
self.settings['do_not_phase_fold'] = True
#::: advanced parameters
validate(companion+'_a', None, 0., np.inf)
validate(companion+'_q', 1., 0., np.inf)
validate('didt_'+inst, None, -np.inf, np.inf)
validate('domdt_'+inst, None, -np.inf, np.inf)
validate('host_gdc_'+inst, None, 0., 1.)
validate('host_rotfac_'+inst, 1., 0., np.inf)
validate('host_hf_'+inst, 1.5, -np.inf, np.inf)
validate('host_bfac_'+inst, None, -np.inf, np.inf)
validate('host_heat_'+inst, None, -np.inf, np.inf)
validate('host_lambda', None, -np.inf, np.inf)
validate('host_vsini', None, -np.inf, np.inf)
validate(companion+'_gdc_'+inst, None, 0., 1.)
validate(companion+'_rotfac_'+inst, 1., 0., np.inf)
validate(companion+'_hf_'+inst, 1.5, -np.inf, np.inf)
validate(companion+'_bfac_'+inst, None, -np.inf, np.inf)
validate(companion+'_heat_'+inst, None, -np.inf, np.inf)
validate(companion+'_lambda', None, -np.inf, np.inf)
validate(companion+'_vsini', None, -np.inf, np.inf)
#::: special parameters (list type)
if 'host_spots_'+inst not in self.params:
self.params['host_spots_'+inst] = None
if companion+'_spots_'+inst not in self.params:
self.params[companion+'_spots_'+inst] = None
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: errors and jitters
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#TODO: add validations for all errors / jitters
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: baselines (and backwards compatibility)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#TODO: add validations for all baseline params
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: phase curve style: sine_series
# all in ppt
# A1 (beaming)
# B1 (atmospheric), can be split in thermal and reflected
# B2 (ellipsoidal)
# B3 (ellipsoidal 2nd order)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# if (self.settings['phase_curve_style'] == 'sine_series') and (inst in self.settings['inst_phot']):
if (inst in self.settings['inst_phot']):
validate(companion+'_phase_curve_A1_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_B1_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B1_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_B1t_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B1t_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_B1r_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B1r_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_B2_'+inst, None, -np.inf, 0.)
validate(companion+'_phase_curve_B3_'+inst, None, -np.inf, 0.)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: phase curve style: sine_physical
# A1 (beaming)
# B1 (atmospheric), can be split in thermal and reflected
# B2 (ellipsoidal)
# B3 (ellipsoidal 2nd order)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# if (self.settings['phase_curve_style'] == 'sine_physical') and (inst in self.settings['inst_phot']):
if (inst in self.settings['inst_phot']):
validate(companion+'_phase_curve_beaming_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_atmospheric_thermal_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_thermal_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_atmospheric_reflected_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_atmospheric_reflected_shift_'+inst, 0., -np.inf, np.inf)
validate(companion+'_phase_curve_ellipsoidal_'+inst, None, 0., np.inf)
validate(companion+'_phase_curve_ellipsoidal_2nd_'+inst, None, 0., np.inf)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: to avoid a bug/feature in ellc, if either property is >0, set the other to 1e-15 (not 0):
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if self.params[companion+'_heat_'+inst] is not None:
if (self.params[companion+'_sbratio_'+inst] == 0) and (self.params[companion+'_heat_'+inst] > 0):
self.params[companion+'_sbratio_'+inst] = 1e-15 #this is to avoid a bug/feature in ellc
if (self.params[companion+'_sbratio_'+inst] > 0) and (self.params[companion+'_heat_'+inst] == 0):
self.params[companion+'_heat_'+inst] = 1e-15 #this is to avoid a bug/feature in ellc
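#::: illustrative effect (companion/instrument names assumed): with companion
#::: 'b' and instrument 'TESS', params['b_sbratio_TESS']=0 together with
#::: params['b_heat_TESS']=0.1 is rewritten to params['b_sbratio_TESS']=1e-15
#::: so that ellc accepts the combination.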
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: luser proof: avoid conflicting/degenerate phase curve commands
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if (inst in self.settings['inst_phot']) and (self.settings['phase_curve'] == True):
phase_curve_model_1 = (self.params[companion+'_phase_curve_B1_'+inst] is not None)
phase_curve_model_2 = ((self.params[companion+'_phase_curve_B1t_'+inst] is not None) or (self.params[companion+'_phase_curve_B1r_'+inst] is not None))
phase_curve_model_3 = (self.params[companion+'_phase_curve_atmospheric_'+inst] is not None)
phase_curve_model_4 = ((self.params[companion+'_phase_curve_atmospheric_thermal_'+inst] is not None) or (self.params[companion+'_phase_curve_atmospheric_reflected_'+inst] is not None))
phase_curve_model_5 = ((self.params['host_bfac_'+inst] is not None) or (self.params['host_heat_'+inst] is not None) or \
(self.params['host_gdc_'+inst] is not None) or (self.settings['host_shape_'+inst]!='sphere') or \
(self.params[companion+'_bfac_'+inst] is not None) or (self.params[companion+'_heat_'+inst] is not None) or \
(self.params[companion+'_gdc_'+inst] is not None) or (self.settings[companion+'_shape_'+inst]!='sphere'))
if (phase_curve_model_1 + phase_curve_model_2 + phase_curve_model_3 + phase_curve_model_4 + phase_curve_model_5) > 1:
raise ValueError('You can use either\n'\
+'1) the sine_series phase curve model with "*_phase_curve_B1_*",\n'\
+'2) the sine_series phase curve model with "*_phase_curve_B1t_*" and "*_phase_curve_B1r_*", or\n'\
+'3) the sine_physical phase curve model with "*_phase_curve_atmospheric_*",\n'\
+'4) the sine_physical phase curve model with "*_phase_curve_atmospheric_thermal_*" and "*_phase_curve_atmospheric_reflected_*", or\n'\
+'5) the ellc_physical phase curve model with "*_bfac_*", "*_heat_*", "*_gdc_*" etc.\n'\
+'but you shall not pass with a mix&match.')
#==========================================================================
#::: coupled params
#==========================================================================
if 'coupled_with' in buf.dtype.names:
self.coupled_with = buf['coupled_with']
else:
self.coupled_with = [None]*len(self.allkeys)
for i, key in enumerate(self.allkeys):
if isinstance(self.coupled_with[i], str) and (len(self.coupled_with[i])>0):
self.params[key] = self.params[self.coupled_with[i]] #luser proof: automatically set the values of the params coupled to another param
buf['fit'][i] = 0 #luser proof: automatically set fit=0 for the params coupled to another param
#==========================================================================
#::: mark to be fitted params
#==========================================================================
self.ind_fit = (buf['fit']==1) #len(all rows in params.csv)
self.fitkeys = buf['name'][ self.ind_fit ] #len(ndim)
self.fitlabels = self.labels[ self.ind_fit ] #len(ndim)
self.fitunits = self.units[ self.ind_fit ] #len(ndim)
self.fittruths = self.truths[ self.ind_fit ] #len(ndim)
self.theta_0 = buf['value'][ self.ind_fit ] #len(ndim)
if 'init_err' in buf.dtype.names:
self.init_err = buf['init_err'][ self.ind_fit ] #len(ndim)
else:
self.init_err = 1e-8
self.bounds = [ str(item).split(' ') for item in buf['bounds'][ self.ind_fit ] ] #len(ndim)
for i, item in enumerate(self.bounds):
if item[0] in ['uniform', 'normal']:
self.bounds[i] = [ item[0], np.float(item[1]), np.float(item[2]) ]
elif item[0] in ['trunc_normal']:
self.bounds[i] = [ item[0], np.float(item[1]), np.float(item[2]), np.float(item[3]), np.float(item[4]) ]
else:
raise ValueError('Bounds have to be "uniform", "normal" or "trunc_normal". Input from "params.csv" was "'+self.bounds[i][0]+'".')
self.ndim = len(self.theta_0) #len(ndim)
#==========================================================================
#::: luser proof: check if all initial guesses lie within their bounds
#==========================================================================
#TODO: make this part of the validate() function
for th, b, key in zip(self.theta_0, self.bounds, self.fitkeys):
#:::: test bounds
if (b[0] == 'uniform') and not (b[1] <= th <= b[2]):
raise ValueError('The initial guess for '+key+' lies outside of its bounds.')
elif (b[0] == 'normal') and ( np.abs(th - b[1]) > 3*b[2] ):
answer = input('The initial guess for '+key+' lies more than 3 sigma from its prior\n'+\
'What do you want to do?\n'+\
'1 : continue at any sacrifice \n'+\
'2 : stop and let me fix the params.csv file \n')
if str(answer) == '1':
pass
else:
raise ValueError('User aborted the run.')
elif (b[0] == 'trunc_normal') and not (b[1] <= th <= b[2]):
raise ValueError('The initial guess for '+key+' lies outside of its bounds.')
elif (b[0] == 'trunc_normal') and ( np.abs(th - b[3]) > 3*b[4] ):
answer = input('The initial guess for '+key+' lies more than 3 sigma from its prior\n'+\
'What do you want to do?\n'+\
'1 : continue at any sacrifice \n'+\
'2 : stop and let me fix the params.csv file \n')
if str(answer) == '1':
pass
else:
raise ValueError('User aborted the run.')
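#::: illustrative example (assumed params.csv syntax): a bounds cell of
#::: "uniform 0 1" is parsed into ['uniform', 0.0, 1.0], while
#::: "trunc_normal 0 1 0.5 0.1" becomes ['trunc_normal', 0.0, 1.0, 0.5, 0.1]
#::: (hard lower/upper bounds, then prior mean and sigma).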
###############################################################################
#::: load data
###############################################################################
def load_data(self):
'''
Example:
-------
A lightcurve is stored as
data['TESS']['time'], data['TESS']['flux']
A RV curve is stored as
data['HARPS']['time'], data['HARPS']['rv']
'''
self.fulldata = {}
self.data = {}
#======================================================================
#::: photometry
#======================================================================
for inst in self.settings['inst_phot']:
try:
time, flux, flux_err, custom_series = np.genfromtxt(os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:4]
except:
time, flux, flux_err = np.genfromtxt(os.path.join(self.datadir,inst+'.csv'), delimiter=',', dtype=float, unpack=True)[0:3]
custom_series = np.zeros_like(time)
if any(np.isnan(time*flux*flux_err*custom_series)):
raise ValueError('There are NaN values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if any(flux_err==0):
raise ValueError('There are uncertainties with values of 0 in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if any(flux_err<0):
raise ValueError('There are uncertainties with negative values in "'+inst+'.csv". Please make sure everything is fine with your data, then exclude these rows from the file and restart.')
if not all(np.diff(time)>=0):
raise ValueError('The time array in "'+inst+'.csv" is not sorted. Please make sure the file is not corrupted, then sort it by time and restart.')
elif not all(np.diff(time)>0):
warnings.warn('There are repeated time stamps in the time array in "'+inst+'.csv". Please make sure the file is not corrupted (e.g. insufficient precision in your time stamps).')
# overwrite = str(input('There are repeated time stamps in the time array in "'+inst+'.csv". Please make sure the file is not corrupted (e.g. insuffiecient precision in your time | |
#!/usr/bin/env python
import sys
import os
import socket
import argparse
import subprocess
import signal
import fcntl
import stat
criu_bin = '../../criu/criu'
sk_type_s = {
socket.SOCK_STREAM: "S",
socket.SOCK_DGRAM: "D",
}
# Actions that can be done by test. Actions are not only syscall
# names to call, but also arguments with which to do it
#
# Each action consists of
# - arguments, e.g. type of socket, or socket id to work on
# - act() method which just generates a record
# - do() method, that actually does what's required
# - show() method to return the string description of what's done
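# For illustration only (not part of the generated tests): a typical action is
# exercised in two passes, e.g.
#   a = act_listen(sk_id)
#   a.act(state_model)    # update the abstract state description
#   a.do(state_model)     # perform the real listen() on the live socket
#   print(a.show())       # "listen(<sk_id>)"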
def mk_socket(st, typ):
st.sk_id += 1
sk = sock(st.sk_id, typ)
st.add_socket(sk)
return sk
class act_socket:
def __init__(self, typ):
self.typ = typ
def act(self, st):
sk = mk_socket(st, self.typ)
self.sk_id = sk.sk_id
def do(self, st):
sk = socket.socket(socket.AF_UNIX, self.typ, 0)
st.real_sockets[self.sk_id] = sk
def show(self):
return 'socket(%s) = %d' % (sk_type_s[self.typ], self.sk_id)
class act_close:
def __init__(self, sk_id):
self.sk_id = sk_id
def act(self, st):
sk = st.get_socket(self.sk_id)
st.del_socket(sk)
for ic in sk.icons:
sk = st.get_socket(ic)
st.del_socket(sk)
def do(self, st):
sk = st.real_sockets.pop(self.sk_id)
sk.close()
def show(self):
return 'close(%d)' % self.sk_id
class act_listen:
def __init__(self, sk_id):
self.sk_id = sk_id
def act(self, st):
sk = st.get_socket(self.sk_id)
sk.listen = True
def do(self, st):
sk = st.real_sockets[self.sk_id]
sk.listen(10)
def show(self):
return 'listen(%d)' % self.sk_id
class act_bind:
def __init__(self, sk_id, name_id):
self.sk_id = sk_id
self.name_id = name_id
def act(self, st):
sk = st.get_socket(self.sk_id)
sk.name = self.name_id
def do(self, st):
sk = st.real_sockets[self.sk_id]
sk.bind(sock.real_name_for(self.name_id))
def show(self):
return 'bind(%d, $name-%d)' % (self.sk_id, self.name_id)
class act_connect:
def __init__(self, sk_id, listen_sk_id):
self.sk_id = sk_id
self.lsk_id = listen_sk_id
def act(self, st):
sk = st.get_socket(self.sk_id)
if st.sk_type == socket.SOCK_STREAM:
lsk = st.get_socket(self.lsk_id)
psk = mk_socket(st, socket.SOCK_STREAM)
psk.visible = False
sk.peer = psk.sk_id
psk.peer = sk.sk_id
psk.name = lsk.name
lsk.icons.append(psk.sk_id)
lsk.icons_seq += 1
else:
sk.peer = self.lsk_id
psk = st.get_socket(self.lsk_id)
psk.icons_seq += 1
def do(self, st):
sk = st.real_sockets[self.sk_id]
sk.connect(sock.real_name_for(self.lsk_id))
def show(self):
return 'connect(%d, $name-%d)' % (self.sk_id, self.lsk_id)
class act_accept:
def __init__(self, sk_id):
self.sk_id = sk_id
def act(self, st):
lsk = st.get_socket(self.sk_id)
iid = lsk.icons.pop(0)
nsk = st.get_socket(iid)
nsk.visible = True
self.nsk_id = nsk.sk_id
def do(self, st):
sk = st.real_sockets[self.sk_id]
nsk, ai = sk.accept()
if self.nsk_id in st.real_sockets:
raise Exception("SK ID conflict")
st.real_sockets[self.nsk_id] = nsk
def show(self):
return 'accept(%d) = %d' % (self.sk_id, self.nsk_id)
class act_sendmsg:
def __init__(self, sk_id, to_id):
self.sk_id = sk_id
self.to_id = to_id
self.direct_send = None
def act(self, st):
sk = st.get_socket(self.sk_id)
msg = (sk.sk_id, sk.outseq)
self.msg_id = sk.outseq
sk.outseq += 1
psk = st.get_socket(self.to_id)
psk.inqueue.append(msg)
self.direct_send = (sk.peer == psk.sk_id)
def do(self, st):
sk = st.real_sockets[self.sk_id]
msgv = act_sendmsg.msgval(self.msg_id)
if self.direct_send:
sk.send(msgv)
else:
sk.sendto(msgv, sock.real_name_for(self.to_id))
def show(self):
return 'send(%d, %d, $message-%d)' % (self.sk_id, self.to_id,
self.msg_id)
@staticmethod
def msgval(msgid, pref=''):
return '%sMSG%d' % (pref, msgid)
#
# Description of a socket
#
class sock:
def __init__(self, sk_id, sock_type):
# ID of a socket. Since states and sockets are cloned
# while we scan the tree of states the only valid way
# to address a socket is to find one by ID.
self.sk_id = sk_id
# The socket.SOCK_FOO value
self.sk_type = sock_type
# Sockets that haven't yet been accept()-ed are part of the
# state, but the user cannot operate on them. This
# invisibility also contributes to the state description, since
# a connection to a not-yet-accepted socket is not the same
# as a connection to an accepted one.
self.visible = True
# The listen() was called.
self.listen = False
# The bind() was called. Also set by accept(), the name
# inherits from listener.
self.name = None
# The connect() was called. Set on two sockets when the
# connect() is called.
self.peer = None
# Progress on accepting connections. Used to check when
# it's OK to close the socket (see comment below).
self.icons_seq = 0
# List of IDs of sockets that can be accept()-ed
self.icons = []
# Number to generate message contents.
self.outseq = 0
# Incoming queue of messages.
self.inqueue = []
def clone(self):
sk = sock(self.sk_id, self.sk_type)
sk.visible = self.visible
sk.listen = self.listen
sk.name = self.name
sk.peer = self.peer
sk.icons_seq = self.icons_seq
sk.icons = list(self.icons)
sk.outseq = self.outseq
sk.inqueue = list(self.inqueue)
return sk
def get_actions(self, st):
if not self.visible:
return []
if st.sk_type == socket.SOCK_STREAM:
return self.get_stream_actions(st)
else:
return self.get_dgram_actions(st)
def get_send_action(self, to, st):
# However, if peer has a message from us at
# the queue tail, sending a new one doesn't
# really make sense
want_msg = True
if len(to.inqueue) != 0:
lmsg = to.inqueue[-1]
if lmsg[0] == self.sk_id:
want_msg = False
if want_msg:
return [act_sendmsg(self.sk_id, to.sk_id)]
else:
return []
def get_stream_actions(self, st):
act_list = []
# Any socket can be closed, but closing a socket
# that hasn't contributed to some new states is
# just a waste of time, so we close only connected
# sockets or listeners that have at least one
# incoming connection pending or served
if self.listen:
if self.icons:
act_list.append(act_accept(self.sk_id))
if self.icons_seq:
act_list.append(act_close(self.sk_id))
elif self.peer:
act_list.append(act_close(self.sk_id))
# Connected sockets can send and receive messages
# But receiving seems not to produce any new states,
# so only sending
# Also sending to a closed socket doesn't work
psk = st.get_socket(self.peer, True)
if psk:
act_list += self.get_send_action(psk, st)
else:
for psk in st.sockets:
if psk.listen and psk.name:
act_list.append(act_connect(self.sk_id, psk.sk_id))
# Listening on a not-bound socket is prohibited, as
# is binding a socket that is already listening
if not self.name:
# TODO: support for file paths (see real_name_for)
# TODO: these names can overlap each other
act_list.append(act_bind(self.sk_id, self.sk_id))
else:
act_list.append(act_listen(self.sk_id))
return act_list
def get_dgram_actions(self, st):
act_list = []
# Dgram socket can bind at any time
if not self.name:
act_list.append(act_bind(self.sk_id, self.sk_id))
# Can connect to peer-less sockets
for psk in st.sockets:
if psk == self:
continue
if psk.peer != None and psk.peer != self.sk_id:
# Peer by someone else, can do nothing
continue
# Peer-less psk or having us as peer
# We can connect to or send messages
if psk.name and self.peer != psk.sk_id:
act_list.append(act_connect(self.sk_id, psk.sk_id))
if psk.name or self.peer == psk.sk_id:
act_list += self.get_send_action(psk, st)
if self.outseq != 0 or self.icons_seq != 0:
act_list.append(act_close(self.sk_id))
return act_list
@staticmethod
def name_of(sk):
if not sk:
return 'X'
elif not sk.visible:
return 'H'
elif sk.name:
return 'B'
else:
return 'A'
@staticmethod
def real_name_for(sk_id):
return "\0" + "CRSK%d" % sk_id
# The describe() generates a string that represents
# a state of a socket. Called by state.describe(), see
# comment there about what description is.
def describe(self, st):
dsc = '%s' % sk_type_s[self.sk_type]
dsc += sock.name_of(self)
if self.listen:
dsc += 'L'
if self.peer:
psk = st.get_socket(self.peer, True)
dsc += '-C%s' % sock.name_of(psk)
if self.icons:
i_dsc = ''
for c in self.icons:
psk = st.get_socket(c)
psk = st.get_socket(psk.peer, True)
i_dsc += sock.name_of(psk)
dsc += '-I%s' % i_dsc
if self.inqueue:
froms = set()
for m in self.inqueue:
froms.add(m[0])
q_dsc = ''
for f in froms:
fsk = st.get_socket(f, True)
q_dsc += sock.name_of(fsk)
dsc += '-M%s' % q_dsc
return dsc
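# Illustrative (hypothetical) description strings: a bound, listening stream
# socket with one not-yet-accepted connection from an unbound visible peer
# describes itself as "SBL-IA"; a bound dgram socket connected to such a peer
# with one pending message from it would read "DB-CA-MA".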
class state:
def __init__(self, max_sockets, sk_type):
self.sockets = []
self.sk_id = 0
self.steps = []
self.real_sockets = {}
self.sockets_left = max_sockets
self.sk_type = sk_type
def add_socket(self, sk):
self.sockets.append(sk)
def del_socket(self, sk):
self.sockets.remove(sk)
def get_socket(self, sk_id, can_be_null=False):
for sk in self.sockets:
if sk.sk_id == sk_id:
return sk
if not can_be_null:
raise Exception("%d socket not in list" % sk_id)
return None
def get_actions(self):
act_list = []
# Any socket in the state we can change it
for sk in self.sockets:
act_list += sk.get_actions(self)
if self.sockets_left > 0:
act_list.append(act_socket(self.sk_type))
self.sockets_left -= 1
return act_list
def clone(self):
nst = state(self.sockets_left, self.sk_type)
for sk in self.sockets:
nst.sockets.append(sk.clone())
nst.sk_id = self.sk_id
nst.steps = list(self.steps)
return nst
# Generates a textual description of a state. Different states
# may have the same description, e.g. if we have two sockets and
# only one of them is in the listen state, we don't care which
# one it is. At the same time, really different states
# shouldn't map to the same string.
def describe(self):
sks = [x.describe(self) for x in self.sockets]
| |
<gh_stars>0
#!/usr/bin/env python3
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2018
# [] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# @package OpTestHMC
# This class can contain common functions which are useful for
# FSP_PHYP (HMC) platforms
import os
import sys
import time
import pexpect
import shlex
import OpTestLogger
from common.OpTestError import OpTestError
from common.OpTestSSH import OpTestSSH
from common.OpTestUtil import OpTestUtil
from common.Exceptions import CommandFailed
from common import OPexpect
from .OpTestConstants import OpTestConstants as BMC_CONST
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
WAITTIME = 15
SYS_WAITTIME = 200
BOOTTIME = 500
STALLTIME = 5
class OpHmcState():
'''
This class is used as an enum as to what state op-test *thinks* the LPAR is in.
These states are used to check status of a LPAR.
'''
NOT_ACTIVE = 'Not Activated'
RUNNING = 'Running'
SHUTTING = 'Shutting Down'
OF = 'Open Firmware'
STARTING = 'Starting'
NA = 'Not Available'
class OpManagedState():
'''
This class is used as an enum as to what state op-test *thinks* the managed
system is in. These states are used to check status of managed system.
'''
OPERATING = 'Operating'
INIT = 'Initializing'
OFF = 'Power Off'
PROG_OFF = 'Power Off In Progress'
class ConsoleState():
DISCONNECTED = 0
CONNECTED = 1
class Spawn(OPexpect.spawn):
def __init__(self, command, args=[], maxread=8000,
searchwindowsize=None, logfile=None, cwd=None, env=None,
ignore_sighup=False, echo=True, preexec_fn=None,
encoding='utf-8', codec_errors='ignore', dimensions=None,
failure_callback=None, failure_callback_data=None):
super(Spawn, self).__init__(command, args=args,
maxread=maxread,
searchwindowsize=searchwindowsize,
logfile=logfile,
cwd=cwd, env=env,
ignore_sighup=ignore_sighup,
encoding=encoding,
codec_errors=codec_errors)
def sendline(self, command=''):
# The HMC console requires an enter to be sent with each sendline
super(Spawn, self).sendline(command)
self.send("\r")
class HMCUtil():
'''
Utility and functions of HMC object
'''
def __init__(self, hmc_ip, user_name, password, scratch_disk="", proxy="",
logfile=sys.stdout, managed_system=None, lpar_name=None, prompt=None,
block_setup_term=None, delaybeforesend=None, timeout_factor=None,
lpar_prof=None, lpar_vios=None, lpar_user=None, lpar_password=<PASSWORD>,
check_ssh_keys=False, known_hosts_file=None, tgt_managed_system=None,
tgt_lpar=None):
self.hmc_ip = hmc_ip
self.user = user_name
self.passwd = password
self.logfile = logfile
self.mg_system = managed_system
self.tgt_mg_system = tgt_managed_system
self.tgt_lpar = tgt_lpar
self.check_ssh_keys = check_ssh_keys
self.known_hosts_file = known_hosts_file
self.lpar_name = lpar_name
self.lpar_prof = lpar_prof
self.lpar_user = lpar_user
self.lpar_password = <PASSWORD>
self.lpar_vios = lpar_vios
self.util = OpTestUtil()
self.prompt = prompt
self.expect_prompt = self.util.build_prompt(prompt) + "$"
self.ssh = OpTestSSH(hmc_ip, user_name, password, logfile=self.logfile,
check_ssh_keys=check_ssh_keys,
known_hosts_file=known_hosts_file,
block_setup_term=block_setup_term)
self.scratch_disk = scratch_disk
self.proxy = proxy
self.scratch_disk_size = None
self.delaybeforesend = delaybeforesend
self.system = None
# OpTestUtil instance is NOT conf's
self.pty = None
# allows caller specific control of when to block setup_term
self.block_setup_term = block_setup_term
# tells setup_term to not throw exceptions, like when system off
self.setup_term_quiet = 0
# flags the object to abandon setup_term operations, like when system off
self.setup_term_disable = 0
# functional simulators are very slow, so multiply all default timeouts by this factor
self.timeout_factor = timeout_factor
# state tracking, reset on boot and state changes
# console tracking done on System object for the system console
self.PS1_set = -1
self.LOGIN_set = -1
self.SUDO_set = -1
def deactivate_lpar_console(self):
self.ssh.run_command("rmvterm -m %s -p %s" %
(self.mg_system, self.lpar_name), timeout=10)
def poweroff_system(self):
if self.get_system_state() != OpManagedState.OPERATING:
raise OpTestError('Managed System not in Operating state')
self.ssh.run_command("chsysstate -m %s -r sys -o off" % self.mg_system)
self.wait_system_state(OpManagedState.OFF)
def poweron_system(self):
if self.get_system_state() != OpManagedState.OFF:
raise OpTestError('Managed System is not in Power off state!')
self.ssh.run_command("chsysstate -m %s -r sys -o on" % self.mg_system)
self.wait_system_state()
if self.lpar_vios:
log.debug("Starting VIOS %s", self.lpar_vios)
self.poweron_lpar(vios=True)
def poweroff_lpar(self):
if self.get_lpar_state() in [OpHmcState.NOT_ACTIVE, OpHmcState.NA]:
log.info('LPAR Already powered-off!')
return
self.ssh.run_command("chsysstate -m %s -r lpar -n %s -o shutdown --immed" %
(self.mg_system, self.lpar_name))
self.wait_lpar_state(OpHmcState.NOT_ACTIVE)
def poweron_lpar(self, vios=False):
if self.get_lpar_state(vios) == OpHmcState.RUNNING:
log.info('LPAR Already powered on!')
return BMC_CONST.FW_SUCCESS
lpar_name = self.lpar_name
if vios:
lpar_name = self.lpar_vios
cmd = "chsysstate -m %s -r lpar -n %s -o on" % (self.mg_system, lpar_name)
if self.lpar_prof:
cmd = "%s -f %s" % (cmd, self.lpar_prof)
self.wait_lpar_state(OpHmcState.NOT_ACTIVE, vios=vios)
self.ssh.run_command(cmd)
self.wait_lpar_state(vios=vios)
time.sleep(STALLTIME)
return BMC_CONST.FW_SUCCESS
def dumprestart_lpar(self):
if self.get_lpar_state() in [OpHmcState.NOT_ACTIVE, OpHmcState.NA]:
log.info('LPAR Already powered-off!')
return
self.ssh.run_command("chsysstate -m %s -r lpar -n %s -o dumprestart" %
(self.mg_system, self.lpar_name))
self.wait_lpar_state()
def restart_lpar(self):
if self.get_lpar_state() in [OpHmcState.NOT_ACTIVE, OpHmcState.NA]:
log.info('LPAR Already powered-off!')
return
self.ssh.run_command("chsysstate -m %s -r lpar -n %s -o shutdown --immed --restart" %
(self.mg_system, self.lpar_name))
self.wait_lpar_state()
def get_lpar_cfg(self):
out = self.ssh.run_command("lssyscfg -r prof -m %s --filter 'lpar_names=%s'" %
(self.mg_system, self.lpar_name))[-1]
cfg_dict = {}
splitter = shlex.shlex(out)
splitter.whitespace += ','
splitter.whitespace_split = True
for values in list(splitter):
data = values.split("=")
key = data[0]
value = data[1]
cfg_dict[key] = value
return cfg_dict
def set_lpar_cfg(self, arg_str):
if not self.lpar_prof:
raise OpTestError("Profile needs to be defined to use this method")
self.ssh.run_command("chsyscfg -r prof -m %s -p %s -i 'lpar_name=%s,name=%s,%s' --force" %
(self.mg_system, self.lpar_name, self.lpar_name, self.lpar_prof,arg_str))
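# Hypothetical usage sketch (attribute names depend on the profile and are
# assumptions, not taken from this file):
#   hmc.set_lpar_cfg("min_mem=4096,desired_mem=8192,max_mem=16384")
# simply appends the comma-separated attributes to the chsyscfg -i string.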
def get_lpar_state(self, vios=False):
lpar_name = self.lpar_name
if vios:
lpar_name = self.lpar_vios
state = self.ssh.run_command(
'lssyscfg -m %s -r lpar --filter lpar_names=%s -F state' % (self.mg_system, lpar_name))[-1]
ref_code = self.ssh.run_command(
'lsrefcode -m %s -r lpar --filter lpar_names=%s -F refcode' % (self.mg_system, lpar_name))[-1]
if state == 'Running':
if 'Linux' in ref_code or not ref_code:
return 'Running'
else:
return 'Booting'
return state
def get_system_state(self):
state = self.ssh.run_command(
'lssyscfg -m %s -r sys -F state' % self.mg_system)
return state[-1]
def wait_lpar_state(self, exp_state=OpHmcState.RUNNING, vios=False, timeout=WAITTIME):
state = self.get_lpar_state(vios)
count = 0
while state != exp_state:
state = self.get_lpar_state(vios)
log.info("Current state: %s", state)
time.sleep(timeout)
count += 1
if count > 120:
raise OpTestError("Time exceeded for reaching %s" % exp_state)
def wait_system_state(self, exp_state=OpManagedState.OPERATING, timeout=SYS_WAITTIME):
state = self.get_system_state()
count = 0
while state != exp_state:
state = self.get_system_state()
log.info("Current state: %s", state)
time.sleep(timeout)
count += 1
if count > 60:
raise OpTestError("Time exceeded for reaching %s" % exp_state)
def is_lpar_in_managed_system(self, mg_system=None, lpar_name=None):
lpar_list = self.ssh.run_command(
'lssyscfg -r lpar -m %s -F name' % mg_system)
if lpar_name in lpar_list:
log.info("%s lpar found in managed system %s" % (mg_system, lpar_name))
return True
return False
def migrate_lpar(self, src_mg_system=None, dest_mg_system=None):
if src_mg_system == None or dest_mg_system == None:
raise OpTestError("Source and Destination Managed System required for LPM")
if not self.is_lpar_in_managed_system(src_mg_system, self.lpar_name):
raise OpTestError("Lpar %s not found in managed system %s" % (self.lpar_name, src_mg_system))
self.ssh.run_command(
'migrlpar -o v -m %s -t %s -p %s' % (src_mg_system, dest_mg_system, self.lpar_name))
self.ssh.run_command(
'migrlpar -o m -m %s -t %s -p %s' % (src_mg_system, dest_mg_system, self.lpar_name))
if self.is_lpar_in_managed_system(dest_mg_system, self.lpar_name):
log.info("Migration of lpar %s from %s to %s is successfull" %
(self.lpar_name, src_mg_system, dest_mg_system))
self.mg_system = dest_mg_system
return True
log.info("Migration of lpar %s from %s to %s failed" %
(self.lpar_name, src_mg_system, dest_mg_system))
return False
def run_command_ignore_fail(self, command, timeout=60, retry=0):
return self.ssh.run_command_ignore_fail(command, timeout*self.timeout_factor, retry)
def run_command(self, i_cmd, timeout=15):
return self.ssh.run_command(i_cmd, timeout)
class OpTestHMC(HMCUtil):
'''
This class contains the modules to perform various HMC operations on an LPAR.
The Host IP, username and password of the HMC have to be passed to the class initially
while creating the object for the class.
'''
def __init__(self, hmc_ip, user_name, password, scratch_disk="", proxy="",
logfile=sys.stdout, managed_system=None, lpar_name=None, prompt=None,
block_setup_term=None, delaybeforesend=None, timeout_factor=1,
lpar_prof=None, lpar_vios=None, lpar_user=None, lpar_password=<PASSWORD>,
check_ssh_keys=False, known_hosts_file=None, tgt_managed_system=None,
tgt_lpar=None):
super(OpTestHMC, self).__init__(hmc_ip, user_name, password, scratch_disk,
proxy, logfile, managed_system, lpar_name, prompt,
block_setup_term, delaybeforesend, timeout_factor,
lpar_prof, lpar_vios, lpar_user, lpar_password,
check_ssh_keys, known_hosts_file, tgt_managed_system,
tgt_lpar)
self.console = HMCConsole(hmc_ip, user_name, password, managed_system, lpar_name,
lpar_vios, lpar_prof, lpar_user, lpar_password)
def set_system(self, system):
self.system = system
self.ssh.set_system(system)
self.console.set_system(system)
def get_rest_api(self):
return None
def has_os_boot_sensor(self):
return False
def has_occ_active_sensor(self):
return False
def has_host_status_sensor(self):
return False
def has_inband_bootdev(self):
return False
def get_host_console(self):
return self.console
class HMCConsole(HMCUtil):
"""
HMCConsole Class
Methods to manage the console of LPAR
"""
def __init__(self, hmc_ip, user_name, password, managed_system, lpar_name,
lpar_vios, lpar_prof, lpar_user, lpar_password,
block_setup_term=None, delaybeforesend=None, timeout_factor=1,
logfile=sys.stdout, prompt=None, scratch_disk="",
check_ssh_keys=False, known_hosts_file=None, proxy=""):
self.logfile = logfile
self.hmc_ip = hmc_ip
self.user = user_name
self.passwd = password
self.mg_system = managed_system
self.util = OpTestUtil()
self.expect_prompt = self.util.build_prompt(prompt) + "$"
self.lpar_name = lpar_name
self.lpar_vios = lpar_vios
self.lpar_prof = lpar_prof
self.lpar_user = lpar_user
self.lpar_password = <PASSWORD>
self.scratch_disk = scratch_disk
self.proxy = proxy
self.state = ConsoleState.DISCONNECTED
self.delaybeforesend | |
#!/usr/bin/env python3
# we're using python 3.x style print but want it to work in python 2.x,
from __future__ import print_function
import os
import argparse
import sys
import subprocess
import shutil
import threading
# from collections import defaultdict
# from subprocess import CalledProcessError
# make sure scripts/internal is on the pythonpath.
sys.path = [os.path.abspath(os.path.dirname(sys.argv[0])) + "/internal"] + sys.path
from prune_size_model import PruneSizeModel
# for ExitProgram and RunCommand
from pocolm_common import ExitProgram
from pocolm_common import RunCommand
from pocolm_common import GetCommandStdout
from pocolm_common import LogMessage
parser = argparse.ArgumentParser(description="This script takes an lm-dir, as produced by make_lm_dir.py, "
"that should not have the counts split up into pieces, and it prunes "
"the counts and writes out to a new lm-dir.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--steps", type=str,
default='prune*0.25 EM EM EM prune*0.5 EM EM EM prune*1.0 EM EM EM prune*1.0 EM EM EM',
help='This string specifies a sequence of steps in the pruning sequence.'
'prune*X, with X <= 1.0, tells it to prune with X times the threshold '
'specified with the --final-threshold option. EM specifies one iteration of '
'E-M on the model. ')
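# Example (a shortened variant of the default above): --steps 'prune*0.25 EM EM prune*1.0 EM'
# prunes at 0.25x and then 1.0x of --final-threshold, with E-M iterations in between.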
parser.add_argument("--final-threshold", type=float,
help="Threshold for pruning, e.g. 0.5, 1.0, 2.0, 4.0.... "
"larger threshold will give you more highly-pruned models."
"Threshold is interpreted as entropy-change times overall "
"weighted data count, for each parameter. It should be "
"larger if you have more data, assuming you want the "
"same-sized models. "
"This is only relevant if --target-num-ngrams is not specified.")
parser.add_argument("--target-num-ngrams", type=int, default=0,
help="Target num-ngrams of final LM after pruning. "
"If setting this to a positive value, the --steps would be "
"ignored and a few steps may be worked out util the num-ngrams "
"of pruned LM match the target-num-ngrams.")
parser.add_argument("--target-lower-threshold", type=int,
help="lower tolerance of target num-ngrams. Default value is"
"5% relativly less than target num-ngrams. "
"This is only relevant if --target-num-ngrams is specified.")
parser.add_argument("--target-upper-threshold", type=int,
help="upper tolerance of target num-ngrams. Default value is"
"5% relativly larger than target num_ngrams. "
"This is only relevant if --target-num-ngrams is specified.")
parser.add_argument("--initial-threshold", type=float, default=0.25,
help="Initial threshold for the pruning steps starting from. "
"This is only relevant if --target-num-ngrams is specified.")
parser.add_argument("--max-iter", type=int, default=20,
help="Max iterations allowed to find the threshold for target-num-ngrams LM. "
"This is only relevant if --target-num-ngrams is specified.")
parser.add_argument("--verbose", type=str, default='false',
choices=['true', 'false'],
help="If true, print commands as we execute them.")
parser.add_argument("--cleanup", type=str, choices=['true', 'false'],
default='true', help='Set this to false to disable clean up of the '
'work directory.')
parser.add_argument("--remove-zeros", type=str, choices=['true', 'false'],
default='true', help='Set this to false to disable an optimization. '
'Only useful for debugging purposes.')
parser.add_argument("--check-exact-divergence", type=str, choices=['true', 'false'],
default='true', help='')
parser.add_argument("--max-memory", type=str, default='',
help="Memory limitation for sort.")
parser.add_argument("lm_dir_in",
help="Source directory, for the input language model.")
parser.add_argument("lm_dir_out",
help="Output directory where the language model is created.")
args = parser.parse_args()
# Add the script dir and the src dir to the path.
os.environ['PATH'] = (os.environ['PATH'] + os.pathsep +
os.path.abspath(os.path.dirname(sys.argv[0])) + os.pathsep +
os.path.abspath(os.path.dirname(sys.argv[0])) + "/../src")
if os.system("validate_lm_dir.py " + args.lm_dir_in) != 0:
ExitProgram("failed to validate input LM-dir")
# verify the input string max_memory
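# Accepted forms, as enforced by the checks below: a positive integer optionally
# followed by one of 'b', 'B', '%', 'k', 'K', 'm', 'M', 'g' or 'G', e.g. '2G', '500M', '10%'.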
if args.max_memory != '':
# valid string max_memory must have at least two items
if len(args.max_memory) >= 2:
s = args.max_memory
# valid string max_memory can be formatted as:
# "a positive integer + a letter or a '%'" or "a positive integer"
# the unit of memory size can also be 'T', 'P', 'E', 'Z', or 'Y'. They
# are not included here considering their rare use in practice
if s[-1] in ['b', 'B', '%', 'k', 'K', 'm', 'M', 'g', 'G'] or s[-1].isdigit():
for x in s[:-1]:
if not x.isdigit():
sys.exit("prune_lm_dir.py: --max-memory should be formatted as "
"'a positive integer' or 'a positive integer appended "
"with 'b', 'K', 'M','G', or '%''.")
# max memory size must be larger than zero
if int(s[:-1]) == 0:
sys.exit("prune_lm_dir.py: --max-memory must be > 0 {unit}.".format(
unit=s[-1]))
else:
sys.exit("prune_lm_dir.py: the format of string --max-memory is not correct.")
else:
sys.exit("prune_lm_dir.py: the lenght of string --max-memory must >= 2.")
if args.max_memory[-1] == 'B':  # sort does not seem to recognize 'B'
    # strings are immutable, so rebuild the value rather than assigning to an index
    args.max_memory = args.max_memory[:-1] + 'b'
num_splits = None
if os.path.exists(args.lm_dir_in + "/num_splits"):
f = open(args.lm_dir_in + "/num_splits", encoding="utf-8")
num_splits = int(f.readline())
f.close()
work_dir = args.lm_dir_out + "/work"
if args.target_num_ngrams > 0:
if args.target_lower_threshold is not None:
if args.target_lower_threshold >= args.target_num_ngrams:
ExitProgram("--target-lower-threshold[{0}] should be less than "
"--target-num-ngrams[{1}].".format(
args.target_lower_threshold, args.target_num_ngrams))
else:
args.target_lower_threshold = int(0.95 * args.target_num_ngrams)
if args.target_upper_threshold is not None:
if args.target_upper_threshold <= args.target_num_ngrams:
ExitProgram("--target-upper-threshold[{0}] should be larger than "
"--target-num-ngrams[{1}].".format(
args.target_upper_threshold, args.target_num_ngrams))
else:
args.target_upper_threshold = int(1.05 * args.target_num_ngrams)
if args.max_iter <= 1:
ExitProgram("--max-iter must be bigger than 1, got: " + str(args.max_iter))
steps = []
else:
if args.final_threshold <= 0.0:
ExitProgram("--final-threshold must be positive, got: " + str(args.final_threshold))
steps = args.steps.split()
if len(steps) == 0:
ExitProgram("'steps' cannot be empty.")
# set the memory restriction for "sort"
sort_mem_opt = ''
if args.max_memory != '':
sort_mem_opt = ("--buffer-size={0} ".format(args.max_memory))
# returns num-words in this lm-dir.
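# The last line of words.txt is assumed to look like '<word> <integer-id>', so the
# integer on that line gives the number of words.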
def GetNumWords(lm_dir_in):
command = "tail -n 1 {0}/words.txt".format(lm_dir_in)
line = subprocess.check_output(command, shell=True, universal_newlines=True)
try:
a = line.split()
assert len(a) == 2
ans = int(a[1])
except:
ExitProgram("error: unexpected output '{0}' from command {1}".format(
line, command))
return ans
def GetNgramOrder(lm_dir_in):
f = open(lm_dir_in + "/ngram_order", encoding="utf-8")
return int(f.readline())
def GetNumGrams(lm_dir_in):
num_unigrams = 0
# we generally use num_xgrams to refer to num_ngrams - num_unigrams
tot_num_xgrams = 0
f = open(lm_dir_in + "/num_ngrams", encoding="utf-8")
for order, line in enumerate(f):
if order == 0:
num_unigrams = int(line.split()[1])
continue
tot_num_xgrams += int(line.split()[1])
return (num_unigrams, tot_num_xgrams)
# This script creates work/protected.all (listing protected
# counts which may not be removed); it requires work/float.all
# to exist.
def CreateProtectedCounts(work):
command = ("bash -c 'float-counts-to-histories <{0}/float.all | LC_ALL=C sort {1}|"
" histories-to-null-counts >{0}/protected.all'".format(work, sort_mem_opt))
log_file = work + "/log/create_protected_counts.log"
RunCommand(command, log_file, args.verbose == 'true')
def SoftLink(src, dest):
if os.path.lexists(dest):
os.remove(dest)
try:
os.symlink(os.path.abspath(src), dest)
except:
ExitProgram("error linking {0} to {1}".format(os.path.abspath(src), dest))
def CreateInitialWorkDir():
# Creates float.all, stats.all, and protected.all in work_dir/step0
work0dir = work_dir + "/step0"
# create float.all
if not os.path.isdir(work0dir + "/log"):
os.makedirs(work0dir + "/log")
SoftLink(args.lm_dir_in + "/num_ngrams", work0dir + "/num_ngrams")
if num_splits is None:
SoftLink(args.lm_dir_in + "/float.all", work0dir + "/float.all")
else:
splits_star = ' '.join([args.lm_dir_in + "/float.all." + str(n)
for n in range(1, num_splits + 1)])
command = "merge-float-counts " + splits_star + " >{0}/float.all".format(work0dir)
log_file = work0dir + "/log/merge_initial_float_counts.log"
RunCommand(command, log_file, args.verbose == 'true')
# create protected.all
CreateProtectedCounts(work0dir)
stats_star = ' '.join(["{0}/stats.{1}".format(work0dir, n)
for n in range(1, ngram_order + 1)])
# create stats.{1,2,3..}
# e.g. command = 'float-counts-to-float-stats 20000 foo/work/step0/stats.1 '
# 'foo/work/step0/stats.2 <foo/work/step0/float.all'
command = ("float-counts-to-float-stats {0} ".format(num_words) +
stats_star +
" <{0}/float.all".format(work0dir))
log_file = work0dir + "/log/float_counts_to_float_stats.log"
RunCommand(command, log_file, args.verbose == 'true')
command = "merge-float-counts {0} > {1}/stats.all".format(
stats_star, work0dir)
log_file = work0dir + "/log/merge_float_counts.log"
RunCommand(command, log_file, args.verbose == 'true')
for f in stats_star.split():
os.remove(f)
# sets initial_logprob_per_word.
def GetInitialLogprob():
work0dir = work_dir + "/step0"
float_star = ' '.join(['/dev/null' for n in range(1, ngram_order + 1)])
command = ('float-counts-estimate {num_words} {work0dir}/float.all '
'{work0dir}/stats.all {float_star} '.format(
num_words=num_words, work0dir=work0dir,
float_star=float_star))
try:
print(command, file=sys.stderr)
p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True, universal_newlines=True)
# the stdout of this program will be something like:
# 1.63388e+06 -7.39182e+06 10.5411 41.237 49.6758
# representing: total-count, total-like, and for each order, the like-change
# for that order.
line = p.stdout.readline()
print(line, file=sys.stderr)
a = line.split()
tot_count = float(a[0])
tot_like = float(a[1])
like_change = 0.0
logprob_per_word = tot_like / tot_count
for i in range(2, len(a)): # for each n-gram order
like_change += float(a[i])
like_change_per_word = like_change / tot_count
assert like_change_per_word < 0.0001 # should be exactly zero.
except Exception as e:
ExitProgram("error running command '{0}', error is '{1}'".format(
command, repr(e)))
global initial_logprob_per_word
initial_logprob_per_word = logprob_per_word
def WriteNumNgrams(out_dir, num_ngrams):
out_file = out_dir + "/num_ngrams"
try:
f = open(out_file, "w", encoding="utf-8")
for order, num in enumerate(num_ngrams):
print(str(order + 1) + ' ' + str(num), file=f)
f.close()
except:
ExitProgram("error writing num-ngrams to: " + out_file)
def RunPruneStep(work_in, work_out, threshold):
# set float_star = 'work_out/float.1 work_out/float.2 ...'
float_star = " ".join(['{0}/float.{1}'.format(work_out, n)
for n in range(1, ngram_order + 1)])
# create work_out/float.{1,2,..}
log_file = work_out + '/log/float_counts_prune.log'
command = ("float-counts-prune {threshold} {num_words} {work_in}/float.all "
"{work_in}/protected.all {float_star} 2>>{log_file}".format(
threshold=threshold, num_words=num_words,
work_in=work_in, float_star=float_star, log_file=log_file))
with open(log_file, 'w', encoding="utf-8") as f:
print("# " + command, file=f)
try:
| |
self.gamebattle.team2poslist, self.gamebattle.allunitindex,
# self.gamebattle.enactment)
# self.gamebattle.eventlog.addlog([0, str(self.leader[0].name) + "'s parentunit surrender"], [0, 1])
# self.gamebattle.setuparmyicon()
# ^ End retreat function
# v Rotate Function
if self.angle != self.new_angle and self.charging is False and self.state != 10 and self.stamina > 0 and self.collide is False:
self.rotatecal = abs(self.new_angle - self.angle) # amount of angle left to rotate
self.rotatecheck = 360 - self.rotatecal # rotate distance used for preventing angle calculation bug (pygame rotate related)
self.moverotate = True
self.radians_angle = math.radians(360 - self.angle) # for subunit rotate
if self.angle < 0: # negative angle (rotate to left side)
self.radians_angle = math.radians(-self.angle)
# vv Rotate logic to continuously rotate based on angle and shortest length
rotatetiny = self.rotatespeed * dt # rotate little by little according to time
if self.new_angle > self.angle: # rotate to angle more than the current one
if self.rotatecal > 180: # rotate with the smallest angle direction
self.angle -= rotatetiny
self.rotatecheck -= rotatetiny
if self.rotatecheck <= 0:
self.angle = self.new_angle  # if the rotation passes the target angle, snap to the target angle
else:
self.angle += rotatetiny
if self.angle > self.new_angle:
self.angle = self.new_angle  # if the rotation passes the target angle, snap to the target angle
elif self.new_angle < self.angle: # rotate to angle less than the current one
if self.rotatecal > 180: # rotate with the smallest angle direction
self.angle += rotatetiny
self.rotatecheck -= rotatetiny
if self.rotatecheck <= 0:
self.angle = self.new_angle  # if the rotation passes the target angle, snap to the target angle
else:
self.angle -= rotatetiny
if self.angle < self.new_angle:
self.angle = self.new_angle  # if the rotation passes the target angle, snap to the target angle
# ^^ End rotate tiny
self.set_subunit_target() # generate new pos related to side
elif self.moverotate and abs(self.angle - self.new_angle) < 1: # Finish
self.moverotate = False
if self.rotateonly is False: # continue moving to base_target after finish rotate
self.set_subunit_target(self.base_target)
else:
self.state = 0 # idle state
self.processcommand(self.base_target, othercommand=1)
self.rotateonly = False # reset rotate only condition
# ^ End rotate function
if self.state not in (0, 95) and self.front_pos.distance_to(self.command_target) < 1: # reach destination and not in combat
nothalt = False # check if any subunit in combat
for subunit in self.subunit_sprite:
if subunit.state == 10:
nothalt = True
if subunit.unit_leader and subunit.state != 10:
nothalt = False
break
if nothalt is False:
self.retreat_start = False # reset retreat
self.revert = False # reset revert order
self.processcommand(self.base_target, othercommand=1) # reset command base_target state will become 0 idle
# v Perform range attack, can only enter range attack state after finishing rotate
shootrange = self.max_range
if self.use_min_range == 0: # use minimum range to shoot
shootrange = self.min_range
if self.state in (5, 6) and self.moverotate is False and (
(self.attack_target is not None and self.base_pos.distance_to(self.attack_target.base_pos) <= shootrange)
or self.base_pos.distance_to(self.base_attack_pos) <= shootrange): # in shoot range
self.set_target(self.front_pos)
self.range_combat_check = True # set range combat check to start shooting
elif self.state == 11 and self.attack_target is not None and self.base_pos.distance_to(self.attack_target.base_pos) > shootrange \
and self.hold == 0 and self.collide is False:  # chase the target if it goes out of range and the unit is not holding position
self.state = self.command_state # set state to attack command state
self.range_combat_check = False # stop range combat check
self.set_target(self.attack_target.base_pos) # move to new base_target
self.new_angle = self.setrotate() # also keep rotate to base_target
# ^ End range attack state
else: # dead parentunit
# v parentunit just got killed
if self.got_killed is False:
    self.die(self.gamebattle)  # same handling regardless of team
self.gamebattle.setup_uniticon() # reset army icon (remove dead one)
self.gamebattle.eventlog.addlog([0, str(self.leader[0].name) + "'s parentunit is destroyed"],
[0, 1]) # put destroyed event in troop and army log
self.kill()
for subunit in self.subunit_sprite:
subunit.kill()
# ^ End got killed
def set_target(self, pos):
"""set new base_target, scale base_target from base_target according to zoom scale"""
self.base_target = pygame.Vector2(pos) # Set new base base_target
self.set_subunit_target(self.base_target)
def revertmove(self):
"""Only subunit will rotate to move, not the entire unit"""
self.new_angle = self.angle
self.moverotate = False # will not rotate to move
self.revert = True
newangle = self.setrotate()
for subunit in self.subunit_sprite:
subunit.new_angle = newangle
def processcommand(self, targetpoint, runcommand=False, revertmove=False, enemy=None, othercommand=0):
"""Process input order into state and subunit base_target action
othercommand parameter 0 is default command, 1 is natural pause, 2 is order pause"""
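# State codes, inferred from how they are used in this file (an assumption, not an
# authoritative list): 0 idle, 1 move, 3 move to melee, 5 move to range attack
# (+1 for the running variant), 10 in combat, 11 shooting at range,
# 96 controlled retreat (distinct from 98).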
if othercommand == 0: # move or attack command
self.state = 1
if self.attack_place or (enemy is not None and (self.team != enemy.team)): # attack
if self.ammo <= 0 or self.forced_melee: # no magazine_left to shoot or forced attack command
self.state = 3 # move to melee
elif self.ammo > 0: # have magazine_left to shoot
self.state = 5 # Move to range attack
if self.attack_place: # attack specific location
self.set_target(targetpoint)
# if self.magazine_left > 0:
self.base_attack_pos = targetpoint
else:
self.attack_target = enemy
self.base_attack_pos = enemy.base_pos
self.set_target(self.base_attack_pos)
else:
self.set_target(targetpoint)
if runcommand or self.runtoggle == 1:
self.state += 1 # run state
self.command_state = self.state
self.range_combat_check = False
self.command_target = self.base_target
self.new_angle = self.setrotate()
if revertmove: # revert subunit without rotate, cannot run in this state
self.revertmove()
# if runcommand or self.runtoggle:
# self.state -= 1
if self.charging: # change order when attacking will cause authority penalty
self.leader[0].authority -= self.auth_penalty
self.authrecal()
elif othercommand in (1, 2) and self.state != 10: # Pause all action command except combat
if self.charging and othercommand == 2: # halt order instead of auto halt
self.leader[0].authority -= self.auth_penalty # decrease authority of the first leader for stop charge
self.authrecal() # recal authority
self.state = 0 # go into idle state
self.command_state = self.state # reset command state
self.set_target(self.front_pos) # set base_target at self
self.command_target = self.base_target # reset command base_target
self.range_combat_check = False # reset range combat check
self.new_angle = self.setrotate() # set rotation base_target
def processretreat(self, pos):
self.state = 96 # controlled retreat state (not same as 98)
self.command_state = self.state # command retreat
self.leader[0].authority -= self.auth_penalty # retreat reduce gamestart leader authority
if self.charging: # change order when attacking will cause authority penalty
self.leader[0].authority -= self.auth_penalty
self.authrecal()
self.retreat_start = True # start retreat process
self.set_target(pos)
self.revertmove()
self.command_target = self.base_target
def command(self, pos, mouse_right, double_mouse_right, target, keystate, othercommand=0):
"""othercommand is special type of command such as stop all action, raise flag, decimation, duel and so on"""
if self.control and self.state not in (95, 97, 98, 99):
self.revert = False
self.retreat_start = False # reset retreat
self.rotateonly = False
self.forced_melee = False
self.attack_target = None
self.base_attack_pos = 0
self.attack_place = False
self.range_combat_check = False
# register user keyboard
if keystate is not None and (keystate[pygame.K_LCTRL] or keystate[pygame.K_RCTRL]):
self.forced_melee = True
if keystate is not None and (keystate[pygame.K_LALT] or keystate[pygame.K_RALT]):
self.attack_place = True
if self.state != 100:
if mouse_right and 1 <= pos[0] < 998 and 1 <= pos[1] < 998:
if self.state in (10, 96) and target is None:
self.processretreat(pos) # retreat
else:
for subunit in self.subunit_sprite:
subunit.attacking = True
# if self.state == 10:
if keystate is not None and (keystate[pygame.K_LSHIFT] or keystate[pygame.K_RSHIFT]):
self.rotateonly = True
if keystate is not None and keystate[pygame.K_z]:
self.revert = True
self.processcommand(pos, double_mouse_right, self.revert, target)
elif othercommand != 0:
self.processcommand(pos, double_mouse_right, self.revert, target, othercommand)
def switchfaction(self, oldgroup, newgroup, oldposlist, enactment):
"""Change army group and gameid when change side"""
self.colour = (144, 167, 255) # team1 colour
self.control = True # TODO need to change later when player can choose team
if self.team == 2:
self.team = 1 # change to team 1
else: # originally team 1, new team would be 2
self.team = 2 # change to team 2
self.colour = (255, 114, 114) # team2 colour
if enactment is False:
self.control = False
oldgroup.remove(self) # remove from old team group
newgroup.append(self) # add to new team group
oldposlist.pop(self.gameid) # remove from old pos list
self.gameid = newgameid # change game id
# self.changescale() # reset scale to the current zoom
self.icon.changeimage(changeside=True) # change army icon to new team
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/31 10:02
# @Author : kimmy-pan
# @File : GET_TAPD.py
import requests
import tablib
from io import BytesIO
from requests.auth import HTTPBasicAuth
from model.util.PUB_DATABASEOPT import *
from model.util.PUB_LOG import *
from model.FUNC.ENUM_OPT import *
from model.util.newID import *
from model.FUNC.GROUP_OPT import *
import random
# from model.FUNC.CASE_FILE_OPT import *
def build_xls(headers, param):
'''
Build the export workbook.
:param headers: column headers for the sheet
:param param: iterable of row tuples
:return: a tablib.Databook containing one sheet
'''
# if param:
# param = list(param)
# else:
# param = [()]
data_set = tablib.Dataset(*param, headers=headers)
data_book = tablib.Databook()
data_book.add_sheet(data_set)
return data_book
def tapd_to_excel(param):
headers = ('用例ID','用例目录','用例名称','需求ID','前置条件','用例步骤','预期结果','用例类型','用例状态','用例等级','创建人','环境类型','执行类型','执行插件')
parambook = build_xls(headers, param)
output = BytesIO()
output.write(parambook.xls)
return_param = output.getvalue()
return return_param
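# Hypothetical usage sketch (the tuple layout must follow the header order above):
#   rows = [("1001", "dir/sub", "login test", "", "precondition", "steps", "expected",
#            "功能测试", 1, "高", "tester", "", "", "")]
#   xls_bytes = tapd_to_excel(rows)  # raw .xls bytes, e.g. for a download response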
class GET_TAPD():
def __init__(self,category_id):
user = md_Config.getConfig("TAPD", "user")
pwd = md_Config.getConfig("TAPD", "pwd")
self.auth = HTTPBasicAuth(user, pwd)
self.url = md_Config.getConfig("TAPD", "url")
self.session = requests.Session()
self.session.trust_env = False
self.category_id = category_id
def get_count(self):
r = self.session.get(self.url + "/tcases/count?workspace_id=21840291&category_id={}".format(self.category_id),
auth=self.auth)
if r.json()["data"] != [] :
count = r.json()["data"]["count"]
return count
else:
return 0
def get_case_category(self):
r = self.session.get(self.url + "/tcase_categories?workspace_id=21840291&id={}".
format(str(self.category_id).split("&_=")[0]),
auth=self.auth)
# print("============================================")
# print(str(self.category_id))
# print(r.json()["data"])
category_name = r.json()["data"]["TcaseCategory"]["name"]
return category_name
def get_case(self):
"""
如果测试用例数量大于200,要进行分页获取
:return: list
"""
all_count = int(self.get_count())
if all_count != 0:
name = self.get_case_category()
category_name = {}
if all_count <= 200:
r = self.session.get(self.url + "/tcases?workspace_id=21840291&category_id={}&limit=200".format(self.category_id),
auth=self.auth)
case = r.json()["data"]
category_name["category_name"] = self.get_case_category()
case.append(category_name)
return case,all_count
else:
all_case = []
# in Python 3 '/' always yields a float, so compute the number of 200-case
# pages with ceiling division instead of type-checking loop_time
loop_time = (all_count + 199) // 200
for i in range(1, loop_time + 1):
    r = self.session.get(
        self.url + "/tcases?workspace_id=21840291&category_id={}&limit=200&page={}".format(self.category_id, i),
        auth=self.auth)
    case = r.json()["data"]
    category_name["category_name"] = name
    case.append(category_name)
    all_case.append(case)
return all_case,all_count
else:
return [],0,'{}不存在或者{}不存在测试用例,请检查!'.format(self.category_id,self.category_id)
class cloud_to_TAPD():
"""
云盾平台测试用例同步到TAPD
先判断目录是否存在于TAPD
否:返回False,目录不存在TAPD,请创建!
是:同步测试
"""
def __init__(self,gourp_id,tapd_id):
user = md_Config.getConfig("TAPD", "user")
pwd = md_Config.getConfig("TAPD", "pwd")
self.auth = HTTPBasicAuth(user, pwd)
self.url = md_Config.getConfig("TAPD", "url")
self.session = requests.Session()
self.session.trust_env = False
self.gourp_id = gourp_id
self.tapd_id = tapd_id
self.n = 0
def get_cloud_category(self,gourp_id):
"""
获取云盾平台的用例目录
:return:
"""
sql = "SELECT group_desc FROM `p_group_info` where code = '{}'".format(gourp_id)
cloud_category = get_JSON(sql)[0]["group_desc"]
return cloud_category
def get_cloud_case(self):
"""
获取云盾平台的测试用例
:return:
"""
sql = "SELECT case_id,case_desc,case_init,case_step,case_prev_data,case_builder,case_type,case_level,case_exe_status,adddate FROM `regress_case_info` where group_id = '{}'".format(self.gourp_id)
case = get_JSON(sql)
return case
def getRqmtCase(self):
sql = "SELECT case_id,case_desc,case_init,case_step,case_prev_data,case_builder,case_type,case_level,case_exe_status,adddate FROM `rqmt_case_info` where rqmt_id = '{}'".format(
self.gourp_id)
case = get_JSON(sql)
return case
def get_case_category(self,gourp_id):
"""
获取TAPD平台的用例目录
:return:
"""
r = self.session.get(self.url + "/tcase_categories?workspace_id=21840291&name={}".
format(str(self.get_cloud_category(gourp_id))),
auth=self.auth)
name = r.json()["data"]
# exeLog("TAPD目录:" + name["TcaseCategory"]["name"])
return name
def get_TAPD_category(self):
"""
判断获取TAPD的用例目录返回是否唯一,且TAPD的父目录是否与云盾平台的父目录一致
否:继续递归,找到对应的父目录,返回唯一值的子目录
:return:
"""
if self.n == 0:
name = self.get_case_category(self.gourp_id)
if len(name) == 1:
if name[0]["TcaseCategory"]["name"] == self.get_cloud_category(self.gourp_id) and \
GET_TAPD(name[0]["TcaseCategory"]["parent_id"]).get_case_category() == self.get_cloud_category(self.gourp_id[:-2]):
return name
else:
return False,self.get_cloud_category(self.gourp_id[:-2])
elif len(name) == 0:
return name
elif len(name) >1:
for i in name:
if i["TcaseCategory"]["name"] == self.get_cloud_category(self.gourp_id):
self.n = self.n + 2
return self.get_TAPD_category()
else:
return False,self.get_cloud_category(self.gourp_id)
else:
name = self.get_case_category(self.gourp_id[:-self.n])
# exeLog("TAPD平台目录:" + str(name))
if len(name) == 1:
if name[0]["TcaseCategory"]["name"] == self.get_cloud_category(self.gourp_id[:-2]):
r = self.session.get(self.url + "/tcase_categories?workspace_id=21840291&parent_id={}".
format(name[0]["TcaseCategory"]["id"]),auth=self.auth)
# print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
# print(r.json()["data"])
return r.json()["data"]
else:
return False,self.get_cloud_category(self.gourp_id[:-2])
elif len(name) >1:
self.n = self.n + 2
return self.get_TAPD_category()
return
def gettapd(self):
"""通过id获取tapd的目录"""
r = self.session.get(self.url + "/tcase_categories?workspace_id=21840291&id={}".
format(self.tapd_id),
auth=self.auth)
name = r.json()["data"]
# exeLog("TAPD目录:" + name["TcaseCategory"]["name"])
return name
def sync_case(self,type=None):
'''
Import test cases from APT into TAPD.
:return:
'''
# print("*********************************")
# print(self.n)
# category = self.get_TAPD_category()
category = self.gettapd()
# if type(category) is list and category != [] and category != None:
if category :
errorList = []
exeLog("获取TAPD目录成功!~")
param= {}
if type == 1:
case = self.getRqmtCase()
else:
case = self.get_cloud_case()
exeLog("导入用例数量:{}".format(len(case)))
for i in case:
leveldict = {"1": "高", "2": "中", "3": "低"}
statusDict = {"1": "normal", "2": "updating", "3": "abandon"}
typeDict = {"1": "功能测试", "2": "安全测试", "3": "安全测试", "4": "接口测试", "5": "压力测试", "6": "其他"}
# for i in case:
for k, v in leveldict.items():
if k == str(i["case_level"]):
param["priority"] = v
for k, v in statusDict.items():
if k == str(i["case_exe_status"]):
param["status"] = v
for k, v in typeDict.items():
if k == str(i["case_type"]):
param["type"] = v
param["steps_S"] = i["case_step"]
param["precondition"] = i["case_init"]
param["created"] = str(i["adddate"])
param["expectation"] = i["case_prev_data"]
param["creator"] = i["case_builder"]
param["workspace_id"] = "21840291"
# for j in category:
# if j["TcaseCategory"]["name"] == self.get_cloud_category(self.gourp_id):
# param["category_id"] = j["TcaseCategory"]["id"]
param["category_id"] = category["TcaseCategory"]["id"]
param["name"] = i["case_desc"]
print(param)
r = self.session.post(self.url + "/tcases",data=param,
auth=self.auth)
time.sleep(1)
if r.json()["status"] != 1:
errorList.append((i["case_id"],r.json()["info"]))
# print(r.json())
if errorList == []:
exeLog("TAPD导入完成!")
return_data = respdata().sucessMessage('', '导入完成,请检查数据,如有问题请联系管理!')
return json.dumps(return_data, ensure_ascii=False)
else:
exeLog("{}导入失败".format(errorList))
# print(r.json())
# print("导入成功!")
return_data=respdata().failMessage('','导入失败!{}'.format(errorList))
return json.dumps(return_data,ensure_ascii=False)
else:
return_data = respdata().failMessage('', "TAPD不存在<{}>目录,请检查!".format(self.tapd_id))
return json.dumps(return_data, ensure_ascii=False)
# elif type(category) is tuple and category[0] == False:
# a = category[1]
# b = self.get_cloud_category(self.gourp_id[0:3])
# if a == b:
# c = a
# exeLog("TAPD导入失败,错误目录信息:"+c)
# return_data=respdata().failMessage('',"TAPD不存在<{}>目录".format(c))
#
# else:
# c = b + "/" + "..." + "/" +a
# exeLog("TAPD导入失败,错误目录信息:" + c)
# return_data = respdata().failMessage('', "TAPD不存在<{}>目录".format(c))
# return json.dumps(return_data,ensure_ascii=False)
#
# elif category == []:
# a = self.get_cloud_category(self.gourp_id[:-2])
# a1 = self.get_cloud_category(self.gourp_id)
# b = self.get_cloud_category(self.gourp_id[0:3])
# if a != b:
# c = b + "/" + "..." + "/" + a + "/" + a1
# exeLog("TAPD导入失败,错误目录信息:" + c)
# return_data = respdata().failMessage('', "TAPD不存在<{}>目录".format(c))
# else:
# c = b + "/" + "..." + "/" +a1
# return_data = respdata().failMessage('', "TAPD不存在<{}>目录".format(c))
# return json.dumps(return_data,ensure_ascii=False)
def get_all_case(category_id,group_id):
"""
将tapd数据转换成EXCEL文件
:param category_id: tapdID
:return:
"""
param = []
for i in category_id:
result = GET_TAPD(i).get_case()
case = result[0]
count = result[1]
if count != 0:
exeLog("TAPD用例数量:"+str(count))
# print("TAPD用例数量:")
# print(count)
if count > 200:
    name = case[0][200]['category_name']
else:  # count <= 200
    name = case[-1]['category_name']
for j in case:
if isinstance(j,list):
for D in j:
if 'Tcase' in D:
testcase = D['Tcase']
case_level=ENUM_OPT("case_level").get_val(testcase["priority"])
case_type = ENUM_OPT("case_type").get_val(testcase["type"])
if testcase["status"] == "normal":
case_exe_status = 1
elif testcase["status"] == "updating":
case_exe_status = 2
elif testcase["status"] =="abandon":
case_exe_status = 3
param.append((testcase["id"][12:],name,testcase["name"],"",testcase["precondition"],testcase["steps_S"],testcase["expectation"],case_type,
case_exe_status,case_level,testcase["creator"],group_id))
exeLog("写入数据库的用例数量:" + str(len(param)))
elif isinstance(j,dict):
if 'Tcase' in j:
testcase = j['Tcase']
case_level = ENUM_OPT("case_level").get_val(testcase["priority"])
case_type = ENUM_OPT("case_type").get_val(testcase["type"])
if testcase["status"] == "normal":
case_exe_status = 1
elif testcase["status"] == "updating":
case_exe_status = 2
elif testcase["status"] == "abandon":
case_exe_status = 3
param.append(
(testcase["id"][12:], name, testcase["name"], "", testcase["precondition"], testcase["steps_S"],
testcase["expectation"], case_type,
case_exe_status, case_level, testcase["creator"],group_id))
exeLog("写入数据库的用例数量:"+str(len(param)))
# print("写入数据库的用例数量:")
# print(len(param))
print(param)
# Column mapping: 'case ID' case_id, 'case directory' case_path, 'case name' case_desc, 'requirement ID', 'precondition' case_init, 'case steps' case_step, 'expected result' case_prev_data, 'case type' case_type, 'case status' case_exe_status, 'case level' case_level, 'creator' case_builder, 'environment type', 'execution type', 'execution plugin'; case_exe_env, case_exe_type and case_exe_plugin are missing
sql = "INSERT INTO regress_case_info ( case_id,case_path,case_desc,rqmt_id,case_init,case_step,case_prev_data,case_type,case_exe_status,case_level,case_builder,group_id) VALUE (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
result = DB_CONN().db_Batch(sql, param)
if type(result) is tuple and result[0] == False:
return_data = respdata().failMessage('', '操作失败,错误信息:{}'.format(result[1]))
return json.dumps(return_data, ensure_ascii=False)
# tapd_to_excel(param)
else:
return_data = respdata().sucessMessage('', '操作完成,请检查导入情况')
return json.dumps(return_data, ensure_ascii=False)
else:
return_data = respdata().failMessage('', '操作失败,错误信息:{}'.format(result[2]))
return json.dumps(return_data, ensure_ascii=False)
class requiretotapd:
def __init__(self, group_id, tapd_id=None ,rmid=None):
user = md_Config.getConfig("TAPD", "user")
pwd = md_Config.getConfig("TAPD", "pwd")
self.auth = HTTPBasicAuth(user, pwd)
self.url = md_Config.getConfig("TAPD", "url")
self.session = requests.Session()
self.session.trust_env = False
self.group_id = getCode(group_id)
self.tapd_id = tapd_id
self.rmid = rmid
def sync_tapd(self):
"""同步tapd平台的需求(目录id)"""
rmList = []
i = 1
while True:
param = {"workspace_id": "21840291", "category_id": self.tapd_id,"page":i}
r = self.session.get(self.url + "/stories", params=param, auth=self.auth)
if r.json()["data"] != []:
rmList.append(r.json()["data"])
i = i + 1
time.sleep(1)
else:
return rmList
def sync_tapd_rmid(self):
"""同步tapd平台的需求(需求id)"""
param = {"workspace_id": "21840291", "id": self.rmid}
r = self.session.get(self.url + "/stories", params=param, auth=self.auth)
return r.json()["data"]
def write_db(self):
"""把需求写进数据库"""
statusdict = {"planning": 1, "resolved": 2, "rejected": 3, "status_2": 4, "new": 5, "status_3": 6,
"suspended": 7, "status_5": 8, "status_7": 9, "status_4": 10, "status_1": 11}
param = []
idlist = []
if self.tapd_id != None and self.tapd_id != "":
data = self.sync_tapd()
print("$%#$%#########################################$")
print(len(data))
if data != []:
alldata = []
for j in data:
for k in j:
alldata.append(k)
for i in alldata:
id = i["Story"]["id"][-9:]
statuskey=i["Story"]["status"]
status=statusdict.get(statuskey)
param.append((id, i["Story"]["name"], i["Story"]["creator"], i["Story"]["owner"], i["Story"]["due"],
status, self.group_id, i["Story"]["begin"]))
idlist.append((id,))
else:
return_data = respdata().failMessage('',
'操作失败,错误信息:{}不存在或者{}不存在需求,请检查!'.format(self.tapd_id, self.tapd_id))
return json.dumps(return_data, ensure_ascii=False)
elif self.rmid !=None and self.rmid != "":
data = self.sync_tapd_rmid()
for i in data:
if data != []:
id = i["Story"]["id"][-9:]
statuskey = i["Story"]["status"]
status = statusdict.get(statuskey)
param.append(
(id, i["Story"]["name"], i["Story"]["creator"], i["Story"]["owner"], i["Story"]["due"],
status, self.group_id, i["Story"]["begin"]))
idlist.append((id,))
else:
return_data = respdata().failMessage('',
'操作失败,错误信息:{}不存在或者{}不存在需求,请检查!'.format(self.tapd_id,
self.tapd_id))
return json.dumps(return_data, ensure_ascii=False)
if len(idlist)>0:
deletesql="DELETE FROM t_requirements_info WHERE rqmt_id= %s"
DB_CONN().db_Batch(deletesql,idlist)
sql = "INSERT INTO t_requirements_info | |
base pointer registers."
print(tty_colors.red() + self.reportexploitable + tty_colors.default())
else:
print(tty_colors.red()+"Exploitable = %s"%self.is_exploitable+tty_colors.default())
if self.access_type in ("read", "write"):
self.reportexploitable = "Crash "+self.access_type+"'g invalid address."
else:
self.reportexploitable = "Crash accessing invalid address."
print(tty_colors.red() + self.reportexploitable + tty_colors.default())
elif self.exception=="EXC_BAD_INSTRUCTION":
self.is_exploitable = True
print(tty_colors.red()+"Exploitable = %s"%self.is_exploitable+tty_colors.default())
self.reportexploitable="Illegal instruction at %s, probably a exploitable issue unless the crash was in libdispatch/xpc."%self.pc
print(tty_colors.blue() + self.reportexploitable + tty_colors.default())
elif self.exception=="EXC_ARITHMETIC":
self.is_exploitable = False
print(tty_colors.red()+"Exploitable = %s"%self.is_exploitable+tty_colors.default())
self.reportexploitable="Arithmetic exception at %s, probably not exploitable."%self.pc
print(tty_colors.blue() + self.reportexploitable + tty_colors.default())
elif self.exception=="EXC_SOFTWARE":
self.is_exploitable=False
print(tty_colors.red()+"Exploitable = %s"%self.is_exploitable+tty_colors.default())
self.reportexploitable="Software exception, probably not exploitable."
print(tty_colors.blue() + self.reportexploitable + tty_colors.default())
elif self.exception=="EXC_BREAKPOINT":
self.is_exploitable=False
print(tty_colors.red()+"Exploitable = %s"%self.is_exploitable+tty_colors.default())
self.reportexploitable="Software breakpoint, probably not exploitable."
print(tty_colors.blue() + self.reportexploitable + tty_colors.default())
elif self.exc_address=="EXC_CRASH":
self.is_exploitable= False
print(tty_colors.red()+"Exploitable = %s"%self.is_exploitable+tty_colors.default())
def is_stack_suspicious(self):
exc_address = self.exc_address
exception = self.exception
backtrace = self.backtrace
suspicious_functions = """__chk_fail __stack_chk_fail szone_error CFRelease CFRetain
_CFRelease _CFRetain malloc calloc realloc objc_msgSend szone_free free_small
tiny_free_list_add_ptr tiny_free_list_remove_ptr small_free_list_add_ptr
small_free_list_remove_ptr large_entries_free_no_lock large_free_no_lock
szone_batch_free szone_destroy free CSMemDisposeHandle CSMemDisposePtr append_int
release_file_streams_for_task __guard_setup _CFStringAppendFormatAndArgumentsAux
WTF::fastFree WTF::fastMalloc WTF::FastCalloc WTF::FastRealloc WTF::tryFastCalloc
WTF::tryFastMalloc WTF::tryFastRealloc WTF::TCMalloc_Central_FreeList GMfree
GMmalloc_zone_free GMrealloc GMmalloc_zone_realloc""".split()
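# Heuristic: allocator, free-list and retain/release helpers showing up in the
# crashing thread's backtrace usually indicate heap corruption, a double free or
# an over-release, hence the crash is flagged as likely exploitable below.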
if exc_address=="0xbbadbeef":
# WebCore functions call CRASH() in various assertions or if the amount to allocate was too big. CRASH writes a null byte to 0xbbadbeef.
self.is_exploitable=False
print(tty_colors.red()+"Exploitable = %s"%self.is_exploitable+tty_colors.default())
self.reportexploitable = "Not exploitable. Seems to be a safe crash. Calls to CRASH() function writes a null byte to 0xbbadbeef"
print(tty_colors.red() + self.reportexploitable + tty_colors.default())
return
if "0 ???" in backtrace:
self.is_exploitable = True
print(tty_colors.red()+"Exploitable = %s"%self.is_exploitable+tty_colors.default())
self.reportexploitable="This crash is suspected to be exploitable because the crashing instruction is outside of a known function, i.e. in dynamically generated code"
print(tty_colors.red() + self.reportexploitable + tty_colors.default())
return
for i in suspicious_functions:
if i in backtrace:
if exception == "EXC_BREAKPOINT" and i in ("CFRelease", "CFRetain"):
self.is_exploitable = "no"
return
elif i=="_CFRelease" or i=="CFRelease" and "CGContextDelegateFinalize" in backtrace:
return
elif i=="objc_msgSend" and exc_address<<PAGE_SIZE:
continue
else:
self.is_exploitable = True
print(tty_colors.red()+"Exploitable = %s"%self.is_exploitable+tty_colors.default())
self.reportexploitable="The crash is suspected to be an exploitable issue due to the suspicious function in the stack trace of the crashing thread."
print(tty_colors.red() + self.reportexploitable + tty_colors.default())
return
def exploitable(debugger,cmd,res,dict):
"""checks if the crash is exploitable"""
lisa_=Lisa(debugger,res,dict)
class ShellStorm:
def __init__(self):
pass
def searchShellcode(self, keyword):
try:
print("Connecting to shell-storm.org...")
s = requests.get("http://shell-storm.org/api/?s=%s"%(keyword))
res = s.text
data_l = res.split('\n')
except:
print("Cannot connect to shell-storm.org")
return None
data_dl = []
for data in data_l:
try:
desc = data.split("::::")
try:
dico = {
'ScAuthor': desc[0],
'ScArch': desc[1],
'ScTitle': desc[2],
'ScId': desc[3],
'ScUrl': desc[4],
'ScSize': int(''.join(x for x in desc[2][-10:-5] if x.isdigit()))
}
except Exception:
dico = {
'ScAuthor': desc[0],
'ScArch': desc[1],
'ScTitle': desc[2],
'ScId': desc[3],
'ScUrl': desc[4],
'ScSize': 0
}
data_dl.append(dico)
except:
pass
try:
return sorted(data_dl, key=lambda x: x['ScSize'], reverse=True)
except Exception:
print("Could not sort by size")
return data_dl
def displayShellcode(self, shellcodeId):
if shellcodeId is None:
return None
try:
s = requests.get("http://shell-storm.org/shellcode/files/shellcode-%s.php"%(shellcodeId))
res = s.text
data = res.split("<pre>")[1].split("<body>")[0]
except:
print("Failed to download shellcode from shell-storm.org")
return None
data = data.replace("&quot;", "\"")
data = data.replace("&amp;", "&")
data = data.replace("&lt;", "<")
data = data.replace("&gt;", ">")
return data
@staticmethod
def version():
print("shell-storm API - v0.1")
print("Search and display all shellcodes in shell-storm database")
print("<NAME> - @JonathanSalwan - 2012")
print("http://shell-storm.org")
return
def extractFromUniversalBinary(debugger,command,result,dict):
"""Uses lipo to extract a given architecture from a Universal binary
Syntax: extract x86_64 <input file> <output file>
Ex: extract x86_64 /usr/lib/system/libsystem_kernel.dylib ./libsystem_kernel.dylib
"""
args = shlex.split(command)
if len(args)==3:
architecture, inputfile, outputfile = args
subprocess.check_output(['lipo', inputfile, '-extract', architecture, '-output', outputfile], shell=False)
else:
print("Syntax: extract x86_64 /usr/lib/system/libsystem_kernel.dylib ./libsystem_kernel.dylib")
def shellcode(debugger, command, result, dict):
"""Searches shell-storm for shellcode
Syntax:shellcode"""
mod = shlex.split(command)
if len(mod)!=2:
print("Syntax: shellcode <option> <arg>\n")
print("Options: -search <keyword>")
print(" -display <shellcode id>")
print(" -save <shellcode id>")
return
mod, arg = mod
if mod not in ("-search", "-display", "-save"):
syntax()
return
if mod == "-search":
api = ShellStorm()
res_dl = api.searchShellcode(arg)
if not res_dl:
print("Shellcode not found")
sys.exit(0)
print(tty_colors.red()+"Found %d shellcodes" % len(res_dl)+tty_colors.default())
print(tty_colors.green()+"%s\t%s %s" %("ScId", "Size", "Title")+tty_colors.default())
for data_d in res_dl:
if data_d['ScSize'] == 0:
print("[%s]\tn/a %s - %s"%(data_d['ScId'], data_d['ScArch'], data_d['ScTitle']))
else:
print("[%s]\t%s%s - %s"%(data_d['ScId'], str(data_d['ScSize']).ljust(5), data_d['ScArch'], data_d['ScTitle']))
elif mod == "-display":
res = ShellStorm().displayShellcode(arg)
if not res:
print("Shellcode id not found")
return
print(tty_colors.red()+res+tty_colors.default())
elif mod == "-save":
res = ShellStorm().displayShellcode(arg)
if not res:
print("Shellcode id not found")
return
filename = 'shellcode_'+str(time.time())+'.c'
with open(filename,'w') as f:
f.write(res)
print(tty_colors.red()+"Written to file shellcode_"+filename+'.c'+tty_colors.default())
return
def coredump(debugger,command,result,dict):
"""
dump entire process memory
"""
binary_name = lldb.target.executable.basename
execute(debugger, 'process save-core "%s.core"'%(binary_name),result,dict)
return
def dump(debugger,args,result,dict):
"""Dump's Memory of the process in a given address range
Syntax: dump outfile 0x6080000fe680 0x6080000fe680+1000
dump will not read over 1024 bytes of data. To overwride this use -f
Syntax: dump -o outfile -s 0x6080000fe680 -e 0x6080000fe680+1000 -f"""
args=shlex.split(args)
parser = argparse.ArgumentParser(prog="dump memory in the memory given range");
parser.add_argument("-s", "--start", required=True, help="start address");
parser.add_argument("-e", "--end", required=True, help="end address");
parser.add_argument("-o", "--outfile", help="file to save the dump to");
parser.add_argument("-f", "--force", default=0, type=int, help="dump will not read over 1024 bytes of data. To overwride this use -f. 0(false) or 1(true)");
args = parser.parse_args(args)
start_range = args.start
end_range = args.end
if args.force == 1:
    if args.outfile:
        output, error = executeReturnOutput(debugger, "memory read -b --force --outfile %s %s %s" % (args.outfile, start_range, end_range), result, dict)
    else:
        output, error = executeReturnOutput(debugger, "memory read -b --force %s %s" % (start_range, end_range), result, dict)
else:
    if args.outfile:
        output, error = executeReturnOutput(debugger, "memory read -b --outfile %s %s %s" % (args.outfile, start_range, end_range), result, dict)
    else:
        output, error = executeReturnOutput(debugger, "memory read -b %s %s" % (start_range, end_range), result, dict)
if not error:
if not args.outfile:
print(output)
else:
if "--force" in error:
print("dump will not read over 1024 bytes of data. To overwride this use -f.")
print("Syntax: dump outfile 0x6080000fe680 0x6080000fe680+1000 -f")
else:
print(error)
def symbols(debugger, string, result, dict):
results = return_symbols(debugger, string)
if results:
for i in results:
print(i)
return
print('Nothing found')
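# Usage sketch (hypothetical lldb invocations, based on the fnmatch-style matching
# implemented in return_symbols below):
#   (lldb) symbols libsystem_c.dylib`mall*   # symbols matching 'mall*' in one module
#   (lldb) symbols *free*                    # match across all loaded modules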
def return_symbols(debugger, string):
module_to_search = None
result = []
if string:
if "`" in string:
module_to_search, string_to_search = string.split("`", 1)
else:
string_to_search = string
modules = debugger.GetSelectedTarget().modules
for module in modules:
if module_to_search and not fnmatch.fnmatch(module.file.basename.lower(), module_to_search.lower()):
continue
prefix = module.file.basename + "`"
for i in module.symbols:
if fnmatch.fnmatch((i.name).lower(), string_to_search.lower()):
result.append('%s '%(prefix+"%s"%i))
return result
return None
class MACH_HEADER(Structure):
_fields_ = [
("magic", c_uint),
("cputype", c_uint),
("cpusubtype", c_uint),
("filetype", c_uint),
("ncmds", c_uint),
("sizeofcmds", c_uint),
("flags", c_uint)
]
class LOAD_COMMAND(Structure):
_fields_ = [
("cmd", c_uint),
("cmdsize", c_uint)
]
class SEGMENT_COMMAND(Structure):
_fields_ = [
("cmd", c_uint),
("cmdsize", c_uint),
("segname", c_ubyte * 16),
("vmaddr", c_uint),
("vmsize", c_uint),
("fileoff", c_uint),
("filesize", c_uint),
("maxprot", c_uint),
("initprot", c_uint),
("nsects", c_uint),
("flags", c_uint)
]
class SEGMENT_COMMAND64(Structure):
_fields_ = [
("cmd", c_uint),
("cmdsize", c_uint),
("segname", c_ubyte * 16),
("vmaddr", c_ulonglong),
("vmsize", c_ulonglong),
("fileoff", c_ulonglong),
("filesize", c_ulonglong),
("maxprot", c_uint),
("initprot", c_uint),
("nsects", c_uint),
("flags", c_uint)
]
class SECTION(Structure):
_fields_ = [
("sectname", c_ubyte * 16),
("segname", c_ubyte * 16),
("addr", c_uint),
("size", c_uint),
("offset", c_uint),
("align", c_uint),
("reloff", c_uint),
("nreloc", c_uint),
("flags", c_uint),
("reserved1", c_uint),
("reserved2", c_uint)
]
class SECTION64(Structure):
_fields_ = [
("sectname", c_ubyte * 16),
("segname", c_ubyte * 16),
("addr", c_ulonglong),
("size", c_ulonglong),
("offset", c_uint),
("align", c_uint),
("reloff", c_uint),
("nreloc", c_uint),
("flags", c_uint),
("reserved1", c_uint),
("reserved2", c_uint)
]
class MACHOFlags:
CPU_TYPE_I386 = 0x7
CPU_TYPE_X86_64 = (CPU_TYPE_I386 | 0x1000000)
CPU_TYPE_MIPS = 0x8
CPU_TYPE_ARM = 12
CPU_TYPE_SPARC = 14
CPU_TYPE_POWERPC = 18
CPU_TYPE_POWERPC64 = (CPU_TYPE_POWERPC | 0x1000000)
LC_SEGMENT = 0x1
LC_SEGMENT_64 = 0x19
S_ATTR_SOME_INSTRUCTIONS = 0x00000400
S_ATTR_PURE_INSTRUCTIONS = 0x80000000
class MachOHeader(Structure):
_fields_ = [
("magic", c_uint),
("cputype", c_uint),
("cpusubtype", c_uint),
("filetype", c_uint),
("ncmds", c_uint),
("sizeofcmds", c_uint),
("flags", c_uint)
]
""" This class parses the Mach-O """
class MACHO:
def __init__(self, binary):
self.__binary = bytearray(binary)
self.__machHeader = None
self.__rawLoadCmd = None
self.__sections_l = []
self.__setHeader()
self.__setLoadCmd()
def __setHeader(self):
self.__machHeader = MACH_HEADER.from_buffer_copy(self.__binary)
if self.getArchMode() == CS_MODE_32:
self.__rawLoadCmd = self.__binary[28:28+self.__machHeader.sizeofcmds]
elif self.getArchMode() == CS_MODE_64:
self.__rawLoadCmd = self.__binary[32:32+self.__machHeader.sizeofcmds]
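# The 32-bit mach_header is 28 bytes and mach_header_64 is 32 bytes (it carries an
# extra 4-byte 'reserved' field), which is why the load commands start at offset
# 28 or 32 respectively.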
def __setLoadCmd(self):
base = self.__rawLoadCmd
for i in range(self.__machHeader.ncmds):
command = LOAD_COMMAND.from_buffer_copy(base)
if command.cmd == MACHOFlags.LC_SEGMENT:
segment = SEGMENT_COMMAND.from_buffer_copy(base)
self.__setSections(segment.nsects, base[56:], 32)
elif command.cmd == MACHOFlags.LC_SEGMENT_64:
segment = SEGMENT_COMMAND64.from_buffer_copy(base)
self.__setSections(segment.nsects, base[72:], 64)
base = base[command.cmdsize:]
def __setSections(self, sectionsNumber, base, sizeHeader):
| |
#!/usr/bin/env python
# Copyright 2017 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_security_rule
short_description: Create security rule policy on PAN-OS devices or Panorama management console.
description:
- Security policies allow you to enforce rules and take action, and can be as general or specific as needed.
- The policy rules are compared against the incoming traffic in sequence, and because the first rule that matches
- the traffic is applied, the more specific rules must precede the more general ones.
author: "<NAME> (@ivanbojer), <NAME> (@stealthllama), <NAME> (@mrichardson03)"
version_added: "2.4"
requirements:
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth unless I(api_key) is set.
default: 'admin'
password:
description:
- Password credentials to use for auth unless I(api_key) is set.
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
rule_name:
description:
- Name of the security rule.
required: true
source_zone:
description:
- List of source zones.
default: 'any'
source_ip:
description:
- List of source addresses.
default: 'any'
source_user:
description:
- Use users to enforce policy for individual users or a group of users.
default: 'any'
hip_profiles:
description: >
- If you are using GlobalProtect with host information profile (HIP) enabled, you can also base the policy
on information collected by GlobalProtect. For example, the user access level can be determined by the HIP that
notifies the firewall about the user's local configuration.
default: 'any'
destination_zone:
description:
- List of destination zones.
default: 'any'
destination_ip:
description:
- List of destination addresses.
default: 'any'
application:
description:
- List of applications, application groups, and/or application filters.
default: 'any'
service:
description:
- List of services and/or service groups.
default: 'application-default'
category:
description:
- List of destination URL categories.
action:
description:
- Action to apply once rules matches.
log_setting:
description:
- Log forwarding profile.
log_start:
description:
- Whether to log at session start.
default: false
log_end:
description:
- Whether to log at session end.
default: true
description:
description:
- Description of the security rule.
default: 'None'
rule_type:
description:
- Type of security rule (version 6.1 of PAN-OS and above).
default: 'universal'
tag_name:
description:
- List of tags associated with the rule.
default: 'None'
negate_source:
description:
- Match on the reverse of the 'source_ip' attribute
default: false
negate_destination:
description:
- Match on the reverse of the 'destination_ip' attribute
default: false
disabled:
description:
- Disable this rule.
default: false
schedule:
description:
- Schedule in which this rule is active.
icmp_unreachable:
description:
- Send 'ICMP Unreachable'. Used with 'deny', 'drop', and 'reset' actions.
default: false
disable_server_response_inspection:
description:
- Disables packet inspection from the server to the client. Useful under heavy server load conditions.
default: false
group_profile:
description: >
- Security profile group that is already defined in the system. This property supersedes antivirus,
vulnerability, spyware, url_filtering, file_blocking, data_filtering, and wildfire_analysis properties.
default: None
antivirus:
description:
- Name of the already defined antivirus profile.
default: None
vulnerability:
description:
- Name of the already defined vulnerability profile.
default: None
spyware:
description:
- Name of the already defined spyware profile.
default: None
url_filtering:
description:
- Name of the already defined url_filtering profile.
default: None
file_blocking:
description:
- Name of the already defined file_blocking profile.
default: None
data_filtering:
description:
- Name of the already defined data_filtering profile.
default: None
wildfire_analysis:
description:
- Name of the already defined wildfire_analysis profile.
default: None
location:
description:
- Position to place the created rule in the rule base. Supported values are
I(top)/I(bottom)/I(before)/I(after).
default: 'bottom'
existing_rule:
description:
- If 'location' is set to 'before' or 'after', this option specifies an existing
rule name. The new rule will be created in the specified position relative to this
rule. If 'location' is set to 'before' or 'after', this option is required.
devicegroup:
description:
- Device groups are logical groups of firewalls in Panorama.
- If the device group is not defined actions will affect the Shared Panorama context.
default: None
rulebase:
description:
- The Panorama rulebase in which the rule will be created. Only used with Panorama.
default: 'pre-rulebase'
target:
description:
- Apply this rule exclusively to the listed firewalls in Panorama.
negate_target:
description:
- Exclude this rule from the listed firewalls in Panorama.
vsys:
description:
- The VSYS in which to create the rule.
default: 'vsys1'
state:
description:
- The state of the rule. Can be either I(present)/I(absent).
default: 'present'
operation:
description:
- The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete).
- I(Deprecated - use 'state' instead.)
default: 'add'
commit:
description:
- Commit configuration if changed.
default: false
'''
EXAMPLES = '''
- name: add SSH inbound rule to Panorama device group
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_name: 'SSH permit'
description: 'SSH rule test'
tag_name: ['production']
source_zone: ['public']
source_ip: ['any']
destination_zone: ['private']
destination_ip: ['1.1.1.1']
application: ['ssh']
action: 'allow'
devicegroup: 'Cloud Edge'
rulebase: 'pre-rulebase'
- name: add a rule to allow HTTP multimedia only to CDNs
panos_security_rule:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
rule_name: 'HTTP Multimedia'
description: 'Allow HTTP multimedia only to host at 1.1.1.1'
source_zone: ['private']
destination_zone: ['public']
category: ['content-delivery-networks']
application: ['http-video', 'http-audio']
service: ['service-http', 'service-https']
action: 'allow'
- name: add a more complex rule that uses security profiles
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_name: 'Allow HTTP'
source_zone: ['public']
destination_zone: ['private']
log_start: false
log_end: true
action: 'allow'
antivirus: 'strict'
vulnerability: 'strict'
spyware: 'strict'
url_filtering: 'strict'
wildfire_analysis: 'default'
- name: disable a Panorama pre-rule
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
rule_name: 'Allow telnet'
source_zone: ['public']
destination_zone: ['private']
source_ip: ['any']
destination_ip: ['1.1.1.1']
log_start: false
log_end: true
action: 'allow'
devicegroup: 'Production edge'
rulebase: 'pre-rulebase'
disabled: true
- name: delete a devicegroup security rule
panos_security_rule:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'delete'
rule_name: 'Allow telnet'
devicegroup: 'DC Firewalls'
rulebase: 'pre-rulebase'
state: 'absent'
- name: add a rule at a specific location in the rulebase
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
rule_name: 'SSH permit'
description: 'SSH rule test'
source_zone: ['untrust']
destination_zone: ['trust']
source_ip: ['any']
source_user: ['any']
destination_ip: ['1.1.1.1']
category: ['any']
application: ['ssh']
service: ['application-default']
action: 'allow'
location: 'before'
existing_rule: 'Allow MySQL'
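# A minimal additional sketch with assumed placeholder values, showing a rule
# created directly on a firewall in a specific vsys instead of through Panorama
- name: add an outbound DNS rule to a specific vsys
  panos_security_rule:
    ip_address: '{{ ip_address }}'
    api_key: '{{ api_key }}'
    rule_name: 'Allow DNS'
    vsys: 'vsys2'
    source_zone: ['trust']
    destination_zone: ['untrust']
    source_ip: ['any']
    destination_ip: ['any']
    application: ['dns']
    action: 'allow'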
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
from pandevice.base import PanDevice
from pandevice.policies import Rulebase, SecurityRule, PreRulebase, PostRulebase
from pandevice.device import Vsys
from pandevice.firewall import Firewall
from pandevice.panorama import Panorama, DeviceGroup
from pandevice.errors import PanDeviceError
from pandevice import network
HAS_LIB = True
except ImportError:
HAS_LIB = False
ACCEPTABLE_MOVE_ERRORS = (
'already at the top',
'already at the bottom',
)
def get_devicegroup(device, devicegroup):
dg_list = device.refresh_devices()
for group in dg_list:
if isinstance(group, DeviceGroup):
if group.name == devicegroup:
return group
def get_vsys(vsys, vsys_list):
for v in vsys_list:
if v.name == vsys:
return v
def find_rule(rules, new_rule):
for r in rules:
if r.name == new_rule.name:
return r
# TODO: Remove operation parameter and all associated code
def main():
argument_spec = dict(
ip_address=dict(required=True),
username=dict(default='admin'),
password=dict(no_log=True),
api_key=dict(no_log=True),
rule_name=dict(required=True),
source_zone=dict(type='list', default=['any']),
source_ip=dict(type='list', default=["any"]),
source_user=dict(type='list', default=['any']),
hip_profiles=dict(type='list', default=['any']),
destination_zone=dict(type='list', default=['any']),
destination_ip=dict(type='list', default=["any"]),
application=dict(type='list', default=['any']),
service=dict(type='list', default=['application-default']),
category=dict(type='list', default=['any']),
action=dict(default='allow', choices=['allow', 'deny', 'drop', 'reset-client', 'reset-server', 'reset-both']),
log_setting=dict(),
log_start=dict(type='bool', default=False),
log_end=dict(type='bool', default=True),
description=dict(default=''),
rule_type=dict(default='universal', choices=['universal', 'intrazone', 'interzone']),
tag_name=dict(type='list'),
negate_source=dict(type='bool', default=False),
negate_destination=dict(type='bool', default=False),
disabled=dict(type='bool', default=False),
schedule=dict(),
icmp_unreachable=dict(type='bool'),
disable_server_response_inspection=dict(type='bool', default=False),
group_profile=dict(),
antivirus=dict(),
spyware=dict(),
vulnerability=dict(),
url_filtering=dict(),
file_blocking=dict(),
wildfire_analysis=dict(),
data_filtering=dict(),
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pool actions.
"""
# isort: STDLIB
import os
from collections import defaultdict
# isort: THIRDPARTY
from justbytes import Range
from .._constants import PoolMaintenanceErrorCode
from .._errors import (
StratisCliAggregateError,
StratisCliEngineError,
StratisCliIncoherenceError,
StratisCliInUseOtherTierError,
StratisCliInUseSameTierError,
StratisCliNameConflictError,
StratisCliNoChangeError,
StratisCliPartialChangeError,
StratisCliPartialFailureError,
)
from .._stratisd_constants import BlockDevTiers, PoolActionAvailability, StratisdErrors
from ._connection import get_object
from ._constants import TOP_OBJECT
from ._formatting import get_property, print_table, size_triple, to_hyphenated
from ._utils import get_clevis_info
def _generate_pools_to_blockdevs(managed_objects, to_be_added, tier):
"""
Generate a map of pools to which block devices they own
:param managed_objects: the result of a GetManagedObjects call
:type managed_objects: dict of str * dict
:param to_be_added: the blockdevs to be added
:type to_be_added: frozenset of str
:param tier: tier to search for blockdevs to be added
:type tier: _stratisd_constants.BlockDevTiers
:returns: a map of pool names to sets of strings containing blockdevs they own
:rtype: dict of str * frozenset of str
"""
# pylint: disable=import-outside-toplevel
from ._data import MODev, MOPool, devs, pools
pool_map = dict(
(path, str(MOPool(info).Name()))
for (path, info) in pools().search(managed_objects)
)
pools_to_blockdevs = defaultdict(list)
for modev in (
modev
for modev in (
MODev(info)
for (_, info) in devs(props={"Tier": tier}).search(managed_objects)
)
if str(modev.Devnode()) in to_be_added
):
pools_to_blockdevs[pool_map[modev.Pool()]].append(str(modev.Devnode()))
return dict(
(pool, frozenset(blockdevs)) for pool, blockdevs in pools_to_blockdevs.items()
)
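# Illustrative sketch of the return value (hypothetical pool name and device
# paths): if pool "alpha" already owns /dev/sdb and /dev/sdc in the searched
# tier and both devnodes appear in to_be_added, the result is
# {"alpha": frozenset({"/dev/sdb", "/dev/sdc"})}.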
def _check_opposite_tier(managed_objects, to_be_added, other_tier):
"""
Check whether specified blockdevs are already in the other tier.
:param managed_objects: the result of a GetManagedObjects call
:type managed_objects: dict of str * dict
:param to_be_added: the blockdevs to be added
:type to_be_added: frozenset of str
:param other_tier: the other tier, not the one requested
:type other_tier: _stratisd_constants.BlockDevTiers
:raises StratisCliInUseOtherTierError: if blockdevs are used by other tier
"""
pools_to_blockdevs = _generate_pools_to_blockdevs(
managed_objects, to_be_added, other_tier
)
if pools_to_blockdevs != {}:
raise StratisCliInUseOtherTierError(
pools_to_blockdevs,
BlockDevTiers.DATA
if other_tier == BlockDevTiers.CACHE
else BlockDevTiers.CACHE,
)
def _check_same_tier(pool_name, managed_objects, to_be_added, this_tier):
"""
Check whether specified blockdevs are already in the tier to which they
are to be added.
:param managed_objects: the result of a GetManagedObjects call
:type managed_objects: dict of str * dict
:param to_be_added: the blockdevs to be added
:type to_be_added: frozenset of str
:param this_tier: the tier requested
:type this_tier: _stratisd_constants.BlockDevTiers
:raises StratisCliPartialChangeError: if blockdevs are used by this tier
:raises StratisCliInUseSameTierError: if blockdevs are used by this tier in another pool
"""
pools_to_blockdevs = _generate_pools_to_blockdevs(
managed_objects, to_be_added, this_tier
)
owned_by_current_pool = frozenset(pools_to_blockdevs.get(pool_name, []))
owned_by_other_pools = dict(
(pool, devnodes)
for pool, devnodes in pools_to_blockdevs.items()
if pool_name != pool
)
if owned_by_current_pool != frozenset():
raise StratisCliPartialChangeError(
"add to cache" if this_tier == BlockDevTiers.CACHE else "add to data",
to_be_added.difference(owned_by_current_pool),
to_be_added.intersection(owned_by_current_pool),
)
if owned_by_other_pools != {}:
raise StratisCliInUseSameTierError(owned_by_other_pools, this_tier)
def _fetch_locked_pools_property(proxy):
"""
Fetch the LockedPools property from stratisd.
:param proxy: proxy to the top object in stratisd
:return: a representation of unlocked devices
:rtype: dict
:raises StratisCliEngineError:
"""
# pylint: disable=import-outside-toplevel
from ._data import Manager
return Manager.Properties.LockedPools.Get(proxy)
class PoolActions:
"""
Pool actions.
"""
@staticmethod
def create_pool(namespace):
"""
Create a stratis pool.
:raises StratisCliEngineError:
:raises StratisCliIncoherenceError:
:raises StratisCliNameConflictError:
"""
# pylint: disable=import-outside-toplevel
from ._data import Manager, ObjectManager, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
pool_name = namespace.pool_name
names = pools(props={"Name": pool_name}).search(managed_objects)
blockdevs = frozenset([os.path.abspath(p) for p in namespace.blockdevs])
if list(names) != []:
raise StratisCliNameConflictError("pool", pool_name)
_check_opposite_tier(managed_objects, blockdevs, BlockDevTiers.CACHE)
_check_same_tier(pool_name, managed_objects, blockdevs, BlockDevTiers.DATA)
clevis_info = get_clevis_info(namespace)
((changed, (_, _)), return_code, message) = Manager.Methods.CreatePool(
proxy,
{
"name": pool_name,
"redundancy": (True, 0),
"devices": blockdevs,
"key_desc": (
(True, namespace.key_desc)
if namespace.key_desc is not None
else (False, "")
),
"clevis_info": (False, ("", ""))
if clevis_info is None
else (True, clevis_info),
},
)
if return_code != StratisdErrors.OK: # pragma: no cover
raise StratisCliEngineError(return_code, message)
if not changed: # pragma: no cover
raise StratisCliIncoherenceError(
(
"Expected to create the specified pool %s but stratisd "
"reports that it did not actually create the pool"
)
% pool_name
)
@staticmethod
def init_cache(namespace): # pylint: disable=too-many-locals
"""
Initialize the cache of an existing stratis pool.
:raises StratisCliEngineError:
:raises StratisCliIncoherenceError:
"""
# pylint: disable=import-outside-toplevel
from ._data import MODev, ObjectManager, Pool, devs, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
pool_name = namespace.pool_name
(pool_object_path, _) = next(
pools(props={"Name": pool_name})
.require_unique_match(True)
.search(managed_objects)
)
blockdevs = frozenset([os.path.abspath(p) for p in namespace.blockdevs])
_check_opposite_tier(managed_objects, blockdevs, BlockDevTiers.DATA)
_check_same_tier(pool_name, managed_objects, blockdevs, BlockDevTiers.CACHE)
((changed, devs_added), return_code, message) = Pool.Methods.InitCache(
get_object(pool_object_path), {"devices": blockdevs}
)
if return_code != StratisdErrors.OK:
raise StratisCliEngineError(return_code, message)
if not changed or len(devs_added) < len(blockdevs): # pragma: no cover
devnodes_added = [
MODev(info).Devnode()
for (object_path, info) in devs(
props={"Pool": pool_object_path}
).search(ObjectManager.Methods.GetManagedObjects(proxy, {}))
if object_path in devs_added
]
raise StratisCliIncoherenceError(
(
"Expected to add the specified blockdevs as cache "
"to pool %s but stratisd reports that it did not actually "
"add some or all of the blockdevs requested; devices "
"added: (%s), devices requested: (%s)"
)
% (namespace.pool_name, ", ".join(devnodes_added), ", ".join(blockdevs))
)
@staticmethod
def list_pools(namespace):
"""
List all stratis pools.
"""
# pylint: disable=import-outside-toplevel
from ._data import MOPool, ObjectManager, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
pools_with_props = [
MOPool(info) for objpath, info in pools().search(managed_objects)
]
def physical_size_triple(mopool):
"""
Calculate the triple to display for total physical size.
The format is total/used/free where the display value for each
member of the tuple are chosen automatically according to justbytes'
configuration.
:param mopool: an object representing all the properties of the pool
:type mopool: MOPool
:returns: a string to display in the resulting list output
:rtype: str
"""
total_physical_size = Range(mopool.TotalPhysicalSize())
total_physical_used = get_property(mopool.TotalPhysicalUsed(), Range, None)
return size_triple(total_physical_size, total_physical_used)
def properties_string(mopool):
"""
Make a string encoding some important properties of the pool
:param mopool: an object representing all the properties of the pool
:type mopool: MOPool
:param props_map: a map of properties returned by GetAllProperties
:type props_map: dict of str * any
"""
def gen_string(has_property, code):
"""
Generate the display string for a boolean property
:param has_property: whether the property is true or false
:type has_property: bool or NoneType
:param str code: the code to generate the string for
:returns: the generated string
:rtype: str
"""
if has_property == True: # pylint: disable=singleton-comparison
prefix = " "
elif has_property == False: # pylint: disable=singleton-comparison
prefix = "~"
# This is only going to occur if the engine experiences an
# error while calculating a property or if our code has a bug.
else: # pragma: no cover
prefix = "?"
return prefix + code
props_list = [(mopool.HasCache(), "Ca"), (mopool.Encrypted(), "Cr")]
return ",".join(gen_string(x, y) for x, y in props_list)
format_uuid = (
(lambda mo_uuid: mo_uuid) if namespace.unhyphenated_uuids else to_hyphenated
)
def alert_string(mopool):
"""
Alert information to display, if any
:param mopool: object to access pool properties
:returns: string w/ alert information, "" if no alert
:rtype: str
"""
action_availability = PoolActionAvailability.from_str(
mopool.AvailableActions()
)
error_codes = action_availability.pool_maintenance_error_codes()
return ", ".join(sorted(str(code) for code in error_codes))
tables = [
(
mopool.Name(),
physical_size_triple(mopool),
properties_string(mopool),
format_uuid(mopool.Uuid()),
alert_string(mopool),
)
for mopool in pools_with_props
]
print_table(
["Name", "Total Physical", "Properties", "UUID", "Alerts"],
sorted(tables, key=lambda entry: entry[0]),
["<", ">", ">", ">", "<"],
)
@staticmethod
def destroy_pool(namespace):
"""
Destroy a stratis pool.
If no pool exists, the method succeeds.
:raises StratisCliEngineError:
:raises StratisCliIncoherenceError:
"""
# pylint: disable=import-outside-toplevel
from ._data import Manager, ObjectManager, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
(pool_object_path, _) = next(
pools(props={"Name": namespace.pool_name})
.require_unique_match(True)
.search(managed_objects)
)
((changed, _), return_code, message) = Manager.Methods.DestroyPool(
proxy, {"pool": pool_object_path}
)
# This branch can be covered, since the engine will return an error
# if the pool can not be destroyed because it has filesystems.
if return_code != StratisdErrors.OK:
raise StratisCliEngineError(return_code, message)
if not changed: # pragma: no cover
raise StratisCliIncoherenceError(
(
"Expected to destroy | |
return 'You have no current accepted jobs in your queue.'
raw_jobs = [j.job_tag for j in job_records]
responder = ListOutputResponder(cmd_object.cmdspec,
parse_sms_message_body,
single_item_noun='accepted job',
plural_item_noun='accepted jobs')
return responder.generate(command_object=cmd_object,
                              record_list=raw_jobs,
render_callback=render_job_line,
filter_callback=filter_job_tag,
dialog_context=dlg_context,
dialog_engine=dlg_engine,
service_registry=service_registry)
def generate_list_in_progress_jobs(cmd_object, dlg_engine, dlg_context, service_registry, **kwargs):
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
job_records = list_in_progress_jobs_for_courier(dlg_context.courier.id, session, db_svc)
if not len(job_records):
return 'You have no in-progress jobs in your queue.'
raw_jobs = [j.job_tag for j in job_records]
responder = ListOutputResponder(cmd_object.cmdspec,
parse_sms_message_body,
single_item_noun='in-progress job',
plural_item_noun='in-progress jobs')
return responder.generate(command_object=cmd_object,
record_list=raw_jobs,
render_callback=render_job_line,
filter_callback=filter_job_tag,
dialog_context=dlg_context,
dialog_engine=dlg_engine,
service_registry=service_registry)
def list_user_messages(courier_id, session, db_svc):
UserMessage = db_svc.Base.classes.messages
UserHandle = db_svc.Base.classes.user_handle_maps
data = []
for msg, handle in session.query(UserMessage, UserHandle).filter(UserMessage.from_user == UserHandle.user_id,
UserMessage.to_user == courier_id,
UserMessage.deleted_ts == None).all():
msg_rec = {
'from_user': msg.from_user,
'from_user_handle': handle.handle,
'msg_data': msg.msg_data,
'msg_timestamp': msg.created_ts
}
data.append(msg_rec)
return data
def render_message_line(index, message):
datestr = message['msg_timestamp'].strftime("%Y-%m-%d, %H:%M")
if index:
return '#%d:[%s]\n at [%s] \n > %s' % (index, message['from_user_handle'], datestr, message['msg_data'])
return '[%s]\n at [%s] \n > %s' % (message['from_user_handle'], datestr, message['msg_data'])
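# Example rendering with hypothetical values: index 2, handle "roadrunner",
# timestamp 2021-05-01 14:30 and text "on my way" produces
# "#2:[roadrunner]\n at [2021-05-01, 14:30] \n > on my way"; an index of 0
# (or None) omits the "#2:" prefix.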
def filter_job_tag(job_tag, filter_expression):
if filter_expression in job_tag:
return True
return False
def filter_message(msg_record, filter_expression):
return True
def generate_list_messages(cmd_object, dlg_engine, dlg_context, service_registry, **kwargs):
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
user_messages = list_user_messages(dlg_context.courier.id, session, db_svc)
if not len(user_messages):
return 'You have no messages in your inbox.'
responder = ListOutputResponder(cmd_object.cmdspec, parse_sms_message_body)
return responder.generate(command_object=cmd_object,
record_list=user_messages,
render_callback=render_message_line,
filter_callback=filter_message,
dialog_context=dlg_context,
dialog_engine=dlg_engine,
service_registry=service_registry)
def render_bid_line(index, bid_record):
if index:
return 'bid #%d: %s' % (index, bid_record['job_tag'])
return bid_record['job_tag']
def filter_bid(bid_record, filter_expression):
return True
def generate_list_my_bids(cmd_object, dlg_engine, dlg_context, service_registry, **kwargs):
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
user_bids = list_user_bids(dlg_context.courier.id, session, db_svc)
if not len(user_bids):
return 'You have no active bids.'
responder = ListOutputResponder(cmd_object.cmdspec, parse_sms_message_body)
return responder.generate(command_object=cmd_object,
record_list=user_bids,
render_callback=render_bid_line,
filter_callback=filter_bid,
dialog_context=dlg_context,
dialog_engine=dlg_engine,
service_registry=service_registry)
def render_job_line(index, job_tag):
if index:
return '# %d: %s' % (index, job_tag)
return job_tag
def generate_list_open_jobs(cmd_object, dlg_engine, dlg_context, service_registry, **kwargs):
print('#--- Generating open job listing...')
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
job_records = list_available_jobs(session, db_svc)
if not len(job_records):
return 'No open jobs found.'
raw_jobs = [j.job_tag for j in job_records]
responder = ListOutputResponder(cmd_object.cmdspec, parse_sms_message_body)
return responder.generate(command_object=cmd_object,
record_list=raw_jobs,
render_callback=render_job_line,
filter_callback=filter_job_tag,
dialog_context=dlg_context,
dialog_engine=dlg_engine,
service_registry=service_registry)
def pfx_command_lookup_abbrev(prefix_cmd, dlg_engine, dlg_context, service_registry):
abbreviation = prefix_cmd.name
expansion = ABBREVIATIONS.get(abbreviation, 'No such abbreviation registered.')
return expansion
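# Usage sketch (hypothetical abbreviation): with '#' registered as this prefix,
# texting "#eta" replies with whatever expansion ABBREVIATIONS maps "eta" to,
# or the fallback message when the abbreviation is not registered.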
def pfx_command_sethandle(prefix_cmd, dlg_engine, dlg_context, service_registry):
handle = prefix_cmd.name
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
try:
handle_entry = lookup_live_courier_handle(dlg_context.courier.id, session, db_svc)
if handle_entry:
if handle_entry.handle == handle:
return
else:
handle_entry.expired_ts = datetime.datetime.now()
session.add(handle_entry)
session.flush()
# ignore the mode; the command name is the user handle
payload = {
'user_id': dlg_context.courier.id,
'handle': handle,
'is_public': True,
'created_ts': datetime.datetime.now()
}
new_handle_entry = ObjectFactory.create_user_handle(db_svc, **payload)
session.add(new_handle_entry)
session.flush()
return ' '.join([
'Your user handle has been set to %s.' % new_handle_entry.handle,
'A system user can send a message to your log by texting:',
'@%s, space, and the message.' % handle
])
except Exception as err:
print('exception of type %s thrown: %s' % (err.__class__.__name__, str(err)))
session.rollback()
return 'There was an error creating your user handle. Please contact your administrator.'
def pfx_command_sendlog(prefix_cmd, dlg_engine, dlg_context, service_registry):
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
try:
if prefix_cmd.mode == 'simple':
return ' '.join([
"To send a message to a user's log,",
'text @<user handle>, space, and the message.'
])
elif prefix_cmd.mode == 'extended':
target_handle = prefix_cmd.name
message = prefix_cmd.body
to_courier = lookup_courier_by_handle(target_handle, session, db_svc)
if not to_courier:
return 'Courier with handle %s not found.' % target_handle
payload = {
'from_user': dlg_context.courier.id,
'to_user': to_courier.id,
'msg_type': 1,
'mime_type': 'text/plain',
'msg_data': message
}
log_record = ObjectFactory.create_user_log(db_svc, **payload)
session.add(log_record)
session.flush()
return 'Message sent.'
except Exception as err:
print('exception of type %s thrown: %s' % (err.__class__.__name__, str(err)))
session.rollback()
return 'There was an error sending your message. Please contact your administrator.'
def pfx_command_macro(prefix_cmd, dlg_engine, dlg_context, service_registry):
# mode is either 'define' or 'execute'
macro = None
db_svc = service_registry.lookup('postgres')
with db_svc.txn_scope() as session:
# in extended mode, a prefix command contains a name and a body,
# separated by the "defchar" found in the prefix's command spec
#
if prefix_cmd.mode == 'extended':
# TODO: filter out invalid command body strings / check for max length
payload = {
'user_id': dlg_context.courier.id,
'name': prefix_cmd.name,
'command_string': prefix_cmd.body
}
macro = ObjectFactory.create_macro(db_svc, **payload)
session.add(macro)
session.flush()
# in simple mode, a prefix command contains only the command name
elif prefix_cmd.mode == 'simple':
macro = lookup_macro(dlg_context.courier.id,
prefix_cmd.name,
session,
db_svc)
if not macro:
return 'No macro %s%s has been registered under your user ID.' % (prefix_cmd.cmdspec.command, prefix_cmd.name)
chained_command = parse_sms_message_body(macro.command_string)
return dlg_engine.reply_command(chained_command, dlg_context, service_registry)
    print('Courier %s created macro: %s' % (dlg_context.courier.id, macro))
return 'Command macro %s%s registered.' % (prefix_cmd.cmdspec.command, prefix_cmd.name)
SMSDialogContext = namedtuple('SMSDialogContext', 'courier source_number message')
class DialogEngine(object):
def __init__(self):
self.msg_dispatch_tbl = {}
self.generator_dispatch_tbl = {}
self.prefix_dispatch_tbl = {}
def register_cmd_spec(self, sms_command_spec, handler_func):
self.msg_dispatch_tbl[str(sms_command_spec)] = handler_func
def register_generator_cmd(self, generator_cmd_spec, handler_func):
self.generator_dispatch_tbl[str(generator_cmd_spec)] = handler_func
def register_prefix_cmd(self, prefix_spec, handler_func):
self.prefix_dispatch_tbl[str(prefix_spec)] = handler_func
def _reply_prefix_command(self, prefix_cmd, dialog_context, service_registry, **kwargs):
command = self.prefix_dispatch_tbl.get(str(prefix_cmd.cmdspec))
if not command:
return 'No handler registered in SMS DialogEngine for prefix command %s.' % prefix_cmd.cmdspec.command
return command(prefix_cmd, self, dialog_context, service_registry)
def _reply_generator_command(self, gen_cmd, dialog_context, service_registry, **kwargs):
list_generator = self.generator_dispatch_tbl.get(str(gen_cmd.cmdspec))
if not list_generator:
return 'No handler registered in SMS DialogEngine for generator command %s.' % gen_cmd.cmdspec.command
return list_generator(gen_cmd, self, dialog_context, service_registry)
def _reply_sys_command(self, sys_cmd, dialog_context, service_registry, **kwargs):
handler = self.msg_dispatch_tbl.get(str(sys_cmd.cmdspec))
if not handler:
return 'No handler registered in SMS DialogEngine for system command %s.' % sys_cmd.cmdspec.command
return handler(sys_cmd, dialog_context, service_registry)
def reply_command(self, command_input, dialog_context, service_registry, **kwargs):
# command types: generator, syscommand, prefix
if command_input.cmd_type == 'prefix':
return self._reply_prefix_command(command_input.cmd_object, dialog_context, service_registry)
elif command_input.cmd_type == 'syscommand':
return self._reply_sys_command(command_input.cmd_object, dialog_context, service_registry)
elif command_input.cmd_type == 'generator':
return self._reply_generator_command(command_input.cmd_object, dialog_context, service_registry)
else:
raise Exception('Unrecognized command input type %s.' % command_input.cmd_type)
def sms_responder_func(input_data, service_objects, **kwargs):
db_svc = service_objects.lookup('postgres')
sms_svc = service_objects.lookup('sms')
engine = DialogEngine()
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['bid'], handle_bid_for_job)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['acc'], handle_accept_job)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['dt'], handle_job_details)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['ert'], handle_en_route)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['can'], handle_cancel_job)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['fin'], handle_job_finished)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['911'], handle_emergency)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['hlp'], handle_help)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['on'], handle_on_duty)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['off'], handle_off_duty)
engine.register_cmd_spec(SMS_SYSTEM_COMMAND_SPECS['mdel'], handle_delete_user_message)
engine.register_generator_cmd(SMS_GENERATOR_COMMAND_SPECS['my'], generate_list_my_accepted_jobs)
engine.register_generator_cmd(SMS_GENERATOR_COMMAND_SPECS['awd'], generate_list_my_awarded_jobs)
engine.register_generator_cmd(SMS_GENERATOR_COMMAND_SPECS['opn'], generate_list_open_jobs)
engine.register_generator_cmd(SMS_GENERATOR_COMMAND_SPECS['prg'], generate_list_in_progress_jobs)
engine.register_generator_cmd(SMS_GENERATOR_COMMAND_SPECS['msg'], generate_list_messages)
engine.register_generator_cmd(SMS_GENERATOR_COMMAND_SPECS['bst'], generate_list_my_bids)
engine.register_prefix_cmd(SMS_PREFIX_COMMAND_SPECS['$'], pfx_command_macro)
engine.register_prefix_cmd(SMS_PREFIX_COMMAND_SPECS['@'], pfx_command_sendlog)
engine.register_prefix_cmd(SMS_PREFIX_COMMAND_SPECS['&'], pfx_command_sethandle)
engine.register_prefix_cmd(SMS_PREFIX_COMMAND_SPECS['#'], pfx_command_lookup_abbrev)
print('###------ SMS payload:')
source_number = input_data['From']
raw_message_body = input_data['Body']
print('###')
print('###------ Received raw message "%s" from mobile number [%s].' % (raw_message_body, source_number))
print('###')
mobile_number = normalize_mobile_number(source_number)
courier = None
with db_svc.txn_scope() as session:
courier = lookup_courier_by_mobile_number(mobile_number, session, db_svc)
if not courier:
print('Courier with mobile number %s not found.' % mobile_number)
sms_svc.send_sms(mobile_number, REPLY_NOT_IN_NETWORK)
return core.TransformStatus(ok_status('SMS event received', is_valid_command=False))
session.expunge(courier)
dlg_context = SMSDialogContext(courier=courier, source_number=mobile_number, message=unquote_plus(raw_message_body))
try:
command_input = parse_sms_message_body(raw_message_body)
print('#----- Resolved command: %s' % str(command_input))
response = engine.reply_command(command_input, dlg_context, service_objects)
sms_svc.send_sms(mobile_number, response)
return core.TransformStatus(ok_status('SMS event received', is_valid_command=True, command=command_input))
except IncompletePrefixCommand as err:
print('Error data: %s' % err)
print('#----- Incomplete prefix command: in message body: %s' % raw_message_body)
sms_svc.send_sms(mobile_number, SMS_PREFIX_COMMAND_SPECS[raw_message_body].definition)
return core.TransformStatus(ok_status('SMS event received', is_valid_command=False))
except UnrecognizedSMSCommand as err:
print('Error data: %s' % err)
print('#----- Unrecognized system command: in message body: %s' % raw_message_body)
sms_svc.send_sms(mobile_number, compile_help_string())
return core.TransformStatus(ok_status('SMS event received', is_valid_command=False))
def poll_job_status_func(input_data, service_objects, **kwargs):
db_svc = service_objects.lookup('postgres')
JobStatus = db_svc.Base.classes.job_status
tag = input_data['job_tag']
status = None
with db_svc.txn_scope() as session:
result = session.query(JobStatus).filter(and_(JobStatus.job_tag == tag, JobStatus.expired_ts == None)).one()
status = result.status
return core.TransformStatus(ok_status('poll request', job_tag=tag, job_status=status))
def update_job_log_func(input_data, service_objects, **kwargs):
raise snap.TransformNotImplementedException('update_job_log_func')
def couriers_by_status_func(input_data, service_objects, **kwargs):
status = input_data['status']
courier_records = None
db_svc = service_objects.lookup('postgres')
with db_svc.txn_scope() as session:
courier_records = lookup_couriers_by_status(status, session, db_svc)
return core.TransformStatus(ok_status('couriers by status', courier_status=status, couriers=courier_records))
def update_courier_status_func(input_data, service_objects, **kwargs):
new_status = input_data['status']
courier_id = input_data['id']
db_svc = service_objects.lookup('postgres')
with db_svc.txn_scope() as session:
courier = lookup_courier_by_id(courier_id, session, db_svc)
if courier.duty_status == new_status:
did_update = False
else:
courier.duty_status = new_status
session.add(courier)
did_update = True
return core.TransformStatus(ok_status('update courier status', updated=did_update, id=courier_id, duty_status=new_status))
def open_bidding_func(input_data, service_objects, **kwargs):
'''Open a bidding window on the job with the job_tag passed in the inputs
'''
bidding_window = None
db_svc = service_objects.lookup('postgres')
with db_svc.txn_scope() as session:
input_record = prepare_bid_window_record(input_data, session, db_svc)
bidding_window = ObjectFactory.create_bidding_window(db_svc, **input_record)
session.add(bidding_window)
session.flush()
return core.TransformStatus(ok_status('open bidding window',
id=bidding_window.id,
job_id=bidding_window.job_id,
job_tag=bidding_window.job_tag))
def close_bidding_func(input_data, service_objects, **kwargs):
'''Close the bidding window with the ID passed in the inputs
'''
bidding_window = None
db_svc = service_objects.lookup('postgres')
4
)
if self.options['verbose']: print('WGAN Discriminator Input Created')
# Discriminator layers for the projection of the text embedding
self.d_projector = nn.Sequential(
nn.Linear(in_features=self.options['caption_vec_len'], out_features=self.options['t_dim']),
nn.BatchNorm1d(num_features=self.options['t_dim']),
nn.LeakyReLU(negative_slope=self.options['leak'], inplace=True)
)
if self.options['verbose']: print('WGAN Discriminator Projector Created')
# Discriminator layers for the concatenation of the text embedding and image
# Vanilla GAN uses sigmoid output
# WGAN does not use sigmoid output
self.discriminator_output = nn.Sequential(
# Dim: batch_size x (num_df * 16 + t_dim) x 4 x 4
nn.Conv2d(self.options['num_df'] * 16 + self.options['t_dim'], 1, 4, 1, 0, bias=False),
# Dim: batch_size x 1 x 1 x 1
)
if self.options['verbose']: print('WGAN Discriminator Output Created')
if self.options['verbose']: print('WGAN Discriminator Created\n')
# WGAN Discriminator Forward Propagation
def forward(self, images, text_embed):
images_intermediate = self.discriminator_input(images)
projected_embed = self.d_projector(text_embed)
# Repeat the projected dimensions and change the permutations
# Dim: batch_size x 256 -> batch_size x 256 x 4 x 4
replicated_embed = projected_embed.repeat(4, 4, 1, 1).permute(2, 3, 0, 1)
latent_vec = torch.cat([images_intermediate, replicated_embed], 1)
output = self.discriminator_output(latent_vec)
# Squeeze dims: batch_size x 1 x 1 x 1 -> batch_size
output = output.view(-1, 1).squeeze(1)
return output
# Loss of WGAN Discriminator
# L_D = L(y_r) - L(y_f)
# Loss of WGAN with CLS (caption loss sensitivity - makes sure captions match the image)
# L_D = L(y_r) - L(y_w) - L(y_f)
def loss(self, real_img_passed, fake_img_passed, wrong_img_passed=None):
d_real_loss = real_img_passed.mean()
d_fake_loss = fake_img_passed.mean()
d_loss = d_real_loss - d_fake_loss
# option to use conditional loss sensitivity
if self.options['use_cls']:
d_wrong_loss = wrong_img_passed.mean()
d_loss -= d_wrong_loss
return d_loss, d_real_loss, d_fake_loss, d_wrong_loss
return d_loss, d_real_loss, d_fake_loss
# Calculate the gradient for the D and returns D loss
def calc_grad_d(self, real_img_passed, fake_img_passed, wrong_img_passed=None):
if self.options['use_cls']:
d_loss, d_real_loss, d_fake_loss, d_wrong_loss = self.loss(real_img_passed, fake_img_passed, wrong_img_passed)
d_wrong_loss.backward(self.grad_factor)
else:
d_loss, d_real_loss, d_fake_loss = self.loss(real_img_passed, fake_img_passed)
d_real_loss.backward(self.neg_grad_factor)
d_fake_loss.backward(self.grad_factor)
return d_loss
'''
BEGAN MODEL
https://arxiv.org/pdf/1703.10717.pdf
https://github.com/sunshineatnoon/Paper-Implementations/blob/master/BEGAN/models.py
https://github.com/carpedm20/BEGAN-pytorch
'''
class BeganGenerator(nn.Module):
def __init__(self, options):
super(BeganGenerator,self).__init__()
self.options = options
# Dimensions of the latent vector (concatenate original embedding vector and noise vector)
self.options['concat_dim'] = self.options['t_dim'] + self.options['z_dim']
if self.options['verbose']: print('\nCreating BEGAN Generator...')
# Input Dim: batch_size x (caption_vec_len)
self.text_embedder = nn.Linear(self.options['caption_vec_len'], self.options['t_dim'])
        # Dim: batch_size x (t_dim)
# Input Dim: batch_size x (concat_dim)
self.g_embedder = nn.Linear(self.options['concat_dim'], self.options['num_gf'] * 8 * 8)
# Dim: batch_size x (num_gf * 8 * 8)
if self.options['verbose']: print('BEGAN Generator Embedder Created')
self.generator = nn.Sequential(
# Input Dim: batch_size x (num_gf) x 8 x 8
upsample_conv_block(self.options['num_gf'], self.options['num_gf']),
# Dim: batch_size x (num_gf) x 16 x 16
upsample_conv_block(self.options['num_gf'], self.options['num_gf']),
# Dim: batch_size x (num_gf) x 32 x 32
upsample_conv_block(self.options['num_gf'], self.options['num_gf']),
# Dim: batch_size x (num_gf) x 64 x 64
upsample_conv_block(self.options['num_gf'], self.options['num_gf']),
# Dim: batch_size x (num_gf) x 128 x 128
nn.Conv2d(self.options['num_gf'], self.options['num_gf'], kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_gf) x 128 x 128
nn.Conv2d(self.options['num_gf'], self.options['num_gf'], kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_gf) x 128 x 128
nn.Conv2d(self.options['num_gf'], self.options['image_channels'], kernel_size=3, stride=1, padding=1),
# Dim: batch_size x (num_channels) x 128 x 128
nn.Tanh()
)
if self.options['verbose']: print('BEGAN Generator Created\n')
def forward(self, text_embed, noise):
# Concatenate the projected embedding and the noise
X = self.text_embedder(text_embed)
X = torch.cat([X, noise], 1)
X = self.g_embedder(X)
X = X.view(X.size(0), self.options['num_gf'], 8, 8)
X = self.generator(X)
return X
class BeganDiscriminator(nn.Module):
def __init__(self, options):
super(BeganDiscriminator,self).__init__()
self.options = options
if self.options['verbose']: print('Creating BEGAN Discriminator...')
# Discriminator layers for the input of the image (encodes image)
self.d_encoder = nn.Sequential(
# Input Dim: batch_size x (num_channels) x 128 x 128
nn.Conv2d(self.options['image_channels'], self.options['num_df'], kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_df) x 128 x 128
conv_block(self.options['num_df'], self.options['num_df']),
# Dim: batch_size x (num_df) x 64 x 64
conv_block(self.options['num_df'], self.options['num_df'] * 2),
# Dim: batch_size x (num_df * 2) x 32 x 32
conv_block(self.options['num_df'] * 2, self.options['num_df'] * 3),
# Dim: batch_size x (num_df * 3) x 16 x 16
conv_block(self.options['num_df'] * 3, self.options['num_df'] * 4),
# Dim: batch_size x (num_df * 4) x 8 x 8
nn.Conv2d(self.options['num_df'] * 4, self.options['num_df'] * 4, kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_df * 4) x 8 x 8
nn.Conv2d(self.options['num_df'] * 4, self.options['num_df'] * 4, kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True)
# Dim: batch_size x (num_df * 4) x 8 x 8
)
if self.options['verbose']: print('BEGAN Discriminator Encoder Created')
self.d_embedder = nn.Sequential(
# Input Dim: batch_size x (num_df * 4 * 8 * 8)
nn.Linear(self.options['num_df'] * 4 * 8 * 8, self.options['began_hidden_size']),
# Dim: batch_size x (hidden_size)
nn.Linear(self.options['began_hidden_size'], self.options['num_df'] * 8 * 8)
# Dim: batch_size x (num_df * 8 * 8)
)
if self.options['verbose']: print('BEGAN Discriminator Embedder Created')
self.d_decoder = nn.Sequential(
# Input Dim: batch_size x (num_df) x 8 x 8
upsample_conv_block(self.options['num_df'], self.options['num_df']),
# Dim: batch_size x (num_df) x 16 x 16
upsample_conv_block(self.options['num_df'], self.options['num_df']),
# Dim: batch_size x (num_df) x 32 x 32
upsample_conv_block(self.options['num_df'], self.options['num_df']),
# Dim: batch_size x (num_df) x 64 x 64
upsample_conv_block(self.options['num_df'], self.options['num_df']),
# Dim: batch_size x (num_df) x 128 x 128
nn.Conv2d(self.options['num_df'], self.options['num_df'], kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_df) x 128 x 128
nn.Conv2d(self.options['num_df'], self.options['num_df'], kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_df) x 128 x 128
nn.Conv2d(self.options['num_df'], self.options['image_channels'], kernel_size=3, stride=1, padding=1),
# Dim: batch_size x (num_channels) x 128 x 128
nn.Tanh()
)
if self.options['verbose']: print('BEGAN Discriminator Decoder Created')
if self.options['verbose']: print('BEGAN Discriminator Created\n')
def forward(self, images):
X = self.d_encoder(images)
X = X.view(X.size(0), self.options['num_df'] * 4 * 8 * 8)
X = self.d_embedder(X)
X = X.view(X.size(0), self.options['num_df'], 8, 8)
X = self.d_decoder(X)
return X
'''
Conditional BEGAN Model
Based on paper
https://arxiv.org/pdf/1703.10717.pdf
https://github.com/sunshineatnoon/Paper-Implementations/blob/master/BEGAN/models.py
https://github.com/carpedm20/BEGAN-pytorch
https://github.com/taey16/CBEGAN
'''
# Unlike the other generator (which uses conv transpose), this generator uses conv and upsampling blocks
class CondBeganGenerator(nn.Module):
def __init__(self, options):
super(CondBeganGenerator, self).__init__()
self.options = options
# Dimensions of the latent vector (concatenate original embedding vector and noise vector)
self.options['concat_dim'] = self.options['t_dim'] + self.options['z_dim']
if self.options['verbose']: print('\nCreating CONDITIONAL BEGAN Generator...')
# Input Dim: batch_size x (caption_vec_len)
self.g_embedder = nn.Linear(self.options['caption_vec_len'], self.options['t_dim'])
# Dim: batch_size x (t_dim)
# Input Dim: batch_size x (concat_dim)
self.g_concat_embedder = nn.Linear(self.options['concat_dim'], self.options['num_gf'] * 8 * 8)
# Dim: batch_size x (num_gf * 8 * 8)
if self.options['verbose']: print('CONDITIONAL BEGAN Generator Embedder Created')
self.generator = nn.Sequential(
# Input Dim: batch_size x (num_gf) x 8 x 8
upsample_conv_block(self.options['num_gf'], self.options['num_gf']),
# Dim: batch_size x (num_gf) x 16 x 16
upsample_conv_block(self.options['num_gf'], self.options['num_gf']),
# Dim: batch_size x (num_gf) x 32 x 32
upsample_conv_block(self.options['num_gf'], self.options['num_gf']),
# Dim: batch_size x (num_gf) x 64 x 64
upsample_conv_block(self.options['num_gf'], self.options['num_gf']),
# Dim: batch_size x (num_gf) x 128 x 128
nn.Conv2d(self.options['num_gf'], self.options['num_gf'], kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_gf) x 128 x 128
nn.Conv2d(self.options['num_gf'], self.options['num_gf'], kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_gf) x 128 x 128
nn.Conv2d(self.options['num_gf'], self.options['image_channels'], kernel_size=3, stride=1, padding=1),
# Dim: batch_size x (num_channels) x 128 x 128
nn.Tanh()
)
if self.options['verbose']: print('CONDITIONAL BEGAN Generator Created\n')
def forward(self, text_embed, noise):
# Concatenate the projected embedding and the noise
X = self.g_embedder(text_embed)
X = torch.cat([X, noise], 1)
X = self.g_concat_embedder(X)
X = X.view(X.size(0), self.options['num_gf'], 8, 8)
X = self.generator(X)
return X
class CondBeganDiscriminator(nn.Module):
def __init__(self, options):
super(CondBeganDiscriminator, self).__init__()
self.options = options
# Initialize began k value to 0
self.began_k = 0
if self.options['verbose']: print('Creating COND BEGAN Discriminator...')
# Discriminator layers for the input of the image
self.d_encoder = nn.Sequential(
# Input Dim: batch_size x (num_channels) x 128 x 128
nn.Conv2d(self.options['image_channels'], self.options['num_df'], kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_df) x 128 x 128
conv_block(self.options['num_df'], self.options['num_df']),
# Dim: batch_size x (num_df) x 64 x 64
conv_block(self.options['num_df'], self.options['num_df'] * 2),
# Dim: batch_size x (num_df * 2) x 32 x 32
conv_block(self.options['num_df'] * 2, self.options['num_df'] * 3),
# Dim: batch_size x (num_df * 3) x 16 x 16
conv_block(self.options['num_df'] * 3, self.options['num_df'] * 4),
# Dim: batch_size x (num_df * 4) x 8 x 8
nn.Conv2d(self.options['num_df'] * 4, self.options['num_df'] * 4, kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True),
# Dim: batch_size x (num_df * 4) x 8 x 8
nn.Conv2d(self.options['num_df'] * 4, self.options['num_df'] * 4, kernel_size=3, stride=1, padding=1),
nn.ELU(inplace=True)
# Dim: batch_size x (num_df * 4) x 8 x 8
)
if self.options['verbose']: print('COND BEGAN Discriminator Input Created')
# Discriminator layers the embedding of the hidden vector
# Input Dim: batch_size x (num_df * 4 * 8 * 8)
self.d_embedder = nn.Linear(self.options['num_df'] * 4 * 8 * 8, self.options['began_hidden_size'])
# Dim: batch_size x (hidden_size)
# Embedder for the input text vector
# Input Dim: batch_size x (caption_vec_len)
self.text_embedder = nn.Linear(self.options['caption_vec_len'], self.options['t_dim'])
# Dim: batch_size x (t_dim)
# Embedder for the combined hidden vector and conditional text caption vector
# Input Dim: batch_size x (hidden_size + t_dim)
self.d_combined_embedder = nn.Linear(self.options['began_hidden_size'] + self.options['t_dim'], self.options['num_df'] * 8 * 8)
# Dim: batch_size x (num_df * 8 * 8)
if self.options['verbose']: print('COND BEGAN Discriminator Projector Created')
# Discriminator upsample layers for the concatenation of the text embedding and image to output an image
# Reconstructs the image
self.d_decoder = nn.Sequential(
# Input Dim: batch_size x (num_df) x 8 x 8
upsample_conv_block(self.options['num_df'], self.options['num_df']),
# Dim: batch_size x (num_df) x 16 x 16
upsample_conv_block(self.options['num_df'], self.options['num_df']),
"""
Core VM-related functionality for executing programs
Timer and VM are implemented here.
"""
from collections import namedtuple
from copy import copy
from typing import Tuple, List, ByteString, Iterable, Union, Dict
from random import randrange
from eightdad.core.bytecode import (
PATTERN_IXII,
PATTERN_INNN,
PATTERN_IIII,
PATTERN_IXKK,
PATTERN_IXYI,
PATTERN_IXYN
)
from eightdad.core.bytecode import Chip8Instruction
from eightdad.core.video import VideoRam, DEFAULT_DIGITS
DEFAULT_EXECUTION_START = 0x200
INSTRUCTION_LENGTH = 2 # 2 bytes, 16 bits
class Timer:
"""
Simple timer that decrements at 60hz, per the spec
"""
def __init__(self, hz_decrement_rate: float = 60.0):
self.decrement_threshold = 1.0 / hz_decrement_rate
self.elapsed = 0.0
self.value = 0
def tick(self, dt: float) -> None:
"""
Advance the timer by dt seconds.
Assumes only small values of dt will be sent, doesn't apply multiple
decrements per dt. Values of dt larger than decrement threshold may
cause issues.
:param dt: how large a time step to apply
:return:
"""
self.elapsed += dt
if self.elapsed >= self.decrement_threshold:
self.elapsed -= self.decrement_threshold
if self.value > 0:
self.value -= 1
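# Usage sketch: a frontend stepping at roughly 60 frames per second would call
# timer.tick(1 / 60.0) once per frame; each call that crosses the decrement
# threshold lowers timer.value by one until it reaches zero.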
def upper_hex(src: Union[int, Iterable[int]]) -> str:
"""
Return uppercase hex of an int or byte source
    :param src: an integer, or an iterable of integers such as a bytes object
    :return: the value as uppercase hex, minus the leading '0x' prefix
"""
if isinstance(src, int):
s = hex(src)[2:].upper()
if len(s) < 2:
return "0" + s
return s
return "".join((upper_hex(i) for i in src))
VMState = namedtuple(
'VMState', [
'program_counter',
'next_instruction',
'v_registers',
'timers',
'stack',
'keys'
],
)
def report_state(state: VMState):
pc = state.program_counter
next_instr = state.next_instruction
print(
f"== state ==\n"
f"PC : 0x{upper_hex(next_instr)} @ 0x{upper_hex(pc)}\n"
f"stack : {state.stack}\n"
f"registers: {state.v_registers}\n"
f"keys : {state.keys}\n"
)
class Chip8VirtualMachine:
def load_to_memory(self, data: ByteString, location: int) -> None:
"""
Load given data to a specific location in memory.
Data must be a ByteString or support the buffer protocol.
Raises IndexError if data is too long to be inserted at the passed
location, ie would extend past the end of memory.
:param data: a bytestring that supports buffer protocol
:param location: where in memory to load to
:return:
"""
if location < 0:
raise IndexError("Location must be positive")
end = location + len(data)
if end > len(self.memory):
raise IndexError("Passed data extends past the end of memory")
# if it doesn't implement buffer protocol, error
try:
view = memoryview(data)
except Exception as e:
raise TypeError(
"data must be a ByteString or otherwise support the "
"buffer protocol."
) from e
self.memory[location:end] = view
def load_digits(self, source: List[bytes], location: int) -> None:
"""
Load hex digit data into a location memory.
The largest digit size is used as the digit length.
:param source: the list of bytes objects to load from.
:param location: where in memory to load the data
:return: None
"""
self.digits_memory_location = location
self.digit_length = max(map(len, source))
current_start = location
for digit_data in source:
self.load_to_memory(digit_data, current_start)
current_start += self.digit_length
def __init__(
self,
display_size: Tuple[int, int] = (64, 32),
display_wrap: bool = False,
memory_size: int = 4096,
execution_start: int = DEFAULT_EXECUTION_START,
digit_start: int = 0x0,
ticks_per_frame: int = 20,
frames_per_second: int = 30,
video_ram_type: type = VideoRam
):
"""
Build a Chip-8 VM.
The video_ram_type is intended for passing subclasses that keep
track of tiling or other platform-specific display features to
improve drawing performance.
Avoiding redraw of unchanged pixels is the major intended
usecase for this feature. For example, a curses frontend
using braille characters as pixel blocks is one possibility.
:param display_size: A pair of values for the screen type.
:param display_wrap: whether drawing wraps
:param memory_size: how big RAM should be
:param execution_start: where to start execution
:param digit_start: where digits should start in ram
:param ticks_per_frame: how many instructions execute per frame
:param frames_per_second: how many frames/sec execute
:param video_ram_type: a VideoRam class or subclass
"""
# initialize display-related functionality
self.memory = bytearray(memory_size)
width, height = display_size
if not isinstance(video_ram_type, type) \
or not issubclass(video_ram_type, VideoRam):
raise TypeError(
f"VideoRam subclass expected,"
f" not a {type(video_ram_type)}")
self.video_ram = video_ram_type(width, height, display_wrap)
self.digits_memory_location, self.digit_length = 0, 0
self.load_digits(DEFAULT_DIGITS, digit_start)
# set up execution-related state
self.program_counter = execution_start
self.program_increment = 0 # how much PC will be incremented by
self.i_register = 0
self.v_registers = bytearray(16)
self.call_stack = []
# key-related variables
self.waiting_for_key = False
self.waiting_register = None
self._keystates = [False] * 16 # Whether each key is down
self._delay_timer = Timer()
self._sound_timer = Timer()
self.ticks_per_frame = ticks_per_frame
self.frames_per_second = frames_per_second
self.ticks_per_second = ticks_per_frame * frames_per_second
self.tick_length = 1.0 / self.ticks_per_second
self.instruction_parser = Chip8Instruction()
self.instruction_unhandled = False
@property
def delay_timer(self):
return self._delay_timer.value
@delay_timer.setter
def delay_timer(self, value):
self._delay_timer.value = value
@property
def sound_timer(self):
return self._sound_timer.value
@sound_timer.setter
def sound_timer(self, value):
self._sound_timer.value = value
def press(self, key: int) -> None:
self._keystates[key] = True
def pressed(self, key: int) -> bool:
return self._keystates[key]
def release(self, key: int) -> None:
self._keystates[key] = False
def dump_state(self) -> VMState:
"""
Return a named tuple representing VM state.
It is the responsibility of the frontend implementation to
render this object into a form readable to the user.
:return: named tuple of registers, keys, and stack
"""
pc = self.program_counter
mem = self.memory
# get the instruction as big endian int, skip struct
next_instruction = mem[pc] << 8
next_instruction += mem[pc + 1]
return VMState(
pc,
next_instruction,
copy(self.v_registers),
{'delay_timer': self.delay_timer, 'sound_timer': self.sound_timer},
copy(self.call_stack),
copy(self._keystates)
)
def skip_next_instruction(self):
"""
Sugar to skip instructions.
"""
self.program_increment += INSTRUCTION_LENGTH
def handle_ixii(self):
"""
Execute F and E type nibble instructions.
This includes:
- keypress handling
- setting timers
- some manipulation of I register (sprites, addition)
- bulk register save/load to/from location I in memory
"""
type_nibble = self.instruction_parser.type_nibble
lo_byte = self.instruction_parser.lo_byte
x = self.instruction_parser.x
if type_nibble == 0xF:
if lo_byte == 0x07:
self.v_registers[x] = self._delay_timer.value
elif lo_byte == 0x0A: # Enter wait state until the VM gets a keypress
self.waiting_for_key = True
self.waiting_register = x
# Timers
elif lo_byte == 0x15:
self._delay_timer.value = self.v_registers[x]
elif lo_byte == 0x18:
self._sound_timer.value = self.v_registers[x]
# I manipulation
elif lo_byte == 0x1E:
self.i_register += self.v_registers[x]
elif lo_byte == 0x29: # Fx29, I = Address of digit for value in Vx
digit = self.v_registers[x]
self.i_register = self.digits_memory_location +\
(digit * self.digit_length)
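                # For example, assuming the default 5-byte-per-digit font is
                # loaded at address 0x0, Vx = 0xA would set I to 10 * 5 = 50
                # (0x32).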
# Store BCD of Vx at I, I+1, I+2
elif lo_byte == 0x33:
reg_value = self.v_registers[x]
ones = reg_value % 10
tens = ((reg_value - ones) % 100) // 10
hundreds = reg_value // 100
self.memory[self.i_register] = hundreds
self.memory[self.i_register + 1] = tens
self.memory[self.i_register + 2] = ones
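                # Worked example: Vx = 254 gives ones = 4, tens = 5,
                # hundreds = 2, so memory[I..I+2] holds 2, 5, 4.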
elif lo_byte == 0x55: # save registers to memory starting at I
i = self.i_register
for register in range(0, x + 1):
self.memory[i + register] = self.v_registers[register]
elif lo_byte == 0x65: # Load register from memory starting at I
i = self.i_register
for register in range(0, x + 1):
self.v_registers[register] = self.memory[i + register]
else:
self.instruction_unhandled = True
elif type_nibble == 0xE:
# one of the keypress skip instructions
# get the key for value in VX
key_pressed = self._keystates[self.v_registers[x]]
if lo_byte == 0xA1:
if not key_pressed:
self.skip_next_instruction()
elif lo_byte == 0x9E:
# skip next instruction if key in register X is pressed
if key_pressed:
self.skip_next_instruction()
else:
self.instruction_unhandled = True
else:
self.instruction_unhandled = True
def _handle_innn(self) -> None:
"""
Execute address-related instructions
This includes such jumps, calls, and setting the I register.
:return: None
"""
nnn = self.instruction_parser.nnn
type_nibble = self.instruction_parser.type_nibble
if type_nibble == 0xA: # set I to nnn
self.i_register = nnn
elif type_nibble == 0x2: # call instruction
self.program_increment = 0
self.stack_call(nnn)
elif type_nibble == 0x1 or type_nibble == 0xB: # jump instruction
self.program_increment = 0
self.program_counter = nnn
            if type_nibble == 0xB:  # Bnnn additionally offsets the jump target by V0
self.program_counter += self.v_registers[0]
else:
self.instruction_unhandled = True
def handle_ixkk(self) -> None:
x = self.instruction_parser.x
kk = self.instruction_parser.kk
type_nibble = self.instruction_parser.type_nibble
if type_nibble == 0x3:
if self.v_registers[x] == kk:
self.skip_next_instruction()
elif type_nibble == 0x4:
if self.v_registers[x] != kk:
self.skip_next_instruction()
elif type_nibble == 0x6:
self.v_registers[x] = kk
elif type_nibble == 0x7:
self.v_registers[x] = (self.v_registers[x] + kk) % 0x100
elif type_nibble == 0xC:
            self.v_registers[x] = randrange(0, 0x100) & kk  # random byte in [0, 255] ANDed with kk
else:
self.instruction_unhandled = True
#!/usr/bin/python3
#--------------------------------------------------------------------
# Automation of Stickney Observatory pt 2 -
# auto_dome.py - A program written to allow the use of a user -
# interface to control the dome's motion. -
# Author(s): <NAME>, <NAME>, <NAME>, <NAME> -
# THIS CODE DOES NOT WORK. IT NEEDS TO BE FIXED AND DEBUGGED. -
#--------------------------------------------------------------------
# Import modules for our program
import time
from datetime import datetime
import RPi.GPIO as GPIO
import smbus
import sys
import os
# UI imports
from flask import Flask, request, abort, jsonify, url_for, redirect, render_template
import json
# Create an instance of Flask and tie it to this python program
app = Flask(__name__)
# Set our notch counter to start at 0
notches = 0
# GPIO Setup:
# Set warnings to false
GPIO.setwarnings(False)
# Set up mode for GPIO
GPIO.setmode(GPIO.BOARD)
# Buttons Setup
GPIO.setup(8, GPIO.IN, pull_up_down = GPIO.PUD_UP) # counter clockwise button
GPIO.setup(10, GPIO.IN, pull_up_down = GPIO.PUD_UP) # home button
GPIO.setup(22, GPIO.IN, pull_up_down = GPIO.PUD_UP) # e stop button
GPIO.setup(7, GPIO.IN, pull_up_down = GPIO.PUD_UP) # clockwise button
# IR Sensors Setup
GPIO.setup(36, GPIO.IN, pull_up_down = GPIO.PUD_UP) # notch count IR sensor
GPIO.setup(35, GPIO.IN, pull_up_down = GPIO.PUD_UP) # home IR sensor
# Relays Setup
pin_list = [11,13,15,16,18] # create list of relay GPIO pins
# Set up GPIO pin list for relays
for i in pin_list:
GPIO.setup(i, GPIO.OUT)
power_relays = (11,16,18) # allows for simultaneous pin manipulation, where 'power' means dome movement
directional_relays = (13,15) # allows for simultaneous pin manipulation, where 'directional' means setup of relays
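# Passing a tuple of channels to GPIO.output switches them together, e.g.
# GPIO.output(power_relays, GPIO.HIGH) drives pins 11, 16 and 18 high in one call.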
#GPIO.setup(11, GPIO.OUT) # R0,R00 relay
#GPIO.setup(13, GPIO.OUT) # R1 relay
#GPIO.setup(15, GPIO.OUT) # R2 relay
#GPIO.setup(16, GPIO.OUT) # R3 relay
#GPIO.setup(18, GPIO.OUT) # R4 relay
# Functions:
# IR beam break sensor notch count function (returns notch count and does not print anything)
def notch_counter(input):
input = GPIO.input(36)
    direction_relay = GPIO.input(13) # Emma: I added this variable by choosing an arbitrary relay that goes high or low depending on the direction, so we can count based on the direction the dome is moving
    global notches
    if input != 0 and direction_relay == False: # the sensor is in a notch hole, and the dome is moving clockwise
        notches = notches + 1
    elif input != 0 and direction_relay == True: # the sensor is in a notch hole, and the dome is moving counter clockwise
        notches = notches - 1 # thereby subtracting from the notch count
GPIO.add_event_detect(36, GPIO.BOTH, callback = notch_counter) # waits for the sensor to sense a change in input
# When e stop button is pressed, set the relays to low and restart the code
def emergency_stop(e_stop):
e_stop = GPIO.input(22)
print("Stopping all systems.")
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
print("Restarting the program.")
python = sys.executable
# os.execl(python, python, *sys.argv)
os.execl(python, os.path.abspath(__file__), *sys.argv)
# os.system('python "~/Dome/button_dome.py"')
sys.exit(0)
GPIO.add_event_detect(22, GPIO.FALLING, callback = emergency_stop)
# Error handling with buttons:
def error_handle():
# Read the current button states from the inputs configured above (pull-ups: a pressed button reads GPIO.LOW, i.e. False)
button_status_c = GPIO.input(7) # clockwise button
button_status_cc = GPIO.input(8) # counter clockwise button
button_status_home = GPIO.input(10) # home button
e_stop = GPIO.input(22) # e stop button
if button_status_c == False and button_status_cc == False: # Error handling. User cannot push both buttons. Dome will not move.
print("Not allowed to press both clockwise and counterclockwise. Not moving.")
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
if button_status_c == False and button_status_home == False: # Error handling. User cannot push both buttons. Dome will not move.
print("Not allowed to press both clockwise and home. Not moving.")
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
if button_status_cc == False and button_status_home == False: # Error handling. User cannot push both buttons. Dome will not move.
print("Not allowed to press both counterclockwise and home. Not moving.")
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
if button_status_c == False and e_stop == False: # Error handling. User cannot push both buttons. Dome will not move.
print("Not allowed to press both clockwise and e stop. Not moving.")
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
if button_status_cc == False and e_stop == False: # Error handling. User cannot push both buttons. Dome will not move.
print("Not allowed to press both counter clockwise and e stop. Not moving.")
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
if button_status_home == False and e_stop == False: # Error handling. User cannot push both buttons. Dome will not move.
print("Not allowed to press both home and e stop. Not moving.")
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
# Motor functions:
# Dome clockwise movement
def go_clockwise():
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.HIGH) # set relays R0,R00,R3,R4 to high simultaneously
# print("Moving clockwise.")
# Dome counter clockwise movement
def go_counter_clockwise():
GPIO.output(directional_relays, GPIO.HIGH) # set relays R1 and R2 to high simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.HIGH) # set relays R0,R00,R3,R4 to high simultaneously
# print("Moving counter clockwise.")
# Stop the motor
def stop_motor():
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
# print("Stopping motor.")
# Get our azimuth input
def get_azimuth(user_input):
user_azimuth = user_input * (494/360) # 494 notches per full 360 degree rotation of the dome
return user_azimuth
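# Illustrative worked example of the conversion above: get_azimuth(90) = 90 * (494/360) = 123.5
# notches; go_location() below truncates with int(), so a 90 degree request targets notch 123 of 494.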
# Go to our location given user input from an html page
@app.route("/next-location", methods=['POST', 'GET'])
def go_location():
go_location_string = '<html>\n <head><title>Welcome to the Stickney Observatory Dome Control</title>\n </head>\n <body>\n \n Hello user. What azimuth is the telescope going to?\n <form action="http://domecontrol.bennington.edu:5000/next-location" method="POST">\n <br>\n Azimuth:\n <input type="number" name="azimuth">\n <input type="submit" name="go" value="Go">\n <br>\n Do you want to go home?\n <input type="submit" name="home" value="Go Home">\n <br>\n Are you finished using the dome?\n <input type="submit" name="shutdown" value="Shut Down System">\n <br>\n Emergency Stop\n <input type="submit" name="estop" value="Emergency Stop">\n <br>\n \n </form>\n </body>\n </html>'
if request.method == 'POST':
if request.form['go'] == 'Go':
print(request.form)
try:
azimuth_value = int(request.form['azimuth'])
if azimuth_value > 360:
bad = "That azimuth is too big, please go back and try again."
return bad
elif azimuth_value <= 0:
bad = "That azimuth is too small, please go back and try again."
return bad
else:
azimuth = int(get_azimuth(azimuth_value))
global notches
if notches < azimuth:
while notches < azimuth: # notch_counter() increments notches as the dome turns
go_clockwise()
elif notches > azimuth:
while notches > azimuth: # notch_counter() decrements notches as the dome turns
go_counter_clockwise()
# notches == azimuth: either the dome was already at the target or one of the loops above just finished
stop_motor()
print("Dome moved and motor stopped.")
return go_location_string
except ValueError:
bad = "You gave me a string, not an integer. Please go back and try again."
return bad
elif request.form['home'] == 'Go Home':
# home_sensor = GPIO.input(35)
# while home_sensor != 0:
# go_clockwise()
# stop_motor()
print("test test test")
return "At home position."
elif request.form['shutdown'] == 'Shut Down System':
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
return "Stopping motor. Shutting down."
elif request.form['estop'] == 'Emergency Stop':
GPIO.output(directional_relays, GPIO.LOW) # set relays R1 and R2 to low simultaneously
time.sleep(0.1) # allow for directional relays to switch before power_relays
GPIO.output(power_relays, GPIO.LOW) # set relays R0,R00,R3,R4 to low simultaneously
return "Everything has shut down."
else:
print("crap")
return "crap"
# Go home
#def go_home():
# if request.form['home'] == 'Go Home':
# home_sensor = GPIO.input(35)
# while home_sensor != 0:
# go_clockwise()
# print | |
last_x >= _first_x
roots += 1
hP.roots = roots
fork_.append(hP) # P-connected hPs will be converted to segments at each _fork
if _x > x: # x overlap between hP and next P: hP is buffered for next scan_P_, else hP included in a blob segment
buff_.append(hP)
elif roots != 1:
frame, _frame = form_blob(hP, frame, _frame, typ) # segment is terminated and packed into its blob
_x0 = _x + 1 # = first x of next _P
buff_ += _buff_ # _buff_ is likely empty
P_.append([P, fork_]) # P with no overlap to next _P is extended to hP and buffered for next-line scan_P_
return P_, buff_, hP_, frame, _frame # hP_ and buff_ contain only remaining _Ps, with _x => next x
# ---------- scan_P_() end ------------------------------------------------------------------------------------------
def form_segment(hP, frame, _frame, typ):
# Add hP to higher-line segment or convert it into new segment; merge blobs
_P, fork_ = hP
ave_x = (_P.L - 1) // 2 # extra-x L = L-1 (1x in L)
if len(fork_) == 1 and fork_[0].roots == 1: # hP has one fork: hP.fork_[0], and that fork has one root: hP
fork = fork_[0]
# hP is merged into higher-line blob segment (Pars, roots, ave_x, xD, Py_, coords) at hP.fork_[0]:
fork.accum_params(_P.params()) # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
fork.roots = 0 # roots
xd = ave_x - fork.ave_x
fork.ave_x = ave_x # ave_x
fork.xD += xd # xD for seg normalization and orientation, or += |dx| for curved yL?
fork.abs_xD += abs(xd)
fork.xd_.append(xd)
fork.e_.append(_P) # Py_: vertical buffer of Ps merged into seg
fork.extend_coords(_P.coords()) # min_x, max_x
hP = fork # replace segment with including fork's segment
else: # new segment is initialized:
hP = pattern(typ_str[typ] + 'segment', (_P.min_x, _P.max_x), (y - rng - 1, -1), sign=_P.sign) # new instance of pattern class
hP.accum_params(_P.params()) # initialize params with _P's params, etc.
hP.roots = 0 # init roots
hP.fork_ = fork_ # init fork_
hP.ave_x = ave_x # ave_x
hP.xD = 0 # xD
hP.abs_xD = 0
hP.xd_ = [0] # xd_ of corresponding Py_
hP.e_.append(_P) # Py_
if not fork_: # if no fork_: initialize blob
blob = pattern(typ_str[typ] + 'blob', (_P.min_x, _P.max_x), (y - rng - 1, -1), sign=hP.sign)
blob.xD = 0
blob.abs_xD = 0
blob.Ly = 0
blob.remaining_roots = 1
else: # else merge into fork's blob
blob = fork_[0].blob
hP.blob = blob # merge hP into blob
blob.e_.append(hP) # segment is buffered into blob's root_
if len(fork_) > 1: # merge blobs of all forks
if fork_[0].roots == 1: # if roots == 1
frame, _frame = form_blob(fork_[0], frame, _frame, typ, 1) # terminate seg of 1st fork
for fork in fork_[1:len(fork_)]: # merge blobs of other forks into blob of 1st fork
if fork.roots == 1:
frame, _frame = form_blob(fork, frame, _frame, typ, 1)
if not fork.blob is blob: # if not already merged/same
blobs = fork.blob
blob.accum_params(blobs.params()) # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
blob.extend_coords(blobs.coords()) # coord = [min_x, max_x, min_y, max_y]
blob.xD += blobs.xD
blob.abs_xD += blobs.abs_xD
blob.Ly += blobs.Ly
blob.remaining_roots += blobs.remaining_roots
for seg in blobs.e_:
if not seg is fork:
seg.blob = blob # blobs in other forks are references to blob in the first fork
blob.e_.append(seg) # buffer of merged root segments
fork.blob = blob
blob.e_.append(fork)
blob.remaining_roots -= 1
return hP, frame, _frame
# ---------- form_segment() end -----------------------------------------------------------------------------------------
def term_segment_(hP_, frame, _frame, typ):
# merge segments of last line into their blobs
while hP_:
hP, frame, _frame = form_segment(hP_.popleft(), frame, _frame, typ)
frame, _frame = form_blob(hP, frame, _frame, typ)
return hP_, frame, _frame
# ---------- term_segment_() end ----------------------------------------------------------------------------------------
def form_blob(term_seg, frame, _frame, typ, y_carry=0):
# Terminated segment is merged into continued or initialized blob (all connected segments)
blob = term_seg.blob
term_seg.max_y = y - rng - 1 - y_carry # set max_y <- current y; y_carry: min elevation of term_seg over current hP
blob.accum_params(term_seg.params()) # params = [L, I, Dx, Dy, Dt, Mx, My, Mt, Alt0, Alt1, Alt2, Alt3, Alt4]
blob.extend_coords(term_seg.coords()) # coords = [min_x, max_x, min_y, max_y]
blob.xD += term_seg.xD # ave_x angle, to evaluate blob for re-orientation
blob.abs_xD += term_seg.abs_xD
blob.Ly += len(term_seg.e_) # Ly = number of slices in segment
blob.remaining_roots += term_seg.roots - 1 # reference to term_seg is already in blob[9]
term_seg.terminated = True
if blob.remaining_roots == 0: # if remaining_roots == 0: blob is terminated and packed in frame
# sort indices of blob's segments by their min and max coordinates
blob.sorted_min_x_idx_, blob.sorted_max_x_idx_, blob.sorted_min_y_idx_, blob.sorted_max_y_idx_, \
blob.sorted_min_x_, blob.sorted_max_x_, blob.sorted_min_y_, blob.sorted_max_y_ = sort_segments(blob.e_)
# terminated blob is packed into frame
if term_seg.core == 'm' and term_seg.sign == 0: # is negative mblob
frame[typ].accum_params(term_seg.params())
frame[typ].xD += blob.xD # ave_x angle, to evaluate frame for re-orientation
frame[typ].abs_xD += blob.abs_xD
frame[typ].Ly += blob.Ly # +Ly
delattr(blob, 'remaining_roots')
blob.terminated = True
frame[typ].e_.append(blob)
# initialize tsegment with terminated blob:
blob.fork_ = []
if t > t_rng * 2:
frame[typ], _frame[typ] = scan_blob_(blob, frame[typ], _frame[typ])
return frame, _frame
# ---------- form_blob() end ----------------------------------------------------------------------------------------
def sort_segments(e_):
" sort indices by min|max coords of segments"
sorted_idx_min_x_ = sorted(range(len(e_)), key=lambda i: e_[i].min_x) # segment indices sorted by min_x
sorted_idx_max_x_ = sorted(range(len(e_)), key=lambda i: e_[i].max_x) # segment indices sorted by max_x
sorted_idx_min_y_ = sorted(range(len(e_)), key=lambda i: e_[i].min_y) # segment indices sorted by min_y
sorted_idx_max_y_ = sorted(range(len(e_)), key=lambda i: e_[i].max_y) # segment indices sorted by max_y
# the following lists are for zoning olp segs
return sorted_idx_min_x_, sorted_idx_max_x_, sorted_idx_min_y_, sorted_idx_max_y_, \
[e_[sorted_idx_min_x_[i]].min_x for i in range(len(e_))], \
[e_[sorted_idx_max_x_[i]].max_x for i in range(len(e_))], \
[e_[sorted_idx_min_y_[i]].min_y for i in range(len(e_))], \
[e_[sorted_idx_max_y_[i]].max_y for i in range(len(e_))]
# ---------- sort_segments() end --------------------------------------------------------------------------------------
def scan_blob_(blob, frame, _frame):
# blob scans pri_blobs in higher frame, combines overlapping blobs into tblobs
# Select only overlapping pri_blobs in _frame for speed?
debug_idx_ = []
olp_idx_ = find_overlaps(_frame, blob.coords())
if len(olp_idx_) != 0:
pri_tseg_ = _frame.e_ # list of same type pri_tsegs
for olp_idx in olp_idx_:
pri_tseg = pri_tseg_[olp_idx]
pri_blob = pri_tseg.e_[-1]
if pri_blob.sign == blob.sign: # Check sign
olp_min_x = max(pri_blob.min_x, blob.min_x)
olp_max_x = min(pri_blob.max_x, blob.max_x)
olp_min_y = max(pri_blob.min_y, blob.min_y)
olp_max_y = min(pri_blob.max_y, blob.max_y)
if scan_segment_(blob, pri_blob, [olp_min_x, olp_max_x, olp_min_y, olp_max_y]):
pri_tseg.roots += 1
blob.fork_.append(pri_tseg)
debug_idx_.append(olp_idx)
# For Debugging --------------------------------------------------------------
# Print selected blob formed in frame at t > t_rng * 2 and all of its overlapping blobs in the previous frame
global olp_debug, debug_case
if olp_debug and t > t_rng * 2 and len(debug_idx_) != 0:
if debug_case == output_at_case:
filtered_pri_frame = np.array([[[127] * 4] * X] * Y)
rebuild_blob('./images/', 0, blob, filtered_pri_frame, 1)
for i in debug_idx_:
rebuild_blob('./images/olp_', i, _frame.e_[i].e_[-1], filtered_pri_frame, 1)
olp_debug = False
else:
debug_case += 1
# ----------------------------------------------------------------------------
return frame, _frame
# ---------- scan_blob_() end ---------------------------------------------------------------------------------------
def scan_segment_(blob, pri_blob, bounding_box):
# scans segments for overlap
# choose only segments inside olp to check:
idx = find_overlaps(blob, bounding_box)
pri_idx = find_overlaps(pri_blob, bounding_box)
for i in idx:
seg = blob.e_[i]
olp_idx_ = np.intersect1d(find_overlaps(pri_blob, seg.coords()), pri_idx)
if len(olp_idx_) != 0:
pri_seg_ = pri_blob.e_
for olp_idx in olp_idx_:
pri_seg = pri_seg_[olp_idx]
olp_min_y = max(pri_seg.min_y, seg.min_y) # olp_min/max_y indicates
olp_max_y = min(pri_seg.max_y, seg.max_y) # potentially overlapping Ps
olp_P_idx_stop = olp_max_y - seg.min_y + 1
olp_P_idx = olp_min_y - seg.min_y
olp_pri_P_idx = olp_min_y - pri_seg.min_y
while olp_P_idx < olp_P_idx_stop:
P = seg.e_[olp_P_idx]
pri_P = pri_seg.e_[olp_pri_P_idx]
if P.min_x <= pri_P.max_x and P.max_x >= pri_P.min_x:
return True
olp_P_idx += 1
olp_pri_P_idx += 1
return False
# ---------- scan_segment_() end ------------------------------------------------------------------------------------
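# Note: scan_segment_ returns True as soon as any P of the current blob shares at least one
# x-column with a P of the prior-frame blob on the same line within the candidate bounding box,
# i.e. the two blobs overlap in (x, y); otherwise it returns False after all candidate segments
# have been checked.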
def find_overlaps(obj, bounding_box):
# Search for boundaries of sorted pri_blobs that overlap boundaries of input blob
N = len(obj.e_)
min_x, max_x, min_y, max_y = bounding_box
# find_olp_index(a_, first_index, last_index, target, right_olp):
_min_x_idx = find_olp_index(obj.sorted_min_x_, 0, N, max_x, 1)
_max_x_idx = find_olp_index(obj.sorted_max_x_, 0, N, min_x, 0)
_min_y_idx = find_olp_index(obj.sorted_min_y_, 0, N, max_y, 1)
_max_y_idx = find_olp_index(obj.sorted_max_y_, 0, N, min_y, 0)
| |
import os
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
import pickle
import ROLO_utils as util
class YOLO_TF:
fromfile = None
tofile_img = 'test/output.jpg'
tofile_txt = 'test/output.txt'
imshow = True
filewrite_img = False
filewrite_txt = False
disp_console = True
weights_file = '/home/marc/ROLO/3rd party_upgrade/weights/YOLO_small.ckpt'  # the space in the path needs no shell-style escaping inside a Python string
alpha = 0.1
threshold = 0.08
iou_threshold = 0.5
num_class = 20
num_box = 2
grid_size = 7
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
w_img, h_img = [352, 240]
num_feat = 4096
num_predict = 6 # final output of LSTM 6 loc parameters
num_heatmap = 1024
def __init__(self, argvs=[]):
self.argv_parser(argvs)
self.build_networks()
if self.fromfile is not None: self.detect_from_file(self.fromfile)
def argv_parser(self, argvs):
for i in range(1, len(argvs), 2):
if argvs[i] == '-fromfile': self.fromfile = argvs[i + 1]
if argvs[i] == '-tofile_img': self.tofile_img = argvs[i + 1]; self.filewrite_img = True
if argvs[i] == '-tofile_txt': self.tofile_txt = argvs[i + 1]; self.filewrite_txt = True
if argvs[i] == '-imshow':
if argvs[i + 1] == '1':
self.imshow = True
else:
self.imshow = False
if argvs[i] == '-disp_console':
if argvs[i + 1] == '1':
self.disp_console = True
else:
self.disp_console = False
def build_networks(self):
if self.disp_console: print("Building YOLO_small graph...")
self.x = tf.placeholder('float32', [None, 448, 448, 3])
self.conv_1 = self.conv_layer(1, self.x, 64, 7, 2)
self.pool_2 = self.pooling_layer(2, self.conv_1, 2, 2)
self.conv_3 = self.conv_layer(3, self.pool_2, 192, 3, 1)
self.pool_4 = self.pooling_layer(4, self.conv_3, 2, 2)
self.conv_5 = self.conv_layer(5, self.pool_4, 128, 1, 1)
self.conv_6 = self.conv_layer(6, self.conv_5, 256, 3, 1)
self.conv_7 = self.conv_layer(7, self.conv_6, 256, 1, 1)
self.conv_8 = self.conv_layer(8, self.conv_7, 512, 3, 1)
self.pool_9 = self.pooling_layer(9, self.conv_8, 2, 2)
self.conv_10 = self.conv_layer(10, self.pool_9, 256, 1, 1)
self.conv_11 = self.conv_layer(11, self.conv_10, 512, 3, 1)
self.conv_12 = self.conv_layer(12, self.conv_11, 256, 1, 1)
self.conv_13 = self.conv_layer(13, self.conv_12, 512, 3, 1)
self.conv_14 = self.conv_layer(14, self.conv_13, 256, 1, 1)
self.conv_15 = self.conv_layer(15, self.conv_14, 512, 3, 1)
self.conv_16 = self.conv_layer(16, self.conv_15, 256, 1, 1)
self.conv_17 = self.conv_layer(17, self.conv_16, 512, 3, 1)
self.conv_18 = self.conv_layer(18, self.conv_17, 512, 1, 1)
self.conv_19 = self.conv_layer(19, self.conv_18, 1024, 3, 1)
self.pool_20 = self.pooling_layer(20, self.conv_19, 2, 2)
self.conv_21 = self.conv_layer(21, self.pool_20, 512, 1, 1)
self.conv_22 = self.conv_layer(22, self.conv_21, 1024, 3, 1)
self.conv_23 = self.conv_layer(23, self.conv_22, 512, 1, 1)
self.conv_24 = self.conv_layer(24, self.conv_23, 1024, 3, 1)
self.conv_25 = self.conv_layer(25, self.conv_24, 1024, 3, 1)
self.conv_26 = self.conv_layer(26, self.conv_25, 1024, 3, 2)
self.conv_27 = self.conv_layer(27, self.conv_26, 1024, 3, 1)
self.conv_28 = self.conv_layer(28, self.conv_27, 1024, 3, 1)
self.fc_29 = self.fc_layer(29, self.conv_28, 512, flat=True, linear=False)
self.fc_30 = self.fc_layer(30, self.fc_29, 4096, flat=False, linear=False)
# skip dropout_31
self.fc_32 = self.fc_layer(32, self.fc_30, 1470, flat=False, linear=True)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.weights_file)
if self.disp_console: print("Loading complete!" + '\n')
def conv_layer(self, idx, inputs, filters, size, stride):
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size, size, int(channels), filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size // 2
pad_mat = np.array([[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]])
inputs_pad = tf.pad(inputs, pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',
name=str(idx) + '_conv')
conv_biased = tf.add(conv, biases, name=str(idx) + '_conv_biased')
if self.disp_console: print(
' Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (
idx, size, size, stride, filters, int(channels)))
return tf.maximum(self.alpha * conv_biased, conv_biased, name=str(idx) + '_leaky_relu')
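# Note: tf.maximum(self.alpha * x, x) with alpha = 0.1 is a leaky ReLU; negative activations are
# scaled by 0.1 instead of being zeroed. The same pattern is reused for the non-linear fully
# connected layers below (hence the '_leaky_relu' tensor names).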
def pooling_layer(self, idx, inputs, size, stride):
if self.disp_console: print(
' Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx, size, size, stride))
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1], strides=[1, stride, stride, 1], padding='SAME',
name=str(idx) + '_pool')
def fc_layer(self, idx, inputs, hiddens, flat=False, linear=False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1] * input_shape[2] * input_shape[3]
inputs_transposed = tf.transpose(inputs, (0, 3, 1, 2))
inputs_processed = tf.reshape(inputs_transposed, [-1, dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim, hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
if self.disp_console: print(
' Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (
idx, hiddens, int(dim), int(flat), 1 - int(linear)))
if linear: return tf.add(tf.matmul(inputs_processed, weight), biases, name=str(idx) + '_fc')
ip = tf.add(tf.matmul(inputs_processed, weight), biases)
return tf.maximum(self.alpha * ip, ip, name=str(idx) + '_fc')
def detect_from_cvmat(self, img):
s = time.time()
self.h_img, self.w_img, _ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray(img_RGB)
inputs = np.zeros((1, 448, 448, 3), dtype='float32')
inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32, feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
self.show_results(img, self.result)
strtime = str(time.time() - s)
if self.disp_console: print('Elapsed time : ' + strtime + ' secs' + '\n')
def detect_from_file(self, filename):
if self.disp_console: print('Detect from ' + filename)
img = cv2.imread(filename)
# img = misc.imread(filename)
self.detect_from_cvmat(img)
def detect_from_crop_sample(self):
self.w_img = 640
self.h_img = 420
f = np.array(open('person_crop.txt', 'r').readlines(), dtype='float32')
inputs = np.zeros((1, 448, 448, 3), dtype='float32')
for c in range(3):
for y in range(448):
for x in range(448):
inputs[0, y, x, c] = f[c * 448 * 448 + y * 448 + x]
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32, feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
img = cv2.imread('person.jpg')
self.show_results(img, self.result)
def interpret_output(self, output):
probs = np.zeros((7, 7, 2, 20))
class_probs = np.reshape(output[0:980], (7, 7, 20))
scales = np.reshape(output[980:1078], (7, 7, 2))
boxes = np.reshape(output[1078:], (7, 7, 2, 4))
offset = np.transpose(np.reshape(np.array([np.arange(7)] * 14), (2, 7, 7)), (1, 2, 0))
boxes[:, :, :, 0] += offset
boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
boxes[:, :, :, 0:2] = boxes[:, :, :, 0:2] / 7.0
boxes[:, :, :, 2] = np.multiply(boxes[:, :, :, 2], boxes[:, :, :, 2])
boxes[:, :, :, 3] = np.multiply(boxes[:, :, :, 3], boxes[:, :, :, 3])
boxes[:, :, :, 0] *= self.w_img
boxes[:, :, :, 1] *= self.h_img
boxes[:, :, :, 2] *= self.w_img
boxes[:, :, :, 3] *= self.h_img
for i in range(2):
for j in range(20):
probs[:, :, i, j] = np.multiply(class_probs[:, :, j], scales[:, :, i])
filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs, axis=3)[
filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0: continue
for j in range(i + 1, len(boxes_filtered)):
if self.iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append([self.classes[classes_num_filtered[i]], boxes_filtered[i][0], boxes_filtered[i][1],
boxes_filtered[i][2], boxes_filtered[i][3], probs_filtered[i]])
return result
def show_results(self, img, results):
img_cp = img.copy()
if self.filewrite_txt:
ftxt = open(self.tofile_txt, 'w')
for i in range(len(results)):
x = int(results[i][1])
y = int(results[i][2])
w = int(results[i][3]) // 2
h = int(results[i][4]) // 2
if self.disp_console: print(
' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(
int(results[i][3])) + ',' + str(int(results[i][4])) + '], Confidence = ' + str(results[i][5]))
if self.filewrite_img or self.imshow:
cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
cv2.putText(img_cp, results[i][0] + ' : %.2f' % results[i][5], (x - w + 5, y - h - 7),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
if self.filewrite_txt:
ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h) + ',' + str(
results[i][5]) + '\n')
if self.filewrite_img:
if self.disp_console: print(' image file written : ' + self.tofile_img)
cv2.imwrite(self.tofile_img, img_cp)
if self.imshow:
cv2.imshow('YOLO_small detection', img_cp)
cv2.waitKey(0)
if self.filewrite_txt:
if self.disp_console: print(' txt file written : ' + self.tofile_txt)
ftxt.close()
def iou(self, box1, box2):
tb = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - max(box1[0] - 0.5 * box1[2],
box2[0] - 0.5 * box2[2])
lr = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - max(box1[1] - 0.5 * box1[3],
box2[1] - 0.5 * box2[3])
if tb < 0 or lr < 0:
intersection = 0
else:
intersection = tb * lr
return intersection / (box1[2] * box1[3] + box2[2] * box2[3] - intersection)
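# Illustrative worked example for iou(): boxes are [x_center, y_center, w, h]. For
# box1 = [0, 0, 1, 1] and box2 = [0.5, 0, 1, 1]: tb = 0.5, lr = 1.0, intersection = 0.5,
# union = 1*1 + 1*1 - 0.5 = 1.5, so iou = 1/3.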
# my addition
def createFolder(self, path):
if not os.path.exists(path):
os.makedirs(path)
def debug_location(self, img, location):
img_cp = img.copy()
x = int(location[1])
y = int(location[2])
w = int(location[3]) // 2
h = int(location[4]) // 2
cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5], (x - w + 5, y - h - 7),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
cv2.imshow('YOLO_small detection', img_cp)
cv2.waitKey(1)
def debug_locations(self, img, locations):
img_cp = img.copy()
for location in locations:
x = int(location[1])
y = int(location[2])
w = int(location[3]) // 2
h = int(location[4]) // 2
cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5], (x - w + 5, y - h - 7),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
cv2.imshow('YOLO_small detection', img_cp)
cv2.waitKey(1)
def debug_gt_location(self, img, location):
img_cp = img.copy()
x = int(location[0])
y = int(location[1])
w = int(location[2])
h = int(location[3])
cv2.rectangle(img_cp, (x, y), (x + w, y | |
args.target = target
args.enablement = enablement
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setNotificationsEnabled(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = setNotificationsEnabled_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def requestAccountPasswordReset(self, provider, identifier, locale):
"""
Parameters:
- provider
- identifier
- locale
"""
self.send_requestAccountPasswordReset(provider, identifier, locale)
self.recv_requestAccountPasswordReset()
def send_requestAccountPasswordReset(self, provider, identifier, locale):
self._oprot.writeMessageBegin('requestAccountPasswordReset', TMessageType.CALL, self._seqid)
args = requestAccountPasswordReset_args()
args.provider = provider
args.identifier = identifier
args.locale = locale
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_requestAccountPasswordReset(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = requestAccountPasswordReset_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def getCountries(self, countryGroup):
"""
Parameters:
- countryGroup
"""
self.send_getCountries(countryGroup)
return self.recv_getCountries()
def send_getCountries(self, countryGroup):
self._oprot.writeMessageBegin('getCountries', TMessageType.CALL, self._seqid)
args = getCountries_args()
args.countryGroup = countryGroup
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getCountries(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getCountries_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getCountries failed: unknown result")
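# Illustrative usage sketch: a Thrift-generated client such as this one is typically driven as
# below. The transport/protocol classes are standard Apache Thrift; the endpoint URL and the
# Client class name are assumptions for illustration, not taken from this file.
# from thrift.transport import THttpClient
# from thrift.protocol import TCompactProtocol
# transport = THttpClient.THttpClient('https://example.invalid/api')  # placeholder endpoint
# protocol = TCompactProtocol.TCompactProtocol(transport)
# client = Client(protocol)   # the generated Client class these send_/recv_ methods belong to
# transport.open()
# countries = client.getCountries(countryGroup=1)
# transport.close()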
def registerUserid(self, reqSeq, searchId):
"""
Parameters:
- reqSeq
- searchId
"""
self.send_registerUserid(reqSeq, searchId)
return self.recv_registerUserid()
def send_registerUserid(self, reqSeq, searchId):
self._oprot.writeMessageBegin('registerUserid', TMessageType.CALL, self._seqid)
args = registerUserid_args()
args.reqSeq = reqSeq
args.searchId = searchId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_registerUserid(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = registerUserid_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "registerUserid failed: unknown result")
def isUseridAvailable(self, searchId):
"""
Parameters:
- searchId
"""
self.send_isUseridAvailable(searchId)
return self.recv_isUseridAvailable()
def send_isUseridAvailable(self, searchId):
self._oprot.writeMessageBegin('isUseridAvailable', TMessageType.CALL, self._seqid)
args = isUseridAvailable_args()
args.searchId = searchId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_isUseridAvailable(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = isUseridAvailable_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "isUseridAvailable failed: unknown result")
def getProfile(self, syncReason):
"""
Parameters:
- syncReason
"""
self.send_getProfile(syncReason)
return self.recv_getProfile()
def send_getProfile(self, syncReason):
self._oprot.writeMessageBegin('getProfile', TMessageType.CALL, self._seqid)
args = getProfile_args()
args.syncReason = syncReason
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getProfile(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getProfile_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getProfile failed: unknown result")
def startUpdateVerification(self, region, carrier, phone, udidHash, deviceInfo, networkCode, locale, simInfo):
"""
Parameters:
- region
- carrier
- phone
- udidHash
- deviceInfo
- networkCode
- locale
- simInfo
"""
self.send_startUpdateVerification(region, carrier, phone, udidHash, deviceInfo, networkCode, locale, simInfo)
return self.recv_startUpdateVerification()
def send_startUpdateVerification(self, region, carrier, phone, udidHash, deviceInfo, networkCode, locale, simInfo):
self._oprot.writeMessageBegin('startUpdateVerification', TMessageType.CALL, self._seqid)
args = startUpdateVerification_args()
args.region = region
args.carrier = carrier
args.phone = phone
args.udidHash = udidHash
args.deviceInfo = deviceInfo
args.networkCode = networkCode
args.locale = locale
args.simInfo = simInfo
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_startUpdateVerification(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = startUpdateVerification_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "startUpdateVerification failed: unknown result")
def verifyPhoneNumber(self, sessionId, pinCode, udidHash, migrationPincodeSessionId, oldUdidHash):
"""
Parameters:
- sessionId
- pinCode
- udidHash
- migrationPincodeSessionId
- oldUdidHash
"""
self.send_verifyPhoneNumber(sessionId, pinCode, udidHash, migrationPincodeSessionId, oldUdidHash)
return self.recv_verifyPhoneNumber()
def send_verifyPhoneNumber(self, sessionId, pinCode, udidHash, migrationPincodeSessionId, oldUdidHash):
self._oprot.writeMessageBegin('verifyPhoneNumber', TMessageType.CALL, self._seqid)
args = verifyPhoneNumber_args()
args.sessionId = sessionId
args.pinCode = pinCode
args.udidHash = udidHash
args.migrationPincodeSessionId = migrationPincodeSessionId
args.oldUdidHash = oldUdidHash
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_verifyPhoneNumber(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = verifyPhoneNumber_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "verifyPhoneNumber failed: unknown result")
def updateAccountMigrationPincode(self, accountMigrationPincode):
"""
Parameters:
- accountMigrationPincode
"""
self.send_updateAccountMigrationPincode(accountMigrationPincode)
self.recv_updateAccountMigrationPincode()
def send_updateAccountMigrationPincode(self, accountMigrationPincode):
self._oprot.writeMessageBegin('updateAccountMigrationPincode', TMessageType.CALL, self._seqid)
args = updateAccountMigrationPincode_args()
args.accountMigrationPincode = accountMigrationPincode
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_updateAccountMigrationPincode(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = updateAccountMigrationPincode_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def resendPinCode(self, sessionId):
"""
Parameters:
- sessionId
"""
self.send_resendPinCode(sessionId)
self.recv_resendPinCode()
def send_resendPinCode(self, sessionId):
self._oprot.writeMessageBegin('resendPinCode', TMessageType.CALL, self._seqid)
args = resendPinCode_args()
args.sessionId = sessionId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_resendPinCode(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = resendPinCode_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def changeVerificationMethod(self, sessionId, method):
"""
Parameters:
- sessionId
- method
"""
self.send_changeVerificationMethod(sessionId, method)
return self.recv_changeVerificationMethod()
def send_changeVerificationMethod(self, sessionId, method):
self._oprot.writeMessageBegin('changeVerificationMethod', TMessageType.CALL, self._seqid)
args = changeVerificationMethod_args()
args.sessionId = sessionId
args.method = method
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_changeVerificationMethod(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = changeVerificationMethod_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "changeVerificationMethod failed: unknown result")
def finishUpdateVerification(self, sessionId):
"""
Parameters:
- sessionId
"""
self.send_finishUpdateVerification(sessionId)
self.recv_finishUpdateVerification()
def send_finishUpdateVerification(self, sessionId):
self._oprot.writeMessageBegin('finishUpdateVerification', TMessageType.CALL, self._seqid)
args = finishUpdateVerification_args()
args.sessionId = sessionId
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_finishUpdateVerification(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = finishUpdateVerification_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def getSettings(self, syncReason):
"""
Parameters:
- syncReason
"""
self.send_getSettings(syncReason)
return self.recv_getSettings()
def send_getSettings(self, syncReason):
self._oprot.writeMessageBegin('getSettings', TMessageType.CALL, self._seqid)
args = getSettings_args()
args.syncReason = syncReason
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getSettings(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getSettings_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
raise TApplicationException(TApplicationException.MISSING_RESULT, "getSettings failed: unknown result")
def blockRecommendation(self, reqSeq, id):
"""
Parameters:
- reqSeq
- id
"""
self.send_blockRecommendation(reqSeq, id)
self.recv_blockRecommendation()
def send_blockRecommendation(self, reqSeq, id):
self._oprot.writeMessageBegin('blockRecommendation', TMessageType.CALL, self._seqid)
args = blockRecommendation_args()
args.reqSeq = reqSeq
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_blockRecommendation(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = blockRecommendation_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def blockContact(self, reqSeq, id):
"""
Parameters:
- reqSeq
- id
"""
self.send_blockContact(reqSeq, id)
self.recv_blockContact()
def send_blockContact(self, reqSeq, id):
self._oprot.writeMessageBegin('blockContact', TMessageType.CALL, self._seqid)
args = blockContact_args()
args.reqSeq = reqSeq
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_blockContact(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = blockContact_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def unblockRecommendation(self, reqSeq, id):
"""
Parameters:
- reqSeq
- id
"""
self.send_unblockRecommendation(reqSeq, id)
self.recv_unblockRecommendation()
def send_unblockRecommendation(self, reqSeq, id):
self._oprot.writeMessageBegin('unblockRecommendation', TMessageType.CALL, self._seqid)
args = unblockRecommendation_args()
args.reqSeq = reqSeq
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_unblockRecommendation(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = unblockRecommendation_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def unblockContact(self, reqSeq, id, reference):
"""
Parameters:
- reqSeq
- id
- reference
"""
self.send_unblockContact(reqSeq, id, reference)
self.recv_unblockContact()
def send_unblockContact(self, reqSeq, id, reference):
self._oprot.writeMessageBegin('unblockContact', TMessageType.CALL, self._seqid)
args = unblockContact_args()
args.reqSeq = reqSeq
args.id = id
args.reference = reference
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_unblockContact(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = unblockContact_result()
result.read(iprot)
iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def findAndAddContactsByMid(self, reqSeq, mid, type, reference):
"""
Parameters:
- reqSeq
- mid
- type
- reference
"""
self.send_findAndAddContactsByMid(reqSeq, mid, type, reference)
return self.recv_findAndAddContactsByMid()
def send_findAndAddContactsByMid(self, reqSeq, mid, type, reference):
|