text
stringlengths 2
999k
|
|---|
#################
# credit https://github.com/ryanzhumich/editsql/blob/master/preprocess.py
import argparse
import os
import sys
import pickle
import json
import shutil
import sqlparse
from postprocess_eval import get_candidate_tables
def write_interaction(interaction_list, split, output_dir):
    """Serialize one split's interactions as JSON text plus a pickle file.

    The ``.json`` file holds each interaction as its own pretty-printed
    JSON object separated by newlines (the file as a whole is NOT a JSON
    array).  The ``.pkl`` file holds the same records after each turn's
    "sql" string has been re-shaped into ``[(token_list, results)]``.
    Note: the records are mutated in place before pickling.
    """
    json_path = os.path.join(output_dir, split + '.json')
    pkl_path = os.path.join(output_dir, split + '.pkl')

    with open(json_path, 'w') as fp:
        for record in interaction_list:
            json.dump(record, fp, indent=4)
            fp.write('\n')

    pickled = []
    for record in interaction_list:
        rebuilt_turns = []
        for turn in record["interaction"]:
            query = turn["sql"]
            # Tokenize every candidate query (currently always exactly one)
            # and pair it with an empty results list.
            turn["sql"] = [(candidate.split(), []) for candidate in [query]]
            rebuilt_turns.append(turn)
        record["interaction"] = rebuilt_turns
        pickled.append(record)

    with open(pkl_path, 'wb') as fp:
        pickle.dump(pickled, fp)
    return
def read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas_dict):
    """Load a ``tables.json`` schema file and index it by database id.

    Fills (and returns) the three dict arguments:
    ``schema_tokens[db_id]``  -> lower-cased surface forms
        ("table.column", "*", "table.*"),
    ``column_names[db_id]``   -> lower-cased bare column names,
    ``database_schemas_dict[db_id]`` -> the raw schema entry.
    """
    with open(database_schema_filename) as fp:
        all_schemas = json.load(fp)

    def get_schema_tokens(table_schema):
        surface_forms = []
        bare_names = []
        originals = table_schema['column_names_original']
        table_names = table_schema['table_names']  # kept for parity (unused)
        table_names_original = table_schema['table_names_original']
        for table_id, column_name in originals:
            if table_id < 0:
                # the special '*' pseudo-column has no owning table
                surface = column_name
            else:
                surface = '{}.{}'.format(table_names_original[table_id], column_name)
            surface_forms.append(surface.lower())
            bare_names.append(column_name.lower())
        # also expose "table.*" for every table
        surface_forms.extend('{}.*'.format(t.lower()) for t in table_names_original)
        return surface_forms, bare_names

    for table_schema in all_schemas:
        db_id = table_schema['db_id']
        database_schemas_dict[db_id] = table_schema
        schema_tokens[db_id], column_names[db_id] = get_schema_tokens(table_schema)
    return schema_tokens, column_names, database_schemas_dict
def remove_from_with_join(format_sql_2):
    """Drop FROM clauses that contain joins and resolve table aliases.

    Walks the normalized SQL line by line.  Alias mappings (e.g. t1 ->
    table_name) collected from a FROM clause are substituted back into all
    previously emitted lines whenever a new SELECT starts, and once more at
    the end for the final query.

    :param format_sql_2: SQL normalized by normalize_space(), one clause
        per line.
    :return: (output lines, de-duplicated flat list of used tables,
        per-FROM list of used-table lists)
    """
    used_tables_list = []
    format_sql_3 = []
    table_to_name = {}
    table_list = []
    old_table_to_name = {}
    old_table_list = {}
    old_table_to_name = {}
    old_table_list = []
    for sub_sql in format_sql_2.split('\n'):
        if 'select ' in sub_sql:
            # A new (sub)query starts: apply the alias substitutions
            # gathered for the previous query to everything emitted so far,
            # then reset alias state (keeping a backup for alias-less FROM
            # clauses of correlated subqueries).
            # only replace alias: t1 -> table_name, t2 -> table_name, etc...
            if len(table_list) > 0:
                for i in range(len(format_sql_3)):
                    for table, name in table_to_name.items():
                        format_sql_3[i] = format_sql_3[i].replace(table, name)
                old_table_list = table_list
                old_table_to_name = table_to_name
                table_to_name = {}
                table_list = []
            format_sql_3.append(sub_sql)
        elif sub_sql.startswith('from'):
            new_sub_sql = None
            sub_sql_tokens = sub_sql.split()
            for t_i, t in enumerate(sub_sql_tokens):
                if t == 'as':
                    # "<table> as <alias>" -> record alias mapping
                    table_to_name[sub_sql_tokens[t_i+1]] = sub_sql_tokens[t_i-1]
                    table_list.append(sub_sql_tokens[t_i-1])
                elif t == ')' and new_sub_sql is None:
                    # new_sub_sql keeps some trailing parts after ')'
                    new_sub_sql = ' '.join(sub_sql_tokens[t_i:])
            if len(table_list) > 0:
                # if it's a from clause with join
                if new_sub_sql is not None:
                    format_sql_3.append(new_sub_sql)
                used_tables_list.append(table_list)
            else:
                # if it's a from clause without join
                table_list = old_table_list
                table_to_name = old_table_to_name
                assert 'join' not in sub_sql
                if new_sub_sql is not None:
                    # "from <table> ) ...": keep the two-token FROM and the
                    # trailing fragment as separate output lines
                    sub_sub_sql = sub_sql[:-len(new_sub_sql)].strip()
                    assert len(sub_sub_sql.split()) == 2
                    used_tables_list.append([sub_sub_sql.split()[1]])
                    format_sql_3.append(sub_sub_sql)
                    format_sql_3.append(new_sub_sql)
                elif 'join' not in sub_sql:
                    # plain "from <table>" (or a bare "from")
                    assert len(sub_sql.split()) == 2 or len(sub_sql.split()) == 1
                    if len(sub_sql.split()) == 2:
                        used_tables_list.append([sub_sql.split()[1]])
                    format_sql_3.append(sub_sql)
                else:
                    print('bad from clause in remove_from_with_join')
                    exit()
        else:
            # any other clause line passes through untouched
            format_sql_3.append(sub_sql)
    # Final alias substitution for the last query in the string.
    if len(table_list) > 0:
        for i in range(len(format_sql_3)):
            for table, name in table_to_name.items():
                format_sql_3[i] = format_sql_3[i].replace(table, name)
    used_tables = []
    for t in used_tables_list:
        for tt in t:
            used_tables.append(tt)
    used_tables = list(set(used_tables))
    return format_sql_3, used_tables, used_tables_list
def remove_from_without_join(format_sql_3, column_names, schema_tokens):
    """Drop single-table FROM lines, qualifying bare columns with the table.

    For each query, the two-token ``from <table>`` line is removed and every
    bare column token in that query's other lines is rewritten to
    ``<table> . <column>`` when the pair exists in ``schema_tokens``.

    :param format_sql_3: SQL with one clause per line (join-FROMs already
        removed by remove_from_with_join).
    :param column_names: bare column names of the current database.
    :param schema_tokens: "table.column" surface forms of the database.
    :return: list of output lines with FROM lines removed.
    """
    def _qualify(line, table_name):
        # Prefix bare column references on one line with "<table> . ".
        # A token counts as a bare column when it is a known column name,
        # is not already preceded/followed by '.', and is not a call "col(".
        tokens = line.split()
        for ii, token in enumerate(tokens):
            if token in column_names and tokens[ii - 1] != '.':
                if (ii + 1 < len(tokens) and tokens[ii + 1] != '.' and tokens[ii + 1] != '(') or ii + 1 == len(tokens):
                    if '{}.{}'.format(table_name, token) in schema_tokens:
                        tokens[ii] = '{} . {}'.format(table_name, token)
        return ' '.join(tokens)

    format_sql_4 = []
    table_name = None
    for sub_sql in format_sql_3.split('\n'):
        if 'select ' in sub_sql:
            # New (sub)query: qualify what we emitted for the previous one.
            if table_name:
                format_sql_4 = [_qualify(line, table_name) for line in format_sql_4]
            format_sql_4.append(sub_sql)
        elif sub_sql.startswith('from'):
            sub_sql_tokens = sub_sql.split()
            if len(sub_sql_tokens) == 1:
                table_name = None
            elif len(sub_sql_tokens) == 2:
                table_name = sub_sql_tokens[1]
            else:
                print('bad from clause in remove_from_without_join')
                print(format_sql_3)
                exit()
        else:
            format_sql_4.append(sub_sql)

    # Qualify the trailing query's lines.
    if table_name:
        format_sql_4 = [_qualify(line, table_name) for line in format_sql_4]
    return format_sql_4
def add_table_name(format_sql_3, used_tables, column_names, schema_tokens):
    """Qualify bare column tokens with their owning table name.

    With exactly one used table, every column found in the schema is
    prefixed directly.  With several tables, a column is prefixed only when
    its owner is unambiguous; unowned columns get the fallback prefix
    'table', ambiguous ones are left untouched.  FROM lines pass through.
    """
    def _is_bare_column(tokens, ii):
        # True when tokens[ii] is a column name not already part of a
        # "table . column" chain and not a call like "col(".
        if tokens[ii] not in column_names or tokens[ii - 1] == '.':
            return False
        at_end = ii + 1 == len(tokens)
        return at_end or (tokens[ii + 1] != '.' and tokens[ii + 1] != '(')

    if len(used_tables) == 1:
        # Easy case: one table owns everything it can own.
        only_table = used_tables[0]
        qualified = []
        for line in format_sql_3.split('\n'):
            if line.startswith('from'):
                qualified.append(line)
                continue
            tokens = line.split()
            for ii in range(len(tokens)):
                if _is_bare_column(tokens, ii) and '{}.{}'.format(only_table, tokens[ii]) in schema_tokens:
                    tokens[ii] = '{} . {}'.format(only_table, tokens[ii])
            qualified.append(' '.join(tokens))
        return qualified

    def _owner_of(token):
        # Which used table(s) declare this column in the schema?
        owners = [t for t in used_tables if '{}.{}'.format(t, token) in schema_tokens]
        if not owners:
            return 'table'  # fallback marker used by the original pipeline
        return owners[0] if len(owners) == 1 else None

    qualified = []
    for line in format_sql_3.split('\n'):
        if line.startswith('from'):
            qualified.append(line)
            continue
        tokens = line.split()
        for ii in range(len(tokens)):
            # '*' is never qualified in the multi-table case
            if tokens[ii] == '*':
                continue
            if _is_bare_column(tokens, ii):
                owner = _owner_of(tokens[ii])
                if owner:
                    tokens[ii] = '{} . {}'.format(owner, tokens[ii])
        qualified.append(' '.join(tokens))
    return qualified
def check_oov(format_sql_final, output_vocab, schema_tokens):
    """Verify every token of the final SQL is producible by the decoder.

    Each whitespace token must appear in either ``schema_tokens`` or
    ``output_vocab``; otherwise an Exception is raised on the first
    out-of-vocabulary token.

    :raises Exception: if an out-of-vocabulary token is found.
    """
    for sql_tok in format_sql_final.split():
        if sql_tok not in schema_tokens and sql_tok not in output_vocab:
            print('OOV!', sql_tok)
            # Include the offending token in the message so failures are
            # diagnosable from the traceback alone (previously just 'OOV').
            raise Exception('OOV: {}'.format(sql_tok))
def normalize_space(format_sql):
    """Canonicalize whitespace/line structure of sqlparse-reindented SQL.

    Produces one clause per line, spaces around punctuation, and fuses
    multi-word keywords (``group by`` -> ``group_by``, etc.).  The long
    replace() chains patch places where sqlparse's reindenting splits
    lines on column names that collide with SQL keywords (``start``,
    ``order``, ``group``, ``drop``, ...).
    """
    # Space out punctuation and collapse whitespace runs on each line.
    format_sql_1 = [' '.join(sub_sql.strip().replace(',',' , ').replace('.',' . ').replace('(',' ( ').replace(')',' ) ').split()) for sub_sql in format_sql.split('\n')]
    format_sql_1 = '\n'.join(format_sql_1)
    # Re-attach clause fragments that sqlparse split across lines.
    format_sql_2 = format_sql_1.replace('\njoin',' join').replace(',\n',', ').replace(' where','\nwhere').replace(' intersect', '\nintersect').replace('\nand',' and').replace('order by t2 .\nstart desc', 'order by t2 . start desc')
    # Undo keyword-collision line breaks (columns named like SQL keywords).
    format_sql_2 = format_sql_2.replace('select\noperator', 'select operator').replace('select\nconstructor', 'select constructor').replace('select\nstart', 'select start').replace('select\ndrop', 'select drop').replace('select\nwork', 'select work').replace('select\ngroup', 'select group').replace('select\nwhere_built', 'select where_built').replace('select\norder', 'select order').replace('from\noperator', 'from operator').replace('from\nforward', 'from forward').replace('from\nfor', 'from for').replace('from\ndrop', 'from drop').replace('from\norder', 'from order').replace('.\nstart', '. start').replace('.\norder', '. order').replace('.\noperator', '. operator').replace('.\nsets', '. sets').replace('.\nwhere_built', '. where_built').replace('.\nwork', '. work').replace('.\nconstructor', '. constructor').replace('.\ngroup', '. group').replace('.\nfor', '. for').replace('.\ndrop', '. drop').replace('.\nwhere', '. where')
    # Fuse multi-word tokens into single output-vocabulary symbols.
    format_sql_2 = format_sql_2.replace('group by', 'group_by').replace('order by', 'order_by').replace('! =', '!=').replace('limit value', 'limit_value')
    return format_sql_2
def normalize_final_sql(format_sql_5):
    """Flatten multi-line SQL to one line and fuse composite tokens.

    Also patches two specific known-bad gold queries by literal string
    replacement (a leftover alias and a failure case of
    remove_from_without_join()).
    """
    format_sql_final = format_sql_5.replace('\n', ' ').replace(' . ', '.').replace('group by', 'group_by').replace('order by', 'order_by').replace('! =', '!=').replace('limit value', 'limit_value')
    # normalize two bad sqls
    if 't1' in format_sql_final or 't2' in format_sql_final or 't3' in format_sql_final or 't4' in format_sql_final:
        format_sql_final = format_sql_final.replace('t2.dormid', 'dorm.dormid')
    # This is the failure case of remove_from_without_join()
    format_sql_final = format_sql_final.replace('select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by population desc limit_value', 'select city.city_name where city.state_name in ( select state.state_name where state.state_name in ( select river.traverse where river.river_name = value ) and state.area = ( select min ( state.area ) where state.state_name in ( select river.traverse where river.river_name = value ) ) ) order_by city.population desc limit_value')
    return format_sql_final
def parse_sql(sql_string, db_id, column_names, output_vocab, schema_tokens, schema):
    """Convert a raw SQL string into the FROM-clause-free target format.

    Pipeline: sqlparse reindent -> whitespace normalization -> drop
    join-FROMs (resolving aliases) -> qualify columns -> drop single-table
    FROMs -> flatten.  Raises via check_oov() if any produced token is
    outside the schema tokens and output vocabulary.
    """
    format_sql = sqlparse.format(sql_string, reindent=True)
    format_sql_2 = normalize_space(format_sql)
    # NOTE(review): num_from and num_select are computed but never used.
    num_from = sum([1 for sub_sql in format_sql_2.split('\n') if sub_sql.startswith('from')])
    num_select = format_sql_2.count('select ') + format_sql_2.count('select\n')
    format_sql_3, used_tables, used_tables_list = remove_from_with_join(format_sql_2)
    format_sql_3 = '\n'.join(format_sql_3)
    format_sql_4 = add_table_name(format_sql_3, used_tables, column_names, schema_tokens)
    format_sql_4 = '\n'.join(format_sql_4)
    format_sql_5 = remove_from_without_join(format_sql_4, column_names, schema_tokens)
    format_sql_5 = '\n'.join(format_sql_5)
    format_sql_final = normalize_final_sql(format_sql_5)
    candidate_tables_id, table_names_original = get_candidate_tables(format_sql_final, schema)
    # NOTE(review): 'failure' is set but never used or returned — appears
    # to be leftover instrumentation; confirm before removing.
    failure = False
    if len(candidate_tables_id) != len(used_tables):
        failure = True
    check_oov(format_sql_final, output_vocab, schema_tokens)
    return format_sql_final
def read_spider_split(split_json, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Read one Spider split file into single-turn interaction records.

    Each example becomes a one-turn "interaction" appended to
    ``interaction_list[db_id]``.  Queries containing ``tbl.col`` tokens
    (no table alias used) are re-tokenized and dropped from training
    splits.  When ``remove_from`` is set the SQL is run through
    parse_sql(); otherwise it is kept verbatim.
    """
    with open(split_json) as fp:
        examples = json.load(fp)
    print('read_spider_split', split_json, len(examples))

    for example in examples:
        db_id = example['db_id']

        # Split 'tbl.col' tokens apart and flag the example as invalid SQL.
        pieces = []
        skip = False
        for query_tok in example['query_toks_no_value']:
            if query_tok != '.' and '.' in query_tok:
                # invalid sql; didn't use table alias in join
                pieces.extend(query_tok.replace('.', ' . ').split())
                skip = True
            else:
                pieces.append(query_tok)
        final_sql = ' '.join(pieces)

        # Flagged SQL is dropped from training data only.
        if skip and 'train' in split_json:
            continue

        if remove_from:
            final_sql_parse = parse_sql(final_sql, db_id, column_names[db_id], output_vocab, schema_tokens[db_id], database_schemas[db_id])
        else:
            final_sql_parse = final_sql

        final_utterance = ' '.join(example['question_toks'])
        if db_id not in interaction_list:
            interaction_list[db_id] = []
        interaction_list[db_id].append({
            'id': '',
            'scenario': '',
            'database_id': db_id,
            'interaction_id': len(interaction_list[db_id]),
            'final': {'utterance': final_utterance, 'sql': final_sql_parse},
            'interaction': [{'utterance': final_utterance, 'sql': final_sql_parse}],
        })
    return interaction_list
def read_data_json(split_json, interaction_list, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Read a SParC/CoSQL-style multi-turn interaction file.

    Appends one record per interaction to ``interaction_list[db_id]``.
    Turns whose SQL used ``tbl.col`` tokens (no alias) are dropped from
    training splits; a handful of known human annotation errors are
    patched by literal string replacement; turns that fail parse_sql()
    under ``remove_from`` are skipped.
    """
    with open(split_json) as f:
        split_data = json.load(f)
    print('read_data_json', split_json, len(split_data))
    for interaction_data in split_data:
        db_id = interaction_data['database_id']
        final_sql = interaction_data['final']['query']
        final_utterance = interaction_data['final']['utterance']
        if db_id not in interaction_list:
            interaction_list[db_id] = []
        # no interaction_id in train
        # NOTE(review): interaction_data['interaction'] is iterated as a
        # list of turns below, so this membership test presumably never
        # fires — confirm against the dataset format.
        if 'interaction_id' in interaction_data['interaction']:
            interaction_id = interaction_data['interaction']['interaction_id']
        else:
            interaction_id = len(interaction_list[db_id])
        interaction = {}
        interaction['id'] = ''
        interaction['scenario'] = ''
        interaction['database_id'] = db_id
        interaction['interaction_id'] = interaction_id
        interaction['final'] = {}
        interaction['final']['utterance'] = final_utterance
        interaction['final']['sql'] = final_sql
        interaction['interaction'] = []
        for turn in interaction_data['interaction']:
            # Split 'tbl.col' tokens apart; flag the turn as invalid SQL.
            turn_sql = []
            skip = False
            for query_tok in turn['query_toks_no_value']:
                if query_tok != '.' and '.' in query_tok:
                    # invalid sql; didn't use table alias in join
                    turn_sql += query_tok.replace('.',' . ').split()
                    skip = True
                else:
                    turn_sql.append(query_tok)
            turn_sql = ' '.join(turn_sql)
            # Correct some human sql annotation error
            turn_sql = turn_sql.replace('select f_id from files as t1 join song as t2 on t1 . f_id = t2 . f_id', 'select t1 . f_id from files as t1 join song as t2 on t1 . f_id = t2 . f_id')
            turn_sql = turn_sql.replace('select name from climber mountain', 'select name from climber')
            turn_sql = turn_sql.replace('select sid from sailors as t1 join reserves as t2 on t1 . sid = t2 . sid join boats as t3 on t3 . bid = t2 . bid', 'select t1 . sid from sailors as t1 join reserves as t2 on t1 . sid = t2 . sid join boats as t3 on t3 . bid = t2 . bid')
            turn_sql = turn_sql.replace('select avg ( price ) from goods )', 'select avg ( price ) from goods')
            turn_sql = turn_sql.replace('select min ( annual_fuel_cost ) , from vehicles', 'select min ( annual_fuel_cost ) from vehicles')
            turn_sql = turn_sql.replace('select * from goods where price < ( select avg ( price ) from goods', 'select * from goods where price < ( select avg ( price ) from goods )')
            turn_sql = turn_sql.replace('select distinct id , price from goods where price < ( select avg ( price ) from goods', 'select distinct id , price from goods where price < ( select avg ( price ) from goods )')
            turn_sql = turn_sql.replace('select id from goods where price > ( select avg ( price ) from goods', 'select id from goods where price > ( select avg ( price ) from goods )')
            # Invalid SQL is dropped from training data only.
            if skip and 'train' in split_json:
                continue
            if remove_from:
                try:
                    turn_sql_parse = parse_sql(turn_sql, db_id, column_names[db_id], output_vocab, schema_tokens[db_id], database_schemas[db_id])
                # NOTE(review): bare except silently drops any turn whose
                # parse fails (including unexpected errors) — deliberate
                # best-effort behavior, but it hides real bugs.
                except:
                    print('continue')
                    continue
            else:
                turn_sql_parse = turn_sql
            if 'utterance_toks' in turn:
                turn_utterance = ' '.join(turn['utterance_toks']) # not lower()
            else:
                turn_utterance = turn['utterance']
            interaction['interaction'].append({'utterance': turn_utterance, 'sql': turn_sql_parse})
        # Keep the interaction only if at least one turn survived.
        if len(interaction['interaction']) > 0:
            interaction_list[db_id].append(interaction)
    return interaction_list
def read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Load the Spider train and dev splits into one per-database dict."""
    interactions = {}
    for split_file in ('train.json', 'dev.json'):
        split_path = os.path.join(spider_dir, split_file)
        interactions = read_spider_split(split_path, interactions, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    return interactions
def read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Load the SParC train and dev splits into one per-database dict."""
    interactions = {}
    for split_file in ('train_no_value.json', 'dev_no_value.json'):
        split_path = os.path.join(sparc_dir, split_file)
        interactions = read_data_json(split_path, interactions, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    return interactions
def read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from):
    """Load the CoSQL train and dev splits into one per-database dict."""
    interactions = {}
    for split_file in ('train.json', 'dev.json'):
        split_path = os.path.join(cosql_dir, split_file)
        interactions = read_data_json(split_path, interactions, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    return interactions
def read_db_split(data_dir):
    """Read the train/dev database-id split files (one id per line).

    :param data_dir: directory containing ``train_db_ids.txt`` and
        ``dev_db_ids.txt``.
    :return: (train database ids, dev database ids) as two lists.
    """
    def _read_ids(filename):
        with open(os.path.join(data_dir, filename)) as fp:
            return [line.strip() for line in fp]

    return _read_ids('train_db_ids.txt'), _read_ids('dev_db_ids.txt')
def preprocess(dataset, remove_from=False):
    """End-to-end preprocessing for one dataset ('spider'/'sparc'/'cosql').

    Reads the schema and split files from the dataset's data directory,
    optionally rewrites SQL into the FROM-clause-free format, and writes
    train/dev interaction files plus a copy of tables.json into a fresh
    output directory (deleting any previous one).
    """
    # Validate output_vocab
    # Decoder vocabulary; the alternative below replaces alias/FROM tokens
    # with fused tokens (group_by, limit_value, ...) for remove_from mode.
    output_vocab = ['_UNK', '_EOS', '.', 't1', 't2', '=', 'select', 'from', 'as', 'value', 'join', 'on', ')', '(', 'where', 't3', 'by', ',', 'count', 'group', 'order', 'distinct', 't4', 'and', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 't5', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!', 'union', 'between', 't6', '-', 't7', '+', '/']
    if remove_from:
        output_vocab = ['_UNK', '_EOS', '=', 'select', 'value', ')', '(', 'where', ',', 'count', 'group_by', 'order_by', 'distinct', 'and', 'limit_value', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!=', 'union', 'between', '-', '+', '/']
    print('size of output_vocab', len(output_vocab))
    print('output_vocab', output_vocab)
    print()
    # Resolve dataset-specific paths.
    if dataset == 'spider':
        spider_dir = 'data/spider/'
        database_schema_filename = 'data/spider/tables.json'
        output_dir = 'data/spider_data'
        if remove_from:
            output_dir = 'data/spider_data_removefrom'
        train_database, dev_database = read_db_split(spider_dir)
    elif dataset == 'sparc':
        sparc_dir = 'data/sparc/'
        database_schema_filename = 'data/sparc/tables.json'
        output_dir = 'data/sparc_data'
        if remove_from:
            output_dir = 'data/sparc_data_removefrom'
        train_database, dev_database = read_db_split(sparc_dir)
    elif dataset == 'cosql':
        cosql_dir = 'data/cosql/'
        database_schema_filename = 'data/cosql/tables.json'
        output_dir = 'data/cosql_data'
        if remove_from:
            output_dir = 'data/cosql_data_removefrom'
        train_database, dev_database = read_db_split(cosql_dir)
    # Start from a clean output directory.
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    schema_tokens = {}
    column_names = {}
    database_schemas = {}
    print('Reading spider database schema file')
    schema_tokens, column_names, database_schemas = read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas)
    num_database = len(schema_tokens)
    print('num_database', num_database, len(train_database), len(dev_database))
    print('total number of schema_tokens / databases:', len(schema_tokens))
    # Copy the schema into the output directory for downstream use.
    output_database_schema_filename = os.path.join(output_dir, 'tables.json')
    with open(output_database_schema_filename, 'w') as outfile:
        json.dump([v for k,v in database_schemas.items()], outfile, indent=4)
    if dataset == 'spider':
        interaction_list = read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif dataset == 'sparc':
        interaction_list = read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif dataset == 'cosql':
        interaction_list = read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    print('interaction_list length', len(interaction_list))
    # Any database not listed as dev goes into the training split.
    train_interaction = []
    for database_id in interaction_list:
        if database_id not in dev_database:
            train_interaction += interaction_list[database_id]
    dev_interaction = []
    for database_id in dev_database:
        dev_interaction += interaction_list[database_id]
    print('train interaction: ', len(train_interaction))
    print('dev interaction: ', len(dev_interaction))
    write_interaction(train_interaction, 'train', output_dir)
    write_interaction(dev_interaction, 'dev', output_dir)
    return
if __name__ == '__main__':
    # values are dataset=sparc, remove_from=True
    # NOTE(review): the comment above disagrees with the argparse defaults
    # below (remove_from defaults to False unless the flag is passed) —
    # confirm which configuration is intended.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", choices=('spider', 'sparc', 'cosql'), default='sparc')
    parser.add_argument('--remove_from', action='store_true', default=False)
    args = parser.parse_args()
    preprocess(args.dataset, args.remove_from)
|
from __future__ import division
from builtins import object
from past.utils import old_div
from proteus.mprans import (SW2DCV, GN_SW2DCV)
from proteus.Domain import RectangularDomain, PlanarStraightLineGraphDomain
import numpy as np
from proteus import (Domain, Context,
MeshTools as mt)
from proteus.Profiling import logEvent
from proteus.Gauges import PointGauges
import proteus.SWFlow.SWFlowProblem as SWFlowProblem
import os
"""
We reproduce the 2009-2010 experiments of [Swigler, 2009] and
[Lynett, 2019] performed at the O.H. Hinsdale Wave Research
Laboratory of Oregon State University. The experiments were conducted
to study specific phenomena that are known to occur when solitary
waves propagate over irregular bathymetry such as shoaling,
refraction, breaking, etc. In the experiment, nine wave gauges (WGs)
were placed along the basin to capture the free surface elevation
along with three Acoustic Doppler Velocimeters (ADVs) that
measured the velocities in both horizontal directions.
"""
# *************************** #
# ***** GENERAL OPTIONS ***** #
# *************************** #
opts = Context.Options([
    ('sw_model', 1, "sw_model = {0,1} for {SWEs,DSWEs}"),
    ("final_time", 10.0, "Final time for simulation"),
    ("dt_output", 0.1, "Time interval to output solution"),
    ("cfl", 0.25, "Desired CFL restriction"),
    ("refinement", 4, "Refinement level"),
    ("structured", True, "Structured or unstructured mesh"),
    ("he", 0.5, "Mesh size for unstructured mesh"),
    ("reflecting_BCs", False, "Use reflecting BCs for all boundaries"),
    ("want_gauges", False, "Output for water height point gauge")
])
###################
# DOMAIN AND MESH #
###################
L = (48.8, 26.5)  # this is length in x direction and y direction
refinement = opts.refinement
# Rectangle offset so the basin is centered on y = 0 (y in [-13.25, 13.25]).
rectangle = RectangularDomain(L=L, x=[0, -13.25, 0])
# CREATE REFINEMENT #
# Structured grid resolution: nnx doubles with each refinement level;
# nny keeps roughly half the x resolution (domain is about twice as long).
nnx0 = 6
nnx = (nnx0 - 1) * (2**refinement) + 1
nny = old_div((nnx - 1), 2) + 1
he = old_div(L[0], float(nnx - 1))
if opts.structured:
    domain = rectangle
else:
    # Unstructured mesh: write the rectangle as a .poly file and let
    # triangle generate elements with target area from opts.he.
    rectangle.writePoly("reef")
    domain = PlanarStraightLineGraphDomain(fileprefix="reef")
    domain.MeshOptions.triangleOptions = "pAq30Dena%f" % (0.5 * opts.he**2,)
    nnx = None
    nny = None
###############################
# CONSTANTS NEEDED FOR SETUP #
###############################
g = 9.81  # gravitational acceleration [m/s^2]
# stuff for solitary wave
h0 = 0.78      # still-water depth
alpha = 0.4  # 0.5 * h0  (wave amplitude)
xs = 5.0       # initial position of the wave crest
# r: decay/width parameter of the sech^2 profile; c: wave celerity
r = np.sqrt(old_div(3. * alpha, (4. * h0**2 * (h0 + alpha))))
c = np.sqrt(g * (h0 + alpha))
# stuff for bathymetry, including shelf and cone
rcone = 3.     # cone radius
hcone = 0.45   # cone height
yc = 13.25     # half-width of the basin (centerline offset)
#####################################
# Some functions defined here #
####################################
def solitary_wave(x, t):
    """Free-surface displacement of the sech^2 solitary wave at (x, t)."""
    phase = r * (x - xs - c * t)
    sech_squared = (1.0 / np.cosh(phase))**2
    return alpha * sech_squared
def bathymetry_function(X):
    """Basin bathymetry: piecewise sloped base, a shelf, and a conical island.

    :param X: point coordinates; X[0] is x, X[1] is y (basin centered at y=0).
    :return: bottom elevation at X.
    """
    x = X[0]
    y = X[1] + yc  # shift so y spans [0, 2*yc] with centerline at y = yc
    # first define cone topography
    cone = np.maximum(
        hcone - np.sqrt(((x - 17.0)**2 + (y - yc)**2) / (rcone / hcone)**2), 0.0)
    # define piecewise function for base
    base = 0. * x  # NOTE(review): overwritten by np.piecewise below
    conds = [x < 10.2, (10.2 < x) & (x <= 17.5), (17.5 <= x) & (x <= 32.5),
             32.5 < x]
    base_values = [lambda x: 0.0,
                   lambda x: (0.5 - 0.0) / (17.5 - 10.20) * (x - 10.2),
                   lambda x: 1.0 + (1.0 - 0.5)/(32.5 - 17.5) * (x - 32.5),
                   lambda x: 1.]
    base = np.piecewise(x, conds, base_values)
    # define piecewise function for shelf
    shelf = 0. * x  # NOTE(review): overwritten by np.select below
    # lateral factor: 1 on the centerline (y = yc), 0 at the side walls
    dist = 1.0 - np.minimum(1.0, np.abs(y - yc) / yc)
    aux_x = 12.50 + 12.4999 * (1.0 - dist)
    aux_z = 0.70 + 0.050 * (1.0 - dist)
    conds = [x < 10.2, (10.2 <= x) & (x <= aux_x), (aux_x <= x) & (x <= 25.),
             (25. < x) & (x <= 32.5), 32.5 < x]
    shelf_values = [0.0,
                    aux_z / (aux_x - 10.20) * (x - 10.2),
                    0.75 + (aux_z - 0.75) / (aux_x - 25.) * (x - 25.),
                    1. + (1. - 0.5) / (32.5 - 17.5) * (x - 32.5),
                    1.]
    shelf = np.select(conds, shelf_values)
    # final bed: the higher of base and shelf, plus the cone
    bath = np.maximum(base, shelf) + cone
    return bath
######################
# INITIAL CONDITIONS #
######################
class water_height_at_t0(object):
    """Initial water height: wave surface minus bathymetry, clipped at zero."""

    def uOfXT(self, X, t):
        surface = h0 + solitary_wave(X[0], 0)
        depth = max(surface - bathymetry_function(X), 0.)
        return depth
class x_mom_at_t0(object):
    """Initial x-momentum h*u of the solitary wave."""

    def uOfXT(self, X, t):
        surface = h0 + solitary_wave(X[0], 0)
        depth = max(surface - bathymetry_function(X), 0.)
        # velocity u = c * (surface - h0) / surface; momentum is depth * u
        return depth * c * old_div(surface - h0, surface)
class y_mom_at_t0(object):
    """Initial y-momentum is identically zero everywhere."""

    def uOfXT(self, X, t):
        return 0.
class heta_at_t0(object):
    """Initial h*eta: equals h**2 since eta starts equal to h."""

    def uOfXT(self, X, t):
        depth = water_height_at_t0().uOfXT(X, t)
        return depth**2
class hw_at_t0(object):
    """Initial h*w derived from the solitary-wave profile derivative."""

    def uOfXT(self, X, t):
        phase = r * (X[0] - xs)
        sechSqd = (1.0 / np.cosh(phase))**2.0
        hTilde = h0 + solitary_wave(X[0], 0)
        h = max(hTilde - bathymetry_function(X), 0.)
        # spatial derivative of the sech^2 surface profile
        hTildePrime = -2.0 * alpha * r * np.tanh(phase) * sechSqd
        return -h**2 * old_div(c * h0 * hTildePrime, hTilde**2)
###############################
##### BOUNDARY CONDITIONS #####
###############################
# Domain extents used by the Dirichlet BC functions below.
X_coords = (0.0, 48.8)  # this is x domain, used in BCs
Y_coords = (-13.25, 13.25)  # this is y domain, used in BCs
def x_mom_DBC(X, flag):
    """Zero x-momentum on the left/right walls; no condition elsewhere."""
    if X[0] in X_coords:
        return lambda X, t: 0.0
def y_mom_DBC(X, flag):
    """Zero y-momentum on the top/bottom walls; no condition elsewhere."""
    if X[1] in Y_coords:
        return lambda X, t: 0.0
# ********************************** #
# ***** Create mySWFlowProblem ***** #
# ********************************** #
outputStepping = SWFlowProblem.OutputStepping(
    opts.final_time, dt_output=opts.dt_output)
initialConditions = {'water_height': water_height_at_t0(),
                     'x_mom': x_mom_at_t0(),
                     'y_mom': y_mom_at_t0(),
                     'h_times_eta': heta_at_t0(),
                     'h_times_w': hw_at_t0()}
# Returning None means no Dirichlet condition is imposed at that point.
boundaryConditions = {'water_height': lambda x, flag: None,
                      'x_mom': x_mom_DBC,
                      'y_mom': y_mom_DBC,
                      'h_times_eta': lambda x, flag: None,
                      'h_times_w': lambda x, flag: None}
# **************************** #
# ********** GAUGES ********** #
# **************************** #
# Water-height point gauges at the wave-gauge locations (x, y, z).
heightPointGauges = PointGauges(gauges=((('h'), ((7.5, 0.0, 0),
                                                 (13.0, 0.0, 0),
                                                 (21.0, 0.0, 0),
                                                 (7.5, 5.0, 0),
                                                 (13.0, 5.0, 0),
                                                 (21.0, 5.0, 0),
                                                 (25.0, 0.0, 0),
                                                 (25.0, 5.0, 0),
                                                 (25.0, 10.0, 0))),),
                                activeTime=(0.01, opts.final_time),
                                fileName='reef_wave_gauges.csv')
# ********************************************* #
# ********** Create my SWFlowProblem ********** #
# ********************************************* #
mySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=opts.sw_model,
                                              cfl=opts.cfl,
                                              outputStepping=outputStepping,
                                              structured=opts.structured,
                                              he=he,
                                              nnx=nnx,
                                              nny=nny,
                                              domain=domain,
                                              initialConditions=initialConditions,
                                              boundaryConditions=boundaryConditions,
                                              reflectingBCs=opts.reflecting_BCs,
                                              bathymetry=bathymetry_function,
                                              analyticalSolution=None)
# No linear friction and zero Manning roughness for this benchmark.
mySWFlowProblem.physical_parameters['LINEAR_FRICTION'] = 0
mySWFlowProblem.physical_parameters['mannings'] = 0.0
if opts.want_gauges:
    mySWFlowProblem.auxiliaryVariables = [heightPointGauges]
|
# -*- coding: utf-8 -*-
"""Manages multiple objects under different contexts."""
from collections import Counter as ValueCounter
from contextlib import contextmanager
from copy import deepcopy
from enum import Enum, unique
from inspect import getmro
from threading import RLock
from traceback import format_exception
from typing import TYPE_CHECKING, TypeVar, cast, overload
from weakref import WeakKeyDictionary
from six import iteritems, itervalues, raise_from, with_metaclass
from ._bases import Base, BaseMeta, Generic, final
from ._changes import BaseChange
from ._constants import BASE_STRING_TYPES, STRING_TYPES
from ._data import BaseData, InteractiveDictData
from ._exceptions import BaseObjettoException
from ._states import BaseState, DictState
from .data import (
Data,
DictData,
InteractiveData,
ListData,
data_attribute,
data_dict_attribute,
data_protected_dict_attribute,
data_protected_list_attribute,
data_set_attribute,
)
from .utils.custom_repr import custom_mapping_repr
from .utils.factoring import format_factory, run_factory
from .utils.recursive_repr import recursive_repr
from .utils.reraise_context import ReraiseContext
from .utils.storage import Storage
from .utils.type_checking import (
assert_is_callable,
assert_is_instance,
assert_is_subclass,
)
from .utils.weak_reference import WeakReference
if TYPE_CHECKING:
from typing import (
AbstractSet,
Any,
Callable,
Counter,
Dict,
Final,
Iterator,
List,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
Type,
Union,
)
from ._changes import BaseAtomicChange, Batch
from ._data import InteractiveSetData
from ._history import HistoryObject
from ._objects import BaseObject, Relationship
from ._observers import ActionObserverExceptionData, InternalObserver
from .utils.factoring import LazyFactory
from .utils.subject_observer import ObserverExceptionInfo
assert Relationship
assert InternalObserver
ReadFunction = Callable[[], "Store"]
WriteFunction = Callable[
[Any, BaseData, Mapping[str, Any], Counter[BaseObject], BaseAtomicChange], None
]
ReadMetadataFunction = Callable[[], InteractiveDictData]
UpdateMetadataFunction = Callable[[Mapping[str, Any]], None]
__all__ = [
"ActionObserversFailedError",
"RejectChangeException",
"Phase",
"Action",
"Store",
"BO",
"ApplicationMeta",
"Application",
"ApplicationRoot",
"ApplicationProperty",
"ApplicationSnapshot",
]
@unique
class Phase(Enum):
    """Phase of an action relative to its changes."""

    PRE = "PRE"
    """Emitted before the changes are applied."""

    POST = "POST"
    """Emitted after the changes are applied."""
class ActionObserversFailedError(BaseObjettoException):
    """
    Action observers failed while observing action.

    Inherits from:
      - :class:`objetto.bases.BaseObjettoException`

    :param exception_infos: Observer exception infos.
    :type exception_infos: tuple[objetto.observers.ActionObserverExceptionData]
    """

    def __init__(self, message, exception_infos):
        # type: (str, Tuple[ActionObserverExceptionData, ...]) -> None
        # When there are exception infos, append a per-observer report
        # (observer, change type, phase, and the formatted traceback) to
        # the base message; otherwise keep the message as-is.
        message = (
            (
                message
                + "\n\n"
                + "\n".join(
                    (
                        ("Observer: {}\n" "Change: {}\n" "Phase: {}\n").format(
                            exception_info.observer,
                            type(exception_info.action.change).__fullname__,
                            exception_info.phase.name,
                        )
                        + "".join(
                            format_exception(
                                exception_info.exception_type,
                                exception_info.exception,
                                exception_info.traceback,
                            )
                        )
                    )
                    for exception_info in exception_infos
                )
            )
            if exception_infos
            else message
        )
        super(ActionObserversFailedError, self).__init__(message)
        self.__exception_infos = exception_infos

    @property
    def exception_infos(self):
        # type: () -> Tuple[ActionObserverExceptionData, ...]
        """
        Observer exception infos.

        :rtype: tuple[objetto.observers.ActionObserverExceptionData]
        """
        return self.__exception_infos
class RejectChangeException(BaseObjettoException):
    """
    Exception to be raised from within a reaction. This will cause the change to be
    reverted and the custom callback function to run after that.

    Inherits from:
      - :class:`objetto.bases.BaseObjettoException`

    :param change: Change to reject.
    :type change: objetto.bases.BaseChange

    :param callback: Callback to run after change is rewound.
    :type callback: function or collections.abc.Callable
    """

    def __init__(self, message, change, callback):
        # type: (str, BaseChange, Callable[[], None]) -> None
        # NOTE(review): BaseChange is already imported at module level;
        # this local import appears redundant — confirm it is not guarding
        # a circular-import scenario before removing.
        from ._changes import BaseChange

        with ReraiseContext(TypeError, "'change' parameter"):
            assert_is_instance(change, BaseChange)
        with ReraiseContext(TypeError, "'callback' parameter"):
            assert_is_callable(callback)
        # Default message shown if the exception escapes without being
        # caught by the context that rewinds the change and runs the
        # callback.
        message = (
            "{}; change {} was rejected but callback could not run because rejection "
            "was not raised and/or caught within the correct context"
        ).format(message, change)
        super(RejectChangeException, self).__init__(message)
        self.__change = change
        self.__callback = callback

    @property
    def change(self):
        # type: () -> BaseChange
        """
        Change to reject.

        :rtype: objetto.bases.BaseChange
        """
        return self.__change

    @property
    def callback(self):
        # type: () -> Callable[[], None]
        """
        Callback to run after change is rewound.

        :rtype: function or collections.abc.Callable
        """
        return self.__callback
class TemporaryContextException(BaseObjettoException):
    """Temporary write context exception."""
    # Raised internally to abort a temporary write context; the topmost
    # 'ApplicationInternals.write_context' reverts the commits and swallows it.
class ApplicationLock(Base):
    """
    Re-entrant threading lock for thread-safe applications.
    - Can be deep copied and pickled.
    """

    __slots__ = ("__lock",)

    def __init__(self):
        # type: () -> None
        self.__lock = RLock()

    def __deepcopy__(self, memo=None):
        # type: (Optional[Dict[int, Any]]) -> ApplicationLock
        """
        Make a deep copy.
        :param memo: Memo dict.
        :return: Deep copy.
        """
        memo = {} if memo is None else memo
        key = id(self)
        # A lock can't be copied; hand out a brand-new one, memoized so the
        # same replacement is reused throughout a single deepcopy pass.
        if key not in memo:
            memo[key] = type(self)()
        return memo[key]

    def __enter__(self):
        """Acquire the underlying re-entrant lock (context manager entry)."""
        return self.__lock.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Release the underlying re-entrant lock (context manager exit)."""
        return self.__lock.__exit__(exc_type, exc_val, exc_tb)

    def __reduce__(self):
        # type: () -> Tuple[Type[ApplicationLock], Tuple]
        """
        Reduce for pickling; unpickling yields a fresh, unheld lock.
        :return: Class and init arguments.
        """
        return type(self), ()
class Store(InteractiveData):
    """Holds an object's state, data, metadata, hierarchy, and history information."""
    # NOTE: string type references like ".._objects|BaseObject" appear to be
    # lazily-resolved import paths understood by the attribute factories —
    # confirm against the data_attribute documentation.
    state = data_attribute(BaseState, subtypes=True, checked=False)  # type: BaseState
    """State."""
    data = data_attribute(
        (BaseData, None), subtypes=True, checked=False
    )  # type: Optional[BaseData]
    """Data."""
    metadata = data_dict_attribute(
        key_types=STRING_TYPES, checked=False
    )  # type: InteractiveDictData[str, Any]
    """Metadata."""
    # Weak references below avoid reference cycles through the hierarchy; a
    # dead reference resolves to None when called.
    parent_ref = data_attribute(
        cast("Type[WeakReference[BaseObject]]", WeakReference),
        checked=False,
        default=WeakReference(),
    )  # type: WeakReference[BaseObject]
    """Weak reference to the parent."""
    history_provider_ref = data_attribute(
        cast("Type[WeakReference[BaseObject]]", WeakReference),
        checked=False,
        default=WeakReference(),
    )  # type: WeakReference[BaseObject]
    """Weak reference to the history provider."""
    last_parent_history_ref = data_attribute(
        cast("Type[WeakReference[HistoryObject]]", WeakReference),
        checked=False,
        default=WeakReference(),
    )  # type: WeakReference[HistoryObject]
    """Weak reference to the last history object."""
    history = data_attribute(
        (".._history|HistoryObject", None), checked=False, default=None
    )  # type: HistoryObject
    """History object."""
    children = data_set_attribute(
        ".._objects|BaseObject", subtypes=True, checked=False
    )  # type: InteractiveSetData[BaseObject]
    """Children."""
@final
class Action(Data):
    """
    Carries information about a change and where it happened in the hierarchy.
    Inherits from:
      - :class:`objetto.data.Data`
    """
    sender = data_attribute(
        ".._objects|BaseObject", subtypes=True, checked=False
    )  # type: BaseObject
    """
    Object where the action originated from (where the change happened).
    :type: objetto.bases.BaseObject
    """
    receiver = data_attribute(
        ".._objects|BaseObject", subtypes=True, checked=False
    )  # type: BaseObject
    """
    Object relaying the action up the hierarchy.
    :type: objetto.bases.BaseObject
    """
    # Empty when receiver is the sender itself; grows by one entry per level
    # as the action propagates up the hierarchy.
    locations = data_protected_list_attribute(checked=False)  # type: ListData[Any]
    """
    List of relative locations from the receiver to the sender.
    :type: list[str or int or collections.abc.Hashable]
    """
    change = data_attribute(
        BaseChange, subtypes=True, checked=False
    )  # type: BaseChange
    """
    Change that happened in the sender.
    :type: objetto.bases.BaseChange
    """
# Not @final: 'BatchCommit' subclasses this.
class Commit(Data):
    """Holds unmerged, modified stores."""
    actions = data_protected_list_attribute(
        Action, checked=False, finalized=True
    )  # type: Final[ListData[Action]]
    """Actions."""
    # Only the stores touched by this commit; merged into permanent storage
    # by 'ApplicationInternals.__push'.
    stores = data_protected_dict_attribute(
        Store,
        checked=False,
        key_types=".._objects|BaseObject",
        key_subtypes=True,
    )  # type: Final[DictData[BaseObject, Store]]
    """Modified stores."""
@final
class BatchCommit(Commit):
    """Batch commit: marks the PRE/POST boundary of a batch change."""
    phase = data_attribute(Phase, checked=False)  # type: Phase
    """Batch phase."""
# Private type variable used by the 'ApplicationRoot.__get__' overloads.
# noinspection PyTypeChecker
_AR = TypeVar("_AR", bound="ApplicationRoot")
# Public type variable for the object type held by an 'ApplicationRoot'
# (exported via __all__ for use in user-facing annotations).
# noinspection PyTypeChecker
BO = TypeVar("BO", bound="BaseObject")
@final
class ApplicationRoot(Base, Generic[BO]):
    """
    Describes a root object that gets initialized with the application.
    .. note::
        Prefer using the :func:`objetto.applications.root` factory over
        instantiating :class:`objetto.applications.ApplicationRoot` directly.
    Inherits from:
      - :class:`objetto.bases.Base`
    :param obj_type: Object type.
    :type obj_type: type[objetto.bases.BaseObject]
    :param priority: Initialization priority.
    :type priority: int or None
    :param kwargs: Keyword arguments to be passed to the object's `__init__`.
    :raises ValueError: Used reserved keyword argument.
    :raises TypeError: Invalid object type.
    """
    __slots__ = ("__obj_type", "__priority", "__kwargs")
    def __init__(self, obj_type, priority=None, **kwargs):
        # type: (Type[BO], Optional[int], Any) -> None
        # Check kwargs for reserved keys.
        # 'app' is injected automatically when the root object is built in
        # 'ApplicationInternals.init_root_objs', so callers may not supply it.
        if "app" in kwargs:
            error = "can't use reserved keyword argument 'app'"
            raise ValueError(error)
        from ._objects import BaseObject
        with ReraiseContext(TypeError, "'obj_type' parameter"):
            assert_is_subclass(obj_type, BaseObject)
        self.__obj_type = obj_type
        self.__priority = priority
        self.__kwargs = DictState(kwargs)
    # Typing overloads: accessing the descriptor on the class returns the
    # descriptor itself; accessing it on an Application instance returns the
    # initialized root object.
    @overload
    def __get__(self, instance, owner):
        # type: (_AR, None, Type[Application]) -> _AR
        pass
    @overload
    def __get__(self, instance, owner):
        # type: (Application, Type[Application]) -> BO
        pass
    @overload
    def __get__(self, instance, owner):
        # type: (_AR, object, type) -> _AR
        pass
    def __get__(self, instance, owner):
        """
        Get attribute value when accessing from valid instance or this descriptor
        otherwise.
        :param instance: Instance.
        :type instance: objetto.applications.Application or None
        :param owner: Owner class.
        :type owner: type[objetto.applications.Application]
        :return: Object instance or this descriptor.
        :rtype: objetto.bases.BaseObject or objetto.applications.ApplicationRoot
        """
        if instance is not None and isinstance(instance, Application):
            return instance.__.get_root_obj(self)
        return self
    def __hash__(self):
        # type: () -> int
        """
        Get hash based on object id.
        :return: Hash based on object id.
        :rtype: int
        """
        return hash(id(self))
    def __eq__(self, other):
        # type: (object) -> bool
        """
        Compare with another object for identity.
        :param other: Another object.
        :return: True if the same object.
        :rtype: bool
        """
        return other is self
    @recursive_repr
    def __repr__(self):
        # type: () -> str
        """
        Get representation.
        :return: Representation.
        :rtype: str
        """
        return custom_mapping_repr(
            self.to_dict(),
            prefix="{}(".format(type(self).__name__),
            template="{key}={value}",
            suffix=")",
            key_repr=str,
        )
    def to_dict(self):
        # type: () -> Dict[str, Any]
        """
        Convert to dictionary.
        :return: Dictionary.
        :rtype: dict[str, Any]
        """
        return {
            "obj_type": self.obj_type,
            "priority": self.priority,
            "kwargs": self.kwargs,
        }
    @property
    def obj_type(self):
        # type: () -> Type[BO]
        """
        Object type.
        :rtype: type[objetto.bases.BaseObject]
        """
        return self.__obj_type
    @property
    def priority(self):
        # type: () -> Optional[int]
        """
        Initialization priority.
        :rtype: int or None
        """
        return self.__priority
    @property
    def kwargs(self):
        # type: () -> DictState[str, Any]
        """
        Keyword arguments to be passed to the object's `__init__`.
        :rtype: DictState[str, Any]
        """
        return self.__kwargs
class ApplicationProperty(Base):
    """
    Dynamic generic application property.
    Inherits from:
      - :class:`objetto.bases.Base`
    :param default_factory: Default value factory.
    :type default_factory: str or collections.abc.Callable or None
    :param module: Optional module path to use in case partial factory paths
        are provided.
    :type module: str or None
    """
    # '__weakref__' slot allows instances to be weakly referenced (e.g. used
    # as keys in a WeakKeyDictionary).
    __slots__ = ("__weakref__", "__default_factory", "__module")
    def __init__(self, default_factory=None, module=None):
        # type: (LazyFactory, Optional[str]) -> None
        # 'default_factory'
        with ReraiseContext((ValueError, TypeError), "'default_factory' parameter"):
            default_factory = format_factory(default_factory, module=module)
        # 'module'
        with ReraiseContext(TypeError, "'module' parameter"):
            assert_is_instance(module, BASE_STRING_TYPES + (None,))
        # Normalize empty string to None.
        module = module or None
        self.__default_factory = default_factory
        self.__module = module
    @final
    def fabricate_default_value(self, **kwargs):
        # type: (Any) -> Any
        """
        Fabricate default value.
        :param kwargs: Keyword arguments to be passed to the factory.
        :return: Fabricated value.
        :raises ValueError: No default factory.
        """
        if self.__default_factory is not None:
            return run_factory(self.__default_factory, kwargs=kwargs)
        else:
            error = "property has no 'default factory'"
            raise ValueError(error)
    @property
    def default_factory(self):
        # type: () -> LazyFactory
        """
        Default value factory.
        :rtype: str or collections.abc.Callable or None
        """
        return self.__default_factory
    @property
    def module(self):
        # type: () -> Optional[str]
        """
        Optional module path to use in case partial paths are provided.
        :rtype: str or None
        """
        return self.__module
class ApplicationInternals(Base):
    """Internals for `Application`."""
    __slots__ = (
        "__app_ref",  # weak reference back to the owning Application
        "__history_cls",  # lazily-imported HistoryObject class (or None)
        "__lock",  # re-entrant lock guarding read/write contexts
        "__storage",  # permanent storage mapping objects to their stores
        "__snapshot",  # active snapshot (overrides reads) or None
        "__busy_writing",  # objects currently inside a write (cycle detection)
        "__busy_hierarchy",  # counter of objects whose hierarchies are locked
        "__commits",  # pending (unmerged) commits for the current write
        "__reading",  # stack of objects in nested read contexts
        "__writing",  # stack of objects in nested write contexts
        "__roots",  # root descriptor -> initialized root object
    )
    def __init__(self, app):
        # type: (Application) -> None
        self.__app_ref = WeakReference(app)
        self.__history_cls = None  # type: Optional[Type[HistoryObject]]
        self.__lock = ApplicationLock()
        self.__storage = Storage()  # type: Storage[BaseObject, Store]
        self.__snapshot = None  # type: Optional[ApplicationSnapshot]
        self.__busy_writing = set()  # type: Set[BaseObject]
        self.__busy_hierarchy = ValueCounter()  # type: Counter[BaseObject]
        self.__commits = []  # type: List[Commit]
        self.__reading = []  # type: List[Optional[BaseObject]]
        self.__writing = []  # type: List[Optional[BaseObject]]
        self.__roots = {}  # type: Dict[ApplicationRoot, BaseObject]
    def __deepcopy__(self, memo=None):
        # type: (Optional[Dict[int, Any]]) -> ApplicationInternals
        """
        Deep copy.
        :param memo: Memo dict.
        :return: Deep copy.
        :raises RuntimeError: Can't deep copy while application is in a 'write' context.
        """
        if memo is None:
            memo = {}
        with self.read_context():
            if self.__writing:
                error = "can't deep copy while application is in a 'write' context"
                raise RuntimeError(error)
            try:
                deep_copy = memo[id(self)]
            except KeyError:
                cls = type(self)
                deep_copy = memo[id(self)] = cls.__new__(cls)
                deep_copy_state_args = self.__getstate__(), memo
                deep_copy_state = deepcopy(*deep_copy_state_args)
                deep_copy.__setstate__(deep_copy_state)
            return deep_copy
    def __getstate__(self):
        # type: () -> Dict[str, Any]
        """
        Get state for pickling.
        :return: State.
        :raises RuntimeError: Can't pickle while application is in a 'write' context.
        """
        with self.read_context():
            if self.__writing:
                error = "can't pickle while application is in a 'write' context"
                raise RuntimeError(error)
            return super(ApplicationInternals, self).__getstate__()
    def __read(self, obj):
        # type: (BaseObject) -> Store
        """
        Get current store for an object.
        :param obj: Object.
        :return: Store.
        """
        # Read precedence: active snapshot > latest pending commit > storage.
        if self.__snapshot is not None:
            try:
                return self.__snapshot._storage.query(obj)
            except KeyError:
                error = "object with id {} is not valid in snapshot".format(id(obj))
                raise RuntimeError(error)
        if self.__writing:
            try:
                return self.__commits[-1].stores[obj]
            except (IndexError, KeyError):
                pass
        try:
            return self.__storage.query(obj)
        except KeyError:
            error = "object with id {} is no longer valid".format(id(obj))
            raise RuntimeError(error)
    def __read_history(
        self,
        obj,  # type: BaseObject
    ):
        # type: (...) -> Union[Tuple[HistoryObject, BaseObject], Tuple[None, None]]
        """
        Get current history for an object.
        :param obj: Object.
        :return: History object and history provider or (None, None).
        """
        # Walk up the chain of history providers until an actual history is
        # found or the chain ends.
        store = self.__read(obj)
        if store.history is not None:
            return store.history, obj
        provider = store.history_provider_ref()
        if provider is not None:
            return self.__read_history(provider)
        return None, None
    def __pre_parent_check(
        self,
        obj,  # type: BaseObject
        hierarchy,  # type: List[BaseObject]
        child_counter,  # type: Counter[BaseObject]
    ):
        # type: (...) -> None
        """
        Run checks before performing parenting/unparenting.
        :param obj: Object adopting/releasing children.
        :param hierarchy: Cached hierarchy.
        :param child_counter: Child object counter.
        :raises ValueError: Can't have history objects as children of other objects.
        :raises ValueError: Can't have root objects as children of other objects.
        :raises ValueError: Can't change parent while object's hierarchy is locked.
        :raises ValueError: Can't change parent while object is initializing.
        :raises ValueError: Object is already parented.
        :raises ValueError: Parent cycle detected.
        :raises ValueError: Object is not a child.
        :raises ValueError: Can't parent more than once.
        :raises ValueError: Can't unparent more than once.
        """
        if not child_counter:
            return
        children = None
        # A positive count means the child is being adopted; a negative count
        # means it is being released.
        for child, count in iteritems(child_counter):
            if count:
                if self.__history_cls is not None:
                    if isinstance(child, self.__history_cls):
                        error = (
                            "can't have '{}' objects as children of other objects"
                        ).format(self.__history_cls.__fullname__)
                        raise ValueError(error)
                if child.__.is_root:
                    error = "'{}' object is a root and can't be parented".format(
                        type(child).__fullname__
                    )
                    raise ValueError(error)
                if self.__busy_hierarchy.get(child):
                    error = (
                        "can't change parent for {} while its hierarchy is locked"
                    ).format(child)
                    raise ValueError(error)
                if child._initializing:
                    error = (
                        "can't change parent for {} while running it's '__init__'"
                    ).format(child)
                    raise ValueError(error)
            if count == 1:
                child_parent = self.__read(child).parent_ref()
                if child_parent is not None:
                    error = (
                        "{} is already parented to {}, can't parent it to {}"
                    ).format(child, child_parent, obj)
                    raise ValueError(error)
                for parent in hierarchy:
                    if parent is child:
                        error = "parent cycle between {} and {}".format(child, obj)
                        raise ValueError(error)
            elif count == -1:
                # Lazily read current children only when a release is checked.
                if children is None:
                    children = self.__read(obj).children
                if child not in children:
                    error = "{} is not a child of {}".format(child, obj)
                    raise ValueError(error)
            elif count > 1:
                error = "{} can't be parented to {} more than once".format(child, obj)
                raise ValueError(error)
            elif count < -1:
                error = "{} can't be un-parented from {} more than once".format(
                    child, obj
                )
                raise ValueError(error)
    def __write(
        self,
        obj,  # type: BaseObject
        state,  # type: BaseState
        data,  # type: Optional[BaseData]
        metadata,  # type: Mapping[str, Any]
        child_counter,  # type: Counter[BaseObject]
        change,  # type: BaseAtomicChange
    ):
        # type: (...) -> None
        """
        Perform a 'write' operation.
        :param obj: Object.
        :param state: New state.
        :param data: New data.
        :param metadata: New metadata.
        :param child_counter: Child counter.
        :param change: Change.
        """
        # Lock hierarchy and cache it.
        with self.__hierarchy_context(obj) as hierarchy:
            assert hierarchy[0] is obj
            # Perform pre-parent check.
            self.__pre_parent_check(obj, hierarchy, child_counter)
            # Lock parenting for new children.
            with self.__new_children_context(change.new_children):
                # Enter history atomic batch if not in a batch.
                history, history_provider = self.__read_history(obj)
                atomic_batch_change = None
                if (
                    history is not None
                    and not obj._initializing
                    and not history.executing
                    and not history.in_batch()
                ):
                    from ._changes import Batch
                    atomic_batch_change = Batch(
                        name=change.name,
                        obj=obj,
                        metadata={"atomic_change": change, "is_atomic": True},
                    )
                    history.__enter_batch__(atomic_batch_change)
                # Pre phase.
                # Build one Action per level of the hierarchy (sender stays
                # 'obj'; receiver walks up), accumulating relative locations.
                child = None  # type: Optional[BaseObject]
                single_locations = []  # type: List[Any]
                single_data_locations = []  # type: List[Any]
                all_locations = []  # type: List[List[Any]]
                all_data_locations = []  # type: List[List[Any]]
                actions = []  # type: List[Action]
                for i, parent in enumerate(hierarchy):
                    if i == 0:
                        location = None
                        locations = []
                        data_location = None
                        data_locations = []
                    else:
                        assert child is not None
                        location = parent._locate(child)
                        locations = [location] + all_locations[-1]
                        relationship = cast(
                            "Relationship", parent._get_relationship(location)
                        )
                        if relationship.data:
                            data_location = parent._locate_data(child)
                            # NOTE(review): unlike 'locations' above, this nests
                            # the previous list instead of concatenating
                            # ('[x] + prev'); only 'single_data_locations' is
                            # consumed below, so the nesting appears to have no
                            # observable effect — confirm intent.
                            data_locations = [data_location, all_data_locations[-1]]
                        else:
                            data_location = None
                            data_locations = []
                    single_locations.append(location)
                    all_locations.append(locations)
                    single_data_locations.append(data_location)
                    all_data_locations.append(data_locations)
                    action = Action(
                        sender=obj,
                        receiver=parent,
                        locations=locations,
                        change=change,
                    )
                    assert action.sender is obj
                    assert action.receiver is parent
                    actions.append(action)
                    self.__react(action.receiver, action, Phase.PRE)
                    child = parent
                # Flush histories and filter history adopters.
                new_children_last_parent_history_updates = set()
                histories_to_flush = set()
                filtered_history_adopters = set()
                # noinspection PyTypeChecker
                for new_child in change.new_children:
                    new_child_store = self.__read(new_child)
                    last_parent_history = new_child_store.last_parent_history_ref()
                    if last_parent_history is not history:
                        if last_parent_history is not None:
                            histories_to_flush.add(last_parent_history)
                        new_children_last_parent_history_updates.add(new_child)
                # noinspection PyTypeChecker
                for adopter in change.history_adopters:
                    # Adopters that declare their own history keep it.
                    if type(adopter)._history_descriptor is not None:
                        continue
                    filtered_history_adopters.add(adopter)
                    adopter_old_history, _ = self.__read_history(adopter)
                    if adopter_old_history is not history:
                        if adopter_old_history is not None:
                            histories_to_flush.add(adopter_old_history)
                for history_to_flush in histories_to_flush:
                    history_to_flush.flush()
                # Store changes.
                try:
                    stores = self.__commits[-1].stores
                except IndexError:
                    stores = InteractiveDictData()
                store = self.__read(obj)
                old_data = store.data
                store = store.update(
                    {
                        "state": state,
                        "data": data,
                        "metadata": metadata,
                    }
                )
                # Children changes.
                if change.old_children or change.new_children:
                    children = store.children
                    # noinspection PyTypeChecker
                    for old_child in change.old_children:
                        children = children.remove(old_child)
                        child_store = self.__read(old_child).set(
                            "parent_ref", WeakReference()
                        )
                        stores = stores._set(old_child, child_store)
                    # noinspection PyTypeChecker
                    for new_child in change.new_children:
                        children = children.add(new_child)
                        child_store = self.__read(new_child).set(
                            "parent_ref", WeakReference(obj)
                        )
                        if new_child in new_children_last_parent_history_updates:
                            child_store = child_store.set(
                                "last_parent_history_ref", WeakReference(history)
                            )
                        stores = stores._set(new_child, child_store)
                    store = store.set("children", children)
                stores = stores._set(obj, store)
                # History propagation.
                for adopter in filtered_history_adopters:
                    try:
                        adopter_store = stores[adopter]
                    except KeyError:
                        adopter_store = self.__read(adopter)
                    adopter_store = adopter_store.set(
                        "history_provider_ref", WeakReference(obj)
                    )
                    stores = stores._set(adopter, adopter_store)
                # Upstream data changes.
                # Propagate the new data up the hierarchy until a parent's
                # relationship stops carrying data or the data stops changing.
                if data is not old_data:
                    child_data = data
                    child = None
                    for i, parent in enumerate(hierarchy):
                        if i == 0:
                            child = parent
                            continue
                        assert child is not None
                        location = single_locations[i]
                        relationship = cast(
                            "Relationship", parent._get_relationship(location)
                        )
                        if not relationship.data:
                            break
                        assert data is not None
                        assert child_data is not None
                        data_location = single_data_locations[i]
                        parent_old_store = self.__read(parent)
                        parent_new_store = type(
                            parent
                        ).__functions__.replace_child_data(
                            parent_old_store,
                            child,
                            data_location,
                            child_data,
                        )
                        if parent_new_store is parent_old_store:
                            break
                        stores = stores._set(parent, parent_new_store)
                        child = parent
                        child_data = parent_new_store.data
                # Commit!
                commit = Commit(actions=actions, stores=stores)
                self.__commits.append(commit)
                # Push change to history.
                if (
                    history is not None
                    and history_provider is not None
                    and not obj._initializing
                    and not history_provider._initializing
                ):
                    history.__push_change__(change)
                # Post phase.
                for action in actions:
                    self.__react(action.receiver, action, Phase.POST)
                # Exit history atomic batch.
                if history is not None and atomic_batch_change is not None:
                    history.__exit_batch__(atomic_batch_change)
    def __update_metadata(
        self,
        obj,  # type: BaseObject
        update,  # type: Mapping[str, Any]
    ):
        # type: (...) -> None
        """
        Update metadata.
        :param obj: Object.
        :param update: Metadata update.
        """
        # Store changes.
        try:
            stores = self.__commits[-1].stores
        except IndexError:
            stores = InteractiveDictData()
        store = self.__read(obj)
        old_metadata = store.metadata
        store = store.update({"metadata": old_metadata.update(update)})
        stores = stores._set(obj, store)
        # Commit!  (No actions — metadata updates don't notify observers.)
        commit = Commit(actions=(), stores=stores)
        self.__commits.append(commit)
    def __revert(self, index):
        # type: (int) -> None
        """
        Revert changes to a particular index.
        :param index: Index.
        """
        # Dropping pending commits is enough to revert; nothing has been
        # merged into permanent storage yet.
        del self.__commits[index:]
    def __push(self):
        # type: () -> None
        """Push and merge changes to permanent storage."""
        if self.__commits:
            commits = self.__commits
            self.__commits = []
            action_exception_infos = []  # type: List[ActionObserverExceptionData]
            def ingest_action_exception_infos(result):
                # type: (Tuple[ObserverExceptionInfo, ...]) -> None
                """
                Ingest exception information.
                :param result: Exception information from subject-observers.
                """
                for exception_info in result:
                    internal_observer = cast(
                        "InternalObserver", exception_info.observer
                    )
                    action_observer = internal_observer.action_observer_ref()
                    if action_observer is not None:
                        from objetto._observers import ActionObserverExceptionData
                        action_exception_info = ActionObserverExceptionData(
                            observer=action_observer,
                            action=cast("Action", exception_info.payload[0]),
                            phase=cast("Phase", exception_info.payload[1]),
                            exception_type=exception_info.exception_type,
                            exception=exception_info.exception,
                            traceback=exception_info.traceback,
                        )
                        action_exception_infos.append(action_exception_info)
            for commit in commits:
                if type(commit) is BatchCommit:
                    # Batch commits carry their own phase and no store merge.
                    for action in commit.actions:
                        phase = commit.phase  # type: ignore
                        ingest_action_exception_infos(
                            action.receiver.__.subject.send(
                                action, cast("Phase", phase)
                            )
                        )
                else:
                    # PRE observers run before the stores are merged into
                    # permanent storage, POST observers after.
                    for action in commit.actions:
                        ingest_action_exception_infos(
                            action.receiver.__.subject.send(action, Phase.PRE)
                        )
                    self.__storage = self.__storage.update(commit.stores)
                    for action in commit.actions:
                        ingest_action_exception_infos(
                            action.receiver.__.subject.send(action, Phase.POST)
                        )
            # Observer failures are collected and raised together at the end;
            # they do not prevent the stores from being merged.
            if action_exception_infos:
                raise ActionObserversFailedError(
                    "external observers raised exceptions (see tracebacks below)",
                    tuple(action_exception_infos),
                )
    @contextmanager
    def __hierarchy_context(self, obj):
        # type: (BaseObject) -> Iterator[List[BaseObject]]
        """
        Context manager that locks and caches an object's upper hierarchy.
        :param obj: Object.
        :return: Cached upper hierarchy (starting with the object itself).
        """
        hierarchy = []  # type: List[BaseObject]
        parent = obj  # type: Optional[BaseObject]
        while parent is not None:
            self.__busy_hierarchy[parent] += 1
            hierarchy.append(parent)
            # noinspection PyCallingNonCallable
            parent = self.__read(parent).parent_ref()
        try:
            yield hierarchy
        finally:
            for parent in hierarchy:
                self.__busy_hierarchy[parent] -= 1
                if not self.__busy_hierarchy[parent]:
                    del self.__busy_hierarchy[parent]
    @contextmanager
    def __new_children_context(self, new_children):
        # type: (AbstractSet[BaseObject]) -> Iterator
        """
        Context manager that locks parenting for new children.
        :param new_children: New children.
        """
        for new_child in new_children:
            self.__busy_hierarchy[new_child] += 1
        try:
            yield
        finally:
            for new_child in new_children:
                self.__busy_hierarchy[new_child] -= 1
                if not self.__busy_hierarchy[new_child]:
                    del self.__busy_hierarchy[new_child]
    @staticmethod
    def __react(obj, action, phase):
        # type: (BaseObject, Action, Phase) -> None
        """
        Run object's reactions.
        :param obj: Object.
        :param action: Action.
        :param phase: Phase.
        """
        for reaction in type(obj)._reactions:
            reaction(obj, action, phase)
    def init_object(self, obj):
        # type: (BaseObject) -> None
        """
        Initialize object.
        :param obj: Object.
        """
        with self.write_context():
            try:
                stores = self.__commits[-1].stores
            except IndexError:
                stores = InteractiveDictData()
            def _obj_in_storage():
                try:
                    self.__storage.query(obj)
                except KeyError:
                    return False
                else:
                    return True
            # Guard against double initialization (pending or merged store).
            if obj in stores or _obj_in_storage():
                error = "object {} can't be initialized more than once".format(obj)
                raise RuntimeError(error)
            cls = type(obj)  # type: Type[BaseObject]
            kwargs = {}  # type: Dict[str, Any]
            # History object.
            history_descriptor = cls._history_descriptor
            if history_descriptor is not None:
                app = self.__app_ref()
                assert app is not None
                # Import HistoryObject lazily (avoids a circular import) and
                # cache the class for later isinstance checks.
                if self.__history_cls is None:
                    from ._history import HistoryObject
                    self.__history_cls = HistoryObject
                kwargs.update(
                    history_provider_ref=WeakReference(obj),
                    history=self.__history_cls(app, size=history_descriptor.size),
                )
            # State.
            state = cls._state_factory()
            # Data.
            data_type = cls.Data
            if data_type is not None:
                data = data_type.__make__()  # type: ignore
            else:
                data = None
            # Commit!
            stores = stores._set(obj, Store(state=state, data=data, **kwargs))
            commit = Commit(stores=stores)
            self.__commits.append(commit)
    @contextmanager
    def snapshot_context(self, snapshot):
        # type: (ApplicationSnapshot) -> Iterator
        """
        Snapshot read context manager.
        :param snapshot: Snapshot.
        """
        with self.read_context():
            # While set, '__read' resolves stores from the snapshot instead of
            # live storage.
            self.__snapshot = snapshot
            try:
                yield
            finally:
                self.__snapshot = None
    @contextmanager
    def read_context(self, obj=None):
        # type: (Optional[BaseObject]) -> Iterator[ReadFunction]
        """
        Read context manager.
        :param obj: Object.
        :return: Read handle function.
        """
        with self.__lock:
            topmost = not self.__reading
            self.__reading.append(obj)
            def read():
                # type: () -> Store
                """Read object store."""
                assert obj is not None
                return self.__read(obj)
            try:
                yield read
            finally:
                self.__reading.pop()
                if topmost:
                    assert not self.__reading
    @contextmanager
    def write_context(
        self,
        obj=None,  # type: Optional[BaseObject]
    ):
        # type: (...) -> Iterator[Tuple[ReadFunction, WriteFunction]]
        """
        Write context manager.
        :param obj: Object.
        :return: Read and write handle functions.
        """
        with self.__lock:
            if self.__reading:
                error = "can't enter a 'write' context while in a 'read' context"
                raise RuntimeError(error)
            topmost = not self.__writing
            # Remember the commit index so this context can revert on failure.
            index = len(self.__commits)
            self.__writing.append(obj)
            def read():
                # type: () -> Store
                """Read object store."""
                assert obj is not None
                return self.__read(obj)
            def write(
                state,  # type: Any
                data,  # type: BaseData
                metadata,  # type: Mapping[str, Any]
                child_counter,  # type: Counter[BaseObject]
                change,  # type: BaseAtomicChange
            ):
                # type: (...) -> None
                """Write changes to object."""
                assert obj is not None
                # Re-entrant write on the same object means a reaction loop.
                if obj in self.__busy_writing:
                    error_ = "reaction cycle detected on {}".format(obj)
                    raise RuntimeError(error_)
                self.__busy_writing.add(obj)
                try:
                    self.__write(obj, state, data, metadata, child_counter, change)
                except RejectChangeException as e_:
                    self.__busy_writing.remove(obj)
                    # Only handle a rejection aimed at this very change.
                    if e_.change is not change:
                        raise
                    self.__revert(index)
                    e_.callback()
                except Exception:
                    self.__busy_writing.remove(obj)
                    raise
                else:
                    self.__busy_writing.remove(obj)
            try:
                yield read, write
            except Exception as e:
                self.__revert(index)
                # TemporaryContextException is swallowed (after reverting) but
                # only at the topmost write context; anything else propagates.
                if not topmost or type(e) is not TemporaryContextException:
                    raise
            else:
                # Only the topmost context merges the accumulated commits.
                if topmost:
                    with self.read_context():
                        self.__push()
            finally:
                self.__writing.pop()
                if topmost:
                    assert not self.__busy_hierarchy
                    assert not self.__busy_writing
                    assert not self.__commits
                    assert not self.__writing
    @contextmanager
    def update_metadata_context(
        self,
        obj,  # type: BaseObject
    ):
        # type: (...) -> Iterator[Tuple[ReadMetadataFunction, UpdateMetadataFunction]]
        """
        Update metadata context manager.
        :param obj: Object.
        :return: Read metadata and write metadata handle functions.
        """
        with self.write_context():
            def read_metadata():
                # type: () -> InteractiveDictData
                """Read metadata."""
                return self.__read(obj).metadata
            def update_metadata(
                update,  # type: Mapping[str, Any]
            ):
                # type: (...) -> None
                """Update metadata."""
                self.__update_metadata(obj, update)
            yield read_metadata, update_metadata
    @contextmanager
    def batch_context(self, obj, change):
        # type: (BaseObject, Batch) -> Iterator
        """
        Batch change context.
        :param obj: Object.
        :param change: Batch change.
        """
        with self.write_context():
            index = len(self.__commits)
            def _get_stores():
                try:
                    return self.__commits[-1].stores
                except IndexError:
                    return {}
            try:
                with self.__hierarchy_context(obj) as hierarchy:
                    assert hierarchy[0] is obj
                    # Get history.
                    history, history_provider = self.__read_history(obj)
                    # Gather actions.
                    child = None  # type: Optional[BaseObject]
                    single_locations = []  # type: List[Any]
                    all_locations = []  # type: List[List[Any]]
                    actions = []  # type: List[Action]
                    for i, parent in enumerate(hierarchy):
                        if i == 0:
                            location = None
                            locations = []
                        else:
                            assert child is not None
                            location = parent._locate(child)
                            locations = [location] + all_locations[-1]
                        single_locations.append(location)
                        all_locations.append(locations)
                        action = Action(
                            sender=obj,
                            receiver=parent,
                            locations=locations,
                            change=change,
                        )
                        assert action.sender is obj
                        assert action.receiver is parent
                        actions.append(action)
                        child = parent
                    # Commit Pre.
                    pre_commit = BatchCommit(
                        actions=actions, stores=_get_stores(), phase=Phase.PRE
                    )
                    self.__commits.append(pre_commit)
                    # History Pre.
                    if (
                        history is not None
                        and history_provider is not None
                        and not obj._initializing
                        and not history_provider._initializing
                    ):
                        history.__enter_batch__(change)
                    # Pre.
                    for action in actions:
                        self.__react(action.receiver, action, Phase.PRE)
                    yield change
                    # History Post.
                    if (
                        history is not None
                        and history_provider is not None
                        and not obj._initializing
                        and not history_provider._initializing
                    ):
                        history.__exit_batch__(change)
                    # Post.
                    for action in actions:
                        self.__react(action.receiver, action, Phase.POST)
                    # Commit Post.
                    post_commit = BatchCommit(
                        actions=actions, stores=_get_stores(), phase=Phase.POST
                    )
                    self.__commits.append(post_commit)
            # Catch rejection.
            except RejectChangeException as e:
                self.__revert(index)
                if e.change is not change:
                    raise
                e.callback()
    def init_root_objs(self):
        # type: () -> None
        """Initialize root objects."""
        app = self.__app_ref()
        assert app is not None
        assert not self.__roots
        roots = type(app)._roots
        if roots:
            with self.write_context():
                # Lower priority first; roots with no priority go last.
                sorted_roots = sorted(
                    itervalues(roots), key=lambda r: (r.priority is None, r.priority)
                )
                for root in sorted_roots:
                    root_obj = root.obj_type(app, **root.kwargs)
                    self.__roots[root] = root_obj
                    root_obj.__.set_root()
    def get_root_obj(self, root):
        # type: (ApplicationRoot) -> BaseObject
        """
        Get root object.
        :param root: Application root descriptor.
        :return: Root object.
        """
        return self.__roots[root]
    def take_snapshot(self):
        # type: () -> ApplicationSnapshot
        """
        Take a snapshot of the current application state.
        :return: Application snapshot.
        :rtype: objetto.applications.ApplicationSnapshot
        """
        storage = self.__storage
        # Include pending commit stores so the snapshot reflects in-flight
        # (not yet merged) writes.
        if self.__writing and self.__commits:
            storage = storage.update(self.__commits[-1].stores)
        app = self.__app_ref()
        assert app is not None
        return ApplicationSnapshot(app, storage)
    @property
    def is_writing(self):
        # type: () -> bool
        """
        Whether this application is inside a write context.
        :rtype: bool
        """
        with self.read_context():
            return bool(self.__writing)
    @property
    def is_reading(self):
        # type: () -> bool
        """
        Whether this application is inside a read context.
        :rtype: bool
        """
        with self.read_context():
            # '> 1' because this property's own read_context pushes one entry.
            return len(self.__reading) > 1
class ApplicationMeta(BaseMeta):
    """
    Metaclass for :class:`objetto.applications.Application`.
    Inherits from:
      - :class:`objetto.bases.BaseMeta`
    Features:
      - Check and store `root descriptors <objetto.applications.root>`_.
    """

    __roots = WeakKeyDictionary(
        {}
    )  # type: MutableMapping[ApplicationMeta, Mapping[str, ApplicationRoot]]
    __root_names = WeakKeyDictionary(
        {}
    )  # type: MutableMapping[ApplicationMeta, Mapping[ApplicationRoot, str]]

    def __init__(cls, name, bases, dct):
        # type: (str, Tuple[Type, ...], Dict[str, Any]) -> None
        super(ApplicationMeta, cls).__init__(name, bases, dct)

        # Walk the MRO from most-base to most-derived so derived declarations
        # win; a non-root member shadowing an inherited root removes it.
        collected = {}
        for klass in reversed(getmro(cls)):
            for attribute_name, attribute in iteritems(klass.__dict__):
                if isinstance(attribute, ApplicationRoot):
                    collected[attribute_name] = attribute
                elif attribute_name in collected:
                    del collected[attribute_name]

        # Reverse mapping: root descriptor -> attribute name.
        names_by_root = {r: n for n, r in iteritems(collected)}

        type(cls).__roots[cls] = DictState(collected)
        type(cls).__root_names[cls] = DictState(names_by_root)

    @property
    def _roots(cls):
        # type: () -> Mapping[str, ApplicationRoot]
        """
        Root descriptors mapped by attribute name.
        :rtype: dict[str, objetto.applications.ApplicationRoot]
        """
        return type(cls).__roots[cls]

    @property
    def _root_names(cls):
        # type: () -> Mapping[Any, str]
        """
        Attribute names mapped by root descriptor.
        :rtype: dict[objetto.applications.ApplicationRoot, str]
        """
        return type(cls).__root_names[cls]
class Application(with_metaclass(ApplicationMeta, Base)):
    """
    Application.

    Metaclass:
      - :class:`objetto.applications.ApplicationMeta`

    Inherits from:
      - :class:`objetto.bases.Base`

    Features:
      - Manages multiple :class:`objetto.bases.BaseObject` under the same hierarchy.
      - Offers contexts for reading/writing/batch.
      - Reverts changes when an error occurs.
      - Manages :class:`objetto.objects.Action` propagation, internally and externally.

    When initializing an :class:`objetto.objects.BaseObject`, you have to pass an
    :class:`objetto.applications.Application` as its first parameter.

    .. code:: python

        >>> from objetto import Application, Object

        >>> app = Application()
        >>> obj = Object(app)  # pass application as first parameter
        >>> obj.app is app  # access it through the 'app' property
        True
    """

    __slots__ = ("__weakref__", "__", "__properties")

    def __init__(self):
        # `__` holds the internal machinery/state for this application.
        self.__ = ApplicationInternals(self)
        self.__.init_root_objs()
        # Per-ApplicationProperty value cache, weakly keyed by the property.
        self.__properties = WeakKeyDictionary()

    @final
    def _get_property(self, prop, **kwargs):
        # type: (ApplicationProperty, Any) -> Any
        """
        Get property value.

        :param prop: Application property.
        :type prop: objetto.applications.ApplicationProperty
        :param kwargs: Keyword arguments to be passed to the default factory.
        :return: Value.
        :raises TypeError: Invalid parameter type.
        """
        try:
            value = self.__properties[prop]
        except KeyError:
            # First access: validate the property, then fabricate and cache a
            # default value, injecting this application as 'app'.
            with ReraiseContext(TypeError, "'prop' parameter"):
                assert_is_instance(prop, ApplicationProperty)
            if "app" in kwargs:
                error = "can't pass reserved keyword argument 'app' in kwargs"
                exc = ValueError(error)
                raise_from(exc, None)
                raise exc  # fallback; raise_from already raises on Python 3
            kwargs["app"] = self
            value = self.__properties[prop] = prop.fabricate_default_value(**kwargs)
        return value

    @final
    def _set_property(self, prop, value):
        # type: (ApplicationProperty, Any) -> None
        """
        Set property value.

        :param prop: Application property.
        :type prop: objetto.applications.ApplicationProperty
        :param value: Value.
        :raises TypeError: Invalid parameter type.
        """
        # Only validate the property type on first use (not cached yet).
        if prop not in self.__properties:
            with ReraiseContext(TypeError, "'prop' parameter"):
                assert_is_instance(prop, ApplicationProperty)
        self.__properties[prop] = value

    @final
    def _delete_property(self, prop, force=False):
        # type: (ApplicationProperty, bool) -> None
        """
        Delete property value.

        :param prop: Application property.
        :type prop: objetto.applications.ApplicationProperty
        :param force: If True, will not fail if has no value set.
        :type force: bool
        :raises TypeError: Invalid parameter type.
        :raises ValueError: Property has no value.
        """
        if prop not in self.__properties:
            with ReraiseContext(TypeError, "'prop' parameter"):
                assert_is_instance(prop, ApplicationProperty)
            if not force:
                error = "property has no value, can't delete it"
                raise ValueError(error)
        self.__properties.pop(prop, None)

    @final
    @contextmanager
    def read_context(self, snapshot=None):
        # type: (Optional[ApplicationSnapshot]) -> Iterator
        """
        Read context.

        :param snapshot: Application state snapshot.
        :type snapshot: objetto.applications.ApplicationSnapshot
        :return: Context manager.
        :rtype: contextlib.AbstractContextManager
        :raises ValueError: Application mismatch.
        """
        if snapshot is not None:
            # Reading against a snapshot: it must belong to this application.
            with ReraiseContext((TypeError, ValueError), "'snapshot' parameter"):
                assert_is_instance(snapshot, ApplicationSnapshot)
                if snapshot.app is not self:
                    error = "application mismatch"
                    raise ValueError(error)
            with self.__.snapshot_context(snapshot):
                yield
        else:
            with self.__.read_context():
                yield

    @final
    @contextmanager
    def write_context(self):
        # type: () -> Iterator
        """
        Write context.

        :return: Context manager.
        :rtype: contextlib.AbstractContextManager
        """
        with self.__.write_context():
            yield

    @final
    @contextmanager
    def temporary_context(self):
        # type: () -> Iterator
        """
        Temporary write context.

        :return: Context manager.
        :rtype: contextlib.AbstractContextManager
        """
        with self.__.write_context():
            try:
                yield
            except Exception:
                raise
            else:
                # On success, raise a sentinel so the enclosing write context
                # unwinds and discards everything done inside the block.
                raise TemporaryContextException()

    @final
    def take_snapshot(self):
        # type: () -> ApplicationSnapshot
        """
        Take a snapshot of the current application state.

        :return: Application snapshot.
        :rtype: objetto.applications.ApplicationSnapshot
        """
        return self.__.take_snapshot()

    @property
    def is_writing(self):
        # type: () -> bool
        """
        Whether this application is inside a write context.

        :rtype: bool
        """
        return self.__.is_writing

    @property
    def is_reading(self):
        # type: () -> bool
        """
        Whether this application is inside a read context.

        :rtype: bool
        """
        return self.__.is_reading
@final
class ApplicationSnapshot(Base):
    """
    Immutable snapshot of an application's entire state.

    Inherits from:
      - :class:`objetto.bases.Base`

    Features:
      - Freezes entire application state at a moment in time.
      - Can be used with an application's read context to travel back in time.

    Obtain one via :meth:`objetto.applications.Application.take_snapshot`, then
    pass it to :meth:`objetto.applications.Application.read_context` to
    temporarily view the whole application as it was when the snapshot was
    taken.

    .. code:: python

        >>> from objetto import Application, Object, attribute

        >>> class Person(Object):
        ...     name = attribute(str)
        ...
        >>> app = Application()
        >>> obj = Person(app, name="Albert")
        >>> obj.name
        'Albert'
        >>> snapshot = app.take_snapshot()
        >>> obj.name = "Einstein"
        >>> obj.name
        'Einstein'
        >>> with app.read_context(snapshot):
        ...     obj.name
        ...
        'Albert'
        >>> obj.name
        'Einstein'
    """

    __slots__ = ("__app", "__storage")

    def __init__(self, app, storage):
        # type: (Application, Storage) -> None
        self.__storage = storage
        self.__app = app

    @property
    def _storage(self):
        # type: () -> Storage
        """Internal storage, frozen at snapshot time."""
        return self.__storage

    @property
    def app(self):
        # type: () -> Application
        """
        Application this snapshot was taken from.

        :rtype: objetto.applications.Application
        """
        return self.__app
|
#!/usr/bin/env python3
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from svg_util import parse_array_of_floats
import unittest
class ParseArrayOfFloatsTest(unittest.TestCase):
    """Unit tests for svg_util.parse_array_of_floats."""

    def _check(self, text, expected):
        # Shared assertion helper: parse `text`, compare with `expected`.
        self.assertEqual(parse_array_of_floats(text), expected)

    def test_empty(self):
        self._check("", [])
        self._check(" ", [])

    def test_single_value(self):
        self._check("123", [123])
        self._check(" \t 123 \t", [123])

    def test_single_value_exponent(self):
        self._check("12e+3", [12000])
        self._check("12e-3", [0.012])

    def test_space_separated_values(self):
        self._check("123 45 6 89", [123, 45, 6, 89])
        self._check(" 123 45 6 89 ", [123, 45, 6, 89])

    def test_comma_separated_values(self):
        self._check("123,45,6,89", [123, 45, 6, 89])
        self._check(" 123,45,6,89 ", [123, 45, 6, 89])

    def test_mixed_separated_values(self):
        self._check("123,45 6,89", [123, 45, 6, 89])
        self._check(" 123 45,6,89 ", [123, 45, 6, 89])

    def test_omitted_value_with_comma(self):
        # A missing value between separators parses as 0.
        self._check("1,,3", [1, 0, 3])
        self._check(",,3", [0, 0, 3])

    def test_sign_as_separator(self):
        # A sign character may start a new number without whitespace.
        self._check("1-3", [1, -3])
        self._check("1+3", [1, 3])

    def test_all_commas(self):
        self._check(",,,", [0, 0, 0, 0])

    def test_value_with_decimal_separator(self):
        self._check("3.5", [3.5])

    def test_comma_separated_values_with_decimal_separator(self):
        self._check("2.75,8.5", [2.75, 8.5])
# Run the test suite with per-test (verbose) output when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from robot.errors import DataError
from robot.utils import (Importer, is_string, py3to2, split_args_from_name_or_path,
type_name)
from .listenermethods import ListenerMethods, LibraryListenerMethods
from .loggerhelper import AbstractLoggerProxy, IsLogged
from .logger import LOGGER
@py3to2
class Listeners(object):
    """Imports configured listeners and dispatches execution events to them."""

    _method_names = ('start_suite', 'end_suite', 'start_test', 'end_test',
                     'start_keyword', 'end_keyword', 'log_message', 'message',
                     'output_file', 'report_file', 'log_file', 'debug_file',
                     'xunit_file', 'library_import', 'resource_import',
                     'variables_import', 'close')

    def __init__(self, listeners, log_level='INFO'):
        self._is_logged = IsLogged(log_level)
        listeners = ListenerProxy.import_listeners(listeners,
                                                   self._method_names)
        for name in self._method_names:
            method = ListenerMethods(name, listeners)
            # Events that need filtering/routing before dispatch are stored
            # under a leading underscore; the public wrappers below call them.
            if name.endswith(('_keyword', '_file', '_import', 'log_message')):
                name = '_' + name
            setattr(self, name, method)

    def set_log_level(self, level):
        self._is_logged.set_level(level)

    def start_keyword(self, kw):
        # IF/ELSE root containers are structural; don't notify listeners.
        if kw.type != kw.IF_ELSE_ROOT:
            self._start_keyword(kw)

    def end_keyword(self, kw):
        if kw.type != kw.IF_ELSE_ROOT:
            self._end_keyword(kw)

    def log_message(self, msg):
        # Forward only messages that pass the configured log level.
        if self._is_logged(msg.level):
            self._log_message(msg)

    def imported(self, import_type, name, attrs):
        # Route to _library_import / _resource_import / _variables_import.
        method = getattr(self, '_%s_import' % import_type.lower())
        method(name, attrs)

    def output_file(self, file_type, path):
        # Route to _output_file / _report_file / _log_file / etc.
        method = getattr(self, '_%s_file' % file_type.lower())
        method(path)

    def __bool__(self):
        # True when at least one non-empty dispatcher is registered.
        return any(isinstance(method, ListenerMethods) and method
                   for method in self.__dict__.values())
class LibraryListeners(object):
    """Dispatches execution events to library-scoped listeners."""

    _method_names = ('start_suite', 'end_suite', 'start_test', 'end_test',
                     'start_keyword', 'end_keyword', 'log_message', 'message',
                     'close')

    def __init__(self, log_level='INFO'):
        self._is_logged = IsLogged(log_level)
        for name in self._method_names:
            method = LibraryListenerMethods(name)
            # log_message is level-filtered by the public wrapper below.
            if name == 'log_message':
                name = '_' + name
            setattr(self, name, method)

    def register(self, listeners, library):
        # Library listener methods may use a leading-underscore prefix, and
        # import failures are fatal here (raise_on_error=True).
        listeners = ListenerProxy.import_listeners(listeners,
                                                   self._method_names,
                                                   prefix='_',
                                                   raise_on_error=True)
        for method in self._listener_methods():
            method.register(listeners, library)

    def _listener_methods(self):
        return [method for method in self.__dict__.values()
                if isinstance(method, LibraryListenerMethods)]

    def unregister(self, library, close=False):
        if close:
            # Give the library's listeners a final close() before removal.
            self.close(library=library)
        for method in self._listener_methods():
            method.unregister(library)

    def new_suite_scope(self):
        for method in self._listener_methods():
            method.new_suite_scope()

    def discard_suite_scope(self):
        for method in self._listener_methods():
            method.discard_suite_scope()

    def set_log_level(self, level):
        self._is_logged.set_level(level)

    def log_message(self, msg):
        if self._is_logged(msg.level):
            self._log_message(msg)

    def imported(self, import_type, name, attrs):
        # Import events are not forwarded to library listeners.
        pass

    def output_file(self, file_type, path):
        # Output-file events are not forwarded to library listeners.
        pass
class ListenerProxy(AbstractLoggerProxy):
    """Wraps a single listener (object or import path) behind a uniform API."""

    _no_method = None

    def __init__(self, listener, method_names, prefix=None):
        listener, name = self._import_listener(listener)
        AbstractLoggerProxy.__init__(self, listener, method_names, prefix)
        self.name = name
        self.version = self._get_version(listener)
        if self.version == 3:
            # Listener API v3 has no keyword-level or import events.
            self.start_keyword = self.end_keyword = None
            self.library_import = self.resource_import = self.variables_import = None

    def _import_listener(self, listener):
        if not is_string(listener):
            # Modules have `__name__`, with others better to use `type_name`.
            name = getattr(listener, '__name__', None) or type_name(listener)
            return listener, name
        # String form may carry instantiation arguments (e.g. 'Listener:arg').
        name, args = split_args_from_name_or_path(listener)
        importer = Importer('listener', logger=LOGGER)
        listener = importer.import_class_or_module(os.path.normpath(name),
                                                   instantiate_with_args=args)
        return listener, name

    def _get_version(self, listener):
        try:
            version = int(listener.ROBOT_LISTENER_API_VERSION)
            if version not in (2, 3):
                # Reuse the ValueError handler below for unsupported versions.
                raise ValueError
        except AttributeError:
            raise DataError("Listener '%s' does not have mandatory "
                            "'ROBOT_LISTENER_API_VERSION' attribute."
                            % self.name)
        except (ValueError, TypeError):
            raise DataError("Listener '%s' uses unsupported API version '%s'."
                            % (self.name, listener.ROBOT_LISTENER_API_VERSION))
        return version

    @classmethod
    def import_listeners(cls, listeners, method_names, prefix=None,
                         raise_on_error=False):
        imported = []
        for listener in listeners:
            try:
                imported.append(cls(listener, method_names, prefix))
            except DataError as err:
                name = listener if is_string(listener) else type_name(listener)
                msg = "Taking listener '%s' into use failed: %s" % (name, err)
                if raise_on_error:
                    raise DataError(msg)
                # Best-effort mode: log and continue with remaining listeners.
                LOGGER.error(msg)
        return imported
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
import argparse
import os
import sys
from common.base_model_init import BaseModelInitializer
from common.base_model_init import set_env_var
class ModelInitializer(BaseModelInitializer):
    """Initializer for R-FCN inference: builds and runs the benchmark or
    accuracy command for the requested mode/precision."""

    accuracy_script = "coco_mAP.sh"
    accuracy_script_path = ""

    def run_inference_sanity_checks(self, args, custom_args):
        """Exit with an error unless batch size is 1 (or the -1 default)."""
        if args.batch_size != -1 and args.batch_size != 1:
            sys.exit("R-FCN inference supports 'batch-size=1' " +
                     "only, please modify via the '--batch_size' flag.")

    def __init__(self, args, custom_args, platform_util):
        super(ModelInitializer, self).__init__(args, custom_args, platform_util)
        self.accuracy_script_path = os.path.join(
            self.args.intelai_models, self.args.mode, self.args.precision,
            self.accuracy_script)
        self.benchmark_script = os.path.join(
            self.args.intelai_models, self.args.mode,
            self.args.precision, "eval.py")
        # Set KMP env vars, if they haven't already been set.
        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
        self.set_kmp_vars(config_file_path)
        self.run_inference_sanity_checks(self.args, self.custom_args)
        self.parse_custom_args()
        self.research_dir = os.path.join(self.args.model_source_dir,
                                         "research")

    def run_benchmark(self):
        """Assemble and run the benchmark (eval.py) command."""
        command_prefix = self.get_command_prefix(self.args.socket_id) + \
            self.python_exe + " " + self.benchmark_script
        # Set num_inter_threads and num_intra_threads.
        self.set_num_inter_intra_threads()
        if self.args.socket_id == -1:
            # No socket pinning requested: pin to explicit cores via taskset.
            if self.args.num_cores == 1:
                command_prefix = "taskset -c 0 " + command_prefix
                self.args.num_intra_threads = 1
            else:
                command_prefix = "taskset -c 0-" + \
                    str(self.args.num_cores - 1) + \
                    " " + command_prefix
        set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
        config_file_path = os.path.join(self.args.checkpoint,
                                        self.args.config_file)
        run_cmd = command_prefix + \
            " --inter_op " + str(self.args.num_inter_threads) + \
            " --intra_op " + str(self.args.num_intra_threads) + \
            " --omp " + str(self.args.num_intra_threads) + \
            " --pipeline_config_path " + config_file_path + \
            " --checkpoint_dir " + str(self.args.checkpoint) + \
            " --eval_dir " + self.research_dir + \
            "/object_detection/models/rfcn/eval " + \
            " --logtostderr " + \
            " --blocktime=0 " + \
            " --run_once=True"
        self.run_command(run_cmd)

    def parse_custom_args(self):
        """Parse model-specific args (--config_file, --split) into self.args."""
        if self.custom_args:
            parser = argparse.ArgumentParser()
            parser.add_argument("--config_file", default=None,
                                dest="config_file", type=str)
            parser.add_argument("-q", "--split",
                                help="Location of accuracy data",
                                type=str, default=None)
            self.args = parser.parse_args(self.custom_args,
                                          namespace=self.args)

    def run_accuracy_command(self):
        """Assemble and run the accuracy (coco_mAP.sh) command.

        :raises ValueError: missing accuracy script, data record, or SPLIT.
        """
        if not os.path.exists(self.accuracy_script_path):
            raise ValueError("Unable to locate the R-FCN accuracy script: "
                             "{}".format(self.accuracy_script_path))
        command = "FROZEN_GRAPH=" + self.args.input_graph
        if self.args.data_location and os.path.exists(
                self.args.data_location):
            command += " TF_RECORD_FILE=" + self.args.data_location
        else:
            # Bug fix: report the path that was actually checked
            # (data_location). The previous message formatted
            # self.args.tf_record_file, an attribute that is never defined
            # here, so a missing data file raised AttributeError instead of
            # this informative ValueError.
            raise ValueError(
                "Unable to locate the coco data record file at {}".format(
                    self.args.data_location))
        if self.args.split:
            command += " SPLIT=" + self.args.split
        else:
            raise ValueError("Must specify SPLIT parameter")
        command += " TF_MODELS_ROOT={}".format(
            self.args.model_source_dir)
        command += " " + self.accuracy_script_path
        self.run_command(command)

    def run(self):
        """Run accuracy or benchmark from the TF models 'research' directory,
        restoring the original working directory afterwards."""
        original_dir = os.getcwd()
        os.chdir(self.research_dir)
        if self.args.accuracy_only:
            self.run_accuracy_command()
        else:
            self.run_benchmark()
        os.chdir(original_dir)
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
from tinydb import TinyDB, Query
from random import randint
import subprocess,os
import sched, time
import pyaudio
import wave
import random
import datetime
# Run relative to this script's directory so ../res and ../database resolve.
# os.path.dirname(__file__) is "" when the script is launched from its own
# directory, and os.chdir("") raises; abspath avoids that.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def playsound():
    """Play the notification sound (../res/Belligerent.wav) synchronously."""
    chunk = 1024
    wf = wave.open('../res/Belligerent.wav', 'rb')
    p = pyaudio.PyAudio()
    stream = p.open(
        format = p.get_format_from_width(wf.getsampwidth()),
        channels = wf.getnchannels(),
        rate = wf.getframerate(),
        output = True)
    data = wf.readframes(chunk)
    # wave.readframes returns an empty buffer at end of file. Testing
    # truthiness works for both Python 2 str and Python 3 bytes; the old
    # `data != ''` comparison never matched b'' on Python 3 and looped
    # forever.
    while data:
        stream.write(data)
        data = wf.readframes(chunk)
    # Stop before closing, per PyAudio's recommended stream lifecycle.
    stream.stop_stream()
    stream.close()
    p.terminate()
    # Close the wave file too; it was previously leaked.
    wf.close()
def sendmessage(title,message):
    """Sends notification to user"""
    # NOTE(review): concatenating os.getcwd() with a __file__-relative path
    # looks redundant and presumably only yields a valid icon path because
    # the module chdir'ed to its own directory at import time -- verify.
    print os.getcwd() +os.path.join(os.path.dirname(__file__),'/../res/exercise.png')
    # Fire-and-forget desktop notification via notify-send, then play sound.
    subprocess.Popen(['notify-send','-i',os.getcwd() +os.path.join(os.path.dirname(__file__),'/../res/exercise.png'),'-a','DEAL!',title, message])
    playsound()
    return
def add_log_entry(tid):
    """Append a timestamped log entry for task `tid` to the activity log.

    Date/time fields are stored as zero-padded strings (strftime output),
    matching the original record format.
    """
    db = TinyDB('../database/log.json')
    # Resolve the timestamp once. The old code called fromtimestamp(ts)
    # separately for every field, which was wasteful and could even produce
    # mutually inconsistent fields near a minute/day boundary.
    dt = datetime.datetime.fromtimestamp(time.time())
    db.insert({
        'tid': tid,
        'year': dt.strftime("%Y"),
        'month': dt.strftime("%m"),
        'day': dt.strftime("%d"),
        'hour': dt.strftime("%H"),
        'minute': dt.strftime("%M"),
    })
def throw(inner_loop):
db = TinyDB('../database/db.json')
active_task_list = []
db_query = Query()
if (db.search((db_query.tid == -1) & (db_query.active == 1)) == [] ):
result_task = db.search(db_query.active == 1)
for item in result_task:
active_task_list.append(item['tid'])
print active_task_list
print random.choice(active_task_list)
for item in db.search(db_query.tid == random.choice(active_task_list)):
add_log_entry(item['tid'])
sendmessage(item['name'],item['disc'])
break
loop.enter(3600, 1, throw, (inner_loop,))
# --- one-time setup: ensure the control row exists, then start the timer ---
db = TinyDB('../database/db.json')
db_query = Query()
if (db.search(db_query.tid == -1) == []):
    # tid -1 is a reserved control record (its 'active' flag pauses throws).
    db.insert({'tid': -1,'active': 0})
db.close()
# Fire immediately once; throw() re-arms itself every hour afterwards.
loop = sched.scheduler(time.time, time.sleep)
loop.enter(0, 1, throw, (loop,))
loop.run()
|
# qubit number=3
# total number=51
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'  # base path prefix for optional circuit drawings
def bitwise_xor(s: str, t: str) -> str:
    """Position-wise XOR of two equal-length bit strings, returned reversed."""
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings modulo 2, as a one-character string."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build an (n+1)-qubit circuit that flips the target qubit for every
    computational-basis input x with f(x) == "1"."""
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    # Enumerate every basis state; where f maps it to "1", sandwich a
    # multi-controlled Toffoli between X gates so it fires on that pattern.
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Assemble the Bernstein-Vazirani circuit for oracle f on n input qubits
    (plus extra hand-inserted gates), without final measurement."""
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    # NOTE(review): gates below index input_qubit[2], so this assumes
    # n >= 2 -- confirm against callers.
    prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
    prog.h(input_qubit[1]) # number=33
    prog.cz(input_qubit[2],input_qubit[1]) # number=34
    prog.h(input_qubit[1]) # number=35
    prog.h(input_qubit[1]) # number=3
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate `prog` and return its statevector as a dict mapping
    '|bits>' labels to amplitudes."""
    backend = Aer.get_backend('statevector_simulator')
    amplitudes = execute(prog, backend).result().get_statevector()
    n_qubits = round(log2(len(amplitudes)))
    labelled = {}
    for idx in range(2 ** n_qubits):
        label = "|" + np.binary_repr(idx, n_qubits) + ">"
        labelled[label] = amplitudes[idx]
    return labelled
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Simulate `prog` on the named Aer backend; return measurement counts,
    the labelled statevector, the most frequent bitstring (reversed) as 'a',
    and the given 'b'."""
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Most common bitstring, reversed to undo Qiskit's little-endian ordering.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """Oracle for f(x) = 011 . x + 1 (mod 2)."""
    mask, bias = "011", "1"
    return bitwise_xor(bitwise_dot(mask, rep), bias)
def bernstein_test_2(rep: str):
    """Oracle for f(x) = 000 . x + 0 (mod 2)."""
    mask, bias = "000", "0"
    return bitwise_xor(bitwise_dot(mask, rep), bias)
def bernstein_test_3(rep: str):
    """Oracle for f(x) = 111 . x + 1 (mod 2)."""
    mask, bias = "111", "1"
    return bitwise_xor(bitwise_dot(mask, rep), bias)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_QC280.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
# Universal Tool Template v011.0
tpl_ver = 11.16  # template version
tpl_date = 191025  # template date (YYMMDD)
print("tpl_ver: {0}-{1}".format(tpl_ver, tpl_date))
# by ying - https://github.com/shiningdesign/universal_tool_template.py
import importlib
import sys
# ---- hostMode ----
hostMode = ''
# (host name, {global alias: module path}) candidates, probed in order.
hostModeList = [
    ['maya', {'mui':'maya.OpenMayaUI', 'cmds':'maya.cmds'} ],
    ['nuke', {'nuke':'nuke', 'nukescripts':'nukescripts'} ],
    ['fusion', {'fs':'fusionscript'} ],
    ['houdini', {'hou':'hou'} ],
    ['blender', {'bpy':'bpy'} ],
    ['npp', {'Npp':'Npp'} ],
]
for name, libs in hostModeList:
    try:
        # Import every module the host needs, exposing each under its alias;
        # the first host whose imports all succeed wins.
        for x in libs.keys():
            globals()[x] = importlib.import_module(libs[x])
        hostMode = name
        break
    except ImportError:
        # Not running inside this host; try the next candidate.
        pass
if hostMode == '':
    hostMode = 'desktop'
print('Host: {0}'.format(hostMode))
# ---- qtMode ----
qtMode = 0 # 0: PySide; 1 : PyQt, 2: PySide2, 3: PyQt5
qtModeList = ('PySide', 'PyQt4', 'PySide2', 'PyQt5')
# Probe Qt bindings in preference order: PySide -> PySide2 -> PyQt4 -> PyQt5.
try:
    from PySide import QtGui, QtCore
    # PySide1 has no QtWidgets module; alias QtGui so later code is uniform.
    import PySide.QtGui as QtWidgets
    qtMode = 0
    if hostMode == "maya":
        import shiboken
except ImportError:
    try:
        from PySide2 import QtCore, QtGui, QtWidgets
        qtMode = 2
        if hostMode == "maya":
            import shiboken2 as shiboken
    except ImportError:
        try:
            from PyQt4 import QtGui,QtCore
            # Same QtWidgets aliasing trick for PyQt4.
            import PyQt4.QtGui as QtWidgets
            import sip
            qtMode = 1
        except ImportError:
            from PyQt5 import QtGui,QtCore,QtWidgets
            import sip
            qtMode = 3
print('Qt: {0}'.format(qtModeList[qtMode]))
# ---- pyMode ----
# python 2,3 support unicode function
try:
    UNICODE_EXISTS = bool(type(unicode))
except NameError:
    # lambda s: str(s) # this works for function but not for class check
    # Python 3: alias `unicode` to str so isinstance checks keep working.
    unicode = str
if sys.version_info[:3][0]>=3:
    reload = importlib.reload # add reload
pyMode = '.'.join([ str(n) for n in sys.version_info[:3] ])
print("Python: {0}".format(pyMode))
# ---- osMode ----
osMode = 'other'
if sys.platform in ('win32', 'win64'):
    osMode = 'win'
elif sys.platform == 'darwin':
    osMode = 'mac'
elif sys.platform.startswith('linux'):
    # Python 2 reports 'linux2'/'linux3'; Python 3 reports plain 'linux'.
    # Matching the prefix covers all of them -- the previous `== 'linux2'`
    # test classified Linux as 'other' under Python 3.
    osMode = 'linux'
print("OS: {0}".format(osMode))
# ---- template module list ----
import os # for path and language code
from functools import partial # for partial function creation
import json # for ascii data output
if sys.version_info[:3][0]<3:
import cPickle # for binary data output
else:
import _pickle as cPickle
import re # for name pattern
import ctypes # for windows instance detection
import subprocess # for cmd call
#=======================================
# UniversalToolUI template class
#=======================================
class UniversalToolUI(QtWidgets.QMainWindow):
    def __init__(self, parent=None, mode=0):
        """Initialize the tool window and its shared bookkeeping state."""
        QtWidgets.QMainWindow.__init__(self, parent)
        #------------------------------
        # class variables
        #------------------------------
        self.version = '0.1'
        self.date = '2017.01.01'
        self.log = 'no version log in user class'
        self.help = 'no help guide in user class'
        self.hotkey = {}
        self.uiList={} # for ui obj storage
        self.memoData = {} # key based variable data storage
        self.memoData['font_size_default'] = QtGui.QFont().pointSize()
        self.memoData['font_size'] = self.memoData['font_size_default']
        self.memoData['last_export'] = ''
        self.memoData['last_import'] = ''
        self.name = self.__class__.__name__
        self.location = ''
        if getattr(sys, 'frozen', False):
            # frozen - cx_freeze: use the bundled executable's own path.
            self.location = sys.executable
        else:
            # unfrozen: resolve the file that defines the (sub)class.
            self.location = os.path.realpath(sys.modules[self.__class__.__module__].__file__)
        # Icon convention: icons/<ClassName>.png next to the tool file.
        self.iconPath = os.path.join(os.path.dirname(self.location),'icons',self.name+'.png')
        self.iconPix = QtGui.QPixmap(self.iconPath)
        self.icon = QtGui.QIcon(self.iconPath)
        self.fileType='.{0}_EXT'.format(self.name)
        #------------------------------
        # core function variable
        #------------------------------
        # Short-name -> Qt class name lookup used by the quick-UI builders.
        self.qui_core_dict = {
            'vbox': 'QVBoxLayout','hbox':'QHBoxLayout','grid':'QGridLayout', 'form':'QFormLayout',
            'split': 'QSplitter', 'grp':'QGroupBox', 'tab':'QTabWidget',
            'btn':'QPushButton', 'btnMsg':'QPushButton', 'label':'QLabel', 'input':'QLineEdit', 'check':'QCheckBox', 'choice':'QComboBox',
            'txt': 'QTextEdit',
            'list': 'QListWidget', 'tree': 'QTreeWidget', 'table': 'QTableWidget',
            'space': 'QSpacerItem',
            'menu' : 'QMenu', 'menubar' : 'QMenuBar',
        }
        # User-extensible widget lookup (same shape as qui_core_dict).
        self.qui_user_dict = {}
def setupStyle(self):
# global app style setting for desktop
if hostMode == "desktop":
QtWidgets.QApplication.setStyle(QtWidgets.QStyleFactory.create('Cleanlooks'))
self.setStyleSheet("QLineEdit:disabled{background-color: gray;}")
    def setupMenu(self):
        """Populate the global help menu, if a 'help_menu' exists in uiList."""
        # global help menu
        if 'help_menu' in self.uiList.keys():
            # for info review
            self.qui_atn('helpHostMode_atnNone','Host Mode - {}'.format(hostMode),'Host Running.')
            self.qui_atn('helpPyMode_atnNone','Python Mode - {}'.format(pyMode),'Python Library Running.')
            self.qui_atn('helpQtMode_atnNone','Qt Mode - {}'.format(qtModeList[qtMode]),'Qt Library Running.')
            self.qui_atn('helpTemplate_atnNone','Universal Tool Teamplate - {0}.{1}'.format(tpl_ver, tpl_date),'based on Univeral Tool Template v{0} by Shining Ying - https://github.com/shiningdesign/universal{1}tool{1}template.py'.format(tpl_ver,'_'))
            self.uiList['helpGuide_msg'] = self.help
            self.qui_atn('helpGuide_atnMsg','Usage Guide','How to Usge Guide.')
            self.uiList['helpLog_msg'] = self.log
            self.qui_atn('helpLog_atnMsg','About v{0} - {1}'.format(self.version, self.date),'Vesion Log.')
            # '_' in the pipe-separated list presumably renders a separator
            # in qui_menu -- verify against the qui_menu implementation.
            self.qui_menu('helpHostMode_atnNone | helpPyMode_atnNone | helpQtMode_atnNone | helpTemplate_atnNone | _ | helpGuide_atnMsg | helpLog_atnMsg', 'help_menu')
def setupWin(self):
self.setWindowTitle(self.name + " - v" + self.version + " - host: " + hostMode)
self.setWindowIcon(self.icon)
self.drag_position=QtGui.QCursor.pos() # initial win drag position
def setupUI(self, layout='grid'):
main_widget = QtWidgets.QWidget()
self.setCentralWidget(main_widget)
main_layout = self.quickLayout(layout, 'main_layout') # grid for auto fill window size
main_widget.setLayout(main_layout)
    def Establish_Connections(self):
        """Auto-wire UI elements to handler methods by naming convention:
        '<prefix>_btn' -> self.<prefix>_action, '<prefix>_btnMsg' ->
        self.<prefix>_message, etc., falling back to the generic defaults."""
        for ui_name in self.uiList.keys():
            prefix = ui_name.rsplit('_', 1)[0]
            if ui_name.endswith('_btn'):
                self.uiList[ui_name].clicked.connect(getattr(self, prefix+"_action", partial(self.default_action,ui_name)))
            elif ui_name.endswith('_atn'):
                self.uiList[ui_name].triggered.connect(getattr(self, prefix+"_action", partial(self.default_action,ui_name)))
            elif ui_name.endswith('_btnMsg'):
                self.uiList[ui_name].clicked.connect(getattr(self, prefix+"_message", partial(self.default_message,ui_name)))
            elif ui_name.endswith('_atnMsg'):
                self.uiList[ui_name].triggered.connect(getattr(self, prefix+"_message", partial(self.default_message,ui_name)))
#=======================================
# ui response functions
#=======================================
    def default_action(self, ui_name, *argv):
        """Fallback slot for buttons/actions with no '<prefix>_action' method."""
        print("No action defined for this UI element: "+ui_name)
def default_message(self, ui_name):
prefix = ui_name.rsplit('_', 1)[0]
msgName = prefix+"_msg"
msg_txt = msgName + " is not defined in uiList."
if msgName in self.uiList:
msg_txt = self.uiList[msgName]
self.quickMsg(msg_txt)
def default_menu_call(self, ui_name, point):
if ui_name in self.uiList.keys() and ui_name+'_menu' in self.uiList.keys():
self.uiList[ui_name+'_menu'].exec_(self.uiList[ui_name].mapToGlobal(point))
    def toggleTop_action(self):
        """Toggle the always-on-top window flag; show() is required because
        setWindowFlags hides the window when changing flags."""
        self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowStaysOnTopHint)
        self.show()
def hotkey_action(self):
txt_list = []
for each_key in sorted(self.hotkey.keys()):
txt_list.append(each_key+' : '+unicode(self.hotkey[each_key].key().toString()))
self.quickMsg('\n'.join(txt_list))
#=======================================
# ui feedback functions
#=======================================
    def ____ui_feedback_functions____():
        # visual section divider for IDE outline views; never called
        pass
    def quickInfo(self, info, force=0):
        """Show a short message in a status bar.

        When the top-level window also has a quickInfo attribute
        (presumably marking another template window - confirm) and force
        is 0, the message goes to that window's status bar; otherwise to
        this widget's own status bar.
        """
        if hasattr( self.window(), "quickInfo") and force == 0:
            self.window().statusBar().showMessage(info)
        else:
            self.statusBar().showMessage(info)
    def quickMsg(self, msg, block=1, ask=0):
        """Show msg in a message box.

        Messages longer than 25 lines are wrapped in a scroll area.
        block=0 shows the box non-modally (returns 0 immediately);
        ask=1 replaces the single OK button with Ok/Cancel.
        Returns 1 only when a blocking dialog is closed with the
        standard Ok button, else 0.
        NOTE(review): with ask=0 the custom "OK" button (YesRole) does not
        compare equal to QMessageBox.Ok, so the blocking return is 0 -
        confirm this is intended.
        """
        tmpMsg = QtWidgets.QMessageBox(self) # for simple msg that no need for translation
        tmpMsg.setWindowTitle("Info")
        lineCnt = len(msg.split('\n'))
        if lineCnt > 25:
            # long message: put a selectable label inside a scroll area
            scroll = QtWidgets.QScrollArea()
            scroll.setWidgetResizable(1)
            content = QtWidgets.QWidget()
            scroll.setWidget(content)
            layout = QtWidgets.QVBoxLayout(content)
            tmpLabel = QtWidgets.QLabel(msg)
            tmpLabel.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
            layout.addWidget(tmpLabel)
            tmpMsg.layout().addWidget(scroll, 0, 0, 1, tmpMsg.layout().columnCount())
            tmpMsg.setStyleSheet("QScrollArea{min-width:600 px; min-height: 400px}")
        else:
            tmpMsg.setText(msg)
        if block == 0:
            tmpMsg.setWindowModality( QtCore.Qt.NonModal )
        if ask==0:
            tmpMsg.addButton("OK",QtWidgets.QMessageBox.YesRole)
        else:
            tmpMsg.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
        if block:
            value = tmpMsg.exec_()
            if value == QtWidgets.QMessageBox.Ok:
                return 1
            else:
                return 0
        else:
            tmpMsg.show()
            return 0
    def quickMsgAsk(self, msg, mode=0, choice=[]):
        """Prompt the user for a value; returns (text, ok_flag).

        With an empty choice list: a text input whose echo mode is picked
        by mode (0 normal, 1 no echo, 2 password, 3 password-on-edit).
        With a choice list: a non-editable item selection.
        """
        # getItem, getInteger, getDouble, getText
        modeOpt = (QtWidgets.QLineEdit.Normal, QtWidgets.QLineEdit.NoEcho, QtWidgets.QLineEdit.Password, QtWidgets.QLineEdit.PasswordEchoOnEdit)
        # option: QtWidgets.QInputDialog.UseListViewForComboBoxItems
        if len(choice)==0:
            txt, ok = QtWidgets.QInputDialog.getText(self, "Input", msg, modeOpt[mode])
            return (unicode(txt), ok)
        else:
            txt, ok = QtWidgets.QInputDialog.getItem(self, "Input", msg, choice, 0, 0)
            return (unicode(txt), ok)
def quickModKeyAsk(self):
modifiers = QtWidgets.QApplication.queryKeyboardModifiers()
clickMode = 0 # basic mode
if modifiers == QtCore.Qt.ControlModifier:
clickMode = 1 # ctrl
elif modifiers == QtCore.Qt.ShiftModifier:
clickMode = 2 # shift
elif modifiers == QtCore.Qt.AltModifier:
clickMode = 3 # alt
elif modifiers == QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier | QtCore.Qt.AltModifier:
clickMode = 4 # ctrl+shift+alt
elif modifiers == QtCore.Qt.ControlModifier | QtCore.Qt.AltModifier:
clickMode = 5 # ctrl+alt
elif modifiers == QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier:
clickMode = 6 # ctrl+shift
elif modifiers == QtCore.Qt.AltModifier | QtCore.Qt.ShiftModifier:
clickMode = 7 # alt+shift
return clickMode
def quickFileAsk(self, type, ext=None, dir=None):
if ext == None:
ext = "RAW data (*.json);;RAW binary data (*.dat);;Format Txt (*{0});;AllFiles (*.*)".format(self.fileType)
elif isinstance(ext, (str,unicode)):
if ';;' not in ext:
if ext == '':
ext = 'AllFiles (*.*)'
else:
ext = self.extFormat(ext) + ';;AllFiles (*.*)'
elif isinstance(ext, (tuple,list)):
if len(ext) > 0 and isinstance(ext[0], (tuple,list)):
tmp_list = [self.extFormat(x) for x in ext]
tmp_list.append('AllFiles (*.*)')
ext = ';;'.join(tmp_list)
else:
ext = ';;'.join([self.extFormat(x) for x in ext].append('AllFiles(*.*)'))
elif isinstance(ext, dict):
tmp_list = [self.extFormat(x) for x in ext.items()]
tmp_list.append('AllFiles (*.*)')
ext = ';;'.join(tmp_list)
else:
ext = "AllFiles (*.*)"
file = ''
if type == 'export':
if dir == None:
dir = self.memoData['last_export']
file = QtWidgets.QFileDialog.getSaveFileName(self, "Save File",dir,ext)
elif type == 'import':
if dir == None:
dir = self.memoData['last_import']
file = QtWidgets.QFileDialog.getOpenFileName(self, "Open File",dir,ext)
if isinstance(file, (list, tuple)):
file = file[0] # for deal with pyside case
else:
file = unicode(file) # for deal with pyqt case
# save last dir in memoData
if file != '':
if type == 'export':
self.memoData['last_export'] = os.path.dirname(file) #QFileInfo().path()
elif type == 'import':
self.memoData['last_import'] = os.path.dirname(file)
return file
def extFormat(self, ext):
if isinstance(ext, (tuple,list)):
ext = '{0} (*.{1})'.format(ext[1],ext[0])
else:
if ext.startswith('.'):
ext = ext[1:]
ext = '{0} (*.{0})'.format(ext)
return ext
    def quickFolderAsk(self,dir=None):
        """Open a directory picker and return the chosen path as text.

        Starts at dir when given, otherwise at the last browsed directory
        remembered in memoData (preferring the parent template window's
        memoData when one exists).
        """
        if dir == None:
            dir = self.memoData['last_browse']
            if self.parent is not None and hasattr(self.parent, 'memoData'):
                dir = self.parent.memoData['last_browse']
        return unicode(QtWidgets.QFileDialog.getExistingDirectory(self, "Select Directory",dir))
def openFolder(self, folderPath):
if os.path.isfile(folderPath):
folderPath = os.path.dirname(folderPath)
if os.path.isdir(folderPath):
cmd_list = None
if sys.platform == 'darwin':
cmd_list = ['open', '--', folderPath]
elif sys.platform == 'linux2':
cmd_list = ['xdg-open', folderPath]
elif sys.platform in ['win32','win64']:
cmd_list = ['explorer', folderPath.replace('/','\\')]
if cmd_list != None:
try:
subprocess.check_call(cmd_list)
except subprocess.CalledProcessError:
pass # handle errors in the called executable
except OSError:
pass # executable not found
def openFile(self, filePath):
if sys.platform in ['win32','win64']:
os.startfile(filePath)
elif sys.platform == 'darwin':
os.open(filePath)
elif sys.platform == 'linux2':
os.xdg-open(filePath)
def newFolder(self, parentPath, name=None):
if os.path.isfile(parentPath):
parentPath = os.path.dirname(parentPath)
created = 0
if name == None:
name, ok = self.quickMsgAsk('Enter the folder name:')
if not ok or name=='':
return
create_path = os.path.join(parentPath, name)
if os.path.isdir(create_path):
self.quickMsg('Already Exists')
else:
try:
os.makedirs(create_path)
created = 1
except OSError:
self.quickMsg('Error on creation user data folder')
return created
#=======================================
# ui info functions
#=======================================
    def ____ui_info_functions____():
        # visual section divider for IDE outline views; never called
        pass
def input_text(self, input_name, msg=''):
text = unicode(self.uiList[input_name].text())
if text == '':
print("Please define the text. {0}".format(msg))
return
return text
def input_int(self, input_name, min=None, max=None, msg=''):
input_txt = str(self.uiList[input_name].text())
result = None
# int valid
if not input_txt.isdigit():
print("Please enter a valid int. {0}".format(msg))
return
result = int(input_txt)
# min
if min != None:
if result < min:
print("Please enter a valid int number >= {0}. {1}".format(min, msg))
return
# max
if max != None:
if result > max:
print("Please enter a valid int number <= {0}. {1}".format(max, msg))
return
return result
def input_float(self, input_name, min=None, max=None, msg=''):
input_txt = str(self.uiList[input_name].text())
result = None
try:
result = float(input_txt)
except (ValueError, TypeError):
return
# min
if min != None:
if result < min:
print("Please enter a valid int number >= {0}. {1}".format(min, msg))
return
# max
if max != None:
if result > max:
print("Please enter a valid int number <= {0}. {1}".format(max, msg))
return
return result
def input_choice(self, ui_name):
if ui_name in self.uiList.keys():
return self.uiList[ui_name].currentIndex()
else:
return
def input_check(self, ui_name):
if ui_name in self.uiList.keys():
return self.uiList[ui_name].isChecked()
else:
return
def output_text(self, ui_name, text):
if ui_name in self.uiList.keys():
self.uiList[ui_name].setText(text)
#=======================================
# file data functions
#=======================================
    def ____file_functions____():
        # visual section divider for IDE outline views; never called
        pass
def readDataFile(self,file,binary=0):
with open(file) as f:
if binary == 0:
data = json.load(f)
else:
data = cPickle.load(f)
return data
def writeDataFile(self,data,file,binary=0):
with open(file, 'w') as f:
if binary == 0:
json.dump(data, f)
else:
cPickle.dump(data, f)
def readTextFile(self, file):
with open(file) as f:
txt = f.read()
return txt
def writeTextFile(self, txt, file, b=0):
if b==0:
b=''
else:
b = 'b'
with open(file, 'w'+b) as f:
f.write(txt)
    def dict_merge(self, default_dict, extra_dict, addKey=0):
        """Recursively merge extra_dict onto default_dict; returns a new dict.

        A value from extra_dict replaces the default only when the types
        agree (str and unicode count as one text type - Python 2 era
        shim); nested dicts are merged recursively. addKey=1 also copies
        keys that exist only in extra_dict. Neither input is modified.
        """
        # dictionary merge, with optional adding extra data from extra_dict
        new_dict = {}
        for key in default_dict.keys():
            if not isinstance( default_dict[key], dict ):
                # value case
                if key in extra_dict.keys():
                    is_same_text_type = isinstance(extra_dict[key], (str,unicode)) and isinstance(default_dict[key], (str,unicode))
                    is_same_non_text_type = type(extra_dict[key]) is type(default_dict[key])
                    if is_same_text_type or is_same_non_text_type:
                        print('use config file value for key: '+key)
                        new_dict[key] = extra_dict[key]
                    else:
                        new_dict[key] = default_dict[key]
                else:
                    new_dict[key] = default_dict[key]
            else:
                # dictionary case: recurse only when the override is a dict too
                if key in extra_dict.keys() and isinstance( extra_dict[key], dict ):
                    new_dict[key] = self.dict_merge( default_dict[key], extra_dict[key], addKey )
                else:
                    new_dict[key] = default_dict[key]
        # optional, add additional keys
        if addKey == 1:
            for key in [ x for x in extra_dict.keys() if x not in default_dict.keys() ]:
                new_dict[key] = extra_dict[key]
        return new_dict
#=======================================
# ui text functions
#=======================================
    def ____ui_text_functions____():
        # visual section divider for IDE outline views; never called
        pass
def fontNormal_action(self, uiClass_list=[]):
if len(uiClass_list) == 0:
uiClass_list = 'QLabel,QPushButton'.split(',')
self.memoData['font_size'] = self.memoData['font_size_default']
self.setStyleSheet( "{0} { font-size: {1}pt;}".format(','.join(uiClass_list), self.memoData['font_size']) )
def fontUp_action(self, uiClass_list=[]):
if len(uiClass_list) == 0:
uiClass_list = 'QLabel,QPushButton'.split(',')
self.memoData['font_size'] += 2
self.setStyleSheet( "{0} { font-size: {1}pt;}".format(','.join(uiClass_list), self.memoData['font_size']) )
def fontDown_action(self, uiClass_list=[]):
if len(uiClass_list) == 0:
uiClass_list = 'QLabel,QPushButton'.split(',')
if self.memoData['font_size'] >= self.memoData['font_size_default']:
self.memoData['font_size'] -= 2
self.setStyleSheet( "{0} { font-size: {1}pt;}".format(','.join(uiClass_list), self.memoData['font_size']) )
    def loadLang(self, build_menu=1):
        """Collect the current ui text into memoData['lang']['default'],
        load any '<tool>_lang_XX.json' files found next to the tool file,
        and (with build_menu=1) build a Language menu to switch languages.

        Per-widget text is stored as a single string; multi-part widgets
        (tabs, combo items, tree/table headers) join their parts with ';'.
        """
        # store default language
        self.memoData['lang']={}
        self.memoData['lang']['default']={}
        for ui_name in self.uiList.keys():
            ui_element = self.uiList[ui_name]
            if isinstance(ui_element, (QtWidgets.QLabel, QtWidgets.QPushButton, QtWidgets.QAction, QtWidgets.QCheckBox) ):
                # uiType: QLabel, QPushButton, QAction(menuItem), QCheckBox
                self.memoData['lang']['default'][ui_name] = unicode(ui_element.text())
            elif isinstance(ui_element, (QtWidgets.QGroupBox, QtWidgets.QMenu) ):
                # uiType: QMenu, QGroupBox
                self.memoData['lang']['default'][ui_name] = unicode(ui_element.title())
            elif isinstance(ui_element, QtWidgets.QTabWidget):
                # uiType: QTabWidget
                tabCnt = ui_element.count()
                tabNameList = []
                for i in range(tabCnt):
                    tabNameList.append(unicode(ui_element.tabText(i)))
                self.memoData['lang']['default'][ui_name]=';'.join(tabNameList)
            elif isinstance(ui_element, QtWidgets.QComboBox):
                # uiType: QComboBox
                itemCnt = ui_element.count()
                itemNameList = []
                for i in range(itemCnt):
                    itemNameList.append(unicode(ui_element.itemText(i)))
                self.memoData['lang']['default'][ui_name]=';'.join(itemNameList)
            elif isinstance(ui_element, QtWidgets.QTreeWidget):
                # uiType: QTreeWidget
                labelCnt = ui_element.headerItem().columnCount()
                labelList = []
                for i in range(labelCnt):
                    labelList.append(unicode(ui_element.headerItem().text(i)))
                self.memoData['lang']['default'][ui_name]=';'.join(labelList)
            elif isinstance(ui_element, QtWidgets.QTableWidget):
                # uiType: QTableWidget
                colCnt = ui_element.columnCount()
                headerList = []
                for i in range(colCnt):
                    if ui_element.horizontalHeaderItem(i):
                        headerList.append( unicode(ui_element.horizontalHeaderItem(i).text()) )
                    else:
                        headerList.append('')
                self.memoData['lang']['default'][ui_name]=';'.join(headerList)
            elif isinstance(ui_element, (str, unicode) ):
                # uiType: string for msg
                self.memoData['lang']['default'][ui_name] = self.uiList[ui_name]
        # language menu
        lang_menu = 'language_menu'
        if build_menu == 1:
            self.qui_menubar('language_menu;&Language')
            self.qui_menu('langDefault_atnLang;Default | _', lang_menu)
            self.uiList['langDefault_atnLang'].triggered.connect(partial(self.setLang,'default'))
        # scan for language file
        lang_path = os.path.dirname(self.location)
        baseName = os.path.splitext( os.path.basename(self.location) )[0]
        for file in self.getPathChild(lang_path, pattern=baseName+'_lang_[a-zA-Z]+.json', isfile=1):
            langName = re.findall(baseName+'_lang_(.+)\.json', file)
            if len(langName) == 1:
                langName = langName[0].upper()
                self.memoData['lang'][ langName ] = self.readDataFile( os.path.join(lang_path, file) )
                if build_menu == 1:
                    self.qui_menu('{0}_atnLang;{0}'.format(langName), lang_menu)
                    self.uiList[langName+'_atnLang'].triggered.connect(partial(self.setLang,langName))
        # if no language file detected, add export default language option
        if build_menu == 1:
            if isinstance(self, QtWidgets.QMainWindow) and len(self.memoData['lang']) == 1:
                self.qui_menu('langExport_atnLang;Export Default Language', lang_menu)
                self.uiList['langExport_atnLang'].triggered.connect(self.exportLang)
def setLang(self, langName):
lang_data = self.memoData['lang'][langName]
for ui_name in lang_data.keys():
if ui_name in self.uiList.keys() and lang_data[ui_name] != '':
ui_element = self.uiList[ui_name]
# '' means no translation availdanle in that data file
if isinstance(ui_element, (QtWidgets.QLabel, QtWidgets.QPushButton, QtWidgets.QAction, QtWidgets.QCheckBox) ):
# uiType: QLabel, QPushButton, QAction(menuItem), QCheckBox
ui_element.setText(lang_data[ui_name])
elif isinstance(ui_element, (QtWidgets.QGroupBox, QtWidgets.QMenu) ):
# uiType: QMenu, QGroupBox
ui_element.setTitle(lang_data[ui_name])
elif isinstance(ui_element, QtWidgets.QTabWidget):
# uiType: QTabWidget
tabCnt = ui_element.count()
tabNameList = lang_data[ui_name].split(';')
if len(tabNameList) == tabCnt:
for i in range(tabCnt):
if tabNameList[i] != '':
ui_element.setTabText(i,tabNameList[i])
elif isinstance(ui_element, QtWidgets.QComboBox):
# uiType: QComboBox
itemCnt = ui_element.count()
itemNameList = lang_data[ui_name].split(';')
ui_element.clear()
ui_element.addItems(itemNameList)
elif isinstance(ui_element, QtWidgets.QTreeWidget):
# uiType: QTreeWidget
labelCnt = ui_element.headerItem().columnCount()
labelList = lang_data[ui_name].split(';')
ui_element.setHeaderLabels(labelList)
elif isinstance(ui_element, QtWidgets.QTableWidget):
# uiType: QTableWidget
colCnt = ui_element.columnCount()
headerList = lang_data[ui_name].split(';')
cur_table.setHorizontalHeaderLabels( headerList )
elif isinstance(ui_element, (str, unicode) ):
# uiType: string for msg
self.uiList[ui_name] = lang_data[ui_name]
def exportLang(self):
file = self.quickFileAsk('export', ext='json')
if file != '':
self.writeDataFile( self.memoData['lang']['default'], file )
self.quickMsg("Languge File created: '"+file)
#=======================================
# qui functions
#=======================================
    def ____ui_creation_functions____():
        # visual section divider for IDE outline views; never called
        pass
    def setAsUI(self):
        """Convert this window into an embeddable widget: drop the window
        flag, hide the status bar, and remove the main layout margins."""
        # turn win to widget
        self.setWindowFlags(QtCore.Qt.Widget)
        self.statusBar().hide()
        self.uiList['main_layout'].setContentsMargins(0, 0, 0, 0)
    def qui(self, ui_list_string, parent_ui_string='', insert_opt=''):
        """Shorthand ui builder on top of quickUI.

        ui_list_string: '|'-separated creation entries 'name;args'. The
        name endfix (after the last '_') selects the widget class via
        qui_user_dict / qui_core_dict, or is used directly when it is a
        Qt class or loaded module; '*_layout' names may give the layout
        type as the second token. Names already in uiList are passed
        through as references instead of being re-created.
        parent_ui_string: the parent layout/container in the same format.
        insert_opt is forwarded to quickUI (grid direction / tab titles).
        Returns the parent's uiList name (or None on an invalid parent).
        """
        ui_creation_list = [ x.strip() for x in ui_list_string.split('|') if x.strip() !='']
        ui_creation_quickUI_list = []
        # ------------
        # - ui list
        # ------------
        for ui_creation in ui_creation_list:
            arg_list = ui_creation.split(';')
            uiName = arg_list[0].split('@')[0]
            # ------------
            # continue if ui is already created. pass as ui reference
            if uiName in self.uiList.keys():
                ui_creation_quickUI_list.append(self.uiList[uiName])
                continue
            # ------------
            # create quickUI string
            # - expand short name for Class
            uiClass = uiName.rsplit('_',1)[-1]
            if uiClass == 'layout' and len(arg_list)>1:
                uiClass = arg_list[1]
                arg_list = [ arg_list[0] ]
            if uiClass in self.qui_user_dict:
                uiClass = self.qui_user_dict[uiClass] # first, try user dict
            elif uiClass in self.qui_core_dict:
                uiClass = self.qui_core_dict[uiClass] # then, try default core dict
            # - check it is valid Qt class or a user class
            if hasattr(QtWidgets, uiClass) or uiClass in sys.modules:
                pass # uiClass is valid for Qt class, user module
            else:
                print("WARNING: ({0}) is not defined in self.qui_user_dict and it is not a Qt widget class or User class; Item {1} Ignored.".format(uiClass, uiName))
                continue
            # - set quickUI creation format
            arg_list[0] = arg_list[0] +';'+uiClass
            if len(arg_list)==1:
                if uiClass in ('QPushButton','QLabel'):
                    arg_list.append(uiName) # give empty button and label a place holder name
            ui_creation_quickUI_list.append(';'.join(arg_list))
        # ------------
        # - ui parent
        # ------------
        parent_creation_quickUI_input = ''
        parent_arg_list = parent_ui_string.split(';')
        parent_uiName = parent_arg_list[0]
        # - continue if parent ui is already created. pass as ui reference
        if parent_uiName in self.uiList.keys():
            parent_creation_quickUI_input = self.uiList[parent_uiName]
        else:
            parent_uiClass = parent_uiName.rsplit('_',1)[-1]
            if parent_uiClass == 'layout' and len(parent_arg_list)>1:
                parent_uiClass = parent_arg_list[1]
                parent_arg_list = [ parent_arg_list[0] ]
            if parent_uiClass in self.qui_user_dict:
                parent_uiClass = self.qui_user_dict[parent_uiClass] # first, try user dict
            elif parent_uiClass in self.qui_core_dict:
                parent_uiClass = self.qui_core_dict[parent_uiClass] # then, try default core dict
            # - check it is valid Qt class or a user class
            if hasattr(QtWidgets, parent_uiClass) or parent_uiClass in sys.modules:
                pass # uiClass is valid for Qt class, user module
            else:
                print("WARNING: ({0}) is not defined in self.qui_user_dict and it is not a Qt widget class or User class; Item {1} Ignored.".format(parent_uiClass, parent_uiName))
                return
            # - set quickUI creation format
            parent_arg_list[0] = parent_arg_list[0] +';'+parent_uiClass
            parent_creation_quickUI_input = ';'.join(parent_arg_list)
        self.quickUI(ui_creation_quickUI_list, parent_creation_quickUI_input, insert_opt)
        return parent_uiName
    def qui_menu(self, action_list_str, menu_str):
        """Create/extend a QMenu from a creation string.

        action_list_str: '|'-separated entries 'actionName;Title,Hotkey';
        a bare '_' inserts a separator. menu_str: uiList key of the menu,
        created on demand. Already-registered actions are reused (their
        title/hotkey arguments are ignored), then appended to the menu.
        """
        # qui menu creation
        # syntax: self.qui_menu('right_menu_createFolder_atn;Create Folder,Ctrl+D | right_menu_openFolder_atn;Open Folder', 'right_menu')
        if menu_str not in self.uiList.keys():
            self.uiList[menu_str] = QtWidgets.QMenu()
        create_opt_list = [ x.strip() for x in action_list_str.split('|') ]
        for each_creation in create_opt_list:
            ui_info = [ x.strip() for x in each_creation.split(';') ]
            atn_name = ui_info[0]
            atn_title = ''
            atn_hotkey = ''
            if len(ui_info) > 1:
                options = ui_info[1].split(',')
                atn_title = '' if len(options) < 1 else options[0]
                atn_hotkey = '' if len(options) < 2 else options[1]
            if atn_name != '':
                if atn_name == '_':
                    self.uiList[menu_str].addSeparator()
                else:
                    if atn_name not in self.uiList.keys():
                        self.uiList[atn_name] = QtWidgets.QAction(atn_title, self)
                        if atn_hotkey != '':
                            self.uiList[atn_name].setShortcut(QtGui.QKeySequence(atn_hotkey))
                    self.uiList[menu_str].addAction(self.uiList[atn_name])
def qui_atn(self, ui_name, title, tip=None, icon=None, parent=None, key=None):
self.uiList[ui_name] = QtWidgets.QAction(title, self)
if icon!=None:
self.uiList[ui_name].setIcon(QtGui.QIcon(icon))
if tip !=None:
self.uiList[ui_name].setStatusTip(tip)
if key != None:
self.uiList[ui_name].setShortcut(QtGui.QKeySequence(key))
if parent !=None:
if isinstance(parent, (str, unicode)) and parent in self.uiList.keys():
self.uiList[parent].addAction(self.uiList[ui_name])
elif isinstance(parent, QtWidgets.QMenu):
parent.addAction(self.uiList[ui_name])
return ui_name
    def qui_key(self, key_name, key_combo, func):
        """Register a window-level hotkey: key_combo (e.g. 'Ctrl+S') fires func."""
        self.hotkey[key_name] = QtWidgets.QShortcut(QtGui.QKeySequence(key_combo), self)
        self.hotkey[key_name].activated.connect( func )
def qui_menubar(self, menu_list_str):
if not isinstance(self, QtWidgets.QMainWindow):
print("Warning: Only QMainWindow can have menu bar.")
return
menubar = self.menuBar()
create_opt_list = [ x.strip() for x in menu_list_str.split('|') ]
for each_creation in create_opt_list:
ui_info = [ x.strip() for x in each_creation.split(';') ]
menu_name = ui_info[0]
menu_title = ''
if len(ui_info) > 1:
menu_title = ui_info[1]
if menu_name not in self.uiList.keys():
self.uiList[menu_name] = QtWidgets.QMenu(menu_title)
menubar.addMenu(self.uiList[menu_name])
#=======================================
# ui creation functions
#=======================================
def quickLayout(self, type, ui_name=""):
the_layout = ''
if type in ("form", "QFormLayout"):
the_layout = QtWidgets.QFormLayout()
the_layout.setLabelAlignment(QtCore.Qt.AlignLeft)
the_layout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
elif type in ("grid", "QGridLayout"):
the_layout = QtWidgets.QGridLayout()
elif type in ("hbox", "QHBoxLayout"):
the_layout = QtWidgets.QHBoxLayout()
the_layout.setAlignment(QtCore.Qt.AlignTop)
else:
the_layout = QtWidgets.QVBoxLayout()
the_layout.setAlignment(QtCore.Qt.AlignTop)
if ui_name != "":
self.uiList[ui_name] = the_layout
return the_layout
def quickUI(self, part_list, parentObject="", insert_opt=""):
# part_list contains:
# -- 1. string (strings for widget/space, layout, container[group, tab, splitter])
# -- 2. object (widget/space, layout, container[group, tab, splitter])
# -- 3. object list
# -- 4. [object list, label_object list]
# parentObject contains:
# -- 1. string (strings for layout, container[group, tab, splitter])
# -- 2. object (layout, container[group, tab, splitter])
# insert_opt:
# -- insert into grid layout, h, v
# -- insert into tab, titles
if not isinstance(part_list, (list, tuple)):
part_list = [part_list]
# func variable
ui_list = []
ui_label_list = []
form_type = 0 # flag for store whether ui_list need a label widget list for form layout creation
# 1. convert string to object and flatten part_list
for each_part in part_list:
# 1.1 string
if isinstance(each_part, str):
# - string : get part info
partInfo = each_part.split(';')
uiNameLabel = partInfo[0].split('@')
uiName = uiNameLabel[0]
uiLabel = ''
if len(uiNameLabel) > 1:
uiLabel = uiNameLabel[1]
form_type = 1
uiType = partInfo[1] if len(partInfo) > 1 else ""
uiArgs = partInfo[2] if len(partInfo) > 2 else ""
# - string : valid info
if uiType == "":
print("Warning (QuickUI): uiType is empty for "+each_part)
else:
# - string : to object creation
ui_create_state = 0 # flag to track creation success
if not uiType[0] == 'Q':
# -- 3rd ui type, create like UI_Class.UI_Class()
self.uiList[uiName] = getattr(sys.modules[uiType], uiType)() # getattr(eval(uiType), uiType)()
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
# -- Qt ui
if uiType in ('QVBoxLayout', 'QHBoxLayout', 'QFormLayout', 'QGridLayout'):
# --- Qt Layout creation preset func
ui_list.append(self.quickLayout(uiType, uiName))
ui_create_state = 1
elif uiType in ('QSplitter', 'QTabWidget', 'QGroupBox'):
# --- Qt container creation
if uiType == 'QSplitter':
# ---- QSplitter as element
split_type = QtCore.Qt.Horizontal
if uiArgs == 'v':
split_type = QtCore.Qt.Vertical
self.uiList[uiName]=QtWidgets.QSplitter(split_type)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
elif uiType == 'QTabWidget':
# ---- QTabWidget as element, no tab label need for input
self.uiList[uiName]=QtWidgets.QTabWidget()
self.uiList[uiName].setStyleSheet("QTabWidget::tab-bar{alignment:center;}QTabBar::tab { min-width: 100px; }")
ui_list.append(self.uiList[uiName])
ui_create_state = 1
elif uiType == 'QGroupBox':
# ---- QGroupBox as element, with layout type and optional title
arg_list = [x.strip() for x in uiArgs.split(',')]
grp_layout = arg_list[0] if arg_list[0]!='' else 'vbox'
grp_title = arg_list[1] if len(arg_list)>1 else uiName
# create layout and set grp layout
grp_layout = self.quickLayout(grp_layout, uiName+"_layout" )
self.uiList[uiName] = QtWidgets.QGroupBox(grp_title)
self.uiList[uiName].setLayout(grp_layout)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
# --- Qt widget creation
if uiArgs == "":
# ---- widget with no uiArgs
self.uiList[uiName] = getattr(QtWidgets, uiType)()
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
# ---- widget with uiArgs
if not ( uiArgs.startswith("(") and uiArgs.endswith(")") ):
# ----- with string arg
self.uiList[uiName] = getattr(QtWidgets, uiType)(uiArgs)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
# ----- with array arg
arg_list = uiArgs.replace('(','').replace(')','').split(',')
if uiType == 'QComboBox':
self.uiList[uiName] = QtWidgets.QComboBox()
self.uiList[uiName].addItems(arg_list)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
elif uiType == 'QTreeWidget':
self.uiList[uiName] = QtWidgets.QTreeWidget()
self.uiList[uiName].setHeaderLabels(arg_list)
ui_list.append(self.uiList[uiName])
ui_create_state = 1
elif uiType == 'QSpacerItem':
policyList = ( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Ignored)
# 0 = fixed; 1 > min; 2 < max; 3 = prefered; 4 = <expanding>; 5 = expanding> Aggresive; 6=4 ignored size input
# factors in fighting for space: horizontalStretch
# extra space: setContentsMargins and setSpacing
# ref: http://www.cnblogs.com/alleyonline/p/4903337.html
arg_list = [ int(x) for x in arg_list ]
self.uiList[uiName] = QtWidgets.QSpacerItem(arg_list[0],arg_list[1], policyList[arg_list[2]], policyList[arg_list[3]] )
ui_list.append(self.uiList[uiName])
ui_create_state = 1
else:
print("Warning (QuickUI): uiType don't support array arg for "+each_part)
# - string : Qt widget label for form element creation
if ui_create_state == 1:
if uiLabel != '':
ui_label_list.append((uiName,uiLabel))
else:
ui_label_list.append('')
ui_create_state = 0
else:
# 1.2 other part like: object, object list, [object, label object]
if isinstance(each_part, (QtWidgets.QWidget, QtWidgets.QLayout, QtWidgets.QSpacerItem)):
# - object
ui_list.append(each_part)
ui_label_list.append('')
elif isinstance(each_part, (tuple, list)):
# - object list, [object, label object]
if len(each_part) != 0:
if isinstance(each_part[0], (tuple, list)) and len(each_part)==2:
# -- [object, label object]
ui_list.extend(each_part[0])
ui_label_list.extend(each_part[1])
else:
# -- object list
ui_list.extend(each_part)
ui_label_list.extend(['']*len(each_part))
# 2 parentObject part
if parentObject == '':
# - if no parentObject, return object list or [object list, label_object list]
if form_type == 1:
return [ui_list, ui_label_list]
else:
return ui_list
else:
if isinstance(parentObject, str):
# - if parentObject, convert string to parentObject
parentName = ''
parentType = ''
parentArgs = ''
layout_type_list = (
'QVBoxLayout', 'QHBoxLayout', 'QFormLayout', 'QGridLayout', 'vbox', 'hbox', 'grid', 'form',
'QSplitter', 'QTabWidget', 'QGroupBox', 'split', 'tab', 'grp',
)
# get options
parentOpt = parentObject.split(';')
if len(parentOpt) == 1:
# -- only 1 arg case: strict name format, eg. conf_QHBoxLayout, config_hbox
parentName = parentOpt[0] # 1 para case: strict name endfix format
parentType = parentName.rsplit('_',1)[-1]
elif len(parentOpt)==2:
# -- only 2 arg case:
# a. flexible name format + type eg. conf_layout;QGridLayout, conf_layout;hbox
# b. strict name format, + setting eg. conf_QGridLayout;h, config_grid;h
parentName = parentOpt[0]
if parentOpt[1] in layout_type_list:
parentType = parentOpt[1] # a
else:
parentType = parentName.rsplit('_',1)[-1]
parentArgs = parentOpt[1] # b
elif len(parentOpt)>=3:
# -- 3 arg case:
# flexible name format + type + settings eg. conf_layout;QGridLayout;h
parentName = parentOpt[0]
parentType = parentOpt[1]
parentArgs = parentOpt[2]
# - validate layout options
if parentName=='' or (parentType not in layout_type_list):
print("Warning (QuickUI): quickUI not support parent layout as "+parentObject)
return
else:
# - create layout
if parentType in ('QVBoxLayout', 'QHBoxLayout', 'QFormLayout', 'QGridLayout', 'vbox', 'hbox', 'grid', 'form'):
# -- layout object case
parentObject = self.quickLayout(parentType, parentName)
elif parentType in ('QSplitter', 'QTabWidget', 'QGroupBox', 'split', 'tab', 'grp'):
# --- Qt container creation
if parentType in ('QSplitter', 'split'):
# ---- QSplitter as element
split_type = QtCore.Qt.Horizontal
if parentArgs == 'v':
split_type = QtCore.Qt.Vertical
self.uiList[parentName]=QtWidgets.QSplitter(split_type)
parentObject = self.uiList[parentName]
elif parentType in ('QTabWidget', 'tab'):
# ---- QTabWidget as element, no tab label need for input
self.uiList[parentName]=QtWidgets.QTabWidget()
self.uiList[parentName].setStyleSheet("QTabWidget::tab-bar{alignment:center;}QTabBar::tab { min-width: 100px; }")
parentObject = self.uiList[parentName]
elif parentType in ('QGroupBox', 'grp'):
# ---- QGroupBox as element, with layout type and optional title
arg_list = [x.strip() for x in parentArgs.split(',')]
grp_layout = arg_list[0] if arg_list[0]!='' else 'vbox'
grp_title = arg_list[1] if len(arg_list)>1 else parentName
# create layout and set grp layout
grp_layout = self.quickLayout(grp_layout, parentName+"_layout" )
self.uiList[parentName] = QtWidgets.QGroupBox(grp_title)
self.uiList[parentName].setLayout(grp_layout)
parentObject = self.uiList[parentName]
# 3. get parentLayout inside parentObject
parentLayout = ''
if isinstance(parentObject, QtWidgets.QLayout):
parentLayout = parentObject
elif isinstance(parentObject, QtWidgets.QGroupBox):
parentLayout = parentObject.layout()
# 3.1 insert part_list into parentLayout for layout and groupbox
if isinstance(parentLayout, QtWidgets.QBoxLayout):
for each_ui in ui_list:
if isinstance(each_ui, QtWidgets.QWidget):
parentLayout.addWidget(each_ui)
elif isinstance(each_ui, QtWidgets.QSpacerItem):
parentLayout.addItem(each_ui)
elif isinstance(each_ui, QtWidgets.QLayout):
parentLayout.addLayout(each_ui)
elif isinstance(parentLayout, QtWidgets.QGridLayout):
# one row/colume operation only
insertRow = parentLayout.rowCount()
insertCol = parentLayout.columnCount()
for i in range(len(ui_list)):
each_ui = ui_list[i]
x = insertRow if insert_opt=="h" else i
y = i if insert_opt=="h" else insertCol
if isinstance(each_ui, QtWidgets.QWidget):
parentLayout.addWidget(each_ui,x,y)
elif isinstance(each_ui, QtWidgets.QSpacerItem):
parentLayout.addItem(each_ui,x,y)
elif isinstance(each_ui, QtWidgets.QLayout):
parentLayout.addLayout(each_ui,x,y)
elif isinstance(parentLayout, QtWidgets.QFormLayout):
for i in range(len(ui_list)):
each_ui = ui_list[i]
if isinstance(each_ui, QtWidgets.QWidget) or isinstance(each_ui, QtWidgets.QLayout):
# create and add label: (uiName, uiLabel)
if ui_label_list[i] != '':
uiLabelName = ui_label_list[i][0] + "_label"
uiLabelText = ui_label_list[i][1]
self.uiList[uiLabelName] = QtWidgets.QLabel(uiLabelText)
parentLayout.addRow(self.uiList[uiLabelName], each_ui)
else:
parentLayout.addRow(each_ui)
else:
# 3.2 insert for empty parentLayout for split, and tab
if isinstance(parentObject, QtWidgets.QSplitter):
for each_ui in ui_list:
if isinstance(each_ui, QtWidgets.QWidget):
parentObject.addWidget(each_ui)
else:
tmp_holder = QtWidgets.QWidget()
tmp_holder.setLayout(each_ui)
parentObject.addWidget(tmp_holder)
elif isinstance(parentObject, QtWidgets.QTabWidget):
tab_names = insert_opt.replace('(','').replace(')','').split(',')
for i in range( len(ui_list) ):
each_tab = ui_list[i]
each_name = 'tab_'+str(i)
if i < len(tab_names):
if tab_names[i] != '':
each_name = tab_names[i]
if isinstance(each_tab, QtWidgets.QWidget):
parentObject.addTab(each_tab, each_name)
else:
tmp_holder = QtWidgets.QWidget()
tmp_holder.setLayout(each_tab)
parentObject.addTab(tmp_holder, each_name)
return parentObject
def quickSplitUI(self, name, part_list, type):
split_type = QtCore.Qt.Horizontal
if type == 'v':
split_type = QtCore.Qt.Vertical
self.uiList[name]=QtWidgets.QSplitter(split_type)
for each_part in part_list:
if isinstance(each_part, QtWidgets.QWidget):
self.uiList[name].addWidget(each_part)
else:
tmp_holder = QtWidgets.QWidget()
tmp_holder.setLayout(each_part)
self.uiList[name].addWidget(tmp_holder)
return self.uiList[name]
def quickTabUI(self, name, tab_list, tab_names):
self.uiList[name]=QtWidgets.QTabWidget()
self.uiList[name].setStyleSheet("QTabWidget::tab-bar{alignment:center;}QTabBar::tab { min-width: 100px; }")
for i in range( len(tab_list) ):
each_tab = tab_list[i]
each_name = tab_names[i]
if isinstance(each_tab, QtWidgets.QWidget):
self.uiList[name].addTab(each_tab, each_name)
else:
tmp_holder = QtWidgets.QWidget()
tmp_holder.setLayout(each_tab)
self.uiList[name].addTab(tmp_holder, each_name)
return self.uiList[name]
def quickGrpUI(self, ui_name, ui_label, ui_layout):
self.uiList[ui_name] = QtWidgets.QGroupBox(ui_label)
if isinstance(ui_layout, QtWidgets.QLayout):
self.uiList[ui_name].setLayout(ui_layout)
elif isinstance(ui_layout, str):
ui_layout = self.quickLayout(ui_name+"_layout", ui_layout)
self.uiList[ui_name].setLayout(ui_layout)
return [self.uiList[ui_name], ui_layout]
def quickPolicy(self, ui_list, w, h):
if not isinstance(ui_list, (list, tuple)):
ui_list = [ui_list]
# 0 = fixed; 1 > min; 2 < max; 3 = prefered; 4 = <expanding>; 5 = expanding> Aggresive; 6=4 ignored size input
policyList = ( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Ignored)
for each_ui in ui_list:
if isinstance(each_ui, str):
each_ui = self.uiList[each_ui]
each_ui.setSizePolicy(policyList[w],policyList[h])
    def mui_to_qt(self, mui_name):
        """Convert a Maya UI element name into a wrapped Qt object.

        Tries MQtUtil.findControl / findLayout / findMenuItem in that
        order and wraps the resulting C++ pointer for the active binding.
        Returns None (implicitly) outside Maya or when nothing is found.
        """
        if hostMode != "maya":
            return
        ptr = mui.MQtUtil.findControl(mui_name)
        if ptr is None:
            ptr = mui.MQtUtil.findLayout(mui_name)
        if ptr is None:
            ptr = mui.MQtUtil.findMenuItem(mui_name)
        if ptr is not None:
            if qtMode in (0,2):
                # ==== for pyside ====
                # NOTE(review): long() is Python 2 only — this branch assumes a py2 Maya
                return shiboken.wrapInstance(long(ptr), QtWidgets.QWidget)
            elif qtMode in (1,3):
                # ==== for PyQt====
                return sip.wrapinstance(long(ptr), QtCore.QObject)
    def qt_to_mui(self, qt_obj):
        """Convert a Qt object back into its Maya UI full path name.

        Unwraps the C++ pointer via the active binding (shiboken for
        PySide modes 0/2, sip for PyQt modes 1/3) and asks MQtUtil for
        the full name. Returns None (implicitly) outside Maya.
        """
        if hostMode != "maya":
            return
        ref = None
        if qtMode in (0,2):
            # ==== for pyside ====
            ref = long(shiboken.getCppPointer(qt_obj)[0])
        elif qtMode in (1,3):
            # ==== for PyQt====
            ref = long(sip.unwrapinstance(qt_obj))
        if ref is not None:
            return mui.MQtUtil.fullName(ref)
#=======================================
# widget specific functions
#=======================================
    def ____TreeWidget_Process_Functions____():
        # code-folding/navigation separator only; intentionally has no
        # self parameter and is never called
        pass
def path_pattern_to_task(self, path_pattern):
# break config text into section of sub-directory search task
# each task: 'sub_directory_path_to/content_list', 'content_dir_variable_name'
# also, 'content_dir_variable_name' also is the key to its filter pattern
# example: [('/VFX/assets/models', 'category'), ('', 'asset'), ('/Mesh/publish', 'model_file')])
part_list = path_pattern.split('/')
task_config = []
task_pattern = re.compile('{.+}') # grab variable name in path_pattern with {variable} format
sub = ''
for each in part_list:
if task_pattern.match(each):
task_config.append( (sub,each[1:-1]) )
sub = ''
else:
sub=sub+'/'+each
return task_config
def getPathChild(self, scanPath, pattern='', isfile=0):
resultList =[]
if not os.path.isdir(scanPath):
return resultList
if isfile == 0:
resultList = [x for x in os.listdir(scanPath) if os.path.isdir(os.path.join(scanPath,x))]
elif isfile == 1:
resultList = [x for x in os.listdir(scanPath) if os.path.isfile(os.path.join(scanPath,x))]
else:
resultList = os.listdir(scanPath)
if pattern != '':
cur_pattern = re.compile(pattern)
resultList = [x for x in resultList if cur_pattern.match(x)]
resultList.sort()
return resultList
def path_info(self, scanPath, file_pattern='', folder_pattern='', file=0, folder=0):
# alternative method of getPathChild
file_list = []
folder_list = []
# prepare filter
cur_file_pattern = None
if file_pattern != '':
cur_file_pattern = file_pattern # re object
if isinstance(file_pattern, (unicode,str)):
cur_file_pattern = re.compile(file_pattern)
cur_folder_pattern = None
if folder_pattern != '':
cur_folder_pattern = folder_pattern # re object
if isinstance(folder_pattern, (unicode,str)):
cur_folder_pattern = re.compile(folder_pattern)
# category file and folder
for x in os.listdir(scanPath):
if os.path.isdir(os.path.join(scanPath,x)):
folder_list.append(x)
elif os.path.isfile(os.path.join(scanPath,x)):
file_list.append(x)
file_list.sort()
folder_list.sort()
# filter result
result = []
if file == 1:
if cur_file_pattern is None:
result.append(file_list)
else:
result.append( [x for x in file_list if cur_file_pattern.match(x)] )
if folder == 1:
if cur_folder_pattern is None:
result.append(folder_list)
else:
result.append( [x for x in folder_list if cur_folder_pattern.match(x)] )
if len(result) == 1:
result = result[0]
return result
    def DirToData(self, scanPath, task_config, pattern_config, currentTag=''):
        '''
        Recursively scan scanPath following task_config and return a nested
        tuple ( node_info, node_info_child ) where node_info is
        [name, tag, path] and node_info_child is a list of the same tuples,
        one per matched child (recursing while tasks remain).
        task_config: list of (sub_path, variable_name) pairs, see
        path_pattern_to_task; pattern_config maps variable_name -> regex.
        Returns ( [], [] ) on bad config or a missing path.
        '''
        if not isinstance(task_config, (tuple, list)):
            return ( [], [] )
        else:
            if len(task_config)== 0:
                return ( [], [] )
        task_list = task_config
        # 1. get path if at least 1 task
        cur_task = task_list[0]
        rest_task = [] if len(task_list)==1 else task_list[1:]
        scanPath = scanPath.replace('\\','/')
        if cur_task[0] != '':
            scanPath = scanPath+cur_task[0] # note join path with /startswith/ will goto top path
        if not os.path.isdir(scanPath):
            print('Error: path not exists: {}'.format(scanPath))
            return ( [], [] )
        # 2. get list and filter list
        # the level's filter pattern is keyed by the task's variable name
        cur_pattern = '' if cur_task[1] not in pattern_config.keys() else pattern_config[cur_task[1]]
        isfile = 0 # folder only
        if cur_task[1].endswith('_file'):
            isfile = 1 # file only
        if cur_task[1].endswith('_all'):
            isfile = 2 # folder and file
        node_name = os.path.basename(scanPath)
        # the scan root (empty tag) is reported as an anonymous container
        node_info = ['', '', scanPath ] if currentTag == '' else [node_name, currentTag, scanPath ]
        node_info_child = []
        parentTag = currentTag
        for each_name in self.getPathChild(scanPath, cur_pattern, isfile):
            cur_path = os.path.join(scanPath, each_name).replace('\\','/')
            # tag = ':'-joined chain of names from the scan root
            cur_tag = each_name if parentTag == '' else parentTag+':'+each_name
            if os.path.isdir(cur_path):
                if len(rest_task) > 0:
                    # go next level task
                    node_info_child.append( self.DirToData(cur_path, rest_task, pattern_config, cur_tag) )
                else:
                    node_info_child.append( ( [os.path.basename(cur_path), cur_tag, cur_path ], [] ) )
            else:
                # files are leaves and carry no tag
                node_info_child.append( ( [os.path.basename(cur_path), '', cur_path ], [] ) )
        return (node_info, node_info_child)
def DirToTree(self, cur_tree, parentNode, scanPath, task_config, pattern_config):
if not isinstance(task_config, (tuple, list)):
return
else:
if len(task_config)== 0:
return
task_list = task_config
# 1. get path if at least 1 task
cur_task = task_list[0]
rest_task = [] if len(task_list)==1 else task_list[1:]
scanPath = scanPath.replace('\\','/')
if cur_task[0] != '':
# because join path with /startswith/ will goto top path
scanPath = scanPath+cur_task[0]
if not os.path.isdir(scanPath):
print('Error: path not exists: {}'.format(scanPath))
return
# 2. get list and filter list
cur_pattern = '' if cur_task[1] not in pattern_config.keys() else pattern_config[cur_task[1]]
isfile = 0 # folder only
if cur_task[1].endswith('_file'):
isfile = 1 # file only
if cur_task[1].endswith('_all'):
isfile = 2 # folder and file
child_list = self.getPathChild(scanPath, cur_pattern, isfile)
node_list = {}
# 3. create node in normal style
parentNode_info = unicode(parentNode.text(1))
if isfile == 2:
group_dict = {}
for each_name in child_list:
if os.path.isdir(os.path.join(scanPath, each_name)):
new_node = QtWidgets.QTreeWidgetItem()
new_node.setText(0, each_name)
new_node.setText(2, os.path.join(scanPath,each_name).replace('\\','/') )
parentNode.addChild(new_node)
node_list[each_name]=new_node
else:
prefix, ext = os.path.splitext(each_name)
# file type
fileType = ext[1:]
# file version
version_txt = ""
possible_version_list = re.findall(r'_v([\d]+)[_\.]', each_name) # last _v999.ext or _v999_xxx.ext
if len(possible_version_list) > 0:
version_txt = possible_version_list[-1]
# file prefix
if version_txt != "":
prefix = each_name.rsplit("_v"+version_txt, 1)[0]
# file group
group_name = prefix+':'+fileType
if group_name not in group_dict.keys():
group_dict[group_name] = []
group_dict[group_name].append(each_name)
# add group node first
for group_name in sorted(group_dict.keys()):
group_dict[group_name].sort(reverse=1)
group_item_list = group_dict[group_name]
fileType = group_name.split(':')[1]
group_node = QtWidgets.QTreeWidgetItem()
group_node_top_name = group_item_list[0]
cur_filePath = os.path.join(scanPath,group_node_top_name).replace("\\","/")
group_node.setText(0, group_node_top_name)
group_node.setText(1, fileType)
group_node.setText(2, cur_filePath)
parentNode.addChild(group_node)
# add sub version to the tree
if len(group_item_list) == 1:
node_list[group_node_top_name]=group_node
if len(group_item_list) > 1:
for each_name in group_item_list:
sub_node = QtWidgets.QTreeWidgetItem()
cur_filePath = os.path.join(scanPath,each_name).replace("\\","/")
sub_node.setText(0, each_name)
sub_node.setText(1, fileType)
sub_node.setText(2, cur_filePath)
group_node.addChild(sub_node)
node_list[each_name]=sub_node
elif isfile == 0:
for each_name in child_list:
new_node = QtWidgets.QTreeWidgetItem()
new_node.setText(0, each_name)
if parentNode_info == '':
new_node.setText(1, each_name)
else:
new_node.setText(1, parentNode_info+':'+each_name)
new_node.setText(2, os.path.join(scanPath,each_name).replace('\\','/') )
parentNode.addChild(new_node)
node_list[each_name]=new_node
elif isfile == 1:
# 3. create node in combine style
#-- group similar
group_dict = {}
for each_name in child_list:
prefix, ext = os.path.splitext(each_name)
# file type
fileType = ext[1:]
# file version
version_txt = ""
possible_version_list = re.findall(r'_v([\d]+)[_\.]', each_name) # last _v999.ext or _v999_xxx.ext
if len(possible_version_list) > 0:
version_txt = possible_version_list[-1]
# file prefix
if version_txt != "":
prefix = each_name.rsplit("_v"+version_txt, 1)[0]
# file group
group_name = prefix+':'+fileType
if group_name not in group_dict.keys():
group_dict[group_name] = []
group_dict[group_name].append(each_name)
# add group node first
for group_name in sorted(group_dict.keys()):
group_dict[group_name].sort(reverse=1)
group_item_list = group_dict[group_name]
fileType = group_name.split(':')[1]
group_node = QtWidgets.QTreeWidgetItem()
group_node_top_name = group_item_list[0]
cur_filePath = os.path.join(scanPath,group_node_top_name).replace("\\","/")
group_node.setText(0, group_node_top_name)
group_node.setText(1, fileType)
group_node.setText(2, cur_filePath)
parentNode.addChild(group_node)
# add sub version to the tree
if len(group_item_list) == 1:
node_list[group_node_top_name]=group_node
if len(group_item_list) > 1:
for each_name in group_item_list:
sub_node = QtWidgets.QTreeWidgetItem()
cur_filePath = os.path.join(scanPath,each_name).replace("\\","/")
sub_node.setText(0, each_name)
sub_node.setText(1, fileType)
sub_node.setText(2, cur_filePath)
group_node.addChild(sub_node)
node_list[each_name]=sub_node
# go next level task
if len(rest_task) > 0:
for each_name in child_list:
cur_parentPath = os.path.join(scanPath, each_name).replace('\\', '/')
if os.path.isdir(cur_parentPath):
self.DirToTree(cur_tree, node_list[each_name], cur_parentPath, rest_task, pattern_config)
def TreeToData(self, tree, cur_node):
# now take widghet col count instead tree column count with hidden ones
child_count = cur_node.childCount()
node_info = [ unicode( cur_node.text(i) ) for i in range(cur_node.columnCount()) ]
node_info_child = []
for i in range(child_count):
node_info_child.append( self.TreeToData(tree, cur_node.child(i) ) )
return (node_info, node_info_child)
    def DataToTree(self, tree, cur_node, data, filter='', col=0):
        """Rebuild tree items under *cur_node* from TreeToData-style data.

        data: (node_info, node_info_child) tuple.
        filter: '' for a full rebuild, or a regex (string or compiled)
        that keeps a child when column *col* matches or any descendant
        matches (via DataChildCheck). Kept subtrees are added expanded.
        """
        node_info = data[0]
        node_info_child = data[1]
        # fill the columns of the current node
        [cur_node.setText(i, node_info[i]) for i in range(len(node_info))]
        # re filter
        if filter != '' and isinstance(filter, (str, unicode)):
            filter = re.compile(filter, re.IGNORECASE)
        for sub_data in node_info_child:
            if filter == '':
                new_node = QtWidgets.QTreeWidgetItem()
                cur_node.addChild(new_node)
                self.DataToTree(tree, new_node, sub_data)
            else:
                # keep the child if it matches, or if any descendant matches
                if not filter.search(sub_data[0][col]) and not self.DataChildCheck(sub_data[1], filter, col):
                    pass
                else:
                    new_node = QtWidgets.QTreeWidgetItem()
                    cur_node.addChild(new_node)
                    new_node.setExpanded(1)
                    self.DataToTree(tree, new_node, sub_data, filter, col)
def DataChildCheck(self, DataChild, filter, col):
ok_cnt = 0
if isinstance(filter, (str, unicode)):
filter = re.compile(filter, re.IGNORECASE)
for sub_data in DataChild:
if filter.search(sub_data[0][col]) or self.DataChildCheck(sub_data[1], filter, col):
ok_cnt +=1
return ok_cnt
def TreeExport(self, tree_name, file):
# export process
ui_data = self.TreeToData(self.uiList[tree_name], self.uiList[tree_name].invisibleRootItem())
# file process
if file.endswith('.dat'):
self.writeDataFile(ui_data, file, binary=1)
else:
self.writeDataFile(ui_data, file)
self.quickInfo("File: '"+file+"' creation finished.")
def TreeImport(self, tree_name, file):
# import process
ui_data = ""
if file.endswith('.dat'):
ui_data = self.readDataFile(file, binary=1)
else:
ui_data = self.readDataFile(file)
self.uiList['dir_tree'].clear()
self.DataToTree(self.uiList['dir_tree'], self.uiList['dir_tree'].invisibleRootItem(), ui_data)
self.quickInfo("File: '"+file+"' loading finished.")
# tree operation -- tmp v1.0
def quickTree(self, cur_node, node_data, editable=0):
# not per-column control on editable
if not isinstance(node_data, (list, tuple)):
node_data = [node_data]
# 1. create a new node
new_node = QtWidgets.QTreeWidgetItem()
for i,name in enumerate(node_data):
new_node.setText(i, name)
if editable == 1:
new_node.setFlags(new_node.flags()|QtCore.Qt.ItemIsEditable)
# 2. add it
cur_node.addChild(new_node)
# 3. expand it
cur_node.setExpanded(1)
def quickTreeRemove(self, cur_tree):
root = cur_tree.invisibleRootItem()
for item in cur_tree.selectedItems():
(item.parent() or root).removeChild(item)
# -- end tree operation
def cache_tree(self, cur_tree_name, force=1):
cur_tree = self.uiList[cur_tree_name]
if 'cache' not in self.memoData:
self.memoData['cache'] = {}
if force == 1:
self.memoData['cache'][cur_tree_name] = self.TreeToData(cur_tree, cur_tree.invisibleRootItem())
else:
if cur_tree_name not in self.memoData['cache']:
self.memoData['cache'][cur_tree_name] = self.TreeToData(cur_tree, cur_tree.invisibleRootItem())
    def filter_tree(self, cur_tree_name, word):
        # convenience wrapper: filter the named tree on column 0
        self.filter_tree_col(cur_tree_name, 0, word)
def filter_tree_col(self, cur_tree_name, col, word):
word = unicode(word)
cur_tree = self.uiList[cur_tree_name]
parentNode = cur_tree.invisibleRootItem()
# read cache, if no cache, create cache
self.cache_tree(cur_tree_name, force = 0)
# filter and show, reset back to cache
cur_tree.clear()
if word != '':
self.DataToTree(cur_tree, parentNode, self.memoData['cache'][cur_tree_name], filter=word, col=col)
else:
self.DataToTree(cur_tree, parentNode, self.memoData['cache'][cur_tree_name])
#############################################
# User Class creation
#############################################
# release metadata displayed by the template's About/Help UI
version = '0.1'
date = '2017.01.01'
# usage notes shown by the tool (kept verbatim in the UI)
log = '''
#------------------------------
# How to Use:
# 1. global replace class name "UserClassUI" to "YourToolName" in your editor,
# - in icons folder, the Tool GUI icon should name as "YourToolName.png"
# 2. change file name "universal_tool_template.py" to "YourPythonFileName.py",
# - in icons folder, the Maya shelf icon should name as "YourPythonFileName.png", if you name all name the same, then 1 icon is enough
# 3. load it up and run
#------------------------------
'''
# NOTE(review): this module-level name shadows the builtin help();
# the class stores it as self.help, so renaming would touch callers
help = '''
# loading template - Run in python panel
myPath='/path_to_universal_tool_or_custom_name/'
import sys;myPath in sys.path or sys.path.append(myPath);
import universal_tool_template
universal_tool_template.main()
# loading template - Run in system command console
python universal_tool_template.py
'''
# --------------------
# user module list
# --------------------
class UserClassUI(UniversalToolUI):
    """Example tool window built on UniversalToolUI.

    Demonstrates the text-based 'qui' UI creation DSL, menu setup,
    config/setting persistence, and the standard load/save hooks.
    Rename this class (and its icon files) to create a new tool; see the
    module-level `log` text for step-by-step instructions.
    """
    def __init__(self, parent=None, mode=0):
        UniversalToolUI.__init__(self, parent)
        # class variables
        self.version= version
        self.date = date
        self.log = log
        self.help = help
        # mode: example for receive extra user input as parameter
        self.mode = 0
        if mode in [0,1]:
            self.mode = mode # mode validator
        # Custom user variable
        #------------------------------
        # initial data
        #------------------------------
        self.memoData['data']=[]
        self.memoData['settingUI']=[]
        self.qui_user_dict = {} # e.g: 'edit': 'LNTextEdit',
        self.setupStyle()
        # menus only exist when the template host is a QMainWindow
        if isinstance(self, QtWidgets.QMainWindow):
            self.setupMenu()
        self.setupWin()
        self.setupUI()
        self.Establish_Connections()
        self.loadLang()
        self.loadData()
    #------------------------------
    # overwrite functions
    #------------------------------
    def setupMenu(self):
        """Build File/Setting/Help menus; Setting gets Export/Import/User
        config actions (Ctrl+E/I/U) plus an always-on-top toggle."""
        self.qui_menubar('file_menu;&File | setting_menu;&Setting | help_menu;&Help')
        info_list = ['export', 'import','user']
        info_item_list = ['{0}Config_atn;{1} Config (&{2}),Ctrl+{2}'.format(info,info.title(),info.title()[0]) for info in info_list]+['_']
        self.qui_menu('|'.join(info_item_list), 'setting_menu')
        # toggle on top
        self.qui_menu('toggleTop_atn;Toggle Always-On-Top', 'setting_menu')
        super(self.__class__,self).setupMenu()
    def setupWin(self):
        """Set the default window geometry after the base setup."""
        super(self.__class__,self).setupWin()
        self.setGeometry(500, 300, 250, 110) # self.resize(250,250)
    def setupUI(self):
        """Create all widgets/layouts through the text-based qui() DSL."""
        super(self.__class__,self).setupUI('grid')
        #------------------------------
        # user ui creation part
        #------------------------------
        # + template: qui version since universal tool template v7
        # - no extra variable name, all text based creation and reference
        self.qui('box_btn;Box | sphere_btn;Sphere | ring_btn;Ring', 'my_layout;grid', 'h')
        self.qui('box2_btn;Box2 | sphere2_btn;Sphere2 | ring2_btn;Ring2', 'my_layout', 'h')
        self.qui('cat_btn;Cat | dog_btn;Dog | pig_btn;Pig', 'pet_layout;grid', 'v')
        self.qui('cat2_btn;Cat2 | dog2_btn;Dog2 | pig2_btn;Pig2', 'pet_layout', 'v')
        self.qui('name_input@Name:;John | email_input@Email:;test@test.com', 'entry_form')
        self.qui('user2_btn;User2 | info2_btn;Info2', 'my_grp;vbox,Personal Data')
        self.qui('source_txt | process_btn;Process and Update', 'upper_vbox')
        self.qui('upper_vbox | result_txt', 'input_split;v')
        self.qui('filePath_input | fileLoad_btn;Load | fileExport_btn;Export', 'fileBtn_layout;hbox')
        self.qui('my_layout | my_table | input_split | entry_form | fileBtn_layout | pet_layout | my_grp', 'main_layout')
        # example: direct QTableWidget manipulation alongside the DSL
        cur_table = self.uiList['my_table']
        cur_table.setRowCount(0)
        cur_table.setColumnCount(1)
        cur_table.insertColumn(cur_table.columnCount())
        cur_item = QtWidgets.QTableWidgetItem('ok') #QtWidgets.QPushButton('Cool') #
        cur_table.insertRow(0)
        cur_table.setItem(0,1, cur_item) #setCellWidget(0,0,cur_item)
        cur_table.setHorizontalHeaderLabels(('a','b'))
        '''
        self.qui('source_txt | process_btn;Process and Update', 'upper_vbox')
        self.qui('upper_vbox | result_txt', 'input_split;v')
        self.qui('filePath_input | fileLoad_btn;Load | fileExport_btn;Export', 'fileBtn_layout;hbox')
        self.qui('input_split | fileBtn_layout', 'main_layout')
        '''
        self.memoData['settingUI']=[]
        #------------- end ui creation --------------------
        # strip margins from every nested layout except the listed keepers
        keep_margin_layout = ['main_layout']
        keep_margin_layout_obj = []
        # add tab layouts
        for each in self.uiList.values():
            if isinstance(each, QtWidgets.QTabWidget):
                for i in range(each.count()):
                    keep_margin_layout_obj.append( each.widget(i).layout() )
        for name, each in self.uiList.items():
            if isinstance(each, QtWidgets.QLayout) and name not in keep_margin_layout and not name.endswith('_grp_layout') and each not in keep_margin_layout_obj:
                each.setContentsMargins(0, 0, 0, 0)
        self.quickInfo('Ready')
        # self.statusBar().hide()
    def Establish_Connections(self):
        """Hook up signals; base class wires *_btn/_atn by naming convention."""
        super(self.__class__,self).Establish_Connections()
        # custom ui response
        # shortcut connection
        self.hotkey = {}
        # self.hotkey['my_key'] = QtWidgets.QShortcut(QtGui.QKeySequence( "Ctrl+1" ), self)
        # self.hotkey['my_key'].activated.connect(self.my_key_func)
        # ---- user response list ----
    def loadData(self):
        """Load the default config, merge an optional external JSON config
        next to the script, then restore per-user settings (standalone
        mode only)."""
        print("Load data")
        # load config
        config = {}
        config['root_name'] = 'root_default_name'
        # overload config file if exists next to it
        # then, save merged config into self.memoData['config']
        prefix, ext = os.path.splitext(self.location)
        config_file = prefix+'_config.json'
        if os.path.isfile(config_file):
            external_config = self.readDataFile(config_file)
            print('info: External config file found.')
            if isinstance( external_config, dict ):
                self.memoData['config'] = self.dict_merge(config, external_config, addKey=1)
                print('info: External config merged.')
            else:
                self.memoData['config'] = config
                print('info: External config is not a dict and ignored.')
        else:
            self.memoData['config'] = config
        # load user setting
        user_setting = {}
        if self.mode == 0:
            # for standalone mode only
            user_dirPath = os.path.join(os.path.expanduser('~'), 'Tool_Config', self.__class__.__name__)
            user_setting_filePath = os.path.join(user_dirPath, 'setting.json')
            if os.path.isfile(user_setting_filePath):
                user_setting = self.readDataFile(user_setting_filePath)
                if 'sizeInfo' in user_setting:
                    self.setGeometry(*user_setting['sizeInfo'])
                # custome setting loading here
                preset = {}
                for ui in self.memoData['settingUI']:
                    if ui in user_setting:
                        preset[ui]=user_setting[ui]
                #self.updateUI(preset)
    def closeEvent(self, event):
        """On close (standalone mode), persist window geometry and the
        values of every UI listed in memoData['settingUI']."""
        if self.mode == 0:
            # for standalone mode only
            user_dirPath = os.path.join(os.path.expanduser('~'), 'Tool_Config', self.__class__.__name__)
            if not os.path.isdir(user_dirPath):
                try:
                    os.makedirs(user_dirPath)
                except OSError:
                    print('Error on creation user data folder')
            if not os.path.isdir(user_dirPath):
                print('Fail to create user dir.')
                return
            # save setting
            user_setting = {}
            geoInfo = self.geometry()
            user_setting['sizeInfo'] = [geoInfo.x(), geoInfo.y(), geoInfo.width(), geoInfo.height()]
            # custome setting saving here
            for ui in self.memoData['settingUI']:
                if ui.endswith('_choice'):
                    user_setting[ui] = unicode(self.uiList[ui].currentText())
                elif ui.endswith('_input'):
                    user_setting[ui] = unicode(self.uiList[ui].text())
            user_setting_filePath = os.path.join(user_dirPath, 'setting.json')
            self.writeDataFile(user_setting, user_setting_filePath)
    # - example button functions
    def updateUI(self,preset={}):
        # NOTE(review): mutable default argument; harmless here because
        # preset is only read, but pass a dict explicitly to be safe
        show_list = []
        # update based on setting preset value
        cur_preset_ui = 'root_choice'
        if cur_preset_ui in preset:
            if preset[cur_preset_ui] != '' and preset[cur_preset_ui] in show_list:
                self.uiList[cur_preset_ui].setCurrentIndex(show_list.index(preset[cur_preset_ui]))
    def process_action(self): # (optional)
        """Example action: split source_txt into lines, store them in
        memoData['data'], and echo a decorated listing into result_txt."""
        config = self.memoData['config']
        print("Process ....")
        source_txt = unicode(self.uiList['source_txt'].toPlainText())
        # 2: update memory
        self.memoData['data'] = [row.strip() for row in source_txt.split('\n')]
        print("Update Result")
        txt=config['root_name']+'\n'+'\n'.join([('>>: '+row) for row in self.memoData['data']])
        self.uiList['result_txt'].setText(txt)
    # - example file io function
    def exportConfig_action(self):
        """Export memoData['config'] to a user-chosen file (.xdat = pickle)."""
        file= self.quickFileAsk('export', {'json':'JSON data file', 'xdat':'Pickle binary file'})
        if file == "":
            return
        # export process
        ui_data = self.memoData['config']
        # file process
        if file.endswith('.xdat'):
            self.writeDataFile(ui_data, file, binary=1)
        else:
            self.writeDataFile(ui_data, file)
        self.quickInfo("File: '"+file+"' creation finished.")
    def importConfig_action(self):
        """Import memoData['config'] from a user-chosen file (.xdat = pickle)."""
        file= self.quickFileAsk('import',{'json':'JSON data file', 'xdat':'Pickle binary file'})
        if file == "":
            return
        # import process
        ui_data = ""
        if file.endswith('.xdat'):
            ui_data = self.readDataFile(file, binary=1)
        else:
            ui_data = self.readDataFile(file)
        self.memoData['config'] = ui_data
        self.quickInfo("File: '"+file+"' loading finished.")
    def userConfig_action(self):
        """Open the per-user Tool_Config folder in the system file browser."""
        user_dirPath = os.path.join(os.path.expanduser('~'), 'Tool_Config', self.__class__.__name__)
        self.openFolder(user_dirPath)
#=======================================
# window instance creation
#=======================================
single_UserClassUI = None
app_UserClassUI = None
def main(mode=0):
    """Create (or re-activate) the single tool window for the current host.

    Handles per-host parenting (Maya/Nuke), single-instance enforcement on
    Windows desktop via ctypes window enumeration, and QApplication
    creation/looping for standalone hosts.
    Fixes: `hostMode in ('houdini')` and `in ('desktop')` were substring
    tests against a plain string (a 1-element parenthesized expression is
    not a tuple); both now use proper 1-tuples.
    """
    # get parent window in Maya
    parentWin = None
    if hostMode == "maya":
        if qtMode in (0,2): # pyside
            parentWin = shiboken.wrapInstance(long(mui.MQtUtil.mainWindow()), QtWidgets.QWidget)
        elif qtMode in (1,3): # PyQt
            parentWin = sip.wrapinstance(long(mui.MQtUtil.mainWindow()), QtCore.QObject)
    # create app object for certain host
    global app_UserClassUI
    if hostMode in ('desktop', 'blender', 'npp', 'fusion'):
        # single instance app mode on windows
        if osMode == 'win':
            # check if already open for single desktop instance
            from ctypes import wintypes
            # walk the top-level window z-order collecting (title, class, hwnd)
            order_list = []
            result_list = []
            top = ctypes.windll.user32.GetTopWindow(None)
            if top:
                length = ctypes.windll.user32.GetWindowTextLengthW(top)
                buff = ctypes.create_unicode_buffer(length + 1)
                ctypes.windll.user32.GetWindowTextW(top, buff, length + 1)
                class_name = ctypes.create_string_buffer(200)
                ctypes.windll.user32.GetClassNameA(top, ctypes.byref(class_name), 200)
                result_list.append( [buff.value, class_name.value, top ])
                order_list.append(top)
                while True:
                    next = ctypes.windll.user32.GetWindow(order_list[-1], 2) # win32con.GW_HWNDNEXT
                    if not next:
                        break
                    length = ctypes.windll.user32.GetWindowTextLengthW(next)
                    buff = ctypes.create_unicode_buffer(length + 1)
                    ctypes.windll.user32.GetWindowTextW(next, buff, length + 1)
                    class_name = ctypes.create_string_buffer(200)
                    ctypes.windll.user32.GetClassNameA(next, ctypes.byref(class_name), 200)
                    result_list.append( [buff.value, class_name.value, next] )
                    order_list.append(next)
            # result_list: [(title, class, hwnd int)]
            winTitle = 'UserClassUI' # os.path.basename(os.path.dirname(__file__))
            is_opened = 0
            for each in result_list:
                if re.match(winTitle+' - v[0-9.]* - host: desktop',each[0]) and each[1] == 'QWidget':
                    is_opened += 1
                    if is_opened == 1:
                        # an instance already exists: bring it to front and bail
                        ctypes.windll.user32.SetForegroundWindow(each[2])
                        return
        if hostMode in ('npp','fusion'):
            app_UserClassUI = QtWidgets.QApplication([])
        elif hostMode in ('houdini',):
            # fix: ('houdini') was a plain string, making this a substring test
            # NOTE(review): 'houdini' is not in the enclosing tuple above,
            # so this branch looks unreachable here — confirm intent
            pass
        else:
            app_UserClassUI = QtWidgets.QApplication(sys.argv)
    #--------------------------
    # ui instance
    #--------------------------
    # template 1 - Keep only one copy of windows ui in Maya
    global single_UserClassUI
    if single_UserClassUI is None:
        if hostMode == 'maya':
            single_UserClassUI = UserClassUI(parentWin, mode)
        elif hostMode == 'nuke':
            single_UserClassUI = UserClassUI(QtWidgets.QApplication.activeWindow(), mode)
        else:
            single_UserClassUI = UserClassUI()
        # extra note: in Maya () for no parent; (parentWin,0) for extra mode input
    single_UserClassUI.show()
    ui = single_UserClassUI
    if hostMode != 'desktop':
        ui.activateWindow()
    # template 2 - allow loading multiple windows of same UI in Maya
    '''
    if hostMode == "maya":
        ui = UserClassUI(parentWin)
        ui.show()
    else:
        pass
    # extra note: in Maya () for no parent; (parentWin,0) for extra mode input
    '''
    # loop app object for certain host
    if hostMode in ('desktop',):
        # fix: ('desktop') was a plain string, making this a substring test
        sys.exit(app_UserClassUI.exec_())
    elif hostMode in ('npp','fusion'):
        app_UserClassUI.exec_()
    return ui
if __name__ == "__main__":
    main()
|
from .base_options import BaseOptions
import socket
class TestOptions(BaseOptions):
    """Command-line options for test/inference runs (extends BaseOptions)."""
    def initialize(self, parser):
        """Register test-time options on *parser*, adjust defaults, and
        mark the run as non-training. Returns the parser."""
        parser = BaseOptions.initialize(self, parser)
        add = parser.add_argument
        add('--ntest', type=int, default=float("inf"), help='# of test examples.')
        add('--results_dir', type=str, default='./results/', help='saves results here.')
        add('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        add('--phase', type=str, default='test', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        add('--how_many', type=int, default=50, help='how many test images to run')
        parser.set_defaults(model='dmorph')
        # keep loadSize equal to fineSize so no cropping happens at test time
        parser.set_defaults(loadSize=parser.get_default('fineSize'))
        self.isTrain = False
        return parser
|
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import numpy as np
# Box coder types.
# String identifiers for the available coder implementations; not
# referenced in this module — presumably consumed by builder/factory
# code elsewhere to select a concrete BoxCoder subclass (verify there).
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder(object):
  """Abstract base class for box coders.

  A box coder translates boxes between the image-centric frame and a
  representation relative to a matching anchor. Subclasses implement
  _encode/_decode; callers use encode/decode, which simply delegate.
  Boxes and anchors are assumed to already be in 1-1 correspondence —
  no matching is performed here.
  """
  __metaclass__ = ABCMeta

  @abstractproperty
  def code_size(self):
    """Size (an integer constant) of a single encoded box.

    Must agree with the trailing dimension produced by `encode`:
    self.encode(...) has shape [N, code_size]. Subclasses must
    override this property.
    """
    pass

  def encode(self, boxes, anchors):
    """Encode N boxes relative to their N matching anchors.

    Args:
      boxes: BoxList holding N boxes to be encoded
      anchors: BoxList of N anchors

    Returns:
      a tensor representing N relative-encoded boxes
    """
    return self._encode(boxes, anchors)

  def decode(self, rel_codes, anchors):
    """Invert `encode`: recover absolute boxes from relative codes.

    Args:
      rel_codes: a tensor representing N relative-encoded boxes
      anchors: BoxList of anchors

    Returns:
      boxlist: BoxList holding N boxes encoded in the ordinary way
        (i.e., with corners y_min, x_min, y_max, x_max)
    """
    return self._decode(rel_codes, anchors)

  @abstractmethod
  def _encode(self, boxes, anchors):
    """Implementation hook for `encode`; see `encode` for the contract."""
    pass

  @abstractmethod
  def _decode(self, rel_codes, anchors):
    """Implementation hook for `decode`; see `decode` for the contract."""
    pass
|
'''
Data pre-processing
'''
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
def concat2D(data):
    '''
    Concatenate all spectra opened with agilentFPA_multiple or agilentFPA where
    mode = 'mosaic'. Useful for processing.
    Create a label for each spectrum. Useful for group analysis, model
    training/testing.
    Parameters
    ----------
    data : list of dict or dict
        Data files opened with agilentFPA_multiple (single or mosaic) or
        one data file opened with agilentFPA (mosaic)
    Returns
    -------
    spec_concat : ndarray
        All spectra in the shape [n_spectra, n_points].
    label_concat : list
        List of one filename per each spectrum.
    Raises
    ------
    ValueError
        If a 'spec' entry is neither 2D (single) nor 3D (mosaic).
    '''
    spec_all = []
    label_concat = []
    # If only one mosaic, create a list with only it
    if isinstance(data, dict):
        data = [data]
    # Get spectra
    for oneFPA in data:
        spec_one = np.asarray(oneFPA['spec'])
        if spec_one.ndim == 2:
            # single: already [n_spectra, n_points]
            spec_2d = spec_one
        elif spec_one.ndim == 3:
            # mosaic: flatten the tile axes into one spectra axis; -1 infers
            # fpa_size*fpa_size*tiles_x*tiles_y from the data itself
            spec_2d = spec_one.reshape((-1, len(oneFPA['wn'])))
        else:
            # previously this case silently produced labels with no spectra
            raise ValueError(
                "Unexpected 'spec' with {} dimensions for file {!r}; "
                'expected 2 (single) or 3 (mosaic).'.format(
                    spec_one.ndim, oneFPA.get('filename')))
        spec_all.append(spec_2d)
        # fix: derive the label count from the actual number of spectra —
        # the old fpa_size**2 * tiles_x * tiles_y product could disagree
        # with the real row count for single (2D) data
        label_concat += [oneFPA['filename']] * spec_2d.shape[0]
    # All spectra to one 2D array
    spec_concat = np.concatenate(spec_all, axis=0)
    print('Spectra concatenated. Labels created.')
    return spec_concat, label_concat
def wnnear(wn, value):
    '''
    Find the index of the wavenumber closest to a target value
    (wavenumbers are not integers, so an exact match may not exist).

    Parameters
    ----------
    wn : ndarray
        Wavenumber of shape [n_points].
    value : int
        Point to be found.

    Returns
    -------
    idx : int
        Index of the closest wavenumber point.
    '''
    distances = np.abs(wn - value)
    return np.argmin(distances)
def cut_outer(wn, spec, init, final):
    '''
    Keep only the spectral region between init and final (inclusive),
    discarding both outer ends:
        XXXXX----------XXXXX   (X removed, - kept)

    Parameters
    ----------
    wn : ndarray
        Wavenumber of shape [n_points].
    spec : ndarray
        Spectra of shape [n_spectra, n_points].
    init : int
        Initial value to hold.
    final : int
        Final value to hold.

    Returns
    -------
    wn : ndarray
        Cut wavenumber of shape [n_points].
    spec : ndarray
        Cut spectra of shape [n_spectra, n_points].
    '''
    keep = (wn >= init) & (wn <= final)
    wn_kept = wn[keep]
    spec_kept = spec[:, keep]
    print(f'Selected from {init} to {final} cm-1.')
    return wn_kept, spec_kept
def cut_inner(wn, spec, init, final):
    '''
    Remove the spectral region between init and final (inclusive),
    keeping both outer ends:
        ----------XXXXX----------   (X removed, - kept)

    Parameters
    ----------
    wn : ndarray
        Wavenumber of shape [n_points].
    spec : ndarray
        Spectra of shape [n_spectra, n_points].
    init : int
        Initial value to remove.
    final : int
        Final value to remove.

    Returns
    -------
    wn : ndarray
        Cut wavenumber of shape [n_points].
    spec : ndarray
        Cut spectra of shape [n_spectra, n_points].
    '''
    keep = (wn < init) | (wn > final)
    wn_kept = wn[keep]
    spec_kept = spec[:, keep]
    print(f'Removed from {init} to {final} cm-1.')
    return wn_kept, spec_kept
def sg(spec, window, deriv=0):
    '''
    Savitzky–Golay filtering (second-order polynomial) along the
    wavenumber axis.

    Parameters
    ----------
    spec : ndarray
        Spectra of shape [n_spectra, n_points].
    window : int
        Length of the filter window.
    deriv : int, optional
        Derivative order. The default is 0 (no differentiation).

    Returns
    -------
    spec : ndarray
        Filtered spectra of same shape.
    '''
    from scipy.signal import savgol_filter
    filtered = savgol_filter(
        spec,
        window_length=window,
        polyorder=2,
        deriv=deriv,
        axis=1,
    )
    print(f'Savitzky–Golay filter applied. \n'
          f'- Window lenght = {window} \n'
          f'- Derivative order = {deriv}')
    return filtered
def vector(spec):
    '''
    Vector (Euclidean/L2) normalization: each spectrum is divided by its norm.

    Parameters
    ----------
    spec : ndarray
        Spectra of shape [n_spectra, n_points].

    Returns
    -------
    spec : ndarray
        Normalized spectra of same shape.
    '''
    norms = np.sqrt(np.square(spec).sum(axis=1))
    normalized = spec / norms[:, None]
    print('Vector normalization applied.')
    return normalized
def minmax(spec):
    '''
    Min-max normalization: each spectrum is rescaled to the [0, 1] range.

    Parameters
    ----------
    spec : ndarray
        Spectra of shape [n_spectra, n_points].

    Returns
    -------
    spec : ndarray
        Normalized spectra of same shape.
    '''
    lo = np.min(spec, axis=1)[:, None]
    hi = np.max(spec, axis=1)[:, None]
    normalized = (spec - lo) / (hi - lo)
    print('Min-max normalization applied.')
    return normalized
def snv(spec):
    '''
    Standard Normal Variate: each spectrum is centered on its own mean and
    scaled by its own standard deviation.

    Parameters
    ----------
    spec : ndarray
        Spectra of shape [n_spectra, n_points].

    Returns
    -------
    spec : ndarray
        Normalized spectra of same shape.
    '''
    centered = spec - np.mean(spec, axis=1)[:, None]
    normalized = centered / np.std(spec, axis=1)[:, None]
    print('Standard Normal Variate applied.')
    return normalized
def emsc(
    spec,
    degree=2,
    norm=True,
    paraffin=np.array(False),
    wn=np.array(False),
    expvar = 0.99
):
    '''
    Extended multiplicative signal correction (EMSC).
    As described in Afseth and Kohler, 2012 (10.1016/j.chemolab.2012.03.004).
    - spec = a + spec_mean*b + e
    - spec_corr = (spec - a)/b
    - spec_corr = (spec - a - d1*(spec) - d2*(spec**2) - ... - dn*(spec**n))/b

    Parameters
    ----------
    spec : ndarray
        Spectra of shape [n_spectra, n_points].
    degree : int, optional
        Degree of the polynomial model. The default is 2.
    norm : bool, optional
        Normalize the data (divide by the fitted multiplicative factor b).
        The default is True.
    paraffin : ndarray, optional
        Paraffin spectra. The default is False.
        If paraffin spectra of same shape as spec is informed, a PCA model
        of paraffin is added to the EMSC model.
        A wn vector must be informed in this case to mask the paraffin region.
    wn : ndarray, optional
        If paraffin spectra is informed, a wn of shape [n_points] has to be
        informed as well.
    expvar : float
        Explained variance. If paraffin spectra is informed, the number of PCs
        is selected to reach the desired expvar. The default is 99%.

    Returns
    -------
    spec_corr : ndarray
        Corrected spectra.
    '''
    # Polynomial baseline model: columns are d^0, d^1, ..., d^degree of a
    # [-1, 1] ramp over the wavenumber axis.
    d = np.linspace(-1, 1, np.shape(spec)[1]).reshape(-1,1)
    d = np.repeat(d,degree+1,axis=1)**np.arange(0,degree+1)
    # If paraffin spectra were given, add a paraffin sub-model to the design.
    if paraffin.any():
        # Fit a PCA on the paraffin spectra.
        pca = PCA()
        pca.fit(paraffin)
        # Keep eigenvectors whose cumulative explained variance is <= expvar.
        # NOTE(review): if the first PC alone already exceeds expvar, this
        # selects zero components -- confirm that is acceptable.
        variance_cumulative = np.cumsum(pca.explained_variance_ratio_)
        pcselect = (variance_cumulative <= expvar)
        vectors = pca.components_.T[:,pcselect]
        # Restrict the paraffin model to the 1350-1500 cm-1 band: the mean
        # paraffin spectrum and the eigenvectors are zeroed outside it.
        mask = ((wn >= 1350) & (wn <= 1500))
        par_mean = np.mean(paraffin, axis=0).reshape(-1,1)
        par_mean[~mask] = 0
        vectors[~mask,:] = 0
        # Design matrix: [mean spectrum | polynomial | paraffin mean | paraffin PCs].
        model = np.hstack((
            np.mean(spec, axis=0).reshape(-1,1),
            d,
            par_mean,
            vectors
        ))
    else:
        # Design matrix: [mean spectrum | polynomial terms].
        model = np.hstack((
            np.mean(spec, axis=0).reshape(-1,1),
            d
        ))
    # Least-squares fit of every spectrum against the design matrix.
    params = np.linalg.lstsq(model, spec.T, rcond=None)[0]
    # Subtract everything except the mean-spectrum term (a, d1, d2, ..., dn).
    spec_corr = spec - model[:,1:].dot(params[1:,:]).T
    # Divide by the multiplicative factor b (coefficient of the mean spectrum).
    if norm:
        spec_corr = spec_corr/(params[0,:].T[:,None])
    print('EMSC applied.')
    return spec_corr
def meancenter(spec, orientation):
    '''
    Mean center the spectra along rows or columns.

    Parameters
    ----------
    spec : ndarray
        Spectra of shape [n_spectra, n_points].
    orientation : str
        Either "row" (center each spectrum on its own mean) or "column"
        (center each wavenumber point on the across-spectra mean).

    Returns
    -------
    spec : ndarray
        Mean centered spectra of same shape (unchanged input if the
        orientation is not recognized).
    '''
    if orientation == 'row':
        centered = spec - np.mean(spec, axis=1)[:, None]
        print('Mean centered (rows).')
        return centered
    if orientation == 'column':
        centered = spec - np.mean(spec, axis=0)[None, :]
        print('Mean centered (columns).')
        return centered
    print('Invalid orientation! \nSelect "row" or "column" orientation.')
    return spec
def offset(spec):
    '''
    Remove the per-spectrum offset by subtracting each row's minimum,
    so every spectrum has a minimum of zero.

    Parameters
    ----------
    spec : ndarray
        Spectra of shape [n_spectra, n_points].

    Returns
    -------
    spec : ndarray
        Spectra without offset.
    '''
    row_min = np.min(spec, axis=1)
    return spec - row_min[:, None]
def quality(spec, wn,
            signal=[1620,1690], noise=[1800,1900],
            threshold=False, label=False):
    '''
    Quality test based on the Signal-to-Noise Ratio (SNR): the area of the
    signal band divided by the area of the noise band, after removing the
    per-spectrum offset. Always plots an SNR histogram; optionally removes
    bad-quality spectra below a threshold.

    Parameters
    ----------
    spec : ndarray
        Spectra of shape [n_spectra, n_points].
    wn : ndarray
        Wavenumber of shape [n_points].
    signal : list of int, optional
        Initial and end points of the signal band.
        The default is [1620,1690] for the Amide I band.
    noise : list of int, optional
        Initial and end points of the noise band.
        The default is [1800,1900] (usually the biological dead region).
    threshold : float, optional
        SNRs lower than the threshold are bad quality. The default is False.
    label : list, optional
        List of labels. The default is False. Pass only if threshold.

    Returns
    -------
    quality_bad : boolean, optional (only if threshold)
        Array identifying outliers.
    spec_clean : ndarray, optional (only if threshold)
        Cleaned spectra of shape [n_spectra, n_points] (bad quality removed).
    label_clean : ndarray, optional (only if threshold)
        Cleaned labels of shape [n_spectra] (bad quality removed).
    '''
    # Offset removal, then band areas (trapezoidal rule, unit spacing).
    baselined = offset(spec)
    in_signal = (wn >= signal[0]) & (wn <= signal[1])
    in_noise = (wn >= noise[0]) & (wn <= noise[1])
    snr = np.trapz(baselined[:, in_signal]) / np.trapz(baselined[:, in_noise])

    def _histogram(draw_threshold):
        # SNR distribution, with an optional red line at the threshold.
        fig, ax = plt.subplots()
        plt.hist(snr, bins=500)
        plt.xlabel('SNR')
        plt.ylabel('Frequency')
        if draw_threshold:
            plt.axvline(x=threshold, color='red', linewidth=1)
        plt.xlim(0)

    if threshold:
        from itertools import compress
        quality_bad = snr < threshold
        spec_clean = spec[~quality_bad,:]
        label_clean = list(compress(label, ~quality_bad))
        print(f'{sum(quality_bad)} bad quality spectra found'
              f' ({np.round((sum(quality_bad)/spec.shape[0])*100, 2)}%'
              ' of the total spectra).')
        _histogram(True)
        return quality_bad, spec_clean, label_clean
    else:
        _histogram(False)
def pcanoise(spec, ncomp=False, expvar=False):
    '''
    PCA Noise Reduction: reconstruct the spectra from a truncated PCA model,
    discarding the low-variance components assumed to carry mostly noise.

    Exactly one of `ncomp` or `expvar` must be informed:
    - pcanoise(spec, ncomp=10): use a PCA model with 10 PCs.
    - pcanoise(spec, expvar=0.9): use as many PCs as needed to reach 90%
      cumulative explained variance.

    Parameters
    ----------
    spec : ndarray
        Spectra of shape [n_spectra, n_points].
    ncomp : int, optional
        Number of Principal Components. The default is False.
    expvar : float, optional
        Explained variance to reach with the PCs. The default is False.

    Returns
    -------
    spec_denoise : ndarray
        Spectra after PCA Noise Reduction (None and an error message when
        the ncomp/expvar arguments are inconsistent).
    '''
    # NOTE(review): in both branches the "loadings" are scaled by
    # sqrt(explained_variance_), so the reconstruction is not a plain
    # scores @ components_ inverse transform -- confirm this scaling is
    # intended before relying on absolute intensities.
    if ncomp and not expvar:
        # Fixed number of components.
        pca = PCA(n_components=ncomp)
        scores = pca.fit_transform(spec)
        variance_cumulative = np.cumsum(pca.explained_variance_ratio_)
        loadings = (pca.components_.T
                    * np.sqrt(pca.explained_variance_))
        # Reconstruct around the mean spectrum.
        spec_denoise = np.mean(spec, axis=0) + np.dot(scores, loadings.T)
        print(f'PCA Noise Reduction applied: \n' \
              f'- Selected PCs: {ncomp}. \n' \
              f'- Explained Variance: ' \
              f'{np.round(variance_cumulative[-1]*100, 2)}%.')
        return spec_denoise
    elif expvar and not ncomp:
        # Fit a full PCA model. (Bug fix: the original bound the fitted
        # estimator returned by pca.fit() to `scores`, a misleading dead
        # assignment -- the real scores come from pca.transform() below.)
        pca = PCA()
        pca.fit(spec)
        # Select the PCs whose cumulative explained variance is <= expvar.
        variance_cumulative = np.cumsum(pca.explained_variance_ratio_)
        pcselect = (variance_cumulative <= expvar)
        # Parameters restricted to the selected PCs.
        scores = pca.transform(spec)[:, pcselect]
        loadings = (pca.components_.T[:, pcselect]
                    * np.sqrt(pca.explained_variance_)[pcselect])
        spec_denoise = np.mean(spec, axis=0) + np.dot(scores, loadings.T)
        print(f'PCA Noise Reduction applied: \n' \
              f'- Selected PCs: {sum(pcselect)}. \n' \
              f'- Explained Variance: ' \
              f'{np.round(variance_cumulative[pcselect][-1]*100, 2)}%.')
        return spec_denoise
    elif expvar and ncomp:
        print('Invalid input! Two parameters informed.\n' \
              'Inform only one: number of PCs (ncomp) ' \
              'OR explained variance (expvar).')
    elif not expvar and not ncomp:
        print('Invalid input! No parameter informed.\n' \
              'Inform only one: number of PCs (ncomp) ' \
              'OR explained variance (expvar).')
|
from itertools import chain
from django.contrib.auth.models import AbstractUser
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
import pyotp
class User(AbstractUser):
    """Custom user model with password-history tracking and 2FA device lookup."""

    # Email is the preferred display identity and must be unique.
    email = models.EmailField(unique=True)
    # True for accounts managed by an external source -- presumably a remote
    # identity provider; such accounts lock username/email editing below.
    is_remote = models.BooleanField(default=False)
    # Set automatically in save() whenever the password hash changes.
    password_updated_at = models.DateTimeField(blank=True, null=True, editable=False)

    class Meta:
        ordering = ("username",)

    def __str__(self):
        # Prefer the email; fall back to the username when email is empty.
        return self.email or self.username

    def save(self, *args, **kwargs):
        if self.pk:
            # Updating an existing row: reload the stored version to detect
            # a password change.
            old_user = self._meta.model.objects.get(pk=self.pk)
            if old_user.password != self.password:
                if old_user.has_usable_password():
                    # Archive the previous hash (and when it was set) before
                    # it is overwritten.
                    UserPasswordHistory.objects.create(
                        user=self,
                        password=old_user.password,
                        created_at=old_user.password_updated_at
                    )
                self.password_updated_at = timezone.now()
        elif self.password:
            # New user created with a password: stamp it now.
            self.password_updated_at = timezone.now()
        super().save(*args, **kwargs)

    def username_and_email_editable(self):
        # Remote (externally managed) accounts cannot change these fields here.
        return not self.is_remote

    def is_superuser_editable(self):
        # Allow toggling superuser only if at least one other superuser remains.
        return (not self.is_superuser or
                User.objects.exclude(pk=self.pk).filter(is_superuser=True).count() > 0)

    def editable(self):
        return self.username_and_email_editable() or self.is_superuser_editable()

    def deletable(self):
        return not self.is_superuser

    @cached_property
    def has_verification_device(self):
        # True when the user has at least one TOTP or U2F device enrolled.
        return len(self._all_verification_devices) > 0

    @cached_property
    def _all_verification_devices(self):
        # All enrolled 2FA devices, TOTP first then U2F (cached per instance).
        return list(chain(self.usertotp_set.all(),
                          self.useru2f_set.all()))

    def get_verification_devices(self):
        # Devices sorted alphabetically by name, for display.
        return sorted(self._all_verification_devices,
                      key=lambda vd: vd.name)

    def get_prioritized_verification_devices(self, user_agent):
        # Highest PRIORITY first, name as tie-breaker.
        verification_devices = sorted(self._all_verification_devices,
                                      key=lambda vd: (-1 * vd.PRIORITY, vd.name))
        # Keep only devices usable from this browser (e.g. U2F excludes Safari).
        ua_verification_devices = [vd for vd in verification_devices if vd.test_user_agent(user_agent)]
        if not ua_verification_devices and verification_devices:
            # The user has devices, but none works with this user agent.
            raise ValueError("No verification devices compatible with this user agent")
        else:
            return ua_verification_devices
class UserPasswordHistory(models.Model):
    """Archive of a user's previous password hashes (written by User.save())."""

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # The superseded password hash, as stored by Django's hasher.
    password = models.CharField(_('password'), max_length=128)
    # When the archived password had originally been set (copied from
    # User.password_updated_at, so it may be NULL for legacy rows).
    created_at = models.DateTimeField(editable=False)
class UserVerificationDevice(models.Model):
    """Abstract base for a user's 2FA devices.

    Concrete subclasses (UserTOTP, UserU2F) must define TYPE, PRIORITY,
    delete_url_name, and test_user_agent(), which this base class relies on.
    """

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=256)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True

    def get_type_for_display(self):
        # TYPE is declared on the concrete subclass.
        return self.TYPE

    def __str__(self):
        return "{} {}".format(self.get_type_for_display(), self.name)

    def get_delete_url(self):
        # delete_url_name is declared on the concrete subclass.
        return reverse(self.delete_url_name, args=(self.pk,))

    def serialize_for_event(self):
        # Minimal representation for audit/event logging.
        return {"type": self.TYPE,
                "name": self.name}
class UserTOTP(UserVerificationDevice):
    """Time-based one-time-password (TOTP) 2FA device."""

    TYPE = "TOTP"
    # Lower priority than U2F: preferred less when both are available.
    PRIORITY = 10
    # Shared secret used to generate/verify TOTP codes.
    secret = models.CharField(max_length=256)
    delete_url_name = "users:delete_totp"

    class Meta:
        unique_together = (("user", "name"),)

    def get_verification_url(self):
        return reverse("verify_totp")

    def verify(self, code):
        # Delegate code verification to pyotp with this device's secret.
        return pyotp.TOTP(self.secret).verify(code)

    def test_user_agent(self, user_agent):
        # TOTP entry works from any browser.
        return True
class UserU2F(UserVerificationDevice):
    """U2F hardware-token 2FA device."""

    TYPE = "U2F"
    # Higher priority than TOTP: preferred when the browser supports it.
    PRIORITY = 100
    delete_url_name = "users:delete_u2f_device"
    # Raw U2F registration data as returned by the enrollment flow.
    device = JSONField()

    class Meta:
        unique_together = (("user", "device"), ("user", "name"))

    def get_verification_url(self):
        return reverse("verify_u2f")

    def test_user_agent(self, user_agent):
        # U2F is unavailable in Safari; also reject a missing user agent.
        return user_agent and 'safari' not in user_agent.lower()
|
#!/usr/bin/env python3
"""Combine logs from multiple pyeongtaekcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches the ISO-8601 timestamp (microsecond precision, trailing Z) at the
# start of a log line; its presence marks the beginning of a new log event.
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")

# One merged log record: timestamp string, originating source name
# ("test" or "nodeN"), and the full (possibly multi-line) event text.
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
    """Main function. Parses args, reads the log files and renders them as text or html."""
    parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
    parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
    parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
    args, unknown_args = parser.parse_known_args()

    # Color codes only work on posix terminals.
    if args.color and os.name != 'posix':
        print("Color output requires posix terminal colors.")
        sys.exit(1)

    # --color and --html are mutually exclusive render modes.
    if args.html and args.color:
        print("Only one out of --color or --html should be specified")
        sys.exit(1)

    # The single positional argument is the temporary test directory.
    if len(unknown_args) != 1:
        print("Unexpected arguments" + str(unknown_args))
        sys.exit(1)

    print_logs(read_logs(unknown_args[0]), color=args.color, html=args.html)
def read_logs(tmp_dir):
    """Reads log files.

    Collects the test_framework log plus each node's debug.log (node0, node1,
    ... until the first missing file), then merges the per-file event streams
    from get_log_events() in timestamp order."""
    files = [("test", "%s/test_framework.log" % tmp_dir)]
    node_index = 0
    while True:
        logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, node_index)
        if not os.path.isfile(logfile):
            break
        files.append(("node%d" % node_index, logfile))
        node_index += 1

    return heapq.merge(*(get_log_events(source, f) for source, f in files))
def get_log_events(source, logfile):
    """Generator function that returns individual log events.

    Log events may span multiple lines; a line matching TIMESTAMP_PATTERN
    starts a new event, and non-matching lines are appended to the current
    one. A missing file is reported on stderr and yields nothing."""
    try:
        with open(logfile, 'r', encoding='utf-8') as infile:
            event = ''
            timestamp = ''
            for line in infile:
                # Blank lines are noise between events.
                if line == '\n':
                    continue
                match = TIMESTAMP_PATTERN.match(line)
                if match is None:
                    # Continuation of the previous event.
                    event += "\n" + line
                else:
                    # New event: emit the buffered one first.
                    if event:
                        yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                    event = line
                    timestamp = match.group()
            # Flush the final event.
            yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
    except FileNotFoundError:
        print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
    """Renders the iterator of log events into text or html."""
    if html:
        # HTML rendering requires the optional jinja2 dependency.
        try:
            import jinja2
        except ImportError:
            print("jinja2 not found. Try `pip install jinja2`")
            sys.exit(1)
        environment = jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
        template = environment.get_template('combined_log_template.html')
        print(template.render(title="Combined Logs from testcase",
                              log_events=[event._asdict() for event in log_events]))
        return

    # Plain-text rendering; unknown sources get an empty color code.
    colors = defaultdict(lambda: '')
    if color:
        colors.update({
            "test": "\033[0;36m",    # CYAN
            "node0": "\033[0;34m",   # BLUE
            "node1": "\033[0;32m",   # GREEN
            "node2": "\033[0;31m",   # RED
            "node3": "\033[0;33m",   # YELLOW
            "reset": "\033[0m",      # Reset font color
        })
    for event in log_events:
        print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
# Script entry point: combine and render the logs when run directly.
if __name__ == '__main__':
    main()
|
import argparse
import utils
def main(args):
    """Align paragraph boundaries of a *.doc.tokens file with its gold EDU
    segmentation: paragraph breaks that do not coincide with an EDU boundary
    are removed, and the merged paragraphs are written to <path>.fixed.
    """
    path = args.path
    assert path.endswith(".doc.tokens")
    # Gold EDUs: one tokenized EDU per line of the sibling .edus.tokens file.
    edus = utils.read_lines(path.replace(".doc.tokens", ".edus.tokens"), process=lambda line: line.split()) # List[List[str]]
    # Paragraphs: blank lines in the .doc.tokens file separate paragraphs.
    lines = utils.read_lines(path, process=lambda line: line.split())
    paras = [] # List[List[List[str]]]
    para = [lines[0]]
    for i in range(1, len(lines)):
        line = lines[i]
        if len(line) == 0 and len(para) == 0:
            # Consecutive blank lines: nothing to close.
            continue
        elif len(line) == 0 and len(para) != 0:
            # A blank line ends the current paragraph.
            paras.append(para)
            para = []
        else:
            para.append(line)
    if len(para) != 0:
        # Flush a trailing paragraph with no terminating blank line.
        paras.append(para)
    # Token offsets of the last token of each gold EDU.
    edu_end_positions = []
    tok_i = 0
    for edu in edus:
        length = len(edu)
        edu_end_positions.append(tok_i + length - 1)
        tok_i += length
    # Token offsets of the last token of each paragraph.
    para_end_positions = []
    tok_i = 0
    for para in paras:
        length = len([token for tokens in para for token in tokens])
        para_end_positions.append(tok_i + length - 1)
        tok_i += length
    cnt = len(para_end_positions)
    # Keep only paragraph boundaries that are also EDU boundaries.
    para_end_positions = list(set(edu_end_positions) & set(para_end_positions))
    para_end_positions.sort()
    # Re-make paragraphs: merge consecutive paragraphs until the next
    # surviving boundary is reached.
    # NOTE(review): assumes the document's final paragraph always ends on a
    # gold EDU boundary; otherwise para_end_positions[pos_i] below would
    # raise IndexError -- confirm against the corpus.
    new_paras = []
    new_para = []
    tok_i = 0
    pos_i = 0
    for para in paras:
        new_para.extend(para)
        length = len([token for tokens in para for token in tokens])
        tok_i += length
        if tok_i - 1 == para_end_positions[pos_i]:
            new_paras.append(new_para)
            new_para = []
            pos_i += 1
    assert pos_i == len(para_end_positions) == len(new_paras)
    paras = new_paras
    # Write the fixed paragraphs, blank-line separated, one line per sentence.
    with open(path + ".fixed", "w") as f:
        for para in new_paras:
            for tokens in para:
                tokens = " ".join(tokens)
                f.write("%s\n" % tokens)
            f.write("\n")
    diff = cnt - len(para_end_positions)
    print("Removed %d paragraph boundaries." % diff)
if __name__ == "__main__":
    # CLI entry point: --path must point at a *.doc.tokens file.
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str, required=True)
    args = parser.parse_args()
    main(args)
|
from typing import Any, List
import json
from time import sleep
from datetime import date
from os import path
from api import BilibiliApi
from writer import write_md, write_raw_data
BASE_PATH = './archive'
NAP_TIME = .5
def generate_md(raw_data: BilibiliApi.RAW_DATA_T) -> str:
    """Render a numbered Markdown list of video links from raw API data."""
    entries = []
    for video in raw_data:
        url = f'https://www.bilibili.com/video/{video["bvid"]}'
        entries.append(f'1. [{video["title"]}]({url})')
    return '\n'.join(entries)
def generate_md_table_row(row: List[Any]) -> str:
    """Render one Markdown table row from a list of cells.

    Cells are stringified first: callers (summarize_tags) pass int counts,
    which would make a plain str.join raise TypeError.
    """
    return f'| {" | ".join(str(cell) for cell in row)} |\n'
def summarize_tags(api: BilibiliApi, loc: str, name: str, aids: List[str]) -> BilibiliApi.RAW_DATA_T:
    """Aggregate tag frequencies for the given video ids and write a Markdown
    tag-distribution table to <loc>/Tags/<name>.

    :param api: client used to fetch each video's tag list
    :param loc: base output directory for this day's archive
    :param name: output filename for the summary table
    :param aids: video ids to summarize (any iterable of ids works)
    """
    all_tags = {}
    for aid in aids:
        sleep(NAP_TIME)  # rate-limit the API calls
        for tag in api.get_tag(aid):
            entry = all_tags.get(tag['tag_id'])
            if entry is not None:
                entry['day_count'] += 1
            else:
                all_tags[tag['tag_id']] = {'data': tag, 'day_count': 1}
    # NOTE(review): the raw aggregate is written to Tags/README.md -- confirm
    # that overwriting one shared file per call is intended.
    write_raw_data(all_tags, path.join(loc, 'Tags', 'README.md'))
    summary = []
    for tag in all_tags.values():
        # Use a local distinct from the `name` parameter; the original code
        # shadowed it, corrupting the output path below.
        tag_name = tag['data']['tag_name']
        summary.append((tag_name, tag['day_count']))
    # Most frequent tags first. (Bug fix: the original called the undefined
    # builtin `sort(...)` with a misspelled `acending` keyword.)
    summary.sort(key=lambda item: item[1], reverse=True)
    summary_header = ['Tag', 'Count']
    summary_md = '# Tag Distribution\n'
    summary_md += generate_md_table_row(summary_header)
    summary_md += generate_md_table_row(['---'] * len(summary_header))
    for row in summary:
        summary_md += generate_md_table_row(row)
    write_md(summary_md, path.join(loc, 'Tags', name))
def summarize_highest_ranked(api: BilibiliApi, loc: str) -> BilibiliApi.RAW_DATA_T:
    """Fetch today's highest-ranked videos, archive the raw JSON, and
    summarize their tags; returns the raw video list."""
    highest_ranked = api.get_highest_ranked()
    write_raw_data(highest_ranked, path.join(loc, 'Raw', 'highest_ranked.json'))
    video_ids = [video['aid'] for video in highest_ranked]
    summarize_tags(api, loc, 'highest_ranked.json', video_ids)
    return highest_ranked
def summarize_most_popular(api: BilibiliApi, loc: str) -> BilibiliApi.RAW_DATA_T:
    """Fetch today's most-popular videos, archive the raw JSON, and
    summarize their tags; returns the raw video list."""
    most_popular = api.get_most_popular()
    write_raw_data(most_popular, path.join(loc, 'Raw', 'most_popular.json'))
    video_ids = (video['aid'] for video in most_popular)
    summarize_tags(api, loc, 'most_popular.json', video_ids)
    return most_popular
def summarize_today():
    """Build today's archive folder and write a combined Markdown summary of
    the highest-ranked and most-popular videos."""
    today = date.today().isoformat()
    loc = path.join(BASE_PATH, 'Bilibili', today)
    api = BilibiliApi()
    ranked = summarize_highest_ranked(api, loc)
    popular = summarize_most_popular(api, loc)
    sections = [
        '# Highest Ranked Videos\n' + generate_md(ranked),
        '# Most Popular Videos\n' + generate_md(popular),
    ]
    write_md('\n\n'.join(sections), path.join(loc, 'README.md'))
# Script entry point: snapshot today's rankings when run directly.
if __name__ == '__main__':
    summarize_today()
|
from __future__ import annotations
from itertools import chain
import typing as t
from .exceptions import CannotResolve, CircularDependency, PartiallyResolved
from .helpers import EMPTY, _LookupStack, _Stack
from .types import DependencyInfo, T
from .resolvers import Resolver
class SimpleRepository:
    """Single-layer dependency repository.

    Maps registered classes to their factories (DependencyInfo), caches
    created instances, and delegates actual construction to a chain of
    Resolver plugins.
    """

    def __init__(self, resolvers: t.Iterable[Resolver]):
        # Resolver plugins, tried in order by resolve().
        self.resolvers: t.List[Resolver] = list(resolvers)
        # Class name -> class, enabling string keys in get().
        self.types_by_str: t.Dict[str, t.Type[T]] = {}
        # Registered class (and each of its MRO bases) -> registration info.
        self.types: t.Dict[t.Type, DependencyInfo] = {}
        # Class -> stored instances, most recently added last.
        self.instances: t.Dict[t.Type[T], t.List[T]] = {}
        # Tracks in-progress lookups to detect circular dependencies.
        self.lookup_stack = _LookupStack()

    def get(
        self, key: t.Union[t.Type[T], str], many=False, repo=None, kwargs=None
    ) -> t.Union[t.List[T], T]:
        """Return an instance for ``key`` (or all stored instances when
        ``many`` is True).

        ``key`` may be a class, a registered class name, or a plain callable
        (resolved as a factory). Passing ``kwargs`` bypasses the instance
        cache and builds a fresh object.
        """
        if isinstance(key, str):
            # Resolve a string key to the registered class of that name.
            key = self.types_by_str.get(key)
            if key is None:
                raise CannotResolve(f"Could not resolve for key {key}")
        if many:
            # All stored instances; empty list when none were stored.
            return self.instances.get(key, [])
        if not isinstance(key, type) and callable(key):
            # A bare factory function: resolve it directly, keyed by its name.
            name = getattr(key, "__name__", str(key))
            return self.resolve(factory=key, key=name, repo=repo, kwargs=kwargs)
        if kwargs is not None or key not in self.instances:
            # Explicit kwargs always build a fresh instance; otherwise build
            # only when nothing is cached yet.
            inst = self.create(key, repo=repo, kwargs=kwargs)
            return inst
        # Cached case: the most recently stored instance wins.
        instances = self.instances[key]
        return instances[-1]

    def create(self, key: t.Type[T], repo=None, kwargs=None) -> T:
        """Instantiate the object and all its dependencies

        :param key: The class / factory function to instantiate
        :param repo: The current ``Repository`` used for requesting dependencies
        :param kwargs: Any user specified keyword arguments (see :func:`gimme.get`). These
            will have preference over any keyword arguments this function supplies and any
            keyword arguments supplied by :func:`gimme.register`
        """
        repo = repo or self
        if not isinstance(key, type):
            raise TypeError(f"Can only create classes, not {key}")
        if key in self.lookup_stack:
            # key is already being resolved further up the stack.
            raise CircularDependency(str(self.lookup_stack))
        info = self._ensure_info(key)
        # Only cache when no ad-hoc kwargs were given and registration allows it.
        do_store = kwargs is None and info.store
        # User kwargs override registration-time kwargs.
        combined_kwargs = {**(info.kwargs or {}), **(kwargs or {})}
        inst = self.resolve(info.factory, key=key, repo=repo, kwargs=combined_kwargs)
        if do_store:
            self.add(inst)
        return inst

    def resolve(self, factory, key, repo=None, kwargs=None):
        """Resolve a factory function. This resolves all function parameters as dependencies,
        runs the function and returns the result
        """
        inst = EMPTY
        with self.lookup_stack.push(key):
            # First resolver that succeeds wins; resolution failures fall
            # through to the next plugin.
            for plugin in self.resolvers:
                try:
                    return plugin.create(factory, repo, kwargs)
                except (CannotResolve, PartiallyResolved):
                    continue
            # NOTE: inst is never reassigned, so reaching this point (no
            # plugin succeeded) always raises.
            if inst is EMPTY:
                raise CannotResolve(str(self.lookup_stack))

    def _ensure_info(self, cls: t.Type[T]) -> DependencyInfo:
        # Return existing registration info, registering cls on demand.
        info = self.types.get(cls)
        if info:
            return info
        self.register(cls)
        return self._ensure_info(cls)

    def add(self, inst, deep=True):
        """Store an instance, registering its class; with ``deep`` the
        instance is also indexed under every base class in the MRO."""
        def append_instance_to(key):
            if key not in self.instances:
                self.instances[key] = []
            self.instances[key].append(inst)

        cls = type(inst)
        self.register(cls)
        append_instance_to(cls)
        if deep:
            for base in cls.__mro__[1:]:
                append_instance_to(base)

    def register(
        self,
        cls: t.Type[T] = None,
        factory: t.Callable = None,
        info: DependencyInfo = None,
        store=True,
        kwargs=None,
    ):
        """Register a class (or a prebuilt DependencyInfo) with the repository.

        Exactly one of ``cls`` or ``info`` must be supplied. The info is
        indexed under the class and every base in its MRO that is not yet
        registered, and each such class name becomes a valid string key.
        """
        if not (bool(cls) ^ bool(info)):
            raise ValueError("Supply either cls or info")
        if info is None:
            if not isinstance(cls, type):
                raise TypeError(f"Can only register classes, not {cls}")
            if factory is None:
                factory = cls
            info = DependencyInfo(cls=cls, factory=factory, store=store, kwargs=kwargs)
        for base in info.cls.__mro__:
            if base not in self.types:
                key = base.__name__
                self.types_by_str[key] = base
                self.types[base] = info

    def add_resolver(self, resolver: Resolver):
        # New resolvers take precedence over existing ones.
        self.resolvers.insert(0, resolver)

    def __contains__(self, item: t.Type):
        return item in self.instances
class LayeredRepository(_Stack[SimpleRepository]):
    """A stack of SimpleRepository layers.

    Lookups try the top (most recent) layer first and fall back to lower
    layers; the base layer can never be popped. Unknown attributes are
    proxied to the current layer.
    """

    def __init__(self, first_layer: SimpleRepository):
        super().__init__([first_layer])

    @property
    def current(self) -> SimpleRepository:
        # The top-most (most recently pushed) layer.
        return self[-1]

    def get(self, key: t.Union[t.Type[T], str], many=False, kwargs=None) -> t.Union[t.List[T], T]:
        """Look up ``key`` across the layers, top-down; with ``many`` collect
        stored instances from every layer instead."""
        err = None
        if many:
            return list(chain.from_iterable(repo.get(key, many) for repo in self))
        for repo in reversed(self):
            try:
                # Pass self as repo so nested dependencies resolve layered too.
                return repo.get(key, many, repo=self, kwargs=kwargs)
            except CannotResolve as e:
                # Remember the failure; a lower layer may still succeed.
                err = e
        # Re-raise the last failure (there is always >= 1 layer, so err is
        # set whenever the loop finished without returning).
        if err:
            raise err

    def create(self, key: t.Type[T]) -> T:
        # Always build in the current (top) layer.
        return self.current.create(key, repo=self)

    def pop(self):
        # The base layer must remain.
        if len(self) <= 1:
            raise IndexError("Cannot pop the base repository layer")
        return super().pop()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Used as a context manager: leaving the block discards the top
        # layer; at the base layer the IndexError is deliberately ignored.
        try:
            self.pop()
        except IndexError:
            pass

    def __getattr__(self, item):
        # Delegate anything else (register, add, ...) to the current layer.
        return getattr(self.current, item)

    def __contains__(self, item: t.Type):
        return any(item in repo for repo in self)
|
from ipaddress import ip_address, IPv4Network, IPv6Network
from typing import Iterable, Union, Any
from sector.server.outbound_message import NodeType
def is_in_network(peer_host: str, networks: Iterable[Union[IPv4Network, IPv6Network]]) -> bool:
    """Return True when peer_host parses as an IP address that belongs to
    any of the given networks; non-IP strings are treated as outside."""
    try:
        addr = ip_address(peer_host)
    except ValueError:
        # Not a valid IPv4/IPv6 literal (e.g. a hostname).
        return False
    for network in networks:
        if addr in network:
            return True
    return False
def is_localhost(peer_host: str) -> bool:
    """Return True for the loopback host in any of its common spellings."""
    return peer_host in ("127.0.0.1", "localhost", "::1", "0:0:0:0:0:0:0:1")
def class_for_type(type: NodeType) -> Any:
    """Map a NodeType to its API class.

    Imports are deliberately local so that only the selected service's
    module (and its dependency chain) is loaded.

    :raises ValueError: for a NodeType with no associated API class.
    """
    if type is NodeType.FULL_NODE:
        from sector.full_node.full_node_api import FullNodeAPI

        return FullNodeAPI
    elif type is NodeType.WALLET:
        from sector.wallet.wallet_node_api import WalletNodeAPI

        return WalletNodeAPI
    elif type is NodeType.INTRODUCER:
        from sector.introducer.introducer_api import IntroducerAPI

        return IntroducerAPI
    elif type is NodeType.TIMELORD:
        from sector.timelord.timelord_api import TimelordAPI

        return TimelordAPI
    elif type is NodeType.FARMER:
        from sector.farmer.farmer_api import FarmerAPI

        return FarmerAPI
    elif type is NodeType.HARVESTER:
        from sector.harvester.harvester_api import HarvesterAPI

        return HarvesterAPI
    raise ValueError("No class for type")
|
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import authenticate
class AuthenticationUserTestCase(APITestCase):
    """Authentication checks for the Django auth backend and the Company list endpoint."""

    def setUp(self):
        # Endpoint under test plus a superuser fixture.
        self.list_url = reverse('Company-list')
        self.user = User.objects.create_superuser('root', password='root')

    def test_authentication_user_credentials(self):
        """Valid credentials authenticate successfully."""
        authenticated = authenticate(username='root', password='root')
        self.assertTrue((authenticated is not None) and authenticated.is_authenticated)

    def test_get_request_with_not_authenticated_user(self):
        """An anonymous GET on the list endpoint is rejected with 401."""
        response = self.client.get(self.list_url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_authentication_incorrect_username(self):
        """A wrong username does not authenticate."""
        authenticated = authenticate(username='rot', password='root')
        self.assertFalse((authenticated is not None) and authenticated.is_authenticated)

    def test_authentication_incorrect_password(self):
        """A wrong password does not authenticate."""
        authenticated = authenticate(username='root', password='123')
        self.assertFalse((authenticated is not None) and authenticated.is_authenticated)

    def test_get_request_with_authenticated_user(self):
        """An authenticated GET on the list endpoint succeeds with 200."""
        self.client.force_authenticate(self.user)
        response = self.client.get(self.list_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for include.IncludeNode'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import os
import unittest
import zlib
from grit.node import misc
from grit.node import include
from grit.node import empty
from grit import grd_reader
from grit import util
class IncludeNodeUnittest(unittest.TestCase):
  """Tests for include.IncludeNode path resolution and gzip compression.

  NOTE: this file uses Python 2 ``ur''`` literals and will not parse under
  Python 3.
  """

  def testGetPath(self):
    # Build a minimal <grit><release><includes><include/> tree by hand.
    root = misc.GritNode()
    root.StartParsing(u'grit', None)
    root.HandleAttribute(u'latest_public_release', u'0')
    root.HandleAttribute(u'current_release', u'1')
    root.HandleAttribute(u'base_dir', ur'..\resource')
    release = misc.ReleaseNode()
    release.StartParsing(u'release', root)
    release.HandleAttribute(u'seq', u'1')
    root.AddChild(release)
    includes = empty.IncludesNode()
    includes.StartParsing(u'includes', release)
    release.AddChild(includes)
    include_node = include.IncludeNode()
    include_node.StartParsing(u'include', includes)
    include_node.HandleAttribute(u'file', ur'flugel\kugel.pdf')
    includes.AddChild(include_node)
    root.EndParsing()

    # With a base_dir, the include path is resolved relative to it (and
    # backslashes are normalized).
    self.assertEqual(root.ToRealPath(include_node.GetInputPath()),
                     util.normpath(
                       os.path.join(ur'../resource', ur'flugel/kugel.pdf')))

  def testGetPathNoBasedir(self):
    # Same tree as above, but the include opts out of base_dir resolution.
    root = misc.GritNode()
    root.StartParsing(u'grit', None)
    root.HandleAttribute(u'latest_public_release', u'0')
    root.HandleAttribute(u'current_release', u'1')
    root.HandleAttribute(u'base_dir', ur'..\resource')
    release = misc.ReleaseNode()
    release.StartParsing(u'release', root)
    release.HandleAttribute(u'seq', u'1')
    root.AddChild(release)
    includes = empty.IncludesNode()
    includes.StartParsing(u'includes', release)
    release.AddChild(includes)
    include_node = include.IncludeNode()
    include_node.StartParsing(u'include', includes)
    include_node.HandleAttribute(u'file', ur'flugel\kugel.pdf')
    include_node.HandleAttribute(u'use_base_dir', u'false')
    includes.AddChild(include_node)
    root.EndParsing()

    # use_base_dir="false": the path is resolved without ../resource.
    self.assertEqual(root.ToRealPath(include_node.GetInputPath()),
                     util.normpath(
                       os.path.join(ur'../', ur'flugel/kugel.pdf')))

  def testCompressGzip(self):
    # An include with compress="gzip" must produce data that gunzips back
    # to the original file contents.
    root = util.ParseGrdForUnittest('''
        <includes>
          <include name="TEST_TXT" file="test_text.txt"
                   compress="gzip" type="BINDATA"/>
        </includes>''', base_dir = util.PathFromRoot('grit/testdata'))
    inc, = root.GetChildrenOfType(include.IncludeNode)
    throwaway, compressed = inc.GetDataPackPair(lang='en', encoding=1)

    # 16 + MAX_WBITS tells zlib to expect a gzip (not raw zlib) header.
    decompressed_data = zlib.decompress(compressed, 16 + zlib.MAX_WBITS)
    self.assertEqual(util.ReadFile(util.PathFromRoot('grit/testdata')
                                   + "/test_text.txt", util.BINARY),
                     decompressed_data)
if __name__ == '__main__':
  # Discover and run all TestCase classes in this module.
  unittest.main()
|
from django.contrib import admin
# Register your models here.
from . import models
@admin.register(models.ItemList)
class ItemListAdmin(admin.ModelAdmin):
    """Admin options for ItemList: list view shows the title and content columns."""

    list_display = ("title", "content")
|
#!/usr/bin/python3
from sentiment import get_sentiment
from pprint import pprint
#--- run
def run(text='not bad'):
    """Compute the sentiment of *text* and pretty-print the result.

    Args:
        text: Phrase to analyse. Defaults to the original hard-coded demo
            phrase, so existing no-argument calls behave exactly as before.
    """
    sentiment = get_sentiment(text)
    pprint(sentiment)

# Script entry: run the demo with the default phrase.
run()
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: openconfig_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='openconfig_service.proto',
package='openconfig',
syntax='proto3',
serialized_pb=_b('\n\x18openconfig_service.proto\x12\nopenconfig\"-\n\x17GetDataEncodingsRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\"\xb9\x01\n\x18GetDataEncodingsResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x39\n\x08\x65ncoding\x18\x02 \x03(\x0e\x32\'.openconfig.OpenConfigDataEncodingTypes\x12=\n\rresponse_code\x18\x03 \x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x04 \x01(\t\"g\n\x16SetDataEncodingRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x39\n\x08\x65ncoding\x18\x02 \x01(\x0e\x32\'.openconfig.OpenConfigDataEncodingTypes\"}\n\x17SetDataEncodingResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12=\n\rresponse_code\x18\x02 \x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x03 \x01(\t\"&\n\x10GetModelsRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\"9\n\x05Model\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\"\x99\x01\n\x11GetModelsResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12 \n\x05model\x18\x02 \x03(\x0b\x32\x11.openconfig.Model\x12=\n\rresponse_code\x18\x03 \x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x04 \x01(\t\"d\n\x0eGetRequestList\x12\x14\n\x0coperation_id\x18\x01 \x01(\t\x12.\n\toperation\x18\x02 \x01(\x0e\x32\x1b.openconfig.GetDataCommands\x12\x0c\n\x04path\x18\x03 \x01(\t\"\x8c\x01\n\nGetRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x39\n\x08\x65ncoding\x18\x02 \x01(\x0e\x32\'.openconfig.OpenConfigDataEncodingTypes\x12/\n\x0bget_request\x18\x03 \x03(\x0b\x32\x1a.openconfig.GetRequestList\"\xed\x01\n\x0bGetResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x36\n\x08response\x18\x02 \x03(\x0b\x32$.openconfig.GetResponse.ResponseList\x1a\x91\x01\n\x0cResponseList\x12\x14\n\x0coperation_id\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12=\n\rresponse_code\x18\x04 
\x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x05 \x01(\t\"\xb2\x02\n\nSetRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x13\n\x0btransaction\x18\x02 \x01(\x08\x12\x39\n\x08\x65ncoding\x18\x03 \x01(\x0e\x32\'.openconfig.OpenConfigDataEncodingTypes\x12\x44\n\x10\x63onfig_operation\x18\x04 \x03(\x0b\x32*.openconfig.SetRequest.ConfigOperationList\x1az\n\x13\x43onfigOperationList\x12\x14\n\x0coperation_id\x18\x01 \x01(\t\x12\x30\n\toperation\x18\x02 \x01(\x0e\x32\x1d.openconfig.SetConfigCommands\x12\x0c\n\x04path\x18\x03 \x01(\t\x12\r\n\x05value\x18\x04 \x01(\t\"\xcf\x01\n\x0bSetResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\x04\x12\x36\n\x08response\x18\x02 \x03(\x0b\x32$.openconfig.SetResponse.ResponseList\x1at\n\x0cResponseList\x12\x14\n\x0coperation_id\x18\x01 \x01(\t\x12=\n\rresponse_code\x18\x02 \x01(\x0e\x32&.openconfig.OpenConfigRpcResponseTypes\x12\x0f\n\x07message\x18\x03 \x01(\t*B\n\x1bOpenConfigDataEncodingTypes\x12\x10\n\x0c\x45NCODING_XML\x10\x00\x12\x11\n\rENCODING_JSON\x10\x01*M\n\x0fGetDataCommands\x12\x0b\n\x07GET_ALL\x10\x00\x12\x0e\n\nGET_CONFIG\x10\x01\x12\x0f\n\x0bGET_OPSTATE\x10\x02\x12\x0c\n\x08GET_OPER\x10\x03*M\n\x11SetConfigCommands\x12\x11\n\rUPDATE_CONFIG\x10\x00\x12\x12\n\x0eREPLACE_CONFIG\x10\x01\x12\x11\n\rDELETE_CONFIG\x10\x02*\xc1\x01\n\x1aOpenConfigRpcResponseTypes\x12\x06\n\x02OK\x10\x00\x12\x07\n\x03NOK\x10\x01\x12\x14\n\x10UNSUPPORTED_PATH\x10\x02\x12\x10\n\x0cINVALID_PATH\x10\x03\x12\x19\n\x15INVALID_CONFIGURATION\x10\x04\x12\x18\n\x14UNSUPPORTED_INTERVAL\x10\x05\x12\x1b\n\x17INVALID_SUBSCRIPTION_ID\x10\x06\x12\x18\n\x14UNSUPPORTED_ENCODING\x10\x07\x32\x91\x03\n\x10OpenconfigRpcApi\x12_\n\x10GetDataEncodings\x12#.openconfig.GetDataEncodingsRequest\x1a$.openconfig.GetDataEncodingsResponse\"\x00\x12\\\n\x0fSetDataEncoding\x12\".openconfig.SetDataEncodingRequest\x1a#.openconfig.SetDataEncodingResponse\"\x00\x12J\n\tGetModels\x12\x1c.openconfig.GetModelsRequest\x1a\x1d.openconfig.GetModelsResponse\"\x
00\x12\x38\n\x03Get\x12\x16.openconfig.GetRequest\x1a\x17.openconfig.GetResponse\"\x00\x12\x38\n\x03Set\x12\x16.openconfig.SetRequest\x1a\x17.openconfig.SetResponse\"\x00\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Generated EnumDescriptor for openconfig.OpenConfigDataEncodingTypes.
# The serialized_start/serialized_end values are byte offsets into the
# serialized_pb blob in DESCRIPTOR above — do not edit by hand; regenerate
# from openconfig_service.proto instead.
_OPENCONFIGDATAENCODINGTYPES = _descriptor.EnumDescriptor(
  name='OpenConfigDataEncodingTypes',
  full_name='openconfig.OpenConfigDataEncodingTypes',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='ENCODING_XML', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ENCODING_JSON', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=1766,
  serialized_end=1832,
)
_sym_db.RegisterEnumDescriptor(_OPENCONFIGDATAENCODINGTYPES)
# Public wrapper exposing the enum values by name (e.g. wrapper.ENCODING_XML).
OpenConfigDataEncodingTypes = enum_type_wrapper.EnumTypeWrapper(_OPENCONFIGDATAENCODINGTYPES)
_GETDATACOMMANDS = _descriptor.EnumDescriptor(
name='GetDataCommands',
full_name='openconfig.GetDataCommands',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='GET_ALL', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GET_CONFIG', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GET_OPSTATE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GET_OPER', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1834,
serialized_end=1911,
)
_sym_db.RegisterEnumDescriptor(_GETDATACOMMANDS)
GetDataCommands = enum_type_wrapper.EnumTypeWrapper(_GETDATACOMMANDS)
_SETCONFIGCOMMANDS = _descriptor.EnumDescriptor(
name='SetConfigCommands',
full_name='openconfig.SetConfigCommands',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UPDATE_CONFIG', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REPLACE_CONFIG', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETE_CONFIG', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1913,
serialized_end=1990,
)
_sym_db.RegisterEnumDescriptor(_SETCONFIGCOMMANDS)
SetConfigCommands = enum_type_wrapper.EnumTypeWrapper(_SETCONFIGCOMMANDS)
_OPENCONFIGRPCRESPONSETYPES = _descriptor.EnumDescriptor(
name='OpenConfigRpcResponseTypes',
full_name='openconfig.OpenConfigRpcResponseTypes',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOK', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED_PATH', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_PATH', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_CONFIGURATION', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED_INTERVAL', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_SUBSCRIPTION_ID', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED_ENCODING', index=7, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1993,
serialized_end=2186,
)
_sym_db.RegisterEnumDescriptor(_OPENCONFIGRPCRESPONSETYPES)
OpenConfigRpcResponseTypes = enum_type_wrapper.EnumTypeWrapper(_OPENCONFIGRPCRESPONSETYPES)
# Module-level integer aliases for every enum value declared above,
# following the protobuf generated-code convention of exporting each
# value at module scope for convenient direct access.
ENCODING_XML = 0
ENCODING_JSON = 1
GET_ALL = 0
GET_CONFIG = 1
GET_OPSTATE = 2
GET_OPER = 3
UPDATE_CONFIG = 0
REPLACE_CONFIG = 1
DELETE_CONFIG = 2
OK = 0
NOK = 1
UNSUPPORTED_PATH = 2
INVALID_PATH = 3
INVALID_CONFIGURATION = 4
UNSUPPORTED_INTERVAL = 5
INVALID_SUBSCRIPTION_ID = 6
UNSUPPORTED_ENCODING = 7
_GETDATAENCODINGSREQUEST = _descriptor.Descriptor(
name='GetDataEncodingsRequest',
full_name='openconfig.GetDataEncodingsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetDataEncodingsRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=85,
)
_GETDATAENCODINGSRESPONSE = _descriptor.Descriptor(
name='GetDataEncodingsResponse',
full_name='openconfig.GetDataEncodingsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetDataEncodingsResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoding', full_name='openconfig.GetDataEncodingsResponse.encoding', index=1,
number=2, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.GetDataEncodingsResponse.response_code', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.GetDataEncodingsResponse.message', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=273,
)
_SETDATAENCODINGREQUEST = _descriptor.Descriptor(
name='SetDataEncodingRequest',
full_name='openconfig.SetDataEncodingRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.SetDataEncodingRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoding', full_name='openconfig.SetDataEncodingRequest.encoding', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=275,
serialized_end=378,
)
_SETDATAENCODINGRESPONSE = _descriptor.Descriptor(
name='SetDataEncodingResponse',
full_name='openconfig.SetDataEncodingResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.SetDataEncodingResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.SetDataEncodingResponse.response_code', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.SetDataEncodingResponse.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=380,
serialized_end=505,
)
_GETMODELSREQUEST = _descriptor.Descriptor(
name='GetModelsRequest',
full_name='openconfig.GetModelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetModelsRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=507,
serialized_end=545,
)
_MODEL = _descriptor.Descriptor(
name='Model',
full_name='openconfig.Model',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='openconfig.Model.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='namespace', full_name='openconfig.Model.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='openconfig.Model.version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=604,
)
_GETMODELSRESPONSE = _descriptor.Descriptor(
name='GetModelsResponse',
full_name='openconfig.GetModelsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetModelsResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model', full_name='openconfig.GetModelsResponse.model', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.GetModelsResponse.response_code', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.GetModelsResponse.message', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=607,
serialized_end=760,
)
_GETREQUESTLIST = _descriptor.Descriptor(
name='GetRequestList',
full_name='openconfig.GetRequestList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation_id', full_name='openconfig.GetRequestList.operation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operation', full_name='openconfig.GetRequestList.operation', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path', full_name='openconfig.GetRequestList.path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=762,
serialized_end=862,
)
_GETREQUEST = _descriptor.Descriptor(
name='GetRequest',
full_name='openconfig.GetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoding', full_name='openconfig.GetRequest.encoding', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='get_request', full_name='openconfig.GetRequest.get_request', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=865,
serialized_end=1005,
)
_GETRESPONSE_RESPONSELIST = _descriptor.Descriptor(
name='ResponseList',
full_name='openconfig.GetResponse.ResponseList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation_id', full_name='openconfig.GetResponse.ResponseList.operation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path', full_name='openconfig.GetResponse.ResponseList.path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='openconfig.GetResponse.ResponseList.value', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.GetResponse.ResponseList.response_code', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.GetResponse.ResponseList.message', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1100,
serialized_end=1245,
)
_GETRESPONSE = _descriptor.Descriptor(
name='GetResponse',
full_name='openconfig.GetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.GetResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response', full_name='openconfig.GetResponse.response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_GETRESPONSE_RESPONSELIST, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1008,
serialized_end=1245,
)
_SETREQUEST_CONFIGOPERATIONLIST = _descriptor.Descriptor(
name='ConfigOperationList',
full_name='openconfig.SetRequest.ConfigOperationList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation_id', full_name='openconfig.SetRequest.ConfigOperationList.operation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operation', full_name='openconfig.SetRequest.ConfigOperationList.operation', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='path', full_name='openconfig.SetRequest.ConfigOperationList.path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='openconfig.SetRequest.ConfigOperationList.value', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1432,
serialized_end=1554,
)
_SETREQUEST = _descriptor.Descriptor(
name='SetRequest',
full_name='openconfig.SetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.SetRequest.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transaction', full_name='openconfig.SetRequest.transaction', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoding', full_name='openconfig.SetRequest.encoding', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='config_operation', full_name='openconfig.SetRequest.config_operation', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SETREQUEST_CONFIGOPERATIONLIST, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1248,
serialized_end=1554,
)
_SETRESPONSE_RESPONSELIST = _descriptor.Descriptor(
name='ResponseList',
full_name='openconfig.SetResponse.ResponseList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation_id', full_name='openconfig.SetResponse.ResponseList.operation_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_code', full_name='openconfig.SetResponse.ResponseList.response_code', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='openconfig.SetResponse.ResponseList.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1648,
serialized_end=1764,
)
_SETRESPONSE = _descriptor.Descriptor(
name='SetResponse',
full_name='openconfig.SetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='openconfig.SetResponse.request_id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response', full_name='openconfig.SetResponse.response', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SETRESPONSE_RESPONSELIST, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1557,
serialized_end=1764,
)
_GETDATAENCODINGSRESPONSE.fields_by_name['encoding'].enum_type = _OPENCONFIGDATAENCODINGTYPES
_GETDATAENCODINGSRESPONSE.fields_by_name['response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_SETDATAENCODINGREQUEST.fields_by_name['encoding'].enum_type = _OPENCONFIGDATAENCODINGTYPES
_SETDATAENCODINGRESPONSE.fields_by_name['response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_GETMODELSRESPONSE.fields_by_name['model'].message_type = _MODEL
_GETMODELSRESPONSE.fields_by_name['response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_GETREQUESTLIST.fields_by_name['operation'].enum_type = _GETDATACOMMANDS
_GETREQUEST.fields_by_name['encoding'].enum_type = _OPENCONFIGDATAENCODINGTYPES
_GETREQUEST.fields_by_name['get_request'].message_type = _GETREQUESTLIST
_GETRESPONSE_RESPONSELIST.fields_by_name['response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_GETRESPONSE_RESPONSELIST.containing_type = _GETRESPONSE
_GETRESPONSE.fields_by_name['response'].message_type = _GETRESPONSE_RESPONSELIST
_SETREQUEST_CONFIGOPERATIONLIST.fields_by_name['operation'].enum_type = _SETCONFIGCOMMANDS
_SETREQUEST_CONFIGOPERATIONLIST.containing_type = _SETREQUEST
_SETREQUEST.fields_by_name['encoding'].enum_type = _OPENCONFIGDATAENCODINGTYPES
_SETREQUEST.fields_by_name['config_operation'].message_type = _SETREQUEST_CONFIGOPERATIONLIST
_SETRESPONSE_RESPONSELIST.fields_by_name['response_code'].enum_type = _OPENCONFIGRPCRESPONSETYPES
_SETRESPONSE_RESPONSELIST.containing_type = _SETRESPONSE
_SETRESPONSE.fields_by_name['response'].message_type = _SETRESPONSE_RESPONSELIST
DESCRIPTOR.message_types_by_name['GetDataEncodingsRequest'] = _GETDATAENCODINGSREQUEST
DESCRIPTOR.message_types_by_name['GetDataEncodingsResponse'] = _GETDATAENCODINGSRESPONSE
DESCRIPTOR.message_types_by_name['SetDataEncodingRequest'] = _SETDATAENCODINGREQUEST
DESCRIPTOR.message_types_by_name['SetDataEncodingResponse'] = _SETDATAENCODINGRESPONSE
DESCRIPTOR.message_types_by_name['GetModelsRequest'] = _GETMODELSREQUEST
DESCRIPTOR.message_types_by_name['Model'] = _MODEL
DESCRIPTOR.message_types_by_name['GetModelsResponse'] = _GETMODELSRESPONSE
DESCRIPTOR.message_types_by_name['GetRequestList'] = _GETREQUESTLIST
DESCRIPTOR.message_types_by_name['GetRequest'] = _GETREQUEST
DESCRIPTOR.message_types_by_name['GetResponse'] = _GETRESPONSE
DESCRIPTOR.message_types_by_name['SetRequest'] = _SETREQUEST
DESCRIPTOR.message_types_by_name['SetResponse'] = _SETRESPONSE
DESCRIPTOR.enum_types_by_name['OpenConfigDataEncodingTypes'] = _OPENCONFIGDATAENCODINGTYPES
DESCRIPTOR.enum_types_by_name['GetDataCommands'] = _GETDATACOMMANDS
DESCRIPTOR.enum_types_by_name['SetConfigCommands'] = _SETCONFIGCOMMANDS
DESCRIPTOR.enum_types_by_name['OpenConfigRpcResponseTypes'] = _OPENCONFIGRPCRESPONSETYPES
GetDataEncodingsRequest = _reflection.GeneratedProtocolMessageType('GetDataEncodingsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETDATAENCODINGSREQUEST,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetDataEncodingsRequest)
))
_sym_db.RegisterMessage(GetDataEncodingsRequest)
GetDataEncodingsResponse = _reflection.GeneratedProtocolMessageType('GetDataEncodingsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETDATAENCODINGSRESPONSE,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetDataEncodingsResponse)
))
_sym_db.RegisterMessage(GetDataEncodingsResponse)
SetDataEncodingRequest = _reflection.GeneratedProtocolMessageType('SetDataEncodingRequest', (_message.Message,), dict(
DESCRIPTOR = _SETDATAENCODINGREQUEST,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetDataEncodingRequest)
))
_sym_db.RegisterMessage(SetDataEncodingRequest)
SetDataEncodingResponse = _reflection.GeneratedProtocolMessageType('SetDataEncodingResponse', (_message.Message,), dict(
DESCRIPTOR = _SETDATAENCODINGRESPONSE,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetDataEncodingResponse)
))
_sym_db.RegisterMessage(SetDataEncodingResponse)
GetModelsRequest = _reflection.GeneratedProtocolMessageType('GetModelsRequest', (_message.Message,), dict(
DESCRIPTOR = _GETMODELSREQUEST,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetModelsRequest)
))
_sym_db.RegisterMessage(GetModelsRequest)
Model = _reflection.GeneratedProtocolMessageType('Model', (_message.Message,), dict(
DESCRIPTOR = _MODEL,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.Model)
))
_sym_db.RegisterMessage(Model)
GetModelsResponse = _reflection.GeneratedProtocolMessageType('GetModelsResponse', (_message.Message,), dict(
DESCRIPTOR = _GETMODELSRESPONSE,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetModelsResponse)
))
_sym_db.RegisterMessage(GetModelsResponse)
GetRequestList = _reflection.GeneratedProtocolMessageType('GetRequestList', (_message.Message,), dict(
DESCRIPTOR = _GETREQUESTLIST,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetRequestList)
))
_sym_db.RegisterMessage(GetRequestList)
GetRequest = _reflection.GeneratedProtocolMessageType('GetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETREQUEST,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetRequest)
))
_sym_db.RegisterMessage(GetRequest)
GetResponse = _reflection.GeneratedProtocolMessageType('GetResponse', (_message.Message,), dict(
ResponseList = _reflection.GeneratedProtocolMessageType('ResponseList', (_message.Message,), dict(
DESCRIPTOR = _GETRESPONSE_RESPONSELIST,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetResponse.ResponseList)
))
,
DESCRIPTOR = _GETRESPONSE,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.GetResponse)
))
_sym_db.RegisterMessage(GetResponse)
_sym_db.RegisterMessage(GetResponse.ResponseList)
SetRequest = _reflection.GeneratedProtocolMessageType('SetRequest', (_message.Message,), dict(
ConfigOperationList = _reflection.GeneratedProtocolMessageType('ConfigOperationList', (_message.Message,), dict(
DESCRIPTOR = _SETREQUEST_CONFIGOPERATIONLIST,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetRequest.ConfigOperationList)
))
,
DESCRIPTOR = _SETREQUEST,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetRequest)
))
_sym_db.RegisterMessage(SetRequest)
_sym_db.RegisterMessage(SetRequest.ConfigOperationList)
SetResponse = _reflection.GeneratedProtocolMessageType('SetResponse', (_message.Message,), dict(
ResponseList = _reflection.GeneratedProtocolMessageType('ResponseList', (_message.Message,), dict(
DESCRIPTOR = _SETRESPONSE_RESPONSELIST,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetResponse.ResponseList)
))
,
DESCRIPTOR = _SETRESPONSE,
__module__ = 'openconfig_service_pb2'
# @@protoc_insertion_point(class_scope:openconfig.SetResponse)
))
_sym_db.RegisterMessage(SetResponse)
_sym_db.RegisterMessage(SetResponse.ResponseList)
# Legacy (pre-*_pb2_grpc.py) service stubs generated by protoc/grpcio.
# The whole section is guarded by try/except ImportError so this module still
# imports when grpc — or its removed ``grpc.beta``/``grpc.framework`` API — is
# unavailable. Generated code: do not edit by hand.
try:
  # THESE ELEMENTS WILL BE DEPRECATED.
  # Please use the generated *_pb2_grpc.py files instead.
  import grpc
  from grpc.framework.common import cardinality
  from grpc.framework.interfaces.face import utilities as face_utilities
  from grpc.beta import implementations as beta_implementations
  from grpc.beta import interfaces as beta_interfaces
  # Client-side stub: one unary-unary callable per RPC of OpenconfigRpcApi.
  class OpenconfigRpcApiStub(object):
    """
    MGD Service Definitions
    """
    def __init__(self, channel):
      """Constructor.
      Args:
        channel: A grpc.Channel.
      """
      self.GetDataEncodings = channel.unary_unary(
          '/openconfig.OpenconfigRpcApi/GetDataEncodings',
          request_serializer=GetDataEncodingsRequest.SerializeToString,
          response_deserializer=GetDataEncodingsResponse.FromString,
          )
      self.SetDataEncoding = channel.unary_unary(
          '/openconfig.OpenconfigRpcApi/SetDataEncoding',
          request_serializer=SetDataEncodingRequest.SerializeToString,
          response_deserializer=SetDataEncodingResponse.FromString,
          )
      self.GetModels = channel.unary_unary(
          '/openconfig.OpenconfigRpcApi/GetModels',
          request_serializer=GetModelsRequest.SerializeToString,
          response_deserializer=GetModelsResponse.FromString,
          )
      self.Get = channel.unary_unary(
          '/openconfig.OpenconfigRpcApi/Get',
          request_serializer=GetRequest.SerializeToString,
          response_deserializer=GetResponse.FromString,
          )
      self.Set = channel.unary_unary(
          '/openconfig.OpenconfigRpcApi/Set',
          request_serializer=SetRequest.SerializeToString,
          response_deserializer=SetResponse.FromString,
          )
  # Server-side base class: every method answers UNIMPLEMENTED until overridden.
  class OpenconfigRpcApiServicer(object):
    """
    MGD Service Definitions
    """
    def GetDataEncodings(self, request, context):
      """
      Return the set of data encodings supported by the device for
      configuration and telemetry data modeled in YANG
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')
    def SetDataEncoding(self, request, context):
      """
      Select and set one of the data encodings returned by
      getDataEncodings. This RPC sets the global encoding
      serialization for all data exchanged with the target
      device. The global data encoding may be optionally overriden
      by setting the encoding for an individual RPC if supported by the target
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')
    def GetModels(self, request, context):
      """
      Returns a repeated structure of supported data models
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')
    def Get(self, request, context):
      """
      Requests data from the network device. The get RPC
      request should include a subcommand to indicate the type of
      data desired by the requestor. Supported types of data
      include: configuration data (config: true nodes in the schema)
      operational state data (config: false nodes)
      derived operational state only (config: false nodes that
      represent derived operational state, exluding config: false
      nodes that represent applied configuration.
      all data (config: true and config: false nodes)
      A get RPC can contain multiple requests for data. Each
      request includes a path specifying a subtree in the data
      model, and a command to indicate which type of data should be returned
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')
    def Set(self, request, context):
      """
      Modify configuration on the target device. The set
      RPC accepts a combination of commands, each with an
      associated path specification to indicate which data should be modified.
      The commands in a set request should be fully validated and accepted by
      the device before a response is returned. The
      application of the configuration commands may or may not be
      complete when the command returns. The NMS is expected to be
      able to track the application of the configuration using the
      operational state data in the telemetry stream, or by
      retrieving the state data using an RPC
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')
  # Registers a servicer's handlers with a grpc.Server under the full service name.
  def add_OpenconfigRpcApiServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'GetDataEncodings': grpc.unary_unary_rpc_method_handler(
            servicer.GetDataEncodings,
            request_deserializer=GetDataEncodingsRequest.FromString,
            response_serializer=GetDataEncodingsResponse.SerializeToString,
        ),
        'SetDataEncoding': grpc.unary_unary_rpc_method_handler(
            servicer.SetDataEncoding,
            request_deserializer=SetDataEncodingRequest.FromString,
            response_serializer=SetDataEncodingResponse.SerializeToString,
        ),
        'GetModels': grpc.unary_unary_rpc_method_handler(
            servicer.GetModels,
            request_deserializer=GetModelsRequest.FromString,
            response_serializer=GetModelsResponse.SerializeToString,
        ),
        'Get': grpc.unary_unary_rpc_method_handler(
            servicer.Get,
            request_deserializer=GetRequest.FromString,
            response_serializer=GetResponse.SerializeToString,
        ),
        'Set': grpc.unary_unary_rpc_method_handler(
            servicer.Set,
            request_deserializer=SetRequest.FromString,
            response_serializer=SetResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'openconfig.OpenconfigRpcApi', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
  # Beta-API servicer shim (grpc.beta); kept only for grpcio<0.15.0 migration.
  class BetaOpenconfigRpcApiServicer(object):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This class was generated
    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
    """
    MGD Service Definitions
    """
    def GetDataEncodings(self, request, context):
      """
      Return the set of data encodings supported by the device for
      configuration and telemetry data modeled in YANG
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
    def SetDataEncoding(self, request, context):
      """
      Select and set one of the data encodings returned by
      getDataEncodings. This RPC sets the global encoding
      serialization for all data exchanged with the target
      device. The global data encoding may be optionally overriden
      by setting the encoding for an individual RPC if supported by the target
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
    def GetModels(self, request, context):
      """
      Returns a repeated structure of supported data models
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
    def Get(self, request, context):
      """
      Requests data from the network device. The get RPC
      request should include a subcommand to indicate the type of
      data desired by the requestor. Supported types of data
      include: configuration data (config: true nodes in the schema)
      operational state data (config: false nodes)
      derived operational state only (config: false nodes that
      represent derived operational state, exluding config: false
      nodes that represent applied configuration.
      all data (config: true and config: false nodes)
      A get RPC can contain multiple requests for data. Each
      request includes a path specifying a subtree in the data
      model, and a command to indicate which type of data should be returned
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
    def Set(self, request, context):
      """
      Modify configuration on the target device. The set
      RPC accepts a combination of commands, each with an
      associated path specification to indicate which data should be modified.
      The commands in a set request should be fully validated and accepted by
      the device before a response is returned. The
      application of the configuration commands may or may not be
      complete when the command returns. The NMS is expected to be
      able to track the application of the configuration using the
      operational state data in the telemetry stream, or by
      retrieving the state data using an RPC
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
  # Beta-API stub shim: abstract signatures only; real stubs come from
  # beta_create_OpenconfigRpcApi_stub below.
  class BetaOpenconfigRpcApiStub(object):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This class was generated
    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
    """
    MGD Service Definitions
    """
    def GetDataEncodings(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """
      Return the set of data encodings supported by the device for
      configuration and telemetry data modeled in YANG
      """
      raise NotImplementedError()
    GetDataEncodings.future = None
    def SetDataEncoding(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """
      Select and set one of the data encodings returned by
      getDataEncodings. This RPC sets the global encoding
      serialization for all data exchanged with the target
      device. The global data encoding may be optionally overriden
      by setting the encoding for an individual RPC if supported by the target
      """
      raise NotImplementedError()
    SetDataEncoding.future = None
    def GetModels(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """
      Returns a repeated structure of supported data models
      """
      raise NotImplementedError()
    GetModels.future = None
    def Get(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """
      Requests data from the network device. The get RPC
      request should include a subcommand to indicate the type of
      data desired by the requestor. Supported types of data
      include: configuration data (config: true nodes in the schema)
      operational state data (config: false nodes)
      derived operational state only (config: false nodes that
      represent derived operational state, exluding config: false
      nodes that represent applied configuration.
      all data (config: true and config: false nodes)
      A get RPC can contain multiple requests for data. Each
      request includes a path specifying a subtree in the data
      model, and a command to indicate which type of data should be returned
      """
      raise NotImplementedError()
    Get.future = None
    def Set(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """
      Modify configuration on the target device. The set
      RPC accepts a combination of commands, each with an
      associated path specification to indicate which data should be modified.
      The commands in a set request should be fully validated and accepted by
      the device before a response is returned. The
      application of the configuration commands may or may not be
      complete when the command returns. The NMS is expected to be
      able to track the application of the configuration using the
      operational state data in the telemetry stream, or by
      retrieving the state data using an RPC
      """
      raise NotImplementedError()
    Set.future = None
  def beta_create_OpenconfigRpcApi_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This function was
    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
    request_deserializers = {
      ('openconfig.OpenconfigRpcApi', 'Get'): GetRequest.FromString,
      ('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsRequest.FromString,
      ('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsRequest.FromString,
      ('openconfig.OpenconfigRpcApi', 'Set'): SetRequest.FromString,
      ('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingRequest.FromString,
    }
    response_serializers = {
      ('openconfig.OpenconfigRpcApi', 'Get'): GetResponse.SerializeToString,
      ('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsResponse.SerializeToString,
      ('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsResponse.SerializeToString,
      ('openconfig.OpenconfigRpcApi', 'Set'): SetResponse.SerializeToString,
      ('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingResponse.SerializeToString,
    }
    method_implementations = {
      ('openconfig.OpenconfigRpcApi', 'Get'): face_utilities.unary_unary_inline(servicer.Get),
      ('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): face_utilities.unary_unary_inline(servicer.GetDataEncodings),
      ('openconfig.OpenconfigRpcApi', 'GetModels'): face_utilities.unary_unary_inline(servicer.GetModels),
      ('openconfig.OpenconfigRpcApi', 'Set'): face_utilities.unary_unary_inline(servicer.Set),
      ('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): face_utilities.unary_unary_inline(servicer.SetDataEncoding),
    }
    server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
    return beta_implementations.server(method_implementations, options=server_options)
  def beta_create_OpenconfigRpcApi_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This function was
    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
    request_serializers = {
      ('openconfig.OpenconfigRpcApi', 'Get'): GetRequest.SerializeToString,
      ('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsRequest.SerializeToString,
      ('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsRequest.SerializeToString,
      ('openconfig.OpenconfigRpcApi', 'Set'): SetRequest.SerializeToString,
      ('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingRequest.SerializeToString,
    }
    response_deserializers = {
      ('openconfig.OpenconfigRpcApi', 'Get'): GetResponse.FromString,
      ('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsResponse.FromString,
      ('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsResponse.FromString,
      ('openconfig.OpenconfigRpcApi', 'Set'): SetResponse.FromString,
      ('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingResponse.FromString,
    }
    cardinalities = {
      'Get': cardinality.Cardinality.UNARY_UNARY,
      'GetDataEncodings': cardinality.Cardinality.UNARY_UNARY,
      'GetModels': cardinality.Cardinality.UNARY_UNARY,
      'Set': cardinality.Cardinality.UNARY_UNARY,
      'SetDataEncoding': cardinality.Cardinality.UNARY_UNARY,
    }
    stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
    return beta_implementations.dynamic_stub(channel, 'openconfig.OpenconfigRpcApi', cardinalities, options=stub_options)
except ImportError:
  pass
# @@protoc_insertion_point(module_scope)
|
# -*- coding: UTF-8 -*-
import collections
import functools
import logging
import time
import grpc
from urllib.parse import urlparse
from . import __version__
from .types import Status, DataType, DeployMode
from .check import check_pass_param, is_legal_host, is_legal_port, is_legal_index_metric_type, \
is_legal_binary_index_metric_type
from .pool import ConnectionPool, SingleConnectionPool, SingletonThreadPool
from .exceptions import BaseException, ParamError, DeprecatedError
from ..settings import DefaultConfig as config
from .utils import valid_binary_metric_types
from .utils import valid_index_types
from .utils import valid_binary_index_types
from .utils import valid_index_params_keys
from .utils import check_invalid_binary_vector
LOGGER = logging.getLogger(__name__)
def deprecated(func):
    """Decorator that disables *func*: every call logs an error and raises
    DeprecatedError instead of executing the wrapped function."""
    @functools.wraps(func)
    def inner(*args, **kwargs):
        message = "Function {} has been deprecated".format(func.__name__)
        LOGGER.error(message)
        raise DeprecatedError(message)
    return inner
def retry_on_rpc_failure(retry_times=10, wait=1, retry_on_deadline=True):
    """Decorator factory that retries a Milvus RPC method on transient failures.

    Only ``grpc.StatusCode.DEADLINE_EXCEEDED`` and ``UNAVAILABLE`` are
    retried; any other gRPC status (and any non-gRPC exception) propagates
    immediately. Between attempts the decorated method's connection pool is
    rebuilt via ``self._update_connection_pool()``.

    :param retry_times: maximum number of attempts before giving up.
    :param wait: seconds to sleep between attempts.
    :param retry_on_deadline: when False, DEADLINE_EXCEEDED is not retried.
    :raises BaseException: with code 1 and message "rpc timeout" when the
        retry budget is exhausted on a DEADLINE_EXCEEDED error.
    """
    def wrapper(func):
        @functools.wraps(func)
        def handler(self, *args, **kwargs):
            counter = 1
            while True:
                try:
                    return func(self, *args, **kwargs)
                except grpc.RpcError as e:
                    # DEADLINE_EXCEEDED means that the task was not completed
                    # UNAVAILABLE means that the service is not reachable currently
                    # Reference: https://grpc.github.io/grpc/python/grpc.html#grpc-status-code
                    if e.code() != grpc.StatusCode.DEADLINE_EXCEEDED and e.code() != grpc.StatusCode.UNAVAILABLE:
                        raise e
                    if not retry_on_deadline and e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
                        raise e
                    if counter >= retry_times:
                        if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
                            raise BaseException(1, "rpc timeout")
                        raise e
                    time.sleep(wait)
                    self._update_connection_pool()
                # NOTE: the original ``except Exception as e: raise e`` clause was
                # removed — a bare re-raise is identical to not catching at all.
                finally:
                    counter += 1
        return handler
    return wrapper
def _pool_args(**kwargs):
pool_kwargs = dict()
for k, v in kwargs.items():
if k in ("pool_size", "wait_timeout", "handler", "try_connect", "pre_ping", "max_retry"):
pool_kwargs[k] = v
return pool_kwargs
def _set_uri(host, port, uri, handler="GRPC"):
    """Resolve (host, port, uri) into a pool URI string, e.g. "tcp://host:port".

    Resolution order: an explicit ``host`` wins (paired with ``port`` or the
    configured default port); otherwise ``uri`` (or the configured default
    URI) is parsed — but only when ``port`` is also None. A ``port`` given
    without a ``host`` is rejected as an incomplete parameter set.

    :raises ParamError: when parameters are incomplete, the uri cannot be
        parsed, or the resolved host/port fail validation.
    """
    default_port = config.GRPC_PORT if handler == "GRPC" else config.HTTP_PORT
    default_uri = config.GRPC_URI if handler == "GRPC" else config.HTTP_URI
    uri_prefix = "tcp://" if handler == "GRPC" else "http://"
    if host is not None:
        _port = port if port is not None else default_port
        _host = host
    elif port is None:
        # No host and no port: fall back to parsing the (default) uri.
        try:
            _uri = urlparse(uri) if uri else urlparse(default_uri)
            _host = _uri.hostname
            _port = _uri.port
        except (AttributeError, ValueError, TypeError) as e:
            raise ParamError("uri is illegal: {}".format(e))
    else:
        # port without host is ambiguous — refuse it.
        raise ParamError("Param is not complete. Please invoke as follow:\n"
                         "\t(host = ${HOST}, port = ${PORT})\n"
                         "\t(uri = ${URI})\n")
    if not is_legal_host(_host) or not is_legal_port(_port):
        raise ParamError("host {} or port {} is illegal".format(_host, _port))
    return "{}{}:{}".format(uri_prefix, str(_host), str(_port))
class Milvus:
    def __init__(self, host=None, port=None, handler="GRPC", pool="SingletonThread", channel=None, **kwargs):
        """Create a Milvus client backed by a gRPC connection pool.

        :param host: server hostname/IP, used together with ``port``.
        :param port: server port; defaults to the configured gRPC port.
        :param handler: transport; only "GRPC" is supported (anything else
            raises NotImplementedError).
        :param pool: pool strategy — "QueuePool", "SingletonThread" or
            "Singleton" (see ``_update_connection_pool``).
        :param channel: optional pre-built grpc channel; when given, the
            initial server health check is skipped.
        :param kwargs: extra options, e.g. ``uri``, ``name``, ``timeout`` and
            pool settings accepted by ``_pool_args``.
        """
        self._name = kwargs.get('name', None)
        self._uri = None
        self._status = None
        self._connected = False
        self._handler = handler
        # store extra key-words arguments
        self._kw = kwargs
        if handler != "GRPC":
            raise NotImplementedError("only grpc handler is supported now!")
        _uri = kwargs.get('uri', None)
        self._pool_type = pool
        self._pool_uri = _set_uri(host, port, _uri, self._handler)
        self._pool_kwargs = _pool_args(handler=handler, **kwargs)
        self._update_connection_pool(channel=channel)
        # NOTE(review): this overwrites the _deploy_mode that
        # _wait_for_healthy() (triggered by the pool update above) may have
        # just detected from the server — confirm this ordering is intended.
        self._deploy_mode = DeployMode.Distributed
def _wait_for_healthy(self, timeout=60):
_timeout_on_every_retry = self._kw.get("timeout", 0)
_timeout = _timeout_on_every_retry if _timeout_on_every_retry else timeout
with self._connection() as handler:
start_time = time.time()
while (time.time() - start_time < _timeout):
try:
status = handler.fake_register_link(_timeout)
if status.error_code == 0:
self._deploy_mode = status.reason
return
except Exception:
pass
finally:
time.sleep(1)
raise Exception("server is not healthy, please try again later")
def __enter__(self):
self._conn = self._pool.fetch()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._conn.close()
self._conn = None
    def __del__(self):
        # Drop the pool reference on garbage collection; pool teardown is
        # delegated to the pool object's own finalization.
        self._pool = None
def _connection(self):
if self._pool:
return self._pool.fetch()
raise Exception("Connection is already closed")
def _update_connection_pool(self, channel=None):
self._pool = None
if self._pool_type == "QueuePool":
self._pool = ConnectionPool(self._pool_uri, **self._pool_kwargs)
elif self._pool_type == "SingletonThread":
self._pool = SingletonThreadPool(self._pool_uri, channel=channel, **self._pool_kwargs)
elif self._pool_type == "Singleton":
self._pool = SingleConnectionPool(self._pool_uri, **self._pool_kwargs)
else:
raise ParamError("Unknown pool value: {}".format(self._pool_type))
if not channel:
self._wait_for_healthy()
    @property
    def name(self):
        # Client name supplied via the ``name`` keyword argument, or None.
        return self._name
    @property
    def handler(self):
        # Transport handler string given at construction (always "GRPC").
        return self._handler
def close(self):
"""
Close client instance
"""
if self._pool:
self._pool = None
return
raise Exception("connection was already closed!")
@retry_on_rpc_failure(retry_times=10, wait=1)
def create_collection(self, collection_name, fields, shards_num=2, timeout=None, **kwargs):
"""
Creates a collection.
:param collection_name: The name of the collection. A collection name can only include
numbers, letters, and underscores, and must not begin with a number.
:type collection_name: str
:param fields: Field parameters.
:type fields: dict
` {"fields": [
{"field": "A", "type": DataType.INT32}
{"field": "B", "type": DataType.INT64},
{"field": "C", "type": DataType.FLOAT},
{"field": "Vec", "type": DataType.FLOAT_VECTOR,
"params": {"dim": 128}}
],
"auto_id": True}`
:param shards_num: How wide to scale collection. Corresponds to how many active datanodes
can be used on insert.
:type shards_num: int
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: None
:rtype: NoneType
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
with self._connection() as handler:
return handler.create_collection(collection_name, fields, shards_num=shards_num, timeout=timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1, retry_on_deadline=False)
def drop_collection(self, collection_name, timeout=None):
"""
Deletes a specified collection.
:param collection_name: The name of the collection to delete.
:type collection_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: None
:rtype: NoneType
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name)
with self._connection() as handler:
return handler.drop_collection(collection_name, timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def has_collection(self, collection_name, timeout=None):
"""
Checks whether a specified collection exists.
:param collection_name: The name of the collection to check.
:type collection_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: If specified collection exists
:rtype: bool
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name)
with self._connection() as handler:
return handler.has_collection(collection_name, timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def describe_collection(self, collection_name, timeout=None):
"""
Returns the schema of specified collection.
Example: {'collection_name': 'create_collection_eXgbpOtn', 'auto_id': True, 'description': '',
'fields': [{'field_id': 100, 'name': 'INT32', 'description': '', 'type': 4, 'params': {},
{'field_id': 101, 'name': 'FLOAT_VECTOR', 'description': '', 'type': 101,
'params': {'dim': '128'}}]}
:param collection_name: The name of the collection to describe.
:type collection_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: The schema of collection to describe.
:rtype: dict
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name)
with self._connection() as handler:
return handler.describe_collection(collection_name, timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def load_collection(self, collection_name, timeout=None, **kwargs):
"""
Loads a specified collection from disk to memory.
:param collection_name: The name of the collection to load.
:type collection_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: None
:rtype: NoneType
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name)
with self._connection() as handler:
return handler.load_collection("", collection_name=collection_name, timeout=timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def release_collection(self, collection_name, timeout=None):
"""
Clear collection data from memory.
:param collection_name: The name of collection to release.
:type collection_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: None
:rtype: NoneType
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name)
with self._connection() as handler:
return handler.release_collection(db_name="", collection_name=collection_name, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def get_collection_stats(self, collection_name, timeout=None, **kwargs):
"""
Returns collection statistics information.
Example: {"row_count": 10}
:param collection_name: The name of collection.
:type collection_name: str.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: statistics information
:rtype: dict
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
with self._connection() as handler:
stats = handler.get_collection_stats(collection_name, timeout, **kwargs)
result = {stat.key: stat.value for stat in stats}
result["row_count"] = int(result["row_count"])
return result
@retry_on_rpc_failure(retry_times=10, wait=1)
def list_collections(self, timeout=None):
"""
Returns a list of all collection names.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: List of collection names, return when operation is successful
:rtype: list[str]
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
with self._connection() as handler:
return handler.list_collections(timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def create_partition(self, collection_name, partition_name, timeout=None):
"""
Creates a partition in a specified collection. You only need to import the
parameters of partition_name to create a partition. A collection cannot hold
partitions of the same tag, whilst you can insert the same tag in different collections.
:param collection_name: The name of the collection to create partitions in.
:type collection_name: str
:param partition_name: The tag name of the partition to create.
:type partition_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: None
:rtype: NoneType
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name, partition_name=partition_name)
with self._connection() as handler:
return handler.create_partition(collection_name, partition_name, timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def drop_partition(self, collection_name, partition_name, timeout=None):
"""
Deletes the specified partition in a collection. Note that the default partition
'_default' is not permitted to delete. When a partition deleted, all data stored in it
will be deleted.
:param collection_name: The name of the collection to delete partitions from.
:type collection_name: str
:param partition_name: The tag name of the partition to delete.
:type partition_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: None
:rtype: NoneType
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name, partition_name=partition_name)
with self._connection() as handler:
return handler.drop_partition(collection_name, partition_name, timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def has_partition(self, collection_name, partition_name, timeout=None):
"""
Checks if a specified partition exists in a collection.
:param collection_name: The name of the collection to find the partition in.
:type collection_name: str
:param partition_name: The tag name of the partition to check
:type partition_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: Whether a specified partition exists in a collection.
:rtype: bool
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name, partition_name=partition_name)
with self._connection() as handler:
return handler.has_partition(collection_name, partition_name, timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def load_partitions(self, collection_name, partition_names, timeout=None):
"""
Load specified partitions from disk to memory.
:param collection_name: The collection name which partitions belong to.
:type collection_name: str
:param partition_names: The specified partitions to load.
:type partition_names: list[str]
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: None
:rtype: NoneType
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name)
with self._connection() as handler:
return handler.load_partitions(db_name="", collection_name=collection_name,
partition_names=partition_names, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def release_partitions(self, collection_name, partition_names, timeout=None):
"""
Clear partitions data from memory.
:param collection_name: The collection name which partitions belong to.
:type collection_name: str
:param partition_names: The specified partition to release.
:type partition_names: list[str]
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:return: None
:rtype: NoneType
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
"""
check_pass_param(collection_name=collection_name)
with self._connection() as handler:
return handler.release_partitions(db_name="", collection_name=collection_name,
partition_names=partition_names, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def list_partitions(self, collection_name, timeout=None):
    """Return the names of all partitions of a collection.

    :param collection_name: name of the collection to inspect
    :type collection_name: str
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :return: all partition names of the collection
    :rtype: list[str]
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    check_pass_param(collection_name=collection_name)
    with self._connection() as conn:
        return conn.list_partitions(collection_name, timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def get_partition_stats(self, collection_name, partition_name, timeout=None, **kwargs):
    """Return statistics of a partition, e.g. ``{"row_count": 10}``.

    :param collection_name: name of the collection
    :type collection_name: str
    :param partition_name: name of the partition
    :type partition_name: str
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :return: statistics information
    :rtype: dict
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    check_pass_param(collection_name=collection_name)
    with self._connection() as conn:
        raw_stats = conn.get_partition_stats(collection_name, partition_name, timeout, **kwargs)
        # server returns key/value pairs; normalise row_count to an int
        info = {item.key: item.value for item in raw_stats}
        info["row_count"] = int(info["row_count"])
        return info
@retry_on_rpc_failure(retry_times=10, wait=1)
def create_alias(self, collection_name, alias, timeout=None, **kwargs):
    """Attach an alias to a collection.

    An alias can point at only one collection, but one collection may carry
    several aliases, e.g. with aliases ``["tom"]``,
    ``create_alias("collection_1", "bob")`` yields ``["tom", "bob"]``.

    :param collection_name: name of the collection
    :type collection_name: str
    :param alias: alias to attach to the collection
    :type alias: str
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :return: None
    :rtype: NoneType
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    check_pass_param(collection_name=collection_name)
    with self._connection() as conn:
        return conn.create_alias(collection_name, alias, timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def drop_alias(self, alias, timeout=None, **kwargs):
    """Delete an alias.

    No collection name is required: the server knows which collection the
    alias belongs to. E.g. with aliases ``["tom", "bob"]``,
    ``drop_alias("bob")`` leaves ``["tom"]``.

    :param alias: the alias to delete
    :type alias: str
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :return: None
    :rtype: NoneType
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    with self._connection() as conn:
        return conn.drop_alias(alias, timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def alter_alias(self, collection_name, alias, timeout=None, **kwargs):
    """Re-point an existing alias to another collection.

    The alias must already exist, otherwise the server returns an error;
    an alias can reference only one collection at a time. E.g. when "bob"
    points at collection_1, ``alter_alias("collection_2", "bob")`` moves it
    to collection_2.

    :param collection_name: name of the collection the alias should point to
    :type collection_name: str
    :param alias: the existing alias to re-point
    :type alias: str
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :return: None
    :rtype: NoneType
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    check_pass_param(collection_name=collection_name)
    with self._connection() as conn:
        return conn.alter_alias(collection_name, alias, timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def create_index(self, collection_name, field_name, params, timeout=None, **kwargs):
    """Create an index for a field of a collection.

    Milvus does not support multiple indexes on one field: creating a new
    index replaces the existing index files. Call load_collection() or
    load_partitions() afterwards to make the new index effective for search.

    :param collection_name: name of the collection to index
    :type collection_name: str
    :param field_name: name of the field to build the index on
    :type field_name: str
    :param params: indexing parameters; must contain the keys
        ``index_type``, ``metric_type`` and ``params`` (a dict of integer
        tuning values). Examples:
        IVF_FLAT:
            `{"metric_type": "L2", "index_type": "IVF_FLAT", "params": {"nlist": 1024}}`
        IVF_PQ:
            `{"metric_type": "L2", "index_type": "IVF_PQ", "params": {"nlist": 1024, "m": 8, "nbits": 8}}`
        IVF_SQ8:
            `{"metric_type": "L2", "index_type": "IVF_SQ8", "params": {"nlist": 1024}}`
        BIN_IVF_FLAT:
            `{"metric_type": "JACCARD", "index_type": "BIN_IVF_FLAT", "params": {"nlist": 1024}}`
        HNSW:
            `{"metric_type": "L2", "index_type": "HNSW", "params": {"M": 48, "efConstruction": 50}}`
        RHNSW_FLAT:
            `{"metric_type": "L2", "index_type": "RHNSW_FLAT", "params": {"M": 48, "efConstruction": 50}}`
        RHNSW_PQ:
            `{"metric_type": "L2", "index_type": "RHNSW_PQ", "params": {"M": 48, "efConstruction": 50, "PQM": 8}}`
        RHNSW_SQ:
            `{"metric_type": "L2", "index_type": "RHNSW_SQ", "params": {"M": 48, "efConstruction": 50}}`
        ANNOY:
            `{"metric_type": "L2", "index_type": "ANNOY", "params": {"n_trees": 8}}`
    :type params: dict
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :param kwargs:
        * *_async* (``bool``) --
          Indicate if invoke asynchronously. When value is true, method returns a IndexFuture object;
          otherwise, method returns results from server.
        * *_callback* (``function``) --
          The callback function which is invoked after server response successfully. It only take
          effect when _async is set to True.
    :return: None
    :rtype: NoneType
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    params = params or dict()
    if not isinstance(params, dict):
        raise ParamError("Params must be a dictionary type")
    # params preliminary validate
    if 'index_type' not in params:
        raise ParamError("Params must contains key: 'index_type'")
    if 'params' not in params:
        raise ParamError("Params must contains key: 'params'")
    if 'metric_type' not in params:
        raise ParamError("Params must contains key: 'metric_type'")
    if not isinstance(params['params'], dict):
        raise ParamError("Params['params'] must be a dictionary type")
    if params['index_type'] not in valid_index_types:
        raise ParamError("Invalid index_type: " + params['index_type'] +
                         ", which must be one of: " + str(valid_index_types))
    for k in params['params'].keys():
        if k not in valid_index_params_keys:
            raise ParamError("Invalid params['params'].key: " + k)
    for v in params['params'].values():
        if not isinstance(v, int):
            # BUG FIX: concatenating the raw value raised TypeError for any
            # non-str value instead of the intended ParamError -- stringify it.
            raise ParamError("Invalid params['params'].value: " + str(v) + ", which must be an integer")
    # filter invalid metric type
    if params['index_type'] in valid_binary_index_types:
        if not is_legal_binary_index_metric_type(params['index_type'], params['metric_type']):
            raise ParamError("Invalid metric_type: " + params['metric_type'] +
                             ", which does not match the index type: " + params['index_type'])
    else:
        if not is_legal_index_metric_type(params['index_type'], params['metric_type']):
            raise ParamError("Invalid metric_type: " + params['metric_type'] +
                             ", which does not match the index type: " + params['index_type'])
    with self._connection() as handler:
        return handler.create_index(collection_name, field_name, params, timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def drop_index(self, collection_name, field_name, timeout=None):
    """Remove the index built on a field of a collection.

    :param collection_name: name of the collection to drop the index from
    :type collection_name: str
    :param field_name: name of the indexed field
    :type field_name: str
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :return: None
    :rtype: NoneType
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    check_pass_param(collection_name=collection_name)
    check_pass_param(field_name=field_name)
    with self._connection() as conn:
        # the index name is fixed to the default index identifier
        return conn.drop_index(collection_name=collection_name,
                               field_name=field_name, index_name="_default_idx", timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def describe_index(self, collection_name, index_name="", timeout=None):
    """Return the schema of an index of a collection.

    Example: ``{'index_type': 'FLAT', 'metric_type': 'L2', 'params': {'nlist': 128}}``

    :param collection_name: name of the collection the index belongs to
    :type collection_name: str
    :param index_name: name of the index to describe (default: ``""``)
    :type index_name: str
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :return: the schema of the index
    :rtype: dict
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    check_pass_param(collection_name=collection_name)
    with self._connection() as conn:
        return conn.describe_index(collection_name, index_name, timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def insert(self, collection_name, entities, partition_name=None, timeout=None, **kwargs):
    """Insert entities into a collection.

    :param collection_name: name of the collection to insert into
    :type collection_name: str
    :param entities: the entities to insert
    :type entities: list
    :param partition_name: target partition; when ``None`` the server uses
        the "_default" partition
    :type partition_name: str
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :param kwargs:
        * *_async* (``bool``) --
          Indicate if invoke asynchronously. When value is true, method returns a MutationFuture object;
          otherwise, method returns results from server.
        * *_callback* (``function``) --
          The callback function which is invoked after server response successfully. It only take
          effect when _async is set to True.
    :return: ids of the inserted vectors
    :rtype: list[int]
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    # filter invalid binary data #1352: https://github.com/zilliztech/milvus-distributed/issues/1352
    if not check_invalid_binary_vector(entities):
        raise ParamError("Invalid binary vector data exists")
    with self._connection() as conn:
        return conn.bulk_insert(collection_name, entities, partition_name, timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def delete(self, collection_name, expr, partition_name=None, timeout=None, **kwargs):
    """Delete entities matching a boolean expression.

    The result shows which primary keys were deleted successfully.

    :param collection_name: name of the collection to delete from
    :type collection_name: str
    :param expr: expression selecting the entities to delete
    :type expr: str
    :param partition_name: name of the partition holding the entities
    :type partition_name: str
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :return: ids of the deleted vectors
    :rtype: list
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    check_pass_param(collection_name=collection_name)
    with self._connection() as conn:
        return conn.delete(collection_name, expr, partition_name, timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def flush(self, collection_names=None, timeout=None, **kwargs):
    """Seal all growing segments of the given collections.

    Milvus organizes data into segments, indexed per segment; a segment is
    normally sealed once large enough. flush() seals all growing segments
    immediately and force-triggers the index-creating tasks for any field
    that has an index specified.

    :param collection_names: names of the collections to flush
    :type collection_names: list[str]
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :param kwargs:
        * *_async* (``bool``) --
          Indicate if invoke asynchronously. When value is true, method returns a FlushFuture object;
          otherwise, method returns results from server.
        * *_callback* (``function``) --
          The callback function which is invoked after server response successfully. It only take
          effect when _async is set to True.
    :return: None
    :rtype: NoneType
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    # None and the empty list are both rejected here, so no separate
    # emptiness check is needed afterwards (the old `len(...) <= 0` branch
    # was unreachable dead code and has been removed).
    if collection_names in (None, []):
        raise ParamError("Collection name list can not be None or empty")
    if not isinstance(collection_names, list):
        raise ParamError("Collection name array must be type of list")
    for name in collection_names:
        check_pass_param(collection_name=name)
    with self._connection() as handler:
        return handler.flush(collection_names, timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def search(self, collection_name, dsl, partition_names=None, fields=None, timeout=None, **kwargs):
    """Search a collection with a DSL query and return matched results.

    :param collection_name: name of the collection to search
    :type collection_name: str
    :param dsl: the query definition, e.g. a ``bool``/``must`` tree that
        combines scalar ``range`` filters with a ``vector`` clause carrying
        ``metric_type``, search ``params`` (e.g. ``nprobe``), the ``query``
        vectors and ``topk``
    :type dsl: dict
    :param partition_names: tags of the partitions to search
    :type partition_names: list[str]
    :param fields: fields to return in the search result
    :type fields: list[str]
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :param kwargs:
        * *_async* (``bool``) --
          Indicate if invoke asynchronously. When value is true, method returns a SearchFuture object;
          otherwise, method returns results from server.
        * *_callback* (``function``) --
          The callback function which is invoked after server response successfully. It only take
          effect when _async is set to True.
    :return: iterable 2d-array-like result; the first dimension is the
        number of query vectors (nq), the second is topk. Each hit exposes
        ``id``, ``distance`` and ``score`` (score equals distance), e.g. for
        nq=4 / topk=10::

            >>> result = client.search(collection_name, dsl)
            >>> len(result), len(result[0]), result[0][0].distance
            (4, 10, 0.1)

    :rtype: QueryResult
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    with self._connection() as conn:
        kwargs["_deploy_mode"] = self._deploy_mode
        return conn.search(collection_name, dsl, partition_names, fields, timeout=timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def search_with_expression(self, collection_name, data, anns_field, param, limit, expression=None, partition_names=None,
                           output_fields=None, timeout=None, round_decimal=-1, **kwargs):
    """Vector search filtered by a boolean expression.

    :param collection_name: name of the collection to search
    :type collection_name: str
    :param data: query vectors; length of ``data`` is the number of queries
        (nq) and each vector's dim must equal the collection's vector field
    :type data: list[list[float]]
    :param anns_field: vector field of the collection to search on
    :type anns_field: str
    :param param: search parameters, such as nprobe
    :type param: dict
    :param limit: maximum number of returned records (a.k.a. topk)
    :type limit: int
    :param expression: boolean expression used to filter attributes
    :type expression: str
    :param partition_names: names of the partitions to search
    :type partition_names: list[str]
    :param output_fields: fields to return in the result (not supported now)
    :type output_fields: list[str]
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :param round_decimal: number of decimal places of returned distances
    :type round_decimal: int
    :param kwargs:
        * *_async* (``bool``) --
          Indicate if invoke asynchronously. When value is true, method returns a SearchFuture object;
          otherwise, method returns results from server.
        * *_callback* (``function``) --
          The callback function which is invoked after server response successfully. It only take
          effect when _async is set to True.
    :return: iterable 2d-array-like result; first dimension is nq, second is
        limit (topk)
    :rtype: QueryResult
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    check_pass_param(
        limit=limit,
        round_decimal=round_decimal,
        anns_field=anns_field,
        search_data=data,
        partition_name_array=partition_names,
        output_fields=output_fields,
    )
    with self._connection() as conn:
        kwargs["_deploy_mode"] = self._deploy_mode
        return conn.search_with_expression(collection_name, data, anns_field, param, limit, expression,
                                           partition_names, output_fields, timeout, round_decimal, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def calc_distance(self, vectors_left, vectors_right, params=None, timeout=None, **kwargs):
    """Calculate pairwise distances between two vector arrays.

    :param vectors_left: left operand; either a reference
        ``{"ids": [...], "collection": ..., "partition": ..., "field": ...}``
        or literal vectors ``{"float_vectors": [[...], ...]}`` /
        ``{"bin_vectors": [...]}``
    :type vectors_left: dict
    :param vectors_right: right operand, same forms as ``vectors_left``
    :type vectors_right: dict
    :param params: key-value options (metric type names are case insensitive):
        * "metric_type"/"metric": "L2"/"IP"/"HAMMING"/"TANIMOTO", default "L2"
        * "sqrt": true/false, default false; only for "L2"
        * "dim": set when the dimension is not a multiple of 8, otherwise it
          is calculated from the list length; only for "HAMMING"/"TANIMOTO"
        Examples: ``{"metric_type": "L2", "sqrt": true}``,
        ``{"metric_type": "IP"}``, ``{"metric_type": "HAMMING", "dim": 17}``,
        ``{"metric_type": "TANIMOTO"}``
    :type params: dict
    :return: distances flattened row-major: for left vectors L_1..L_3 and
        right vectors R_a..R_b the order is
        [D_1_a, D_1_b, D_2_a, D_2_b, D_3_a, D_3_b]
    :rtype: list[list[int]] for "HAMMING" or list[list[float]] for others
    """
    with self._connection() as conn:
        return conn.calc_distance(vectors_left, vectors_right, params, timeout, **kwargs)
@retry_on_rpc_failure(retry_times=10, wait=1)
def load_collection_progress(self, collection_name, timeout=None):
    """Forward to the connection's ``load_collection_progress`` RPC."""
    with self._connection() as conn:
        return conn.load_collection_progress(collection_name, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def load_partitions_progress(self, collection_name, partition_names, timeout=None):
    """Forward to the connection's ``load_partitions_progress`` RPC."""
    with self._connection() as conn:
        return conn.load_partitions_progress(collection_name, partition_names, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def wait_for_loading_collection_complete(self, collection_name, timeout=None):
    """Block until the server finishes loading the collection."""
    with self._connection() as conn:
        return conn.wait_for_loading_collection(collection_name, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def wait_for_loading_partitions_complete(self, collection_name, partition_names, timeout=None):
    """Block until the server finishes loading the given partitions."""
    with self._connection() as conn:
        return conn.wait_for_loading_partitions(collection_name, partition_names, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def get_index_build_progress(self, collection_name, index_name, timeout=None):
    """Forward to the connection's ``get_index_build_progress`` RPC."""
    with self._connection() as conn:
        return conn.get_index_build_progress(collection_name, index_name, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def wait_for_creating_index(self, collection_name, index_name, timeout=None):
    """Block until the server finishes building the given index."""
    with self._connection() as conn:
        return conn.wait_for_creating_index(collection_name, index_name, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def dummy(self, request_type, timeout=None):
    """Send a dummy request of the given type to the server."""
    with self._connection() as conn:
        return conn.dummy(request_type, timeout=timeout)
@retry_on_rpc_failure(retry_times=10, wait=1)
def query(self, collection_name, expr, output_fields=None, partition_names=None, timeout=None):
    """Retrieve all records that exactly match a query expression.

    :param collection_name: name of the collection to retrieve entities from
    :type collection_name: str
    :param expr: the query expression
    :type expr: str
    :param output_fields: fields to return for each matched record
    :type output_fields: list[str]
    :param partition_names: names of the partitions holding the entities
    :type partition_names: list[str]
    :param timeout: seconds to allow for the RPC; ``None`` waits until the
        server responds or errors
    :type timeout: float
    :return: all matching results
    :rtype: list
    :raises:
        RpcError: If gRPC encounter an error
        ParamError: If parameters are invalid
        BaseException: If the return result from server is not ok
    """
    with self._connection() as conn:
        return conn.query(collection_name, expr, output_fields, partition_names, timeout=timeout)
|
#######################################################################
# Copyright (C) #
# 2016 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# 2016 Kenta Shimada(hyperkentakun@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
# Windy-gridworld setup (Sutton & Barto, figure 6.4 example).
# Grid dimensions.
WORLD_HEIGHT = 7
WORLD_WIDTH = 10
# Upward wind strength for each column.
WIND = [0, 0, 0, 1, 1, 1, 2, 2, 1, 0]
# Action encoding.
ACTION_UP = 0
ACTION_DOWN = 1
ACTION_LEFT = 2
ACTION_RIGHT = 3
# Probability of taking a random exploratory action.
EPSILON = 0.1
# Sarsa step size.
ALPHA = 0.5
# Reward received on every step.
REWARD = -1.0
# Action-value table Q(state, action).
stateActionValues = np.zeros((WORLD_HEIGHT, WORLD_WIDTH, 4))
startState = [3, 0]
goalState = [3, 7]
actions = [ACTION_UP, ACTION_DOWN, ACTION_LEFT, ACTION_RIGHT]
# Precompute, for every cell and action, the resulting cell after the wind
# of the current column pushes the agent upward (clamped to the grid).
actionDestination = [
    [
        {
            ACTION_UP: [max(i - 1 - WIND[j], 0), j],
            ACTION_DOWN: [max(min(i + 1 - WIND[j], WORLD_HEIGHT - 1), 0), j],
            ACTION_LEFT: [max(i - WIND[j], 0), max(j - 1, 0)],
            ACTION_RIGHT: [max(i - WIND[j], 0), min(j + 1, WORLD_WIDTH - 1)],
        }
        for j in range(WORLD_WIDTH)
    ]
    for i in range(WORLD_HEIGHT)
]
# play for an episode
def oneEpisode():
    """Run a single Sarsa episode from startState to goalState.

    Updates the global ``stateActionValues`` table in place and returns the
    number of time steps the episode took.
    """
    def chooseAction(state):
        # Epsilon-greedy selection over the action values of `state`;
        # ties between maximal values are broken uniformly at random.
        if np.random.binomial(1, EPSILON) == 1:
            return np.random.choice(actions)
        values_ = stateActionValues[state[0], state[1], :]
        return np.random.choice(
            [action_ for action_, value_ in enumerate(values_) if value_ == np.max(values_)])

    # track the total time steps in this episode
    time = 0
    currentState = startState
    currentAction = chooseAction(currentState)
    # keep going until get to the goal state
    while currentState != goalState:
        newState = actionDestination[currentState[0]][currentState[1]][currentAction]
        newAction = chooseAction(newState)
        # Sarsa update: Q(s,a) += alpha * (r + Q(s',a') - Q(s,a))
        stateActionValues[currentState[0], currentState[1], currentAction] += \
            ALPHA * (REWARD + stateActionValues[newState[0], newState[1], newAction] -
                     stateActionValues[currentState[0], currentState[1], currentAction])
        currentState = newState
        currentAction = newAction
        time += 1
    return time
# play for 500 episodes to make sure to get a more converged policy
# figure 6.4
episodeLimit = 500
episodes = []
for ep in range(episodeLimit):
    # record the episode index once per elapsed time step
    episodes.extend([ep] * oneEpisode())
plt.figure()
plt.plot(episodes)
plt.xlabel('Time steps')
plt.ylabel('Episodes')
plt.show()
# display the optimal policy (greedy action per cell, 'G' at the goal)
optimalPolicy = []
for i in range(0, WORLD_HEIGHT):
    row = []
    for j in range(0, WORLD_WIDTH):
        if [i, j] == goalState:
            row.append('G')
        else:
            bestAction = np.argmax(stateActionValues[i, j, :])
            row.append({ACTION_UP: 'U', ACTION_DOWN: 'D',
                        ACTION_LEFT: 'L', ACTION_RIGHT: 'R'}[int(bestAction)])
    optimalPolicy.append(row)
print('Optimal policy is:')
for row in optimalPolicy:
    print(row)
print('Wind strength for each column:\n{}'.format([str(w) for w in WIND]))
|
# Generated by Django 3.1.7 on 2021-04-01 08:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace ``Post.listing_slug`` with a free-text ``listing_content`` field."""

    # Runs after the app's initial schema migration.
    dependencies = [
        ('listing', '0001_initial'),
    ]

    operations = [
        # Drop the old slug field from Post.
        migrations.RemoveField(
            model_name='post',
            name='listing_slug',
        ),
        # Add the replacement text field; existing rows are backfilled with '***'.
        migrations.AddField(
            model_name='post',
            name='listing_content',
            field=models.TextField(default='***'),
        ),
    ]
|
# Copyright 2016 - 2022 Alexey Stepanov aka penguinolog
# Copyright 2016 Mirantis, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""repr_utils module.
This is no reason to import this submodule directly, all required methods is
available from the main module.
"""
from __future__ import annotations
# Standard Library
import abc
import collections
import inspect
import types
import typing
if typing.TYPE_CHECKING:
# Standard Library
import dataclasses
from collections.abc import Callable
from collections.abc import Iterable
__all__ = ("PrettyFormat", "PrettyRepr", "PrettyStr", "pretty_repr", "pretty_str")
_SIMPLE_MAGIC_ATTRIBUTES = ("__repr__", "__str__")
@typing.runtime_checkable
class _AttributeHolderProto(typing.Protocol):
    """Structural protocol for objects exposing ``_get_kwargs``/``_get_args``.

    NOTE(review): this shape matches argparse-style attribute holders
    (e.g. ``argparse.Namespace``) -- confirm against the callers.
    """

    __slots__ = ()

    def _get_kwargs(self) -> list[tuple[str, typing.Any]]:
        """Protocol stub."""

    def _get_args(self) -> list[str]:
        """Protocol stub."""
@typing.runtime_checkable
class _NamedTupleProto(typing.Protocol):
    """Structural protocol matching namedtuple-like instances.

    Detected via the ``_asdict``/``__getnewargs__``/``_replace``/``_make``
    members that ``collections.namedtuple`` generates.
    """

    __slots__ = ()

    def _asdict(self) -> dict[str, typing.Any]:
        """Protocol stub."""

    def __getnewargs__(self) -> tuple[typing.Any, ...]:
        """Protocol stub."""

    def _replace(self, /, **kwds: dict[str, typing.Any]) -> _NamedTupleProto:
        """Protocol stub."""

    @classmethod
    def _make(cls, iterable: Iterable[typing.Any]) -> _NamedTupleProto:
        """Protocol stub."""
@typing.runtime_checkable
class _DataClassProto(typing.Protocol):
    """Structural protocol for dataclass instances.

    Matched via the ``__dataclass_params__``/``__dataclass_fields__``
    attributes that ``@dataclasses.dataclass`` sets on generated classes.
    """

    __slots__ = ()

    __dataclass_params__: dataclasses._DataclassParams  # type: ignore[name-defined]
    __dataclass_fields__: dict[str, dataclasses.Field[typing.Any]] = {}
def _known_callable(item: typing.Any) -> bool:
"""Check for possibility to parse callable.
:param item: item to check for repr() way
:type item: typing.Any
:return: item is callable and should be processed not using repr
:rtype: bool
"""
return isinstance(item, (types.FunctionType, types.MethodType))
def _simple(item: typing.Any) -> bool:
    """Check for nested iterations: True, if not.

    :param item: item to check for repr() way
    :type item: typing.Any
    :return: use repr() over item by default
    :rtype: bool
    """
    # An object is "not simple" when it is one of the standard containers
    # AND still uses that container's own __repr__/__str__ (i.e. it has no
    # customised textual representation of its own).
    for container_type in (list, set, tuple, dict, frozenset, collections.deque):
        if isinstance(item, container_type) and all(
            getattr(type(item), attribute) is getattr(container_type, attribute)
            for attribute in _SIMPLE_MAGIC_ATTRIBUTES
        ):
            return False
    return True
class ReprParameter:
    """Parameter wrapper for repr and str operations over signature."""

    __slots__ = ("_value", "_parameter")

    POSITIONAL_ONLY = inspect.Parameter.POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = inspect.Parameter.POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = inspect.Parameter.VAR_POSITIONAL
    KEYWORD_ONLY = inspect.Parameter.KEYWORD_ONLY
    VAR_KEYWORD = inspect.Parameter.VAR_KEYWORD
    empty = inspect.Parameter.empty

    def __init__(self, parameter: inspect.Parameter, value: typing.Any = inspect.Parameter.empty) -> None:
        """Parameter-like object store for repr and str tasks.

        :param parameter: parameter from signature
        :type parameter: inspect.Parameter
        :param value: default value override
        :type value: typing.Any
        """
        self._parameter: inspect.Parameter = parameter
        # An explicit override wins; otherwise fall back to the signature default.
        self._value: typing.Any = value if value is not parameter.empty else parameter.default

    @property
    def parameter(self) -> inspect.Parameter:
        """Parameter object.

        :return: original inspect.Parameter object
        :rtype: inspect.Parameter
        """
        return self._parameter

    @property
    def name(self) -> None | str:
        """Parameter name.

        :return: parameter name. For `*args` and `**kwargs` add corresponding prefixes
        :rtype: None | str
        """
        if self.kind == inspect.Parameter.VAR_POSITIONAL:
            return "*" + self.parameter.name
        if self.kind == inspect.Parameter.VAR_KEYWORD:
            return "**" + self.parameter.name
        return self.parameter.name

    @property
    def value(self) -> typing.Any:
        """Parameter value to log.

        :return: If function is bound to class -> value is class instance else default value.
        :rtype: typing.Any
        """
        return self._value

    @property
    def annotation(self) -> inspect.Parameter.empty | str:  # type: ignore[valid-type]
        """Parameter annotation.

        :return: parameter annotation from signature
        :rtype: inspect.Parameter.empty | str
        """
        return self.parameter.annotation  # type: ignore[no-any-return]

    @property
    def kind(self) -> int:
        """Parameter kind.

        :return: parameter kind from inspect.Parameter
        :rtype: int
        """
        # noinspection PyTypeChecker
        return self.parameter.kind

    def __hash__(self) -> typing.NoReturn:  # pylint: disable=invalid-hash-returned
        """Block hashing.

        :raises TypeError: Not hashable.
        """
        msg = f"not hashable type: '{self.__class__.__name__}'"
        raise TypeError(msg)

    def __repr__(self) -> str:
        """Debug purposes.

        :return: parameter repr for debug purposes
        :rtype: str
        """
        # BUG FIX: interpolating `self` here went through object.__str__,
        # which falls back to __repr__ and recursed until RecursionError.
        # Use the parameter name instead.
        return f'<{self.__class__.__name__} "{self.name}">'
def _prepare_repr(func: types.FunctionType | types.MethodType) -> list[ReprParameter]:
    """Get arguments lists with defaults.

    :param func: Callable object to process
    :type func: types.FunctionType | types.MethodType
    :return: repr of callable parameter from signature
    :rtype: list[ReprParameter]
    """
    is_bound: bool = isinstance(func, types.MethodType)
    # Unwrap bound methods so inspect sees the underlying function.
    target: Callable[..., typing.Any] = func.__func__ if is_bound else func  # type: ignore[union-attr]
    parameters = list(inspect.signature(target).parameters.values())
    result: list[ReprParameter] = []
    if is_bound and func.__self__ is not None and parameters:  # type: ignore[union-attr]
        # The first parameter of a bound method carries the instance as value.
        result.append(ReprParameter(parameters[0], value=func.__self__))  # type: ignore[union-attr]
        parameters = parameters[1:]
    result.extend(ReprParameter(param) for param in parameters)
    return result
class PrettyFormat(metaclass=abc.ABCMeta):
    """Pretty Formatter.

    Designed for usage as __repr__ and __str__ replacement on complex objects.
    Subclasses supply the leaf-rendering strategy (repr vs str) via the
    abstract hooks; this base class handles recursion, indentation and the
    special cases (callables, argparse-style holders, named tuples,
    dataclasses, containers).
    """

    __slots__ = ("__max_indent", "__indent_step")

    def __init__(self, max_indent: int = 20, indent_step: int = 4) -> None:
        """Pretty Formatter.

        :param max_indent: maximal indent before classic repr() call
        :type max_indent: int
        :param indent_step: step for the next indentation level
        :type indent_step: int
        """
        self.__max_indent: int = max_indent
        self.__indent_step: int = indent_step

    @property
    def max_indent(self) -> int:
        """Max indent getter.

        :return: maximal indent before switch to normal repr
        :rtype: int
        """
        return self.__max_indent

    @property
    def indent_step(self) -> int:
        """Indent step getter.

        :return: indent step for nested definitions
        :rtype: int
        """
        return self.__indent_step

    def next_indent(self, indent: int, multiplier: int = 1) -> int:
        """Next indentation value.

        :param indent: current indentation value
        :type indent: int
        :param multiplier: step multiplier
        :type multiplier: int
        :return: next indentation value
        :rtype: int
        """
        return indent + multiplier * self.indent_step

    def _repr_callable(
        self,
        src: types.FunctionType | types.MethodType,
        indent: int = 0,
    ) -> str:
        """Repr callable object (function or method).

        :param src: Callable to process
        :type src: types.FunctionType | types.MethodType
        :param indent: start indentation
        :type indent: int
        :return: Repr of function or method with signature.
        :rtype: str
        """
        param_repr: list[str] = []
        next_indent = self.next_indent(indent)
        prefix: str = "\n" + " " * next_indent
        for param in _prepare_repr(src):
            param_repr.append(f"{prefix}{param.name}")
            annotation_exist = param.annotation is not param.empty  # type: ignore[comparison-overlap]
            if annotation_exist:
                param_repr.append(f": {getattr(param.annotation, '__name__', param.annotation)!s}")
            if param.value is not param.empty:
                # PEP8: spaces around '=' only when an annotation is present.
                if annotation_exist:
                    param_repr.append(" = ")
                else:
                    param_repr.append("=")
                param_repr.append(self.process_element(src=param.value, indent=next_indent, no_indent_start=True))
            param_repr.append(",")
        if param_repr:
            param_repr.append("\n")
            param_repr.append(" " * indent)
        param_str = "".join(param_repr)
        sig: inspect.Signature = inspect.signature(src)
        if sig.return_annotation is inspect.Parameter.empty:
            annotation: str = ""
        elif sig.return_annotation is type(None):  # noqa: E721
            # Python 3.10 special case
            annotation = " -> None"
        else:
            annotation = f" -> {getattr(sig.return_annotation, '__name__', sig.return_annotation)!s}"
        return (
            f"{'':<{indent}}"
            f"<{src.__class__.__name__} {src.__module__}.{src.__name__} with interface ({param_str}){annotation}>"
        )

    def _repr_attribute_holder(
        self,
        src: _AttributeHolderProto,
        indent: int = 0,
        no_indent_start: bool = False,
    ) -> str:
        """Repr attribute holder object (like argparse objects).

        :param src: attribute holder object to process
        :type src: _AttributeHolderProto
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: do not indent the opening parenthesis line
        :type no_indent_start: bool
        :return: Repr of attribute holder object.
        :rtype: str
        """
        param_repr: list[str] = []
        star_args: dict[str, typing.Any] = {}
        next_indent = self.next_indent(indent)
        prefix: str = "\n" + " " * next_indent
        for arg in src._get_args():  # pylint: disable=protected-access
            repr_val = self.process_element(arg, indent=next_indent)
            param_repr.append(f"{prefix}{repr_val},")
        for name, value in src._get_kwargs():  # pylint: disable=protected-access
            if name.isidentifier():
                repr_val = self.process_element(value, indent=next_indent, no_indent_start=True)
                param_repr.append(f"{prefix}{name}={repr_val},")
            else:
                # Non-identifier names cannot be rendered as keyword args;
                # collect them into a synthetic **kwargs dict.
                star_args[name] = value
        if star_args:
            repr_val = self.process_element(star_args, indent=next_indent, no_indent_start=True)
            param_repr.append(f"{prefix}**{repr_val},")
        if param_repr:
            param_repr.append("\n")
            param_repr.append(" " * indent)
        param_str = "".join(param_repr)
        return f"{'':<{indent if not no_indent_start else 0}}{src.__module__}.{src.__class__.__name__}({param_str})"

    def _repr_named_tuple(
        self,
        src: _NamedTupleProto,
        indent: int = 0,
        no_indent_start: bool = False,
    ) -> str:
        """Repr named tuple.

        :param src: named tuple object to process
        :type src: _NamedTupleProto
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: do not indent the opening parenthesis line
        :type no_indent_start: bool
        :return: Repr of named tuple object.
        :rtype: str
        """
        param_repr: list[str] = []
        # noinspection PyBroadException
        try:
            args_annotations: dict[str, typing.Any] = typing.get_type_hints(type(src))
        except BaseException:  # NOSONAR
            # get_type_hints may fail on unresolvable forward references.
            args_annotations = {}
        next_indent = self.next_indent(indent)
        prefix: str = "\n" + " " * next_indent
        for arg_name, value in src._asdict().items():
            repr_val = self.process_element(value, indent=next_indent, no_indent_start=True)
            param_repr.append(f"{prefix}{arg_name}={repr_val},")
            # FIX: was `getattr(args_annotations, arg_name, None)` -- getattr on
            # a dict always returns the default, so the ForwardRef guard never
            # triggered; use dict lookup instead.
            if arg_name in args_annotations and not isinstance(
                args_annotations.get(arg_name), typing.ForwardRef
            ):
                annotation = getattr(args_annotations[arg_name], "__name__", args_annotations[arg_name])
                # FIX: add separating spaces so the output reads "x=1,  # type: int"
                # instead of "x=1,# type: int" (consistent with _repr_dataclass).
                param_repr.append(f"  # type: {annotation!s}")
        if param_repr:
            param_repr.append("\n")
            param_repr.append(" " * indent)
        param_str = "".join(param_repr)
        return f"{'':<{indent if not no_indent_start else 0}}{src.__module__}.{src.__class__.__name__}({param_str})"

    def _repr_dataclass(
        self,
        src: _DataClassProto,
        indent: int = 0,
        no_indent_start: bool = False,
    ) -> str:
        """Repr dataclass.

        :param src: dataclass object to process
        :type src: _DataClassProto
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: do not indent the opening parenthesis line
        :type no_indent_start: bool
        :return: Repr of dataclass.
        :rtype: str
        """
        param_repr: list[str] = []
        next_indent = self.next_indent(indent)
        prefix: str = "\n" + " " * next_indent
        for arg_name, field in src.__dataclass_fields__.items():
            if not field.repr:
                # Field explicitly excluded from repr via field(repr=False).
                continue
            repr_val = self.process_element(getattr(src, arg_name), indent=next_indent, no_indent_start=True)
            comment: list[str] = []
            if field.type:
                # field.type may be a string (PEP 563) or a real type object.
                if isinstance(field.type, str):
                    comment.append(f"type: {field.type}")
                else:
                    comment.append(f"type: {field.type.__name__}")
            if getattr(field, "kw_only", False):  # python 3.10+
                comment.append("kw_only")
            if comment:
                comment_str = "  # " + "  # ".join(comment)
            else:
                comment_str = ""
            param_repr.append(f"{prefix}{arg_name}={repr_val},{comment_str}")
        if param_repr:
            param_repr.append("\n")
            param_repr.append(" " * indent)
        param_str = "".join(param_repr)
        return f"{'':<{indent if not no_indent_start else 0}}{src.__module__}.{src.__class__.__name__}({param_str})"

    @abc.abstractmethod
    def _repr_simple(
        self,
        src: typing.Any,
        indent: int = 0,
        no_indent_start: bool = False,
    ) -> str:
        """Repr object without iteration.

        :param src: Source object
        :type src: typing.Any
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: ignore indent
        :type no_indent_start: bool
        :return: simple repr() over object
        :rtype: str
        """

    @abc.abstractmethod
    def _repr_dict_items(
        self,
        src: dict[typing.Any, typing.Any],
        indent: int = 0,
    ) -> str:
        """Repr dict items.

        :param src: object to process
        :type src: dict[typing.Any, typing.Any]
        :param indent: start indentation
        :type indent: int
        :return: repr of key/value pairs from dict
        :rtype: str
        """

    @staticmethod
    @abc.abstractmethod
    def _repr_iterable_item(
        obj_type: str,
        prefix: str,
        indent: int,
        no_indent_start: bool,
        result: str,
        suffix: str,
    ) -> str:
        """Repr iterable item.

        :param obj_type: Object type
        :type obj_type: str
        :param prefix: prefix
        :type prefix: str
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: do not indent open bracket and simple parameters
        :type no_indent_start: bool
        :param result: result of pre-formatting
        :type result: str
        :param suffix: suffix
        :type suffix: str
        :return: formatted repr of "result" with prefix and suffix to explain type.
        :rtype: str
        """

    def _repr_iterable_items(
        self,
        src: Iterable[typing.Any],
        indent: int = 0,
    ) -> str:
        """Repr iterable items (not designed for dicts).

        :param src: object to process
        :type src: Iterable[typing.Any]
        :param indent: start indentation
        :type indent: int
        :return: repr of elements in iterable item
        :rtype: str
        """
        next_indent: int = self.next_indent(indent)
        buf: list[str] = []
        for elem in src:
            buf.append("\n")
            buf.append(self.process_element(src=elem, indent=next_indent))
            buf.append(",")
        return "".join(buf)

    @property
    @abc.abstractmethod
    def _magic_method_name(self) -> str:
        """Magic method name.

        :return: magic method name to lookup in processing objects
        :rtype: str
        """

    def process_element(
        self,
        src: typing.Any,
        indent: int = 0,
        no_indent_start: bool = False,
    ) -> str:
        """Make human readable representation of object.

        :param src: object to process
        :type src: typing.Any
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: do not indent open bracket and simple parameters
        :type no_indent_start: bool
        :return: formatted string
        :rtype: str
        """
        # Objects may opt in to custom pretty formatting via the magic method.
        if hasattr(src, self._magic_method_name):
            result = getattr(src, self._magic_method_name)(self, indent=indent, no_indent_start=no_indent_start)
            return result  # type: ignore[no-any-return]
        if _known_callable(src):
            return self._repr_callable(src=src, indent=indent)
        # NOTE(review): requires _AttributeHolderProto to be @runtime_checkable.
        if isinstance(src, _AttributeHolderProto):
            return self._repr_attribute_holder(src=src, indent=indent, no_indent_start=no_indent_start)
        if isinstance(src, tuple) and isinstance(src, _NamedTupleProto):
            return self._repr_named_tuple(src=src, indent=indent, no_indent_start=no_indent_start)
        if isinstance(src, _DataClassProto) and not isinstance(src, type) and src.__dataclass_params__.repr:
            return self._repr_dataclass(src=src, indent=indent, no_indent_start=no_indent_start)
        # Leaf rendering: simple objects, deep nesting cut-off, empty containers.
        if _simple(src) or indent >= self.max_indent or not src:
            return self._repr_simple(src=src, indent=indent, no_indent_start=no_indent_start)
        if isinstance(src, dict):
            prefix, suffix = "{", "}"
            result = self._repr_dict_items(src=src, indent=indent)
        elif isinstance(src, collections.deque):
            result = self._repr_iterable_items(src=src, indent=self.next_indent(indent))
            prefix, suffix = "(", ")"
        else:
            if isinstance(src, list):
                prefix, suffix = "[", "]"
            elif isinstance(src, tuple):
                prefix, suffix = "(", ")"
            elif isinstance(src, (set, frozenset)):
                prefix, suffix = "{", "}"
            else:
                prefix, suffix = "", ""
            result = self._repr_iterable_items(src=src, indent=indent)
        if isinstance(src, collections.deque):
            # Deques render with their maxlen so the repr round-trips context.
            next_indent = self.next_indent(indent)
            return (
                f"{'':<{indent if not no_indent_start else 0}}"
                f"{src.__class__.__name__}(\n"
                f"{'':<{next_indent}}{prefix}{result}\n"
                f"{'':<{next_indent}}{suffix},\n"
                f"{'':<{self.next_indent(indent)}}maxlen={src.maxlen},\n"
                f"{'':<{indent}})"
            )
        if type(src) in (list, tuple, set, dict):
            # Exact builtin containers need no type name around the literal.
            return f"{'':<{indent if not no_indent_start else 0}}{prefix}{result}\n{'':<{indent}}{suffix}"
        return self._repr_iterable_item(
            obj_type=src.__class__.__name__,
            prefix=prefix,
            indent=indent,
            no_indent_start=no_indent_start,
            result=result,
            suffix=suffix,
        )

    def __call__(
        self,
        src: typing.Any,
        indent: int = 0,
        no_indent_start: bool = False,
    ) -> str:
        """Make human-readable representation of object. The main entry point.

        :param src: object to process
        :type src: typing.Any
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: do not indent open bracket and simple parameters
        :type no_indent_start: bool
        :return: formatted string
        :rtype: str
        """
        result = self.process_element(src, indent=indent, no_indent_start=no_indent_start)
        return result
class PrettyRepr(PrettyFormat):
    """Pretty repr.

    Designed for usage as __repr__ replacement on complex objects
    """

    __slots__ = ()

    @property
    def _magic_method_name(self) -> str:
        """Magic method name.

        :return: magic method name to lookup in processing objects
        :rtype: str
        """
        return "__pretty_repr__"

    def _repr_simple(
        self,
        src: typing.Any,
        indent: int = 0,
        no_indent_start: bool = False,
    ) -> str:
        """Repr object without iteration.

        :param src: Source object
        :type src: typing.Any
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: ignore indent
        :type no_indent_start: bool
        :return: simple repr() over object, except strings (add prefix) and set (uniform py2/py3)
        :rtype: str
        """
        return f"{'':<{0 if no_indent_start else indent}}{src!r}"

    def _repr_dict_items(
        self,
        src: dict[typing.Any, typing.Any],
        indent: int = 0,
    ) -> str:
        """Repr dict items.

        :param src: object to process
        :type src: dict[typing.Any, typing.Any]
        :param indent: start indentation
        :type indent: int
        :return: repr of key/value pairs from dict
        :rtype: str
        """
        # Pad keys to the widest repr so values align in one column.
        max_len: int = max(len(repr(key)) for key in src) if src else 0
        next_indent: int = self.next_indent(indent)
        prefix: str = "\n" + " " * next_indent
        buf: list[str] = []
        for key, val in src.items():
            buf.append(prefix)
            # FIX: format spec was the corrupted literal "73,072", which both
            # ignored the computed max_len and raised
            # "ValueError: Cannot specify ',' with 's'" at runtime.
            buf.append(f"{key!r:{max_len}}: ")
            buf.append(self.process_element(val, indent=next_indent, no_indent_start=True))
            buf.append(",")
        return "".join(buf)

    @staticmethod
    def _repr_iterable_item(
        obj_type: str,
        prefix: str,
        indent: int,
        no_indent_start: bool,
        result: str,
        suffix: str,
    ) -> str:
        """Repr iterable item.

        :param obj_type: Object type
        :type obj_type: str
        :param prefix: prefix
        :type prefix: str
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: do not indent open bracket and simple parameters
        :type no_indent_start: bool
        :param result: result of pre-formatting
        :type result: str
        :param suffix: suffix
        :type suffix: str
        :return: formatted repr of "result" with prefix and suffix to explain type.
        :rtype: str
        """
        return f"{'':<{indent if not no_indent_start else 0}}{obj_type}({prefix}{result}\n{'':<{indent}}{suffix})"
class PrettyStr(PrettyFormat):
    """Pretty str.

    Designed for usage as __str__ replacement on complex objects
    """

    __slots__ = ()

    @property
    def _magic_method_name(self) -> str:
        """Magic method name.

        :return: magic method name to lookup in processing objects
        :rtype: str
        """
        return "__pretty_str__"

    @staticmethod
    def _strings_str(
        indent: int,
        val: bytes | str,
    ) -> str:
        """Custom str for strings and binary strings.

        :param indent: result indent
        :type indent: int
        :param val: value for repr
        :type val: bytes | str
        :return: indented string as `str`
        :rtype: str
        """
        if isinstance(val, bytes):
            # Decode leniently: undecodable bytes become backslash escapes.
            string: str = val.decode(encoding="utf-8", errors="backslashreplace")
        else:
            string = val
        return f"{'':<{indent}}{string}"

    def _repr_simple(
        self,
        src: typing.Any,
        indent: int = 0,
        no_indent_start: bool = False,
    ) -> str:
        """Repr object without iteration.

        :param src: Source object
        :type src: typing.Any
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: ignore indent
        :type no_indent_start: bool
        :return: simple repr() over object, except strings (decode) and set (uniform py2/py3)
        :rtype: str
        """
        indent = 0 if no_indent_start else indent
        if isinstance(src, (bytes, str)):
            return self._strings_str(indent=indent, val=src)
        return f"{'':<{indent}}{src!s}"

    def _repr_dict_items(
        self,
        src: dict[typing.Any, typing.Any],
        indent: int = 0,
    ) -> str:
        """Repr dict items.

        :param src: object to process
        :type src: dict[typing.Any, typing.Any]
        :param indent: start indentation
        :type indent: int
        :return: repr of key/value pairs from dict
        :rtype: str
        """
        # Pad keys to the widest str so values align in one column.
        max_len = max(len(str(key)) for key in src) if src else 0
        next_indent: int = self.next_indent(indent)
        prefix: str = "\n" + " " * next_indent
        buf: list[str] = []
        for key, val in src.items():
            buf.append(prefix)
            # FIX: format spec was the corrupted literal "73,072", which both
            # ignored the computed max_len and raised
            # "ValueError: Cannot specify ',' with 's'" at runtime.
            buf.append(f"{key!s:{max_len}}: ")
            buf.append(self.process_element(val, indent=next_indent, no_indent_start=True))
            buf.append(",")
        return "".join(buf)

    @staticmethod
    def _repr_iterable_item(
        obj_type: str,
        prefix: str,
        indent: int,
        no_indent_start: bool,
        result: str,
        suffix: str,
    ) -> str:
        """Repr iterable item.

        :param obj_type: Object type
        :type obj_type: str
        :param prefix: prefix
        :type prefix: str
        :param indent: start indentation
        :type indent: int
        :param no_indent_start: do not indent open bracket and simple parameters
        :type no_indent_start: bool
        :param result: result of pre-formatting
        :type result: str
        :param suffix: suffix
        :type suffix: str
        :return: formatted repr of "result" with prefix and suffix to explain type.
        :rtype: str
        """
        return f"{'':<{indent if not no_indent_start else 0}}{prefix}{result}\n{'':<{indent}}{suffix}"
def pretty_repr(
    src: typing.Any,
    indent: int = 0,
    no_indent_start: bool = False,
    max_indent: int = 20,
    indent_step: int = 4,
) -> str:
    """Make human readable repr of object.

    :param src: object to process
    :type src: typing.Any
    :param indent: start indentation, all next levels is +indent_step
    :type indent: int
    :param no_indent_start: do not indent open bracket and simple parameters
    :type no_indent_start: bool
    :param max_indent: maximal indent before classic repr() call
    :type max_indent: int
    :param indent_step: step for the next indentation level
    :type indent_step: int
    :return: formatted string
    :rtype: str
    """
    # Thin convenience wrapper: build a formatter, then apply it once.
    formatter = PrettyRepr(max_indent=max_indent, indent_step=indent_step)
    return formatter(src=src, indent=indent, no_indent_start=no_indent_start)
def pretty_str(
    src: typing.Any,
    indent: int = 0,
    no_indent_start: bool = False,
    max_indent: int = 20,
    indent_step: int = 4,
) -> str:
    """Make human readable str of object.

    :param src: object to process
    :type src: typing.Any
    :param indent: start indentation, all next levels is +indent_step
    :type indent: int
    :param no_indent_start: do not indent open bracket and simple parameters
    :type no_indent_start: bool
    :param max_indent: maximal indent before classic repr() call
    :type max_indent: int
    :param indent_step: step for the next indentation level
    :type indent_step: int
    :return: formatted string
    """
    # Thin convenience wrapper: build a formatter, then apply it once.
    formatter = PrettyStr(max_indent=max_indent, indent_step=indent_step)
    return formatter(src=src, indent=indent, no_indent_start=no_indent_start)
|
import sys
import os
import librosa
import numpy as np
from multiprocessing import Pool
import pickle
from librosa.filters import mel as librosa_mel_fn
import torch
from torch import nn
from torch.nn import functional as F
from sklearn.preprocessing import StandardScaler
import warnings
# Silence all warnings for the whole extraction run (e.g. deprecation notices
# from the librosa/torch imports above).
warnings.filterwarnings('ignore')
class Audio2Mel(nn.Module):
    """Convert a batch of waveforms to log10 mel-spectrograms.

    forward() expects audio shaped (batch, 1, samples) and returns
    (batch, n_mel_channels, frames).
    """

    def __init__(
        self,
        n_fft=1024,
        hop_length=256,
        win_length=1024,
        sampling_rate=22050,
        n_mel_channels=240,
        mel_fmin=0.0,
        mel_fmax=None,
    ):
        """Build the fixed STFT window and mel filterbank as buffers.

        :param n_fft: FFT size
        :param hop_length: hop between successive frames (samples)
        :param win_length: analysis window length (samples)
        :param sampling_rate: audio sample rate expected by the filterbank
        :param n_mel_channels: number of mel bands
        :param mel_fmin: lowest filterbank frequency (Hz)
        :param mel_fmax: highest filterbank frequency (Hz); None = sr / 2
        """
        super().__init__()
        ##############################################
        # FFT Parameters                             #
        ##############################################
        window = torch.hann_window(win_length).float()
        # FIX: librosa >= 0.10 makes mel() keyword-only; keywords also work on
        # older releases, so this call is compatible both ways.
        mel_basis = librosa_mel_fn(
            sr=sampling_rate, n_fft=n_fft, n_mels=n_mel_channels, fmin=mel_fmin, fmax=mel_fmax
        )
        mel_basis = torch.from_numpy(mel_basis).float()
        # Buffers (not parameters): move with .to(device) but are not trained.
        self.register_buffer("mel_basis", mel_basis)
        self.register_buffer("window", window)
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.sampling_rate = sampling_rate
        self.n_mel_channels = n_mel_channels

    def forward(self, audio):
        """Compute the log10 mel-spectrogram of `audio` (batch, 1, samples)."""
        # Manual reflect padding, since stft is called with center=False.
        p = (self.n_fft - self.hop_length) // 2
        audio = F.pad(audio, (p, p), "reflect").squeeze(1)
        # FIX: torch >= 2.0 requires an explicit return_complex for stft; the
        # old real-tensor layout (unbind of the last dim + sqrt of squares)
        # is exactly the complex magnitude, i.e. .abs().
        fft = torch.stft(
            audio,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            win_length=self.win_length,
            window=self.window,
            center=False,
            return_complex=True,
        )
        magnitude = fft.abs()
        mel_output = torch.matmul(self.mel_basis, magnitude)
        # Clamp before log10 to avoid -inf on silent frames.
        log_mel_spec = torch.log10(torch.clamp(mel_output, min=1e-5))
        return log_mel_spec
def convert_file(path):
    """Load one audio file, trim silence, and return its mel spectrogram.

    Uses the module-level globals `sr` (target sample rate) and
    `extract_func` (Audio2Mel instance) set in the __main__ block.

    :param path: path to the audio file
    :return: float32 ndarray of shape (n_mel_channels, frames)
    """
    waveform, _ = librosa.load(path, sr=sr)
    # Strip leading/trailing silence below 20 dB of the peak.
    waveform, _ = librosa.effects.trim(waveform, top_db=20)
    tensor = torch.from_numpy(waveform)[None, None]  # -> (1, 1, samples)
    mel = extract_func(tensor).numpy()[0]  # drop the batch dimension
    return mel.astype(np.float32)
def process_audios(path):
    """Extract and save the mel spectrogram for one audio file.

    Uses the module-level globals `base_out_dir` and `feat_type`.

    :param path: path to the source audio file
    :return: (file_id, n_frames) on success; (file_id, 0) when skipped
        (already done, too short: < 128 frames, or failed)
    """
    file_id = path.split('/')[-1][:-4]  # basename without the 4-char extension
    out_dir = os.path.join(base_out_dir, feat_type)
    os.makedirs(out_dir, exist_ok=True)
    out_fp = os.path.join(out_dir, f'{file_id}.npy')
    if os.path.exists(out_fp):
        print('Done before')
        return file_id, 0
    try:
        m = convert_file(path)
        if m.shape[1] < 128:
            # Too short to be useful downstream.
            return file_id, 0
        print(m.shape)
        np.save(out_fp, m, allow_pickle=False)
    except Exception as exc:
        # FIX: errors were swallowed silently, making bad files
        # indistinguishable from short ones; report before skipping.
        print(f'Failed to process {path}: {exc}')
        return file_id, 0
    return file_id, m.shape[-1]
if __name__ == "__main__":
    # Configuration (edited in place; there is no CLI parsing here).
    # These names are read as globals by convert_file/process_audios.
    base_out_dir = '/files/xxx/VC/VCTK/VCTK-Corpus/'
    audio_dir = '/files/xxx/VC/VCTK/VCTK-Corpus/wav48/'
    feat_type = 'melspectrogram_vctk_100'
    extension = '.wav'
    peak_norm = False
    n_fft = 1024
    hop_length = 256
    win_length = 1024
    sampling_rate = 22050
    n_mel_channels = 80
    extract_func = Audio2Mel(n_fft, hop_length, win_length, sampling_rate, n_mel_channels)
    sr = sampling_rate
    # Recursively collect every matching audio file under audio_dir.
    audio_files = [
        os.path.join(dir_path, fname)
        for dir_path, _, file_names in os.walk(audio_dir)
        for fname in file_names
        if fname.endswith(extension)
    ]
    print(audio_files[:5])
    if not audio_files:
        print('Please point wav_path in hparams.py to your dataset,')
        print('or use the --path option.\n')
    else:
        dataset = []
        for audio_path in audio_files:
            file_id, length = process_audios(audio_path)
            print(file_id)
            if length == 0:
                continue  # skipped / failed / too short
            dataset.append((file_id, length))
        print('\n\nCompleted. ')
|
from fastapi import APIRouter, BackgroundTasks
from app.core.celery_app import celery_app
from app.worker import download_files, identify_files
from app.db.models import Group
router = APIRouter()  # shared router; the endpoints below attach to it
@router.get('/')
async def read_root():
    """Root endpoint; returns a static greeting payload."""
    return dict(Hello='World')
@router.get('/items/{item_id}')
async def read_item(item_id: int, q: str | None = None):
    """Echo the path parameter and the optional query parameter.

    FIX: annotated `q` as `str | None` -- the previous `q: str = None`
    contradicted its None default (and the generated OpenAPI schema).
    """
    return {'item_id': item_id, 'q': q}
@router.get('/books')
async def ping_pong():
    """List books (static placeholder payload)."""
    payload = {'status': 'success'}
    payload['books'] = 'BOOKS'
    return payload
@router.get('/files')
async def files():
    """List files.

    NOTE(review): the payload mirrors /books ('books': 'BOOKS') --
    presumably a placeholder; confirm intended.
    """
    response = {'status': 'success'}
    response['books'] = 'BOOKS'
    return response
@router.post('/books')
async def books(title: str, author: str, read: bool):
    """Add a book.

    NOTE(review): the parameters are accepted but currently unused -- the
    handler only returns a static acknowledgement.
    """
    result = {'status': 'success'}
    result['message'] = 'Book added!'
    return result
@router.post("/test-celery/", status_code=201)
def test_celery():
    """
    Test Celery worker.
    """
    # Fire-and-forget dispatch to the worker; no result is awaited.
    task_args = ['szaa----']
    celery_app.send_task("app.worker.test_celery", args=task_args)
    return {"msg": "Word received"}
@router.get('/download')
def download():
    """Kick off the background download task and return its task id."""
    task = download_files.delay()
    return {'msg': 'dl start', 'task_id': task.task_id}
@router.get('/identify')
def identify():
    """Kick off the background identify task and return its task id.

    FIX: the status message previously read 'dl start' (copy-pasted from
    /download); corrected to reflect the identify task.
    """
    result = identify_files.delay()
    return {'msg': 'identify start', 'task_id': result.task_id}
|
from .docx2txt import get_output, process  # noqa
from .docx_file import DocxFile  # noqa

# Package version string; bump on release.
VERSION = '0.8'
|
'''
The following class is used for the Transfer Entropy measurements.
There are three main functions; the rest are helpers.
The three functions to call are:
    computeTEUsers
    computeTEUserEvents
    computeTERepos
Note that computeTEUserEvents requires computeTEUsers to have been run; if it has not, computeTEUserEvents will run computeTEUsers automatically.
'''
import sys
from collections import defaultdict
import pickle as pkl

import jpype
import numpy as np
import pandas as pd
class TEMeasurements():
def __init__(object):
super(TE, self).__init__()
'''
Used For ALL 3 methods
'''
def readPickleFile(self,ipFile):
with open(ipFile, 'rb') as handle:
obj = pkl.load(handle)
return obj
def computeBasicStats(self,timeseries):
maxTime = 0.0
for repo,actorsTS in timeseries.iteritems():
maxTime = max(maxTime,max({key: max(value) for key, value in timeseries[repo].items()}.values()))
return maxTime
def getTESigPairsRepo(self,actorTSSrc,actorTSDest,teThresh, delayUnits, nReps, kE, kN):
actorsSrc = actorTSSrc.keys()
actorsDest = actorTSDest.keys()
tSSrc = actorTSSrc.values()
tSDest = actorTSDest.values()
nActSrc = len(actorsSrc)
nActDest = len(actorsDest)
print("Number of source / destination actors (repos) in this repo (repo group ) : ", nActSrc, " ", nActDest)
allEdges = {}
allNodes = {}
for idxS in range(nActSrc):
src = tSSrc[idxS]
nodeTEVal = 0.0
for idxD in range(nActDest):
if (actorsSrc[idxS] != actorsDest[idxD]):
dest = tSDest[idxD]
TEDelays = np.zeros((len(delayUnits)))
for idx in range(len(delayUnits)):
TEDelays[idx] = self.getTETimeSeriesPairBinary(src, dest, teThresh, delayUnits[idx], nReps)
if (np.max(TEDelays) > 0.0):
allEdges[tuple((actorsSrc[idxS],actorsDest[idxD]))] = np.max(TEDelays)
nodeTEVal = nodeTEVal + np.max(TEDelays)
if (nodeTEVal > 0.0):
allNodes[actorsSrc[idxS]] = nodeTEVal
topEdges = sorted(allEdges.items(), key=lambda (k,v): v, reverse = True)
if (len(topEdges) > kE):
topEdges = topEdges[:kE]
topNodes = sorted(allNodes.items(), key=lambda (k,v): v, reverse = True)
if (len(topNodes) > kN):
topNodes = topNodes[:kN]
return (topEdges, topNodes)
def getTETimeSeriesPairBinary(self,src, dest, teThresh, delayParam, nReps):
teCalcClass = jpype.JPackage("infodynamics.measures.discrete").TransferEntropyCalculatorDiscrete
teCalc = teCalcClass(2,1,1,1,1,delayParam)
teCalc.initialise()
teCalc.addObservations(src,dest)
te = teCalc.computeAverageLocalOfObservations()
if(te > teThresh):
teNullDist = teCalc.computeSignificance(nReps);
teNullMean = teNullDist.getMeanOfDistribution()
teNullStd = teNullDist.getStdOfDistribution()
if teNullStd > 0:
z_score = (te-teNullMean)/teNullStd
else:
z_score = 0.0
te = 0.0
if (z_score < 3.0):
te = 0.0
else:
te = 0.0
return te
'''
For TE Users
'''
def getTimeSeriesUsers(self):
df = self.main_df[self.main_df['repo'].isin(self.repo_actors.keys())]
timeseries = dict()
for repo in self.repo_actors.keys():
tempdf = df[df['repo'] == repo]
if (not tempdf.empty):
tempdf = df[df['user'].isin(self.repo_actors[repo])]
if (not tempdf.empty):
tempdf['time'] = pd.to_datetime(tempdf['time'])
tempdf['time'] = (tempdf['time'] - self.startTime).astype('timedelta64[s]')
tempDic = tempdf[['user','time']].groupby('user')['time'].apply(list).to_dict()
timeseries[repo] = tempDic
return timeseries
def getBinnedBinaryTimeSeries(self,groupEntityTS,binSize,totalBins):
binnedTS = defaultdict(dict)
for group,entityTS in groupEntityTS.iteritems():
entitiesBinnedTS = {}
for entity, timeSeries in entityTS.iteritems():
entitiesBinnedTS[entity] = self.getBinnedTimeSeriesBinarySingle(totalBins, binSize, timeSeries)
binnedTS[group] = entitiesBinnedTS
return binnedTS
def getBinnedTimeSeriesBinarySingle(self,totalBins,binSize,timeSeries):
tsBinned = np.zeros((totalBins), dtype=int)
for timeVal in timeSeries:
try:
idx = (timeVal // binSize)
tsBinned[int(idx)] = 1
except:
continue
return tsBinned
def createAllTEMatrices(self,rATSrc, rATDest, teThresh, delayUnits, nReps, kE, kN):
if (set(rATSrc.keys()) != set(rATDest.keys())):
sys.exit("The repos in the source and target time series data structure is different. Please check.")
topEdges = defaultdict(dict)
topNodes = {}
for repo in rATSrc.keys():
print("Computing for repo (repo group) : ", repo)
edges,nodes = self.getTESigPairsRepo(rATSrc[repo],rATDest[repo],teThresh,delayUnits, nReps, kE, kN)
topEdges[repo] = edges
topNodes[repo] = nodes
return (topEdges, topNodes)
#main function to call
def computeTEUsers(self):
#if not jpype.isJVMStarted():
# jpype.startJVM(jpype.getDefaultJVMPath(), "-ea", "-Djava.class.path=" + "../infodynamics.jar")
repoActorsTS = self.getTimeSeriesUsers()
maxTime = self.computeBasicStats(repoActorsTS)
totalBins = int(np.ceil(maxTime/float(self.binSize)))
repoActorsBinned = self.getBinnedBinaryTimeSeries(repoActorsTS, self.binSize, totalBins)
topEdges, topNodes = self.createAllTEMatrices(repoActorsBinned, repoActorsBinned, self.teThresh[0], self.delayUnits, nReps = self.nReps, kE=self.kE, kN=self.kN)
#jpype.shutdownJVM()
self.top_edges = topEdges
self.top_users = topNodes
#with open('top_users.pkl','w') as handle:
# pkl.dump(allDic,handle)
return topEdges, topNodes
'''
Compute TEUSerEvents
'''
def createAllTEMatrices(self, rATSrc, rATDest, teThresh, delayUnits, nReps, kE, kN):
if (set(rATSrc.keys()) != set(rATDest.keys())):
sys.exit("The repos in the source and target time series data structure is different. Please check.")
topEdges = defaultdict(dict)
topNodes = {}
for repo in rATSrc.keys():
print("Computing for repo (repo group) : ", repo)
edges,nodes = self.getTESigPairsRepo(rATSrc[repo],rATDest[repo],teThresh,delayUnits, nReps, kE, kN)
topEdges[repo] = edges
topNodes[repo] = nodes
return (topEdges, topNodes)
def getSourceTargetUserEventTS(self,repoActorEventsTS,repoRockstars, rockStarEvent, otherEvents):
repoActorsSRC = defaultdict(dict)
repoActorsTAR = defaultdict(dict)
reposConsidered = repoRockstars.keys()
#First the rockstars who will act as sources
for repo,actorEventTS in repoActorEventsTS.iteritems():
if (repo in reposConsidered):
rockStars = [x[0] for x in repoRockstars[repo]]
for actor,eventTS in actorEventTS.iteritems():
if ((actor in rockStars) and (rockStarEvent in eventTS.keys())):
if (len(eventTS[rockStarEvent]) > 20):
repoActorsSRC[repo][actor] = eventTS[rockStarEvent]
#The other users who form the targets
for repo,actorEventTS in repoActorEventsTS.iteritems():
if (repo in reposConsidered):
rockStars = [x[0] for x in repoRockstars[repo]]
for actor,eventTS in actorEventTS.iteritems():
if (actor not in rockStars):
combinedEvent = []
for event in otherEvents:
if (event in eventTS.keys()):
combinedEvent = combinedEvent + eventTS[event]
if (len(combinedEvent) > 20):
repoActorsTAR[repo][actor] = combinedEvent
#Ensure that both SRC and TAR contain exactly the same repos since filtering criteria are different
srcKeys = repoActorsSRC.keys()
tarKeys = repoActorsTAR.keys()
differenceKeys = []
if (len(srcKeys) > len(tarKeys)):
differenceKeys = list(set(srcKeys).difference(set(tarKeys)))
for diffkey in differenceKeys:
del repoActorsSRC[diffkey]
elif (len(tarKeys) > len(srcKeys)):
differenceKeys = list(set(tarKeys).difference(set(srcKeys)))
for diffkey in differenceKeys:
del repoActorsTAR[diffkey]
return (repoActorsSRC, repoActorsTAR)
def getTimeSeriesUsersEvents(self,df,repoActors):
df = df[df['repo'].isin(repoActors.keys())]
timeseries = dict()
for repo in repoActors.keys():
tempdf = df[df['repo'] == repo]
if len(tempdf) == 0:
timeseries[repo] = dict()
continue
tempdf = df[df['user'].isin(repoActors[repo])]
if len(tempdf) == 0:
timeseries[repo] = dict()
continue
tempdf['time'] = pd.to_datetime(tempdf['time'])
tempdf['time'] = (tempdf['time'] - self.startTime).astype('timedelta64[s]')
tempdf = pd.DataFrame(tempdf[['user','event','time']].groupby(['user','event'])['time'].apply(list))
tempdf = tempdf.reset_index()
tempdic = dict()
for ele in tempdf['user'].unique():
tm = dict()
curdf = tempdf[tempdf['user'] == ele]
for eventT in curdf['event'].unique():
tm[eventT] = curdf[curdf['event'] == eventT]['time'].values[0]
tempdic[ele] = tm
timeseries[repo] = tempdic
return timeseries
def computeTEUserEvents(self):
    """Compute transfer entropy from rockstar users to other users, per repo.

    Returns the (topEdges, topNodes) pair produced by createAllTEMatrices.
    """
    # Per-repo, per-user, per-event time series from the raw dataframe.
    perUserTS = self.getTimeSeriesUsersEvents(self.main_df, self.repo_actors)
    # Make sure the rockstar list exists; computeTEUsers() populates it.
    if len(self.top_users) == 0:
        self.computeTEUsers()
    rockstarsByRepo = self.top_users
    # Split into source (rockstar) and target (everyone else) series.
    srcSeries, tarSeries = self.getSourceTargetUserEventTS(
        perUserTS, rockstarsByRepo, self.starEvent, self.otherEvents)
    # Bin both sets of series onto one common binary time axis.
    longestTime = max(self.computeBasicStats(srcSeries), self.computeBasicStats(tarSeries))
    binCount = int(np.ceil(longestTime / float(self.binSize)))
    srcBinned = self.getBinnedBinaryTimeSeries(srcSeries, self.binSize, binCount)
    tarBinned = self.getBinnedBinaryTimeSeries(tarSeries, self.binSize, binCount)
    topEdges, topNodes = self.createAllTEMatrices(
        srcBinned, tarBinned, self.teThresh[1], self.delayUnits,
        nReps=self.nReps, kE=self.kE, kN=self.kN)
    return topEdges, topNodes
'''
TE REPOS
'''
def getTimeSeriesRepos(self):
    """Return {group_desc: {repo: [seconds-since-startTime]}} for each repo group.

    Groups whose repos have no rows in ``self.main_df`` are omitted entirely.
    """
    timeseries = dict()
    # .items() instead of the Python-2-only .iteritems(): works on both
    # Python 2 and 3 with identical iteration semantics here.
    for desc, repos in self.repo_groups.items():
        tempdf = self.main_df[self.main_df['repo'].isin(repos)]  # get only repos we care about
        if not tempdf.empty:
            tempdf = tempdf.copy()  # don't mutate a view of main_df
            tempdf['time'] = pd.to_datetime(tempdf['time'])
            tempdf['time'] = (tempdf['time'] - self.startTime).astype('timedelta64[s]')
            tempDic = tempdf[['repo', 'time']].groupby('repo')['time'].apply(list).to_dict()
            timeseries[desc] = tempDic
    return timeseries
def computeTERepos(self):
    """Compute repo-to-repo transfer entropy; returns (topEdges, topNodes)."""
    print("Getting time series from CSV data file.")
    repoSeries = self.getTimeSeriesRepos()
    # Bin the raw event times into binary activity series on a shared axis.
    longestTime = self.computeBasicStats(repoSeries)
    binCount = int(np.ceil(longestTime / float(self.binSize)))
    binnedSeries = self.getBinnedBinaryTimeSeries(repoSeries, self.binSize, binCount)
    # Repo-vs-repo TE: the source and target sets are the same collection.
    topEdges, topNodes = self.createAllTEMatrices(
        binnedSeries, binnedSeries, self.teThresh[2], self.delayUnits,
        nReps=self.nReps, kE=self.kE, kN=self.kN)
    return topEdges, topNodes
|
# -*- coding: utf-8 -*-
def build_geometry(self, sym=1, alpha=0, delta=0, is_simplified=False):
    """Build geometry of the LamSquirrelCage

    Parameters
    ----------
    self :
        LamSquirrelCage Object
    sym : int
        Symmetry factor (1= full machine, 2= half of the machine...)
    alpha : float
        Angle for rotation [rad]
    delta : complex
        Complex value for translation
    is_simplified: bool
        True to avoid line superposition

    Returns
    -------
    list
        surf_list: list of surfaces
    """
    # Delegate the actual surface construction to the parent lamination class.
    # NOTE(review): super(type(self), self) recurses infinitely if a subclass
    # inherits this method without overriding it — confirm no subclass does.
    surf_list = super(type(self), self).build_geometry(
        sym=sym, is_simplified=is_simplified, alpha=alpha, delta=delta
    )
    # Winding surfaces are bars in a squirrel cage: relabel them accordingly.
    for surface in surf_list:
        if "Wind" in surface.label:
            surface.label = surface.label.replace("Wind", "Bar")
    return surf_list
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import closing
from datetime import datetime
from typing import Any, Optional
from urllib.parse import quote_plus
from sqlalchemy import create_engine
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.typing_compat import Protocol
class ConnectorProtocol(Protocol):
    """Structural type for DB-API style connector objects.

    Anything exposing a compatible ``connect`` callable satisfies this
    protocol; no inheritance is required.
    """

    def connect(self, host: str, port: int, username: str, schema: str) -> Any:
        """
        Open and return an authorized connection to a database.

        :param host: The database host to connect to.
        :param port: The database port to connect to.
        :param username: The database username used for the authentication.
        :param schema: The database schema to connect to.
        :return: the authorized connection object.
        """
class DbApiHook(BaseHook):
    """Abstract base class for sql hooks.

    Subclasses set ``conn_name_attr``/``default_conn_name`` and either a
    ``connector`` object or their own ``get_conn``; everything else
    (records, pandas frames, inserts, URI building) is generic DB-API 2.0.
    """

    # Override to provide the connection name.
    conn_name_attr = None  # type: str
    # Override to have a default connection id for a particular dbHook
    default_conn_name = 'default_conn_id'
    # Override if this db supports autocommit.
    supports_autocommit = False
    # Override with the object that exposes the connect method
    connector = None  # type: Optional[ConnectorProtocol]

    def __init__(self, *args, **kwargs):
        super().__init__()
        if not self.conn_name_attr:
            raise AirflowException("conn_name_attr is not defined")
        elif len(args) == 1:
            # Positional form: Hook('my_conn_id')
            setattr(self, self.conn_name_attr, args[0])
        elif self.conn_name_attr not in kwargs:
            setattr(self, self.conn_name_attr, self.default_conn_name)
        else:
            setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])

    def get_conn(self):
        """Returns a connection object"""
        db = self.get_connection(getattr(self, self.conn_name_attr))
        return self.connector.connect(host=db.host, port=db.port, username=db.login, schema=db.schema)

    def get_uri(self) -> str:
        """
        Extract the URI from the connection.

        :return: the extracted uri.
        """
        conn = self.get_connection(getattr(self, self.conn_name_attr))
        login = ''
        if conn.login:
            # BUG FIX: a login without a password is valid (e.g. trust auth);
            # quote_plus(None) raised TypeError, so default to ''.
            login = f'{quote_plus(conn.login)}:{quote_plus(conn.password or "")}@'
        host = conn.host
        if conn.port is not None:
            host += f':{conn.port}'
        uri = f'{conn.conn_type}://{login}{host}/'
        if conn.schema:
            uri += conn.schema
        return uri

    def get_sqlalchemy_engine(self, engine_kwargs=None):
        """
        Get an sqlalchemy_engine object.

        :param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
        :return: the created engine.
        """
        if engine_kwargs is None:
            engine_kwargs = {}
        return create_engine(self.get_uri(), **engine_kwargs)

    def get_pandas_df(self, sql, parameters=None, **kwargs):
        """
        Executes the sql and returns a pandas dataframe

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: dict or iterable
        :param kwargs: (optional) passed into pandas.io.sql.read_sql method
        :type kwargs: dict
        """
        from pandas.io import sql as psql

        with closing(self.get_conn()) as conn:
            return psql.read_sql(sql, con=conn, params=parameters, **kwargs)

    def get_records(self, sql, parameters=None):
        """
        Executes the sql and returns a set of records.

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: dict or iterable
        """
        with closing(self.get_conn()) as conn:
            with closing(conn.cursor()) as cur:
                if parameters is not None:
                    cur.execute(sql, parameters)
                else:
                    cur.execute(sql)
                return cur.fetchall()

    def get_first(self, sql, parameters=None):
        """
        Executes the sql and returns the first resulting row.

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: dict or iterable
        """
        with closing(self.get_conn()) as conn:
            with closing(conn.cursor()) as cur:
                if parameters is not None:
                    cur.execute(sql, parameters)
                else:
                    cur.execute(sql)
                return cur.fetchone()

    def run(self, sql, autocommit=False, parameters=None, handler=None):
        """
        Runs a command or a list of commands. Pass a list of sql
        statements to the sql parameter to get them to execute
        sequentially

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param autocommit: What to set the connection's autocommit setting to
            before executing the query.
        :type autocommit: bool
        :param parameters: The parameters to render the SQL query with.
        :type parameters: dict or iterable
        :param handler: The result handler which is called with the result of each statement.
        :type handler: callable
        :return: query results if handler was provided.
        """
        scalar = isinstance(sql, str)
        if scalar:
            sql = [sql]
        with closing(self.get_conn()) as conn:
            if self.supports_autocommit:
                self.set_autocommit(conn, autocommit)
            with closing(conn.cursor()) as cur:
                results = []
                for sql_statement in sql:
                    self._run_command(cur, sql_statement, parameters)
                    if handler is not None:
                        result = handler(cur)
                        results.append(result)
            # If autocommit was set to False for db that supports autocommit,
            # or if db does not supports autocommit, we do a manual commit.
            if not self.get_autocommit(conn):
                conn.commit()
        if handler is None:
            return None
        if scalar:
            return results[0]
        return results

    def _run_command(self, cur, sql_statement, parameters):
        """Runs a statement using an already open cursor."""
        self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
        if parameters:
            cur.execute(sql_statement, parameters)
        else:
            cur.execute(sql_statement)
        # According to PEP 249, this is -1 when query result is not applicable.
        if cur.rowcount >= 0:
            self.log.info("Rows affected: %s", cur.rowcount)

    def set_autocommit(self, conn, autocommit):
        """Sets the autocommit flag on the connection"""
        if not self.supports_autocommit and autocommit:
            self.log.warning(
                "%s connection doesn't support autocommit but autocommit activated.",
                getattr(self, self.conn_name_attr),
            )
        conn.autocommit = autocommit

    def get_autocommit(self, conn):
        """
        Get autocommit setting for the provided connection.
        Return True if conn.autocommit is set to True.
        Return False if conn.autocommit is not set or set to False or conn
        does not support autocommit.

        :param conn: Connection to get autocommit setting from.
        :type conn: connection object.
        :return: connection autocommit setting.
        :rtype: bool
        """
        return getattr(conn, 'autocommit', False) and self.supports_autocommit

    def get_cursor(self):
        """Returns a cursor"""
        return self.get_conn().cursor()

    @staticmethod
    def _generate_insert_sql(table, values, target_fields, replace, **kwargs):
        """
        Static helper method that generate the INSERT SQL statement.
        The REPLACE variant is specific to MySQL syntax.

        :param table: Name of the target table
        :type table: str
        :param values: The row to insert into the table
        :type values: tuple of cell values
        :param target_fields: The names of the columns to fill in the table
        :type target_fields: iterable of strings
        :param replace: Whether to replace instead of insert
        :type replace: bool
        :return: The generated INSERT or REPLACE SQL statement
        :rtype: str
        """
        placeholders = [
            "%s",
        ] * len(values)
        if target_fields:
            target_fields = ", ".join(target_fields)
            target_fields = f"({target_fields})"
        else:
            target_fields = ''
        if not replace:
            sql = "INSERT INTO "
        else:
            sql = "REPLACE INTO "
        sql += f"{table} {target_fields} VALUES ({','.join(placeholders)})"
        return sql

    def insert_rows(self, table, rows, target_fields=None, commit_every=1000, replace=False, **kwargs):
        """
        A generic way to insert a set of tuples into a table,
        a new transaction is created every commit_every rows

        :param table: Name of the target table
        :type table: str
        :param rows: The rows to insert into the table
        :type rows: iterable of tuples
        :param target_fields: The names of the columns to fill in the table
        :type target_fields: iterable of strings
        :param commit_every: The maximum number of rows to insert in one
            transaction. Set to 0 to insert all rows in one transaction.
        :type commit_every: int
        :param replace: Whether to replace instead of insert
        :type replace: bool
        """
        i = 0
        with closing(self.get_conn()) as conn:
            if self.supports_autocommit:
                self.set_autocommit(conn, False)
            conn.commit()
            with closing(conn.cursor()) as cur:
                for i, row in enumerate(rows, 1):
                    lst = []
                    for cell in row:
                        lst.append(self._serialize_cell(cell, conn))
                    values = tuple(lst)
                    sql = self._generate_insert_sql(table, values, target_fields, replace, **kwargs)
                    cur.execute(sql, values)
                    if commit_every and i % commit_every == 0:
                        conn.commit()
                        self.log.info("Loaded %s rows into %s so far", i, table)
            conn.commit()
        self.log.info("Done loading. Loaded a total of %s rows", i)

    @staticmethod
    def _serialize_cell(cell, conn=None):  # pylint: disable=unused-argument
        """
        Returns the SQL literal of the cell as a string.

        :param cell: The cell to insert into the table
        :type cell: object
        :param conn: The database connection
        :type conn: connection object
        :return: The serialized cell
        :rtype: str
        """
        if cell is None:
            return None
        if isinstance(cell, datetime):
            return cell.isoformat()
        return str(cell)

    def bulk_dump(self, table, tmp_file):
        """
        Dumps a database table into a tab-delimited file

        :param table: The name of the source table
        :type table: str
        :param tmp_file: The path of the target file
        :type tmp_file: str
        """
        raise NotImplementedError()

    def bulk_load(self, table, tmp_file):
        """
        Loads a tab-delimited file into a database table

        :param table: The name of the target table
        :type table: str
        :param tmp_file: The path of the file to load into the table
        :type tmp_file: str
        """
        raise NotImplementedError()

    def test_connection(self):
        """Tests the connection by executing a select 1 query"""
        status, message = False, ''
        try:
            with closing(self.get_conn()) as conn:
                with closing(conn.cursor()) as cur:
                    cur.execute("select 1")
                    if cur.fetchone():
                        status = True
                        message = 'Connection successfully tested'
        except Exception as e:  # noqa pylint: disable=broad-except
            status = False
            message = str(e)
        return status, message
|
from .property_plotter import PropertyPlotter
|
import json
import os
import datalabs
from datalabs.tasks import Summarization
_CITATION = None
_DESCRIPTION = """
Arxiv dataset for summarization.
From paper: A Discourse-Aware Attention Model for Abstractive Summarization of Long Documents" by A. Cohan et al.
See: https://aclanthology.org/N18-2097.pdf
See: https://github.com/armancohan/long-summarization
"""
_CITATION = """\
@inproceedings{cohan-etal-2018-discourse,
title = "A Discourse-Aware Attention Model for Abstractive Summarization of Long Documents",
author = "Cohan, Arman and
Dernoncourt, Franck and
Kim, Doo Soon and
Bui, Trung and
Kim, Seokhwan and
Chang, Walter and
Goharian, Nazli",
booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers)",
month = jun,
year = "2018",
address = "New Orleans, Louisiana",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/N18-2097",
doi = "10.18653/v1/N18-2097",
pages = "615--621",
abstract = "Neural abstractive summarization models have led to promising results in summarizing relatively short documents. We propose the first model for abstractive summarization of single, longer-form documents (e.g., research papers). Our approach consists of a new hierarchical encoder that models the discourse structure of a document, and an attentive discourse-aware decoder to generate the summary. Empirical results on two large-scale datalab of scientific papers show that our model significantly outperforms state-of-the-art models.",
}
"""
_ABSTRACT = "summary"
_ARTICLE = "text"
class ArxivSumConfig(datalabs.BuilderConfig):
    """BuilderConfig for ArxivSummarization."""

    def __init__(self, **kwargs):
        """BuilderConfig for ArxivSummarization.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() is the modern Python 3 spelling of
        # super(ArxivSumConfig, self); behavior is identical here.
        super().__init__(**kwargs)
class ArxivSumDataset(datalabs.GeneratorBasedBuilder):
    """ArxivSummarization Dataset."""

    # Hosted zip archives; each one contains a single jsonlines file
    # (one paper per line) named <split>.txt.
    _TRAIN_FILE = "https://huggingface.co/datalab/ccdv/arxiv-summarization/resolve/main/train.zip"
    _VAL_FILE = "https://huggingface.co/datalab/ccdv/arxiv-summarization/resolve/main/val.zip"
    _TEST_FILE = "https://huggingface.co/datalab/ccdv/arxiv-summarization/resolve/main/test.zip"

    # Two variants: "document" joins article_text as-is, "section" flattens
    # and joins the sentences of every section (see _generate_examples).
    BUILDER_CONFIGS = [
        ArxivSumConfig(
            name="section",
            version=datalabs.Version("1.0.0"),
            description="Arxiv dataset for summarization, concatenated sections",
        ),
        ArxivSumConfig(
            name="document",
            version=datalabs.Version("1.0.0"),
            description="Arxiv dataset for summarization, document",
        ),
    ]
    DEFAULT_CONFIG_NAME = "document"

    def _info(self):
        # Should return a datalab.DatasetInfo object
        return datalabs.DatasetInfo(
            description=_DESCRIPTION,
            features=datalabs.Features(
                {
                    _ARTICLE: datalabs.Value("string"),
                    _ABSTRACT: datalabs.Value("string"),
                    # "id": datalab.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/armancohan/long-summarization",
            citation=_CITATION,
            task_templates=[Summarization(
                text_column=_ARTICLE,
                summary_column=_ABSTRACT),
            ],
        )

    def _split_generators(self, dl_manager):
        # download_and_extract returns the extraction directory; the payload
        # file inside each archive is <split>.txt.
        train_path = dl_manager.download_and_extract(self._TRAIN_FILE) + "/train.txt"
        val_path = dl_manager.download_and_extract(self._VAL_FILE) + "/val.txt"
        test_path = dl_manager.download_and_extract(self._TEST_FILE) + "/test.txt"
        return [
            datalabs.SplitGenerator(
                name=datalabs.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datalabs.SplitGenerator(
                name=datalabs.Split.VALIDATION, gen_kwargs={"filepath": val_path}
            ),
            datalabs.SplitGenerator(
                name=datalabs.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        """Generate ArxivSummarization examples."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                # Each jsonlines record carries the fields listed below.
                """
                'article_id': str,
                'abstract_text': List[str],
                'article_text': List[str],
                'section_names': List[str],
                'sections': List[List[str]]
                """
                if self.config.name == "document":
                    article = data["article_text"]
                else:
                    # "section" config: flatten all sections into one sentence list.
                    article = [item.strip() for sublist in data["sections"] for item in sublist]
                abstract = data["abstract_text"]
                yield id_, {"text": ' '.join(article), "summary": ' '.join(abstract)}
|
from pyramid.config import Configurator
from pyramid.threadlocal import get_current_registry
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    config = Configurator(settings=settings)
    config.include('pyramid_chameleon')
    config.add_static_view('static', 'static', cache_max_age=3600)
    # Registration order matters for URL matching priority.
    for route_name, pattern in (
        ('home', '/'),
        ('article', '/wiki/{article}'),
        ('search', '/search'),
    ):
        config.add_route(route_name, pattern)
    # Also expose the settings through the thread-local registry.
    get_current_registry().settings = settings
    config.scan()
    return config.make_wsgi_app()
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import collections
import copy
import datetime
import os
import shutil
import sys
import traceback
import yaml
import six
import re
def parse_opts(argv):
    """Parse command-line options for the NIC config conversion tool."""
    parser = argparse.ArgumentParser(
        description='Convert an old style NIC config file into the new format using '
                    'run-os-net-config.sh')
    parser.add_argument('--script-dir', metavar='<script directory>',
                        default="network/scripts/run-os-net-config.sh",
                        help="Relative path to run-os-net-config.sh")
    parser.add_argument('files', metavar='<file>', nargs="+",
                        help='List of one or more NIC config files to convert')
    parser.add_argument('--yes', action='store_true',
                        help=("Use --yes to skip the confirmation "
                              "to overwrite the original config file "))
    # argv[0] is the program name; argparse only wants the arguments.
    return parser.parse_args(argv[1:])
# convert comments into 'comment<n>_<indent>: ...' YAML entries
def to_commented_yaml(filename):
    """Rewrite *filename* so '#' comments survive a YAML round-trip.

    Full-line comments become ``comment<n>_<indent>: '<text>'`` keys and
    inline comments become ``inline_comment<n>: '<text>'`` keys;
    to_normal_yaml() reverses the transformation. Returns the rewritten
    file content (which is also written back to *filename*).
    """
    out_str = ''
    last_non_comment_spaces = ''
    with open(filename, 'r') as f:
        comment_count = 0
        for line in f:
            # skip blank lines entirely
            if line.isspace():
                continue
            char_count = 0
            spaces = ''
            for char in line:
                char_count += 1
                if char == ' ':
                    # still inside the leading indentation
                    # (the original had a stray no-op `next;` here)
                    spaces += ' '
                elif char == '#':
                    # full-line comment: encode its text and indentation width
                    last_non_comment_spaces = spaces
                    comment_count += 1
                    comment = line[char_count:-1]
                    out_str += "%scomment%i_%i: '%s'\n" % (last_non_comment_spaces, comment_count, len(spaces), comment)
                    break
                else:
                    # first real character: copy the line through unchanged
                    last_non_comment_spaces = spaces
                    out_str += line
                    # inline comments check
                    m = re.match(".*:.*#(.*)", line)
                    if m:
                        comment_count += 1
                        out_str += "%s inline_comment%i: '%s'\n" % (last_non_comment_spaces, comment_count, m.group(1))
                    break
    with open(filename, 'w') as f:
        f.write(out_str)
    return out_str
# convert back to normal #commented YAML
def to_normal_yaml(filename):
    """Reverse to_commented_yaml(): turn comment keys back into '#' comments.

    Returns the restored file content (also written back to *filename*).
    """
    with open(filename, 'r') as f:
        data = f.read()
    out_str = ''
    next_line_break = False
    for line in data.split('\n'):
        # get_input not supported by run-os-net-config.sh script
        line = line.replace('get_input: ', '')
        # BUG FIX: ' *' instead of ' +' so top-level comments (encoded with
        # zero leading spaces) are restored too; the original pattern
        # silently left them as commentN_0 keys. Inline comment keys are
        # always written with one leading space, so ' +' stays correct there.
        m = re.match(" *comment[0-9]+_([0-9]+): '(.*)'.*", line)  # normal comments
        i = re.match(" +inline_comment[0-9]+: '(.*)'.*", line)  # inline comments
        if m:
            if next_line_break:
                out_str += '\n'
                next_line_break = False
            # group(1) encodes the original indentation width
            out_str += " " * int(m.group(1))
            out_str += "#%s\n" % m.group(2)
        elif i:
            out_str += " #%s\n" % i.group(1)
            next_line_break = False
        else:
            if next_line_break:
                out_str += '\n'
            out_str += line
            next_line_break = True
    if next_line_break:
        out_str += '\n'
    with open(filename, 'w') as f:
        f.write(out_str)
    return out_str
class description(six.text_type):
    """Marker string type: values wrapped in this class are serialized by
    TemplateDumper.description_presenter (YAML folded '>' style when the
    text spans multiple lines)."""
    pass
# FIXME: Some of this duplicates code from build_endpoint_map.py, we should
# refactor to share the common code
class TemplateDumper(yaml.SafeDumper):
    def represent_ordered_dict(self, data):
        # Emit OrderedDicts as plain YAML mappings, preserving key order.
        return self.represent_dict(data.items())

    def description_presenter(self, data):
        # Multi-line description text is emitted in YAML folded ('>') style;
        # single-line text uses the default plain style.
        if '\n' in data:
            style = '>'
        else:
            style = ''
        return self.represent_scalar(
            yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG, data, style=style)
# We load mappings into OrderedDict to preserve their order
class TemplateLoader(yaml.SafeLoader):
    def construct_mapping(self, node):
        # Resolve YAML merge keys first, then build an order-preserving mapping.
        self.flatten_mapping(node)
        return collections.OrderedDict(self.construct_pairs(node))
# Register the custom presenters/constructors so load -> dump round trips
# keep mapping order and render descriptions in folded style.
TemplateDumper.add_representer(description,
                               TemplateDumper.description_presenter)
TemplateDumper.add_representer(collections.OrderedDict,
                               TemplateDumper.represent_ordered_dict)
TemplateLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                               TemplateLoader.construct_mapping)
def write_template(template, filename=None):
    """Serialize *template* to *filename* with the project's YAML conventions."""
    with open(filename, 'w') as out_file:
        yaml.dump(template, out_file, TemplateDumper,
                  width=120, default_flow_style=False)
def convert(filename, script_path):
    """Convert one old-style NIC config template in place.

    Rewrites every OS::Heat::StructuredConfig / os-apply-config resource
    holding an os_net_config blob into an OS::Heat::SoftwareConfig script
    resource that feeds the blob to run-os-net-config.sh via str_replace.

    Returns 1 when the file was converted and written, 0 otherwise.
    """
    print('Converting %s' % filename)
    try:
        # Context manager so the handle is always closed; the original
        # leaked an open file object via yaml.load(open(filename).read()).
        with open(filename) as f:
            tpl = yaml.load(f.read(), Loader=TemplateLoader)
    except Exception:
        print(traceback.format_exc())
        return 0
    for r in (tpl.get('resources', {})).items():
        if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
                r[1].get('properties', {}).get('group') == 'os-apply-config' and
                r[1].get('properties', {}).get('config', {}).get('os_net_config')):
            # Build the replacement SoftwareConfig resource.
            new_r = collections.OrderedDict()
            new_r['type'] = 'OS::Heat::SoftwareConfig'
            new_r['properties'] = collections.OrderedDict()
            new_r['properties']['group'] = 'script'
            old_net_config = r[1].get(
                'properties', {}).get('config', {}).get('os_net_config')
            # Feed the old network config blob to run-os-net-config.sh.
            new_config = {'str_replace': collections.OrderedDict()}
            new_config['str_replace']['template'] = {'get_file': script_path}
            new_config['str_replace']['params'] = {'$network_config': old_net_config}
            new_r['properties']['config'] = new_config
            tpl['resources'][r[0]] = new_r
        else:
            # Any non-matching resource aborts conversion of this file
            # (same behavior as the original script).
            print("No match %s" % r[0])
            return 0
    # Preserve typical HOT template key ordering
    od_result = collections.OrderedDict()
    # Need to bump the HOT version so str_replace supports serializing to json
    od_result['heat_template_version'] = "rocky"
    if tpl.get('description'):
        od_result['description'] = description(tpl['description'])
    od_result['parameters'] = tpl['parameters']
    od_result['resources'] = tpl['resources']
    od_result['outputs'] = tpl['outputs']
    write_template(od_result, filename)
    return 1
def check_old_style(filename):
    """Return True when *filename* contains an old-style (os-apply-config)
    NIC configuration resource that still needs conversion."""
    # Read through the context-managed handle; the original ignored `f` and
    # re-opened the file, leaking the second handle. SafeLoader is explicit:
    # these templates are plain YAML mappings with no custom tags.
    with open(filename, 'r') as f:
        tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
    if isinstance(tpl.get('resources', {}), dict):
        for r in (tpl.get('resources', {})).items():
            if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
                    r[1].get('properties', {}).get('group') == 'os-apply-config' and
                    r[1].get('properties', {}).get('config', {}).get('os_net_config')):
                return True
    return False
# ---- script entry point: convert each NIC config file named on the CLI ----
opts = parse_opts(sys.argv)
exit_val = 0
num_converted = 0
for base_path in opts.files:
    if os.path.isfile(base_path) and base_path.endswith('.yaml'):
        if check_old_style(base_path):
            # Check for script in the user entered (or default) location or in
            # path relative to NIC config files
            script_paths = [opts.script_dir]
            script_paths.append('../../scripts/run-os-net-config.sh')
            script_paths.append('../network/scripts/run-os-net-config.sh')
            script_paths.append(
                '/usr/share/openstack-tripleo-heat-templates/network/scripts/run-os-net-config.sh')
            # Use the first candidate that exists relative to the config file.
            script_path = None
            for p in script_paths:
                if os.path.isfile(os.path.join(os.path.dirname(base_path), p)):
                    script_path = p
                    break
            if script_path is None:
                print("Error couldn't find run-os-net-config.sh relative to filename")
                sys.exit(1)
            print("Using script at %s" % script_path)
            # Keep a timestamped backup of the original before overwriting.
            extension = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
            backup_filename = os.path.realpath(base_path) + '.' + extension
            print('The yaml file will be overwritten and the original saved as %s'
                  % backup_filename)
            # --yes skips the interactive confirmation prompt.
            if not (opts.yes or input("Overwrite %s? [y/n] " % base_path).lower() == 'y'):
                print("Skipping file %s" % base_path)
                continue
            if os.path.exists(backup_filename):
                print("Backup file already exists, skipping file %s" % base_path)
                continue
            shutil.copyfile(base_path, backup_filename)
            # Round-trip: encode comments -> convert resources -> restore comments.
            to_commented_yaml(base_path)
            num_converted += convert(base_path, script_path)
            to_normal_yaml(base_path)
        else:
            print('File %s is not using old style NIC configuration' % base_path)
    else:
        print('Unexpected argument %s' % base_path)
# Non-zero exit status when nothing was converted, so callers detect a no-op.
if num_converted == 0:
    exit_val = 1
sys.exit(exit_val)
|
"""DAG demonstrating the umbrella use case with dummy operators."""
import airflow.utils.dates
from airflow import DAG
from airflow.operators.dummy import DummyOperator
dag = DAG(
    dag_id="01_umbrella",
    description="Umbrella example with DummyOperators.",
    start_date=airflow.utils.dates.days_ago(5),
    schedule_interval="@daily",
)


def _make_task(task_id):
    """Create a no-op placeholder task attached to the umbrella DAG."""
    return DummyOperator(task_id=task_id, dag=dag)


fetch_weather_forecast = _make_task("fetch_weather_forecast")
fetch_sales_data = _make_task("fetch_sales_data")
clean_forecast_data = _make_task("clean_forecast_data")
clean_sales_data = _make_task("clean_sales_data")
join_datasets = _make_task("join_datasets")
train_ml_model = _make_task("train_ml_model")
deploy_ml_model = _make_task("deploy_ml_model")

# Wire the two fetch/clean branches into the shared tail of the pipeline.
fetch_weather_forecast >> clean_forecast_data
fetch_sales_data >> clean_sales_data
[clean_forecast_data, clean_sales_data] >> join_datasets
join_datasets >> train_ml_model >> deploy_ml_model
|
import boto3
from Slack_Lambda_Layer import *
# Slack user id of the bot itself (used when inviting it to channels).
bot_user_id = 'UECS2J05D'


def get_key_from_ddb(key):
    """Fetch the alert-log item whose messageID equals *key*.

    Returns the raw DynamoDB item mapping, or False when no item exists.
    """
    ddb = boto3.client('dynamodb')
    response = ddb.get_item(
        TableName='alert-log',
        Key={'messageID': {'S': key}},
    )
    # dict.get collapses the original membership test + lookup into one call.
    return response.get('Item', False)
def get_users_from_ddb(team):
    """Scan the 'user' table for members whose teams contain *team* (lowercased)."""
    ddb = boto3.client('dynamodb')
    scan_filter = {
        'teams': {
            'AttributeValueList': [{'S': team.lower()}],
            'ComparisonOperator': 'CONTAINS',
        }
    }
    response = ddb.scan(TableName='user', ScanFilter=scan_filter)
    return response['Items']
def result(result_type, content, args=None):
    """Build a Lex-style dialogAction response payload.

    :param result_type: dialog action type ('Close', 'ElicitSlot', ...)
    :param content: plain-text message shown to the user
    :param args: extra fields per action type: 'fulfillmentState' for Close,
        'slots'/'slotToElicit' for ElicitSlot; optional for other types.
    :return: the response dict
    """
    # None default instead of a mutable `{}` default argument (which is
    # shared across calls); behavior is otherwise unchanged.
    if args is None:
        args = {}
    result = {
        'sessionAttributes': {},
        'dialogAction': {
            'type': result_type,
            'message': {
                'contentType': 'PlainText',
                'content': content
            }
        }
    }
    if result_type == 'Close':
        result['dialogAction']['fulfillmentState'] = args['fulfillmentState']
    elif result_type == 'ElicitSlot':
        result['dialogAction']['intentName'] = 'CreateSlackChannelIntent'
        result['dialogAction']['slots'] = args['slots']
        result['dialogAction']['slotToElicit'] = args['slotToElicit']
    return result
def lambda_handler(event, context):
    """Intent handler: create (or revive) a Slack incident channel and invite
    the requested team.

    Reads the 'team' and 'incidentId' slots from the event, looks the
    incident up in DynamoDB, then drives the Slack helpers imported from
    Slack_Lambda_Layer. Returns a dialogAction payload built by result(),
    or a plain status dict when 'bot' is absent from the event.
    """
    team = event['currentIntent']['slots']['team']
    incidentId = event['currentIntent']['slots']['incidentId']
    incident = get_key_from_ddb(incidentId)
    # Unknown incident id: ask the user for the incidentId slot again.
    if not incident:
        return result('ElicitSlot', 'There is no incident with id "' + str(incidentId) + '". Please try another one.', {'slots': {'team': team, 'incidentId': incidentId}, 'slotToElicit': 'incidentId'})
    channel_name = 'incident_' + incident['messageID']['S']
    message = ''
    try:
        channel = create_channel(channel_name)
        try:
            # The bot must be in the channel to post; these three errors
            # mean it effectively already is, so they are ignored.
            invite_to_channel(channel['id'], bot_user_id)
        except SlackException as e:
            if e.error in ['already_in_channel', 'user_not_found', 'cant_invite_self']:
                pass
            else:
                return result('Failed', 'The method "' + e.method + '" failed with error "' + e.error + '"')
        # Topic/purpose embed the incident message, truncated to 200 chars.
        set_channel_topic(channel['id'], 'Incident with ID: ' + incident['messageID']['S'] + ' and message: "' + (incident['message']['S'][:200] + '[...]' if len(incident['message']['S']) > 200 else incident['message']['S']) + '"')
        set_channel_purpose(channel['id'], 'Resolving incident with ID: ' + incident['messageID']['S'] + ' and message: "' + (incident['message']['S'][:200] + '[...]' if len(incident['message']['S']) > 200 else incident['message']['S']) + '"')
        post_message(channel['id'], 'I created this channel for you to handle the incident with the message: "' + incident['message']['S'] + '".\n\nLet\'s resolve this issue as fast as possible! :rocket:')
        message += 'The Slack channel "' + channel_name + '" has been created. '
    except SlackException as e:
        # Channel already exists: re-use it, unarchiving first if needed.
        if e.error == 'name_taken':
            channel = [c for c in get_channels() if c['name'] == channel_name][0]
            if channel['is_archived']:
                unarchive_channel(channel['id'])
                message += 'The Slack channel "' + channel_name + '" has been unarchived. '
            try:
                invite_to_channel(channel['id'], bot_user_id)
            except SlackException as e:
                if e.error in ['already_in_channel', 'user_not_found', 'cant_invite_self']:
                    pass
                else:
                    return result('Failed', 'The method "' + e.method + '" failed with error "' + e.error + '"')
            join_channel(channel_name)
        else:
            return result('Close', 'The method "' + e.method + '" failed with error "' + e.error + '"', {'fulfillmentState': 'Failed'})
    users = get_users_from_ddb(team)
    # Unknown team: ask the user for the team slot again.
    if len(users) == 0:
        return result('ElicitSlot', 'The team "' + team + '" could not be found. Please try another one.', {'slots': {'team': team, 'incidentId': incidentId}, 'slotToElicit': 'team'})
    for user in users:
        try:
            invite_to_channel(channel['id'], user['slackUserID']['S'])
        except SlackException as e:
            if e.error in ['already_in_channel', 'user_not_found', 'cant_invite_self']:
                pass
            else:
                return result('Failed', 'The method "' + e.method + '" failed with error "' + e.error + '"')
    message += 'The team "' + team + '" has been invited to the channel. '
    post_message(channel['id'], 'Welcome, team ' + team + '! :wave:')
    # 'bot' in the event marks a bot invocation; otherwise return plain status.
    if 'bot' in event:
        return result('Close', message, {'fulfillmentState': 'Fulfilled'})
    else:
        return {'channel_creation': message}
|
#! /usr/bin/env python3
import sys
# Must be appended before `import ROOT`: '-b' puts ROOT in batch mode
# (no graphics windows).
sys.argv.append( '-b' ) # batch mode
import os
import ROOT
import yaml
import math
from ROOT import gROOT
# gROOT.LoadMacro("asdf.cxx")
from ROOT import TProfile2D,TProfile
import array
# Sum up the bins of the y-axis, return array of (val,err)
def SumUpProfile(Pf2, CentBin):
    """Project *Pf2* onto its y-axis and return [(value, error), ...] per y bin.

    :param Pf2: TProfile2D-like object (uses GetName/GetYaxis/ProfileY)
    :param CentBin: centrality bin index, used only to name the projection
    """
    YProj = Pf2.ProfileY(("%s_pfy_Cent%d" % (Pf2.GetName(), CentBin)))
    nBinsY = Pf2.GetYaxis().GetNbins()
    # ROOT bins are 1-based (bin 0 is underflow). The unused nBinsX lookup
    # and manual append loop from the original are gone; output is identical.
    return [(YProj.GetBinContent(j + 1), YProj.GetBinError(j + 1)) for j in range(nBinsY)]
# for i in range (nBinsX):
# localSum += Pf2.GetBinContent(i,j)
# localErr
# delete YProj
def _PrintHarmonicResolutions(localDir, CentBin, name_fmt, result_fmt):
    """Compute and print three-subevent EP resolutions for one harmonic.

    name_fmt   -- profile-name format taking (D index, N index),
                  e.g. "EPR_CosD%d_N%d".
    result_fmt -- print format taking (N index, value, uncertainty).
    """
    for i in range(6):  # individual N values
        LocalVals = []
        for j in range(3):  # individual Dn values
            Pf2 = localDir.FindObject(name_fmt % (j + 1, i + 1))
            # One (value, error) tuple per Y bin of the projection.
            LocalVals.append(SumUpProfile(Pf2, CentBin))
        LocalRn = 0
        LocalRn_Un = 0
        MeanCosD1, MeanCosD1_Un = LocalVals[0][CentBin]
        MeanCosD2, MeanCosD2_Un = LocalVals[1][CentBin]
        MeanCosD3, MeanCosD3_Un = LocalVals[2][CentBin]
        if MeanCosD3 > 0.:
            # Three-subevent method: R = sqrt(<cosD1><cosD2>/<cosD3>), with
            # standard error propagation over the three means.
            LocalRn = math.sqrt((MeanCosD1 * MeanCosD2) / MeanCosD3)
            LocalRn_Un = LocalRn * math.sqrt(
                (0.5) * math.pow(MeanCosD1_Un / MeanCosD1, 2)
                + (0.5) * math.pow(MeanCosD2_Un / MeanCosD2, 2)
                + (0.5) * math.pow(MeanCosD3_Un / MeanCosD3, 2))
        print(result_fmt % (i + 1, LocalRn, LocalRn_Un))


def CalcEventPlaneResolution():
    """Compute event-plane resolutions R_{i,n} (n = 2, 3, 4) from AnalysisResults.root.

    Scans the file for AliEmcalList directories, keeps only the Pi0Cand
    wagons, extracts the centrality bin from the directory name, and prints
    the resolution and its uncertainty for each N index of every harmonic.
    """
    print("---------------------------------")
    print("| Starting Event Plane Res Calc.|")
    print("---------------------------------")
    InputFileName = "AnalysisResults.root"
    InputFile = ROOT.TFile(InputFileName)
    if not InputFile:
        # Originally execution continued after this message and crashed on the
        # next call; bail out early instead.
        print("Could not open file")
        return
    # Collect all AliEmcalList directories by name.
    dirs = {}
    key_iter = InputFile.GetListOfKeys().MakeIterator()
    key = key_iter.Next()
    while key:
        if key.GetClassName() == 'AliEmcalList':
            td = key.ReadObj()
            print("found directory", td.GetName())
            dirs[td.GetName()] = td
        key = key_iter.Next()
    for dirName in dirs:
        print(dirName)
        # just use Pi0Cand wagons for now.
        if 'Pi0Cand' not in dirName:
            print("\tNot using this for EPR")
            continue
        # Centrality bin is the single digit following 'Cent' in the name.
        CentIndex = 4 + dirName.find('Cent')
        CentBin = int(dirName[CentIndex:CentIndex + 1])
        print("Cent Bin %d" % CentBin)
        localDir = InputFile.Get(dirName)
        # EP2 profiles are named EPR_CosD%d_N%d; EP3/EP4 use EP3R_/EP4R_.
        print("Finding EPRs for Event Plane 2")
        _PrintHarmonicResolutions(localDir, CentBin, "EPR_CosD%d_N%d",
                                  "Found R_{%d,2} = %f \\pm %f")
        print("Finding EPRs for Event Plane 3")
        _PrintHarmonicResolutions(localDir, CentBin, "EP3R_CosD%d_N%d",
                                  "Found EP3 R_{%d,3} = %f \\pm %f")
        print("Finding EPRs for Event Plane 4")
        _PrintHarmonicResolutions(localDir, CentBin, "EP4R_CosD%d_N%d",
                                  "Found EP4 R_{%d,4} = %f \\pm %f")
# Run the resolution calculation when invoked as a script.
if __name__ == '__main__':
    CalcEventPlaneResolution()
|
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[deeplens.python.deeplens_frameworks_mxnet.export_model_gluon_api]
import mxnet as mx
from mxnet.gluon.model_zoo import vision

# Load a pretrained SqueezeNet v1.0 on the CPU context.
squeezenet = vision.squeezenet_v1(pretrained=True, ctx=mx.cpu())
# To export, you need to hybridize your gluon model,
squeezenet.hybridize()
# SqueezeNet's input pattern is 224 pixel X 224 pixel images. Prepare a fake image,
fake_image = mx.nd.random.uniform(shape=(1, 3, 224, 224), ctx=mx.cpu())
# Run the model once so the hybridized graph is built and can be serialized.
result = squeezenet(fake_image)
# Now you can export the model. You can use a path if you want 'models/squeezenet'.
# (The original line used a Unicode left quote, which is a SyntaxError.)
squeezenet.export('squeezenet')
# snippet-end:[deeplens.python.deeplens_frameworks_mxnet.export_model_gluon_api]
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[deeplens_frameworks_mxnet.py demonstrates how to create an inference Lambda function on an AWS DeepLens model.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS Lambda]
# snippet-keyword:[Code Sample]
# snippet-keyword:[AWS DeepLens]
# snippet-service:[deeplens]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-07]
# snippet-sourceauthor:[AWS]
|
import logging
import os
import socket
import threading
import time
import dateutil.parser
import schedule
from watchdog.events import EVENT_TYPE_CREATED
from watchdog.events import FileSystemEvent
from lib.media_file_processing import MediaProcessingThread
from lib.media_file_state import MediaFileState
from lib.nodes.node_state import NodeState
from lib.utils import compare_list
from lib.connection_manager import ConnectionManager
from lib import logger
class MediaProcessing(object):
    """Coordinates media-file transcoding.

    Runs MediaProcessingThread workers one at a time, mirrors the local
    node's suspend/resume state, and applies configured "silent periods"
    during which processing is paused.
    """

    # Seconds to wait before scanning for new media files after a worker exits.
    SCAN_FOR_NEW_MEDIA_FILES_FOR_PROCESSING_TIMEOUT = 10

    def __init__(self, mfq, handbreak_command, handbreak_timeout, nodes, delete):
        """
        :param mfq: media file queue mapping files to their MediaFileState
        :param handbreak_command: transcode command passed to worker threads
        :param handbreak_timeout: timeout for the transcode command
        :param nodes: node registry; drives suspend/resume of this host
        :param delete: whether workers delete source files when done
        """
        self.mfq = mfq
        self.handbreak_command = handbreak_command
        self.handbreak_timeout = handbreak_timeout
        self.delete = delete
        self.system_call_thread = None  # currently running worker thread, if any
        self.exiting = False
        self.lock = threading.Lock()
        self.nodes = nodes
        self.last_silent_periods = None  # last scheduled periods, for change detection
        self.suspended = False

    def get_queue_files(self):
        """Return a dict of queued media files keyed by media file id."""
        result = {}
        for media_file in self.mfq:
            result[media_file.id] = media_file
        return result

    @ConnectionManager.connection(transaction=True)
    def delete_media_file(self, media_file):
        """Delete *media_file* from the queue; refuse while it is processing."""
        if media_file in self.mfq and self.mfq[media_file].status != MediaFileState.PROCESSING:
            del self.mfq[media_file]
        else:
            raise Exception('can\'t delete {} while it\'s processing'.format(media_file))

    @ConnectionManager.connection(transaction=True)
    def retry_media_files(self, media_file=None):
        """Reset one media file (or, with no argument, all of them) to WAITING."""
        if not media_file:
            logger.info("Retrying all media files")
            for media_file in self.mfq:
                self.mfq[media_file.id, media_file.file_path] = MediaFileState.WAITING
        else:
            logger.info("Retrying [{}] media file".format(media_file))
            self.mfq[media_file] = MediaFileState.WAITING

    def start(self):
        """Main loop: keep launching worker threads until stop() is called.

        While a worker is alive this polls the node state and the silent
        period schedule every 10 seconds.
        """
        while not self.exiting:
            with self.lock:
                self.system_call_thread = MediaProcessingThread(self.mfq,
                                                                self.handbreak_command,
                                                                self.handbreak_timeout,
                                                                self.delete,
                                                                name=MediaProcessingThread.__module__)
                self.system_call_thread.start()
                # is_alive() replaces isAlive(), which was removed in
                # Python 3.9 (is_alive exists since 2.6, so this stays
                # compatible with both runtimes).
                while self.system_call_thread.is_alive():
                    self.__check_media_processing_state()
                    self.__schedule_silent_periods()
                    time.sleep(10)
                self.system_call_thread = None
            time.sleep(self.SCAN_FOR_NEW_MEDIA_FILES_FOR_PROCESSING_TIMEOUT)

    def stop(self):
        """Signal the main loop to exit and wait for the current worker."""
        self.exiting = True
        if self.system_call_thread:
            self.system_call_thread.join()

    @ConnectionManager.connection(transaction=True)
    def initial_processing(self, watch_directories, event_handler):
        """Replay synthetic 'created' events for files already present in the
        watch directories so pre-existing files get queued."""
        for watch_directory in watch_directories:
            for root, dir_names, file_names in os.walk(watch_directory):
                for filename in file_names:
                    # NOTE(review): .decode('utf-8') implies Python 2 byte
                    # paths; under Python 3 os.path.join returns str and this
                    # raises AttributeError -- confirm target runtime.
                    file_path = os.path.join(root, filename).decode('utf-8')
                    if file_path not in self.mfq or (
                            file_path in self.mfq
                            and self.mfq[file_path].status != MediaFileState.FAILED):
                        file_event = FileSystemEvent(file_path)
                        file_event.is_directory = False
                        file_event.event_type = EVENT_TYPE_CREATED
                        event_handler.on_any_event(file_event)

    def __check_media_processing_state(self):
        """Mirror this host's node state: suspend or resume the worker."""
        if not self.suspended and self.nodes[socket.gethostname()].status == NodeState.SUSPENDED:
            self.__suspend_media_processing()
            self.suspended = True
        elif self.suspended and self.nodes[socket.gethostname()].status == NodeState.ONLINE:
            self.__resume_media_processing()
            self.suspended = False

    def __schedule_silent_periods(self):
        """(Re)build the suspend/resume schedule when the configured silent
        periods change, then run any pending scheduled jobs."""
        try:
            periods = self.nodes.get_silent_periods(socket.gethostname())
            if not self.last_silent_periods or not compare_list(self.last_silent_periods, periods):
                schedule.clear()
                for period in periods:
                    # Each period is "<start>-<end>"; both parts must parse as times.
                    split_period = period.split('-')
                    starting_time = dateutil.parser.parse(split_period[0])
                    end_time = dateutil.parser.parse(split_period[1])
                    schedule.every().day.at(starting_time.strftime("%H:%M")).do(self.__suspend_media_processing)
                    schedule.every().day.at(end_time.strftime("%H:%M")).do(self.__resume_media_processing)
                self.last_silent_periods = periods
                logger.debug("new silent periods rescheduled {}".format(periods))
            schedule.run_pending()
        except Exception:
            # Any failure (no config, parse error) is treated as "no periods".
            logger.debug('no silent periods configured')

    def __suspend_media_processing(self):
        """Pause the running worker thread; raises if none is running."""
        if self.system_call_thread:
            self.system_call_thread.suspend_media_processing()
        else:
            raise Exception('no running media processing found')

    def __resume_media_processing(self):
        """Resume a suspended worker thread; raises if none is running."""
        if self.system_call_thread:
            self.system_call_thread.resume_media_processing()
        else:
            raise Exception('no running media processing found')
|
#!/usr/bin/env python2
import sys
from jsonschema import Draft4Validator, validate
import json
def include_fileref(parent, mykey, dic):
    """Recursively replace '$ref' file references in *dic* with the parsed
    contents of the referenced schema file.

    A node containing a '$ref' key causes parent[mykey] to be replaced by the
    JSON loaded from '../<path>'.  Non-dict values are left untouched.

    :param parent: dict containing *dic* under *mykey* (None at the top level)
    :param mykey: key of *dic* inside *parent* (None at the top level)
    :param dic: dict to scan for '$ref' entries
    """
    # list() so iteration is safe on Python 3 as well as 2.
    for key in list(dic.keys()):
        if key == '$ref':
            # assumes refs look like 'file:<path>' (first five characters
            # are dropped) -- TODO confirm against the schema files.
            fpath = '../' + dic[key][5:]
            # 'with' guarantees the schema file handle is closed.
            with open(fpath, 'r') as schemafile:
                parent[mykey] = json.loads(schemafile.read())
        elif isinstance(dic[key], dict):
            include_fileref(dic, key, dic[key])
def main():
    """Validate the JSON config file named in argv[1] against schema.json.

    Loads schema.json, inlines its file references, checks the schema itself
    is a valid Draft 4 schema, then validates the config against it.
    Raises jsonschema.ValidationError / SchemaError on failure.
    """
    with open('schema.json', 'r') as schemafile:
        config_schema = json.loads(schemafile.read())
    # Inline any '$ref' file references before validating.
    include_fileref(None, None, config_schema)
    Draft4Validator.check_schema(config_schema)
    with open(sys.argv[1], 'r') as configfile:
        config = json.loads(configfile.read())
    validate(config, config_schema)
# Validate the config given on the command line when run as a script.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test OGR INDEX support.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
from osgeo import ogr
import ogrtest
###############################################################################
# Create a MIF file to be our primary table.
def ogr_index_1():
    """Create index_p.mif (one PKEY integer column, six features) as the
    primary table for the join/index tests, then reopen it read-only."""
    from osgeo import gdal
    # Best-effort cleanup of leftovers from a previous run; failures (e.g.
    # the datasource not existing) are deliberately ignored and silenced.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    try:
        ogr.GetDriverByName( 'MapInfo File' ).DeleteDataSource( 'index_p.mif' )
    except:
        pass
    try:
        ogr.GetDriverByName( 'ESRI Shapefile' ).DeleteDataSource( 'join_t.dbf')
    except:
        pass
    gdal.PopErrorHandler()
    drv = ogr.GetDriverByName('MapInfo File')
    gdaltest.p_ds = drv.CreateDataSource( 'index_p.mif' )
    gdaltest.p_lyr = gdaltest.p_ds.CreateLayer( 'index_p' )
    ogrtest.quick_create_layer_def( gdaltest.p_lyr,[('PKEY', ogr.OFTInteger)] )
    ogrtest.quick_create_feature( gdaltest.p_lyr, [5], None )
    ogrtest.quick_create_feature( gdaltest.p_lyr, [10], None )
    ogrtest.quick_create_feature( gdaltest.p_lyr, [9], None )
    ogrtest.quick_create_feature( gdaltest.p_lyr, [4], None )
    ogrtest.quick_create_feature( gdaltest.p_lyr, [3], None )
    ogrtest.quick_create_feature( gdaltest.p_lyr, [1], None )
    # It turns out mapinfo format doesn't allow GetFeatureCount() calls while
    # writing ... it just blows an assert!
    # if gdaltest.p_lyr.GetFeatureCount() != 7:
    #     gdaltest.post_reason( 'FeatureCount wrong' )
    #     return 'failure'
    # Close and reopen, since it seems the .mif driver does not allow reading
    # from a newly created (updatable) file.
    gdaltest.p_ds.Destroy()
    gdaltest.p_ds = ogr.OpenShared( 'index_p.mif', update = 0 )
    gdaltest.p_lyr = gdaltest.p_ds.GetLayerByName( 'index_p' )
    return 'success'
###############################################################################
# Create a dbf file to be our secondary table. Close it, and reopen shared.
def ogr_index_2():
    """Create join_t.dbf as the secondary table (SKEY/VALUE, 20 records),
    then close it fully and reopen it shared in update mode."""
    shape_drv = ogr.GetDriverByName('ESRI Shapefile')
    gdaltest.s_ds = shape_drv.CreateDataSource('join_t.dbf')
    gdaltest.s_lyr = gdaltest.s_ds.CreateLayer('join_t',
                                               geom_type=ogr.wkbNone)
    ogrtest.quick_create_layer_def(gdaltest.s_lyr,
                                   [('SKEY', ogr.OFTInteger),
                                    ('VALUE', ogr.OFTString, 16)])
    for skey in range(20):
        ogrtest.quick_create_feature(gdaltest.s_lyr, [skey, 'Value ' + str(skey)], None)
    if gdaltest.s_lyr.GetFeatureCount() != 20:
        gdaltest.post_reason('FeatureCount wrong')
        return 'failure'
    # Drop every reference before reopening shared.
    gdaltest.s_ds.Release()
    gdaltest.s_lyr = None
    gdaltest.s_ds = None
    gdaltest.s_ds = ogr.OpenShared('join_t.dbf', update=1)
    gdaltest.s_lyr = gdaltest.s_ds.GetLayerByName('join_t')
    return 'success'
###############################################################################
# Verify a simple join without indexing.
def ogr_index_3():
    """Verify a simple (unindexed) left join between the two tables."""
    expect = ['Value 5', 'Value 10', 'Value 9', 'Value 4', 'Value 3',
              'Value 1']
    sql_lyr = gdaltest.p_ds.ExecuteSQL(
        'SELECT * FROM index_p p '
        'LEFT JOIN "join_t.dbf".join_t j ON p.PKEY = j.SKEY ')
    matched = ogrtest.check_features_against_list(sql_lyr, 'VALUE', expect)
    gdaltest.p_ds.ReleaseResultSet(sql_lyr)
    return 'success' if matched else 'fail'
###############################################################################
# Create an INDEX on the SKEY and VALUE field in the join table.
def ogr_index_4():
    """Create indexes on the VALUE and SKEY fields of the join table."""
    for field in ('value', 'skey'):
        gdaltest.s_ds.ExecuteSQL('CREATE INDEX ON join_t USING ' + field)
    return 'success'
###############################################################################
# Check that indexable single int lookup works.
def ogr_index_5():
    """Indexed single-integer lookup on SKEY returns the matching record."""
    gdaltest.s_lyr.SetAttributeFilter('SKEY = 5')
    matched = ogrtest.check_features_against_list(gdaltest.s_lyr, 'VALUE',
                                                  ['Value 5'])
    return 'success' if matched else 'fail'
###############################################################################
# Check that indexable single string lookup works.
#
# We also close the datasource and reopen to ensure that reloaded indexes
# work OK too.
def ogr_index_6():
    """Indexed single-string lookup; the datasource is closed and reopened
    first so that reloaded indexes are exercised too."""
    gdaltest.s_ds.Release()
    gdaltest.s_ds = ogr.OpenShared('join_t.dbf', update=1)
    gdaltest.s_lyr = gdaltest.s_ds.GetLayerByName('join_t')
    gdaltest.s_lyr.SetAttributeFilter('VALUE="Value 5"')
    matched = ogrtest.check_features_against_list(gdaltest.s_lyr, 'SKEY', [5])
    return 'success' if matched else 'fail'
###############################################################################
# Check that range query that isn't currently implemented using index works.
def ogr_index_7():
    """Range query (not index-accelerated) still returns correct results."""
    gdaltest.s_lyr.SetAttributeFilter('SKEY < 3')
    matched = ogrtest.check_features_against_list(gdaltest.s_lyr, 'SKEY',
                                                  [0, 1, 2])
    return 'success' if matched else 'fail'
###############################################################################
# Try join again.
def ogr_index_8():
    """Repeat the join of ogr_index_3 now that the indexes exist."""
    expect = ['Value 5', 'Value 10', 'Value 9', 'Value 4', 'Value 3',
              'Value 1']
    result_lyr = gdaltest.p_ds.ExecuteSQL(
        'SELECT * FROM index_p p '
        'LEFT JOIN "join_t.dbf".join_t j ON p.PKEY = j.SKEY ')
    matched = ogrtest.check_features_against_list(result_lyr, 'VALUE', expect)
    gdaltest.p_ds.ReleaseResultSet(result_lyr)
    return 'success' if matched else 'fail'
###############################################################################
# Verify that dropping both indexes gets rid of them, and that results still
# work.
def ogr_index_9():
    """Drop both indexes, verify lookups still work and the index files are
    gone, then re-create the indexes (one per open/close cycle) and check that
    the .idm file mentions the indexed columns."""
    gdaltest.s_ds.ExecuteSQL( 'DROP INDEX ON join_t USING value' )
    gdaltest.s_ds.ExecuteSQL( 'DROP INDEX ON join_t USING skey' )
    gdaltest.s_lyr.SetAttributeFilter( 'SKEY = 5' )
    expect = [ 'Value 5' ]
    tr = ogrtest.check_features_against_list( gdaltest.s_lyr, 'VALUE', expect )
    if not tr:
        return 'fail'
    gdaltest.s_ds.Release()
    # After dataset closing, check that the index files do not exist after
    # dropping the index
    for filename in ['join_t.idm','join_t.ind']:
        try:
            os.stat(filename)
            gdaltest.post_reason("%s shouldn't exist" % filename)
            return 'fail'
        except:
            # os.stat raising (file missing) is the expected outcome here.
            pass
    # Re-create an index
    gdaltest.s_ds = ogr.OpenShared( 'join_t.dbf', update = 1 )
    gdaltest.s_ds.ExecuteSQL( 'CREATE INDEX ON join_t USING value' )
    gdaltest.s_ds.Release()
    # Both index files must exist again now.
    for filename in ['join_t.idm','join_t.ind']:
        try:
            os.stat(filename)
        except:
            gdaltest.post_reason("%s should exist" % filename)
            return 'fail'
        pass
    f = open('join_t.idm', 'rt')
    xml = f.read()
    f.close()
    if xml.find('VALUE') == -1:
        gdaltest.post_reason('VALUE column is not indexed (1)')
        print(xml)
        return 'fail'
    # Close the dataset and re-open
    gdaltest.s_ds = ogr.OpenShared( 'join_t.dbf', update = 1 )
    # At this point the .ind was opened in read-only. Now it
    # will be re-opened in read-write mode
    gdaltest.s_ds.ExecuteSQL( 'CREATE INDEX ON join_t USING skey' )
    gdaltest.s_ds.Release()
    # Both columns must now appear in the index metadata.
    f = open('join_t.idm', 'rt')
    xml = f.read()
    f.close()
    if xml.find('VALUE') == -1:
        gdaltest.post_reason('VALUE column is not indexed (2)')
        print(xml)
        return 'fail'
    if xml.find('SKEY') == -1:
        gdaltest.post_reason('SKEY column is not indexed (2)')
        print(xml)
        return 'fail'
    return 'success'
###############################################################################
# Test fix for #4326
def _ogr_index_10_filter_matches(lyr, attr_filter):
    """Apply *attr_filter* to *lyr* and report whether a feature matches."""
    lyr.SetAttributeFilter(attr_filter)
    lyr.ResetReading()
    return lyr.GetNextFeature() is not None


def ogr_index_10():
    """Test fix for #4326: indexed attribute lookups must coerce types
    between int/real/string operands for both IN and = operators."""
    ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/ogr_index_10.shp')
    lyr = ds.CreateLayer('ogr_index_10')
    lyr.CreateField(ogr.FieldDefn('intfield', ogr.OFTInteger))
    lyr.CreateField(ogr.FieldDefn('realfield', ogr.OFTReal))
    lyr.CreateField(ogr.FieldDefn('strfield', ogr.OFTString))
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat.SetField(0, 1)
    feat.SetField(1, 1)
    feat.SetField(2, "foo")
    lyr.CreateFeature(feat)
    feat = None
    ds.ExecuteSQL('create index on ogr_index_10 using intfield')
    ds.ExecuteSQL('create index on ogr_index_10 using realfield')
    # (filter, should it match the single record), in the original order.
    checks = [
        ('intfield IN (1)', True),
        ('intfield = 1', True),
        ('intfield IN (2)', False),
        ('intfield IN (1.0)', True),
        ('intfield = 1.0', True),
        ('intfield IN (1.1)', False),
        ("intfield IN ('1')", True),
        ('realfield IN (1.0)', True),
        ('realfield = 1.0', True),
        ('realfield IN (1.1)', False),
        ('realfield IN (1)', True),
        ('realfield = 1', True),
        ('realfield IN (2)', False),
        ("realfield IN ('1')", True),
        ("strfield IN ('foo')", True),
        ("strfield = 'foo'", True),
        ("strfield IN ('bar')", False),
    ]
    for attr_filter, should_match in checks:
        if _ogr_index_10_filter_matches(lyr, attr_filter) != should_match:
            gdaltest.post_reason('failed')
            return 'fail'
    ds = None
    return 'success'
###############################################################################
# Test support for OR and AND expression
def ogr_index_11_check(lyr, expected_fids):
    """Read len(expected_fids) features from the (already filtered) layer and
    check their FIDs match *expected_fids* in order.

    Returns 'success' or 'fail' (gdaltest convention)."""
    lyr.ResetReading()
    # Iterate the expected FIDs directly instead of range(len(...)).
    for expected_fid in expected_fids:
        feat = lyr.GetNextFeature()
        if feat is None:
            gdaltest.post_reason('failed')
            return 'fail'
        if feat.GetFID() != expected_fid:
            gdaltest.post_reason('failed')
            return 'fail'
    return 'success'
def ogr_index_11():
    """OR / AND attribute filters combining two indexed columns."""
    ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource('tmp/ogr_index_11.dbf')
    lyr = ds.CreateLayer('ogr_index_11', geom_type=ogr.wkbNone)
    lyr.CreateField(ogr.FieldDefn('intfield', ogr.OFTInteger))
    lyr.CreateField(ogr.FieldDefn('strfield', ogr.OFTString))
    for row in ([1, "foo"], [1, "bar"], [2, "foo"], [2, "bar"], [3, "bar"]):
        ogrtest.quick_create_feature(lyr, row, None)
    ds.ExecuteSQL('CREATE INDEX ON ogr_index_11 USING intfield')
    ds.ExecuteSQL('CREATE INDEX ON ogr_index_11 USING strfield')
    # (filter, FIDs expected back), checked in order.
    cases = [
        ("intfield = 1 OR strfield = 'bar'", [0, 1, 3]),
        ("intfield = 1 AND strfield = 'bar'", [1]),
        ("intfield = 1 AND strfield = 'foo'", [0]),
        ("intfield = 3 AND strfield = 'foo'", []),
    ]
    for attr_filter, fids in cases:
        lyr.SetAttributeFilter(attr_filter)
        ret = ogr_index_11_check(lyr, fids)
        if ret != 'success':
            return ret
    ds = None
    return 'success'
###############################################################################
def ogr_index_cleanup():
    """Release the datasources and delete every file the tests created; fails
    if the index files survive deletion of the datasource."""
    try:
        gdaltest.p_ds.Release()
    except:
        # Best effort -- p_ds may never have been created if setup failed.
        pass
    gdaltest.p_ds = None
    gdaltest.s_ds = None
    gdaltest.p_lyr = None
    gdaltest.s_lyr = None
    ogr.GetDriverByName( 'MapInfo File' ).DeleteDataSource( 'index_p.mif' )
    ogr.GetDriverByName( 'ESRI Shapefile' ).DeleteDataSource( 'join_t.dbf' )
    # Deleting join_t.dbf must also have removed its index files.
    for filename in ['join_t.idm','join_t.ind']:
        try:
            os.stat(filename)
            gdaltest.post_reason("%s shouldn't exist" % filename)
            return 'fail'
        except:
            # os.stat raising (file missing) is the expected outcome here.
            pass
    ogr.GetDriverByName( 'ESRI Shapefile' ).DeleteDataSource( 'tmp/ogr_index_10.shp' )
    ogr.GetDriverByName( 'ESRI Shapefile' ).DeleteDataSource( 'tmp/ogr_index_11.dbf' )
    return 'success'
# Ordered list of tests for the gdaltest runner; cleanup must stay last.
gdaltest_list = [
    ogr_index_1,
    ogr_index_2,
    ogr_index_3,
    ogr_index_4,
    ogr_index_5,
    ogr_index_6,
    ogr_index_7,
    ogr_index_8,
    ogr_index_9,
    ogr_index_10,
    ogr_index_11,
    ogr_index_cleanup ]

if __name__ == '__main__':
    # Standard gdaltest harness entry point.
    gdaltest.setup_run( 'ogr_index_test' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
|
'''
This module contains standard and udf functions to calculate comparisons between
households for use in household matching.
'''
import itertools
import re
from pyspark.sql.types import FloatType, IntegerType, StringType
from pyspark.sql.functions import udf
import numpy as np
from nltk.metrics import edit_distance
import jellyfish
# ---------------------------------------- #
# ---- Functions using PySpark Arrays ---- #
# ---------------------------------------- #
# Common elements function
def common(a, b):
    """
    Calculates the number of unique common elements between two lists.

    Returns None when either input is None (null columns when used as a
    Spark UDF).

    Parameters
    ----------
    a: list
        list of strings to be compared to b
    b: list
        list of strings to be compared to a

    Raises
    ------
    TypeError
        if a or b are not lists (and not None)

    Returns
    -------
    integer
        number of unique common elements, or None for null input

    Example
    --------
    >>> list_1 = ['dog', 'dog', 'cat', 'fish']
    >>> list_2 = ['dog', 'fish']
    >>> common(list_1, list_2)
    2
    """
    # Null-safety must come first: the original's "else None" branch was
    # unreachable because isinstance(None, list) is False, so None inputs
    # raised TypeError instead of yielding None.
    if a is None or b is None:
        return None
    # Check variable types
    if not ((isinstance(a, list)) and (isinstance(b, list))):
        raise TypeError('Both variables being compared must contain lists')
    # Unique elements of b that also appear in a.
    return len({x for x in b if x in a})
# Common ages function
def common_age(a, b):
    """
    Count age matches between two lists of age strings; ages at most one
    year apart count as a match.  Every pairing across the two lists is
    counted, so duplicates contribute multiple matches.

    Parameters
    ----------
    a: list
        list of age strings to be compared to b
    b: list
        list of age strings to be compared to a

    Raises
    ------
    TypeError
        if a or b are not lists

    Returns
    -------
    integer
        number of ages in common

    Example
    --------
    >>> common_age(['15', '20', '2'], ['15', '15', '20', '2', '99'])
    4
    """
    if not (isinstance(a, list) and isinstance(b, list)):
        raise TypeError('Both variables being compared must contain lists')
    # Cross product of the two age sets, counting pairs within one year.
    return sum(1 for x, y in itertools.product(a, b)
               if abs(int(x) - int(y)) <= 1)
# Max Lev Distance between two sets of names
def max_lev(a, b):
    """
    Compare two lists of names and return the maximum standardised edit
    distance score over every cross pairing.

    Parameters
    ----------
    a: list
        list of name strings to be compared to b
    b: list
        list of name strings to be compared to a

    Raises
    ------
    TypeError
        if a or b are not lists

    Returns
    -------
    float
        maximum standardised edit distance score

    Example
    --------
    >>> max_lev(['Charlie', 'John', 'Steve', 'Bob'], ['Dave', 'Charles'])
    0.7142857142857143
    """
    if not (isinstance(a, list) and isinstance(b, list)):
        raise TypeError('Both variables being compared must contain lists')
    # Standardised score: 1 - distance / length of the longer name.
    scores = []
    for x, y in itertools.product(a, b):
        scores.append(1 - (edit_distance(x, y) / (np.max([len(x), len(y)]))))
    return float(np.max(scores))
# Name similarity Function - Max Jaro Distance between two sets of names
def max_jaro(a, b):
    """
    Compare two lists of names and return the maximum Jaro-Winkler
    similarity score over every cross pairing.

    Parameters
    ----------
    a: list
        list of name strings to be compared to b
    b: list
        list of name strings to be compared to a

    Raises
    ------
    TypeError
        if a or b are not lists

    Returns
    -------
    float
        maximum jaro winkler similarity score

    Example
    --------
    >>> max_jaro(['Charlie', 'John', 'Steve', 'Bob'], ['Dave', 'Charles'])
    0.9428571428571428
    """
    if not (isinstance(a, list) and isinstance(b, list)):
        raise TypeError('Both variables being compared must contain lists')
    # max() over the cross product; still raises ValueError for empty input,
    # matching the original behaviour.
    scores = [jellyfish.jaro_winkler(x, y) for x, y in itertools.product(a, b)]
    return max(scores)
# -------------------------------------- #
# ---- House / Flat Number Functions --- #
# -------------------------------------- #
# Terms to remove from addresses before we apply house number function
# (floor-level flat descriptions whose ordinals would otherwise be picked
# up as spurious candidate numbers).
terms = ['FIRST FLOOR FLAT', 'FLAT 1ST FLOOR', 'SECOND FLOOR FLAT',
         'FLAT 2ND FLOOR','FLAT 1ST AND 2ND FLOOR', 'FLAT 1ST 2ND AND 3RD FLOOR', 'THIRD FLOOR FLAT',
         'FLAT 2ND AND 3RD FLOOR', 'FLAT 3RD AND 4TH FLOOR', 'FLAT 4TH AND 5TH FLOOR',
         'GROUND FLOOR FLAT']
def replace_all(string, terms):
    """
    Strip every occurrence of each term in *terms* from *string*.

    Parameters
    ----------
    string: str
        string to remove terms from
    terms: list
        list of terms to be removed from string

    Returns
    -------
    str
        string with terms removed

    Example
    --------
    >>> replace_all('FIRST FLOOR FLAT 20 PARK ROAD',
    ...             ['FIRST FLOOR FLAT', 'SECOND FLOOR FLAT'])
    '20 PARK ROAD'
    """
    for phrase in terms:
        # strip() after each removal so leading/trailing gaps don't accumulate
        string = string.replace(phrase, '').strip()
    return string
def house_number(address):
    """
    Extracts house number from an address string.  Flat and apartment numbers
    are removed before extraction to make it more likely that the correct
    number is extracted, although this function will not get it right 100% of
    the time.

    Parameters
    ----------
    address: str
        address string to extract house number from

    Returns
    -------
    str
        house number, or None if no candidate number remains

    Example
    --------
    >>> house_number('FIRST FLOOR FLAT 20 PARK ROAD')
    '20'
    """
    # Upper Case
    address = address.upper()
    # If the address contains a comma, assume the postcode follows the final
    # comma and drop that segment.  BUG FIX: the original dropped the segment
    # unconditionally, which reduced comma-free addresses to '' and made the
    # docstring example above return None instead of '20'.
    if ',' in address:
        address = ','.join(address.split(',')[:-1])
    # Remove terms from address to improve performance
    address = replace_all(address, terms)
    # Remove possible flat/apartment numbers from address
    # (raw strings so the \s and \d escapes are explicit).
    address = re.sub(r'FLAT(\s*)(\d+)', '', address).strip()
    address = re.sub(r'APARTMENT(\s*)(\d+)', '', address).strip()
    address = re.sub(r'UNIT(\s*)(\d+)', '', address).strip()
    address = re.sub(r'ROOM(\s*)(\d+)', '', address).strip()
    # Create list of possible house numbers from address
    numbers = re.findall(r'\d+', address)
    # Remove zeros & leading zeros
    numbers = [number.lstrip('0') for number in numbers if number not in ['0']]
    # Take first number as house number
    if len(numbers) > 0:
        house_no = str(numbers[0])
    else:
        house_no = None
    return house_no
def flat_number(address):
    """
    Extracts the flat/apartment number from an address string.

    Parameters
    ----------
    address: str
        address string to extract flat/apartment number from

    Returns
    -------
    str or None
        flat/apartment number with leading zeros removed, or None when
        no flat/apartment number is present

    Example
    --------
    >>> flat_number('FLAT 5, 15 PARK ROAD')
    '5'
    """
    upper = address.upper()
    # First look for a FLAT number, then an APARTMENT number; keep the
    # first match found.
    match = re.search(r'FLAT (\d+)', upper)
    if match is None:
        match = re.search(r'APARTMENT (\d+)', upper)
    number = match.group(1) if match is not None else '0'
    # '0' means "nothing found" (or an explicit zero) -> no flat number.
    if number == '0':
        return None
    return number.lstrip('0')
# ---------------------------------- #
# ------------ UDFs ---------------- #
# ---------------------------------- #
# Spark UDF wrappers around the plain-Python helpers so they can be applied
# to DataFrame columns.  `udf` and the *Type classes are presumably the
# pyspark.sql imports made earlier in the file -- TODO confirm.
common_udf = udf(common, IntegerType())
common_age_udf = udf(common_age, IntegerType())
max_lev_udf = udf(max_lev, FloatType())
max_jaro_udf = udf(max_jaro, FloatType())
# None-safe wrappers: null columns pass through as null instead of crashing.
house_number_udf = udf(lambda x: house_number(x) if x is not None else None, StringType())
flat_number_udf = udf(lambda x: flat_number(x) if x is not None else None, StringType())
|
import os
from collections import defaultdict
from enum import Enum
from typing import Dict, Generator, List, Tuple
def solution1(data: List[int]) -> int:
    """Run the arcade program and count how many block tiles (id 2) it draws."""
    computer = intcode_computer(data, None)
    next(computer)  # prime the generator to its first yield
    tiles = {}
    # The program emits output in triples: x (column), y (row), tile id.
    while True:
        try:
            x = next(computer)
            y = next(computer)
            tile_id = next(computer)
        except StopIteration:
            break
        tiles[(y, x)] = tile_id
    return sum(1 for t in tiles.values() if t == 2)
def solution2(data: List[int]) -> object:
    """Play the arcade game to completion and return the final score.

    Runs the intcode program with seed 2 (presumably "free play" mode --
    confirm against the puzzle), tracks the ball and paddle columns, and
    always steers the paddle toward the ball.  Renders the screen whenever
    the ball moves.
    """
    computer = intcode_computer(data, 0, 2)
    next(computer)  # prime the generator to its first yield
    screen = {}
    def print_screen():
        # Render a fixed 20x50 viewport of the tile map to stdout.
        for row in range(20):
            s = ""
            for col in range(50):
                tile = screen.get((row, col))
                if tile in (None, 0):
                    s += " " # empty
                elif tile == 1:
                    s += "#" # wall
                elif tile == 2:
                    s += "X" # block
                elif tile == 3:
                    s += "x" # paddle
                elif tile == 4:
                    s += "o" # ball
                else:
                    assert False
            print(s)
    score = 0
    direction = 0  # joystick input: -1 left, 0 neutral, 1 right
    # Initial paddle/ball columns; overwritten as soon as tiles 3/4 appear.
    player_col = 18
    ball_col = 16
    while True:
        try:
            # Output comes in (col, row, tile) triples; the same joystick
            # direction is fed for all three sends of a triple.
            col = computer.send(direction)
            row = computer.send(direction)
            tile = computer.send(direction)
        except StopIteration:
            break
        if col == -1 and row == 0:
            # Special combination
            # (-1, 0) is not a tile: the third value is the current score.
            score = tile
            continue
        screen[(row, col)] = tile
        if tile == 4:
            # Ball moved: remember its column and redraw the board.
            ball_col = col
            os.system("clear")
            print_screen()
            print(score)
        if tile == 3:
            player_col = col
        # Chase the ball with the paddle.
        if player_col < ball_col:
            direction = 1
        elif player_col > ball_col:
            direction = -1
        else:
            direction = 0
    return score
class ParameterMode(Enum):
    """How an intcode instruction parameter is interpreted."""
    # Parameter is an absolute address into the register space.
    POSITION = 0
    # Parameter is the literal value itself.
    IMMEDIATE = 1
    # Parameter is an address relative to the current relative offset.
    RELATIVE = 2
def intcode_computer(
    data: List[int], setting: int, seed: int = 1
) -> Generator[int, int, None]:
    """Generator-based intcode interpreter.

    Yields each output value (opcode 4) and receives the next input value
    via ``send`` (consumed by opcode 3).  The very first input consumed is
    ``setting``; every subsequent input is whatever was last sent.

    Parameters
    ----------
    data : program/memory image; copied into a defaultdict so memory beyond
        the program is implicitly zero.
    setting : value returned by the first input instruction (may be None if
        the program never reads input, as in solution1).
    seed : written into address 0 before execution -- presumably the
        "quarters" switch for this puzzle; TODO confirm.
    """
    registers = defaultdict(int)
    for i, value in enumerate(data):
        registers[i] = value
    registers[0] = seed
    # Pause until the caller primes the generator with next().
    next_input = yield
    setting_has_been_set = False
    i = 0                    # instruction pointer
    relative_offset = 0      # base for RELATIVE-mode parameters
    while registers[i] != 99:  # opcode 99 halts
        opcode, *modes = parse_instruction(registers[i])
        if opcode in (1, 2):
            # add (1) / multiply (2)
            j = get_value(registers, registers[i + 1], modes[0], relative_offset)
            k = get_value(registers, registers[i + 2], modes[1], relative_offset)
            m = get_write_location(registers[i + 3], modes[2], relative_offset)
            registers[m] = j + k if opcode == 1 else j * k
            i += 4
        elif opcode == 3:
            # input: first read takes `setting`, later reads take sent values
            m = get_write_location(registers[i + 1], modes[0], relative_offset)
            registers[m] = setting if not setting_has_been_set else next_input
            setting_has_been_set = True
            i += 2
        elif opcode == 4:
            # output: yield the value, resume with the next sent input
            next_input = yield get_value(
                registers, registers[i + 1], modes[0], relative_offset
            )
            i += 2
        elif opcode == 5:
            # jump-if-true
            j = get_value(registers, registers[i + 1], modes[0], relative_offset)
            k = get_value(registers, registers[i + 2], modes[1], relative_offset)
            i = k if j != 0 else i + 3
        elif opcode == 6:
            # jump-if-false
            j = get_value(registers, registers[i + 1], modes[0], relative_offset)
            k = get_value(registers, registers[i + 2], modes[1], relative_offset)
            i = k if j == 0 else i + 3
        elif opcode == 7:
            # less-than
            j = get_value(registers, registers[i + 1], modes[0], relative_offset)
            k = get_value(registers, registers[i + 2], modes[1], relative_offset)
            m = get_write_location(registers[i + 3], modes[2], relative_offset)
            registers[m] = 1 if j < k else 0
            i += 4
        elif opcode == 8:
            # equals
            j = get_value(registers, registers[i + 1], modes[0], relative_offset)
            k = get_value(registers, registers[i + 2], modes[1], relative_offset)
            m = get_write_location(registers[i + 3], modes[2], relative_offset)
            registers[m] = 1 if j == k else 0
            i += 4
        elif opcode == 9:
            # adjust relative offset
            j = get_value(registers, registers[i + 1], modes[0], relative_offset)
            relative_offset += j
            i += 2
        else:
            assert False, "Received invalid opcode"
def parse_instruction(
    instruction: int,
) -> Tuple[int, ParameterMode, ParameterMode, ParameterMode]:
    """Split an intcode instruction into its opcode and three parameter modes.

    The two least-significant decimal digits are the opcode; the next three
    digits (least-significant first) select the mode of parameters 1-3.
    The previous annotation claimed ``bool`` modes, but the function has
    always returned ``ParameterMode`` members.
    """
    opcode = instruction % 100
    instruction //= 100
    param1 = instruction % 10
    instruction //= 10
    param2 = instruction % 10
    instruction //= 10
    param3 = instruction % 10
    return opcode, ParameterMode(param1), ParameterMode(param2), ParameterMode(param3)
def get_value(
    registers: Dict[int, int], pos: int, mode: ParameterMode, offset: int
) -> int:
    """Resolve a read parameter according to its addressing mode."""
    if mode == ParameterMode.IMMEDIATE:
        # The parameter is the value itself.
        return pos
    if mode == ParameterMode.POSITION:
        return registers[pos]
    # Only RELATIVE remains: address is shifted by the relative offset.
    assert mode == ParameterMode.RELATIVE
    return registers[offset + pos]
def get_write_location(pos: int, mode: ParameterMode, offset: int) -> int:
    """Resolve a write address; only RELATIVE mode applies the offset."""
    if mode == ParameterMode.RELATIVE:
        return pos + offset
    return pos
if __name__ == "__main__":
    # Read the comma-separated intcode program from disk.
    with open("input.txt", "r") as f:
        data = list(map(int, f.readline().split(",")))
    # Part 2 is run first so its screen animation finishes before the
    # summary lines print; the interpreter copies `data`, so part 1 still
    # sees the unmodified program.
    result2 = solution2(data)
    print(f"Solution 1: {solution1(data)}")
    print(f"Solution 2: {result2}")
|
#!/usr/bin/env python3
import unittest
import gen_db
class TestGenDb(unittest.TestCase):
    """Tests for gen_db's term-linkification helpers."""
    def test_linkify(self) -> None:
        # Checks that text wrapped in <B> is not linked (it tends to be Mod. E.
        # or Latin), that the longest abbreviations are linked, and that
        # normalization works as expected ("Hwæt" -> "hwaet").
        term_nid = {'hwaet': 7, 'a': 8, 'b': 9, 'a b': 10, 'c': 11}
        abbrevs = ['A.', 'b.', 'A. b.']
        rex = gen_db.compile_linkify_regex(abbrevs, min_len=3)
        s = '<B>a</B> Hwæt! A. b. c'
        # current_nid=11 means the entry being rendered is 'c', so 'c'
        # must not be linked to itself.
        h = gen_db.linkify(s, term_nid, rex, current_nid=11, skip=0)
        eh = ('<B>a</B> <a href="https://btc.invalid/7">Hwæt</a>! '
              '<a href="https://btc.invalid/10">A. b.</a> c')
        self.assertEqual(eh, h)
    def test_linkify_skip(self) -> None:
        # skip=1 leaves the first matchable occurrence unlinked.
        term_nid = {'aaa': 8}
        rex = gen_db.compile_linkify_regex([], min_len=3)
        h = gen_db.linkify('aaa aaa', term_nid, rex, current_nid=11, skip=1)
        eh = 'aaa <a href="https://btc.invalid/8">aaa</a>'
        self.assertEqual(eh, h)
    def test_linkify_variant(self) -> None:
        # skip counts characters here (8 = len('aaa v. a')?) -- presumably a
        # character offset rather than an occurrence count; verify in gen_db.
        term_nid = {'aaa': 8}
        rex = gen_db.compile_linkify_regex([], min_len=3)
        h = gen_db.linkify('aaa v. aaa', term_nid, rex, current_nid=11, skip=8)
        eh = 'aaa v. <a href="https://btc.invalid/8">aaa</a>'
        self.assertEqual(eh, h)
    def test_linkify_with_dashes(self) -> None:
        # Hyphenated whole terms link ('a-a'), but fragments with a leading
        # or trailing dash ('a-', '-a') do not.
        term_nid = {'a': 8, 'aa': 9}
        rex = gen_db.compile_linkify_regex([], min_len=1)
        h = gen_db.linkify('a a-a a- -a', term_nid, rex, current_nid=0, skip=0)
        eh = ('<a href="https://btc.invalid/8">a</a> '
              '<a href="https://btc.invalid/9">a-a</a> a- -a')
        self.assertEqual(eh, h)
if __name__ == '__main__':
    # Run the suite when the file is executed directly.
    unittest.main()
|
"""
WSGI config for CastleApartment project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CastleApartment.settings')
application = get_wsgi_application()
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from collections import OrderedDict
import numpy as np
import pandas as pd
from gneiss.regression._model import RegressionModel
from gneiss.util import _type_cast_to_float
from gneiss.balances import balance_basis
from skbio.stats.composition import ilr_inv
from statsmodels.iolib.summary2 import Summary
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
from patsy import dmatrix
from scipy import stats
def ols(formula, table, metadata):
    """ Ordinary Least Squares applied to balances.

    Fits a multivariate ordinary least squares regression where the
    response is a matrix of balances (`table`) and the covariates
    (`metadata`) are external variables selected via a patsy `formula`.

    After calling ``fit()`` on the returned model, goodness-of-fit and
    covariate contributions are available through the coefficient of
    determination (`r2`), leave-one-variable-out cross validation
    (`lovo`), leave-one-out cross validation (`loo`) and k-fold cross
    validation (`kfold`).  Residuals (`residuals`), t-statistics
    (`tvalues`) and p-values (`pvalues`) support diagnostics, and
    `predict` produces predictions as balances (or proportions if a
    tree is supplied).

    Parameters
    ----------
    formula : str
        Statistical formula in R/statsmodels style.  The dependent
        variable must not be specified; the regression is run against
        each balance individually.  See `patsy` for details.
    table : pd.DataFrame
        Contingency table where samples correspond to rows and
        balances correspond to columns.
    metadata: pd.DataFrame
        Metadata about the samples in `table`; samples are rows and
        covariates are columns.

    Returns
    -------
    OLSModel
        Container holding coefficients, pvalues, residuals and the
        coefficient of determination for the fit (populated by ``fit()``).

    Example
    -------
    >>> import numpy as np, pandas as pd
    >>> from gneiss.regression import ols
    >>> np.random.seed(0)
    >>> g1 = np.linspace(0, 15, 100)
    >>> Y = pd.DataFrame({'y1': g1 + 5, 'y2': -g1 - 2})
    >>> Y = Y + np.random.normal(loc=1, scale=0.1, size=(100, 2))
    >>> res = ols('g1', Y, pd.DataFrame({'g1': g1}))
    >>> res.fit()

    then inspect ``res.pvalues``, ``res.coefficients()`` and ``res.r2``.
    """
    # Casting covariates to float once up front keeps the later matrix
    # algebra fast; the copy avoids mutating the caller's metadata.
    metadata = _type_cast_to_float(metadata.copy())
    # Build the exogenous design matrix from the formula.
    design = dmatrix(formula, metadata, return_type='dataframe')
    # Keep only samples present in both the balance table and the design.
    response, design = table.align(design, join='inner', axis=0)
    return OLSModel(Y=response, Xs=design)
class OLSModel(RegressionModel):
    """ Summary object for storing ordinary least squares results.

    A `OLSModel` object stores information about the
    individual balances used in the regression, the coefficients,
    residuals. This object can be used to perform predictions.
    In addition, summary statistics such as the coefficient
    of determination for the overall fit can be calculated.

    Attributes
    ----------
    submodels : list of statsmodels objects
        List of statsmodels result objects.
    balances : pd.DataFrame
        A table of balances where samples are rows and
        balances are columns. These balances were calculated
        using `tree`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def fit(self, **kwargs):
        """ Fit the ordinary least squares model.

        Here, the coefficients of the model are estimated.
        In addition, there are additional summary statistics
        that are being calculated, such as residuals, t-statistics,
        pvalues and coefficient of determination.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments used to tune the parameter estimation.
        """
        Y = self.response_matrix
        X = self.design_matrices
        n, p = X.shape
        # Closed-form multivariate OLS: beta = (X'X)^+ X'Y.  pinv tolerates
        # rank-deficient design matrices.
        inv = np.linalg.pinv(np.dot(X.T, X))
        cross = np.dot(inv, X.T)
        beta = np.dot(cross, Y)
        pX = np.dot(X, beta)          # fitted values
        resid = (Y - pX)
        sst = (Y - Y.mean(axis=0))    # deviations from the column means
        sse = (resid**2).sum(axis=0)
        # Per-balance sums of squares for the explained-sum-of-squares stat.
        sst_balance = ((Y - Y.mean(axis=0))**2).sum(axis=0)
        sse_balance = (resid**2).sum(axis=0)
        ssr_balance = (sst_balance - sse_balance)
        # NOTE(review): conventional OLS uses df_resid = n - p; the "+ 1" is
        # preserved from the original implementation -- confirm intent.
        df_resid = n - p + 1
        mse = sse / df_resid
        self._mse = mse
        # t tests: standard errors from the diagonal of (X'X)^+ scaled by
        # the per-balance mse.  Reuses `inv` instead of recomputing the same
        # pseudo-inverse a second time.
        bse = np.sqrt(np.outer(np.diag(inv), mse))
        tvalues = np.divide(beta, bse)
        pvals = stats.t.sf(np.abs(tvalues), df_resid)*2
        self._tvalues = pd.DataFrame(tvalues, index=X.columns,
                                     columns=Y.columns)
        self._pvalues = pd.DataFrame(pvals, index=X.columns,
                                     columns=Y.columns)
        self._beta = pd.DataFrame(beta, index=X.columns,
                                  columns=Y.columns)
        self._resid = pd.DataFrame(resid, index=Y.index,
                                   columns=Y.columns)
        self._fitted = True
        self._ess = ssr_balance
        self._r2 = 1 - ((resid**2).values.sum() / (sst**2).values.sum())

    def predict(self, X=None, tree=None, **kwargs):
        """ Performs a prediction based on model.

        Parameters
        ----------
        X : pd.DataFrame, optional
            Input table of covariates, where columns are covariates, and
            rows are samples. If not specified, then the fitted values
            calculated from training the model will be returned.
        tree : skbio.TreeNode, optional
            The tree used to perform the ilr transformation. If this
            is specified, then the prediction will be represented
            as proportions. Otherwise, if this is not specified,
            the prediction will be represented as balances. (default: None).
        **kwargs : dict
            Other arguments to be passed into the model prediction.

        Returns
        -------
        pd.DataFrame
            A table of predicted values where columns are coefficients,
            and the rows are balances. If `tree` is specified, then
            the rows are proportions.

        Raises
        ------
        ValueError
            If the model has not been fitted yet.
        """
        if not self._fitted:
            # Bug fix: the original constructed this ValueError but never
            # raised it, silently continuing with stale coefficients.
            raise ValueError(('Model not fitted - coefficients not calculated.'
                              'See `fit()`'))
        if X is None:
            X = self.design_matrices
        prediction = X.dot(self._beta)
        if tree is not None:
            # Convert balance predictions back to proportions over the tips.
            basis, _ = balance_basis(tree)
            proj_prediction = ilr_inv(prediction.values, basis=basis)
            ids = [n.name for n in tree.tips()]
            return pd.DataFrame(proj_prediction,
                                columns=ids,
                                index=prediction.index)
        else:
            return prediction

    @property
    def pvalues(self):
        """ Return pvalues from each of the coefficients in the fit. """
        return self._pvalues

    @property
    def tvalues(self):
        """ Return t-statistics from each of the coefficients in the fit. """
        return self._tvalues

    @property
    def r2(self):
        """ Coefficient of determination for overall fit"""
        return self._r2

    @property
    def mse(self):
        """ Mean Sum of squares Error"""
        return self._mse

    @property
    def ess(self):
        """ Explained Sum of squares"""
        return self._ess

    def summary(self, kfolds, lovo):
        """ Summarize the Ordinary Least Squares Regression Results.

        Parameters
        ----------
        kfolds : pd.DataFrame
            Results from kfold cross-validation
        lovo : pd.DataFrame
            Results from leave-one-variable-out cross-validation.

        Returns
        -------
        str :
            This holds the summary of regression coefficients and fit
            information.
        """
        _r2 = self.r2
        self.params = self._beta
        # number of observations
        self.nobs = self.response_matrix.shape[0]
        self.model = None
        # Start filling in summary information
        smry = Summary()
        # Top results
        info = OrderedDict()
        info["No. Observations"] = self.nobs
        info["Model:"] = "OLS"
        info["Rsquared: "] = _r2
        # TODO: Investigate how to properly resize the tables
        smry.add_dict(info, ncols=1)
        smry.add_title("Simplicial Least Squares Results")
        smry.add_df(lovo, align='l')
        smry.add_df(kfolds, align='l')
        return smry

    def kfold(self, num_folds=10, **kwargs):
        """ K-fold cross-validation.

        Performs k-fold cross-validation by spliting the data
        into k partitions, building a model on k-1 partitions and
        evaluating the predictions on the remaining partition.
        This process is performed k times.

        Parameters
        ----------
        num_folds: int, optional
            The number of partitions used for the cross validation.
        **kwargs : dict
            Keyword arguments used to tune the parameter estimation.

        Returns
        -------
        pd.DataFrame
            model_mse : np.array, float
                The within model mean sum of squares error for each iteration
                of the cross validation.
            Rsquared : np.array, float
                The Rsquared of the model fitted on training data.
            pred_mse : np.array, float
                Prediction mean sum of squares error for each iteration of
                the cross validation.
        """
        # number of observations (i.e. samples)
        nobs = self.response_matrix.shape[0]
        s = nobs // num_folds
        # Contiguous, equally-sized folds.  Bug fix: the original stop index
        # `((i*s)+s) % nobs` wrapped to 0 on the final fold, producing an
        # empty test partition; min() clamps the stop in range instead.
        folds = [np.arange(i * s, min(i * s + s, nobs))
                 for i in range(num_folds)]
        results = pd.DataFrame(index=['fold_%d' % i for i in range(num_folds)],
                               columns=['model_mse', 'Rsquared', 'pred_mse'],
                               dtype=np.float64)
        for k in range(num_folds):
            test = folds[k]
            train = np.hstack(folds[:k] + folds[k+1:])
            res_i = OLSModel(self.response_matrix.iloc[train],
                             self.design_matrix.iloc[train])
            res_i.fit(**kwargs)
            # model error
            p = res_i.predict(X=self.design_matrix.iloc[train]).values
            r = self.response_matrix.iloc[train].values
            model_resid = ((p - r)**2)
            model_mse = np.mean(model_resid.sum(axis=0))
            results.loc['fold_%d' % k, 'model_mse'] = model_mse
            results.loc['fold_%d' % k, 'Rsquared'] = res_i.r2
            # prediction error
            p = res_i.predict(X=self.design_matrix.iloc[test]).values
            r = self.response_matrix.iloc[test].values
            pred_resid = ((p - r)**2)
            pred_mse = np.mean(pred_resid.sum(axis=0))
            results.loc['fold_%d' % k, 'pred_mse'] = pred_mse
        return results

    def loo(self, **kwargs):
        """ Leave one out cross-validation.

        Calculates summary statistics for each iteraction of
        leave one out cross-validation, specially `mse` on entire model
        and `pred_err` to measure prediction error.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments used to tune the parameter estimation.

        Returns
        -------
        pd.DataFrame
            model_mse : np.array, float
                Mean sum of squares error for each iteration of
                the cross validation.
            pred_mse : np.array, float
                Prediction mean sum of squares error for each iteration of
                the cross validation.

        See Also
        --------
        fit
        statsmodels.regression.linear_model.
        """
        # number of observations (i.e. samples)
        nobs = self.response_matrix.shape[0]
        cv_iter = LeaveOneOut(nobs)
        results = pd.DataFrame(index=self.response_matrix.index,
                               columns=['model_mse', 'pred_mse'],
                               dtype=np.float64)
        for i, (train, test) in enumerate(cv_iter):
            sample_id = self.response_matrix.index[i]
            res_i = OLSModel(self.response_matrix.iloc[train],
                             self.design_matrix.iloc[train])
            res_i.fit(**kwargs)
            # model error
            predicted = res_i.predict(X=self.design_matrix.iloc[train])
            r = self.response_matrix.iloc[train].values
            p = predicted.values
            model_resid = ((r - p)**2)
            model_mse = np.mean(model_resid.sum(axis=0))
            results.loc[sample_id, 'model_mse'] = model_mse
            # prediction error on the single held-out sample
            predicted = res_i.predict(X=self.design_matrix.iloc[test])
            r = self.response_matrix.iloc[test].values
            p = predicted.values
            pred_resid = ((r - p)**2)
            pred_mse = np.mean(pred_resid.sum(axis=0))
            results.loc[sample_id, 'pred_mse'] = pred_mse
        return results

    def lovo(self, **kwargs):
        """ Leave one variable out cross-validation.

        Calculates summary statistics for each iteraction of leave one
        variable out cross-validation, specially `r2` and `mse` on entire
        model.  This technique is particularly useful for feature selection.

        Parameters
        ----------
        **kwargs : dict
            Keyword arguments used to tune the parameter estimation.

        Returns
        -------
        pd.DataFrame
            mse : np.array, float
                Mean sum of squares error for each iteration of
                the cross validation.
            Rsquared : np.array, float
                Coefficient of determination for each variable left out.
            R2diff : np.array, float
                Decrease in Rsquared for each variable left out.
        """
        cv_iter = LeaveOneOut(len(self.design_matrix.columns))
        results = pd.DataFrame(index=self.design_matrix.columns,
                               columns=['mse', 'Rsquared', 'R2diff'],
                               dtype=np.float64)
        for i, (inidx, outidx) in enumerate(cv_iter):
            feature_id = self.design_matrix.columns[i]
            # Refit with every column except the i-th.
            res_i = OLSModel(Y=self.response_matrix,
                             Xs=self.design_matrix.iloc[:, inidx])
            res_i.fit(**kwargs)
            predicted = res_i.predict()
            r = self.response_matrix.values
            p = predicted.values
            model_resid = ((r - p)**2)
            model_mse = np.mean(model_resid.sum(axis=0))
            results.loc[feature_id, 'mse'] = model_mse
            results.loc[feature_id, 'Rsquared'] = res_i.r2
            results.loc[feature_id, 'R2diff'] = self.r2 - res_i.r2
        return results
|
# Standard library imports
from typing import Iterator, List
# Third-party imports
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.transform import DataEntry, FlatMapTransformation, shift_timestamp
class ForkingSequenceSplitter(FlatMapTransformation):
    """Forking sequence splitter.

    For each sampled split index of a time series this emits one entry
    holding an encoder window of length ``enc_len`` and, during training,
    a ``(enc_len, dec_len)`` "forking" matrix whose row j contains the
    ``dec_len`` target values following encoder position j.
    """
    @validated()
    def __init__(
        self,
        train_sampler,
        enc_len: int,
        dec_len: int,
        time_series_fields: List[str] = None,
        target_in: str = "target",
        is_pad_out: str = "is_pad",
        start_in: str = "start",
        forecast_start_out: str = "forecast_start",
    ) -> None:
        assert enc_len > 0, "The value of `enc_len` should be > 0"
        assert dec_len > 0, "The value of `dec_len` should be > 0"
        # Sampler choosing split indices at training time.
        self.train_sampler = train_sampler
        self.enc_len = enc_len
        self.dec_len = dec_len
        # Additional time-series fields sliced alongside the target.
        self.ts_fields = (
            time_series_fields if time_series_fields is not None else []
        )
        self.target_in = target_in
        self.is_pad_out = is_pad_out
        self.start_in = start_in
        self.forecast_start_out = forecast_start_out
    def _past(self, col_name):
        # Output key for the encoder-side slice of a field.
        return f"past_{col_name}"
    def _future(self, col_name):
        # Output key for the decoder-side (forking) slice of a field.
        return f"future_{col_name}"
    def flatmap_transform(
        self, data: DataEntry, is_train: bool
    ) -> Iterator[DataEntry]:
        """Yield one transformed data entry per sampled split index."""
        dec_len = self.dec_len
        slice_cols = self.ts_fields + [self.target_in]
        target = data[self.target_in]
        if is_train:
            if len(target) < self.dec_len:
                # We currently cannot handle time series that are shorter than the
                # prediction length during training, so we just skip these.
                # If we want to include them we would need to pad and to mask
                # the loss.
                sampling_indices: List[int] = []
            else:
                sampling_indices = self.train_sampler(
                    target, 0, len(target) - self.dec_len
                )
        else:
            # At inference time, split exactly at the end of the series.
            sampling_indices = [len(target)]
        for i in sampling_indices:
            # Zeros needed on the left when history is shorter than enc_len.
            pad_length = max(self.enc_len - i, 0)
            d = data.copy()
            for ts_field in slice_cols:
                if i > self.enc_len:
                    # truncate to past_length
                    past_piece = d[ts_field][..., i - self.enc_len : i]
                elif i < self.enc_len:
                    # Left-pad with zeros up to the encoder length.
                    pad_block = np.zeros(
                        d[ts_field].shape[:-1] + (pad_length,)
                    )
                    past_piece = np.concatenate(
                        [pad_block, d[ts_field][..., :i]], axis=-1
                    )
                else:
                    past_piece = d[ts_field][..., :i]
                d[self._past(ts_field)] = np.expand_dims(past_piece, -1)
                # NOTE(review): the `is` identity check works because
                # slice_cols holds the very same string object as
                # self.target_in -- confirm if slice_cols is ever rebuilt.
                if is_train and ts_field is self.target_in:
                    # Row j: the dec_len values following encoder position j;
                    # rows whose window would start before t=0 remain zero.
                    forking_dec_field = np.zeros(
                        shape=(self.enc_len, self.dec_len)
                    )
                    for j in range(self.enc_len):
                        start_idx = i - self.enc_len + j + 1
                        if start_idx >= 0:
                            forking_dec_field[j, :] = d[ts_field][
                                ..., start_idx : start_idx + dec_len
                            ]
                    d[self._future(ts_field)] = forking_dec_field
                del d[ts_field]
            # 1.0 marks artificially padded encoder positions.
            pad_indicator = np.zeros(self.enc_len)
            if pad_length > 0:
                pad_indicator[:pad_length] = 1
            d[self._past(self.is_pad_out)] = pad_indicator
            # Timestamp of the first forecast step.
            d[self.forecast_start_out] = shift_timestamp(d[self.start_in], i)
            yield d
|
import numpy as np
import pandas as pd
import sparse
import lightgbm
import scipy.sparse
import pytest
import dask.array as da
from dask.array.utils import assert_eq
import dask.dataframe as dd
from dask.distributed import Client
from sklearn.datasets import make_blobs
from distributed.utils_test import gen_cluster, loop, cluster # noqa
from sklearn.metrics import confusion_matrix
import dask_lightgbm.core as dlgbm
# Workaround for conflict with distributed 1.23.0
# https://github.com/dask/dask-xgboost/pull/27#issuecomment-417474734
from concurrent.futures import ThreadPoolExecutor
import distributed.comm.utils
# Replace distributed's internal offload executor so comm offloading does not
# conflict with the test cluster (see the linked dask-xgboost discussion).
distributed.comm.utils._offload_executor = ThreadPoolExecutor(max_workers=2)
def _create_data(n_samples=100, centers=2, output="array", chunk_size=50):
    """Build a small blob classification dataset in several container types.

    Parameters
    ----------
    n_samples : int
        Number of samples to generate.
    centers : int or array-like
        Passed to ``sklearn.datasets.make_blobs`` (cluster count/centers).
    output : str
        One of ``"array"``, ``"dataframe"``, ``"scipy_csr_matrix"``,
        ``"sparse"`` -- selects the dask container kind.
    chunk_size : int
        Dask chunk/partition size along the sample axis.

    Returns
    -------
    tuple
        ``(X, y, w, dX, dy, dw)`` -- numpy/pandas data plus their dask
        counterparts (features, labels, sample weights).

    Raises
    ------
    ValueError
        If ``output`` is not a supported kind (the previous version fell
        through and crashed with a NameError at the return statement).
    """
    X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=42)
    rnd = np.random.RandomState(42)
    # Small positive sample weights so weighting is exercised without
    # dominating the fit.
    w = rnd.rand(X.shape[0]) * 0.01
    if output == "array":
        dX = da.from_array(X, chunks=(chunk_size, X.shape[1]))
        dy = da.from_array(y, chunks=chunk_size)
        dw = da.from_array(w, chunks=chunk_size)
    elif output == "dataframe":
        X_df = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(X.shape[1])])
        y_df = pd.Series(y, name="target")
        dX = dd.from_pandas(X_df, chunksize=chunk_size)
        dy = dd.from_pandas(y_df, chunksize=chunk_size)
        dw = dd.from_array(w, chunksize=chunk_size)
    elif output == "scipy_csr_matrix":
        dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(scipy.sparse.csr_matrix)
        dy = da.from_array(y, chunks=chunk_size)
        dw = da.from_array(w, chunks=chunk_size)
    elif output == "sparse":
        dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(sparse.COO)
        dy = da.from_array(y, chunks=chunk_size)
        dw = da.from_array(w, chunks=chunk_size)
    else:
        raise ValueError("Unknown output type: %r" % (output,))
    return X, y, w, dX, dy, dw
@pytest.mark.parametrize("output, listen_port, centers", [ #noqa
    ('array', 11400, [[-4, -4], [4, 4]]),
    ('array', 12400, [[-4, -4], [4, 4], [-4, 4]]),
    ('scipy_csr_matrix', 13400, [[-4, -4], [4, 4]]),
    ('scipy_csr_matrix', 14400, [[-4, -4], [4, 4], [-4, 4]]),
    ('sparse', 15400, [[-4, -4], [4, 4]]),
    ('sparse', 16400, [[-4, -4], [4, 4], [-4, 4]]),
    ('dataframe', 17400, [[-4, -4], [4, 4]]),
    ('dataframe', 18400, [[-4, -4], [4, 4], [-4, 4]])
    ]) # noqa
def test_classifier(loop, output, listen_port, centers):
    """Distributed classifier predictions must match local lightgbm."""
    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:
            X, y, w, dX, dy, dw = _create_data(output=output, centers=centers)
            # Distributed fit/predict on dask collections.
            dask_clf = dlgbm.LGBMClassifier(local_listen_port=listen_port)
            dask_clf = dask_clf.fit(dX, dy, sample_weight=dw)
            p1 = dask_clf.predict(dX, client=client)
            p1 = p1.compute()
            # Reference fit/predict with plain lightgbm on the same data.
            ref_clf = lightgbm.LGBMClassifier()
            ref_clf.fit(X, y, sample_weight=w)
            p2 = ref_clf.predict(X)
            print(confusion_matrix(y, p1))
            print(confusion_matrix(y, p2))
            assert_eq(p1, p2)
            assert_eq(y, p1)
            assert_eq(y, p2)
@pytest.mark.parametrize("output, listen_port, centers", [ #noqa
    ('array', 21400, [[-4, -4], [4, 4]]),
    ('array', 22400, [[-4, -4], [4, 4], [-4, 4]]),
    ('scipy_csr_matrix', 23400, [[-4, -4], [4, 4]]),
    ('scipy_csr_matrix', 24400, [[-4, -4], [4, 4], [-4, 4]]),
    ('sparse', 25400, [[-4, -4], [4, 4]]),
    ('sparse', 26400, [[-4, -4], [4, 4], [-4, 4]]),
    ('dataframe', 27400, [[-4, -4], [4, 4]]),
    ('dataframe', 28400, [[-4, -4], [4, 4], [-4, 4]])
    ]) # noqa
def test_classifier_proba(loop, output, listen_port, centers):
    """Distributed predict_proba must approximate local lightgbm's."""
    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop) as client:
            X, y, w, dX, dy, dw = _create_data(output=output, centers=centers)
            dask_clf = dlgbm.LGBMClassifier(local_listen_port=listen_port)
            dask_clf = dask_clf.fit(dX, dy, sample_weight=dw)
            p1 = dask_clf.predict_proba(dX, client=client).compute()
            ref_clf = lightgbm.LGBMClassifier()
            ref_clf.fit(X, y, sample_weight=w)
            p2 = ref_clf.predict_proba(X)
            # Probabilities differ slightly between the distributed and the
            # local training paths, hence the loose tolerance.
            assert_eq(p1, p2, atol=0.3)
def test_classifier_local_predict(loop): #noqa
    """A distributed model converted via to_local() predicts like lightgbm."""
    with cluster() as (s, [a, b]):
        with Client(s['address'], loop=loop):
            X, y, w, dX, dy, dw = _create_data(output="array")
            dask_clf = dlgbm.LGBMClassifier(local_listen_port=11400)
            dask_clf = dask_clf.fit(dX, dy, sample_weight=dw)
            # Predict through the extracted local model, not the cluster.
            p1 = dask_clf.to_local().predict(dX)
            ref_clf = lightgbm.LGBMClassifier()
            ref_clf.fit(X, y, sample_weight=w)
            p2 = ref_clf.predict(X)
            assert_eq(p1, p2)
            assert_eq(y, p1)
            assert_eq(y, p2)
def test_build_network_params():
    """Ports are assigned consecutively from the base port in worker order."""
    worker_addresses = [
        "tcp://192.168.0.1:34545",
        "tcp://192.168.0.2:34346",
        "tcp://192.168.0.3:34347"
    ]
    # The local worker is the second entry, so it gets base port + 1.
    params = dlgbm.build_network_params(
        worker_addresses, "tcp://192.168.0.2:34346", 12400, 120)
    expected = {
        "machines": "192.168.0.1:12400,192.168.0.2:12401,192.168.0.3:12402",
        "local_listen_port": 12401,
        "num_machines": len(worker_addresses),
        "listen_time_out": 120
    }
    assert params == expected
@gen_cluster(client=True, timeout=None, check_new_threads=False)
def test_errors(c, s, a, b):
    """Exceptions raised inside worker tasks must propagate out of train()."""
    def f(part):
        # Every partition fails, so training cannot succeed.
        raise Exception('foo')
    df = dd.demo.make_timeseries()
    df = df.map_partitions(f, meta=df._meta)
    with pytest.raises(Exception) as info:
        # gen_cluster-style coroutine test: `yield` awaits the future.
        yield dlgbm.train(c, df, df.x, params={}, model_factory=lightgbm.LGBMClassifier)
    assert 'foo' in str(info.value)
|
from sieve import *
# runtime of sumPrimesBelow(2000000, 5000) is 217.033s
# sum all primes below a number N
# runtime of sumPrimesBelow(2000000, 5000) is 217.033s
# sum all primes below a number N
def sumPrimesBelow(N, windowSize):
    """Sum all primes strictly below N using a sliding-window sieve.

    Repeatedly sieves windows of `windowSize` numbers (reusing the primes
    found so far) until the largest prime found reaches N, then sums only
    the primes below N.
    """
    primes = []
    largest = 0
    window_end = windowSize
    # Extend the prime list one window at a time until we pass N.
    while largest < N:
        window_start = window_end - (windowSize - 1)
        primes.extend(
            slidingSieveOfEratosthenes(window_start, window_end, primes))
        # Primes come back in order, so the last one is the largest.
        largest = primes[-1]
        window_end += windowSize
    # The final window may overshoot N; count only primes below N.
    return sum(p for p in primes if p < N)
|
#!/usr/bin/env python
import rospy
from apriltag_ros.msg import AprilTagDetection, AprilTagDetectionArray
from gazebo_msgs.srv import GetModelState
from geometry_msgs.msg import Point, Point32, Pose, PoseStamped, Quaternion, TransformStamped, Twist
from nav_msgs.msg import Odometry
import math
from math import *
# Mostly Useless Imports
from std_msgs.msg import Header, String, UInt16
import tf
from std_msgs.msg import Float32
# ---------------------------------------------------------------------------------
size = 0                         # number of tags in the latest detection message
yoda = Odometry()                # placeholder odometry message (unused below)
tag = 0                          # id of the tag currently being processed
sock = AprilTagDetectionArray()  # most recent AprilTagDetectionArray received
odo_tr = TransformStamped()      # placeholder transform (unused below)
flag = [False for i in range(10)]  # per-tag-id "already handled" markers
i=0                              # detection index shared with callback()
def move(omega,left,right):
    """Publish wheel speed ``omega`` to the left then the right publisher."""
    for wheel_pub in (left, right):
        wheel_pub.publish(omega)
#def brake(left,right):
# left.publish(0)
# right.publish(0)
def callback(data):
    """Handle one AprilTagDetectionArray message.

    For every detection, read the tag id and -- the first time that id is
    seen -- create wheel-speed publishers for the matching ditto robot and
    command it forward at 0.5.

    Fixes over the previous revision:
    - ``flag[k] == True`` was a comparison with no effect, so the flags were
      never set and every robot was re-commanded (with publishers re-created)
      on every message; it is now an assignment.
    - ``'/ditto' + i`` concatenated str and int, which raises TypeError; the
      index is now formatted into the topic name.
    - Ten copy-pasted per-tag branches are collapsed into one shared path.
    """
    global size
    global yoda
    global tag
    global sock
    global i
    global odo_tr
    br = tf.TransformBroadcaster()  # kept from the original; not used below
    sock = data
    size = len(sock.detections)
    print(size)
    for i in range(0, size):
        print("Index: ")
        print(i)
        tag = sock.detections[i].id[0]
        print("Tag ID: ")
        print(tag)
        # Only tag ids with a flag slot (0-9) are handled.
        if 0 <= tag < len(flag):
            print("THIS IS ID%d::" % tag)
            if not flag[tag]:
                # NOTE(review): topics are keyed by the detection index `i`,
                # not by `tag`, exactly as in the original -- confirm intent.
                left_pub = rospy.Publisher('/ditto%d/left_wheel_speed' % i,
                                           Float32, queue_size=1000)
                right_pub = rospy.Publisher('/ditto%d/right_wheel_speed' % i,
                                            Float32, queue_size=1000)
                flag[tag] = True
                move(0.5, left_pub, right_pub)
# ---------------------------------------------------------------------------------
def py_accu_check():
    """Start the AprilTag-following ROS node and block until shutdown."""
    # Anonymous node name avoids collisions when several instances run.
    rospy.init_node('movecam_hybrid', anonymous=True)
    # Every detection array published on /tag_detections is routed to callback.
    rospy.Subscriber("/tag_detections", AprilTagDetectionArray, callback)
    # Hand control to the ROS event loop; all work happens inside callback.
    rospy.spin()
# ---------------------------------------------------------------------------------
if __name__ == '__main__':
    # Run the node only when executed as a script (not when imported).
    py_accu_check()
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests clean exit of server/client on Python Interpreter exit/sigint.
The tests in this module spawn a subprocess for each test case, the
test is considered successful if it doesn't hang/timeout.
"""
import atexit
import os
import signal
import six
import subprocess
import sys
import threading
import datetime
import time
import unittest
import logging
from tests.unit import _exit_scenarios
# Path of the scenario driver script that every test launches as a subprocess.
SCENARIO_FILE = os.path.abspath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)),
                 '_exit_scenarios.py'))
# Run scenarios under the same interpreter that is running the tests.
INTERPRETER = sys.executable
BASE_COMMAND = [INTERPRETER, SCENARIO_FILE]
# Variant that asks the scenario to block until it receives a signal.
BASE_SIGTERM_COMMAND = BASE_COMMAND + ['--wait_for_interrupt']
# Grace period a child gets to initialize before we interrupt it.
INIT_TIME = datetime.timedelta(seconds=1)
# Polling cadence and overall deadline used by _process_wait_with_timeout.
WAIT_CHECK_INTERVAL = datetime.timedelta(milliseconds=100)
WAIT_CHECK_DEFAULT_TIMEOUT = datetime.timedelta(seconds=5)
# Registry of every subprocess the tests spawn, guarded by a lock so test
# code and the atexit hook never mutate it concurrently.
processes = []
process_lock = threading.Lock()


def cleanup_processes():
    """Best-effort kill of every tracked subprocess so none outlive the run."""
    with process_lock:
        for proc in processes:
            try:
                proc.kill()
            except Exception:  # pylint: disable=broad-except
                # The child may already be gone; killing is best-effort.
                pass


# Guarantee cleanup runs even when the interpreter exits abnormally.
atexit.register(cleanup_processes)
def _process_wait_with_timeout(process, timeout=WAIT_CHECK_DEFAULT_TIMEOUT):
    """A function to mimic the 3.3+-only `timeout` argument of `process.wait`.

    Polls `process` every WAIT_CHECK_INTERVAL until it exits or `timeout`
    (a datetime.timedelta) elapses.

    Raises:
        RuntimeError: if the process is still running when the deadline passes.
    """
    # Use the monotonic clock so wall-clock adjustments (NTP, DST) cannot
    # shrink or stretch the deadline; also fixes the "funciton" docstring typo.
    deadline = time.monotonic() + timeout.total_seconds()
    while (process.poll() is None) and (time.monotonic() < deadline):
        time.sleep(WAIT_CHECK_INTERVAL.total_seconds())
    if process.returncode is None:
        raise RuntimeError('Process failed to exit within %s' % timeout)
def interrupt_and_wait(process):
    """SIGINT `process` after a startup grace period, then wait for it to exit.

    The process is registered for atexit cleanup first, so it cannot be
    leaked even if the wait times out.
    """
    with process_lock:
        processes.append(process)
    # Give the child INIT_TIME to finish starting before interrupting it.
    time.sleep(INIT_TIME.total_seconds())
    os.kill(process.pid, signal.SIGINT)
    _process_wait_with_timeout(process)
def wait(process):
    """Register `process` for atexit cleanup and wait (bounded) for it to exit."""
    with process_lock:
        processes.append(process)
    _process_wait_with_timeout(process)
# TODO(lidiz) enable exit tests once the root cause found.
@unittest.skip('https://github.com/grpc/grpc/issues/23982')
@unittest.skip('https://github.com/grpc/grpc/issues/23028')
class ExitTest(unittest.TestCase):
    """Each test spawns one scenario subprocess; success means a clean exit.

    Scenarios either run to completion (`wait`) or receive SIGINT after a
    short startup window (`interrupt_and_wait`); a hang/timeout is a failure.
    """

    def _spawn(self, command, scenario):
        """Launch `scenario` via `command`, forwarding the child's output.

        Centralizes the Popen boilerplate previously repeated in every test;
        stdout and stderr are always forwarded so failure detail from the
        child is never lost (test_unstarted_server_terminate used to drop
        stderr, inconsistent with every sibling test).
        """
        return subprocess.Popen(command + [scenario],
                                stdout=sys.stdout,
                                stderr=sys.stderr)

    def test_unstarted_server(self):
        wait(self._spawn(BASE_COMMAND, _exit_scenarios.UNSTARTED_SERVER))

    def test_unstarted_server_terminate(self):
        interrupt_and_wait(
            self._spawn(BASE_SIGTERM_COMMAND, _exit_scenarios.UNSTARTED_SERVER))

    def test_running_server(self):
        wait(self._spawn(BASE_COMMAND, _exit_scenarios.RUNNING_SERVER))

    def test_running_server_terminate(self):
        interrupt_and_wait(
            self._spawn(BASE_SIGTERM_COMMAND, _exit_scenarios.RUNNING_SERVER))

    def test_poll_connectivity_no_server(self):
        wait(
            self._spawn(BASE_COMMAND,
                        _exit_scenarios.POLL_CONNECTIVITY_NO_SERVER))

    def test_poll_connectivity_no_server_terminate(self):
        interrupt_and_wait(
            self._spawn(BASE_SIGTERM_COMMAND,
                        _exit_scenarios.POLL_CONNECTIVITY_NO_SERVER))

    def test_poll_connectivity(self):
        wait(self._spawn(BASE_COMMAND, _exit_scenarios.POLL_CONNECTIVITY))

    def test_poll_connectivity_terminate(self):
        interrupt_and_wait(
            self._spawn(BASE_SIGTERM_COMMAND,
                        _exit_scenarios.POLL_CONNECTIVITY))

    @unittest.skipIf(os.name == 'nt',
                     'os.kill does not have required permission on Windows')
    def test_in_flight_unary_unary_call(self):
        interrupt_and_wait(
            self._spawn(BASE_COMMAND,
                        _exit_scenarios.IN_FLIGHT_UNARY_UNARY_CALL))

    @unittest.skipIf(os.name == 'nt',
                     'os.kill does not have required permission on Windows')
    def test_in_flight_unary_stream_call(self):
        interrupt_and_wait(
            self._spawn(BASE_COMMAND,
                        _exit_scenarios.IN_FLIGHT_UNARY_STREAM_CALL))

    @unittest.skipIf(os.name == 'nt',
                     'os.kill does not have required permission on Windows')
    def test_in_flight_stream_unary_call(self):
        interrupt_and_wait(
            self._spawn(BASE_COMMAND,
                        _exit_scenarios.IN_FLIGHT_STREAM_UNARY_CALL))

    @unittest.skipIf(os.name == 'nt',
                     'os.kill does not have required permission on Windows')
    def test_in_flight_stream_stream_call(self):
        interrupt_and_wait(
            self._spawn(BASE_COMMAND,
                        _exit_scenarios.IN_FLIGHT_STREAM_STREAM_CALL))

    @unittest.skipIf(os.name == 'nt',
                     'os.kill does not have required permission on Windows')
    def test_in_flight_partial_unary_stream_call(self):
        interrupt_and_wait(
            self._spawn(BASE_COMMAND,
                        _exit_scenarios.IN_FLIGHT_PARTIAL_UNARY_STREAM_CALL))

    @unittest.skipIf(os.name == 'nt',
                     'os.kill does not have required permission on Windows')
    def test_in_flight_partial_stream_unary_call(self):
        interrupt_and_wait(
            self._spawn(BASE_COMMAND,
                        _exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_UNARY_CALL))

    @unittest.skipIf(os.name == 'nt',
                     'os.kill does not have required permission on Windows')
    def test_in_flight_partial_stream_stream_call(self):
        interrupt_and_wait(
            self._spawn(BASE_COMMAND,
                        _exit_scenarios.IN_FLIGHT_PARTIAL_STREAM_STREAM_CALL))
if __name__ == '__main__':
    # Verbose logging helps diagnose hangs, the main failure mode here.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(verbosity=2)
|
#!/usr/bin/env python
import io
import os
import re

from setuptools import setup, find_packages
# Read the long description as UTF-8 explicitly: the bare open() decoded with
# the platform's locale encoding and fails on non-ASCII READMEs.  io.open is
# used (rather than open(..., encoding=...)) because this setup.py still
# advertises Python 2 support in its classifiers.
with io.open(os.path.join(os.path.dirname(__file__), 'README.rst'),
             encoding='utf-8') as f:
    long_description = f.read()
def get_version(package):
    """
    Return package version as listed in `__version__` in `__init__.py`.
    """
    init_path = os.path.join(os.path.dirname(__file__), package, '__init__.py')
    # Read as bytes and decode explicitly so the platform locale is irrelevant.
    with open(init_path, 'rb') as handle:
        contents = handle.read().decode('utf-8')
    match = re.search("__version__ = ['\"]([^'\"]+)['\"]", contents)
    return match.group(1)
# Distribution metadata.  The version string is scraped from
# prompt_toolkit/__init__.py by get_version() so it lives in one place.
setup(
    name='prompt_toolkit',
    author='Jonathan Slenders',
    version=get_version('prompt_toolkit'),
    license='BSD-3-Clause',
    url='https://github.com/jonathanslenders/python-prompt-toolkit',
    description='Library for building powerful interactive command lines in Python',
    long_description=long_description,
    packages=find_packages('.'),
    install_requires=[
        'six>=1.9.0',
        'wcwidth',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python',
        'Topic :: Software Development',
    ],
)
|
from .models import ServiceAccount
from .utils import token_from_request
class ServiceAccountTokenBackend:
    """Authentication backend resolving a ServiceAccount from a bearer token.

    NOTE(review): Django auth backends conventionally also define get_user();
    confirm callers don't rely on session-based re-authentication here.
    """

    def authenticate(self, request, service_token=None):
        """Return the ServiceAccount matching `service_token`, or None.

        When no explicit token is given, one is extracted from `request`.
        """
        if request and not service_token:
            service_token = token_from_request(request)
        try:
            account = ServiceAccount.objects.get(token=service_token)
        except ServiceAccount.DoesNotExist:
            return None
        return account
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_demo_model.ipynb (unless otherwise specified).
__all__ = ['load_if_present', 'dump_if_path', 'get_scaler', 'generate_data', 'split_data', 'train_model', 'predict',
'data_root']
# Cell
from ..imports import *
# Cell
data_root = Path('stack/data')  # base directory for persisted demo artifacts
def load_if_present(path, mode='rb'):
    """Deserialize and return the object stored at `path`, or None.

    Returns None when `path` is None or does not exist.  Accepts either a
    str or a pathlib.Path: the original called the unbound `Path.exists(path)`,
    which raised TypeError for plain string paths.
    """
    if path is None:
        return None
    if not Path(path).exists():
        return None
    # Context manager closes the handle deterministically (the original
    # leaked it by passing a bare open() to load).
    with open(path, mode) as handle:
        return load(handle)
def dump_if_path(o, path=None, mode='wb'):
    """Serialize `o` to `path` when a path is given; always return `o`.

    Pure pass-through when `path` is None, so it can be chained freely.
    """
    if path is not None:
        # Context manager flushes and closes the file even if dump raises;
        # the original leaked the handle returned by open().
        with open(path, mode) as handle:
            dump(o, handle)
    return o
def get_scaler(X=None, fn=StandardScaler, path=None):
    """Return a fitted scaler, loading a cached one from `path` when available.

    When `X` is given, a fresh scaler of type `fn` is fitted on it (replacing
    any loaded one) and, if `path` is set, persisted there.
    """
    scaler = load_if_present(path)
    if X is not None:  # idiomatic form of the original `not X is None`
        scaler = fn()
        scaler.fit(X)
        if path is not None:
            # Context manager closes the handle (the original leaked it).
            with open(path, 'wb') as handle:
                dump(scaler, handle)
    return scaler
def generate_data(path=None):
    """Build the demo dataset: standardized concentric circles.

    Returns (X, y, scaler) where X has been transformed by `scaler`
    (cached at `path` when given).

    NOTE(review): the original also called make_classification and added
    uniform noise, then immediately overwrote X and y with make_circles;
    that dead work is removed here — the returned data is unchanged.
    """
    X, y = make_circles(noise=0.2, factor=0.5, random_state=1)
    scaler = get_scaler(X=X, path=path)
    X = scaler.transform(X)
    return X, y, scaler
# Thin wrapper over sklearn's train_test_split; returns
# (X_train, X_test, y_train, y_test).  Extra kwargs (e.g. random_state,
# stratify) are forwarded unchanged.
def split_data(X, y, test_size=.4, **kwargs):
    return train_test_split(X, y, test_size=test_size, **kwargs)
def train_model(X_train, y_train, path=None):
    """Return a classifier for the data, reusing a cached model when present.

    A model loaded from `path` is returned as-is; otherwise a fresh RBF SVC
    is fitted and, when `path` is given, persisted via dump_if_path.
    """
    model = load_if_present(path)
    if model is not None:
        return model
    model = SVC(gamma=2, C=1, probability=True)
    model.fit(X_train, y_train)
    # dump_if_path returns its argument, so this both persists and returns.
    return dump_if_path(model, path=path)
def predict(params, root=Path('stack/tmp')):
    """Classify one sample using the persisted scaler and classifier.

    Args:
        params: feature values for a single sample (any sequence).
        root: directory holding 'scaler.pkl' and 'clf.pkl'.  New optional
            parameter; defaults to the previously hard-coded location, so
            existing callers are unaffected.

    Returns:
        (choice, probabilities): predicted class as an int and the per-class
        probabilities as a plain list.

    Raises:
        FileNotFoundError: when either artifact is missing — the original
            surfaced this later as an opaque AttributeError on None.
    """
    root = Path(root)
    clf = load_if_present(root / 'clf.pkl')
    scaler = load_if_present(root / 'scaler.pkl')
    if clf is None or scaler is None:
        raise FileNotFoundError(
            'missing model artifacts under %s (need clf.pkl and scaler.pkl)'
            % root)
    X = scaler.transform(np.asarray(params).reshape(1, -1))
    choice = int(clf.predict(X)[0])
    probabilities = clf.predict_proba(X)[0].tolist()
    return choice, probabilities
|
import nltk
#nltk.download('punkt')
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
import numpy
import tensorflow
import random
import tflearn
import json
import pickle
import speech_recognition as sr
## Fetch the training data
# intents.json maps each intent "tag" to its example "patterns" and "responses".
with open("intents.json") as file:
    data = json.load(file)
try:
    # Fast path: reuse the preprocessed corpus from a previous run.
    with open("data.pickle", "rb") as f:
        words, labels, training, output = pickle.load(f)
except Exception:
    # Cache missing or unreadable -- rebuild from intents.json.  (The
    # original used a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception is the widest sensible net.)
    words = []
    docs_x = []
    docs_y = []
    labels = []
    # Tokenize every example pattern, remembering which intent it belongs to.
    for intent in data["intents"]:
        for pattern in intent["patterns"]:
            wrds = nltk.word_tokenize(pattern)
            words.extend(wrds)
            docs_x.append(wrds)
            docs_y.append(intent["tag"])
        if intent["tag"] not in labels:
            labels.append(intent["tag"])
    # Stem + lowercase the vocabulary and drop bare question marks.  The
    # original tested `w not in "?"` — a substring check that also dropped
    # empty strings; the intent is plainly `w != "?"`.  Per-token debug
    # prints from the original loop were removed as noise.
    words = [stemmer.stem(w.lower()) for w in words if w != "?"]
    words = sorted(list(set(words)))
    labels = sorted(labels)
    training = []
    output = []
    out_empty = [0 for _ in range(len(labels))]
    # One bag-of-words row per document, one matching one-hot label row.
    for x, doc in enumerate(docs_x):
        bag = []
        wrds = [stemmer.stem(w) for w in doc]
        for w in words:
            bag.append(1 if w in wrds else 0)
        output_row = out_empty[:]
        output_row[labels.index(docs_y[x])] = 1
        training.append(bag)
        output.append(output_row)
    training = numpy.array(training)
    output = numpy.array(output)
    # Cache the rebuilt corpus for the next run.
    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)
# Reset the default graph so repeated runs don't accumulate ops.
tensorflow.reset_default_graph()
# Two hidden layers of 8 units each; softmax output over the intent labels.
net = tflearn.input_data(shape = [None, len(training[0])])
net = tflearn.fully_connected(net, 8 )
net = tflearn.fully_connected(net, 8 )
net = tflearn.fully_connected(net, len(output[0]), activation = "softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
# Model-loading shortcut left disabled by the original author; the network
# is retrained from scratch on every run and then saved.
#try:
#    model.load("model.tflearn")
#except:
model.fit(training, output, n_epoch = 1000, batch_size = 8, show_metric = True)
model.save("model.tflearn")
def bag_of_words(s, words):
    """Encode sentence `s` as a binary bag-of-words vector over `words`."""
    # Tokenize and stem the input the same way the vocabulary was built.
    stemmed = {stemmer.stem(token.lower()) for token in nltk.word_tokenize(s)}
    # 1 where the vocabulary word occurs in the stemmed sentence, else 0.
    return numpy.array([1 if w in stemmed else 0 for w in words])
def chat():
    """Interactive REPL: read input, classify the intent, print a response."""
    print("Start talking with Arya - (Type quit to stop)!")
    while True:
        inp = input(" You: ")
        if inp.lower() == "quit":
            break
        # Model confidence per intent label for this utterance.
        results = model.predict([bag_of_words(inp, words)])[0]
        results_index = numpy.argmax(results)
        tag = labels[results_index]
        if results[results_index] <= 0.7:
            # Low confidence: admit it instead of guessing an intent.
            print(" Arya: I didn't get that, try some other...")
            continue
        for tg in data["intents"]:
            if tg['tag'] == tag:
                responses = tg['responses']
        print(" Arya: " + random.choice(responses))
|
#!/usr/bin/env python3
# SPDX-license-identifier: Apache-2.0
# Copyright © 2021 Intel Corporation
"""Script for running a single project test.
This script is meant for Meson developers who want to run a single project
test, with all of the rules from the test.json file loaded.
"""
import argparse
import pathlib
import typing as T
from mesonbuild import mlog
from run_project_tests import TestDef, load_test_json, run_test, BuildStep
from run_project_tests import setup_commands, detect_system_compiler, print_tool_versions
# These definitions exist only for static type checkers; at runtime the
# import (and the class) are skipped entirely.
if T.TYPE_CHECKING:
    from run_project_tests import CompilerArgumentType
    # Shape of the argparse namespace after parsing, used via T.cast in main().
    class ArgumentType(CompilerArgumentType):
        """Typing information for command line arguments."""
        case: pathlib.Path
        subtests: T.List[int]
        backend: str
def main() -> None:
    """Parse arguments, run the selected project test(s), and report results."""
    parser = argparse.ArgumentParser()
    parser.add_argument('case', type=pathlib.Path, help='The test case to run')
    parser.add_argument('--subtest', type=int, action='append', dest='subtests', help='which subtests to run')
    parser.add_argument('--backend', action='store', help="Which backend to use")
    parser.add_argument('--cross-file', action='store', help='File describing cross compilation environment.')
    parser.add_argument('--native-file', action='store', help='File describing native compilation environment.')
    parser.add_argument('--use-tmpdir', action='store_true', help='Use tmp directory for temporary files.')
    args = T.cast('ArgumentType', parser.parse_args())

    setup_commands(args.backend)
    detect_system_compiler(args)
    print_tool_versions()

    test = TestDef(args.case, args.case.stem, [])
    tests = load_test_json(test, False)
    if args.subtests:
        # --subtest indices select a subset of the loaded test definitions.
        tests = [t for i, t in enumerate(tests) if i in args.subtests]

    def should_fail(path: pathlib.Path) -> str:
        """Derive the expected-failure kind from the test's parent directory name."""
        dir_ = path.parent.stem
        # FIXME: warning tests might not be handled correctly still…
        if dir_.startswith(('failing', 'warning')):
            if ' ' in dir_:
                return dir_.split(' ')[1]
            return 'meson'
        return ''

    results = [run_test(t, t.args, should_fail(t.path), args.use_tmpdir) for t in tests]
    failed = False
    for test, result in zip(tests, results):
        if (result is None) or ('MESON_SKIP_TEST' in result.stdo):
            msg = mlog.yellow('SKIP:')
        elif result.msg:
            msg = mlog.red('FAIL:')
            failed = True
        else:
            msg = mlog.green('PASS:')
        mlog.log(msg, *test.display_name())
        if result is not None and result.msg and 'MESON_SKIP_TEST' not in result.stdo:
            mlog.log('reason:', result.msg)
            if result.step is BuildStep.configure:
                # For configure failures, instead of printing stdout,
                # print the meson log if available since it's a superset
                # of stdout and often has very useful information.
                mlog.log(result.mlog)
            else:
                mlog.log(result.stdo)
            # Only touch result's attributes inside the not-None guard:
            # skipped cases yield result None and would otherwise crash here.
            for cmd_res in result.cicmds:
                mlog.log(cmd_res)
            mlog.log(result.stde)

    # raise SystemExit rather than the site-injected exit() so this also
    # works under `python -S` and embedded interpreters.
    raise SystemExit(1 if failed else 0)
if __name__ == "__main__":
main()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['GroupArgs', 'Group']
@pulumi.input_type
class GroupArgs:
def __init__(__self__, *,
max_size: pulumi.Input[int],
min_size: pulumi.Input[int],
availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
capacity_rebalance: Optional[pulumi.Input[bool]] = None,
default_cooldown: Optional[pulumi.Input[int]] = None,
desired_capacity: Optional[pulumi.Input[int]] = None,
enabled_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
force_delete: Optional[pulumi.Input[bool]] = None,
force_delete_warm_pool: Optional[pulumi.Input[bool]] = None,
health_check_grace_period: Optional[pulumi.Input[int]] = None,
health_check_type: Optional[pulumi.Input[str]] = None,
initial_lifecycle_hooks: Optional[pulumi.Input[Sequence[pulumi.Input['GroupInitialLifecycleHookArgs']]]] = None,
instance_refresh: Optional[pulumi.Input['GroupInstanceRefreshArgs']] = None,
launch_configuration: Optional[pulumi.Input[str]] = None,
launch_template: Optional[pulumi.Input['GroupLaunchTemplateArgs']] = None,
load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
max_instance_lifetime: Optional[pulumi.Input[int]] = None,
metrics_granularity: Optional[pulumi.Input[Union[str, 'MetricsGranularity']]] = None,
min_elb_capacity: Optional[pulumi.Input[int]] = None,
mixed_instances_policy: Optional[pulumi.Input['GroupMixedInstancesPolicyArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
placement_group: Optional[pulumi.Input[str]] = None,
protect_from_scale_in: Optional[pulumi.Input[bool]] = None,
service_linked_role_arn: Optional[pulumi.Input[str]] = None,
suspended_processes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['GroupTagArgs']]]] = None,
tags_collection: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
target_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
vpc_zone_identifiers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
wait_for_capacity_timeout: Optional[pulumi.Input[str]] = None,
wait_for_elb_capacity: Optional[pulumi.Input[int]] = None,
warm_pool: Optional[pulumi.Input['GroupWarmPoolArgs']] = None):
"""
The set of arguments for constructing a Group resource.
:param pulumi.Input[int] max_size: The maximum size of the Auto Scaling Group.
:param pulumi.Input[int] min_size: Specifies the minimum number of instances to maintain in the warm pool. This helps you to ensure that there is always a certain number of warmed instances available to handle traffic spikes. Defaults to 0 if not specified.
:param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `vpc_zone_identifier` argument. Conflicts with `vpc_zone_identifier`.
:param pulumi.Input[bool] capacity_rebalance: Indicates whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled.
:param pulumi.Input[int] default_cooldown: The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
:param pulumi.Input[int] desired_capacity: The number of Amazon EC2 instances that
should be running in the group. (See also Waiting for
Capacity below.)
:param pulumi.Input[Sequence[pulumi.Input[str]]] enabled_metrics: A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`.
:param pulumi.Input[bool] force_delete: Allows deleting the Auto Scaling Group without waiting
for all instances in the pool to terminate. You can force an Auto Scaling Group to delete
even if it's in the process of scaling a resource. Normally, this provider
drains all the instances before deleting the group. This bypasses that
behavior and potentially leaves resources dangling.
:param pulumi.Input[int] health_check_grace_period: Time (in seconds) after instance comes into service before checking health.
:param pulumi.Input[str] health_check_type: "EC2" or "ELB". Controls how health checking is done.
:param pulumi.Input[Sequence[pulumi.Input['GroupInitialLifecycleHookArgs']]] initial_lifecycle_hooks: One or more
[Lifecycle Hooks](http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html)
to attach to the Auto Scaling Group **before** instances are launched. The
syntax is exactly the same as the separate
`autoscaling.LifecycleHook`
resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating
a new Auto Scaling Group. For all other use-cases, please use `autoscaling.LifecycleHook` resource.
:param pulumi.Input['GroupInstanceRefreshArgs'] instance_refresh: If this block is configured, start an
[Instance Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
when this Auto Scaling Group is updated. Defined below.
:param pulumi.Input[str] launch_configuration: The name of the launch configuration to use.
:param pulumi.Input['GroupLaunchTemplateArgs'] launch_template: Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] load_balancers: A list of elastic load balancer names to add to the autoscaling
group names. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead.
:param pulumi.Input[int] max_instance_lifetime: The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds.
:param pulumi.Input[Union[str, 'MetricsGranularity']] metrics_granularity: The granularity to associate with the metrics to collect. The only valid value is `1Minute`. Default is `1Minute`.
:param pulumi.Input[int] min_elb_capacity: Setting this causes the provider to wait for
this number of instances from this Auto Scaling Group to show up healthy in the
ELB only on creation. Updates will not wait on ELB instance number changes.
(See also Waiting for Capacity below.)
:param pulumi.Input['GroupMixedInstancesPolicyArgs'] mixed_instances_policy: Configuration block containing settings to define launch targets for Auto Scaling groups. Defined below.
:param pulumi.Input[str] name: The name of the Auto Scaling Group. By default generated by this provider.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
prefix. Conflicts with `name`.
:param pulumi.Input[str] placement_group: The name of the placement group into which you'll launch your instances, if any.
:param pulumi.Input[bool] protect_from_scale_in: Allows setting instance protection. The
Auto Scaling Group will not select instances with this setting for termination
during scale in events.
:param pulumi.Input[str] service_linked_role_arn: The ARN of the service-linked role that the ASG will use to call other AWS services
:param pulumi.Input[Sequence[pulumi.Input[str]]] suspended_processes: A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`.
Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly.
:param pulumi.Input[Sequence[pulumi.Input['GroupTagArgs']]] tags: Configuration block(s) containing resource tags. Conflicts with `tags`. Documented below.
:param pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]] tags_collection: Set of maps containing resource tags. Conflicts with `tag`. Documented below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] target_group_arns: A set of `alb.TargetGroup` ARNs, for use with Application or Network Load Balancing.
:param pulumi.Input[Sequence[pulumi.Input[str]]] termination_policies: A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] vpc_zone_identifiers: A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`.
:param pulumi.Input[str] wait_for_capacity_timeout: A maximum
[duration](https://golang.org/pkg/time/#ParseDuration) that this provider should
wait for ASG instances to be healthy before timing out. (See also Waiting
for Capacity below.) Setting this to "0" causes
this provider to skip all Capacity Waiting behavior.
:param pulumi.Input[int] wait_for_elb_capacity: Setting this will cause the provider to wait
for exactly this number of healthy instances from this Auto Scaling Group in
all attached load balancers on both create and update operations. (Takes
precedence over `min_elb_capacity` behavior.)
(See also Waiting for Capacity below.)
:param pulumi.Input['GroupWarmPoolArgs'] warm_pool: If this block is configured, add a [Warm Pool](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html)
to the specified Auto Scaling group. Defined below
"""
pulumi.set(__self__, "max_size", max_size)
pulumi.set(__self__, "min_size", min_size)
if availability_zones is not None:
pulumi.set(__self__, "availability_zones", availability_zones)
if capacity_rebalance is not None:
pulumi.set(__self__, "capacity_rebalance", capacity_rebalance)
if default_cooldown is not None:
pulumi.set(__self__, "default_cooldown", default_cooldown)
if desired_capacity is not None:
pulumi.set(__self__, "desired_capacity", desired_capacity)
if enabled_metrics is not None:
pulumi.set(__self__, "enabled_metrics", enabled_metrics)
if force_delete is not None:
pulumi.set(__self__, "force_delete", force_delete)
if force_delete_warm_pool is not None:
pulumi.set(__self__, "force_delete_warm_pool", force_delete_warm_pool)
if health_check_grace_period is not None:
pulumi.set(__self__, "health_check_grace_period", health_check_grace_period)
if health_check_type is not None:
pulumi.set(__self__, "health_check_type", health_check_type)
if initial_lifecycle_hooks is not None:
pulumi.set(__self__, "initial_lifecycle_hooks", initial_lifecycle_hooks)
if instance_refresh is not None:
pulumi.set(__self__, "instance_refresh", instance_refresh)
if launch_configuration is not None:
pulumi.set(__self__, "launch_configuration", launch_configuration)
if launch_template is not None:
pulumi.set(__self__, "launch_template", launch_template)
if load_balancers is not None:
pulumi.set(__self__, "load_balancers", load_balancers)
if max_instance_lifetime is not None:
pulumi.set(__self__, "max_instance_lifetime", max_instance_lifetime)
if metrics_granularity is not None:
pulumi.set(__self__, "metrics_granularity", metrics_granularity)
if min_elb_capacity is not None:
pulumi.set(__self__, "min_elb_capacity", min_elb_capacity)
if mixed_instances_policy is not None:
pulumi.set(__self__, "mixed_instances_policy", mixed_instances_policy)
if name is not None:
pulumi.set(__self__, "name", name)
if name_prefix is not None:
pulumi.set(__self__, "name_prefix", name_prefix)
if placement_group is not None:
pulumi.set(__self__, "placement_group", placement_group)
if protect_from_scale_in is not None:
pulumi.set(__self__, "protect_from_scale_in", protect_from_scale_in)
if service_linked_role_arn is not None:
pulumi.set(__self__, "service_linked_role_arn", service_linked_role_arn)
if suspended_processes is not None:
pulumi.set(__self__, "suspended_processes", suspended_processes)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_collection is not None:
pulumi.set(__self__, "tags_collection", tags_collection)
if target_group_arns is not None:
pulumi.set(__self__, "target_group_arns", target_group_arns)
if termination_policies is not None:
pulumi.set(__self__, "termination_policies", termination_policies)
if vpc_zone_identifiers is not None:
pulumi.set(__self__, "vpc_zone_identifiers", vpc_zone_identifiers)
if wait_for_capacity_timeout is not None:
pulumi.set(__self__, "wait_for_capacity_timeout", wait_for_capacity_timeout)
if wait_for_elb_capacity is not None:
pulumi.set(__self__, "wait_for_elb_capacity", wait_for_elb_capacity)
if warm_pool is not None:
pulumi.set(__self__, "warm_pool", warm_pool)
@property
@pulumi.getter(name="maxSize")
def max_size(self) -> pulumi.Input[int]:
"""
The maximum size of the Auto Scaling Group.
"""
return pulumi.get(self, "max_size")
@max_size.setter
def max_size(self, value: pulumi.Input[int]):
pulumi.set(self, "max_size", value)
@property
@pulumi.getter(name="minSize")
def min_size(self) -> pulumi.Input[int]:
"""
Specifies the minimum number of instances to maintain in the warm pool. This helps you to ensure that there is always a certain number of warmed instances available to handle traffic spikes. Defaults to 0 if not specified.
"""
return pulumi.get(self, "min_size")
@min_size.setter
def min_size(self, value: pulumi.Input[int]):
pulumi.set(self, "min_size", value)
@property
@pulumi.getter(name="availabilityZones")
def availability_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `vpc_zone_identifier` argument. Conflicts with `vpc_zone_identifier`.
"""
return pulumi.get(self, "availability_zones")
@availability_zones.setter
def availability_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "availability_zones", value)
@property
@pulumi.getter(name="capacityRebalance")
def capacity_rebalance(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled.
"""
return pulumi.get(self, "capacity_rebalance")
@capacity_rebalance.setter
def capacity_rebalance(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "capacity_rebalance", value)
@property
@pulumi.getter(name="defaultCooldown")
def default_cooldown(self) -> Optional[pulumi.Input[int]]:
"""
The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
"""
return pulumi.get(self, "default_cooldown")
@default_cooldown.setter
def default_cooldown(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_cooldown", value)
@property
@pulumi.getter(name="desiredCapacity")
def desired_capacity(self) -> Optional[pulumi.Input[int]]:
"""
The number of Amazon EC2 instances that
should be running in the group. (See also Waiting for
Capacity below.)
"""
return pulumi.get(self, "desired_capacity")
@desired_capacity.setter
def desired_capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "desired_capacity", value)
    @property
    @pulumi.getter(name="enabledMetrics")
    def enabled_metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`.
        """
        return pulumi.get(self, "enabled_metrics")
    @enabled_metrics.setter
    def enabled_metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "enabled_metrics", value)
    @property
    @pulumi.getter(name="forceDelete")
    def force_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Allows deleting the Auto Scaling Group without waiting
        for all instances in the pool to terminate. You can force an Auto Scaling Group to delete
        even if it's in the process of scaling a resource. Normally, this provider
        drains all the instances before deleting the group. This bypasses that
        behavior and potentially leaves resources dangling.
        """
        return pulumi.get(self, "force_delete")
    @force_delete.setter
    def force_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "force_delete", value)
    @property
    @pulumi.getter(name="forceDeleteWarmPool")
    def force_delete_warm_pool(self) -> Optional[pulumi.Input[bool]]:
        """
        Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate.
        """
        return pulumi.get(self, "force_delete_warm_pool")
    @force_delete_warm_pool.setter
    def force_delete_warm_pool(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "force_delete_warm_pool", value)
    @property
    @pulumi.getter(name="healthCheckGracePeriod")
    def health_check_grace_period(self) -> Optional[pulumi.Input[int]]:
        """
        Time (in seconds) after instance comes into service before checking health.
        """
        return pulumi.get(self, "health_check_grace_period")
    @health_check_grace_period.setter
    def health_check_grace_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "health_check_grace_period", value)
    @property
    @pulumi.getter(name="healthCheckType")
    def health_check_type(self) -> Optional[pulumi.Input[str]]:
        """
        "EC2" or "ELB". Controls how health checking is done.
        """
        return pulumi.get(self, "health_check_type")
    @health_check_type.setter
    def health_check_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health_check_type", value)
    @property
    @pulumi.getter(name="initialLifecycleHooks")
    def initial_lifecycle_hooks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GroupInitialLifecycleHookArgs']]]]:
        """
        One or more
        [Lifecycle Hooks](http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html)
        to attach to the Auto Scaling Group **before** instances are launched. The
        syntax is exactly the same as the separate
        `autoscaling.LifecycleHook`
        resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating
        a new Auto Scaling Group. For all other use cases, please use the `autoscaling.LifecycleHook` resource.
        """
        return pulumi.get(self, "initial_lifecycle_hooks")
    @initial_lifecycle_hooks.setter
    def initial_lifecycle_hooks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GroupInitialLifecycleHookArgs']]]]):
        pulumi.set(self, "initial_lifecycle_hooks", value)
    @property
    @pulumi.getter(name="instanceRefresh")
    def instance_refresh(self) -> Optional[pulumi.Input['GroupInstanceRefreshArgs']]:
        """
        If this block is configured, start an
        [Instance Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
        when this Auto Scaling Group is updated. Defined below.
        """
        return pulumi.get(self, "instance_refresh")
    @instance_refresh.setter
    def instance_refresh(self, value: Optional[pulumi.Input['GroupInstanceRefreshArgs']]):
        pulumi.set(self, "instance_refresh", value)
    @property
    @pulumi.getter(name="launchConfiguration")
    def launch_configuration(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the launch configuration to use.
        """
        return pulumi.get(self, "launch_configuration")
    @launch_configuration.setter
    def launch_configuration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "launch_configuration", value)
    @property
    @pulumi.getter(name="launchTemplate")
    def launch_template(self) -> Optional[pulumi.Input['GroupLaunchTemplateArgs']]:
        """
        Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below.
        """
        return pulumi.get(self, "launch_template")
    @launch_template.setter
    def launch_template(self, value: Optional[pulumi.Input['GroupLaunchTemplateArgs']]):
        pulumi.set(self, "launch_template", value)
    @property
    @pulumi.getter(name="loadBalancers")
    def load_balancers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of elastic load balancer names to add to the Auto Scaling
        Group. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead.
        """
        return pulumi.get(self, "load_balancers")
    @load_balancers.setter
    def load_balancers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "load_balancers", value)
    @property
    @pulumi.getter(name="maxInstanceLifetime")
    def max_instance_lifetime(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds.
        """
        return pulumi.get(self, "max_instance_lifetime")
    @max_instance_lifetime.setter
    def max_instance_lifetime(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_instance_lifetime", value)
    @property
    @pulumi.getter(name="metricsGranularity")
    def metrics_granularity(self) -> Optional[pulumi.Input[Union[str, 'MetricsGranularity']]]:
        """
        The granularity to associate with the metrics to collect. The only valid value is `1Minute`. Default is `1Minute`.
        """
        return pulumi.get(self, "metrics_granularity")
    @metrics_granularity.setter
    def metrics_granularity(self, value: Optional[pulumi.Input[Union[str, 'MetricsGranularity']]]):
        pulumi.set(self, "metrics_granularity", value)
    @property
    @pulumi.getter(name="minElbCapacity")
    def min_elb_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        Setting this causes the provider to wait for
        this number of instances from this Auto Scaling Group to show up healthy in the
        ELB only on creation. Updates will not wait on ELB instance number changes.
        (See also Waiting for Capacity below.)
        """
        return pulumi.get(self, "min_elb_capacity")
    @min_elb_capacity.setter
    def min_elb_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_elb_capacity", value)
    @property
    @pulumi.getter(name="mixedInstancesPolicy")
    def mixed_instances_policy(self) -> Optional[pulumi.Input['GroupMixedInstancesPolicyArgs']]:
        """
        Configuration block containing settings to define launch targets for Auto Scaling groups. Defined below.
        """
        return pulumi.get(self, "mixed_instances_policy")
    @mixed_instances_policy.setter
    def mixed_instances_policy(self, value: Optional[pulumi.Input['GroupMixedInstancesPolicyArgs']]):
        pulumi.set(self, "mixed_instances_policy", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Auto Scaling Group. By default generated by this provider.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="namePrefix")
    def name_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Creates a unique name beginning with the specified prefix.
        Conflicts with `name`.
        """
        return pulumi.get(self, "name_prefix")
    @name_prefix.setter
    def name_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name_prefix", value)
    @property
    @pulumi.getter(name="placementGroup")
    def placement_group(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the placement group into which you'll launch your instances, if any.
        """
        return pulumi.get(self, "placement_group")
    @placement_group.setter
    def placement_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "placement_group", value)
    @property
    @pulumi.getter(name="protectFromScaleIn")
    def protect_from_scale_in(self) -> Optional[pulumi.Input[bool]]:
        """
        Allows setting instance protection. The
        Auto Scaling Group will not select instances with this setting for termination
        during scale in events.
        """
        return pulumi.get(self, "protect_from_scale_in")
    @protect_from_scale_in.setter
    def protect_from_scale_in(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "protect_from_scale_in", value)
    @property
    @pulumi.getter(name="serviceLinkedRoleArn")
    def service_linked_role_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the service-linked role that the ASG will use to call other AWS services
        """
        return pulumi.get(self, "service_linked_role_arn")
    @service_linked_role_arn.setter
    def service_linked_role_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_linked_role_arn", value)
    @property
    @pulumi.getter(name="suspendedProcesses")
    def suspended_processes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`.
        Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly.
        """
        return pulumi.get(self, "suspended_processes")
    @suspended_processes.setter
    def suspended_processes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "suspended_processes", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GroupTagArgs']]]]:
        """
        Configuration block(s) containing resource tags. Conflicts with `tags_collection`. Documented below.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GroupTagArgs']]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tagsCollection")
    def tags_collection(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
        """
        Set of maps containing resource tags. Conflicts with `tags`. Documented below.
        """
        return pulumi.get(self, "tags_collection")
    @tags_collection.setter
    def tags_collection(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
        pulumi.set(self, "tags_collection", value)
    @property
    @pulumi.getter(name="targetGroupArns")
    def target_group_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A set of `alb.TargetGroup` ARNs, for use with Application or Network Load Balancing.
        """
        return pulumi.get(self, "target_group_arns")
    @target_group_arns.setter
    def target_group_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "target_group_arns", value)
    @property
    @pulumi.getter(name="terminationPolicies")
    def termination_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`.
        """
        return pulumi.get(self, "termination_policies")
    @termination_policies.setter
    def termination_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "termination_policies", value)
    @property
    @pulumi.getter(name="vpcZoneIdentifiers")
    def vpc_zone_identifiers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`.
        """
        return pulumi.get(self, "vpc_zone_identifiers")
    @vpc_zone_identifiers.setter
    def vpc_zone_identifiers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "vpc_zone_identifiers", value)
    @property
    @pulumi.getter(name="waitForCapacityTimeout")
    def wait_for_capacity_timeout(self) -> Optional[pulumi.Input[str]]:
        """
        A maximum
        [duration](https://golang.org/pkg/time/#ParseDuration) that this provider should
        wait for ASG instances to be healthy before timing out. (See also Waiting
        for Capacity below.) Setting this to "0" causes
        this provider to skip all Capacity Waiting behavior.
        """
        return pulumi.get(self, "wait_for_capacity_timeout")
    @wait_for_capacity_timeout.setter
    def wait_for_capacity_timeout(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "wait_for_capacity_timeout", value)
    @property
    @pulumi.getter(name="waitForElbCapacity")
    def wait_for_elb_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        Setting this will cause the provider to wait
        for exactly this number of healthy instances from this Auto Scaling Group in
        all attached load balancers on both create and update operations. (Takes
        precedence over `min_elb_capacity` behavior.)
        (See also Waiting for Capacity below.)
        """
        return pulumi.get(self, "wait_for_elb_capacity")
    @wait_for_elb_capacity.setter
    def wait_for_elb_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "wait_for_elb_capacity", value)
    @property
    @pulumi.getter(name="warmPool")
    def warm_pool(self) -> Optional[pulumi.Input['GroupWarmPoolArgs']]:
        """
        If this block is configured, add a [Warm Pool](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html)
        to the specified Auto Scaling group. Defined below.
        """
        return pulumi.get(self, "warm_pool")
    @warm_pool.setter
    def warm_pool(self, value: Optional[pulumi.Input['GroupWarmPoolArgs']]):
        pulumi.set(self, "warm_pool", value)
@pulumi.input_type
class _GroupState:
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 capacity_rebalance: Optional[pulumi.Input[bool]] = None,
                 default_cooldown: Optional[pulumi.Input[int]] = None,
                 desired_capacity: Optional[pulumi.Input[int]] = None,
                 enabled_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 force_delete: Optional[pulumi.Input[bool]] = None,
                 force_delete_warm_pool: Optional[pulumi.Input[bool]] = None,
                 health_check_grace_period: Optional[pulumi.Input[int]] = None,
                 health_check_type: Optional[pulumi.Input[str]] = None,
                 initial_lifecycle_hooks: Optional[pulumi.Input[Sequence[pulumi.Input['GroupInitialLifecycleHookArgs']]]] = None,
                 instance_refresh: Optional[pulumi.Input['GroupInstanceRefreshArgs']] = None,
                 launch_configuration: Optional[pulumi.Input[str]] = None,
                 launch_template: Optional[pulumi.Input['GroupLaunchTemplateArgs']] = None,
                 load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 max_instance_lifetime: Optional[pulumi.Input[int]] = None,
                 max_size: Optional[pulumi.Input[int]] = None,
                 metrics_granularity: Optional[pulumi.Input[Union[str, 'MetricsGranularity']]] = None,
                 min_elb_capacity: Optional[pulumi.Input[int]] = None,
                 min_size: Optional[pulumi.Input[int]] = None,
                 mixed_instances_policy: Optional[pulumi.Input['GroupMixedInstancesPolicyArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 name_prefix: Optional[pulumi.Input[str]] = None,
                 placement_group: Optional[pulumi.Input[str]] = None,
                 protect_from_scale_in: Optional[pulumi.Input[bool]] = None,
                 service_linked_role_arn: Optional[pulumi.Input[str]] = None,
                 suspended_processes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['GroupTagArgs']]]] = None,
                 tags_collection: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                 target_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 vpc_zone_identifiers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 wait_for_capacity_timeout: Optional[pulumi.Input[str]] = None,
                 wait_for_elb_capacity: Optional[pulumi.Input[int]] = None,
                 warm_pool: Optional[pulumi.Input['GroupWarmPoolArgs']] = None):
        """
        Input properties used for looking up and filtering Group resources.
        :param pulumi.Input[str] arn: The ARN for this Auto Scaling Group
        :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `vpc_zone_identifier` argument. Conflicts with `vpc_zone_identifier`.
        :param pulumi.Input[bool] capacity_rebalance: Indicates whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled.
        :param pulumi.Input[int] default_cooldown: The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
        :param pulumi.Input[int] desired_capacity: The number of Amazon EC2 instances that
               should be running in the group. (See also Waiting for
               Capacity below.)
        :param pulumi.Input[Sequence[pulumi.Input[str]]] enabled_metrics: A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`.
        :param pulumi.Input[bool] force_delete: Allows deleting the Auto Scaling Group without waiting
               for all instances in the pool to terminate. You can force an Auto Scaling Group to delete
               even if it's in the process of scaling a resource. Normally, this provider
               drains all the instances before deleting the group. This bypasses that
               behavior and potentially leaves resources dangling.
        :param pulumi.Input[bool] force_delete_warm_pool: Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate.
        :param pulumi.Input[int] health_check_grace_period: Time (in seconds) after instance comes into service before checking health.
        :param pulumi.Input[str] health_check_type: "EC2" or "ELB". Controls how health checking is done.
        :param pulumi.Input[Sequence[pulumi.Input['GroupInitialLifecycleHookArgs']]] initial_lifecycle_hooks: One or more
               [Lifecycle Hooks](http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html)
               to attach to the Auto Scaling Group **before** instances are launched. The
               syntax is exactly the same as the separate
               `autoscaling.LifecycleHook`
               resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating
               a new Auto Scaling Group. For all other use cases, please use the `autoscaling.LifecycleHook` resource.
        :param pulumi.Input['GroupInstanceRefreshArgs'] instance_refresh: If this block is configured, start an
               [Instance Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
               when this Auto Scaling Group is updated. Defined below.
        :param pulumi.Input[str] launch_configuration: The name of the launch configuration to use.
        :param pulumi.Input['GroupLaunchTemplateArgs'] launch_template: Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] load_balancers: A list of elastic load balancer names to add to the Auto Scaling
               Group. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead.
        :param pulumi.Input[int] max_instance_lifetime: The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds.
        :param pulumi.Input[int] max_size: The maximum size of the Auto Scaling Group.
        :param pulumi.Input[Union[str, 'MetricsGranularity']] metrics_granularity: The granularity to associate with the metrics to collect. The only valid value is `1Minute`. Default is `1Minute`.
        :param pulumi.Input[int] min_elb_capacity: Setting this causes the provider to wait for
               this number of instances from this Auto Scaling Group to show up healthy in the
               ELB only on creation. Updates will not wait on ELB instance number changes.
               (See also Waiting for Capacity below.)
        :param pulumi.Input[int] min_size: The minimum size of the Auto Scaling Group.
               (See also Waiting for Capacity below.)
        :param pulumi.Input['GroupMixedInstancesPolicyArgs'] mixed_instances_policy: Configuration block containing settings to define launch targets for Auto Scaling groups. Defined below.
        :param pulumi.Input[str] name: The name of the Auto Scaling Group. By default generated by this provider.
        :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
               prefix. Conflicts with `name`.
        :param pulumi.Input[str] placement_group: The name of the placement group into which you'll launch your instances, if any.
        :param pulumi.Input[bool] protect_from_scale_in: Allows setting instance protection. The
               Auto Scaling Group will not select instances with this setting for termination
               during scale in events.
        :param pulumi.Input[str] service_linked_role_arn: The ARN of the service-linked role that the ASG will use to call other AWS services
        :param pulumi.Input[Sequence[pulumi.Input[str]]] suspended_processes: A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`.
               Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly.
        :param pulumi.Input[Sequence[pulumi.Input['GroupTagArgs']]] tags: Configuration block(s) containing resource tags. Conflicts with `tags_collection`. Documented below.
        :param pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]] tags_collection: Set of maps containing resource tags. Conflicts with `tags`. Documented below.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] target_group_arns: A set of `alb.TargetGroup` ARNs, for use with Application or Network Load Balancing.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] termination_policies: A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] vpc_zone_identifiers: A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`.
        :param pulumi.Input[str] wait_for_capacity_timeout: A maximum
               [duration](https://golang.org/pkg/time/#ParseDuration) that this provider should
               wait for ASG instances to be healthy before timing out. (See also Waiting
               for Capacity below.) Setting this to "0" causes
               this provider to skip all Capacity Waiting behavior.
        :param pulumi.Input[int] wait_for_elb_capacity: Setting this will cause the provider to wait
               for exactly this number of healthy instances from this Auto Scaling Group in
               all attached load balancers on both create and update operations. (Takes
               precedence over `min_elb_capacity` behavior.)
               (See also Waiting for Capacity below.)
        :param pulumi.Input['GroupWarmPoolArgs'] warm_pool: If this block is configured, add a [Warm Pool](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html)
               to the specified Auto Scaling group. Defined below
        """
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if availability_zones is not None:
            pulumi.set(__self__, "availability_zones", availability_zones)
        if capacity_rebalance is not None:
            pulumi.set(__self__, "capacity_rebalance", capacity_rebalance)
        if default_cooldown is not None:
            pulumi.set(__self__, "default_cooldown", default_cooldown)
        if desired_capacity is not None:
            pulumi.set(__self__, "desired_capacity", desired_capacity)
        if enabled_metrics is not None:
            pulumi.set(__self__, "enabled_metrics", enabled_metrics)
        if force_delete is not None:
            pulumi.set(__self__, "force_delete", force_delete)
        if force_delete_warm_pool is not None:
            pulumi.set(__self__, "force_delete_warm_pool", force_delete_warm_pool)
        if health_check_grace_period is not None:
            pulumi.set(__self__, "health_check_grace_period", health_check_grace_period)
        if health_check_type is not None:
            pulumi.set(__self__, "health_check_type", health_check_type)
        if initial_lifecycle_hooks is not None:
            pulumi.set(__self__, "initial_lifecycle_hooks", initial_lifecycle_hooks)
        if instance_refresh is not None:
            pulumi.set(__self__, "instance_refresh", instance_refresh)
        if launch_configuration is not None:
            pulumi.set(__self__, "launch_configuration", launch_configuration)
        if launch_template is not None:
            pulumi.set(__self__, "launch_template", launch_template)
        if load_balancers is not None:
            pulumi.set(__self__, "load_balancers", load_balancers)
        if max_instance_lifetime is not None:
            pulumi.set(__self__, "max_instance_lifetime", max_instance_lifetime)
        if max_size is not None:
            pulumi.set(__self__, "max_size", max_size)
        if metrics_granularity is not None:
            pulumi.set(__self__, "metrics_granularity", metrics_granularity)
        if min_elb_capacity is not None:
            pulumi.set(__self__, "min_elb_capacity", min_elb_capacity)
        if min_size is not None:
            pulumi.set(__self__, "min_size", min_size)
        if mixed_instances_policy is not None:
            pulumi.set(__self__, "mixed_instances_policy", mixed_instances_policy)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if name_prefix is not None:
            pulumi.set(__self__, "name_prefix", name_prefix)
        if placement_group is not None:
            pulumi.set(__self__, "placement_group", placement_group)
        if protect_from_scale_in is not None:
            pulumi.set(__self__, "protect_from_scale_in", protect_from_scale_in)
        if service_linked_role_arn is not None:
            pulumi.set(__self__, "service_linked_role_arn", service_linked_role_arn)
        if suspended_processes is not None:
            pulumi.set(__self__, "suspended_processes", suspended_processes)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_collection is not None:
            pulumi.set(__self__, "tags_collection", tags_collection)
        if target_group_arns is not None:
            pulumi.set(__self__, "target_group_arns", target_group_arns)
        if termination_policies is not None:
            pulumi.set(__self__, "termination_policies", termination_policies)
        if vpc_zone_identifiers is not None:
            pulumi.set(__self__, "vpc_zone_identifiers", vpc_zone_identifiers)
        if wait_for_capacity_timeout is not None:
            pulumi.set(__self__, "wait_for_capacity_timeout", wait_for_capacity_timeout)
        if wait_for_elb_capacity is not None:
            pulumi.set(__self__, "wait_for_elb_capacity", wait_for_elb_capacity)
        if warm_pool is not None:
            pulumi.set(__self__, "warm_pool", warm_pool)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN for this Auto Scaling Group
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter(name="availabilityZones")
    def availability_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `vpc_zone_identifier` argument. Conflicts with `vpc_zone_identifier`.
        """
        return pulumi.get(self, "availability_zones")
    @availability_zones.setter
    def availability_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "availability_zones", value)
    @property
    @pulumi.getter(name="capacityRebalance")
    def capacity_rebalance(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled.
        """
        return pulumi.get(self, "capacity_rebalance")
    @capacity_rebalance.setter
    def capacity_rebalance(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "capacity_rebalance", value)
    @property
    @pulumi.getter(name="defaultCooldown")
    def default_cooldown(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
        """
        return pulumi.get(self, "default_cooldown")
    @default_cooldown.setter
    def default_cooldown(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "default_cooldown", value)
    @property
    @pulumi.getter(name="desiredCapacity")
    def desired_capacity(self) -> Optional[pulumi.Input[int]]:
        """
        The number of Amazon EC2 instances that should be running in the group.
        (See also Waiting for Capacity below.)
        """
        return pulumi.get(self, "desired_capacity")
    @desired_capacity.setter
    def desired_capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "desired_capacity", value)
    @property
    @pulumi.getter(name="enabledMetrics")
    def enabled_metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`.
        """
        return pulumi.get(self, "enabled_metrics")
    @enabled_metrics.setter
    def enabled_metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "enabled_metrics", value)
    @property
    @pulumi.getter(name="forceDelete")
    def force_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Allows deleting the Auto Scaling Group without waiting
        for all instances in the pool to terminate. You can force an Auto Scaling Group to delete
        even if it's in the process of scaling a resource. Normally, this provider
        drains all the instances before deleting the group. This bypasses that
        behavior and potentially leaves resources dangling.
        """
        return pulumi.get(self, "force_delete")
    @force_delete.setter
    def force_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "force_delete", value)
    @property
    @pulumi.getter(name="forceDeleteWarmPool")
    def force_delete_warm_pool(self) -> Optional[pulumi.Input[bool]]:
        """
        Allows deleting the Auto Scaling Group without waiting for all instances in the warm pool to terminate.
        """
        return pulumi.get(self, "force_delete_warm_pool")
    @force_delete_warm_pool.setter
    def force_delete_warm_pool(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "force_delete_warm_pool", value)
    @property
    @pulumi.getter(name="healthCheckGracePeriod")
    def health_check_grace_period(self) -> Optional[pulumi.Input[int]]:
        """
        Time (in seconds) after instance comes into service before checking health.
        """
        return pulumi.get(self, "health_check_grace_period")
    @health_check_grace_period.setter
    def health_check_grace_period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "health_check_grace_period", value)
    @property
    @pulumi.getter(name="healthCheckType")
    def health_check_type(self) -> Optional[pulumi.Input[str]]:
        """
        "EC2" or "ELB". Controls how health checking is done.
        """
        return pulumi.get(self, "health_check_type")
    @health_check_type.setter
    def health_check_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health_check_type", value)
@property
@pulumi.getter(name="initialLifecycleHooks")
def initial_lifecycle_hooks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GroupInitialLifecycleHookArgs']]]]:
"""
One or more
[Lifecycle Hooks](http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html)
to attach to the Auto Scaling Group **before** instances are launched. The
syntax is exactly the same as the separate
`autoscaling.LifecycleHook`
resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating
a new Auto Scaling Group. For all other use-cases, please use `autoscaling.LifecycleHook` resource.
"""
return pulumi.get(self, "initial_lifecycle_hooks")
@initial_lifecycle_hooks.setter
def initial_lifecycle_hooks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GroupInitialLifecycleHookArgs']]]]):
pulumi.set(self, "initial_lifecycle_hooks", value)
@property
@pulumi.getter(name="instanceRefresh")
def instance_refresh(self) -> Optional[pulumi.Input['GroupInstanceRefreshArgs']]:
"""
If this block is configured, start an
[Instance Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
when this Auto Scaling Group is updated. Defined below.
"""
return pulumi.get(self, "instance_refresh")
@instance_refresh.setter
def instance_refresh(self, value: Optional[pulumi.Input['GroupInstanceRefreshArgs']]):
pulumi.set(self, "instance_refresh", value)
@property
@pulumi.getter(name="launchConfiguration")
def launch_configuration(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the launch configuration to use.
    """
    return pulumi.get(self, "launch_configuration")

@launch_configuration.setter
def launch_configuration(self, value: Optional[pulumi.Input[str]]):
    """Set the name of the launch configuration to use."""
    pulumi.set(self, "launch_configuration", value)
@property
@pulumi.getter(name="launchTemplate")
def launch_template(self) -> Optional[pulumi.Input['GroupLaunchTemplateArgs']]:
    """
    Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below.
    """
    return pulumi.get(self, "launch_template")

@launch_template.setter
def launch_template(self, value: Optional[pulumi.Input['GroupLaunchTemplateArgs']]):
    """Set the launch template settings (including instance-type/weight overrides)."""
    pulumi.set(self, "launch_template", value)
@property
@pulumi.getter(name="loadBalancers")
def load_balancers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    A list of elastic load balancer names to add to the autoscaling
    group names. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead.
    """
    return pulumi.get(self, "load_balancers")

@load_balancers.setter
def load_balancers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the classic ELB names to attach (for ALBs use `target_group_arns`)."""
    pulumi.set(self, "load_balancers", value)
@property
@pulumi.getter(name="maxInstanceLifetime")
def max_instance_lifetime(self) -> Optional[pulumi.Input[int]]:
    """
    The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds.
    """
    return pulumi.get(self, "max_instance_lifetime")

@max_instance_lifetime.setter
def max_instance_lifetime(self, value: Optional[pulumi.Input[int]]):
    """Set the maximum instance lifetime in seconds (0, or 604800-31536000)."""
    pulumi.set(self, "max_instance_lifetime", value)
@property
@pulumi.getter(name="maxSize")
def max_size(self) -> Optional[pulumi.Input[int]]:
    """
    The maximum size of the Auto Scaling Group.
    """
    return pulumi.get(self, "max_size")

@max_size.setter
def max_size(self, value: Optional[pulumi.Input[int]]):
    """Set the maximum size of the Auto Scaling Group."""
    pulumi.set(self, "max_size", value)
@property
@pulumi.getter(name="metricsGranularity")
def metrics_granularity(self) -> Optional[pulumi.Input[Union[str, 'MetricsGranularity']]]:
    """
    The granularity to associate with the metrics to collect. The only valid value is `1Minute`. Default is `1Minute`.
    """
    return pulumi.get(self, "metrics_granularity")

@metrics_granularity.setter
def metrics_granularity(self, value: Optional[pulumi.Input[Union[str, 'MetricsGranularity']]]):
    """Set the metrics collection granularity (only valid value is `1Minute`)."""
    pulumi.set(self, "metrics_granularity", value)
@property
@pulumi.getter(name="minElbCapacity")
def min_elb_capacity(self) -> Optional[pulumi.Input[int]]:
    """
    Setting this causes the provider to wait for
    this number of instances from this Auto Scaling Group to show up healthy in the
    ELB only on creation. Updates will not wait on ELB instance number changes.
    (See also Waiting for Capacity below.)
    """
    return pulumi.get(self, "min_elb_capacity")

@min_elb_capacity.setter
def min_elb_capacity(self, value: Optional[pulumi.Input[int]]):
    """Set the number of instances that must be healthy in the ELB before creation completes."""
    pulumi.set(self, "min_elb_capacity", value)
@property
@pulumi.getter(name="minSize")
def min_size(self) -> Optional[pulumi.Input[int]]:
    """
    The minimum size of the Auto Scaling Group.
    (See also Waiting for Capacity below.)
    """
    # NOTE(review): previous docstring described the *warm pool* minimum size, which
    # belongs to `GroupWarmPoolArgs.min_size`, not this ASG-level attribute.
    return pulumi.get(self, "min_size")

@min_size.setter
def min_size(self, value: Optional[pulumi.Input[int]]):
    """Set the minimum size of the Auto Scaling Group."""
    pulumi.set(self, "min_size", value)
@property
@pulumi.getter(name="mixedInstancesPolicy")
def mixed_instances_policy(self) -> Optional[pulumi.Input['GroupMixedInstancesPolicyArgs']]:
    """
    Configuration block containing settings to define launch targets for Auto Scaling groups. Defined below.
    """
    return pulumi.get(self, "mixed_instances_policy")

@mixed_instances_policy.setter
def mixed_instances_policy(self, value: Optional[pulumi.Input['GroupMixedInstancesPolicyArgs']]):
    """Set the mixed instances policy defining launch targets for the group."""
    pulumi.set(self, "mixed_instances_policy", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the Auto Scaling Group. By default generated by this provider.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    """Set the name of the Auto Scaling Group."""
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namePrefix")
def name_prefix(self) -> Optional[pulumi.Input[str]]:
    """
    Creates a unique name beginning with the specified
    prefix. Conflicts with `name`.
    """
    return pulumi.get(self, "name_prefix")

@name_prefix.setter
def name_prefix(self, value: Optional[pulumi.Input[str]]):
    """Set the prefix used to generate a unique group name (conflicts with `name`)."""
    pulumi.set(self, "name_prefix", value)
@property
@pulumi.getter(name="placementGroup")
def placement_group(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the placement group into which you'll launch your instances, if any.
    """
    return pulumi.get(self, "placement_group")

@placement_group.setter
def placement_group(self, value: Optional[pulumi.Input[str]]):
    """Set the placement group into which instances are launched."""
    pulumi.set(self, "placement_group", value)
@property
@pulumi.getter(name="protectFromScaleIn")
def protect_from_scale_in(self) -> Optional[pulumi.Input[bool]]:
    """
    Allows setting instance protection. The
    Auto Scaling Group will not select instances with this setting for termination
    during scale in events.
    """
    return pulumi.get(self, "protect_from_scale_in")

@protect_from_scale_in.setter
def protect_from_scale_in(self, value: Optional[pulumi.Input[bool]]):
    """Set whether instances are protected from termination during scale-in events."""
    pulumi.set(self, "protect_from_scale_in", value)
@property
@pulumi.getter(name="serviceLinkedRoleArn")
def service_linked_role_arn(self) -> Optional[pulumi.Input[str]]:
    """
    The ARN of the service-linked role that the ASG will use to call other AWS services
    """
    return pulumi.get(self, "service_linked_role_arn")

@service_linked_role_arn.setter
def service_linked_role_arn(self, value: Optional[pulumi.Input[str]]):
    """Set the ARN of the service-linked role the ASG uses to call other AWS services."""
    pulumi.set(self, "service_linked_role_arn", value)
@property
@pulumi.getter(name="suspendedProcesses")
def suspended_processes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`.
    Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly.
    """
    return pulumi.get(self, "suspended_processes")

@suspended_processes.setter
def suspended_processes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the scaling processes to suspend for the group."""
    pulumi.set(self, "suspended_processes", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GroupTagArgs']]]]:
    """
    Configuration block(s) containing resource tags. Conflicts with `tags_collection`. Documented below.
    """
    # NOTE(review): previous docstring said "Conflicts with `tags`", i.e. with itself;
    # the mutually-exclusive sibling attribute in this SDK is `tags_collection`.
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GroupTagArgs']]]]):
    """Set the resource tag configuration blocks (conflicts with `tags_collection`)."""
    pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsCollection")
def tags_collection(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
    """
    Set of maps containing resource tags. Conflicts with `tags`. Documented below.
    """
    # NOTE(review): previous docstring said "Conflicts with `tag`", an attribute that
    # does not exist on this class; the mutually-exclusive sibling here is `tags`.
    return pulumi.get(self, "tags_collection")

@tags_collection.setter
def tags_collection(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
    """Set the resource tag maps (conflicts with `tags`)."""
    pulumi.set(self, "tags_collection", value)
@property
@pulumi.getter(name="targetGroupArns")
def target_group_arns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    A set of `alb.TargetGroup` ARNs, for use with Application or Network Load Balancing.
    """
    return pulumi.get(self, "target_group_arns")

@target_group_arns.setter
def target_group_arns(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the ALB/NLB target group ARNs attached to the group."""
    pulumi.set(self, "target_group_arns", value)
@property
@pulumi.getter(name="terminationPolicies")
def termination_policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`.
    """
    return pulumi.get(self, "termination_policies")

@termination_policies.setter
def termination_policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the policies that decide how instances are terminated."""
    pulumi.set(self, "termination_policies", value)
@property
@pulumi.getter(name="vpcZoneIdentifiers")
def vpc_zone_identifiers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`.
    """
    return pulumi.get(self, "vpc_zone_identifiers")

@vpc_zone_identifiers.setter
def vpc_zone_identifiers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the subnet IDs to launch resources in (conflicts with `availability_zones`)."""
    pulumi.set(self, "vpc_zone_identifiers", value)
@property
@pulumi.getter(name="waitForCapacityTimeout")
def wait_for_capacity_timeout(self) -> Optional[pulumi.Input[str]]:
    """
    A maximum
    [duration](https://golang.org/pkg/time/#ParseDuration) that this provider should
    wait for ASG instances to be healthy before timing out. (See also Waiting
    for Capacity below.) Setting this to "0" causes
    this provider to skip all Capacity Waiting behavior.
    """
    return pulumi.get(self, "wait_for_capacity_timeout")

@wait_for_capacity_timeout.setter
def wait_for_capacity_timeout(self, value: Optional[pulumi.Input[str]]):
    """Set the maximum duration to wait for healthy ASG instances ("0" disables waiting)."""
    pulumi.set(self, "wait_for_capacity_timeout", value)
@property
@pulumi.getter(name="waitForElbCapacity")
def wait_for_elb_capacity(self) -> Optional[pulumi.Input[int]]:
    """
    Setting this will cause the provider to wait
    for exactly this number of healthy instances from this Auto Scaling Group in
    all attached load balancers on both create and update operations. (Takes
    precedence over `min_elb_capacity` behavior.)
    (See also Waiting for Capacity below.)
    """
    return pulumi.get(self, "wait_for_elb_capacity")

@wait_for_elb_capacity.setter
def wait_for_elb_capacity(self, value: Optional[pulumi.Input[int]]):
    """Set the exact number of healthy instances to wait for in all attached load balancers."""
    pulumi.set(self, "wait_for_elb_capacity", value)
@property
@pulumi.getter(name="warmPool")
def warm_pool(self) -> Optional[pulumi.Input['GroupWarmPoolArgs']]:
    """
    If this block is configured, add a [Warm Pool](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html)
    to the specified Auto Scaling group. Defined below
    """
    return pulumi.get(self, "warm_pool")

@warm_pool.setter
def warm_pool(self, value: Optional[pulumi.Input['GroupWarmPoolArgs']]):
    """Set the warm pool configuration for the group."""
    pulumi.set(self, "warm_pool", value)
class Group(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             capacity_rebalance: Optional[pulumi.Input[bool]] = None,
             default_cooldown: Optional[pulumi.Input[int]] = None,
             desired_capacity: Optional[pulumi.Input[int]] = None,
             enabled_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             force_delete: Optional[pulumi.Input[bool]] = None,
             force_delete_warm_pool: Optional[pulumi.Input[bool]] = None,
             health_check_grace_period: Optional[pulumi.Input[int]] = None,
             health_check_type: Optional[pulumi.Input[str]] = None,
             initial_lifecycle_hooks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupInitialLifecycleHookArgs']]]]] = None,
             instance_refresh: Optional[pulumi.Input[pulumi.InputType['GroupInstanceRefreshArgs']]] = None,
             launch_configuration: Optional[pulumi.Input[str]] = None,
             launch_template: Optional[pulumi.Input[pulumi.InputType['GroupLaunchTemplateArgs']]] = None,
             load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             max_instance_lifetime: Optional[pulumi.Input[int]] = None,
             max_size: Optional[pulumi.Input[int]] = None,
             metrics_granularity: Optional[pulumi.Input[Union[str, 'MetricsGranularity']]] = None,
             min_elb_capacity: Optional[pulumi.Input[int]] = None,
             min_size: Optional[pulumi.Input[int]] = None,
             mixed_instances_policy: Optional[pulumi.Input[pulumi.InputType['GroupMixedInstancesPolicyArgs']]] = None,
             name: Optional[pulumi.Input[str]] = None,
             name_prefix: Optional[pulumi.Input[str]] = None,
             placement_group: Optional[pulumi.Input[str]] = None,
             protect_from_scale_in: Optional[pulumi.Input[bool]] = None,
             service_linked_role_arn: Optional[pulumi.Input[str]] = None,
             suspended_processes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupTagArgs']]]]] = None,
             tags_collection: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
             target_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             vpc_zone_identifiers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             wait_for_capacity_timeout: Optional[pulumi.Input[str]] = None,
             wait_for_elb_capacity: Optional[pulumi.Input[int]] = None,
             warm_pool: Optional[pulumi.Input[pulumi.InputType['GroupWarmPoolArgs']]] = None,
             __props__=None):
    """
    Provides an Auto Scaling Group resource.

    > **Note:** You must specify either `launch_configuration`, `launch_template`, or `mixed_instances_policy`.

    > **NOTE on Auto Scaling Groups and ASG Attachments:** This provider currently provides
    both a standalone `autoscaling.Attachment` resource
    (describing an ASG attached to an ELB or ALB), and an `autoscaling.Group`
    with `load_balancers` and `target_group_arns` defined in-line. These two methods are not
    mutually-exclusive. If `autoscaling.Attachment` resources are used, either alone or with inline
    `load_balancers` or `target_group_arns`, the `autoscaling.Group` resource must be configured
    to ignore changes to the `load_balancers` and `target_group_arns` arguments.

    ## Example Usage
    ### With Latest Version Of Launch Template

    ```python
    import pulumi
    import pulumi_aws as aws

    foobar = aws.ec2.LaunchTemplate("foobar",
        name_prefix="foobar",
        image_id="ami-1a2b3c",
        instance_type="t2.micro")
    bar = aws.autoscaling.Group("bar",
        availability_zones=["us-east-1a"],
        desired_capacity=1,
        max_size=1,
        min_size=1,
        launch_template=aws.autoscaling.GroupLaunchTemplateArgs(
            id=foobar.id,
            version="$Latest",
        ))
    ```
    ### Mixed Instances Policy

    ```python
    import pulumi
    import pulumi_aws as aws

    example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
        name_prefix="example",
        image_id=data["aws_ami"]["example"]["id"],
        instance_type="c5.large")
    example_group = aws.autoscaling.Group("exampleGroup",
        availability_zones=["us-east-1a"],
        desired_capacity=1,
        max_size=1,
        min_size=1,
        mixed_instances_policy=aws.autoscaling.GroupMixedInstancesPolicyArgs(
            launch_template={
                "launchTemplateSpecification": {
                    "launchTemplateId": example_launch_template.id,
                },
                "overrides": [
                    {
                        "instance_type": "c4.large",
                        "weightedCapacity": "3",
                    },
                    {
                        "instance_type": "c3.large",
                        "weightedCapacity": "2",
                    },
                ],
            },
        ))
    ```
    ### Mixed Instances Policy with Spot Instances and Capacity Rebalance

    ```python
    import pulumi
    import pulumi_aws as aws

    example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
        name_prefix="example",
        image_id=data["aws_ami"]["example"]["id"],
        instance_type="c5.large")
    example_group = aws.autoscaling.Group("exampleGroup",
        capacity_rebalance=True,
        desired_capacity=12,
        max_size=15,
        min_size=12,
        vpc_zone_identifiers=[
            aws_subnet["example1"]["id"],
            aws_subnet["example2"]["id"],
        ],
        mixed_instances_policy=aws.autoscaling.GroupMixedInstancesPolicyArgs(
            instances_distribution=aws.autoscaling.GroupMixedInstancesPolicyInstancesDistributionArgs(
                on_demand_base_capacity=0,
                on_demand_percentage_above_base_capacity=25,
                spot_allocation_strategy="capacity-optimized",
            ),
            launch_template={
                "launchTemplateSpecification": {
                    "launchTemplateId": example_launch_template.id,
                },
                "overrides": [
                    {
                        "instance_type": "c4.large",
                        "weightedCapacity": "3",
                    },
                    {
                        "instance_type": "c3.large",
                        "weightedCapacity": "2",
                    },
                ],
            },
        ))
    ```
    ### Mixed Instances Policy with Instance level LaunchTemplateSpecification Overrides

    When using a diverse instance set, some instance types might require a launch template with configuration values unique to that instance type such as a different AMI (Graviton2), architecture specific user data script, different EBS configuration, or different networking configuration.

    ```python
    import pulumi
    import pulumi_aws as aws

    example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
        name_prefix="example",
        image_id=data["aws_ami"]["example"]["id"],
        instance_type="c5.large")
    example2 = aws.ec2.LaunchTemplate("example2",
        name_prefix="example2",
        image_id=data["aws_ami"]["example2"]["id"])
    example_group = aws.autoscaling.Group("exampleGroup",
        availability_zones=["us-east-1a"],
        desired_capacity=1,
        max_size=1,
        min_size=1,
        mixed_instances_policy=aws.autoscaling.GroupMixedInstancesPolicyArgs(
            launch_template={
                "launchTemplateSpecification": {
                    "launchTemplateId": example_launch_template.id,
                },
                "overrides": [
                    {
                        "instance_type": "c4.large",
                        "weightedCapacity": "3",
                    },
                    {
                        "instance_type": "c6g.large",
                        "launchTemplateSpecification": {
                            "launchTemplateId": example2.id,
                        },
                        "weightedCapacity": "2",
                    },
                ],
            },
        ))
    ```
    ### Automatically refresh all instances after the group is updated

    ```python
    import pulumi
    import pulumi_aws as aws

    example_ami = aws.ec2.get_ami(most_recent=True,
        owners=["amazon"],
        filters=[aws.ec2.GetAmiFilterArgs(
            name="name",
            values=["amzn-ami-hvm-*-x86_64-gp2"],
        )])
    example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
        image_id=example_ami.id,
        instance_type="t3.nano")
    example_group = aws.autoscaling.Group("exampleGroup",
        availability_zones=["us-east-1a"],
        desired_capacity=1,
        max_size=2,
        min_size=1,
        launch_template=aws.autoscaling.GroupLaunchTemplateArgs(
            id=example_launch_template.id,
            version=example_launch_template.latest_version,
        ),
        tags=[aws.autoscaling.GroupTagArgs(
            key="Key",
            value="Value",
            propagate_at_launch=True,
        )],
        instance_refresh=aws.autoscaling.GroupInstanceRefreshArgs(
            strategy="Rolling",
            preferences=aws.autoscaling.GroupInstanceRefreshPreferencesArgs(
                min_healthy_percentage=50,
            ),
            triggers=["tag"],
        ))
    ```
    ### Auto Scaling group with Warm Pool

    ```python
    import pulumi
    import pulumi_aws as aws

    example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
        name_prefix="example",
        image_id=data["aws_ami"]["example"]["id"],
        instance_type="c5.large")
    example_group = aws.autoscaling.Group("exampleGroup",
        availability_zones=["us-east-1a"],
        desired_capacity=1,
        max_size=5,
        min_size=1,
        warm_pool=aws.autoscaling.GroupWarmPoolArgs(
            pool_state="Stopped",
            min_size=1,
            max_group_prepared_capacity=10,
        ))
    ```
    ## Waiting for Capacity

    A newly-created ASG is initially empty and begins to scale to `min_size` (or
    `desired_capacity`, if specified) by launching instances using the provided
    Launch Configuration. These instances take time to launch and boot.

    On ASG Update, changes to these values also take time to result in the target
    number of instances providing service.

    This provider provides two mechanisms to help consistently manage ASG scale up
    time across dependent resources.

    #### Waiting for ASG Capacity

    The first is default behavior. This provider waits after ASG creation for
    `min_size` (or `desired_capacity`, if specified) healthy instances to show up
    in the ASG before continuing.

    If `min_size` or `desired_capacity` are changed in a subsequent update,
    this provider will also wait for the correct number of healthy instances before
    continuing.

    This provider considers an instance "healthy" when the ASG reports `HealthStatus:
    "Healthy"` and `LifecycleState: "InService"`. See the [AWS AutoScaling
    Docs](https://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
    for more information on an ASG's lifecycle.

    This provider will wait for healthy instances for up to
    `wait_for_capacity_timeout`. If ASG creation is taking more than a few minutes,
    it's worth investigating for scaling activity errors, which can be caused by
    problems with the selected Launch Configuration.

    Setting `wait_for_capacity_timeout` to `"0"` disables ASG Capacity waiting.

    #### Waiting for ELB Capacity

    The second mechanism is optional, and affects ASGs with attached ELBs specified
    via the `load_balancers` attribute or with ALBs specified with `target_group_arns`.

    The `min_elb_capacity` parameter causes this provider to wait for at least the
    requested number of instances to show up `"InService"` in all attached ELBs
    during ASG creation. It has no effect on ASG updates.

    If `wait_for_elb_capacity` is set, this provider will wait for exactly that number
    of Instances to be `"InService"` in all attached ELBs on both creation and
    updates.

    These parameters can be used to ensure that service is being provided before
    this provider moves on. If new instances don't pass the ELB's health checks for any
    reason, the deployment will time out, and the ASG will be marked as
    tainted (i.e. marked to be destroyed in a follow up run).

    As with ASG Capacity, this provider will wait for up to `wait_for_capacity_timeout`
    for the proper number of instances to be healthy.

    #### Troubleshooting Capacity Waiting Timeouts

    If ASG creation takes more than a few minutes, this could indicate one of a
    number of configuration problems. See the [AWS Docs on Load Balancer
    Troubleshooting](https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-troubleshooting.html)
    for more information.

    ## Import

    Auto Scaling Groups can be imported using the `name`, e.g.

    ```sh
     $ pulumi import aws:autoscaling/group:Group web web-asg
    ```

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `vpc_zone_identifier` argument. Conflicts with `vpc_zone_identifier`.
    :param pulumi.Input[bool] capacity_rebalance: Indicates whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled.
    :param pulumi.Input[int] default_cooldown: The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
    :param pulumi.Input[int] desired_capacity: The number of Amazon EC2 instances that
           should be running in the group. (See also Waiting for
           Capacity below.)
    :param pulumi.Input[Sequence[pulumi.Input[str]]] enabled_metrics: A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`.
    :param pulumi.Input[bool] force_delete: Allows deleting the Auto Scaling Group without waiting
           for all instances in the pool to terminate. You can force an Auto Scaling Group to delete
           even if it's in the process of scaling a resource. Normally, this provider
           drains all the instances before deleting the group. This bypasses that
           behavior and potentially leaves resources dangling.
    :param pulumi.Input[int] health_check_grace_period: Time (in seconds) after instance comes into service before checking health.
    :param pulumi.Input[str] health_check_type: "EC2" or "ELB". Controls how health checking is done.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupInitialLifecycleHookArgs']]]] initial_lifecycle_hooks: One or more
           [Lifecycle Hooks](http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html)
           to attach to the Auto Scaling Group **before** instances are launched. The
           syntax is exactly the same as the separate
           `autoscaling.LifecycleHook`
           resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating
           a new Auto Scaling Group. For all other use-cases, please use `autoscaling.LifecycleHook` resource.
    :param pulumi.Input[pulumi.InputType['GroupInstanceRefreshArgs']] instance_refresh: If this block is configured, start an
           [Instance Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
           when this Auto Scaling Group is updated. Defined below.
    :param pulumi.Input[str] launch_configuration: The name of the launch configuration to use.
    :param pulumi.Input[pulumi.InputType['GroupLaunchTemplateArgs']] launch_template: Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] load_balancers: A list of elastic load balancer names to add to the autoscaling
           group names. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead.
    :param pulumi.Input[int] max_instance_lifetime: The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds.
    :param pulumi.Input[int] max_size: The maximum size of the Auto Scaling Group.
    :param pulumi.Input[Union[str, 'MetricsGranularity']] metrics_granularity: The granularity to associate with the metrics to collect. The only valid value is `1Minute`. Default is `1Minute`.
    :param pulumi.Input[int] min_elb_capacity: Setting this causes the provider to wait for
           this number of instances from this Auto Scaling Group to show up healthy in the
           ELB only on creation. Updates will not wait on ELB instance number changes.
           (See also Waiting for Capacity below.)
    :param pulumi.Input[int] min_size: The minimum size of the Auto Scaling Group.
           (See also Waiting for Capacity below.)
    :param pulumi.Input[pulumi.InputType['GroupMixedInstancesPolicyArgs']] mixed_instances_policy: Configuration block containing settings to define launch targets for Auto Scaling groups. Defined below.
    :param pulumi.Input[str] name: The name of the Auto Scaling Group. By default generated by this provider.
    :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
           prefix. Conflicts with `name`.
    :param pulumi.Input[str] placement_group: The name of the placement group into which you'll launch your instances, if any.
    :param pulumi.Input[bool] protect_from_scale_in: Allows setting instance protection. The
           Auto Scaling Group will not select instances with this setting for termination
           during scale in events.
    :param pulumi.Input[str] service_linked_role_arn: The ARN of the service-linked role that the ASG will use to call other AWS services
    :param pulumi.Input[Sequence[pulumi.Input[str]]] suspended_processes: A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`.
           Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupTagArgs']]]] tags: Configuration block(s) containing resource tags. Conflicts with `tags_collection`. Documented below.
    :param pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]] tags_collection: Set of maps containing resource tags. Conflicts with `tags`. Documented below.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] target_group_arns: A set of `alb.TargetGroup` ARNs, for use with Application or Network Load Balancing.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] termination_policies: A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] vpc_zone_identifiers: A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`.
    :param pulumi.Input[str] wait_for_capacity_timeout: A maximum
           [duration](https://golang.org/pkg/time/#ParseDuration) that this provider should
           wait for ASG instances to be healthy before timing out. (See also Waiting
           for Capacity below.) Setting this to "0" causes
           this provider to skip all Capacity Waiting behavior.
    :param pulumi.Input[int] wait_for_elb_capacity: Setting this will cause the provider to wait
           for exactly this number of healthy instances from this Auto Scaling Group in
           all attached load balancers on both create and update operations. (Takes
           precedence over `min_elb_capacity` behavior.)
           (See also Waiting for Capacity below.)
    :param pulumi.Input[pulumi.InputType['GroupWarmPoolArgs']] warm_pool: If this block is configured, add a [Warm Pool](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html)
           to the specified Auto Scaling group. Defined below
    """
    # NOTE(review): corrected three generated param docs — `min_size` previously
    # described the warm-pool minimum (belongs to `GroupWarmPoolArgs`), `tags`
    # claimed to conflict with itself (actual sibling: `tags_collection`), and
    # `tags_collection` referenced a nonexistent `tag` attribute (actual: `tags`).
    ...
@overload
def __init__(__self__,
resource_name: str,
args: GroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an Auto Scaling Group resource.
> **Note:** You must specify either `launch_configuration`, `launch_template`, or `mixed_instances_policy`.
> **NOTE on Auto Scaling Groups and ASG Attachments:** This provider currently provides
both a standalone `autoscaling.Attachment` resource
(describing an ASG attached to an ELB or ALB), and an `autoscaling.Group`
with `load_balancers` and `target_group_arns` defined in-line. These two methods are not
mutually-exclusive. If `autoscaling.Attachment` resources are used, either alone or with inline
`load_balancers` or `target_group_arns`, the `autoscaling.Group` resource must be configured
to ignore changes to the `load_balancers` and `target_group_arns` arguments.
## Example Usage
### With Latest Version Of Launch Template
```python
import pulumi
import pulumi_aws as aws
foobar = aws.ec2.LaunchTemplate("foobar",
name_prefix="foobar",
image_id="ami-1a2b3c",
instance_type="t2.micro")
bar = aws.autoscaling.Group("bar",
availability_zones=["us-east-1a"],
desired_capacity=1,
max_size=1,
min_size=1,
launch_template=aws.autoscaling.GroupLaunchTemplateArgs(
id=foobar.id,
version="$Latest",
))
```
### Mixed Instances Policy
```python
import pulumi
import pulumi_aws as aws
example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
name_prefix="example",
image_id=data["aws_ami"]["example"]["id"],
instance_type="c5.large")
example_group = aws.autoscaling.Group("exampleGroup",
availability_zones=["us-east-1a"],
desired_capacity=1,
max_size=1,
min_size=1,
mixed_instances_policy=aws.autoscaling.GroupMixedInstancesPolicyArgs(
launch_template={
"launchTemplateSpecification": {
"launchTemplateId": example_launch_template.id,
},
"overrides": [
{
"instance_type": "c4.large",
"weightedCapacity": "3",
},
{
"instance_type": "c3.large",
"weightedCapacity": "2",
},
],
},
))
```
### Mixed Instances Policy with Spot Instances and Capacity Rebalance
```python
import pulumi
import pulumi_aws as aws
example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
name_prefix="example",
image_id=data["aws_ami"]["example"]["id"],
instance_type="c5.large")
example_group = aws.autoscaling.Group("exampleGroup",
capacity_rebalance=True,
desired_capacity=12,
max_size=15,
min_size=12,
vpc_zone_identifiers=[
aws_subnet["example1"]["id"],
aws_subnet["example2"]["id"],
],
mixed_instances_policy=aws.autoscaling.GroupMixedInstancesPolicyArgs(
instances_distribution=aws.autoscaling.GroupMixedInstancesPolicyInstancesDistributionArgs(
on_demand_base_capacity=0,
on_demand_percentage_above_base_capacity=25,
spot_allocation_strategy="capacity-optimized",
),
launch_template={
"launchTemplateSpecification": {
"launchTemplateId": example_launch_template.id,
},
"overrides": [
{
"instance_type": "c4.large",
"weightedCapacity": "3",
},
{
"instance_type": "c3.large",
"weightedCapacity": "2",
},
],
},
))
```
### Mixed Instances Policy with Instance level LaunchTemplateSpecification Overrides
When using a diverse instance set, some instance types might require a launch template with configuration values unique to that instance type such as a different AMI (Graviton2), architecture specific user data script, different EBS configuration, or different networking configuration.
```python
import pulumi
import pulumi_aws as aws
example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
name_prefix="example",
image_id=data["aws_ami"]["example"]["id"],
instance_type="c5.large")
example2 = aws.ec2.LaunchTemplate("example2",
name_prefix="example2",
image_id=data["aws_ami"]["example2"]["id"])
example_group = aws.autoscaling.Group("exampleGroup",
availability_zones=["us-east-1a"],
desired_capacity=1,
max_size=1,
min_size=1,
mixed_instances_policy=aws.autoscaling.GroupMixedInstancesPolicyArgs(
launch_template={
"launchTemplateSpecification": {
"launchTemplateId": example_launch_template.id,
},
"overrides": [
{
"instance_type": "c4.large",
"weightedCapacity": "3",
},
{
"instance_type": "c6g.large",
"launchTemplateSpecification": {
"launchTemplateId": example2.id,
},
"weightedCapacity": "2",
},
],
},
))
```
### Automatically refresh all instances after the group is updated
```python
import pulumi
import pulumi_aws as aws
example_ami = aws.ec2.get_ami(most_recent=True,
owners=["amazon"],
filters=[aws.ec2.GetAmiFilterArgs(
name="name",
values=["amzn-ami-hvm-*-x86_64-gp2"],
)])
example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
image_id=example_ami.id,
instance_type="t3.nano")
example_group = aws.autoscaling.Group("exampleGroup",
availability_zones=["us-east-1a"],
desired_capacity=1,
max_size=2,
min_size=1,
launch_template=aws.autoscaling.GroupLaunchTemplateArgs(
id=example_launch_template.id,
version=example_launch_template.latest_version,
),
tags=[aws.autoscaling.GroupTagArgs(
key="Key",
value="Value",
propagate_at_launch=True,
)],
instance_refresh=aws.autoscaling.GroupInstanceRefreshArgs(
strategy="Rolling",
preferences=aws.autoscaling.GroupInstanceRefreshPreferencesArgs(
min_healthy_percentage=50,
),
triggers=["tag"],
))
```
### Auto Scaling group with Warm Pool
```python
import pulumi
import pulumi_aws as aws
example_launch_template = aws.ec2.LaunchTemplate("exampleLaunchTemplate",
name_prefix="example",
image_id=data["aws_ami"]["example"]["id"],
instance_type="c5.large")
example_group = aws.autoscaling.Group("exampleGroup",
availability_zones=["us-east-1a"],
desired_capacity=1,
max_size=5,
min_size=1,
warm_pool=aws.autoscaling.GroupWarmPoolArgs(
pool_state="Stopped",
min_size=1,
max_group_prepared_capacity=10,
))
```
## Waiting for Capacity
A newly-created ASG is initially empty and begins to scale to `min_size` (or
`desired_capacity`, if specified) by launching instances using the provided
Launch Configuration. These instances take time to launch and boot.
On ASG Update, changes to these values also take time to result in the target
number of instances providing service.
This provider provides two mechanisms to help consistently manage ASG scale up
time across dependent resources.
#### Waiting for ASG Capacity
The first is default behavior. This provider waits after ASG creation for
`min_size` (or `desired_capacity`, if specified) healthy instances to show up
in the ASG before continuing.
If `min_size` or `desired_capacity` are changed in a subsequent update,
this provider will also wait for the correct number of healthy instances before
continuing.
This provider considers an instance "healthy" when the ASG reports `HealthStatus:
"Healthy"` and `LifecycleState: "InService"`. See the [AWS AutoScaling
Docs](https://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
for more information on an ASG's lifecycle.
This provider will wait for healthy instances for up to
`wait_for_capacity_timeout`. If ASG creation is taking more than a few minutes,
it's worth investigating for scaling activity errors, which can be caused by
problems with the selected Launch Configuration.
Setting `wait_for_capacity_timeout` to `"0"` disables ASG Capacity waiting.
#### Waiting for ELB Capacity
The second mechanism is optional, and affects ASGs with attached ELBs specified
via the `load_balancers` attribute or with ALBs specified with `target_group_arns`.
The `min_elb_capacity` parameter causes this provider to wait for at least the
requested number of instances to show up `"InService"` in all attached ELBs
during ASG creation. It has no effect on ASG updates.
If `wait_for_elb_capacity` is set, this provider will wait for exactly that number
of Instances to be `"InService"` in all attached ELBs on both creation and
updates.
These parameters can be used to ensure that service is being provided before
this provider moves on. If new instances don't pass the ELB's health checks for any
reason, the deployment will time out, and the ASG will be marked as
tainted (i.e. marked to be destroyed in a follow up run).
As with ASG Capacity, this provider will wait for up to `wait_for_capacity_timeout`
for the proper number of instances to be healthy.
#### Troubleshooting Capacity Waiting Timeouts
If ASG creation takes more than a few minutes, this could indicate one of a
number of configuration problems. See the [AWS Docs on Load Balancer
Troubleshooting](https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-troubleshooting.html)
for more information.
## Import
Auto Scaling Groups can be imported using the `name`, e.g.
```sh
$ pulumi import aws:autoscaling/group:Group web web-asg
```
:param str resource_name: The name of the resource.
:param GroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """
    Dispatch construction to ``_internal_init`` from either calling convention.

    Supports both the args-object form (``Group(name, GroupArgs(...), opts)``)
    and the plain keyword-argument form (``Group(name, max_size=..., ...)``).

    :param str resource_name: The name of the resource.
    """
    resource_args, opts = _utilities.get_resource_args_opts(GroupArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword-argument form: forward everything untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object form: expand the args object's fields into keyword arguments.
        __self__._internal_init(resource_name, opts, **vars(resource_args))
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   capacity_rebalance: Optional[pulumi.Input[bool]] = None,
                   default_cooldown: Optional[pulumi.Input[int]] = None,
                   desired_capacity: Optional[pulumi.Input[int]] = None,
                   enabled_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   force_delete: Optional[pulumi.Input[bool]] = None,
                   force_delete_warm_pool: Optional[pulumi.Input[bool]] = None,
                   health_check_grace_period: Optional[pulumi.Input[int]] = None,
                   health_check_type: Optional[pulumi.Input[str]] = None,
                   initial_lifecycle_hooks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupInitialLifecycleHookArgs']]]]] = None,
                   instance_refresh: Optional[pulumi.Input[pulumi.InputType['GroupInstanceRefreshArgs']]] = None,
                   launch_configuration: Optional[pulumi.Input[str]] = None,
                   launch_template: Optional[pulumi.Input[pulumi.InputType['GroupLaunchTemplateArgs']]] = None,
                   load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   max_instance_lifetime: Optional[pulumi.Input[int]] = None,
                   max_size: Optional[pulumi.Input[int]] = None,
                   metrics_granularity: Optional[pulumi.Input[Union[str, 'MetricsGranularity']]] = None,
                   min_elb_capacity: Optional[pulumi.Input[int]] = None,
                   min_size: Optional[pulumi.Input[int]] = None,
                   mixed_instances_policy: Optional[pulumi.Input[pulumi.InputType['GroupMixedInstancesPolicyArgs']]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   name_prefix: Optional[pulumi.Input[str]] = None,
                   placement_group: Optional[pulumi.Input[str]] = None,
                   protect_from_scale_in: Optional[pulumi.Input[bool]] = None,
                   service_linked_role_arn: Optional[pulumi.Input[str]] = None,
                   suspended_processes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupTagArgs']]]]] = None,
                   tags_collection: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
                   target_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   vpc_zone_identifiers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   wait_for_capacity_timeout: Optional[pulumi.Input[str]] = None,
                   wait_for_elb_capacity: Optional[pulumi.Input[int]] = None,
                   warm_pool: Optional[pulumi.Input[pulumi.InputType['GroupWarmPoolArgs']]] = None,
                   __props__=None):
    """
    Shared initializer behind both ``__init__`` overloads: validates the
    resource options, builds the input property bag, and registers the
    resource with the Pulumi engine via the base-class constructor.

    :raises TypeError: if ``opts`` is not a ``pulumi.ResourceOptions``, if
        ``__props__`` is passed without ``opts.id``, or if a required
        property (``max_size``, ``min_size``) is missing on creation.
    """
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource (as opposed to looking up an existing one).
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        # max_size/min_size are required unless the resource is being
        # reconstructed from an existing URN.
        if max_size is None and not opts.urn:
            raise TypeError("Missing required property 'max_size'")
        if min_size is None and not opts.urn:
            raise TypeError("Missing required property 'min_size'")
        __props__ = GroupArgs.__new__(GroupArgs)
        for prop_name, prop_value in {
            "availability_zones": availability_zones,
            "capacity_rebalance": capacity_rebalance,
            "default_cooldown": default_cooldown,
            "desired_capacity": desired_capacity,
            "enabled_metrics": enabled_metrics,
            "force_delete": force_delete,
            "force_delete_warm_pool": force_delete_warm_pool,
            "health_check_grace_period": health_check_grace_period,
            "health_check_type": health_check_type,
            "initial_lifecycle_hooks": initial_lifecycle_hooks,
            "instance_refresh": instance_refresh,
            "launch_configuration": launch_configuration,
            "launch_template": launch_template,
            "load_balancers": load_balancers,
            "max_instance_lifetime": max_instance_lifetime,
            "max_size": max_size,
            "metrics_granularity": metrics_granularity,
            "min_elb_capacity": min_elb_capacity,
            "min_size": min_size,
            "mixed_instances_policy": mixed_instances_policy,
            "name": name,
            "name_prefix": name_prefix,
            "placement_group": placement_group,
            "protect_from_scale_in": protect_from_scale_in,
            "service_linked_role_arn": service_linked_role_arn,
            "suspended_processes": suspended_processes,
            "tags": tags,
            "tags_collection": tags_collection,
            "target_group_arns": target_group_arns,
            "termination_policies": termination_policies,
            "vpc_zone_identifiers": vpc_zone_identifiers,
            "wait_for_capacity_timeout": wait_for_capacity_timeout,
            "wait_for_elb_capacity": wait_for_elb_capacity,
            # `arn` is an output-only property; the engine populates it.
            "arn": None,
            "warm_pool": warm_pool,
        }.items():
            __props__.__dict__[prop_name] = prop_value
    super(Group, __self__).__init__(
        'aws:autoscaling/group:Group',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        arn: Optional[pulumi.Input[str]] = None,
        availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        capacity_rebalance: Optional[pulumi.Input[bool]] = None,
        default_cooldown: Optional[pulumi.Input[int]] = None,
        desired_capacity: Optional[pulumi.Input[int]] = None,
        enabled_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        force_delete: Optional[pulumi.Input[bool]] = None,
        force_delete_warm_pool: Optional[pulumi.Input[bool]] = None,
        health_check_grace_period: Optional[pulumi.Input[int]] = None,
        health_check_type: Optional[pulumi.Input[str]] = None,
        initial_lifecycle_hooks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupInitialLifecycleHookArgs']]]]] = None,
        instance_refresh: Optional[pulumi.Input[pulumi.InputType['GroupInstanceRefreshArgs']]] = None,
        launch_configuration: Optional[pulumi.Input[str]] = None,
        launch_template: Optional[pulumi.Input[pulumi.InputType['GroupLaunchTemplateArgs']]] = None,
        load_balancers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        max_instance_lifetime: Optional[pulumi.Input[int]] = None,
        max_size: Optional[pulumi.Input[int]] = None,
        metrics_granularity: Optional[pulumi.Input[Union[str, 'MetricsGranularity']]] = None,
        min_elb_capacity: Optional[pulumi.Input[int]] = None,
        min_size: Optional[pulumi.Input[int]] = None,
        mixed_instances_policy: Optional[pulumi.Input[pulumi.InputType['GroupMixedInstancesPolicyArgs']]] = None,
        name: Optional[pulumi.Input[str]] = None,
        name_prefix: Optional[pulumi.Input[str]] = None,
        placement_group: Optional[pulumi.Input[str]] = None,
        protect_from_scale_in: Optional[pulumi.Input[bool]] = None,
        service_linked_role_arn: Optional[pulumi.Input[str]] = None,
        suspended_processes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupTagArgs']]]]] = None,
        tags_collection: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
        target_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        termination_policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        vpc_zone_identifiers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        wait_for_capacity_timeout: Optional[pulumi.Input[str]] = None,
        wait_for_elb_capacity: Optional[pulumi.Input[int]] = None,
        warm_pool: Optional[pulumi.Input[pulumi.InputType['GroupWarmPoolArgs']]] = None) -> 'Group':
    """
    Get an existing Group resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] arn: The ARN for this Auto Scaling Group
    :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `vpc_zone_identifier` argument. Conflicts with `vpc_zone_identifier`.
    :param pulumi.Input[bool] capacity_rebalance: Indicates whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled.
    :param pulumi.Input[int] default_cooldown: The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
    :param pulumi.Input[int] desired_capacity: The number of Amazon EC2 instances that
           should be running in the group. (See also Waiting for
           Capacity below.)
    :param pulumi.Input[Sequence[pulumi.Input[str]]] enabled_metrics: A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`.
    :param pulumi.Input[bool] force_delete: Allows deleting the Auto Scaling Group without waiting
           for all instances in the pool to terminate. You can force an Auto Scaling Group to delete
           even if it's in the process of scaling a resource. Normally, this provider
           drains all the instances before deleting the group. This bypasses that
           behavior and potentially leaves resources dangling.
    :param pulumi.Input[bool] force_delete_warm_pool: NOTE(review): no upstream description for this
           parameter; presumably it allows deleting the warm pool without draining its
           instances — confirm against the AWS provider docs.
    :param pulumi.Input[int] health_check_grace_period: Time (in seconds) after instance comes into service before checking health.
    :param pulumi.Input[str] health_check_type: "EC2" or "ELB". Controls how health checking is done.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupInitialLifecycleHookArgs']]]] initial_lifecycle_hooks: One or more
           [Lifecycle Hooks](http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html)
           to attach to the Auto Scaling Group **before** instances are launched. The
           syntax is exactly the same as the separate
           `autoscaling.LifecycleHook`
           resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating
           a new Auto Scaling Group. For all other use-cases, please use `autoscaling.LifecycleHook` resource.
    :param pulumi.Input[pulumi.InputType['GroupInstanceRefreshArgs']] instance_refresh: If this block is configured, start an
           [Instance Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
           when this Auto Scaling Group is updated. Defined below.
    :param pulumi.Input[str] launch_configuration: The name of the launch configuration to use.
    :param pulumi.Input[pulumi.InputType['GroupLaunchTemplateArgs']] launch_template: Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] load_balancers: A list of elastic load balancer names to add to the autoscaling
           group names. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead.
    :param pulumi.Input[int] max_instance_lifetime: The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds.
    :param pulumi.Input[int] max_size: The maximum size of the Auto Scaling Group.
    :param pulumi.Input[Union[str, 'MetricsGranularity']] metrics_granularity: The granularity to associate with the metrics to collect. The only valid value is `1Minute`. Default is `1Minute`.
    :param pulumi.Input[int] min_elb_capacity: Setting this causes the provider to wait for
           this number of instances from this Auto Scaling Group to show up healthy in the
           ELB only on creation. Updates will not wait on ELB instance number changes.
           (See also Waiting for Capacity below.)
    :param pulumi.Input[int] min_size: The minimum size of the Auto Scaling Group.
           (See also Waiting for Capacity below.)
    :param pulumi.Input[pulumi.InputType['GroupMixedInstancesPolicyArgs']] mixed_instances_policy: Configuration block containing settings to define launch targets for Auto Scaling groups. Defined below.
    :param pulumi.Input[str] name: The name of the Auto Scaling Group. By default generated by this provider.
    :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
           prefix. Conflicts with `name`.
    :param pulumi.Input[str] placement_group: The name of the placement group into which you'll launch your instances, if any.
    :param pulumi.Input[bool] protect_from_scale_in: Allows setting instance protection. The
           Auto Scaling Group will not select instances with this setting for termination
           during scale in events.
    :param pulumi.Input[str] service_linked_role_arn: The ARN of the service-linked role that the ASG will use to call other AWS services
    :param pulumi.Input[Sequence[pulumi.Input[str]]] suspended_processes: A list of processes to suspend for the Auto Scaling Group. The allowed values are `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`, `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`.
           Note that if you suspend either the `Launch` or `Terminate` process types, it can prevent your Auto Scaling Group from functioning properly.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GroupTagArgs']]]] tags: Configuration block(s) containing resource tags. Conflicts with `tags_collection`. Documented below.
    :param pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]] tags_collection: Set of maps containing resource tags. Conflicts with `tags`. Documented below.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] target_group_arns: A set of `alb.TargetGroup` ARNs, for use with Application or Network Load Balancing.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] termination_policies: A list of policies to decide how the instances in the Auto Scaling Group should be terminated. The allowed values are `OldestInstance`, `NewestInstance`, `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`, `OldestLaunchTemplate`, `AllocationStrategy`, `Default`.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] vpc_zone_identifiers: A list of subnet IDs to launch resources in. Subnets automatically determine which availability zones the group will reside. Conflicts with `availability_zones`.
    :param pulumi.Input[str] wait_for_capacity_timeout: A maximum
           [duration](https://golang.org/pkg/time/#ParseDuration) that this provider should
           wait for ASG instances to be healthy before timing out. (See also Waiting
           for Capacity below.) Setting this to "0" causes
           this provider to skip all Capacity Waiting behavior.
    :param pulumi.Input[int] wait_for_elb_capacity: Setting this will cause the provider to wait
           for exactly this number of healthy instances from this Auto Scaling Group in
           all attached load balancers on both create and update operations. (Takes
           precedence over `min_elb_capacity` behavior.)
           (See also Waiting for Capacity below.)
    :param pulumi.Input[pulumi.InputType['GroupWarmPoolArgs']] warm_pool: If this block is configured, add a [Warm Pool](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html)
           to the specified Auto Scaling group. Defined below
    """
    # Bind the provider ID into the options so the engine performs a lookup
    # of the existing resource rather than creating a new one.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # Populate the state bag; any property left as None is read back from
    # the provider during the lookup.
    __props__ = _GroupState.__new__(_GroupState)

    __props__.__dict__["arn"] = arn
    __props__.__dict__["availability_zones"] = availability_zones
    __props__.__dict__["capacity_rebalance"] = capacity_rebalance
    __props__.__dict__["default_cooldown"] = default_cooldown
    __props__.__dict__["desired_capacity"] = desired_capacity
    __props__.__dict__["enabled_metrics"] = enabled_metrics
    __props__.__dict__["force_delete"] = force_delete
    __props__.__dict__["force_delete_warm_pool"] = force_delete_warm_pool
    __props__.__dict__["health_check_grace_period"] = health_check_grace_period
    __props__.__dict__["health_check_type"] = health_check_type
    __props__.__dict__["initial_lifecycle_hooks"] = initial_lifecycle_hooks
    __props__.__dict__["instance_refresh"] = instance_refresh
    __props__.__dict__["launch_configuration"] = launch_configuration
    __props__.__dict__["launch_template"] = launch_template
    __props__.__dict__["load_balancers"] = load_balancers
    __props__.__dict__["max_instance_lifetime"] = max_instance_lifetime
    __props__.__dict__["max_size"] = max_size
    __props__.__dict__["metrics_granularity"] = metrics_granularity
    __props__.__dict__["min_elb_capacity"] = min_elb_capacity
    __props__.__dict__["min_size"] = min_size
    __props__.__dict__["mixed_instances_policy"] = mixed_instances_policy
    __props__.__dict__["name"] = name
    __props__.__dict__["name_prefix"] = name_prefix
    __props__.__dict__["placement_group"] = placement_group
    __props__.__dict__["protect_from_scale_in"] = protect_from_scale_in
    __props__.__dict__["service_linked_role_arn"] = service_linked_role_arn
    __props__.__dict__["suspended_processes"] = suspended_processes
    __props__.__dict__["tags"] = tags
    __props__.__dict__["tags_collection"] = tags_collection
    __props__.__dict__["target_group_arns"] = target_group_arns
    __props__.__dict__["termination_policies"] = termination_policies
    __props__.__dict__["vpc_zone_identifiers"] = vpc_zone_identifiers
    __props__.__dict__["wait_for_capacity_timeout"] = wait_for_capacity_timeout
    __props__.__dict__["wait_for_elb_capacity"] = wait_for_elb_capacity
    __props__.__dict__["warm_pool"] = warm_pool
    return Group(resource_name, opts=opts, __props__=__props__)
# --- Output properties ------------------------------------------------------
# Each getter reads the named output from the resource's resolved state via
# `pulumi.get`; values are populated by the Pulumi engine at runtime.

@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
    """
    The ARN for this Auto Scaling Group
    """
    return pulumi.get(self, "arn")

@property
@pulumi.getter(name="availabilityZones")
def availability_zones(self) -> pulumi.Output[Sequence[str]]:
    """
    A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `vpc_zone_identifier` argument. Conflicts with `vpc_zone_identifier`.
    """
    return pulumi.get(self, "availability_zones")

@property
@pulumi.getter(name="capacityRebalance")
def capacity_rebalance(self) -> pulumi.Output[Optional[bool]]:
    """
    Indicates whether capacity rebalance is enabled. Otherwise, capacity rebalance is disabled.
    """
    return pulumi.get(self, "capacity_rebalance")

@property
@pulumi.getter(name="defaultCooldown")
def default_cooldown(self) -> pulumi.Output[int]:
    """
    The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
    """
    return pulumi.get(self, "default_cooldown")

@property
@pulumi.getter(name="desiredCapacity")
def desired_capacity(self) -> pulumi.Output[int]:
    """
    The number of Amazon EC2 instances that
    should be running in the group. (See also Waiting for
    Capacity below.)
    """
    return pulumi.get(self, "desired_capacity")

@property
@pulumi.getter(name="enabledMetrics")
def enabled_metrics(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances`.
    """
    return pulumi.get(self, "enabled_metrics")

@property
@pulumi.getter(name="forceDelete")
def force_delete(self) -> pulumi.Output[Optional[bool]]:
    """
    Allows deleting the Auto Scaling Group without waiting
    for all instances in the pool to terminate. You can force an Auto Scaling Group to delete
    even if it's in the process of scaling a resource. Normally, this provider
    drains all the instances before deleting the group. This bypasses that
    behavior and potentially leaves resources dangling.
    """
    return pulumi.get(self, "force_delete")
@property
@pulumi.getter(name="forceDeleteWarmPool")
def force_delete_warm_pool(self) -> pulumi.Output[Optional[bool]]:
    # NOTE(review): no upstream description for this output; presumably it
    # mirrors whether the warm pool may be deleted without draining its
    # instances — confirm against the AWS provider docs before documenting.
    return pulumi.get(self, "force_delete_warm_pool")
@property
@pulumi.getter(name="healthCheckGracePeriod")
def health_check_grace_period(self) -> pulumi.Output[Optional[int]]:
    """
    Time (in seconds) after instance comes into service before checking health.
    """
    return pulumi.get(self, "health_check_grace_period")

@property
@pulumi.getter(name="healthCheckType")
def health_check_type(self) -> pulumi.Output[str]:
    """
    "EC2" or "ELB". Controls how health checking is done.
    """
    return pulumi.get(self, "health_check_type")

@property
@pulumi.getter(name="initialLifecycleHooks")
def initial_lifecycle_hooks(self) -> pulumi.Output[Optional[Sequence['outputs.GroupInitialLifecycleHook']]]:
    """
    One or more
    [Lifecycle Hooks](http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html)
    to attach to the Auto Scaling Group **before** instances are launched. The
    syntax is exactly the same as the separate
    `autoscaling.LifecycleHook`
    resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating
    a new Auto Scaling Group. For all other use-cases, please use `autoscaling.LifecycleHook` resource.
    """
    return pulumi.get(self, "initial_lifecycle_hooks")

@property
@pulumi.getter(name="instanceRefresh")
def instance_refresh(self) -> pulumi.Output[Optional['outputs.GroupInstanceRefresh']]:
    """
    If this block is configured, start an
    [Instance Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html)
    when this Auto Scaling Group is updated. Defined below.
    """
    return pulumi.get(self, "instance_refresh")

@property
@pulumi.getter(name="launchConfiguration")
def launch_configuration(self) -> pulumi.Output[Optional[str]]:
    """
    The name of the launch configuration to use.
    """
    return pulumi.get(self, "launch_configuration")

@property
@pulumi.getter(name="launchTemplate")
def launch_template(self) -> pulumi.Output[Optional['outputs.GroupLaunchTemplate']]:
    """
    Nested argument containing launch template settings along with the overrides to specify multiple instance types and weights. Defined below.
    """
    return pulumi.get(self, "launch_template")

@property
@pulumi.getter(name="loadBalancers")
def load_balancers(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    A list of elastic load balancer names to add to the autoscaling
    group names. Only valid for classic load balancers. For ALBs, use `target_group_arns` instead.
    """
    return pulumi.get(self, "load_balancers")

@property
@pulumi.getter(name="maxInstanceLifetime")
def max_instance_lifetime(self) -> pulumi.Output[Optional[int]]:
    """
    The maximum amount of time, in seconds, that an instance can be in service, values must be either equal to 0 or between 604800 and 31536000 seconds.
    """
    return pulumi.get(self, "max_instance_lifetime")

@property
@pulumi.getter(name="maxSize")
def max_size(self) -> pulumi.Output[int]:
    """
    The maximum size of the Auto Scaling Group.
    """
    return pulumi.get(self, "max_size")

@property
@pulumi.getter(name="metricsGranularity")
def metrics_granularity(self) -> pulumi.Output[Optional[str]]:
    """
    The granularity to associate with the metrics to collect. The only valid value is `1Minute`. Default is `1Minute`.
    """
    return pulumi.get(self, "metrics_granularity")

@property
@pulumi.getter(name="minElbCapacity")
def min_elb_capacity(self) -> pulumi.Output[Optional[int]]:
    """
    Setting this causes the provider to wait for
    this number of instances from this Auto Scaling Group to show up healthy in the
    ELB only on creation. Updates will not wait on ELB instance number changes.
    (See also Waiting for Capacity below.)
    """
    return pulumi.get(self, "min_elb_capacity")
@property
@pulumi.getter(name="minSize")
def min_size(self) -> pulumi.Output[int]:
    """
    The minimum size of the Auto Scaling Group.
    (See also Waiting for Capacity below.)
    """
    # The previous docstring described the *warm pool* min_size (a nested
    # GroupWarmPoolArgs field); this property is the group's own minimum size.
    return pulumi.get(self, "min_size")
@property
@pulumi.getter(name="mixedInstancesPolicy")
def mixed_instances_policy(self) -> pulumi.Output[Optional['outputs.GroupMixedInstancesPolicy']]:
"""
Configuration block containing settings to define launch targets for Auto Scaling groups. Defined below.
"""
return pulumi.get(self, "mixed_instances_policy")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Auto Scaling Group. By default generated by this provider.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namePrefix")
def name_prefix(self) -> pulumi.Output[Optional[str]]:
    """Creates a unique name that starts with the given prefix.
    Conflicts with `name`.
    """
    value = pulumi.get(self, "name_prefix")
    return value
@property
@pulumi.getter(name="placementGroup")
def placement_group(self) -> pulumi.Output[Optional[str]]:
    """Name of the placement group the instances launch into, if any."""
    value = pulumi.get(self, "placement_group")
    return value
@property
@pulumi.getter(name="protectFromScaleIn")
def protect_from_scale_in(self) -> pulumi.Output[Optional[bool]]:
    """Enables instance protection: the Auto Scaling Group will not select
    instances with this setting for termination during scale-in events.
    """
    value = pulumi.get(self, "protect_from_scale_in")
    return value
@property
@pulumi.getter(name="serviceLinkedRoleArn")
def service_linked_role_arn(self) -> pulumi.Output[str]:
    """ARN of the service-linked role the ASG uses to call other AWS services."""
    value = pulumi.get(self, "service_linked_role_arn")
    return value
@property
@pulumi.getter(name="suspendedProcesses")
def suspended_processes(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """Processes to suspend for the Auto Scaling Group. Allowed values are
    `Launch`, `Terminate`, `HealthCheck`, `ReplaceUnhealthy`, `AZRebalance`,
    `AlarmNotification`, `ScheduledActions`, `AddToLoadBalancer`.
    Suspending `Launch` or `Terminate` can prevent the Auto Scaling Group
    from functioning properly.
    """
    value = pulumi.get(self, "suspended_processes")
    return value
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.GroupTag']]]:
    """
    Configuration block(s) containing resource tags. Conflicts with `tags_collection`. Documented below.

    NOTE(review): the original said "Conflicts with `tags`", i.e. with
    itself — a generation error; the sibling property here is
    `tags_collection`. Confirm against the provider schema.
    """
    return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsCollection")
def tags_collection(self) -> pulumi.Output[Optional[Sequence[Mapping[str, str]]]]:
    """
    Set of maps containing resource tags. Conflicts with `tags`. Documented below.

    NOTE(review): the original said "Conflicts with `tag`", but no `tag`
    property exists in this class; the sibling property is `tags`. Confirm
    against the provider schema.
    """
    return pulumi.get(self, "tags_collection")
@property
@pulumi.getter(name="targetGroupArns")
def target_group_arns(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """Set of `alb.TargetGroup` ARNs, for Application or Network Load Balancing."""
    value = pulumi.get(self, "target_group_arns")
    return value
@property
@pulumi.getter(name="terminationPolicies")
def termination_policies(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """Policies deciding how instances in the Auto Scaling Group are
    terminated. Allowed values: `OldestInstance`, `NewestInstance`,
    `OldestLaunchConfiguration`, `ClosestToNextInstanceHour`,
    `OldestLaunchTemplate`, `AllocationStrategy`, `Default`.
    """
    value = pulumi.get(self, "termination_policies")
    return value
@property
@pulumi.getter(name="vpcZoneIdentifiers")
def vpc_zone_identifiers(self) -> pulumi.Output[Sequence[str]]:
    """Subnet IDs to launch resources in; the subnets determine the
    availability zones the group resides in. Conflicts with
    `availability_zones`.
    """
    value = pulumi.get(self, "vpc_zone_identifiers")
    return value
@property
@pulumi.getter(name="waitForCapacityTimeout")
def wait_for_capacity_timeout(self) -> pulumi.Output[Optional[str]]:
    """Maximum [duration](https://golang.org/pkg/time/#ParseDuration) the
    provider waits for ASG instances to become healthy before timing out.
    (See also Waiting for Capacity below.) A value of "0" skips all
    Capacity Waiting behavior.
    """
    value = pulumi.get(self, "wait_for_capacity_timeout")
    return value
@property
@pulumi.getter(name="waitForElbCapacity")
def wait_for_elb_capacity(self) -> pulumi.Output[Optional[int]]:
    """Makes the provider wait for exactly this many healthy instances from
    this Auto Scaling Group in all attached load balancers, on both create
    and update. (Takes precedence over `min_elb_capacity` behavior.)
    (See also Waiting for Capacity below.)
    """
    value = pulumi.get(self, "wait_for_elb_capacity")
    return value
@property
@pulumi.getter(name="warmPool")
def warm_pool(self) -> pulumi.Output[Optional['outputs.GroupWarmPool']]:
    """When configured, adds a [Warm Pool](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html)
    to the specified Auto Scaling group. Defined below.
    """
    value = pulumi.get(self, "warm_pool")
    return value
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs every build as the first hook (See DEPS). If it detects that
the build should be clobbered, it will delete the contents of the build
directory.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
build is clobbered.
"""
import difflib
import errno
import gyp_environment
import logging
import optparse
import os
import sys
import subprocess
import time
import clobber
import landmine_utils
def get_build_dir(build_tool, src_dir, is_iphone=False):
  """Return the absolute output directory for the given build tool.

  Examples:
    r'c:\b\build\slave\win\build\src\out'
    '/mnt/data/b/build/slave/linux/build/src/out'
    '/b/build/slave/ios_rel_device/build/src/xcodebuild'
  Keep this function in sync with tools/build/scripts/slave/compile.py

  Args:
    build_tool: generator name ('xcode', 'make', 'ninja', 'ninja-ios').
    src_dir: path to the source root.
    is_iphone: unused; kept for interface compatibility with callers.

  Raises:
    RuntimeError: if CHROMIUM_OUT_DIR is set but empty.
    NotImplementedError: for an unrecognized build tool.
  """
  ret = None
  if build_tool == 'xcode':
    ret = os.path.join(src_dir, 'xcodebuild')
  elif build_tool in ['make', 'ninja', 'ninja-ios']:  # TODO: Remove ninja-ios.
    if 'CHROMIUM_OUT_DIR' in os.environ:
      output_dir = os.environ.get('CHROMIUM_OUT_DIR').strip()
      if not output_dir:
        # BUG FIX: the original raised the undefined name `Error`, which
        # would itself fail with NameError instead of reporting the problem.
        raise RuntimeError('CHROMIUM_OUT_DIR environment variable is set but blank!')
    else:
      output_dir = landmine_utils.gyp_generator_flags().get('output_dir', 'out')
    ret = os.path.join(src_dir, output_dir)
  else:
    raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
  return os.path.abspath(ret)
def clobber_if_necessary(new_landmines, src_dir):
  """Does the work of setting, planting, and triggering landmines.

  Compares the previously recorded landmines (.landmines under src_dir)
  with new_landmines; on any difference, clobbers the output directory and
  records the new list for next time.
  """
  out_dir = get_build_dir(landmine_utils.builder(), src_dir)
  landmines_path = os.path.normpath(os.path.join(src_dir, '.landmines'))
  try:
    os.makedirs(out_dir)
  except OSError as e:
    # BUG FIX: the original silently swallowed *every* OSError here; only
    # "directory already exists" is expected and safe to ignore.
    if e.errno != errno.EEXIST:
      raise
  if os.path.exists(landmines_path):
    with open(landmines_path, 'r') as f:
      old_landmines = f.readlines()
    if old_landmines != new_landmines:
      old_date = time.ctime(os.stat(landmines_path).st_ctime)
      diff = difflib.unified_diff(old_landmines, new_landmines,
          fromfile='old_landmines', tofile='new_landmines',
          fromfiledate=old_date, tofiledate=time.ctime(), n=0)
      sys.stdout.write('Clobbering due to:\n')
      sys.stdout.writelines(diff)
      sys.stdout.flush()
      clobber.clobber(out_dir)
  # Save current set of landmines for next time.
  with open(landmines_path, 'w') as f:
    f.writelines(new_landmines)
def process_options():
  """Returns an options object containing the configuration for this script."""
  opt_parser = optparse.OptionParser()
  opt_parser.add_option(
      '-s', '--landmine-scripts', action='append',
      help='Path to the script which emits landmines to stdout. The target '
           'is passed to this script via option -t. Note that an extra '
           'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
  opt_parser.add_option('-d', '--src-dir',
      help='Path of the source root dir. Overrides the default location of the '
           'source root dir when calculating the build directory.')
  opt_parser.add_option('-v', '--verbose', action='store_true',
      default=('LANDMINES_VERBOSE' in os.environ),
      help=('Emit some extra debugging information (default off). This option '
            'is also enabled by the presence of a LANDMINES_VERBOSE environment '
            'variable.'))

  opts, leftover = opt_parser.parse_args()
  if leftover:
    opt_parser.error('Unknown arguments %s' % leftover)

  log_level = logging.DEBUG if opts.verbose else logging.ERROR
  logging.basicConfig(level=log_level)

  if not opts.src_dir:
    # Default: the parent of this script's directory.
    opts.src_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
  else:
    if not os.path.isdir(opts.src_dir):
      opt_parser.error('Cannot find source root dir at %s' % opts.src_dir)
    logging.debug('Overriding source root dir. Using: %s', opts.src_dir)

  if not opts.landmine_scripts:
    opts.landmine_scripts = [
        os.path.join(opts.src_dir, 'build', 'get_landmines.py')]
  extra = os.environ.get('EXTRA_LANDMINES_SCRIPT')
  if extra:
    opts.landmine_scripts.append(extra)

  return opts
def main():
  """Collect landmines from all configured scripts and clobber if needed."""
  options = process_options()

  # Builders producing only metadata never need a clobber.
  if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
    return 0

  gyp_environment.SetEnvironment()

  landmines = []
  for script in options.landmine_scripts:
    proc = subprocess.Popen([sys.executable, script], stdout=subprocess.PIPE)
    output, _ = proc.communicate()
    landmines.extend('%s\n' % line.strip() for line in output.splitlines())
  clobber_if_necessary(landmines, options.src_dir)

  return 0

if __name__ == '__main__':
  sys.exit(main())
|
from django.contrib import admin
from . import models
@admin.register(models.Currency)
class CurrencyAdmin(admin.ModelAdmin):
    # Show the ISO code and human-readable name in the changelist.
    list_display = ['code', 'name']
@admin.register(models.Rate)
class RateAdmin(admin.ModelAdmin):
    """Admin for exchange rates, with date drill-down navigation."""
    list_display = ['currency', 'date', 'value']
    date_hierarchy = 'date'

    def get_queryset(self, request):
        # Join the related currency up front to avoid N+1 queries in the
        # changelist.
        queryset = super().get_queryset(request)
        return queryset.select_related('currency')
|
from __future__ import with_statement
from os import environ, chdir, path as p
try:
import json
assert json
except ImportError:
import simplejson as json
from rdflib import ConjunctiveGraph, Graph, Literal, URIRef
from rdflib.compare import isomorphic
from six import PY3
import rdflib_jsonld.parser
from rdflib_jsonld.parser import to_rdf
from rdflib_jsonld.serializer import from_rdf
from rdflib_jsonld.keys import CONTEXT, GRAPH
rdflib_jsonld.parser.ALLOW_LISTS_OF_LISTS = False
# monkey-patch NTriplesParser to keep source bnode id:s ..
from rdflib.plugins.parsers.ntriples import NTriplesParser, r_nodeid, bNode
def _preserving_nodeid(self):
    # Keep the source document's bnode label instead of letting the parser
    # mint a new one.
    if self.peek('_'):
        return bNode(self.eat(r_nodeid).group(1))
    return False
NTriplesParser.nodeid = _preserving_nodeid
# .. and accept bnodes everywhere
# Wrap the stock uriref parser so a blank-node label is accepted wherever a
# URI ref is expected (needed for generalized RDF test cases).
_uriref = NTriplesParser.uriref
def _uriref_or_nodeid(self):
    return _uriref(self) or self.nodeid()
NTriplesParser.uriref = _uriref_or_nodeid
# Test categories this plugin does not implement at all.
unsupported_tests = ("frame", "normalize")
unsupported_tests += ("error", "remote",)
unsupported_tests += ("flatten", "compact", "expand")
# Individual test inputs with known failures; skipped unless the suite is
# run with skip_known_bugs=False.
known_bugs = (
    # invalid nquads (bnode as predicate)
    #"toRdf-0078-in", "toRdf-0108-in",
    # TODO: Literal doesn't preserve representations
    "fromRdf-0002-in", "toRdf-0035-in", "toRdf-0101-in",
    "fromRdf-0008-in", # TODO: needs to disallow outer lists-of-lists
    #"toRdf-0091-in", # TODO: multiple aliases version?
    )
import sys
if sys.version_info[:2] < (2, 6):
    # Fails on bug in older urlparse.urljoin; ignoring..
    known_bugs += ('toRdf-0069-in','toRdf-0102-in')
# Base IRI used by the official JSON-LD test suite.
TC_BASE = "http://json-ld.org/test-suite/tests/"
# Checkout location of the test suite; overridable via JSONLD_TESTSUITE.
testsuite_dir = environ.get("JSONLD_TESTSUITE") or p.join(
    p.abspath(p.dirname(__file__)), "test-suite")
test_dir = p.join(testsuite_dir, "tests")
def read_manifest(skiptests):
    """Yield (category, testnum, inputpath, expectedpath, context, options)
    for every runnable test listed in the suite manifest.

    skiptests: collection of test ids and/or categories to skip.
    """
    # BUG FIX: the manifest files were previously opened without a context
    # manager, leaking file handles if json.load raised.
    with open(p.join(testsuite_dir, "manifest.jsonld"), 'r') as f:
        manifestdata = json.load(f)
    # context = manifestdata.get('context')
    for m in manifestdata.get('sequence'):
        if any(token in m for token in unsupported_tests):
            continue
        with open(p.join(testsuite_dir, m), 'r') as f:
            md = json.load(f)
        for test in md.get('sequence'):
            parts = test.get(u'input', '').split('.')[0].split('-')
            category, testnum, direction = parts
            if test.get(u'input', '').split('.')[0] in skiptests \
                    or category in skiptests:
                continue
            inputpath = test.get(u'input')
            expectedpath = test.get(u'expect')
            context = test.get(u'context', False)
            options = test.get(u'option') or {}
            yield category, testnum, inputpath, expectedpath, context, options
def test_suite(skip_known_bugs=True):
    """Generate (test_func, *test_args) tuples for nose-style discovery."""
    skiptests = unsupported_tests
    if skip_known_bugs:
        skiptests += known_bugs
    chdir(test_dir)
    for cat, num, inputpath, expectedpath, context, options in read_manifest(skiptests):
        # Dispatch on the file extensions of input/expected documents.
        if not inputpath.endswith(".jsonld"):
            func = _test_serializer          # fromRdf
        elif expectedpath.endswith(".jsonld"):
            func = _test_json                # compact/expand/flatten
        else:
            func = _test_parser              # toRdf
        #func.description = "%s-%s-%s" % (group, case)
        yield func, cat, num, inputpath, expectedpath, context, options
def _test_json(cat, num, inputpath, expectedpath, context, options):
    """Round-trip a JSON-LD document through RDF and compare the result with
    the expected JSON."""
    base = TC_BASE + inputpath
    input_obj = _load_json(inputpath)
    input_graph = ConjunctiveGraph()
    to_rdf(input_obj, input_graph, base=base, context_data=context,
           produce_generalized_rdf=True)
    expected_json = _load_json(expectedpath)
    use_native_types = True # CONTEXT in input_obj
    result_json = from_rdf(input_graph, context, base=base,
                           use_native_types=options.get('useNativeTypes', use_native_types),
                           use_rdf_type=options.get('useRdfType', False))

    def _strip_wrappers(data):
        # Drop the context and unwrap a top-level @graph before comparing.
        data.pop(CONTEXT, None)
        if GRAPH in data:
            data = data[GRAPH]
        #def _remove_empty_sets(obj):
        return data

    _compare_json(_strip_wrappers(expected_json), _strip_wrappers(result_json))
def _test_parser(cat, num, inputpath, expectedpath, context, options):
    """Parse a JSON-LD document and assert graph isomorphism with the
    expected N-Quads."""
    source_obj = _load_json(inputpath)
    expected_graph = _load_nquads(expectedpath)
    parsed_graph = ConjunctiveGraph()
    to_rdf(source_obj, parsed_graph,
           base=TC_BASE + inputpath,
           context_data=context,
           produce_generalized_rdf = options.get('produceGeneralizedRdf', False))
    message = "Expected:\n%s\nGot:\n%s" % (
        expected_graph.serialize(format='turtle'),
        parsed_graph.serialize(format='turtle'))
    assert isomorphic(parsed_graph, expected_graph), message
def _test_serializer(cat, num, inputpath, expectedpath, context, options):
    """Serialize an N-Quads graph to JSON-LD and compare with expected JSON."""
    source_graph = _load_nquads(inputpath)
    expected_json = _load_json(expectedpath)
    produced_json = from_rdf(source_graph, context,
                             base=TC_BASE + inputpath,
                             use_native_types=options.get('useNativeTypes', False),
                             use_rdf_type=options.get('useRdfType', False))
    _compare_json(expected_json, produced_json)
def _load_nquads(source):
    """Parse the N-Quads file at *source* into a ConjunctiveGraph."""
    with open(source) as f:
        raw = f.read()
    if not PY3:
        # Python 2 reads bytes; decode before handing to the parser.
        raw = raw.decode('utf-8')
    graph = ConjunctiveGraph()
    graph.parse(data=raw, format='nquads')
    return graph
def _load_json(source):
with open(source) as f:
return json.load(f)
def _to_ordered(obj):
if isinstance(obj, list):
# NOTE: use type in key to handle mixed
# lists of e.g. bool, int, float.
return sorted((_to_ordered(lv) for lv in obj),
key=lambda x: (_ord_key(x), type(x).__name__))
if not isinstance(obj, dict):
return obj
return sorted((k, _to_ordered(v))
for k, v in list(obj.items()))
def _ord_key(x):
if isinstance(x, dict) and '@id' in x:
return x['@id']
else:
return x
def _dump_json(obj):
return json.dumps(obj,
indent=4, separators=(',', ': '),
sort_keys=True, check_circular=True)
def _compare_json(expected, result):
    """Assert JSON-level equality, ignoring list and key ordering."""
    # Normalize both sides through a serialize/parse round trip first.
    expected = json.loads(_dump_json(expected))
    result = json.loads(_dump_json(result))
    message = "Expected JSON:\n%s\nGot:\n%s" % (
        _dump_json(expected), _dump_json(result))
    assert _to_ordered(expected) == _to_ordered(result), message
if __name__ == '__main__':
    # Run the full suite (including known bugs) and emit an EARL conformance
    # report as Turtle on stdout.
    # Optional argv: [asserter IRI] [asserter display name].
    import sys
    from rdflib import *
    from datetime import datetime
    EARL = Namespace("http://www.w3.org/ns/earl#")
    DC = Namespace("http://purl.org/dc/terms/")
    FOAF = Namespace("http://xmlns.com/foaf/0.1/")
    DOAP = Namespace("http://usefulinc.com/ns/doap#")
    rdflib_jsonld_page = "https://github.com/RDFLib/rdflib-jsonld"
    rdflib_jsonld = URIRef(rdflib_jsonld_page + "#it")
    args = sys.argv[1:]
    asserter = URIRef(args.pop(0)) if args else None
    asserter_name = Literal(args.pop(0)) if args else None
    graph = Graph()
    # The Turtle template below is filled in from the local variables defined
    # above via vars(); it describes the software under test.
    graph.parse(data="""
    @prefix earl: <{EARL}> .
    @prefix dc: <{DC}> .
    @prefix foaf: <{FOAF}> .
    @prefix doap: <{DOAP}> .
    <{rdflib_jsonld}> a doap:Project, earl:TestSubject, earl:Software ;
        doap:homepage <{rdflib_jsonld_page}> ;
        doap:name "RDFLib-JSONLD" ;
        doap:programming-language "Python" ;
        doap:title "RDFLib plugin for JSON-LD " .
    """.format(**vars()), format='turtle')
    if asserter_name:
        graph.add((asserter, RDF.type, FOAF.Person))
        graph.add((asserter, FOAF.name, asserter_name))
        graph.add((rdflib_jsonld, DOAP.developer, asserter))
    for args in test_suite(skip_known_bugs=False):
        try:
            args[0](*args[1:])
            success = True
        except AssertionError:
            success = False
        # Record one earl:Assertion per executed test case.
        assertion = graph.resource(BNode())
        assertion.add(RDF.type, EARL.Assertion)
        assertion.add(EARL.mode, EARL.automatic)
        if asserter:
            assertion.add(EARL.assertedBy, asserter)
        assertion.add(EARL.subject, rdflib_jsonld)
        # args is (func, cat, num, ...): {1} is the category, {2} the number.
        assertion.add(EARL.test, URIRef(
            "http://json-ld.org/test-suite/tests/{1}-manifest.jsonld#t{2}".format(*args)))
        result = graph.resource(BNode())
        assertion.add(EARL.result, result)
        result.add(RDF.type, EARL.TestResult)
        result.add(DC.date, Literal(datetime.utcnow()))
        result.add(EARL.outcome, EARL.passed if success else EARL.failed)
    graph.serialize(sys.stdout, format='turtle')
|
"""
Copyright (C) 2018 Patrick Schwab, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import numpy as np
from perfect_match.apps.parameters import clip_percentage
from sklearn.metrics.pairwise import cosine_similarity
from perfect_match.data_access.generator import get_last_id_set
from perfect_match.data_access.tcga.data_access import DataAccess
class TCGABenchmark(object):
    """Semi-synthetic treatment/outcome benchmark built on TCGA RNA-seq data.

    Treatment responses are simulated from randomly selected patient
    'centroids': each treatment gets a centroid expression profile and a
    response distribution, and patients are assigned (with configurable bias)
    to treatments whose centroids their own expression profile resembles.
    """

    def __init__(self, data_dir, num_treatments=2,
                 num_centroids_mean=7, num_centroids_std=2,
                 num_relevant_gene_loci_mean=10, num_relevant_gene_loci_std=3,
                 response_mean_of_mean=0.45, response_std_of_mean=0.15,
                 response_mean_of_std=0.1, response_std_of_std=0.05,
                 strength_of_assignment_bias=10, epsilon_std=0.15,
                 **kwargs):
        self.centroids = None
        self.data_access = DataAccess(data_dir, **kwargs)
        # patient id -> (assigned treatment, per-treatment outcomes)
        self.assignment_cache = {}
        self.num_treatments = num_treatments
        self.num_centroids_mean = num_centroids_mean
        self.num_centroids_std = num_centroids_std
        self.num_relevant_gene_loci_mean = num_relevant_gene_loci_mean
        self.num_relevant_gene_loci_std = num_relevant_gene_loci_std
        self.response_mean_of_mean = response_mean_of_mean
        self.response_std_of_mean = response_std_of_mean
        self.response_mean_of_std = response_mean_of_std
        self.response_std_of_std = response_std_of_std
        self.strength_of_assignment_bias = strength_of_assignment_bias
        self.epsilon_std = epsilon_std
        self.assign_counterfactuals = False
        # NOTE(review): stored but initialise() seeds with a fixed 909
        # instead of self.seed -- presumably intentional so the benchmark
        # construction is identical across runs; confirm.
        self.seed = kwargs["seed"]
        self.random_generator = None
        self.num_features = int(np.rint(kwargs["tcga_num_features"]))

    @staticmethod
    def get_db_file_name():
        """Name of the backing SQLite database file."""
        return DataAccess.DB_FILE_NAME

    def filter(self, patients):
        """No-op filter; all patients participate in this benchmark."""
        return patients

    def set_assign_counterfactuals(self, value):
        """Toggle returning all potential outcomes vs. the factual one only."""
        self.assign_counterfactuals = value

    def get_num_treatments(self):
        """Number of (non-control) treatment options."""
        return self.num_treatments

    def get_data_access(self):
        """Underlying DataAccess handle."""
        return self.data_access

    def get_input_shapes(self, args):
        """Per-sample input shape: either the subsampled feature count or
        the full RNA-seq dimension. `args` is unused but kept for interface
        compatibility."""
        if self.num_features > 0:
            return (self.num_features,)
        else:
            return (self.data_access.get_rnaseq_dimension(),)

    def get_output_shapes(self, args):
        """Single scalar outcome per sample. `args` is unused."""
        return (1,)

    def initialise(self, args):
        """Reset the simulator state and (optionally) pick a random feature
        subset. Uses a fixed seed so feature selection is reproducible."""
        self.random_generator = np.random.RandomState(909)
        self.centroids = None
        all_features = self.data_access.get_rnaseq_dimension()
        if self.num_features > 0 and self.num_features != all_features:
            self.selected_features = self.random_generator.choice(self.data_access.get_rnaseq_dimension(),
                                                                  self.num_features, replace=False)
        else:
            self.selected_features = np.arange(all_features)

    def fit(self, generator, steps, batch_size):
        """Sample one centroid patient per treatment (plus control) from the
        generator stream and attach simulated response distributions."""
        centroids_tmp = []
        centroid_indices = sorted(self.random_generator.permutation(steps*batch_size)[:self.num_treatments + 1])
        current_idx = 0
        while len(centroid_indices) != 0:
            x, _ = next(generator)
            ids = get_last_id_set()
            # NOTE(review): `next_index` is a position in the *whole* stream
            # but is used to index into the current batch (x[0], ids) --
            # looks only safe when all centroids fall into the first batch;
            # confirm against the generator's batching.
            while len(centroid_indices) != 0 and centroid_indices[0] <= current_idx + len(x[0]):
                next_index = centroid_indices[0]
                del centroid_indices[0]
                is_last_treatment = len(centroid_indices) == 0
                if is_last_treatment:
                    # Last treatment is control = worse expected outcomes.
                    response_mean_of_mean = 1 - self.response_mean_of_mean
                else:
                    response_mean_of_mean = self.response_mean_of_mean
                response_mean = clip_percentage(self.random_generator.normal(response_mean_of_mean,
                                                                             self.response_std_of_mean))
                response_std = clip_percentage(self.random_generator.normal(self.response_mean_of_std,
                                                                            self.response_std_of_std))
                gene_loci_indices = np.arange(len(x[0][next_index]))
                rnaseq_data = self.data_access.get_entry_with_id(ids[next_index])[1]["rnaseq"][1]
                centroids_tmp.append((gene_loci_indices, rnaseq_data[gene_loci_indices], response_mean, response_std))
            current_idx += len(x[0])
        self.centroids = centroids_tmp
        self.assignment_cache = {}

    def get_assignment(self, id, x):
        """Return (treatment, outcome) for patient *id*, simulating and
        caching on first request. With assign_counterfactuals enabled, the
        full outcome vector is returned instead of the factual outcome."""
        if self.centroids is None:
            return 0, 0
        if id not in self.assignment_cache:
            rnaseq_data = self.data_access.get_entry_with_id(id)[1]["rnaseq"][1]
            assigned_treatment, assigned_y = self._assign(rnaseq_data)
            self.assignment_cache[id] = assigned_treatment, assigned_y
        assigned_treatment, assigned_y = self.assignment_cache[id]
        if self.assign_counterfactuals:
            return assigned_treatment, assigned_y
        else:
            return assigned_treatment, assigned_y[assigned_treatment]

    @staticmethod
    def stable_softmax(x):
        """Numerically stable softmax (shift by max before exponentiating)."""
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum(axis=0)

    def select_features(self, x):
        """Project samples onto the selected feature subset (columns)."""
        return x[:, self.selected_features]

    def _assign(self, x):
        """Simulate per-treatment outcomes for expression profile *x* and
        draw a biased treatment assignment. Returns (treatment, outcomes)."""
        # Assignment should be biased towards treatments that help more.
        assert self.centroids is not None, "Must call __fit__ before __assign__."
        distances = self.get_centroid_weights(x)
        expected_responses = []
        for treatment in range(self.num_treatments + 1):
            _, _, response_mean, response_std = self.centroids[treatment]
            y_this_treatment = self.random_generator.normal(response_mean, response_std)
            expected_responses.append(
                clip_percentage(y_this_treatment + self.random_generator.normal(0.0, self.epsilon_std))
            )
        expected_responses = np.array(expected_responses)
        y = []
        control_response, control_distance = expected_responses[-1], distances[-1]
        for treatment_idx in range(self.num_treatments):
            this_response, this_distance = expected_responses[treatment_idx], distances[treatment_idx]
            y.append(this_response * (this_distance + control_distance))
        y = np.array(y)
        # Invert the expected responses, because a lower percentage of recurrence/death is a better outcome.
        treatment_chosen = self.random_generator.choice(self.num_treatments,
                                                        p=TCGABenchmark.stable_softmax(
                                                            self.strength_of_assignment_bias * y)
                                                        )
        # NOTE(review): outcomes are scaled by 50 here -- presumably to map
        # percentages into the reporting range used downstream; confirm.
        return treatment_chosen, 50*y

    def get_centroid_weights(self, x):
        """Cosine similarity of *x* to each treatment centroid profile."""
        # BUG FIX (py3 compatibility): the original passed a lazy ``map``
        # object to ``np.squeeze``, which does not evaluate the similarities
        # under Python 3. Materialize the list explicitly; the result is
        # identical under Python 2.
        similarities = [
            cosine_similarity(indices_and_centroid[1].reshape(1, -1).__class__ and
                              x[indices_and_centroid[0]].reshape(1, -1),
                              indices_and_centroid[1].reshape(1, -1))
            for indices_and_centroid in self.centroids
        ]
        return np.squeeze(similarities)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds a nullable creation-date column to NutritionParameters; nullable
    # so existing rows remain valid without a backfill.

    dependencies = [
        ('my_charts', '0002_auto_20140922_0520'),
    ]

    operations = [
        migrations.AddField(
            model_name='nutritionparameters',
            name='nutrition_date',
            # help_text is a bytestring because this migration targets Python 2.
            field=models.DateField(help_text=b'Date Created', null=True),
            preserve_default=True,
        ),
    ]
|
'''
Code to accompany
"Unsupervised Discovery of Multimodal Links in Multi-Sentence/Multi-Image Documents."
https://github.com/jmhessel/multi-retrieval
This is a work-in-progress TF2.0 port.
'''
import argparse
import collections
import json
import tensorflow as tf
import numpy as np
import os
import sys
import tqdm
import text_utils
import image_utils
import eval_utils
import model_utils
import training_utils
import bipartite_utils
import pickle
import sklearn.preprocessing
from pprint import pprint
def load_data(fname):
    """Load and return the JSON document stored at *fname*."""
    # json.load streams from the file object instead of reading the whole
    # file into an intermediate string first.
    with open(fname) as f:
        return json.load(f)
def parse_args():
    """Parse command-line flags, validate them, create output directories,
    and return the argparse namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('documents',
                        help='json of train/val/test documents.')
    parser.add_argument('--image_features',
                        help='path to pre-extracted image-feature numpy array.')
    parser.add_argument('--image_id2row',
                        help='path to mapping from image id --> numpy row for image features.')
    parser.add_argument('--joint_emb_dim',
                        type=int,
                        help='Embedding dimension of the shared, multimodal space.',
                        default=1024)
    parser.add_argument('--margin',
                        type=float,
                        help='Margin for computing hinge loss.',
                        default=.2)
    parser.add_argument('--seq_len',
                        type=int,
                        help='Maximum token sequence length for each sentence before truncation.',
                        default=20)
    parser.add_argument('--docs_per_batch',
                        type=int,
                        help='How many docs per batch? 11 docs = 10 negative samples per doc.',
                        default=11)
    parser.add_argument('--neg_mining',
                        help='What type of negative mining?',
                        default='hard_negative',
                        choices=['negative_sample', 'hard_negative'],
                        type=str)
    parser.add_argument('--sim_mode',
                        help='What similarity function should we use?',
                        default='AP',
                        choices=['DC','TK','AP'],
                        type=str)
    parser.add_argument('--sim_mode_k',
                        help='If --sim_mode=TK/AP, what should the k be? '
                        'k=-1 for dynamic = min(n_images, n_sentences))? '
                        'if k > 0, then k=ceil(1./k * min(n_images, n_sentences))',
                        default=-1,
                        type=float)
    parser.add_argument('--lr',
                        type=float,
                        help='Starting learning rate',
                        default=.0002)
    parser.add_argument('--n_epochs',
                        type=int,
                        help='How many epochs to run for?',
                        default=60)
    parser.add_argument('--checkpoint_dir',
                        type=str,
                        help='What directory to save checkpoints in?',
                        default='checkpoints')
    parser.add_argument('--word2vec_binary',
                        type=str,
                        help='If cached word embeddings have not been generated, '
                        'what is the location of the word2vec binary?',
                        default=None)
    parser.add_argument('--cached_word_embeddings',
                        type=str,
                        help='Where are/will the cached word embeddings saved?',
                        default='cached_word2vec.json')
    parser.add_argument('--print_metrics',
                        type=int,
                        help='Should we print the metrics if there are ground-truth '
                        'labels, or no?',
                        default=0)
    parser.add_argument('--cached_vocab',
                        type=str,
                        help='Where should we cache the vocab, if anywhere '
                        '(None means no caching)',
                        default=None)
    parser.add_argument('--output',
                        type=str,
                        default=None,
                        help='If output is set, we will save a pkl file'
                        'with the validation/test metrics.')
    parser.add_argument('--seed',
                        type=int,
                        help='Random seed',
                        default=1)
    parser.add_argument('--dropout',
                        type=float,
                        default=0.5,
                        help='How much dropout should we apply?')
    parser.add_argument('--subsample_image',
                        type=int,
                        default=-1,
                        help='Should we subsample images to constant lengths? '
                        'This option is useful if the model is being trained end2end '
                        'and there are memory issues.')
    parser.add_argument('--subsample_text',
                        type=int,
                        default=-1,
                        help='Should we subsample sentences to constant lengths? '
                        'This option is useful if the model is being trained end2end '
                        'and there are memory issues.')
    parser.add_argument('--rnn_type',
                        type=str,
                        default='GRU',
                        help='What RNN should we use')
    parser.add_argument('--end2end',
                        type=int,
                        default=0,
                        help='Should we backprop through the whole vision network?')
    parser.add_argument('--image_dir',
                        type=str,
                        default=None,
                        help='What image dir should we use, if end2end?')
    parser.add_argument('--lr_patience',
                        type=int,
                        default=3,
                        help='What learning rate patience should we use?')
    parser.add_argument('--lr_decay',
                        type=float,
                        default=.2,
                        help='What learning rate decay factor should we use?')
    parser.add_argument('--min_lr',
                        type=float,
                        default=.0000001,
                        # BUG FIX: the help text was a copy-paste of the
                        # --lr_decay description.
                        help='What minimum learning rate should we use?')
    parser.add_argument('--full_image_paths',
                        type=int,
                        default=0,
                        help='For end2end training, should we use full image paths '
                        '(i.e., is the file extention already on images?)?')
    parser.add_argument('--test_eval',
                        type=int,
                        help='(DEBUG OPTION) If test_eval >= 1, then training '
                        'only happens over this many batches',
                        default=-1)
    parser.add_argument('--force',
                        type=int,
                        default=0,
                        help='Should we force the run if the output exists?')
    parser.add_argument('--save_predictions',
                        type=str,
                        default=None,
                        help='Should we save the train/val/test predictions? '
                        'If so --- they will be saved in this directory.')
    parser.add_argument('--image_model_checkpoint',
                        type=str,
                        default=None,
                        help='If set, the image model will be initialized from '
                        'this model checkpoint.')
    parser.add_argument('--text_model_checkpoint',
                        type=str,
                        default=None,
                        help='If set, the text model will be initialized from '
                        'this model checkpoint.')
    parser.add_argument('--loss_mode',
                        help='What loss function should we use?',
                        default='hinge',
                        choices=['hinge', 'logistic', 'softmax'],
                        type=str)
    parser.add_argument('--compute_mscoco_eval_metrics',
                        help='Should we compute the mscoco MT metrics?',
                        default=0,
                        type=int)
    parser.add_argument('--compute_metrics_train',
                        help='Should we also compute metrics over the training set?',
                        default=1,
                        type=int)
    parser.add_argument('--lr_warmup_steps',
                        help='If positive value, we will warmup the learning rate linearly '
                        'over this many steps.',
                        default=-1,
                        type=int)
    parser.add_argument('--l2_norm',
                        help='If 1, we will l2 normalize extracted features, else, no normalization.',
                        default=1,
                        type=int)
    parser.add_argument('--n_layers',
                        help='How many layers in the encoders?',
                        default=1,
                        type=int,
                        choices=[1,2,3])
    parser.add_argument('--scale_image_features',
                        help='Should we standard scale image features?',
                        default=0,
                        type=int)
    args = parser.parse_args()

    # check to make sure that various flags are set correctly
    if args.end2end:
        assert args.image_dir is not None
    if not args.end2end:
        assert args.image_features is not None and args.image_id2row is not None

    # print out some info about the run's inputs/outputs
    if args.output and '.pkl' not in args.output:
        args.output += '.pkl'
    if args.output:
        print('Output will be saved to {}'.format(args.output))
    print('Model checkpoints will be saved in {}'.format(args.checkpoint_dir))
    if args.output and os.path.exists(args.output) and not args.force:
        print('{} already done! If you want to force it, set --force 1'.format(args.output))
        quit()

    # BUG FIX: the original used bare os.makedirs() for the prediction
    # sub-directories, which raised FileExistsError when re-running a job
    # (and the inner existence check only covered checkpoint_dir itself,
    # which was already guaranteed to exist -- dead code).
    os.makedirs(args.checkpoint_dir, exist_ok=True)
    if args.save_predictions:
        # NOTE(review): predictions are written under checkpoint_dir rather
        # than under the --save_predictions path that the help text
        # suggests -- confirm which directory is intended.
        os.makedirs(args.checkpoint_dir + '/train', exist_ok=True)
        os.makedirs(args.checkpoint_dir + '/val', exist_ok=True)
        os.makedirs(args.checkpoint_dir + '/test', exist_ok=True)
    return args
def main():
    """Train a joint image/sentence document embedding model and evaluate it.

    Pipeline: load documents, build vocab + (optionally precomputed) image
    features, construct text/image encoders into a shared multimodal space,
    train with a pairwise ranking loss, then reload the best checkpoints and
    dump metrics/predictions.

    Fixes relative to the original:
      - removed the broken `from tf.keras.applications.nasnet import ...`
        (`tf` is an import alias, not a package path; the fully-qualified call
        below is what is actually used)
      - `keras.layers.TimeDistributed` -> `tf.keras.layers.TimeDistributed`
        (`keras` was never imported)
      - `tf.div` -> `tf.math.divide` (tf.div was removed in TF2)
      - error message referenced nonexistent `args.sim_fn` -> `args.sim_mode`
      - best image checkpoint was loaded into a misspelled variable
        (`single_image_doc_model`), so evaluation ran with the stale model;
        it is now loaded into `single_img_doc_model`.
    """
    args = parse_args()
    np.random.seed(args.seed)
    data = load_data(args.documents)
    train, val, test = data['train'], data['val'], data['test']
    np.random.shuffle(train); np.random.shuffle(val); np.random.shuffle(test)
    max_n_sentence, max_n_image = -1, -1
    for d in train + val + test:
        imgs, sents, meta = d
        max_n_sentence = max(max_n_sentence, len(sents))
        max_n_image = max(max_n_image, len(imgs))
    # remove zero image/zero sentence cases:
    before_lens = list(map(len, [train, val, test]))
    train = [t for t in train if len(t[0]) > 0 and len(t[1]) > 0]
    val = [t for t in val if len(t[0]) > 0 and len(t[1]) > 0]
    test = [t for t in test if len(t[0]) > 0 and len(t[1]) > 0]
    after_lens = list(map(len, [train, val, test]))
    for bl, al, split in zip(before_lens, after_lens, ['train', 'val', 'test']):
        if bl == al: continue
        print('Removed {} documents from {} split that had zero images and/or sentences'.format(
            bl-al, split))
    print('Max n sentence={}, max n image={}'.format(max_n_sentence, max_n_image))
    if args.cached_vocab:
        print('Saving/loading vocab from {}'.format(args.cached_vocab))
    # create vocab from training documents:
    flattened_train_sents = []
    for _, sents, _ in train:
        flattened_train_sents.extend([s[0] for s in sents])
    word2idx = text_utils.get_vocab(flattened_train_sents, cached=args.cached_vocab)
    print('Vocab size was {}'.format(len(word2idx)))
    if args.word2vec_binary:
        we_init = text_utils.get_word2vec_matrix(
            word2idx, args.cached_word_embeddings, args.word2vec_binary)
    else:
        we_init = np.random.uniform(low=-.02, high=.02, size=(len(word2idx), 300))
    if args.end2end:
        # images are encoded by a CNN on the fly; no precomputed features
        image_features = None
        image_idx2row = None
    else:
        image_features = np.load(args.image_features)
        image_idx2row = load_data(args.image_id2row)
    if args.scale_image_features:
        ss = sklearn.preprocessing.StandardScaler()
        all_train_images = []
        for img, txt, cid in train:
            all_train_images.extend([x[0] for x in img])
        print('standard scaling with {} images total'.format(len(all_train_images)))
        # fit the scaler on training images only, but transform all rows
        all_train_rows = [image_idx2row[cid] for cid in all_train_images]
        ss.fit(image_features[np.array(all_train_rows)])
        image_features = ss.transform(image_features)
    word_emb_dim = 300
    if val[0][0][0][1] is not None:
        ground_truth = True
        print('The input has ground truth, so AUC will be computed.')
    else:
        ground_truth = False
    # Step 1: Specify model inputs/outputs:
    # (n docs, n sent, max n words,)
    text_inp = tf.keras.layers.Input((None, args.seq_len))
    # this input tells you how many sentences are really in each doc
    text_n_inp = tf.keras.layers.Input((1,), dtype='int32')
    if args.end2end:
        # (n docs, n image, x, y, color)
        img_inp = tf.keras.layers.Input((None, 224, 224, 3))
    else:
        # (n docs, n image, feature dim)
        img_inp = tf.keras.layers.Input((None, image_features.shape[1]))
    # this input tells you how many images are really in each doc
    img_n_inp = tf.keras.layers.Input((1,), dtype='int32')
    # Step 2: Define transformations to shared multimodal space.
    # Step 2.1: The text model:
    if args.text_model_checkpoint:
        print('Loading pretrained text model from {}'.format(
            args.text_model_checkpoint))
        single_text_doc_model = tf.keras.models.load_model(args.text_model_checkpoint)
        extracted_text_features = single_text_doc_model(text_inp)
    else:
        word_embedding = tf.keras.layers.Embedding(
            len(word2idx),
            word_emb_dim,
            weights=[we_init] if we_init is not None else None,
            mask_zero=True)
        element_dropout = tf.keras.layers.SpatialDropout1D(args.dropout)
        if args.rnn_type == 'GRU':
            rnn_maker = tf.keras.layers.GRU
        else:
            rnn_maker = tf.keras.layers.LSTM
        enc_layers = []
        for idx in range(args.n_layers):
            if idx == args.n_layers-1:
                # final layer returns only the last state (one vector per sentence)
                enc_layers.append(rnn_maker(args.joint_emb_dim))
            else:
                enc_layers.append(rnn_maker(args.joint_emb_dim, return_sequences=True))
        embedded_text_inp = word_embedding(text_inp)
        extracted_text_features = tf.keras.layers.TimeDistributed(element_dropout)(embedded_text_inp)
        for l in enc_layers:
            extracted_text_features = tf.keras.layers.TimeDistributed(l)(extracted_text_features)
        # extracted_text_features is now (n docs, max n sentences, multimodal dim)
        if args.l2_norm:
            l2_norm_layer = tf.keras.layers.Lambda(lambda x: tf.nn.l2_normalize(x, axis=-1))
            extracted_text_features = l2_norm_layer(extracted_text_features)
        single_text_doc_model = tf.keras.models.Model(
            inputs=text_inp,
            outputs=extracted_text_features)
    # Step 2.2: The image model:
    if args.image_model_checkpoint:
        print('Loading pretrained image model from {}'.format(
            args.image_model_checkpoint))
        single_img_doc_model = tf.keras.models.load_model(args.image_model_checkpoint)
        extracted_img_features = single_img_doc_model(img_inp)
    else:
        if args.end2end:
            img_projection = tf.keras.layers.Dense(args.joint_emb_dim)
            # BUG FIX: the original `from tf.keras.applications.nasnet import
            # NASNetMobile` is not importable (`tf` is an alias); the
            # fully-qualified constructor below is used instead.
            cnn = tf.keras.applications.nasnet.NASNetMobile(
                include_top=False, input_shape=(224, 224, 3), pooling='avg')
            extracted_img_features = tf.keras.layers.TimeDistributed(cnn)(img_inp)
            if args.dropout > 0.0:
                extracted_img_features = tf.keras.layers.TimeDistributed(
                    tf.keras.layers.Dropout(args.dropout))(extracted_img_features)
            # BUG FIX: was `keras.layers.TimeDistributed` (undefined name)
            extracted_img_features = tf.keras.layers.TimeDistributed(img_projection)(
                extracted_img_features)
        else:
            extracted_img_features = tf.keras.layers.Masking()(img_inp)
            if args.dropout > 0.0:
                extracted_img_features = tf.keras.layers.TimeDistributed(
                    tf.keras.layers.Dropout(args.dropout))(extracted_img_features)
            enc_layers = []
            for idx in range(args.n_layers):
                if idx == args.n_layers-1:
                    # final projection into the joint space; no activation/BN
                    enc_layers.append(tf.keras.layers.Dense(args.joint_emb_dim))
                else:
                    enc_layers.append(tf.keras.layers.Dense(args.joint_emb_dim, activation='relu'))
                    enc_layers.append(tf.keras.layers.BatchNormalization())
            for l in enc_layers:
                extracted_img_features = tf.keras.layers.TimeDistributed(l)(extracted_img_features)
        # extracted_img_features is now (n docs, max n images, multimodal dim)
        if args.l2_norm:
            l2_norm_layer = tf.keras.layers.Lambda(lambda x: tf.nn.l2_normalize(x, axis=-1))
            extracted_img_features = l2_norm_layer(extracted_img_features)
        single_img_doc_model = tf.keras.models.Model(
            inputs=img_inp,
            outputs=extracted_img_features)
    # Step 3: Extract/stack the non-padding image/sentence representations
    def mask_slice_and_stack(inp):
        stacker = []
        features, n_inputs = inp
        n_inputs = tf.dtypes.cast(n_inputs, tf.int32)
        # for each document, we will extract the portion of input features that are not padding
        # this means, for features[doc_idx], we will take the first n_inputs[doc_idx] rows.
        # we stack them into one big array so we can do a big cosine sim dot product between all
        # sentence image pairs in parallel. We'll slice up this array back up later.
        for idx in range(args.docs_per_batch):
            cur_valid_idxs = tf.range(n_inputs[idx,0])
            cur_valid_features = features[idx]
            feats = tf.gather(cur_valid_features, cur_valid_idxs)
            stacker.append(feats)
        return tf.concat(stacker, axis=0)
    # extracted text/img features are (n_docs, max_in_seq, dim)
    # we want to compute cosine sims between all (sent, img) pairs quickly
    # so we will stack them into new tensors ...
    # text_enc has shape (total number of sent in batch, dim)
    # img_enc has shape (total number of image in batch, dim)
    text_enc = mask_slice_and_stack([extracted_text_features, text_n_inp])
    img_enc = mask_slice_and_stack([extracted_img_features, img_n_inp])
    def DC_sim(sim_matrix):
        # "Dense Correspondence": mean of row-wise and column-wise maxima
        text2im_S = tf.reduce_mean(tf.reduce_max(sim_matrix, 1))
        im2text_S = tf.reduce_mean(tf.reduce_max(sim_matrix, 0))
        return text2im_S + im2text_S
    def get_k(sim_matrix):
        # k = min(n_sent, n_img), optionally shrunk by a factor of sim_mode_k
        k = tf.minimum(tf.shape(sim_matrix)[0], tf.shape(sim_matrix)[1])
        if args.sim_mode_k > 0:
            k = tf.dtypes.cast(k, tf.float32)
            # BUG FIX: tf.div was removed in TF2; tf.math.divide is equivalent
            k = tf.math.ceil(tf.math.divide(k, args.sim_mode_k))
            k = tf.dtypes.cast(k, tf.int32)
        return k
    def TK_sim(sim_matrix):
        # "Top-K": mean of the k best matches in each direction
        k = get_k(sim_matrix)
        im2text_S, text2im_S = tf.reduce_max(sim_matrix, 0), tf.reduce_max(sim_matrix, 1)
        text2im_S = tf.reduce_mean(tf.math.top_k(text2im_S, k=k)[0], axis=-1)
        im2text_S = tf.reduce_mean(tf.math.top_k(im2text_S, k=k)[0], axis=-1)
        return text2im_S + im2text_S
    bipartite_match_fn = bipartite_utils.generate_fast_hungarian_solving_function()
    def AP_sim(sim_matrix):
        # "Assignment Problem": mean similarity over a hungarian matching
        k = get_k(sim_matrix)
        sol = tf.numpy_function(bipartite_match_fn, [sim_matrix, k], tf.int32)
        return tf.reduce_mean(tf.gather_nd(sim_matrix, sol))
    if args.sim_mode == 'DC':
        sim_fn = DC_sim
    elif args.sim_mode == 'TK':
        sim_fn = TK_sim
    elif args.sim_mode == 'AP':
        sim_fn = AP_sim
    else:
        # BUG FIX: error message referenced nonexistent args.sim_fn
        raise NotImplementedError('{} is not implemented sim function'.format(args.sim_mode))
    def make_sims(inp):
        sims = tf.keras.backend.dot(inp[0], tf.keras.backend.transpose(inp[1]))
        return sims
    all_sims = make_sims([text_enc, img_enc])
    get_pos_neg_sims = model_utils.make_get_pos_neg_sims(
        args,
        sim_fn)
    pos_sims, neg_img_sims, neg_text_sims = tf.keras.layers.Lambda(
        get_pos_neg_sims)([all_sims, text_n_inp, img_n_inp])
    if args.loss_mode == 'hinge':
        def per_neg_loss(inp):
            pos_s, neg_s = inp
            return tf.math.maximum(neg_s - pos_s + args.margin, 0)
    elif args.loss_mode == 'logistic':
        def per_neg_loss(inp):
            pos_s, neg_s = inp
            return tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(neg_s),
                logits=pos_s - neg_s)
    elif args.loss_mode == 'softmax':
        def per_neg_loss(inp):
            pos_s, neg_s = inp
            pos_s -= args.margin
            pos_l, neg_l = tf.ones_like(pos_s), tf.zeros_like(neg_s)
            return tf.nn.softmax_cross_entropy_with_logits(
                tf.concat([pos_l, neg_l], axis=1),
                tf.concat([pos_s, neg_s], axis=1))
    neg_img_losses = per_neg_loss([pos_sims, neg_img_sims])
    neg_text_losses = per_neg_loss([pos_sims, neg_text_sims])
    if args.loss_mode != 'softmax':
        if args.neg_mining == 'negative_sample':
            pool_fn = lambda x: tf.reduce_mean(x, axis=1, keepdims=True)
        elif args.neg_mining == 'hard_negative':
            pool_fn = lambda x: tf.reduce_max(x, axis=1, keepdims=True)
        else:
            raise NotImplementedError('{} is not a valid for args.neg_mining'.format(
                args.neg_mining))
        neg_img_loss = tf.keras.layers.Lambda(pool_fn, name='neg_img')(neg_img_losses)
        neg_text_loss = tf.keras.layers.Lambda(pool_fn, name='neg_text')(neg_text_losses)
    else:
        neg_img_loss = neg_img_losses
        neg_text_loss = neg_text_losses
    inputs = [text_inp,
              img_inp,
              text_n_inp,
              img_n_inp]
    model = tf.keras.models.Model(inputs=inputs,
                                  outputs=[neg_img_loss, neg_text_loss])
    opt = tf.keras.optimizers.Adam(args.lr)
    def identity(y_true, y_pred):
        # the model's outputs already are losses; just average them
        del y_true
        return tf.reduce_mean(y_pred, axis=-1)
    model.compile(opt, loss=identity)
    if args.test_eval > 0:
        # quick smoke-test mode: shrink all splits
        train = train[:args.test_eval * args.docs_per_batch]
        val = val[:args.test_eval * args.docs_per_batch]
        test = test[:args.test_eval * args.docs_per_batch]
    train_seq = training_utils.DocumentSequence(
        train,
        image_features,
        image_idx2row,
        max_n_sentence,
        max_n_image,
        word2idx,
        args=args,
        shuffle_docs=True,
        shuffle_sentences=False,
        shuffle_images=True)
    val_seq = training_utils.DocumentSequence(
        val,
        image_features,
        image_idx2row,
        max_n_sentence,
        max_n_image,
        word2idx,
        args=args,
        augment=False,
        shuffle_sentences=False,
        shuffle_docs=False,
        shuffle_images=False)
    sdm = training_utils.SaveDocModels(
        args.checkpoint_dir,
        single_text_doc_model,
        single_img_doc_model)
    if args.loss_mode == 'hinge':
        val_loss_thresh = 2 * args.margin # constant prediction performance
    else:
        val_loss_thresh = np.inf
    reduce_lr = training_utils.ReduceLROnPlateauAfterValLoss(
        activation_val_loss=val_loss_thresh,
        factor=args.lr_decay,
        patience=args.lr_patience,
        min_lr=args.min_lr,
        verbose=True)
    callbacks = [reduce_lr, sdm]
    if args.print_metrics:
        metrics_printer = training_utils.PrintMetrics(
            val,
            image_features,
            image_idx2row,
            word2idx,
            single_text_doc_model,
            single_img_doc_model,
            args)
        callbacks.append(metrics_printer)
    if args.lr_warmup_steps > 0:
        warmup_lr = training_utils.LearningRateLinearIncrease(
            args.lr,
            args.lr_warmup_steps)
        callbacks.append(warmup_lr)
    history = model.fit(
        train_seq,
        epochs=args.n_epochs,
        validation_data=val_seq,
        callbacks=callbacks)
    if args.output:
        best_image_model_str, best_sentence_model_str, best_logs, best_epoch = sdm.best_checkpoints_and_logs
        single_text_doc_model = tf.keras.models.load_model(best_sentence_model_str)
        # BUG FIX: was assigned to `single_image_doc_model`, so the metrics
        # below were computed with the stale (last-epoch) image model.
        single_img_doc_model = tf.keras.models.load_model(best_image_model_str)
        if args.scale_image_features:
            with open(args.checkpoint_dir + '/image_standardscaler.pkl', 'wb') as f:
                pickle.dump(ss, f)
        if ground_truth and args.compute_metrics_train:
            train_aucs, train_match_metrics, train_mt_metrics = eval_utils.compute_match_metrics_doc(
                train,
                image_features,
                image_idx2row,
                word2idx,
                single_text_doc_model,
                single_img_doc_model,
                args)
        else:
            train_aucs, train_match_metrics, train_mt_metrics = None, None, None
        if ground_truth:
            val_aucs, val_match_metrics, val_mt_metrics = eval_utils.compute_match_metrics_doc(
                val,
                image_features,
                image_idx2row,
                word2idx,
                single_text_doc_model,
                single_img_doc_model,
                args)
            test_aucs, test_match_metrics, test_mt_metrics = eval_utils.compute_match_metrics_doc(
                test,
                image_features,
                image_idx2row,
                word2idx,
                single_text_doc_model,
                single_img_doc_model,
                args)
        else:
            train_aucs, val_aucs, test_aucs = None, None, None
            train_match_metrics, val_match_metrics, test_match_metrics = None, None, None
            train_mt_metrics, val_mt_metrics, test_mt_metrics = None, None, None
        output = {'logs':best_logs,
                  'best_sentence_model_str':best_sentence_model_str,
                  'best_image_model_str':best_image_model_str,
                  'train_aucs':train_aucs,
                  'train_match_metrics':train_match_metrics,
                  'train_mt_metrics':train_mt_metrics,
                  'val_aucs':val_aucs,
                  'val_match_metrics':val_match_metrics,
                  'val_mt_metrics':val_mt_metrics,
                  'test_aucs':test_aucs,
                  'test_match_metrics':test_match_metrics,
                  'test_mt_metrics':test_mt_metrics,
                  'args':args,
                  'epoch':best_epoch}
        if args.scale_image_features:
            output['image_standard_scaler_str'] = args.checkpoint_dir + '/image_standardscaler.pkl'
        for k, v in history.history.items():
            output['history_{}'.format(k)] = v
        if args.print_metrics:
            for k, v in metrics_printer.history.items():
                output['metrics_history_{}'.format(k)] = v
        with open(args.output, 'wb') as f:
            pickle.dump(output, f, protocol=pickle.HIGHEST_PROTOCOL)
        print('saved output to {}'.format(args.output))
    if args.save_predictions:
        for d, name in zip([train, val, test], ['train', 'val', 'test']):
            out_dir = args.save_predictions + '/' + name
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            eval_utils.save_predictions(
                d,
                image_features,
                image_idx2row,
                word2idx,
                single_text_doc_model,
                single_img_doc_model,
                out_dir,
                args)
# Script entry point: run training/evaluation only when invoked directly.
if __name__ == '__main__':
    main()
|
# CUDA_VISIBLE_DEVICES=1 python lfw_eval.py --lfw lfw.zip --epoch_num 2
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
# BUG FIX: was `cudnn.bencmark` (typo) — that silently set a useless module
# attribute, so cudnn autotuning was never actually enabled.
torch.backends.cudnn.benchmark = True
import os,sys,cv2,random,datetime
import argparse
import numpy as np
import zipfile
from dataset import ImageDataset
from matlab_cp2tform import get_similarity_transform_for_cv2
import net_sphere
def alignment(src_img, src_pts):
    """Warp a face image to a 96x112 crop via a 5-landmark similarity transform.

    :param src_img: input BGR image
    :param src_pts: 5 (x, y) facial landmarks, any flat/nested arrangement of 10 values
    :return: aligned 96x112 face crop
    """
    # canonical landmark positions for the 96x112 template
    ref_pts = [[30.2946, 51.6963], [65.5318, 51.5014],
               [48.0252, 71.7366], [33.5493, 92.3655], [62.7299, 92.2041]]
    crop_size = (96, 112)
    src = np.array(src_pts).reshape(5, 2).astype(np.float32)
    ref = np.array(ref_pts).astype(np.float32)
    transform = get_similarity_transform_for_cv2(src, ref)
    return cv2.warpAffine(src_img, transform, crop_size)
def KFold(n=6000, n_folds=10, shuffle=False):
    """Split indices [0, n) into n_folds contiguous (train, test) partitions.

    BUG FIX: the slice bounds used `/` (true division), which yields floats and
    raises TypeError under Python 3; `//` keeps the original integer semantics.

    NOTE(review): the `shuffle` flag is accepted but ignored, matching the
    original behavior — confirm whether shuffling was ever intended.

    :param n: total number of samples
    :param n_folds: number of folds
    :return: list of [train_indices, test_indices] pairs
    """
    folds = []
    base = list(range(n))
    for i in range(n_folds):
        test = base[i * n // n_folds:(i + 1) * n // n_folds]
        train = list(set(base) - set(test))
        folds.append([train, test])
    return folds
def eval_acc(threshold, diff):
    """Compute pair-verification accuracy at a given similarity threshold.

    :param threshold: similarity cutoff; entries above it are predicted "same"
    :param diff: iterable of records where index 2 is the similarity and
                 index 3 is the ground-truth same/different label
    :return: fraction of correctly classified pairs
    """
    y_predict = np.array([1 if float(entry[2]) > threshold else 0 for entry in diff])
    y_true = np.array([int(entry[3]) for entry in diff])
    accuracy = 1.0 * np.count_nonzero(y_true == y_predict) / len(y_true)
    return accuracy
def find_best_threshold(thresholds, predicts):
    """Scan thresholds in order and return the one with the highest accuracy.

    Ties are broken in favor of the later threshold (>= comparison).
    """
    best_threshold, best_acc = 0, 0
    for candidate in thresholds:
        acc = eval_acc(candidate, predicts)
        if acc >= best_acc:
            best_acc, best_threshold = acc, candidate
    return best_threshold
# ---------------------------------------------------------------------------
# LFW verification: embed 6000 face pairs with the saved feature/fc networks
# and report 10-fold cross-validated accuracy.
# Requires: data/lfw_landmark.txt, data/pairs.txt, the LFW zip, and CUDA.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch sphereface lfw')
parser.add_argument('--net', '-n', default='sphere20a', type=str)
parser.add_argument('--lfw', default='lfw/lfw.zip', type=str)
parser.add_argument('--model', '-m', default='sphere20a.pth', type=str)
parser.add_argument('--epoch_num', type=str)
args = parser.parse_args()

predicts = []
# Load the feature extractor and fc head checkpoints for the requested epoch.
featureNet = getattr(net_sphere, args.net)()
featureNet.load_state_dict(torch.load('saved_models/featureNet_' + args.epoch_num + '.pth'))
featureNet.cuda()
featureNet.eval()
fcNet = getattr(net_sphere, "fclayers")()
fcNet.load_state_dict(torch.load("saved_models/fcNet_" + args.epoch_num + ".pth"))
fcNet.cuda()
fcNet.feature = True
fcNet.eval()

zfile = zipfile.ZipFile(args.lfw)

# Parse the 5-point landmarks, keyed by image path inside the zip.
landmark = {}
with open('data/lfw_landmark.txt') as f:
    landmark_lines = f.readlines()
for line in landmark_lines:
    l = line.replace('\n', '').split('\t')
    landmark[l[0]] = [int(k) for k in l[1:]]

with open('data/pairs.txt') as f:
    pairs_lines = f.readlines()[1:]

for i in range(6000):
    if (i % 100 == 0):
        print("done:", i)
    p = pairs_lines[i].replace('\n', '').split('\t')
    if 3 == len(p):  # 3 fields: same-identity pair
        sameflag = 1
        name1 = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1]))
        name2 = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[2]))
    if 4 == len(p):  # 4 fields: different-identity pair
        sameflag = 0
        name1 = p[0] + '/' + p[0] + '_' + '{:04}.jpg'.format(int(p[1]))
        name2 = p[2] + '/' + p[2] + '_' + '{:04}.jpg'.format(int(p[3]))
    img1 = alignment(cv2.imdecode(np.frombuffer(zfile.read(name1), np.uint8), 1), landmark[name1])
    img2 = alignment(cv2.imdecode(np.frombuffer(zfile.read(name2), np.uint8), 1), landmark[name2])
    # Batch both faces plus their horizontal flips (4 crops total).
    imglist = [img1, cv2.flip(img1, 1), img2, cv2.flip(img2, 1)]
    # Use a distinct index here so the pair index `i` is not shadowed.
    for j in range(len(imglist)):
        imglist[j] = imglist[j].transpose(2, 0, 1).reshape((1, 3, 112, 96))
        imglist[j] = (imglist[j] - 127.5) / 128.0
    img = np.vstack(imglist)
    img = Variable(torch.from_numpy(img).float(), volatile=True).cuda()
    output = featureNet(img)
    output = fcNet(output)
    f = output.data
    f1, f2 = f[0], f[2]  # embeddings of the two un-flipped crops
    cosdistance = f1.dot(f2) / (f1.norm() * f2.norm() + 1e-5)
    predicts.append('{}\t{}\t{}\t{}\n'.format(name1, name2, cosdistance, sameflag))

accuracy = []
thd = []
folds = KFold(n=6000, n_folds=10, shuffle=False)
thresholds = np.arange(-1.0, 1.0, 0.005)
# BUG FIX: materialize the map() before np.array — under Python 3,
# np.array(map(...)) builds a 0-d object array and the fancy indexing
# `predicts[train]` below fails.
predicts = np.array(list(map(lambda line: line.strip('\n').split(), predicts)))
for idx, (train, test) in enumerate(folds):
    best_thresh = find_best_threshold(thresholds, predicts[train])
    accuracy.append(eval_acc(best_thresh, predicts[test]))
    thd.append(best_thresh)
print('LFWACC={:.4f} std={:.4f} thd={:.4f}'.format(np.mean(accuracy), np.std(accuracy), np.mean(thd)))
|
import os
import unittest
import mock
from sia_load_tester import jobs
from sia_load_tester import sia_client as sc
from sia_load_tester import upload_queue
class GenerateUploadQueueTest(unittest.TestCase):
    """Tests for upload_queue.from_upload_jobs_and_sia_client."""

    def setUp(self):
        self.mock_sia_api_impl = mock.Mock()
        self.mock_sia_client = sc.SiaClient(
            self.mock_sia_api_impl, sleep_fn=mock.Mock())
        # Save the original path separator so tests that modify it can be
        # undone in tearDown.
        self.original_path_sep = os.path.sep

    def tearDown(self):
        os.path.sep = self.original_path_sep

    @staticmethod
    def _renter_file(sia_path, local_path, redundancy, filesize,
                     uploadedbytes):
        # Build one fully-uploaded renter-file record as the Sia API returns it.
        return {
            u'available': True,
            u'redundancy': redundancy,
            u'siapath': sia_path,
            u'localpath': local_path,
            u'uploadprogress': 100,
            u'renewing': True,
            u'filesize': filesize,
            u'uploadedbytes': uploadedbytes,
            u'expiration': 149134
        }

    def test_generates_empty_queue_when_all_files_already_on_sia(self):
        upload_jobs = [
            jobs.Job(local_path='/dummy-path/a.txt', sia_path='a.txt'),
            jobs.Job(local_path='/dummy-path/foo/b.txt', sia_path='foo/b.txt'),
            jobs.Job(
                local_path='/dummy-path/fiz/baz/c.txt',
                sia_path='fiz/baz/c.txt')
        ]
        self.mock_sia_api_impl.get_renter_files.return_value = {
            u'files': [
                self._renter_file(u'a.txt', u'/dummy-path/a.txt', 3.4,
                                  204776186, 822083584),
                self._renter_file(u'foo/b.txt', u'/dummy-path/foo/b.txt', 3.5,
                                  236596539, 1006632960),
                self._renter_file(u'fiz/baz/c.txt',
                                  u'/dummy-path/fiz/baz/c.txt', 3.5,
                                  213121263, 1010827264),
            ]
        }

        queue = upload_queue.from_upload_jobs_and_sia_client(
            upload_jobs, self.mock_sia_client)

        self.assertEqual(0, queue.qsize())

    def test_generates_upload_queue_when_no_files_are_on_sia(self):
        upload_jobs = [
            jobs.Job(local_path='/dummy-root/a.txt', sia_path='a.txt'),
            jobs.Job(
                local_path='/dummy-root/fiz/baz/c.txt',
                sia_path='fiz/baz/c.txt'),
            jobs.Job(local_path='/dummy-root/foo/b.txt', sia_path='foo/b.txt'),
        ]
        self.mock_sia_api_impl.get_renter_files.return_value = {u'files': []}

        queue = upload_queue.from_upload_jobs_and_sia_client(
            upload_jobs, self.mock_sia_client)

        # Jobs come back in their original order.
        self.assertEqual(
            jobs.Job(local_path='/dummy-root/a.txt', sia_path='a.txt'),
            queue.get())
        self.assertEqual(
            jobs.Job(
                local_path='/dummy-root/fiz/baz/c.txt',
                sia_path='fiz/baz/c.txt'), queue.get())
        self.assertEqual(
            jobs.Job(local_path='/dummy-root/foo/b.txt', sia_path='foo/b.txt'),
            queue.get())
|
from providers.spotify.spotify_id import SpotifyId
from providers.entities.playlist import Playlist
from typing import List
from providers.entities.song import Song
from providers.spotify.spotify_playlist_provider import SpotifyPlaylistProvider
def test_search():
    """Fetching songs from a known public playlist must yield a non-empty list."""
    provider: SpotifyPlaylistProvider = SpotifyPlaylistProvider(
        playlist=Playlist(
            playlist_id=SpotifyId("37i9dQZF1DXc4xFsxShkAv"),
            name="Led Zeppelin"))
    songs: List[Song] = provider.get_songs()
    if not songs:
        raise AssertionError("result must not be empty")
|
import matplotlib.pyplot as plt
from envs.gridworld import AGENT_SIZES, RGB_COLORS
import numpy as np
import os
from envs.key_door import *
from envs.gridworld import *
from utils.gen_utils import *
from model import get_discrete_representation
from torchvision.utils import save_image
def visualize_representations(env, model):
    """Dispatch to the visualization routine that matches the environment.

    - gridworld with 1 agent: single-agent cluster map
    - gridworld with 2 agents: two-agent cluster maps
    - key-wall / key-corridor: agent-and-key visualization
    Returns the produced figure(s), or None when no visualizer applies.
    """
    if env.name == 'gridworld':
        if env.n_agents == 1:
            return visualize_clusters_online_single(env, model)
        if env.n_agents == 2:
            return visualize_clusters_online_double(env, model)
        return None
    if env.name in ('key-wall', 'key-corridor'):
        return visualize_single_agent_and_key(env, model)
    return None
def visualize_clusters_online_single(env, model):
    """Plot a 16x16 map of the model's discrete code over a single agent's grid positions.

    Each cell encodes the agent at that (col, row) position, and the per-onehot
    codes are collapsed into one integer label (base model.z_dim).
    """
    assert env.n_agents == 1
    n_factors = model.num_onehots
    step = env.grid_n // 16
    row_idx = np.arange(0, env.grid_n, step)
    col_idx = np.arange(0, env.grid_n, step)
    label_rows = []
    for col in col_idx:
        # one column of positions: fixed x=col, sweep over rows
        positions = np.transpose(np.stack((np.repeat(col, len(row_idx)), row_idx)), (1, 0))
        batch = np_to_var(position_to_image(positions, env.n_agents, env.grid_n)).permute(0, 3, 1, 2)
        codes = model.encode(batch, vis=True).cpu().numpy()
        labels = np.sum(np.array([codes[:, i] * model.z_dim ** (n_factors - i - 1)
                                  for i in range(n_factors)]),
                        axis=0, dtype=int)
        label_rows.append(labels)
    cluster_map = np.array(label_rows)
    print("cluster map")
    print(cluster_map)
    fig = plt.figure()
    plt.imshow(cluster_map, cmap='gist_rainbow')
    return fig
def visualize_clusters_online_double_fix_one_agent(model, n_agents, grid_n, fixed_agent=0):
    """Visualize discrete-code clusters for two agents while one agent stays fixed.

    For each of 16 fixed positions (a 4x4 lattice over the grid) of agent
    `fixed_agent`, the other agent is swept over a 16x16 lattice of positions;
    every joint configuration is encoded and the combined code label is shown
    as one subplot per fixed position.

    :param model: encoder exposing encode(), num_onehots and z_dim
    :param n_agents: total number of agents (the hstack below assumes 2)
    :param grid_n: grid width
    :param fixed_agent: which agent (0 or 1) is held fixed
    :return: (cluster figure, onehot-0 figure, onehot-1 figure)
    """
    fig = plt.figure()
    rows = np.arange(0, grid_n, grid_n // 16)
    cols = np.arange(0, grid_n, grid_n // 16)
    # 4x4 lattice of positions at which the fixed agent is pinned
    fixed_poses = np.arange(0, grid_n, grid_n // 4)
    n_subplots = 4
    plot_idx = 1
    num_factors = model.num_onehots
    # raw per-onehot code maps, collected for the companion onehot figures
    onehots_0 = []
    onehots_1 = []
    for idx, fixed_row in enumerate(fixed_poses):
        for fixed_col in fixed_poses:
            fixed_pos = np.tile(np.array((fixed_row, fixed_col)), (len(cols), 1))
            cluster_map = []
            oh0_map = []
            oh1_map = []
            for c in cols:
                # one column of moving-agent positions: fixed x=c, sweep rows
                batch_pos = np.transpose(np.stack((np.repeat(c, len(rows)), rows)), (1, 0))
                # joint position layout depends on which agent is fixed
                batch_pos = np.hstack((batch_pos, fixed_pos)) if fixed_agent == 0 else np.hstack((fixed_pos, batch_pos))
                im_torch = np_to_var(position_to_image(batch_pos, n_agents, grid_n)).permute(0, 3, 1, 2)
                zs = model.encode(im_torch, vis=True).cpu().numpy()
                oh0_map.append(zs[:,0])
                oh1_map.append(zs[:,1])
                # collapse per-onehot codes into a single integer label (base z_dim)
                z_labels = np.sum(
                    np.array([zs[:, i] * model.z_dim ** (num_factors - i - 1) for i in range(num_factors)]),
                    axis=0, dtype=int)
                cluster_map.append(z_labels)
            cluster_map = np.array(cluster_map)
            ax = fig.add_subplot(n_subplots, n_subplots, plot_idx)
            ax.tick_params(axis='both', which='both', bottom=False, top=False, left=False, labelbottom=False,
                           labelleft=False)
            print("cluster map %d fixing agent %d" % (idx, fixed_agent))
            print(cluster_map)
            plt.imshow(cluster_map, cmap='gist_rainbow')
            onehots_0.append(np.array(oh0_map))
            onehots_1.append(np.array(oh1_map))
            plot_idx += 1
    onehot_0_fig = plot_one_onehot_2agents(onehots_0, n_subplots**2)
    onehot_1_fig = plot_one_onehot_2agents(onehots_1, n_subplots**2)
    return fig, onehot_0_fig, onehot_1_fig
def visualize_clusters_online_double(env, model):
    """Run the two-agent cluster visualization twice, holding each agent fixed in turn.

    :return: 6-tuple — (cluster, onehot0, onehot1) figures for fixed_agent=0,
             followed by the same three for fixed_agent=1.
    """
    figures = []
    for fixed in (0, 1):
        figures.extend(visualize_clusters_online_double_fix_one_agent(
            model, env.n_agents, env.grid_n, fixed_agent=fixed))
    return tuple(figures)
def position_to_image(positions, n_agents, grid_n):
    '''
    Render a batch of agent xy positions as RGB grid images.

    Each agent is drawn as a colored rectangle (size from AGENT_SIZES, color
    from RGB_COLORS) that wraps around the grid edges.

    :param positions: [n_samples, 2*n_agents] array of (x, y) per agent
    :param n_agents: number of agents
    :param grid_n: grid width/height (only 16 and 64 are supported)
    :return: [n_samples, grid_n, grid_n, 3] float images
    '''
    if grid_n not in (16, 64):
        raise NotImplementedError("Grid size not supported: %d" % grid_n)
    n_samples = positions.shape[0]
    images = np.zeros((n_samples, grid_n, grid_n, 3))
    sample_idx = np.arange(n_samples)
    for agent in range(n_agents):
        width, height = AGENT_SIZES[agent]
        xs, ys = positions[:, 2 * agent], positions[:, 2 * agent + 1]
        color = np.tile(np.array(list(RGB_COLORS.values())[agent]), (n_samples, 1))
        for dx in range(width):
            for dy in range(height):
                # modulo wraps agents that overhang the grid boundary
                images[sample_idx, (xs + dx) % grid_n, (ys + dy) % grid_n] += color
    return images
def visualize_attn_map(amaps):
    """Arrange 16 attention maps (each B x 1 x W x H tensors) as a 4x4 greyscale grid."""
    fig = plt.figure()
    assert len(amaps) == 16, "number of attention map samples should be 16"
    for idx, amap in enumerate(amaps):
        heat = amap[0].cpu().detach().numpy()
        ax = fig.add_subplot(4, 4, idx + 1)
        ax.tick_params(axis='both', which='both', bottom=False, top=False,
                       left=False, labelbottom=False, labelleft=False)
        plt.imshow(heat, cmap='Greys')
    return fig
def plot_one_onehot_2agents(onehot_labels, n):
    """Plot each label map in `onehot_labels`, reshaped to n x n, as a 4x4 subplot grid."""
    fig = plt.figure()
    n_subplots = 4
    for plot_idx, labels in enumerate(onehot_labels, start=1):
        ax = fig.add_subplot(n_subplots, n_subplots, plot_idx)
        ax.tick_params(axis='both', which='both', bottom=False, top=False,
                       left=False, labelbottom=False, labelleft=False)
        plt.imshow(labels.reshape(n, n), cmap='gist_rainbow')
    return fig
def sample_trajectory(env, len_traj=400, choose_agent_i=0):
    '''
    Sample one random trajectory of length len_traj, moving only agent
    choose_agent_i while all other agents stay fixed.

    :param env: environment exposing step() and get_obs()
    :param len_traj: number of steps to take
    :param choose_agent_i: index of the agent to move
    :return: int np array of observations, shape [len_traj, obs_dim]
    '''
    dim = env.n_agents * 2
    # unit moves along every axis, positive and negative
    unit_moves = np.eye(dim)
    unit_moves = np.concatenate((unit_moves, -1 * unit_moves))
    # keep only the 4 moves (±x, ±y) that touch the chosen agent
    start = choose_agent_i * 2
    candidate_actions = np.concatenate((unit_moves[start:start + 2],
                                        unit_moves[dim + start:dim + start + 2]))
    observations = []
    for _ in range(len_traj):
        env.step(candidate_actions[np.random.randint(len(candidate_actions))])
        observations.append(env.get_obs())
    return np.array(observations).astype('int')
def get_factorization_hist(env, model, len_traj=600, n_traj=10):
    """Route to the factorization test that matches the environment type.

    Returns the corresponding histograms, or None for unrecognized env names.
    """
    if env.name == 'gridworld':
        return test_factorization_fix_agents(env, model, len_traj, n_traj)
    if env.name in ('key-wall', 'key-corridor'):
        return test_factorization_single_agent_key(env, model)
    return None
def test_factorization_fix_agents(env, model, len_traj=600, n_traj=10):
    '''
    Samples random trajectories moving one agent at a time and returns two lists
    of histograms — hamming distances between consecutive codes, and which
    onehot slot changed — aggregated over all n_traj trajectories per agent.
    Histogram lists should be written to tensorboard.

    BUG FIX: the statistics were previously re-initialized inside the
    per-trajectory loop, so each histogram only reflected the final trajectory
    (contradicting the outer initialization and this docstring); counts are now
    accumulated across all n_traj trajectories.

    :param env: environment to sample trajectories from
    :param model: CPC model
    :param len_traj: length of trajectories to sample
    :param n_traj: number of trajectories to sample
    :return: tuple of two lists of histogram figs (plt.fig), each length env.n_agents
             1. list of histograms of hamming distances
             2. list of histograms of onehot changes, moving one agent at a time
    '''
    hamming_hist_lst = []
    onehot_hist_lst = []
    for i in range(env.n_agents):
        dist_onehots = []
        dist_hamming = []
        for n in range(n_traj):
            x = sample_trajectory(env, len_traj=len_traj, choose_agent_i=i)
            zs = get_discrete_representation(model, x)
            # per-step hamming distance between consecutive discrete codes
            dist_hamming.extend(np.sum(zs[1:] - zs[:-1] != 0, axis=1))
            # record which onehot slot changed at every step
            prev_z = zs[0]
            for zlabel in zs[1:]:
                for k in range(model.num_onehots):
                    if zlabel[k] != prev_z[k]:
                        dist_onehots.append(k)
                prev_z = zlabel
        hammings_hist = plt.figure()
        plt.hist(dist_hamming)
        plt.ylabel("hamming distance distribution moving only agent %d" % i)
        hamming_hist_lst.append(hammings_hist)
        onehots_hist = plt.figure()
        print("dist_onehots", dist_onehots)
        plt.hist(dist_onehots, bins=np.arange(model.num_onehots + 1))
        plt.ylabel("onehot changes only moving agent %d" % i)
        onehot_hist_lst.append(onehots_hist)
    return hamming_hist_lst, onehot_hist_lst
def save_plots(savepath, losses, est_lbs):
    """Save the loss curve and estimated-lower-bound curve as separate PNGs.

    BUG FIX: both curves were previously drawn on the same implicit pyplot
    figure, so est_lowerbounds.png also contained the loss curve; each plot now
    gets its own figure.

    :param savepath: directory to write loss.png and est_lowerbounds.png into
    :param losses: sequence of loss values per step/epoch
    :param est_lbs: sequence of estimated lower-bound values
    """
    plt.figure()
    plt.plot(losses)
    plt.ylabel('C_loss')
    loss_path = os.path.join(savepath, "loss.png")
    plt.savefig(loss_path)
    plt.figure()
    plt.plot(est_lbs)
    plt.ylabel('estimated lowerbound')
    lb_path = os.path.join(savepath, "est_lowerbounds.png")
    plt.savefig(lb_path)
    plt.close('all')
def visualize_node(node, all_labels, dataset, grid_n, savepath):
    """Save up to 64 shuffled anchor samples labelled `node` as one image grid."""
    member_idx = np.where(all_labels == node)[0]
    # every 8th entry of dataset[0] is an anchor frame
    anchors = dataset[0][::8]
    samples = anchors[member_idx][:64]
    np.random.shuffle(samples)
    # pad a third (empty) channel so the 2-channel samples render as RGB
    samples = np.concatenate((samples, np.zeros((len(samples), grid_n, grid_n, 1))), axis=3)
    tensor = np_to_var(samples).permute(0, 3, 1, 2)
    save_image(tensor, os.path.join(savepath, "node_%d_samples.png" % node), padding=16)
def get_2agents_density(node, labels, dataset):
    """Return per-agent occupancy maps over anchors labelled `node`, normalized to max 1.

    :param node: cluster label to select
    :param labels: label per anchor (anchors are every 8th entry of dataset[0])
    :param dataset: tuple whose first element holds [N, H, W, 2] observations
    :return: (agent-0 density, agent-1 density), each H x W with max value 1
    """
    member_idx = np.where(labels == node)[0]
    anchors = dataset[0][::8][member_idx]
    occupancy = anchors.sum(axis=0)
    agent_0, agent_1 = occupancy[:, :, 0], occupancy[:, :, 1]
    return agent_0 / agent_0.max(), agent_1 / agent_1.max()
def visualize_density_failed_2agents(cur_pos, cur_node, node_to_go, labels, dataset, savepath, epoch):
    """Visualize where execute_plan failed, per agent.

    For each agent, plots an RGB image whose channels are: density of the
    current node (R), density of the target node (G), and a one-hot marker at
    the agent's actual position (B). Saved under savepath.
    """
    current_densities = get_2agents_density(cur_node, labels, dataset)
    target_densities = get_2agents_density(node_to_go, labels, dataset)
    fig = plt.figure()
    for agent in range(2):
        position_marker = np.zeros(current_densities[agent].shape)
        position_marker[cur_pos[2 * agent], cur_pos[2 * agent + 1]] = 1
        fig.add_subplot(1, 2, agent + 1)
        composite = np.stack(
            [current_densities[agent], target_densities[agent], position_marker], axis=2)
        plt.imshow(composite)
    fname = "epoch_%d_failed_to_leave_node_%d" % (epoch, cur_node)
    plt.savefig(os.path.join(savepath, fname))
def _count_onehot_change_indices(zs, num_onehots):
    """Return one entry per onehot slot that changed between consecutive
    discrete representations in `zs` (the entry is the slot index)."""
    changed = []
    prev_z = zs[0]
    for zlabel in zs[1:]:
        for k in range(num_onehots):
            if zlabel[k] != prev_z[k]:
                changed.append(k)
        prev_z = zlabel
    return changed


def test_factorization_single_agent_key(env, model, n_traj=10, len_traj=200):
    '''
    Move the agent with and without a key present and count onehot changes for:
    1. Any agent movement with or without key, not changing key state within trajectory
    2. Fixing agent position, placing/taking away key

    Returns a list of matplotlib histogram figures: one for agent movement,
    then one per key.
    '''
    onehot_hist_lst = []
    # 1. -------------- test factorization of agent
    # The change-counting loop was duplicated verbatim for the with-key and
    # no-key trajectories; it now lives in _count_onehot_change_indices.
    dist_onehots_a = []
    for traj in range(n_traj):
        env.reset()
        traj_with_key = env.sample_random_trajectory(len_traj, interact_with_key=False)
        zs = get_discrete_representation(model, traj_with_key)
        dist_onehots_a.extend(_count_onehot_change_indices(zs, model.num_onehots))
        env.remove_all_keys()
        traj_no_key = env.sample_random_trajectory(len_traj, interact_with_key=False)
        zs = get_discrete_representation(model, traj_no_key)
        dist_onehots_a.extend(_count_onehot_change_indices(zs, model.num_onehots))
    onehots_hist = plt.figure()
    print("dist_onehots for moving agent", dist_onehots_a)
    plt.hist(dist_onehots_a, bins=np.arange(model.num_onehots + 1))
    plt.ylabel("onehot changes only moving agent")
    onehot_hist_lst.append(onehots_hist)
    # 2. ----------------- test factorization of key(s)
    for key_idx in range(env.n_keys):
        dist_onehots_k = []
        for i in range(len_traj):
            env.reset()
            z_key = get_discrete_representation(model, env.get_obs(), single=True)
            env.remove_key(key_idx, 0)
            z_no_key = get_discrete_representation(model, env.get_obs(), single=True)
            for k in range(model.num_onehots):
                if z_no_key[k] != z_key[k]:
                    dist_onehots_k.append(k)
        onehots_hist = plt.figure()
        print("dist_onehots for changing key %d" % key_idx, dist_onehots_k)
        plt.hist(dist_onehots_k, bins=np.arange(model.num_onehots + 1))
        plt.ylabel("onehot changes only placing/removing key %d (fixing agent)" % key_idx)
        onehot_hist_lst.append(onehots_hist)
    return onehot_hist_lst
def _scan_agent_positions(env, model, key_flag):
    """Place the agent at every grid cell (where legal) and record the
    discrete label the model assigns to the observation.

    Returns a (GRID_N, GRID_N) array; cells where placement failed stay -1.
    """
    label_map = np.full((GRID_N, GRID_N), -1)
    for x in range(GRID_N):
        for y in range(GRID_N):
            pos = (x, y)
            # NOTE(review): the observation is captured *before*
            # try_place_agent runs, matching the original code — confirm
            # this ordering is intended.
            obs = env.get_obs()
            if env.try_place_agent(pos):
                if model.encoder_form == 'cswm-key-gt':
                    z = model.encode((np_to_var(obs).unsqueeze(0).permute(0, 3, 1, 2), key_flag), vis=True)
                else:
                    z = model.encode(np_to_var(obs).unsqueeze(0).permute(0, 3, 1, 2), vis=True)
                label_map[pos] = tensor_to_label(z[0], model.num_onehots, model.z_dim)
    return label_map


def visualize_single_agent_and_key(env, model):
    """Map the model's discrete labels over all agent positions, with and
    without keys on the grid. Returns the two matplotlib figures.

    BUGFIX: the original no-key scan accidentally nested a duplicated
    `for y in range(GRID_N)` loop (shadowing y and running GRID_N extra
    redundant passes); both scans now share one correct helper.
    """
    # try to place agent at each position, with and without key on the grid
    assert isinstance(env, KeyWall) or isinstance(env, KeyCorridor)
    env.reset()
    map_key = _scan_agent_positions(env, model, 1)
    env.remove_all_keys()
    map_no_key = _scan_agent_positions(env, model, 0)
    print("map with key")
    print(map_key)
    print()
    print("map no key")
    print(map_no_key)
    fig0 = plt.figure()
    plt.imshow(map_key, cmap='gist_rainbow')
    fig1 = plt.figure()
    plt.imshow(map_no_key, cmap='gist_rainbow')
    return fig0, fig1
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from posts.models import Post, Group
User = get_user_model()
class PostModelTest(TestCase):
    """Model-level checks for Post: verbose names and __str__ behaviour."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.user = User.objects.create_user(username='test_user')
        cls.group = Group.objects.create(title='test_group')
        cls.post = Post.objects.create(
            text='Тестовый текст один',
            author=cls.user,
            group=cls.group,
        )

    def test_verbose_name(self):
        """Each field exposes the expected human-readable verbose name."""
        expected_verbose_names = {
            'text': 'Текст',
            'author': 'Автор',
            'group': 'Сообщество',
            'pub_date': 'Дата публикации'
        }
        for field_name, verbose in expected_verbose_names.items():
            with self.subTest(value=field_name):
                actual = self.post._meta.get_field(field_name).verbose_name
                self.assertEqual(actual, verbose)

    def test_object_name_is_text_field(self):
        """str(post) equals the first fifteen characters of its text."""
        self.assertEqual(self.post.text[:15], str(self.post))

    def test_text_field_max_length(self):
        """The string representation is truncated to fifteen characters."""
        self.assertEqual(15, len(str(self.post)))
class GroupModelTest(TestCase):
    """Model-level check for Group's string representation."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.group = Group.objects.create(
            title='Тестовое имя сообщества',
            slug='address',
            description='Тестовое описание сообщества'
        )

    def test_object_name_is_title_field(self):
        """str(group) equals the group's title."""
        self.assertEqual(self.group.title, str(self.group))
|
# -*- coding: utf-8 -*-
# tacibot core
# Handles all important main features of any bot.
'''Core File'''
import discord
import os
from discord.ext import commands
import time
import asyncio
import sys
import cpuinfo
import math
import psutil
from extensions.models.help import TaciHelpCommand
class Core(commands.Cog):
    """Provides all core features of a bot."""

    def __init__(self, bot):
        # Main Stuff
        self.bot = bot
        self.extensions_list = bot.extensions_list
        self.emoji = "\U0001F4E6"

        # Help Command - swap in the custom help command when configured,
        # remembering the original so cog_unload can restore it.
        self._original_help_command = bot.help_command
        if bot.custom_help:
            bot.help_command = TaciHelpCommand()
        bot.help_command.cog = self

    def _humanbytes(self, B) -> str:  # function lifted from StackOverflow
        """Return the given bytes as a human friendly KB, MB, GB, or TB string."""
        B = float(B)
        KB = float(1024)
        MB = float(KB ** 2)  # 1,048,576
        GB = float(KB ** 3)  # 1,073,741,824
        TB = float(KB ** 4)  # 1,099,511,627,776
        if B < KB:
            # BUGFIX: the original condition `0 == B > 1` is a chained
            # comparison (0 == B and B > 1) that is always False, so every
            # size printed as singular "Byte". Pluralize except for exactly 1.
            return '{0} {1}'.format(
                B, 'Byte' if B == 1 else 'Bytes')
        elif KB <= B < MB:
            return '{0:.2f} KB'.format(B/KB)
        elif MB <= B < GB:
            return '{0:.2f} MB'.format(B/MB)
        elif GB <= B < TB:
            return '{0:.2f} GB'.format(B/GB)
        elif TB <= B:
            return '{0:.2f} TB'.format(B/TB)

    @commands.command(aliases=['info', 'source', 'server'])
    async def about(self, ctx):
        """Returns information about this bot."""
        # Assemble the about text from the bot's own metadata; optional
        # fields (repo, support server) are appended only when configured.
        msg = f"__**{self.bot.user.name}**__ - _{self.bot.description}_\n\n"
        msg += f"This instance by **{self.bot.appinfo.owner}.**\n\n"
        if self.bot.repo:
            msg += f"**Source Code:** _<{self.bot.repo}>_\n"
        if self.bot.support_server:
            msg += f"**Support Server:** _<{self.bot.support_server}>_\n\n"
        msg += "_Note: Please attempt to contact the hoster of any separate instances before this server._\n"
        msg += f"_See **{ctx.prefix}**`help` for help, `invite` to add the bot, and `stats` for statistics._"
        await ctx.send(msg)

    @commands.command(aliases=['addbot', 'connect', 'join'])
    async def invite(self, ctx):
        """Gets a link to invite this bot to your server."""
        msg = (
            "**Thanks for checking me out!**\n\n"
            "Use the following link to add me:\n"
            f"*<https://discordapp.com/oauth2/authorize?client_id={self.bot.user.id}&scope=bot"
        )
        # Append the permission bits only when the bot defines them.
        if self.bot.perms:
            msg += f"&permissions={self.bot.perms}>*"
        else:
            msg += ">*"
        await ctx.send(msg)

    @commands.command()
    async def stats(self, ctx):
        """Provides statistics on the bot itself."""
        mem = psutil.virtual_memory()
        currproc = psutil.Process(os.getpid())
        # Use psutil's named fields instead of bare tuple indexes.
        total_ram = self._humanbytes(mem.total)
        available_ram = self._humanbytes(mem.available)
        usage = self._humanbytes(currproc.memory_info().rss)
        msg = f"""
```
Total RAM: {total_ram}
Available RAM: {available_ram}
RAM used by bot: {usage}
Number of bot commands: {len(ctx.bot.commands)}
Number of extensions present: {len(ctx.bot.cogs)}
```
"""
        await ctx.send(msg)

    @commands.command()
    async def ping(self, ctx):
        """Checks the ping of the bot."""
        # Measure the round trip of sending one message, in milliseconds.
        before = time.monotonic()
        pong = await ctx.send("...")
        after = time.monotonic()
        ping = (after - before) * 1000
        await pong.edit(content="`PING discordapp.com {}ms`".format(int(ping)))

    @commands.group(aliases=['extensions', 'ext'],
                    invoke_without_command=True)
    @commands.is_owner()
    async def extend(self, ctx, name: str = None):
        """Provides status of extensions and lets you hotswap extensions."""
        # Provides status of extension
        if name is not None:
            status = "is" if name in self.extensions_list else "is not"
            msg = f"**{name}** {status} currently loaded and/or existent."
        # Handles empty calls
        else:
            msg = (
                "**Nothing was provided!**\n\n"
                "Please provide an extension name for status, "
                "or provide a subcommand."
            )
        # Sends completed message
        await ctx.send(msg)

    @extend.command(aliases=['le', 'l'])
    @commands.is_owner()
    async def load(self, ctx, name: str):
        """Load an extension into the bot."""
        m = await ctx.send(f'Loading {name}')
        extension_name = f'extensions.{name}'
        if extension_name not in self.extensions_list:
            try:
                self.bot.load_extension(extension_name)
                self.extensions_list.append(extension_name)
                await m.edit(content='Extension loaded.')
            except Exception as e:
                # Surface the failure back to the invoker instead of crashing.
                await m.edit(
                    content=f'Error while loading {name}\n`{type(e).__name__}: {e}`')
        else:
            await m.edit(content='Extension already loaded.')

    @extend.command(aliases=["ule", "ul"])
    @commands.is_owner()
    async def unload(self, ctx, name: str):
        """Unload an extension from the bot."""
        m = await ctx.send(f'Unloading {name}')
        extension_name = f'extensions.{name}'
        if extension_name in self.extensions_list:
            self.bot.unload_extension(extension_name)
            self.extensions_list.remove(extension_name)
            await m.edit(content='Extension unloaded.')
        else:
            await m.edit(content='Extension not found or not loaded.')

    @extend.command(aliases=["rle", "rl"])
    @commands.is_owner()
    async def reload(self, ctx, name: str):
        """Reload an extension of the bot."""
        m = await ctx.send(f'Reloading {name}')
        extension_name = f'extensions.{name}'
        if extension_name in self.extensions_list:
            self.bot.unload_extension(extension_name)
            try:
                self.bot.load_extension(extension_name)
                await m.edit(content='Extension reloaded.')
            except Exception as e:
                # Reload failed after unloading, so the extension is gone;
                # drop it from the bookkeeping list to stay consistent.
                self.extensions_list.remove(extension_name)
                await m.edit(
                    content=f'Failed to reload extension\n`{type(e).__name__}: {e}`')
        else:
            await m.edit(content='Extension isn\'t loaded.')

    @extend.command(name='list')
    async def list_cmd(self, ctx):
        """Lists all extensions loaded by the bot."""
        # Message Construction
        msg = "**Loaded Extensions**\n\n"
        msg += '\n'.join(f'`{e}`' for e in self.extensions_list)
        msg += "\n\n_See the other subcommands of this command to manage them._"
        # Message Sending
        await ctx.send(msg)

    @commands.command(aliases=['exit', 'reboot'])
    @commands.is_owner()
    async def restart(self, ctx):
        """Turns the bot off."""
        await ctx.send(":zzz: **Restarting...**")
        # sys.exit is preferable to the site-provided exit() builtin, which
        # may be absent in embedded or frozen interpreters.
        sys.exit()

    @commands.command()
    @commands.is_owner()
    async def leave(self, ctx):
        """Makes the bot leave the server this was called in."""
        if ctx.guild:
            await ctx.send(
                "\U0001F4A8 **Leaving server.** "
                "_If you want me back, add me or get an admin to._")
            await ctx.guild.leave()
        else:
            await ctx.send(
                "**Can't leave!** _This channel is not inside a guild._")

    def cog_unload(self):
        # Restore whatever help command the bot had before this cog loaded.
        self.bot.help_command = self._original_help_command
def setup(bot):
    """Extension entry point: register the Core cog on the bot."""
    core_cog = Core(bot)
    bot.add_cog(core_cog)
|
import numpy as np
import pandas as pd
import cv2
class target_reader:
    '''
    Reads in an image of a used archery target and uses openCV to determine
    position and score value for each shot. __init__ initializes session
    settings and run performs analysis.
    '''
    # Class-wide settings
    # Real-world target dimensions in cm
    cm_width = 42
    blue_ratio = 24 / cm_width
    # HSV ranges for colored regions of the target
    colors = {
        'yellow': [{'low': np.array([15, 130, 130]),
                    'high': np.array([45, 255, 255])}],
        'red': [{'low': np.array([165, 130, 130]),
                 'high': np.array([180, 255, 255])},
                {'low': np.array([0, 130, 130]),
                 'high': np.array([15, 255, 255])}],
        'blue': [{'low': np.array([80, 80, 80]),
                  'high': np.array([130, 255, 255])}],
        'black': [{'low': np.array([0, 0, 0]),
                   'high': np.array([180, 255, 130])}],
    }
    # Counting order of outer ring for each colored region
    color_steps = {
        'yellow': 2,
        'red': 4,
        'blue': 6,
        'black': 8
    }

    def __init__(self, out_width=600):
        '''
        Establishes the output width of the processed target images

        Args:
            out_width (int): width/height of processed images in pixels

        Returns:
            None
        '''
        self.out_width = out_width
        # Pixels per scoring ring: the image spans cm_width cm and rings are
        # counted in half-cm steps (hence the factor of 2).
        self.score_step = out_width * 2 / self.cm_width
        return None

    def run(self, filename=None, file_obj=None):
        '''
        Runs all methods for image processing and scoring. Returns None if
        analysis is successful, error message if not. Results are saved in the
        class variable 'df' and image steps in 'stage_images.'

        Args:
            filename (str): filepath of the image to analyze
            file_obj (werkzeug file object): file object containing image

        Returns:
            None if successful, str containing error message if not
        '''
        # The reset/load logic is shared with read_image instead of being
        # duplicated inline (the original repeated it verbatim).
        error = self.read_image(filename=filename, file_obj=file_obj)
        if error:
            return error
        # Runs each step in the process, any error returns a message
        if self.remove_skew():
            return 'Could not find target corners'
        if self.standardize_size():
            return 'Could not identify target'
        if self.balance_contrast():
            return 'Could not balance contrast'
        if self.find_shots():
            return 'Could not detect shots'
        if self.get_shot_data():
            return 'Could not create shot dataframe'
        return None

    def remove_skew(self):
        '''
        Unskews perspective by moving each target corner to the corner of a new
        image

        Args:
            None

        Returns:
            None if successful, True if not
        '''
        # Function settings
        gray_width = 600
        filter_d = gray_width // 20
        filter_sigma = gray_width // 15
        canny_t1 = 100
        canny_t2 = 400
        dilate_kernel = 4
        perim_pct = .015
        min_area_ratio = .3
        # Converts colorspaces and resize images for edge detection
        img = self.image.copy()
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        height = int(gray_width / img.shape[1] * img.shape[0])
        gray = cv2.resize(gray, (gray_width, height))
        # Finds edges of target using grayscale image
        gray = cv2.bilateralFilter(gray,
                                   filter_d,
                                   filter_sigma,
                                   filter_sigma)
        edges = cv2.Canny(gray, canny_t1, canny_t2)
        kernel = np.ones((dilate_kernel, dilate_kernel), np.uint8)
        edges = cv2.dilate(edges, kernel, iterations=1)
        # Finds the longest contour that can be approximated by four coordinates
        # Returns error status if no such contour exists
        # NOTE(review): 3-value unpacking is the OpenCV 3.x findContours API;
        # OpenCV 4.x returns 2 values — confirm the pinned cv2 version.
        _, contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        paper = None
        max_perim = 0
        for c in contours:
            perim = cv2.arcLength(c, True)
            if perim > max_perim:
                approx = cv2.approxPolyDP(c, perim * perim_pct, True)
                if len(approx) == 4:
                    paper = approx
                    max_perim = perim
        if paper is None:
            return True
        # Reorders corner points to Top Left, Top Right, Bottom Right,
        # Bottom Left
        paper = paper.reshape(4, 2)
        bounds = paper.copy()
        sums = np.sum(paper, axis=1)
        diffs = np.diff(paper, axis=1)
        bounds[0] = paper[np.argmin(sums)]
        bounds[1] = paper[np.argmin(diffs)]
        bounds[2] = paper[np.argmax(sums)]
        bounds[3] = paper[np.argmax(diffs)]
        # Corrects skew and crops color image to paper
        bounds = (bounds * (img.shape[1] / gray_width)).astype('float32')
        top_w = np.linalg.norm(bounds[0] - bounds[1])
        btm_w = np.linalg.norm(bounds[2] - bounds[3])
        lft_h = np.linalg.norm(bounds[0] - bounds[3])
        rgt_h = np.linalg.norm(bounds[1] - bounds[2])
        new_w = int(min(top_w, btm_w))
        new_h = int(min(lft_h, rgt_h))
        new_bounds = np.array([
            [0, 0],
            [new_w, 0],
            [new_w, new_h],
            [0, new_h]
        ], dtype='float32')
        M = cv2.getPerspectiveTransform(bounds, new_bounds)
        img = cv2.warpPerspective(img, M, (new_w, new_h))
        # Returns error status if resized image area is below minimum ratio
        new_area = np.prod(img.shape)
        orig_area = np.prod(self.image.shape)
        if (new_area / orig_area) < min_area_ratio:
            return True
        # Saves image to class variables and returns status
        self.image = img
        self.stage_images.append(img.copy())
        return None

    def standardize_size(self):
        '''
        Resizes image to fit the standard template - meaning image and inner
        target circle dimensions match preset values

        Args:
            None

        Returns:
            None if successful, True if not
        '''
        # Function settings
        keys = ['yellow', 'red', 'blue']
        close_kernel = 2
        max_center_dist = self.out_width * .05
        # Finds circle centers and blue circle width
        hsv = cv2.cvtColor(self.image, cv2.COLOR_RGB2HSV)
        circle_data = []
        circle_contours = []
        for key in keys:
            mask = 0
            for rng in self.colors[key]:
                lyr = cv2.inRange(hsv, rng['low'], rng['high'])
                mask = cv2.bitwise_or(lyr, mask)
            kernel = np.ones((close_kernel, close_kernel), np.uint8)
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
            # Finds circle contour and contour center
            _, contours, __ = cv2.findContours(mask,
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
            areas = [cv2.contourArea(contour) for contour in contours]
            try:
                idx = np.argsort(areas)[-1]
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt): only "no contours found" is expected here.
            except (IndexError, ValueError):
                return True
            M = cv2.moments(contours[idx])
            center_x = int(M["m10"] / M["m00"])
            center_y = int(M["m01"] / M["m00"])
            circle_contours.append(contours[idx].copy())
            circle_data.append([areas[idx], center_x, center_y])
        # Checks circle data to make sure areas get larger by circle and centers
        # roughly align, returns error status if not
        circle_data = np.array(circle_data)
        target_center = np.mean(circle_data[:, 1:], axis=0).astype(int)
        last_area = 0
        for c in circle_data:
            dist_to_mean = np.linalg.norm(c[1:] - target_center)
            if c[0] < last_area or dist_to_mean > max_center_dist:
                return True
            last_area = c[0]
        # Scales image to final output size
        x, y, w, h = cv2.boundingRect(circle_contours[-1])
        blue_dim = self.blue_ratio * self.out_width
        scl_x = blue_dim / w
        scl_y = blue_dim / h
        img = cv2.resize(self.image, None, fx=scl_x, fy=scl_y)
        # Adds border if it is necessary to match final output size
        border_size = self.out_width // 10
        img = cv2.copyMakeBorder(img, *([border_size] * 4), cv2.BORDER_REFLECT)
        st_x = int(target_center[0] * scl_x + border_size - self.out_width / 2)
        st_y = int(target_center[1] * scl_y + border_size - self.out_width / 2)
        img = img[st_y:st_y+self.out_width, st_x:st_x+self.out_width]
        # Saves image to class variables and returns status
        self.image = img
        self.stage_images.append(img.copy())
        return None

    def balance_contrast(self):
        '''
        Adjusts image values to make arrow holes easier to detect against each
        different background color

        Args:
            None

        Returns:
            None if successful, True if not
        '''
        # Function settings
        keys = ['red', 'blue', 'black']
        pct_max = .01
        filter_d = self.out_width // 40
        filter_sigma = self.out_width // 30
        morph_kernel = 2
        clahe_limit = 1
        clahe_grid = 12
        logos = np.array([
            [[5, 525], [55, 600]],
            [[465, 545], [535, 600]],
            [[545, 525], [595, 600]]
        ])
        ref_size = 600
        # Convert to grayscale using HSV because it better separates values
        hsv = cv2.cvtColor(self.image, cv2.COLOR_RGB2HSV)
        gray = hsv[:, :, 2]
        # Creates masks for each area of the target with a different background
        circle_masks = []
        for key in keys:
            target_area = (np.square(self.color_steps[key] * self.score_step) *
                           np.pi)
            circle = 0
            for rng in self.colors[key]:
                circle = cv2.bitwise_or(cv2.inRange(hsv, rng['low'],
                                                    rng['high']), circle)
            # Finds contour that best matches estimated circle area
            _, contours, __ = cv2.findContours(circle,
                                               cv2.RETR_LIST,
                                               cv2.CHAIN_APPROX_SIMPLE)
            areas = [cv2.contourArea(c) for c in contours]
            # np.asarray makes the list/scalar arithmetic explicit instead of
            # relying on the numpy scalar's reflected subtraction.
            idx = np.argmin(np.abs(np.asarray(areas) - target_area))
            contour = contours[idx]
            # Creates mask based on contour
            mask = np.zeros_like(gray)
            mask = cv2.fillPoly(mask, pts=[contour], color=255)
            circle_masks.append(mask)
        # Removes smaller circle areas from larger ones, like a hole to a donut
        circle_masks.append(np.full_like(gray, 255))
        for x in range(len(circle_masks)-1, 0, -1):
            circle_masks[x] -= circle_masks[x-1]
        # Rebalances values based on histogram of each masked area
        layers = 0
        for mask in circle_masks:
            # Creates histogram and extracts most frequent value bin
            hist = cv2.calcHist([gray], [0], mask, [32], [0, 256])
            mode_idx = np.argmax(hist)
            limit = hist[mode_idx] * pct_max
            # Calculates upper and lower value bounds
            limit_idx = np.argwhere(hist < limit)[:, 0]
            try:
                low_val = max(limit_idx[limit_idx < mode_idx]) * 8
            # max()/min() of an empty selection raises ValueError; narrowed
            # from the original bare except.
            except ValueError:
                low_val = 0
            try:
                high_val = min(limit_idx[limit_idx > mode_idx]) * 8
            except ValueError:
                high_val = 255
            # Creates a new image on which to perform rebalancing
            layer = gray.copy()
            mode_val = mode_idx * 8
            infl_val = np.mean([mode_val, high_val])
            # Darkens highlights on very dark backgrounds so they read as part
            # of a hole blob instead of a separate object
            if mode_val < 128:
                _, m = cv2.threshold(layer, infl_val, 255, cv2.THRESH_BINARY)
                m_inv = cv2.bitwise_not(m)
                rev = np.interp(layer, (infl_val, 255), (infl_val, 0))
                rev = rev.astype(np.uint8)
                rev = cv2.bitwise_and(rev, rev, mask=m)
                layer = cv2.bitwise_and(layer, layer, mask=m_inv)
                layer = cv2.add(layer, rev)
            # Rebalances values and adds layer to a combined image
            layer = np.clip(layer, low_val, high_val)
            layer = np.interp(layer, (low_val, high_val), (0, 255))
            layer = layer.astype(np.uint8)
            layer = cv2.bitwise_and(layer, layer, mask=mask)
            layers = cv2.add(layers, layer)
        # Applies global filters to reconstituted image layers
        clahe = cv2.createCLAHE(clipLimit=clahe_limit,
                                tileGridSize=(clahe_grid, clahe_grid))
        layers = clahe.apply(layers)
        layers = cv2.bilateralFilter(layers,
                                     filter_d,
                                     filter_sigma,
                                     filter_sigma)
        kernel = np.ones((morph_kernel, morph_kernel), np.uint8)
        layers = cv2.morphologyEx(layers, cv2.MORPH_OPEN, kernel)
        layers = cv2.morphologyEx(layers, cv2.MORPH_CLOSE, kernel)
        # Obscures logos at the bottom of the target
        logos = (logos * self.out_width / ref_size).astype(int)
        mask = np.zeros((self.out_width, self.out_width), dtype=np.uint8)
        for logo in logos:
            mask = cv2.rectangle(mask, tuple(logo[0]), tuple(logo[1]), 1, -1)
        layers[mask > 0] = np.max(layers)
        # Saves image to class variables and returns status
        self.image_gray = layers
        self.stage_images.append(cv2.cvtColor(layers, cv2.COLOR_GRAY2RGB))
        return None

    def find_shots(self):
        '''
        Uses blob detection to collect keypoint data (position and radius) for
        each shot

        Args:
            None

        Returns:
            None if successful, True if not
        '''
        # Sets blob detection parameters
        params = cv2.SimpleBlobDetector_Params()
        params.minDistBetweenBlobs = 0
        params.filterByArea = True
        params.minArea = 30
        params.maxArea = 600
        params.filterByCircularity = True
        params.minCircularity = .01
        params.filterByConvexity = True
        params.minConvexity = .01
        params.filterByInertia = True
        params.minInertiaRatio = .1
        # Runs blob detection and adds keypoints to image
        detector = cv2.SimpleBlobDetector_create(params)
        self.keypoints = detector.detect(self.image_gray)
        if not self.keypoints:
            return True
        img = cv2.drawKeypoints(self.image,
                                self.keypoints,
                                np.array([]),
                                (0, 255, 0),
                                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        # Saves image to class variables and returns status
        self.image = img
        self.stage_images.append(self.image.copy())
        return None

    def get_shot_data(self):
        '''
        Derives shot coordinates and scores from keypoint data (position and
        radius) and gathers all target data into the class DataFrame 'df'

        Args:
            None

        Returns:
            None if successful, True if not
        '''
        # Function settings
        pct_smallest = .2
        overlap_penalty = .85
        max_overlapped = 3
        # Transfers all blob detection keypoint data to a dataframe
        arrow_x = []
        arrow_y = []
        arrow_radii = []
        for k in self.keypoints:
            arrow_x.append(k.pt[0])
            arrow_y.append(k.pt[1])
            arrow_radii.append(k.size / 2)
        df = pd.DataFrame({'x': arrow_x, 'y': arrow_y, 'radius': arrow_radii})
        # Calculates how many shots created a hole based on the mean radius of
        # the smallest holes
        num_smallest = np.ceil(len(arrow_radii) * pct_smallest).astype(int)
        single_size = (np.mean(np.sort(arrow_radii)[: num_smallest]) *
                       overlap_penalty)
        df['count'] = np.clip(df['radius'] // single_size, 0, max_overlapped)
        df['count'] = df['count'].replace(0, 1).astype(int)
        df['id'] = 0
        center = self.out_width // 2
        # Simulates positions of overlapping shots
        if df['count'].max() > 1:
            # Splits dataframe based on which rows represent multiple shots
            clus_df = df[df['count'] > 1].copy()
            df = df[~(df['count'] > 1)].copy()
            clus_df = clus_df.loc[clus_df.index.repeat(clus_df['count'])]
            clus_df['id'] =\
                clus_df.groupby(['x', 'y', 'radius', 'count']).cumcount()
            # Derives a rotation offset for each shot in a cluster
            clus_df['radius'] /= 2
            clus_df['vec_x'] = center - clus_df['x']
            clus_df['vec_y'] = center - clus_df['y']
            clus_df['mag'] = np.sqrt(np.square(clus_df['vec_x']) + np.square(clus_df['vec_y']))
            clus_df['vec_x'] = (clus_df['vec_x'] / clus_df['mag'] *
                                clus_df['radius'])
            clus_df['vec_y'] = (clus_df['vec_y'] / clus_df['mag'] *
                                clus_df['radius'])
            clus_df['rot'] = np.radians(clus_df['id'] / clus_df['count'] * 360)
            # Calculates new shot coordinates from rotation offset
            clus_df['x'] = (np.cos(clus_df['rot']) * clus_df['vec_x'] -
                            np.sin(clus_df['rot']) * clus_df['vec_y'] +
                            clus_df['x'])
            clus_df['y'] = (np.sin(clus_df['rot']) * clus_df['vec_x'] +
                            np.cos(clus_df['rot']) * clus_df['vec_y'] +
                            clus_df['y'])
            # DataFrame.append was removed in pandas 2.0; pd.concat is the
            # drop-in equivalent here.
            df = pd.concat([df, clus_df[['x', 'y', 'radius']]], sort=False)
        # Calculates score for each shot
        df['error'] = np.sqrt(np.square(center - df['x']) +
                              np.square(center - df['y']))
        df['score'] = 10 - ((df['error'] - df['radius']) // self.score_step)
        df['score'] = df['score'].clip(0, 10).astype(int)
        # Calculates optimized score for each shot
        grp_center = np.mean(df[['x', 'y']], axis=0)
        df['op_x'] = df['x'] - grp_center['x'] + center
        df['op_y'] = df['y'] - grp_center['y'] + center
        op_error = np.sqrt(np.square(center - df['op_x']) +
                           np.square(center - df['op_y']))
        df['op_score'] = 10 - ((op_error - df['radius']) // self.score_step)
        df['op_score'] = df['op_score'].clip(0, 10).astype(int)
        # Sorts dataframe and drops columns that are no longer necessary
        df.sort_values(['error'], inplace=True)
        df.reset_index(drop=True, inplace=True)
        df.drop(['count', 'id'], axis=1, inplace=True)
        # Saves dataframe to class variable and returns status
        self.df = df
        return None

    def read_image(self, filename=None, file_obj=None):
        '''
        Resets all per-run state and loads the target image from disk or a
        werkzeug file object into 'orig_image'/'image'.

        Args:
            filename (str): filepath of the image to analyze
            file_obj (werkzeug file object): file object containing image

        Returns:
            None if successful, str containing error message if not
        '''
        # Resets class variables
        self.orig_image = None
        self.image = None
        self.image_gray = None
        self.stage_images = []
        self.keypoints = None
        self.df = None
        # Loads image from file if it exists
        if file_obj is None:
            image = cv2.imread(filename, 1)
        # Loads image from werkzeug file object
        else:
            try:
                # np.fromstring is deprecated for binary input; frombuffer is
                # the supported equivalent (its read-only view is fine here).
                np_img = np.frombuffer(file_obj.read(), np.uint8)
                image = cv2.imdecode(np_img, cv2.IMREAD_COLOR)
            except Exception:
                image = None
        # Convert image to proper color channels
        if image is None:
            return 'Could not read image file'
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.orig_image = image
        self.image = image.copy()
        return None
|
from setuptools import setup
import os

# Resolve paths relative to this file so the build works from any CWD.
here = os.path.abspath(os.path.dirname(__file__))

# The long description comes straight from the README.
with open(os.path.join(here, 'README.md'), encoding='utf-8') as readme_fh:
    long_description = readme_fh.read()

# Version metadata is executed out of the package's version.py module.
about = {}
with open(os.path.join(here, 'pyats_genie_command_parse', 'version.py'), 'r', encoding='utf-8') as version_fh:
    exec(version_fh.read(), about)

packages = [
    'pyats_genie_command_parse'
]

install_requires = [
    'pyats == 21.12',
    'genie == 21.12',
]

tests_require = [
    'pytest',
]

setup(
    name=about['__title__'],
    version=about['__version__'],
    python_requires='>=3.6',
    description=about['__description__'],
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='pyATS genie cisco ios ios-xr nxos parse wrapper',
    url=about['__url__'],
    project_urls={
        'Documentation': 'https://pyats-genie-command-parse.readthedocs.io/en/latest/',
        'Source': 'https://github.com/btr1975/pyats-genie-command-parse',
        'Tracker': 'https://github.com/btr1975/pyats-genie-command-parse/issues',
    },
    author=about['__author__'],
    author_email=about['__email__'],
    license=about['__license__'],
    packages=packages,
    include_package_data=True,
    install_requires=install_requires,
    test_suite='pytest',
    tests_require=tests_require,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: BSD',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'j54w1jdm@o(7vmnv=9_duz$c8zg-brf3z%i8yf%9@o(@k4fezu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hostnames this site is allowed to serve; extend for production domains.
ALLOWED_HOSTS = ['localhost','127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'core',
    # NOTE(review): gunicorn is a WSGI server, not a Django app; listing it
    # here is unusual — confirm it is actually required.
    'gunicorn',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pycoin.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR,'pycoin/templates/htmlfiles')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'pycoin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite file database — fine for development, not for concurrent production use.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): media uploads live inside the app's templates directory —
# unconventional; verify this matches how files are actually served.
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'templates/media_cdn')
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
#STATIC_ROOT = os.path.join(PROJECT_DIR, 'templates/static')
# The trailing comma makes this a one-element tuple, as Django expects a sequence.
STATICFILES_DIRS = os.path.join(BASE_DIR, "pycoin/templates/static/"),
|
#!/usr/bin/env python3
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Performance runner for d8.
Call e.g. with tools/run-perf.py --arch ia32 some_suite.json
The suite json format is expected to be:
{
"path": <relative path chunks to perf resources and main file>,
"owners": [<list of email addresses of benchmark owners (required)>],
"name": <optional suite name, file name is default>,
"archs": [<architecture name for which this suite is run>, ...],
"binary": <name of binary to run, default "d8">,
"flags": [<flag to d8>, ...],
"test_flags": [<flag to the test file>, ...],
"run_count": <how often will this suite run (optional)>,
"run_count_XXX": <how often will this suite run for arch XXX (optional)>,
"timeout": <how long test is allowed to run>,
"timeout_XXX": <how long test is allowed run run for arch XXX>,
"retry_count": <how many times to retry failures (in addition to first try)",
"retry_count_XXX": <how many times to retry failures for arch XXX>
"resources": [<js file to be moved to android device>, ...]
"variants": [
{
"name": <name of the variant>,
"flags": [<flag to the test file>, ...],
<other suite properties>
}, ...
]
"main": <main js perf runner file>,
"results_regexp": <optional regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
"process_size": <flag - collect maximum memory used by the process>,
"tests": [
{
"name": <name of the trace>,
"results_regexp": <optional more specific regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
"process_size": <flag - collect maximum memory used by the process>,
}, ...
]
}
The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.
A suite's results_regexp is expected to have one string place holder
"%s" for the trace name. A trace's results_regexp overwrites suite
defaults.
A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests (with a path relative to the
suite level's path). It is expected to read the measurement's output text
on stdin and print the processed output to stdout.
The results_regexp will be applied to the processed output.
A suite without "tests" is considered a performance test itself.
Variants can be used to run different configurations at the current level. This
essentially copies the sub suites at the current level and can be used to avoid
duplicating a lot of nested "tests" where for instance only the "flags" change.
Full example (suite with one runner):
{
"path": ["."],
"owners": ["username@chromium.org"],
"flags": ["--expose-gc"],
"test_flags": ["5"],
"archs": ["ia32", "x64"],
"run_count": 5,
"run_count_ia32": 3,
"main": "run.js",
"results_regexp": "^%s: (.+)$",
"units": "score",
"tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
{"name": "NavierStokes",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Full example (suite with several runners):
{
"path": ["."],
"owners": ["username@chromium.org", "otherowner@google.com"],
"archs": ["ia32", "x64"],
"flags": ["--expose-gc"]},
"run_count": 5,
"units": "score",
"variants:" {
{"name": "default", "flags": []},
{"name": "future", "flags": ["--future"]},
{"name": "noopt", "flags": ["--noopt"]},
}
"tests": [
{"name": "Richards",
"path": ["richards"],
"main": "run.js",
"run_count": 3,
"results_regexp": "^Richards: (.+)$"},
{"name": "NavierStokes",
"path": ["navier_stokes"],
"main": "run.js",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Path pieces are concatenated. D8 is always run with the suite's path as cwd.
The test flags are passed to the js test file after '--'.
"""
from collections import OrderedDict
from math import sqrt
from statistics import mean, stdev
import copy
import json
import logging
import math
import argparse
import os
import re
import subprocess
import sys
import time
import traceback
from testrunner.local import android
from testrunner.local import command
from testrunner.local import utils
from testrunner.objects.output import Output, NULL_OUTPUT
# Architectures this perf runner accepts in a suite's "archs" list.
SUPPORTED_ARCHS = ['arm',
                   'ia32',
                   'mips',
                   'mipsel',
                   'x64',
                   'arm64',
                   'riscv64']

# Matches chromium-style result lines: "RESULT <graph>: <trace>= <value> <units>".
GENERIC_RESULTS_RE = re.compile(r'^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$')
# A value wrapped in {...} denotes an avg/stddev pair.
RESULT_STDDEV_RE = re.compile(r'^\{([^\}]+)\}$')
# A value wrapped in [...] denotes a list of individual results.
RESULT_LIST_RE = re.compile(r'^\[([^\]]+)\]$')

# Directory containing this script (used to locate the tick processors).
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))

# Exit code signalling an infrastructure (non-benchmark) failure.
INFRA_FAILURE_RETCODE = 87

# Minimum number of runs before confidence-based early stopping may trigger.
MIN_RUNS_FOR_CONFIDENCE = 10
def GeometricMean(values):
  """Return the geometric mean of a list of values.

  Computed as exp(mean(log(v))) so the intermediate product cannot
  overflow for large inputs.
  """
  logs = [math.log(float(v)) for v in values]
  return math.exp(sum(logs) / len(logs))
class ResultTracker(object):
  """Class that tracks trace/runnable results and produces script output.

  The output is structured like this:
  {
    "traces": [
      {
        "graphs": ["path", "to", "trace", "config"],
        "units": <string describing units, e.g. "ms" or "KB">,
        "results": [<list of values measured over several runs>],
        "stddev": <stddev of the value if measure by script or ''>
      },
      ...
    ],
    "runnables": [
      {
        "graphs": ["path", "to", "runnable", "config"],
        "durations": [<list of durations of each runnable run in seconds>],
        "timeout": <timeout configured for runnable in seconds>,
      },
      ...
    ],
    "errors": [<list of strings describing errors>],
  }
  """
  def __init__(self):
    # Both dicts are keyed by the config's fully-qualified name.
    self.traces = {}
    self.errors = []
    self.runnables = {}

  def AddTraceResult(self, trace, result, stddev):
    """Records one measured value for `trace`; repeated calls accumulate
    results into the same entry."""
    if trace.name not in self.traces:
      self.traces[trace.name] = {
          'graphs': trace.graphs,
          'units': trace.units,
          'results': [result],
          'stddev': stddev or '',
      }
    else:
      existing_entry = self.traces[trace.name]
      # The same trace name must always describe the same graph path/units.
      assert trace.graphs == existing_entry['graphs']
      assert trace.units == existing_entry['units']
      if stddev:
        existing_entry['stddev'] = stddev
      existing_entry['results'].append(result)

  def TraceHasStdDev(self, trace):
    # True if a test-provided stddev was already recorded for this trace.
    return trace.name in self.traces and self.traces[trace.name]['stddev'] != ''

  def AddError(self, error):
    self.errors.append(error)

  def AddRunnableDuration(self, runnable, duration):
    """Records a duration of a specific run of the runnable."""
    if runnable.name not in self.runnables:
      self.runnables[runnable.name] = {
          'graphs': runnable.graphs,
          'durations': [duration],
          'timeout': runnable.timeout,
      }
    else:
      existing_entry = self.runnables[runnable.name]
      assert runnable.timeout == existing_entry['timeout']
      assert runnable.graphs == existing_entry['graphs']
      existing_entry['durations'].append(duration)

  def ToDict(self):
    # Serializable snapshot matching the structure in the class docstring.
    return {
        'traces': list(self.traces.values()),
        'errors': self.errors,
        'runnables': list(self.runnables.values()),
    }

  def WriteToFile(self, file_name):
    # Dumps ToDict() as JSON to `file_name`, overwriting it.
    with open(file_name, 'w') as f:
      f.write(json.dumps(self.ToDict()))

  def HasEnoughRuns(self, graph_config, confidence_level):
    """Checks if the mean of the results for a given trace config is within
    0.1% of the true value with the specified confidence level.

    This assumes Gaussian distribution of the noise and based on
    https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule.

    Args:
      graph_config: An instance of GraphConfig.
      confidence_level: Number of standard deviations from the mean that all
          values must lie within. Typical values are 1, 2 and 3 and correspond
          to 68%, 95% and 99.7% probability that the measured value is within
          0.1% of the true value.

    Returns:
      True if specified confidence level have been achieved.
    """
    if not isinstance(graph_config, LeafTraceConfig):
      # Inner node: every child must individually have converged.
      return all(self.HasEnoughRuns(child, confidence_level)
                 for child in graph_config.children)

    trace = self.traces.get(graph_config.name, {})
    results = trace.get('results', [])
    logging.debug('HasEnoughRuns for %s', graph_config.name)

    if len(results) < MIN_RUNS_FOR_CONFIDENCE:
      logging.debug(' Ran %d times, need at least %d',
                    len(results), MIN_RUNS_FOR_CONFIDENCE)
      return False

    logging.debug(' Results: %d entries', len(results))
    # Standard error of the mean shrinks with sqrt(sample size).
    avg = mean(results)
    avg_stderr = stdev(results) / sqrt(len(results))
    logging.debug(' Mean: %.2f, mean_stderr: %.2f', avg, avg_stderr)
    logging.info('>>> Confidence level is %.2f',
                 avg / max(1000.0 * avg_stderr, .1))
    return confidence_level * avg_stderr < avg / 1000.0

  def __str__(self):  # pragma: no cover
    return json.dumps(self.ToDict(), indent=2, separators=(',', ': '))
def RunResultsProcessor(results_processor, output, count):
  """Pipe a run's stdout through an external results-processor script.

  Returns a shallow copy of `output` whose stdout is replaced by the
  processor's stdout. For null runs (no stdout) `output` is returned
  unchanged.
  """
  if output.stdout is None:
    # Dummy pass through for null-runs.
    return output

  # We assume the results processor is relative to the suite.
  assert os.path.exists(results_processor)
  proc = subprocess.Popen(
      [sys.executable, results_processor],
      stdin=subprocess.PIPE,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
  )
  stdout_bytes, _ = proc.communicate(input=output.stdout.encode('utf-8'))
  processed = copy.copy(output)
  processed.stdout = stdout_bytes.decode('utf-8')
  logging.info('>>> Processed stdout (#%d):\n%s', count, output.stdout)
  return processed
class Node(object):
  """A node in the suite tree; tracks children and iterates pre-order."""

  def __init__(self, *args):
    self._children = []

  def AppendChild(self, child):
    """Attach `child` as the last child of this node."""
    self._children.append(child)

  @property
  def children(self):
    return self._children

  def __iter__(self):
    # Pre-order traversal: this node first, then each subtree in order.
    yield self
    for child in self._children:
      yield from child
class DefaultSentinel(Node):
  """Fake root node supplying the default for every inheritable property.

  Using it as the parent of top-level suites means GraphConfig never has
  to special-case a missing parent.
  """

  def __init__(self, binary='d8'):
    super(DefaultSentinel, self).__init__()
    # Execution defaults.
    self.binary = binary
    self.run_count = 10
    self.timeout = 60
    self.retry_count = 4
    # Accumulating properties start out empty.
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.resources = []
    self.owners = []
    # Result-extraction defaults.
    self.process_size = False
    self.results_processor = None
    self.results_regexp = None
    self.stddev_regexp = None
    self.units = 'score'
    self.total = False
    self.main = None

  def __str__(self):
    return type(self).__name__
class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.
  Most properties are inherited from `parent` unless the suite dict
  overrides them; some accumulate along the path from the root.
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get('path', []), list)
    assert isinstance(suite.get('owners', []), list)
    assert isinstance(suite['name'], str)
    assert isinstance(suite.get('flags', []), list)
    assert isinstance(suite.get('test_flags', []), list)
    assert isinstance(suite.get('resources', []), list)

    # Only used by child classes
    self.main = suite.get('main', parent.main)

    # Keep parent for easier debugging
    self.parent = parent

    # Accumulated values (parent's values extended by this suite's).
    self.path = parent.path[:] + suite.get('path', [])
    self.graphs = parent.graphs[:] + [suite['name']]
    self.flags = parent.flags[:] + suite.get('flags', [])
    self.test_flags = parent.test_flags[:] + suite.get('test_flags', [])
    self.owners = parent.owners[:] + suite.get('owners', [])

    # Values independent of parent node.
    self.resources = suite.get('resources', [])

    # Discrete values (with parent defaults). An arch-specific key
    # ('<key>_<arch>') overrides the generic key when present.
    self.binary = suite.get('binary', parent.binary)
    self.run_count = suite.get('run_count', parent.run_count)
    self.run_count = suite.get('run_count_%s' % arch, self.run_count)
    self.retry_count = suite.get('retry_count', parent.retry_count)
    self.retry_count = suite.get('retry_count_%s' % arch, self.retry_count)
    self.timeout = suite.get('timeout', parent.timeout)
    self.timeout = suite.get('timeout_%s' % arch, self.timeout)
    self.units = suite.get('units', parent.units)
    self.total = suite.get('total', parent.total)
    self.results_processor = suite.get(
        'results_processor', parent.results_processor)
    self.process_size = suite.get('process_size', parent.process_size)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string place holder for the
    # suite name is expected.
    # TODO(machenbach): Currently that makes only sense for the leaf level.
    # Multiple place holders for multiple levels are not supported.
    self.results_regexp = suite.get('results_regexp', None)
    if self.results_regexp is None and parent.results_regexp:
      try:
        self.results_regexp = parent.results_regexp % re.escape(suite['name'])
      except TypeError as e:
        raise TypeError(
            "Got error while preparing results_regexp: "
            "parent.results_regexp='%s' suite.name='%s' suite='%s', error: %s" %
            (parent.results_regexp, suite['name'], str(suite)[:100], e))

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite['name'])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get('stddev_regexp', stddev_default)

  @property
  def name(self):
    # Fully-qualified, slash-separated path of suite names from the root.
    return '/'.join(self.graphs)

  def __str__(self):
    return "%s(%s)" % (type(self).__name__, self.name)
class VariantConfig(GraphConfig):
  """Intermediate node whose children are all variants of one another."""

  def __init__(self, suite, parent, arch):
    super(VariantConfig, self).__init__(suite, parent, arch)
    assert 'variants' in suite
    # Validate every variant up front: no nesting, a mandatory name, and at
    # least one property besides the name that actually varies.
    for variant in suite.get('variants'):
      assert 'variants' not in variant, \
          "Cannot directly nest variants:" + str(variant)[:100]
      assert 'name' in variant, \
          "Variant must have 'name' property: " + str(variant)[:100]
      assert len(variant) >= 2, \
          "Variant must define other properties than 'name': " + str(variant)
class LeafTraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure.

  A leaf extracts a single trace value from the test output via
  `results_regexp` and optionally a stddev via `stddev_regexp`. Leaves
  never have children (AppendChild raises).
  """
  def __init__(self, suite, parent, arch):
    super(LeafTraceConfig, self).__init__(suite, parent, arch)
    assert self.results_regexp
    if '%s' in self.results_regexp:
      raise Exception(
          "results_regexp at the wrong level. "
          "Regexp should not contain '%%s': results_regexp='%s' name=%s" %
          (self.results_regexp, self.name))

  def AppendChild(self, node):
    raise Exception("%s cannot have child configs." % type(self).__name__)

  def ConsumeOutput(self, output, result_tracker):
    """Extracts this trace's result from the output.

    Args:
      output: Output object from the test run.
      result_tracker: Result tracker to be updated.

    Returns:
      The raw extracted result value or None if an error occurred.
    """
    # NOTE: a dead loop over self.children was removed here: AppendChild
    # always raises for leaves, so self.children is always empty.
    result = None
    stddev = None

    try:
      result = float(
          re.search(self.results_regexp, output.stdout, re.M).group(1))
    except ValueError:
      result_tracker.AddError(
          'Regexp "%s" returned a non-numeric for test %s.' %
          (self.results_regexp, self.name))
    except Exception:
      # re.search returned None (no match) or stdout was unusable. Was a
      # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
      result_tracker.AddError(
          'Regexp "%s" did not match for test %s.' %
          (self.results_regexp, self.name))

    try:
      if self.stddev_regexp:
        if result_tracker.TraceHasStdDev(self):
          result_tracker.AddError(
              'Test %s should only run once since a stddev is provided by the '
              'test.' % self.name)
        stddev = re.search(self.stddev_regexp, output.stdout, re.M).group(1)
    except Exception:
      result_tracker.AddError(
          'Regexp "%s" did not match for test %s.' %
          (self.stddev_regexp, self.name))

    # Record the value even when it is 0.0 (previously `if result:` silently
    # dropped a legitimate zero measurement); skip only on extraction failure.
    if result is not None:
      result_tracker.AddTraceResult(self, result, stddev)
    return result
class TraceConfig(GraphConfig):
  """An inner trace node; contains TraceConfigs or LeafTraceConfigs."""

  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    # FIX: remember the arch. ConsumeOutput needs it to build the synthetic
    # 'Total' leaf, but previously only RunnableConfig.__init__ stored it,
    # so computing a total on a plain (non-runnable) TraceConfig raised
    # AttributeError.
    self.arch = arch

  def ConsumeOutput(self, output, result_tracker):
    """Processes test run output and updates result tracker.

    Args:
      output: Output object from the test run.
      result_tracker: ResultTracker object to be updated.
    """
    results_for_total = []
    for trace in self.children:
      result = trace.ConsumeOutput(output, result_tracker)
      if result:
        results_for_total.append(result)

    if self.total:
      # Produce total metric only when all traces have produced results.
      if len(self.children) != len(results_for_total):
        result_tracker.AddError(
            'Not all traces have produced results. Can not compute total for '
            '%s.' % self.name)
        return

      # Calculate total as a the geometric mean for results from all traces.
      total_trace = LeafTraceConfig(
          {
              'name': 'Total',
              'units': self.children[0].units
          }, self, self.arch)
      result_tracker.AddTraceResult(total_trace,
                                    GeometricMean(results_for_total), '')

  def AppendChild(self, node):
    if node.__class__ not in (TraceConfig, LeafTraceConfig):
      raise Exception(
          "%s only allows TraceConfig and LeafTraceConfig as child configs." %
          type(self).__name__)
    super(TraceConfig, self).AppendChild(node)
class RunnableConfig(TraceConfig):
  """A suite definition with a main file, i.e. something we can execute."""

  def __init__(self, suite, parent, arch):
    super(RunnableConfig, self).__init__(suite, parent, arch)
    self.arch = arch
    assert self.main, "No main js file provided"
    if not self.owners:
      logging.error("No owners provided for %s" % self.name)

  def ChangeCWD(self, suite_path):
    """Changes the cwd to the path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    cwd = os.path.join(suite_dir, bench_dir)
    logging.debug('Changing CWD to: %s' % cwd)
    os.chdir(cwd)

  def GetCommandFlags(self, extra_flags=None):
    # Test flags are handed to the JS file behind a '--' separator.
    if self.test_flags:
      suffix = ['--'] + self.test_flags
    else:
      suffix = []
    return self.flags + (extra_flags or []) + [self.main] + suffix

  def GetCommand(self, cmd_prefix, shell_dir, extra_flags=None):
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    if self.binary != 'd8' and '--prof' in extra_flags:
      logging.info('Profiler supported only on a benchmark run with d8')

    if self.process_size:
      # Wrap with /usr/bin/time so peak memory is reported on stderr.
      cmd_prefix = ['/usr/bin/time', '--format=MaxMemory: %MKB'] + cmd_prefix

    if self.binary.endswith('.py'):
      # Copy cmd_prefix instead of update (+=).
      cmd_prefix = cmd_prefix + [sys.executable]

    return command.Command(
        cmd_prefix=cmd_prefix,
        shell=os.path.join(shell_dir, self.binary),
        args=self.GetCommandFlags(extra_flags=extra_flags),
        timeout=self.timeout or 60,
        handle_sigterm=True)

  def ProcessOutput(self, output, result_tracker, count):
    """Processes test run output and updates result tracker.

    Args:
      output: Output object from the test run.
      result_tracker: ResultTracker object to be updated.
      count: Index of the test run (used for better logging).
    """
    if self.results_processor:
      output = RunResultsProcessor(self.results_processor, output, count)
    self.ConsumeOutput(output, result_tracker)
class RunnableLeafTraceConfig(LeafTraceConfig, RunnableConfig):
  """A runnable suite definition that is itself a leaf trace."""

  def __init__(self, suite, parent, arch):
    super(RunnableLeafTraceConfig, self).__init__(suite, parent, arch)
    if not self.owners:
      logging.error("No owners provided for %s" % self.name)

  def ProcessOutput(self, output, result_tracker, count):
    # A leaf runnable records its own run duration and consumes its own
    # output directly.
    result_tracker.AddRunnableDuration(self, output.duration)
    self.ConsumeOutput(output, result_tracker)
def MakeGraphConfig(suite, parent, arch):
  """Instantiate the graph-config class appropriate for `suite`."""
  config_class = GetGraphConfigClass(suite, parent)
  return config_class(suite, parent, arch)
def GetGraphConfigClass(suite, parent):
  """Factory method for making graph configuration objects."""
  has_tests = bool(suite.get('tests'))
  if isinstance(parent, TraceConfig):
    # Below a trace node only further traces (or leaves) may appear.
    return TraceConfig if has_tests else LeafTraceConfig
  if suite.get('main') is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    # With subgraphs it is an inner runnable, otherwise a runnable leaf.
    return RunnableConfig if has_tests else RunnableLeafTraceConfig
  if has_tests:
    # This is neither a leaf nor a runnable: a plain container node.
    return GraphConfig
  # pragma: no cover
  raise Exception('Invalid suite configuration.' + str(suite)[:200])
def BuildGraphConfigs(suite, parent, arch):
  """Recursively builds the graph-config tree for `suite` under `parent`.

  Node kinds produced (see also the AppendChild overrides):
  - GraphConfig: plain container; may hold defaults used by its children.
  - VariantConfig: created when the suite declares "variants"; each variant
    becomes a subtree of the same class the suite itself would have gotten.
  - RunnableConfig / RunnableLeafTraceConfig: suites with a "main" file.
  - TraceConfig / LeafTraceConfig: nested "tests" below a runnable.

  Returns the new graph node, or None when the suite does not apply to
  `arch`.
  """
  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get('archs', SUPPORTED_ARCHS):
    return None

  variants = suite.get('variants', [])
  if not variants:
    graph = MakeGraphConfig(suite, parent, arch)
    for child_suite in suite.get('tests', []):
      BuildGraphConfigs(child_suite, graph, arch)
  else:
    graph = VariantConfig(suite, parent, arch)
    variant_class = GetGraphConfigClass(suite, parent)
    for variant_suite in variants:
      # Propagate down the results_regexp if it's not overridden in the
      # variant.
      variant_suite.setdefault('results_regexp',
                               suite.get('results_regexp', None))
      variant_graph = variant_class(variant_suite, graph, arch)
      graph.AppendChild(variant_graph)
      for child_suite in suite.get('tests', []):
        BuildGraphConfigs(child_suite, variant_graph, arch)
  parent.AppendChild(graph)
  return graph
def FlattenRunnables(node, node_cb):
  """Generator that walks the tree and yields every runnable config.

  `node_cb` is invoked once for every visited node, runnable or not.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      yield from FlattenRunnables(child, node_cb)
  else:  # pragma: no cover
    raise Exception('Invalid suite configuration.')
def find_build_directory(base_path, arch):
  """Returns the location of d8 or node in the build output directory.

  This supports a seamless transition between legacy build location
  (out/Release) and new build location (out/build).
  """
  def is_build(path):
    # We support d8 or node as executables. We don't support testing on
    # Windows.
    return any(os.path.isfile(os.path.join(path, exe))
               for exe in ('d8', 'node'))

  candidates = [
      os.path.join(base_path, p)
      for p in (
          '%s.release' % arch,  # Location developer wrapper scripts use.
          'build',              # Current build location on bots.
          'Release',            # Legacy build location on bots.
      )
  ]
  matches = [p for p in candidates if is_build(p)]
  assert matches, 'No build directory found.'
  assert len(matches) == 1, (
      'Found ambiguous build directories use --binary-override-path.')
  return matches[0]
class Platform(object):
  """Base class for executing runnables on desktop or Android."""

  def __init__(self, args):
    self.shell_dir = args.shell_dir
    self.shell_dir_secondary = args.shell_dir_secondary
    self.is_dry_run = args.dry_run
    self.extra_flags = args.extra_flags.split()
    self.args = args

  @staticmethod
  def ReadBuildConfig(args):
    """Returns the parsed v8_build_config.json, or {} if it is absent."""
    config_path = os.path.join(args.shell_dir, 'v8_build_config.json')
    if not os.path.isfile(config_path):
      return {}
    with open(config_path) as f:
      return json.load(f)

  @staticmethod
  def GetPlatform(args):
    # Android is detected via the build config of the primary shell dir.
    if Platform.ReadBuildConfig(args).get('is_android', False):
      return AndroidPlatform(args)
    else:
      return DesktopPlatform(args)

  def _Run(self, runnable, count, secondary=False):
    raise NotImplementedError()  # pragma: no cover

  def _LoggedRun(self, runnable, count, secondary=False):
    """Runs the runnable once and logs stdout/stderr/timeouts/crashes."""
    suffix = ' - secondary' if secondary else ''
    title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
    try:
      output = self._Run(runnable, count, secondary)
    except OSError:
      logging.exception(title % 'OSError')
      raise
    if output.stdout:
      logging.info(title % 'Stdout' + '\n%s', output.stdout)
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      logging.info(title % 'Stderr' + '\n%s', output.stderr)
    # FIX: the timeout warning was emitted without checking whether the run
    # actually timed out (Output carries a timed_out flag, set e.g. on
    # android.TimeoutException).
    if output.timed_out:
      logging.warning('>>> Test timed out after %ss.', runnable.timeout)
    if output.exit_code != 0:
      logging.warning('>>> Test crashed with exit code %d.', output.exit_code)
    return output

  def Run(self, runnable, count, secondary):
    """Execute the benchmark's main file.

    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
      secondary: True if secondary run should be executed.

    Returns:
      A tuple with the two benchmark outputs. The latter will be NULL_OUTPUT if
      secondary is False.
    """
    output = self._LoggedRun(runnable, count, secondary=False)
    if secondary:
      return output, self._LoggedRun(runnable, count, secondary=True)
    else:
      return output, NULL_OUTPUT
class DesktopPlatform(Platform):
  """Platform implementation that runs benchmarks locally on the host."""

  def __init__(self, args):
    super(DesktopPlatform, self).__init__(args)
    self.command_prefix = []

    # Setup command class to OS specific version.
    command.setup(utils.GuessOS(), args.device)

    if args.prioritize or args.affinitize != None:
      # Both options are implemented via the external `schedtool` utility.
      self.command_prefix = ['schedtool']
      if args.prioritize:
        self.command_prefix += ['-n', '-20']
      if args.affinitize != None:
        # schedtool expects a bit pattern when setting affinity, where each
        # bit set to '1' corresponds to a core where the process may run on.
        # First bit corresponds to CPU 0. Since the 'affinitize' parameter is
        # a core number, we need to map to said bit pattern.
        cpu = int(args.affinitize)
        core = 1 << cpu
        self.command_prefix += ['-a', ('0x%x' % core)]
      # '-e' tells schedtool to execute the command that follows.
      self.command_prefix += ['-e']

  def PreExecution(self):
    # Nothing to prepare on desktop.
    pass

  def PostExecution(self):
    # Nothing to clean up on desktop.
    pass

  def PreTests(self, node, path):
    # Runnables expect the cwd to be their suite directory.
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, secondary=False):
    shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
    cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
    logging.debug('Running command: %s' % cmd)
    # Dry runs return an empty Output instead of executing anything.
    output = Output() if self.is_dry_run else cmd.execute()

    if output.IsSuccess() and '--prof' in self.extra_flags:
      os_prefix = {'linux': 'linux', 'macos': 'mac'}.get(utils.GuessOS())
      if os_prefix:
        if not self.is_dry_run:
          # Post-process the generated profile with the tick processor.
          tick_tools = os.path.join(TOOLS_BASE, '%s-tick-processor' % os_prefix)
          subprocess.check_call(tick_tools + ' --only-summary', shell=True)
      else:  # pragma: no cover
        logging.warning(
            'Profiler option currently supported on Linux and Mac OS.')

    # /usr/bin/time outputs to stderr
    if runnable.process_size:
      output.stdout += output.stderr
    return output
class AndroidPlatform(Platform):  # pragma: no cover
  """Platform implementation that drives benchmarks on an Android device."""

  def __init__(self, args):
    super(AndroidPlatform, self).__init__(args)
    # Wraps adb communication with the attached device.
    self.driver = android.android_driver(args.device)

  def PreExecution(self):
    self.driver.set_high_perf_mode()

  def PostExecution(self):
    self.driver.set_default_perf_mode()
    self.driver.tear_down()

  def PreTests(self, node, path):
    # Changes cwd (for runnables) and pushes binaries and benchmark files
    # onto the device before the runs start.
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = '.'
      bench_abs = suite_dir

    self.driver.push_executable(self.shell_dir, 'bin', node.binary)
    if self.shell_dir_secondary:
      self.driver.push_executable(
          self.shell_dir_secondary, 'bin_secondary', node.binary)

    if isinstance(node, RunnableConfig):
      self.driver.push_file(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self.driver.push_file(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, secondary=False):
    target_dir = 'bin_secondary' if secondary else 'bin'
    self.driver.drop_ram_caches()

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = '.'

    logcat_file = None
    if self.args.dump_logcats_to:
      runnable_name = '-'.join(runnable.graphs)
      logcat_file = os.path.join(
          self.args.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
              runnable_name, count + 1, '-secondary' if secondary else ''))
      logging.debug('Dumping logcat into %s', logcat_file)

    output = Output()
    start = time.time()
    try:
      if not self.is_dry_run:
        output.stdout = self.driver.run(
            target_dir=target_dir,
            binary=runnable.binary,
            args=runnable.GetCommandFlags(self.extra_flags),
            rel_path=bench_rel,
            timeout=runnable.timeout,
            logcat_file=logcat_file,
        )
    except android.CommandFailedException as e:
      # Keep the partial output and record the failure exit code.
      output.stdout = e.output
      output.exit_code = e.status
    except android.TimeoutException as e:
      output.stdout = e.output
      output.timed_out = True
    if runnable.process_size:
      # Peak-memory measurement is not available on Android.
      output.stdout += 'MaxMemory: Unsupported'
    output.duration = time.time() - start
    return output
class CustomMachineConfiguration:
  """Context manager that pins machine settings (ASLR, CPU governor) for the
  duration of a benchmark run and restores the previous values on exit.

  Writing /proc and /sys entries typically requires root/sudo.
  """
  def __init__(self, disable_aslr = False, governor = None):
    # Backups are filled in lazily on __enter__ so that __exit__ only
    # restores what was actually changed.
    self.aslr_backup = None
    self.governor_backup = None
    self.disable_aslr = disable_aslr
    self.governor = governor

  def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor != None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self

  def __exit__(self, type, value, traceback):
    if self.aslr_backup != None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup != None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)

  @staticmethod
  def GetASLR():
    # Reads the current kernel ASLR setting (0, 1 or 2).
    try:
      with open('/proc/sys/kernel/randomize_va_space', 'r') as f:
        return int(f.readline().strip())
    except Exception:
      logging.exception('Failed to get current ASLR settings.')
      raise

  @staticmethod
  def SetASLR(value):
    try:
      with open('/proc/sys/kernel/randomize_va_space', 'w') as f:
        f.write(str(value))
    except Exception:
      logging.exception(
          'Failed to update ASLR to %s. Are we running under sudo?', value)
      raise

    # Read the value back to verify the write actually took effect.
    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception('Present value is %s' % new_value)

  @staticmethod
  def GetCPUCoresRange():
    # Parses /sys/devices/system/cpu/present (e.g. "0-7" or "0") into a
    # list of CPU indices.
    try:
      with open('/sys/devices/system/cpu/present', 'r') as f:
        indexes = f.readline()
        r = list(map(int, indexes.split('-')))
        if len(r) == 1:
          return list(range(r[0], r[0] + 1))
        return list(range(r[0], r[1] + 1))
    except Exception:
      logging.exception('Failed to retrieve number of CPUs.')
      raise

  @staticmethod
  def GetCPUPathForId(cpu_index):
    # sysfs path of the scaling governor for a single CPU.
    ret = '/sys/devices/system/cpu/cpu'
    ret += str(cpu_index)
    ret += '/cpufreq/scaling_governor'
    return ret

  @staticmethod
  def GetCPUGovernor():
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, 'r') as f:
          # We assume the governors of all CPUs are set to the same value
          val = f.readline().strip()
          if ret == None:
            ret = val
          elif ret != val:
            raise Exception('CPU cores have differing governor settings')
      return ret
    except Exception:
      logging.exception('Failed to get the current CPU governor. Is the CPU '
                        'governor disabled? Check BIOS.')
      raise

  @staticmethod
  def SetCPUGovernor(value):
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, 'w') as f:
          f.write(value)
    except Exception:
      logging.exception('Failed to change CPU governor to %s. Are we '
                        'running under sudo?', value)
      raise

    # Read back to verify the governor change took effect on all cores.
    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception('Could not set CPU governor. Present value is %s'
                      % cur_value )
class MaxTotalDurationReachedError(Exception):
  """Raised to stop the test loop once the max total duration is exceeded."""
def Main(argv):
  """Parse flags, run the configured benchmark suites and report results.

  Args:
    argv: command line arguments (excluding the program name).

  Returns:
    0 if everything ran successfully, 1 if any test failed or recorded an
    error, or INFRA_FAILURE_RETCODE for infrastructure problems (bad flags,
    unsupported architecture, missing build output, ...).
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--arch',
                      help='The architecture to run tests for. Pass "auto" '
                      'to auto-detect.', default='x64',
                      choices=SUPPORTED_ARCHS + ['auto'])
  parser.add_argument('--buildbot',
                      help='Deprecated',
                      default=False, action='store_true')
  parser.add_argument('-d', '--device',
                      help='The device ID to run Android tests on. If not '
                      'given it will be autodetected.')
  parser.add_argument('--extra-flags',
                      help='Additional flags to pass to the test executable',
                      default='')
  parser.add_argument('--json-test-results',
                      help='Path to a file for storing json results.')
  parser.add_argument('--json-test-results-secondary',
                      help='Path to a file for storing json results from run '
                      'without patch or for reference build run.')
  parser.add_argument('--outdir', help='Base directory with compile output',
                      default='out')
  parser.add_argument('--outdir-secondary',
                      help='Base directory with compile output without patch '
                      'or for reference build')
  parser.add_argument('--binary-override-path',
                      help='JavaScript engine binary. By default, d8 under '
                      'architecture-specific build dir. '
                      'Not supported in conjunction with outdir-secondary.')
  parser.add_argument('--prioritize',
                      help='Raise the priority to nice -20 for the '
                      'benchmarking process.Requires Linux, schedtool, and '
                      'sudo privileges.', default=False, action='store_true')
  parser.add_argument('--affinitize',
                      help='Run benchmarking process on the specified core. '
                      'For example: --affinitize=0 will run the benchmark '
                      'process on core 0. --affinitize=3 will run the '
                      'benchmark process on core 3. Requires Linux, schedtool, '
                      'and sudo privileges.', default=None)
  parser.add_argument('--noaslr',
                      help='Disable ASLR for the duration of the benchmarked '
                      'process. Requires Linux and sudo privileges.',
                      default=False, action='store_true')
  parser.add_argument('--cpu-governor',
                      help='Set cpu governor to specified policy for the '
                      'duration of the benchmarked process. Typical options: '
                      '"powersave" for more stable results, or "performance" '
                      'for shorter completion time of suite, with potentially '
                      'more noise in results.')
  parser.add_argument(
      '--filter',
      help='Only run the benchmarks matching with this '
      'regex. For example: '
      '--filter=JSTests/TypedArrays/ will run only TypedArray '
      'benchmarks from the JSTests suite.')
  parser.add_argument('--confidence-level', type=float,
                      help='Repeatedly runs each benchmark until specified '
                      'confidence level is reached. The value is interpreted '
                      'as the number of standard deviations from the mean that '
                      'all values must lie within. Typical values are 1, 2 and '
                      '3 and correspond to 68%%, 95%% and 99.7%% probability '
                      'that the measured value is within 0.1%% of the true '
                      'value. Larger values result in more retries and thus '
                      'longer runtime, but also provide more reliable results. '
                      'Also see --max-total-duration flag.')
  parser.add_argument('--max-total-duration', type=int, default=7140, # 1h 59m
                      help='Max total duration in seconds allowed for retries '
                      'across all tests. This is especially useful in '
                      'combination with the --confidence-level flag.')
  parser.add_argument('--dump-logcats-to',
                      help='Writes logcat output from each test into specified '
                      'directory. Only supported for android targets.')
  parser.add_argument('--run-count', type=int, default=0,
                      help='Override the run count specified by the test '
                      'suite. The default 0 uses the suite\'s config.')
  parser.add_argument(
      '--dry-run',
      default=False,
      action='store_true',
      help='Do not run any actual tests.')
  parser.add_argument('-v', '--verbose', default=False, action='store_true',
                      help='Be verbose and print debug output.')
  parser.add_argument('suite', nargs='+', help='Path to the suite config file.')

  try:
    args = parser.parse_args(argv)
  except SystemExit:
    # argparse exits on bad flags or --help; report as infra failure instead
    # of letting SystemExit propagate.
    return INFRA_FAILURE_RETCODE

  logging.basicConfig(
      level=logging.DEBUG if args.verbose else logging.INFO,
      format='%(asctime)s %(levelname)-8s %(message)s')

  if args.arch == 'auto':  # pragma: no cover
    args.arch = utils.DefaultArch()
    if args.arch not in SUPPORTED_ARCHS:
      logging.error(
          'Auto-detected architecture "%s" is not supported.', args.arch)
      return INFRA_FAILURE_RETCODE

  if (args.json_test_results_secondary and
      not args.outdir_secondary):  # pragma: no cover
    logging.error('For writing secondary json test results, a secondary outdir '
                  'patch must be specified.')
    return INFRA_FAILURE_RETCODE

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

  if args.binary_override_path is None:
    args.shell_dir = find_build_directory(
        os.path.join(workspace, args.outdir), args.arch)
    default_binary_name = 'd8'
  else:
    if not os.path.isfile(args.binary_override_path):
      logging.error('binary-override-path must be a file name')
      return INFRA_FAILURE_RETCODE
    if args.outdir_secondary:
      logging.error('specify either binary-override-path or outdir-secondary')
      return INFRA_FAILURE_RETCODE
    args.shell_dir = os.path.abspath(
        os.path.dirname(args.binary_override_path))
    default_binary_name = os.path.basename(args.binary_override_path)

  if args.outdir_secondary:
    args.shell_dir_secondary = find_build_directory(
        os.path.join(workspace, args.outdir_secondary), args.arch)
  else:
    args.shell_dir_secondary = None

  if args.json_test_results:
    args.json_test_results = os.path.abspath(args.json_test_results)
  if args.json_test_results_secondary:
    args.json_test_results_secondary = os.path.abspath(
        args.json_test_results_secondary)

  try:
    if args.filter:
      args.filter = re.compile(args.filter)
  except re.error:
    logging.error("Invalid regular expression for --filter=%s" % args.filter)
    return INFRA_FAILURE_RETCODE

  # Ensure all arguments have absolute path before we start changing current
  # directory.
  args.suite = list(map(os.path.abspath, args.suite))

  platform = Platform.GetPlatform(args)

  result_tracker = ResultTracker()
  result_tracker_secondary = ResultTracker()
  have_failed_tests = False
  with CustomMachineConfiguration(governor=args.cpu_governor,
                                  disable_aslr=args.noaslr):
    for path in args.suite:
      if not os.path.exists(path):  # pragma: no cover
        result_tracker.AddError('Configuration file %s does not exist.' % path)
        continue

      with open(path) as f:
        suite = json.loads(f.read())

      # If no name is given, default to the file name without .json.
      suite.setdefault('name', os.path.splitext(os.path.basename(path))[0])

      # Setup things common to one test suite.
      platform.PreExecution()

      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, default_parent, args.arch)

      if logging.DEBUG >= logging.root.level:
        logging.debug("Config tree:")
        for node in iter(root):
          logging.debug("  %s", node)

      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)

      # Traverse graph/trace tree and iterate over all runnables.
      start = time.time()
      try:
        for runnable in FlattenRunnables(root, NodeCB):
          runnable_name = '/'.join(runnable.graphs)
          # BUG FIX: only run suites that MATCH --filter. The previous
          # condition lacked the `not` and therefore skipped exactly the
          # suites the user asked for.
          if args.filter and not args.filter.search(runnable_name):
            logging.info('Skipping suite "%s" due to filter', runnable_name)
            continue
          logging.info('>>> Running suite: %s', runnable_name)

          def RunGenerator(runnable):
            # With --confidence-level, keep re-running until the tracker has
            # enough samples; otherwise honor the configured run count.
            if args.confidence_level:
              counter = 0
              while not result_tracker.HasEnoughRuns(
                  runnable, args.confidence_level):
                yield counter
                counter += 1
            else:
              for i in range(0, max(1, args.run_count or runnable.run_count)):
                yield i

          for i in RunGenerator(runnable):
            attempts_left = runnable.retry_count + 1
            while attempts_left:
              total_duration = time.time() - start
              if total_duration > args.max_total_duration:
                logging.info(
                    '>>> Stopping now since running for too long (%ds > %ds)',
                    total_duration, args.max_total_duration)
                raise MaxTotalDurationReachedError()

              output, output_secondary = platform.Run(
                  runnable, i, secondary=args.shell_dir_secondary)
              result_tracker.AddRunnableDuration(runnable, output.duration)
              result_tracker_secondary.AddRunnableDuration(
                  runnable, output_secondary.duration)

              if output.IsSuccess() and output_secondary.IsSuccess():
                runnable.ProcessOutput(output, result_tracker, i)
                if output_secondary is not NULL_OUTPUT:
                  runnable.ProcessOutput(
                      output_secondary, result_tracker_secondary, i)
                break

              attempts_left -= 1
              if not attempts_left:
                logging.info('>>> Suite %s failed after %d retries',
                             runnable_name, runnable.retry_count + 1)
                have_failed_tests = True
              else:
                logging.info('>>> Retrying suite: %s', runnable_name)
      except MaxTotalDurationReachedError:
        have_failed_tests = True

      platform.PostExecution()

  if args.json_test_results:
    result_tracker.WriteToFile(args.json_test_results)
  else:  # pragma: no cover
    print('Primary results:', result_tracker)

  if args.shell_dir_secondary:
    if args.json_test_results_secondary:
      result_tracker_secondary.WriteToFile(args.json_test_results_secondary)
    else:  # pragma: no cover
      print('Secondary results:', result_tracker_secondary)

  if (result_tracker.errors or result_tracker_secondary.errors or
      have_failed_tests):
    return 1
  return 0
def MainWrapper():
  """Invoke Main() with the process args, mapping any crash to infra failure."""
  try:
    return Main(sys.argv[1:])
  except:  # Deliberately bare: every uncaught error becomes an infra failure.
    # Log uncaptured exceptions and report infra failure to the caller.
    traceback.print_exc()
    return INFRA_FAILURE_RETCODE
# Script entry point: exit code 0 on success, 1 on test failures, and
# INFRA_FAILURE_RETCODE for infrastructure problems.
if __name__ == '__main__':  # pragma: no cover
  sys.exit(MainWrapper())
|
from django import forms
from msbdev.models import ContactForm


class ContactFormForm(forms.ModelForm):
    """ModelForm for ContactForm exposing email and note with styled widgets."""

    class Meta:
        model = ContactForm
        fields = ['email', 'note']
        # Small Bootstrap controls with placeholder hints.
        widgets = {
            'email': forms.EmailInput(
                attrs={'class': 'form-control form-control-sm',
                       'placeholder': 'Your email address'}),
            'note': forms.Textarea(
                attrs={'class': 'form-control form-control-sm',
                       'rows': '2',
                       'placeholder': 'Leave a note'}),
        }
|
# Copyright 2021 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import testflows._core.cli.arg.type as argtype
from testflows._core.cli.arg.common import epilog
from testflows._core.cli.arg.common import HelpFormatter
from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase
from testflows._core.message import Message
from testflows._core.transform.log.pipeline import Pipeline as PipelineBase
from testflows._core.transform.log.read_and_filter import transform as read_and_filter_transform
from testflows._core.transform.log.flat import transform as flat_transform
from testflows._core.transform.log.parse import transform as parse_transform
from testflows._core.transform.log.stop import transform as stop_transform
from testflows._core.transform.log.write import transform as write_transform
class Handler(HandlerBase):
    """Handler for the `description` CLI command.

    Streams a TestFlows message log, keeps only TEST messages for the selected
    test name, and writes their flattened form to the output.
    """
    @classmethod
    def add_command(cls, commands):
        """Register the `description` sub-command and its arguments on `commands`."""
        parser = commands.add_parser("description", help="description", epilog=epilog(),
            description="Show description.",
            formatter_class=HelpFormatter)
        parser.add_argument("name", metavar="name", type=str, help="test name", default="", nargs="?")
        parser.add_argument("--log", metavar="input", type=argtype.logfile("r", bufsize=1, encoding="utf-8"),
                nargs="?", help="input log, default: stdin", default="-")
        parser.add_argument("--output", metavar="output", type=argtype.file("w", bufsize=1, encoding="utf-8"),
                nargs="?", help='output, default: stdout', default="-")
        parser.set_defaults(func=cls())
    class Pipeline(PipelineBase):
        """Log-processing pipeline: grep-prefilter, parse, flatten, write."""
        def __init__(self, name, input, output, tail=False):
            stop_event = threading.Event()
            # Only TEST messages carry the test description.
            message_types = [
                Message.TEST.name
            ]
            # Build a `grep -E` prefilter over the raw JSON log lines: keep
            # lines whose message_keyword is one of `message_types` and whose
            # test_name starts with `name`.  Single quotes inside `name` are
            # shell-escaped as '\'' because the whole pattern is single-quoted.
            command = "grep -E '^{\"message_keyword\":\""
            command = f"{command}({'|'.join(message_types)})\""
            command += ".+,\"test_name\":\"%s.*?\",'" % name.replace("'", r"'\''")
            # Transform chain: filter -> parse JSON -> flatten -> write -> stop.
            steps = [
                read_and_filter_transform(input, command=command, stop=stop_event, tail=tail),
                parse_transform(),
                flat_transform(),
                write_transform(output),
                stop_transform(stop_event)
            ]
            super(Handler.Pipeline, self).__init__(steps, stop=stop_event)
    def handle(self, args):
        """CLI entry point: run the pipeline over the given log (tailing it)."""
        self.Pipeline(args.name, args.log, args.output, tail=True).run()
|
#!/usr/local/bin/python3
import csv
from optparse import OptionParser
def ifnull(val, ischar=False):
    """Translate a raw CSV field into a SQL literal.

    "\\N" (the dbgen NULL marker) becomes the keyword NULL; any other value
    is returned unchanged, wrapped in single quotes when ischar is True.
    """
    if val == "\\N":
        return "NULL"
    return "'" + val + "'" if ischar else val
# Stream TPC-H orders/lineitem CSVs (tab-separated) to stdout as SQL DML,
# one transaction per order together with its matching lineitem rows.
# Both input files are expected to be sorted by order key (dbgen output).
parser = OptionParser()
parser.add_option("-O", "--orders", dest="ordersname", default="orders.csv", help="orders csv file")
parser.add_option("--dml", dest="dml", default="insert", help="insert, upsert, ioc = insert .. on conflict update")
parser.add_option("-L", "--lineitem", dest="lineitemname", default="lineitem.csv", help="lineitem csv file")
parser.add_option("-l", "--limit", dest="orderlimit", default=100000, type=int, help="stop after processing this many orders")
parser.add_option("-b", "--batch", dest="batch", default=1000, type=int, help="commit after this many orders")
parser.add_option("-p", "--parallelize", dest="parallel", default=0, type=int, help="parallizing by adding returning nothing")
parser.add_option("-e", "--end", dest="lineend", default='', help="line terminator")
(options, args) = parser.parse_args()
# NOTE(review): --batch is accepted but never used; a commit is emitted per
# order regardless -- confirm whether batching was intended.

ordersprocessed = 0

# Select the DML verb and the optional ON CONFLICT clauses.
dml = "insert"
orders_ioc = ""
lineitem_ioc = ""
if options.dml == "upsert":
    dml = "upsert"
elif options.dml == "ioc":
    # NOTE: o_totalprice is assigned twice in this clause; redundant but kept
    # to preserve the emitted SQL exactly.
    orders_ioc = " on conflict (o_orderkey) do update set o_orderkey=excluded.o_orderkey, o_custkey=excluded.o_custkey, o_orderstatus=excluded.o_orderstatus, o_totalprice=excluded.o_totalprice,o_totalprice=excluded.o_totalprice,o_orderdate=excluded.o_orderdate,o_orderpriority=excluded.o_orderpriority,o_clerk=excluded.o_clerk,o_shippriority=excluded.o_shippriority,o_comment=excluded.o_comment"
    lineitem_ioc = " on conflict (l_orderkey,l_linenumber) do update set l_orderkey=excluded.l_orderkey,l_partkey=excluded.l_partkey,l_suppkey=excluded.l_suppkey,l_linenumber=excluded.l_linenumber,l_quantity=excluded.l_quantity,l_extendedprice=excluded.l_extendedprice,l_discount=excluded.l_discount,l_tax=excluded.l_tax,l_returnflag=excluded.l_returnflag,l_linestatus=excluded.l_linestatus,l_shipdate=excluded.l_shipdate,l_commitdate=excluded.l_commitdate,l_receiptdate=excluded.l_receiptdate,l_shipinstruct=excluded.l_shipinstruct,l_shipmode=excluded.l_shipmode,l_comment=excluded.l_comment"

parallel = ""
if options.parallel:
    parallel = " returning nothing"

ordersfile = open(options.ordersname, 'r')
orderscsv = csv.reader(ordersfile, delimiter='\t')
lineitemfile = open(options.lineitemname, 'r')
lineitemcsv = csv.reader(lineitemfile, delimiter='\t')

# BUG FIX: use next(iterator, None) so end-of-file yields a None sentinel
# instead of raising an uncaught StopIteration, which previously crashed the
# script with a traceback once either CSV file was exhausted.
lineitemrow = next(lineitemcsv, None)
ordersrow = next(orderscsv, None)

while options.orderlimit > 0 and ordersprocessed < options.orderlimit and ordersrow:
    print("begin;", end=options.lineend)
    print("%s into orders values " % dml, end=options.lineend)
    print("("
          + ifnull(ordersrow[0])                 # o_orderkey      INTEGER NOT NULL,
          + "," + ifnull(ordersrow[1])           # o_custkey       INTEGER NOT NULL,
          + "," + ifnull(ordersrow[2], ischar=True)  # o_orderstatus   CHAR(1) NOT NULL,
          + "," + ifnull(ordersrow[3])           # o_totalprice    DECIMAL(15,2) NOT NULL,
          + "," + ifnull(ordersrow[4], ischar=True)  # o_orderdate     DATE NOT NULL,
          + "," + ifnull(ordersrow[5], ischar=True)  # o_orderpriority CHAR(15) NOT NULL,
          + "," + ifnull(ordersrow[6], ischar=True)  # o_clerk         CHAR(15) NOT NULL,
          + "," + ifnull(ordersrow[7])           # o_shippriority  INTEGER NOT NULL,
          + "," + ifnull(ordersrow[8], ischar=True)  # o_comment       VARCHAR(79) NOT NULL,
          + ")"
          + orders_ioc
          + parallel
          + ";",
          end=options.lineend)
    print("%s into lineitem values " % dml, end=options.lineend)
    prefix = ""
    # Emit every lineitem row belonging to the current order; the files are
    # sorted, so matching rows are contiguous.  Guard against EOF (None).
    while lineitemrow and lineitemrow[0] == ordersrow[0]:
        print(prefix + "("
              + ifnull(lineitemrow[0])               # l_orderkey      INTEGER NOT NULL,
              + "," + ifnull(lineitemrow[1])         # l_partkey       INTEGER NOT NULL,
              + "," + ifnull(lineitemrow[2])         # l_suppkey       INTEGER NOT NULL,
              + "," + ifnull(lineitemrow[3])         # l_linenumber    INTEGER NOT NULL,
              + "," + ifnull(lineitemrow[4])         # l_quantity      DECIMAL(15,2) NOT NULL,
              + "," + ifnull(lineitemrow[5])         # l_extendedprice DECIMAL(15,2) NOT NULL,
              + "," + ifnull(lineitemrow[6])         # l_discount      DECIMAL(15,2) NOT NULL,
              + "," + ifnull(lineitemrow[7])         # l_tax           DECIMAL(15,2) NOT NULL,
              + "," + ifnull(lineitemrow[8], ischar=True)   # l_returnflag  CHAR(1) NOT NULL,
              + "," + ifnull(lineitemrow[9], ischar=True)   # l_linestatus  CHAR(1) NOT NULL,
              + "," + ifnull(lineitemrow[10], ischar=True)  # l_shipdate    DATE NOT NULL,
              + "," + ifnull(lineitemrow[11], ischar=True)  # l_commitdate  DATE NOT NULL,
              + "," + ifnull(lineitemrow[12], ischar=True)  # l_receiptdate DATE NOT NULL,
              + "," + ifnull(lineitemrow[13], ischar=True)  # l_shipinstruct CHAR(25) NOT NULL,
              + "," + ifnull(lineitemrow[14], ischar=True)  # l_shipmode    CHAR(10) NOT NULL,
              + "," + ifnull(lineitemrow[15], ischar=True)  # l_comment     VARCHAR(44) NOT NULL,
              + ")",
              end=options.lineend)
        lineitemrow = next(lineitemcsv, None)
        prefix = ","
    print(lineitem_ioc
          + parallel
          + ";",
          end=options.lineend)
    print("commit;")
    ordersrow = next(orderscsv, None)
    ordersprocessed += 1
|
#
# Copyright 2018-2021 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from logging import Logger
import os
import sys
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
class Operation(object):
    """
    Represents a single operation in a pipeline representing a third-party component
    """

    # Classifiers handled by GenericOperation (built-in file-based nodes);
    # every other classifier is treated as a third-party component.
    generic_node_types = ["execute-notebook-node", "execute-python-node", "execute-r-node"]

    @classmethod
    def create_instance(cls, id: str, type: str, name: str, classifier: str,
                        parent_operation_ids: Optional[List[str]] = None,
                        component_params: Optional[Dict[str, Any]] = None) -> 'Operation':
        """Class method that creates the appropriate instance of Operation based on inputs. """
        if classifier in Operation.generic_node_types:
            return GenericOperation(id, type, name, classifier,
                                    parent_operation_ids=parent_operation_ids,
                                    component_params=component_params)
        return Operation(id, type, name, classifier,
                         parent_operation_ids=parent_operation_ids,
                         component_params=component_params)

    def __init__(self, id: str, type: str, name: str, classifier: str,
                 parent_operation_ids: Optional[List[str]] = None,
                 component_params: Optional[Dict[str, Any]] = None):
        """
        :param id: Generated UUID, 128 bit number used as a unique identifier
                   e.g. 123e4567-e89b-12d3-a456-426614174000
        :param type: The type of node e.g. execution_node
        :param classifier: indicates the operation's class
        :param name: The name of the operation
        :param parent_operation_ids: List of parent operation 'ids' required to execute prior to this operation
        :param component_params: dictionary of parameter key:value pairs that are used in the creation of
                                 a non-standard operation instance
        :raises ValueError: if any of id, type, classifier or name is missing
        """
        # Validate that the operation has all required properties
        if not id:
            raise ValueError("Invalid pipeline operation: Missing field 'operation id'.")
        if not type:
            raise ValueError("Invalid pipeline operation: Missing field 'operation type'.")
        if not classifier:
            raise ValueError("Invalid pipeline operation: Missing field 'operation classifier'.")
        if not name:
            raise ValueError("Invalid pipeline operation: Missing field 'operation name'.")

        self._id = id
        self._type = type
        self._classifier = classifier
        self._name = name
        self._parent_operation_ids = parent_operation_ids or []
        # BUG FIX: fall back to an empty dict when component_params is None
        # (its declared default); the scrubbing below previously raised
        # TypeError when subscripting None.
        self._component_params = component_params or {}

        # Scrub the inputs and outputs lists
        self._component_params["inputs"] = Operation._scrub_list(self._component_params.get('inputs', []))
        self._component_params["outputs"] = Operation._scrub_list(self._component_params.get('outputs', []))

    @property
    def id(self) -> str:
        return self._id

    @property
    def type(self) -> str:
        return self._type

    @property
    def classifier(self) -> str:
        return self._classifier

    @property
    def name(self) -> str:
        return self._name

    @property
    def parent_operation_ids(self) -> List[str]:
        return self._parent_operation_ids

    @property
    def component_params(self) -> Optional[Dict[str, Any]]:
        return self._component_params

    @property
    def component_params_as_dict(self) -> Dict[str, Any]:
        return self._component_params or {}

    @property
    def inputs(self) -> Optional[List[str]]:
        # Files consumed by this operation, produced by parent operation(s).
        return self._component_params.get('inputs')

    @inputs.setter
    def inputs(self, value: List[str]):
        self._component_params['inputs'] = value

    @property
    def outputs(self) -> Optional[List[str]]:
        # Files produced by this operation for child operation(s).
        return self._component_params.get('outputs')

    @outputs.setter
    def outputs(self, value: List[str]):
        self._component_params['outputs'] = value

    def __eq__(self, other: 'Operation') -> bool:
        # Field-wise equality; only meaningful for instances of the same class.
        if isinstance(self, other.__class__):
            return self.id == other.id and \
                self.type == other.type and \
                self.classifier == other.classifier and \
                self.name == other.name and \
                self.parent_operation_ids == other.parent_operation_ids and \
                self.component_params == other.component_params
        return False

    def __str__(self) -> str:
        params = ""
        for key, value in self.component_params_as_dict.items():
            params += f"\t{key}: {value}, \n"

        return f"componentID : {self.id} \n " \
               f"name : {self.name} \n " \
               f"parent_operation_ids : {self.parent_operation_ids} \n " \
               f"component_parameters: {{\n{params}}} \n"

    @staticmethod
    def _log_info(msg: str, logger: Optional[Logger] = None):
        """Log `msg` at INFO level, or print it when no logger is available."""
        if logger:
            logger.info(msg)
        else:
            print(msg)

    @staticmethod
    def _log_warning(msg: str, logger: Optional[Logger] = None):
        """Log `msg` at WARNING level, or print it when no logger is available."""
        if logger:
            logger.warning(msg)
        else:
            print(f"WARNING: {msg}")

    @staticmethod
    def _scrub_list(dirty: Optional[List[Optional[str]]]) -> List[str]:
        """
        Clean an existing list by filtering out None and empty string values
        :param dirty: a List of values
        :return: a clean list without None or empty string values
        """
        if not dirty:
            return []
        return [clean for clean in dirty if clean]

    @staticmethod
    def is_generic_operation(operation_type) -> bool:
        """Return True if `operation_type` is one of the built-in node classifiers."""
        return operation_type in Operation.generic_node_types
class GenericOperation(Operation):
    """
    Represents a single operation in a pipeline representing a generic (built-in) component
    """

    def __init__(self, id: str, type: str, name: str, classifier: str,
                 parent_operation_ids: Optional[List[str]] = None,
                 component_params: Optional[Dict[str, Any]] = None):
        """
        :param id: Generated UUID, 128 bit number used as a unique identifier
                   e.g. 123e4567-e89b-12d3-a456-426614174000
        :param type: The type of node e.g. execution_node
        :param classifier: indicates the operation's class
        :param name: The name of the operation
        :param parent_operation_ids: List of parent operation 'ids' required to execute prior to this operation
        :param component_params: dictionary of parameter key:value pairs that are used in the creation of
                                 a non-standard operation instance

        Component_params for "generic components" (i.e., those with one of the following classifier values:
        ["execute-notebook-node", "execute-python-node", "execute-r-node"]) can expect to have the following
        entries.
            filename: The relative path to the source file in the users local environment
                      to be executed e.g. path/to/file.ext
            runtime_image: The DockerHub image to be used for the operation
                           e.g. user/docker_image_name:tag
            dependencies: List of local files/directories needed for the operation to run
                          and packaged into each operation's dependency archive
            include_subdirectories: Include or Exclude subdirectories when packaging our 'dependencies'
            env_vars: List of Environmental variables to set in the docker image
                      e.g. FOO="BAR"
            inputs: List of files to be consumed by this operation, produced by parent operation(s)
            outputs: List of files produced by this operation to be included in a child operation(s)
            cpu: number of cpus requested to run the operation
            memory: amount of memory requested to run the operation (in Gi)
            gpu: number of gpus requested to run the operation
        Entries for other (non-built-in) component types are a function of the respective component.

        :raises ValueError: if a required field is missing or a resource value is out of range
        """
        # BUG FIX: normalize None to an empty dict up front so a missing
        # component_params surfaces as the clear ValueError below rather than
        # an AttributeError on `component_params.get`.
        component_params = component_params or {}

        super().__init__(id, type, name, classifier,
                         parent_operation_ids=parent_operation_ids,
                         component_params=component_params)

        if not component_params.get('filename'):
            raise ValueError("Invalid pipeline operation: Missing field 'operation filename'.")
        if not component_params.get('runtime_image'):
            raise ValueError("Invalid pipeline operation: Missing field 'operation runtime image'.")
        # Resource checks are skipped for falsy values (None, 0, "") so that
        # unspecified resources remain valid.
        if component_params.get('cpu') and not self._validate_range(component_params.get('cpu'), min_value=1):
            raise ValueError("Invalid pipeline operation: CPU must be a positive value or None")
        if component_params.get('gpu') and not self._validate_range(component_params.get('gpu'), min_value=0):
            raise ValueError("Invalid pipeline operation: GPU must be a positive value or None")
        if component_params.get('memory') and not self._validate_range(component_params.get('memory'), min_value=1):
            raise ValueError("Invalid pipeline operation: Memory must be a positive value or None")

        # Re-build object to include default values
        self._component_params["filename"] = component_params.get('filename')
        self._component_params["runtime_image"] = component_params.get('runtime_image')
        self._component_params["dependencies"] = Operation._scrub_list(component_params.get('dependencies', []))
        self._component_params["include_subdirectories"] = component_params.get('include_subdirectories', False)
        self._component_params["env_vars"] = Operation._scrub_list(component_params.get('env_vars', []))
        self._component_params["cpu"] = component_params.get('cpu')
        self._component_params["gpu"] = component_params.get('gpu')
        self._component_params["memory"] = component_params.get('memory')

    @property
    def name(self) -> str:
        # When the display name equals the file's base name, strip the
        # extension so the operation is shown by its bare name.
        if self._name == os.path.basename(self.filename):
            self._name = os.path.basename(self._name).split(".")[0]
        return self._name

    @property
    def filename(self) -> str:
        return self._component_params.get('filename')

    @property
    def runtime_image(self) -> str:
        return self._component_params.get('runtime_image')

    @property
    def dependencies(self) -> Optional[List[str]]:
        return self._component_params.get('dependencies')

    @property
    def include_subdirectories(self) -> Optional[bool]:
        return self._component_params.get('include_subdirectories')

    @property
    def env_vars(self) -> Optional[List[str]]:
        return self._component_params.get('env_vars')

    @property
    def cpu(self) -> Optional[str]:
        return self._component_params.get('cpu')

    @property
    def memory(self) -> Optional[str]:
        return self._component_params.get('memory')

    @property
    def gpu(self) -> Optional[str]:
        return self._component_params.get('gpu')

    def __eq__(self, other: 'GenericOperation') -> bool:
        if isinstance(self, other.__class__):
            return super().__eq__(other)
        return False

    def _validate_range(self, value: str, min_value: int = 0, max_value: int = sys.maxsize) -> bool:
        """Return True if int(value) lies in [min_value, max_value); may raise on non-numeric input."""
        return int(value) in range(min_value, max_value)

    def env_vars_as_dict(self, logger: Optional[Logger] = None) -> Dict[str, str]:
        """
        Operation stores environment variables in a list of name=value pairs, while
        subprocess.run() requires a dictionary - so we must convert.  If no envs are
        configured on the Operation, an empty dictionary is returned, otherwise envs
        configured on the Operation are converted to dictionary entries and returned.
        """
        envs = {}
        for nv in self.env_vars:
            if nv:
                # Split on the first '=' only so values may themselves contain '='.
                nv_pair = nv.split("=", 1)
                if len(nv_pair) == 2 and nv_pair[0].strip():
                    if len(nv_pair[1]) > 0:
                        envs[nv_pair[0]] = nv_pair[1]
                    else:
                        Operation._log_info(f"Skipping inclusion of environment variable: "
                                            f"`{nv_pair[0]}` has no value...",
                                            logger=logger)
                else:
                    Operation._log_warning(f"Could not process environment variable entry `{nv}`, skipping...",
                                           logger=logger)
        return envs
class Pipeline(object):
    """
    Represents a single pipeline constructed in the pipeline editor
    """
    def __init__(self, id: str, name: str, runtime: str, runtime_config: str, source: Optional[str] = None):
        """
        :param id: Generated UUID, 128 bit number used as a unique identifier
                   e.g. 123e4567-e89b-12d3-a456-426614174000
        :param name: Pipeline name
                     e.g. test-pipeline-123456
        :param runtime: Type of runtime we want to use to execute our pipeline
                        e.g. kfp OR airflow
        :param runtime_config: Runtime configuration that should be used to submit the pipeline to execution
        :param source: The pipeline source, e.g. a pipeline file or a notebook.
        :raises ValueError: if name, runtime or runtime_config is missing
        """
        # Fail fast on required fields; `id` and `source` may be empty.
        if not name:
            raise ValueError('Invalid pipeline: Missing pipeline name.')
        if not runtime:
            raise ValueError('Invalid pipeline: Missing runtime.')
        if not runtime_config:
            raise ValueError('Invalid pipeline: Missing runtime configuration.')
        self._id = id
        self._name = name
        self._source = source
        self._runtime = runtime
        self._runtime_config = runtime_config
        self._operations = {}  # operation id -> Operation, filled in after construction
    @property
    def id(self) -> str:
        return self._id
    @property
    def name(self) -> str:
        return self._name
    @property
    def source(self) -> str:
        return self._source
    @property
    def runtime(self) -> str:
        """
        Describe the runtime type where the pipeline will be executed
        """
        return self._runtime
    @property
    def runtime_config(self) -> str:
        """
        Describe the runtime configuration that should be used to submit the pipeline to execution
        """
        return self._runtime_config
    @property
    def operations(self) -> Dict[str, Operation]:
        return self._operations
    def __eq__(self, other: 'Pipeline') -> bool:
        # NOTE(review): no explicit `return False` fallthrough is visible here,
        # so comparing against another class returns None (falsy) -- confirm
        # whether the file is truncated at this point.
        if isinstance(self, other.__class__):
            return self.id == other.id and \
                self.name == other.name and \
                self.source == other.source and \
                self.runtime == other.runtime and \
                self.runtime_config == other.runtime_config and \
                self.operations == other.operations
|
# -*- coding: utf-8 -*-
# see LICENSE.rst
"""Test Unit Module."""
# Public test submodules re-exported by this package via `from package import *`.
__all__ = [
    # core and decorators
    "test_core",
    "test_decorators",
    # added units
    "test_amuse",
    "test_composite",
    "test_full_amuse",
]
##############################################################################
# IMPORTS
from . import test_composite # core and decorators; added units
from . import test_amuse, test_core, test_decorators, test_full_amuse
##############################################################################
# END
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encodes the training data with extracted features."""
import argparse
from context import feature_extractor
def main():
  """Parse command-line arguments and encode the source training data."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      'source_data',
      help='''File path of the source training data to extract features.''')
  parser.add_argument(
      '-o',
      '--outfile',
      help='''Output file path for the encoded training data.
      (default: encoded_data.txt)''',
      default='encoded_data.txt')
  args = parser.parse_args()
  # Run the encoder from the source file into the requested output file.
  feature_extractor.process(args.source_data, args.outfile)
  # ANSI green (\033[92m ... \033[0m) highlights the output path in terminals.
  print('\033[92mEncoded training data is output to: %s\033[0m' %
        (args.outfile))
# Run the encoder when executed as a script.
if __name__ == '__main__':
  main()
|
import json
import os
import logging
import great_expectations as ge
from datetime import datetime
import tzlocal
from IPython.core.display import display, HTML
def set_data_source(context, data_source_type=None):
    """Resolve which configured data source the notebook should use.

    Looks at ``context.list_datasources()`` (optionally filtered by
    ``data_source_type``), displays HTML guidance for the 0 / many cases, and
    returns the single matching data source name, or ``None`` when there is
    not exactly one match.
    """
    data_source_name = None
    if not data_source_type:
        # No type filter: consider every configured data source.
        configured_datasources = list(context.list_datasources())
        if len(configured_datasources) == 0:
            # FIX: this template has no placeholders, so the previous
            # ``.format(data_source_type)`` call was dead code — removed.
            display(HTML("""
<p>
No data sources found in the great_expectations.yml of your project.
</p>
<p>
If you did not create the data source during init, here is how to add it now: <a href="https://great-expectations.readthedocs.io/en/latest/how_to_add_data_source.html">How To Add a Data Source</a>
</p>
"""))
        elif len(configured_datasources) > 1:
            # FIX: placeholders previously started at {1:s} with an unused
            # first argument; renumbered to {0:s} — rendered output unchanged.
            display(HTML("""
<p>
Found more than one data source in the great_expectations.yml of your project:
<b>{0:s}</b>
</p>
<p>
Uncomment the next cell and set data_source_name to one of these names.
</p>
""".format(','.join([datasource['name'] for datasource in configured_datasources]))))
        else:
            data_source_name = configured_datasources[0]['name']
            display(HTML("Will be using this data source from your project's great_expectations.yml: <b>{0:s}</b>".format(data_source_name)))
    else:
        # Keep only the data sources of the requested type.
        configured_datasources = [datasource['name'] for datasource in context.list_datasources() if
                                  datasource['type'] == data_source_type]
        if len(configured_datasources) == 0:
            display(HTML("""
<p>
No {0:s} data sources found in the great_expectations.yml of your project.
</p>
<p>
If you did not create the data source during init, here is how to add it now: <a href="https://great-expectations.readthedocs.io/en/latest/how_to_add_data_source.html">How To Add a Data Source</a>
</p>
""".format(data_source_type)))
        elif len(configured_datasources) > 1:
            display(HTML("""
<p>
Found more than one {0:s} data source in the great_expectations.yml of your project:
<b>{1:s}</b>
</p>
<p>
Uncomment the next cell and set data_source_name to one of these names.
</p>
""".format(data_source_type, ','.join(configured_datasources))))
        else:
            data_source_name = configured_datasources[0]
            display(HTML("Will be using this {0:s} data source from your project's great_expectations.yml: <b>{1:s}</b>".format(data_source_type, data_source_name)))
    return data_source_name
def setup_notebook_logging(logger=None):
    """Attach a console handler with local-time timestamps to *logger*.

    Defaults to the root logger; also silences Python warnings so notebook
    output stays clean.
    """
    def _as_local_datetime(epoch_seconds, tz=tzlocal.get_localzone()):
        """Seconds since the epoch -> aware datetime in the local zone."""
        return datetime.fromtimestamp(epoch_seconds, tz)

    class _LocalTimeFormatter(logging.Formatter):
        def converter(self, timestamp):
            return _as_local_datetime(timestamp)

        def formatTime(self, record, datefmt=None):
            moment = self.converter(record.created)
            if datefmt:
                return moment.strftime(datefmt)
            base = moment.strftime(self.default_time_format)
            return self.default_msec_format % (base, record.msecs)

    target_logger = logger if logger else logging.getLogger()
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(
        _LocalTimeFormatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            "%Y-%m-%dT%H:%M:%S%z",
        )
    )
    target_logger.addHandler(console_handler)
    target_logger.setLevel(logging.ERROR)
    logging.debug("test")
    # Filter warnings
    import warnings
    warnings.filterwarnings('ignore')
def list_available_data_asset_names(context, data_source_name=None):
    """Print every data asset reachable from the project's data sources.

    When *data_source_name* is given, only that data source is listed.
    """
    for datasource in context.list_datasources():
        if data_source_name and datasource['name'] != data_source_name:
            continue
        print('data source: {0:s} ({1:s})'.format(datasource['name'], datasource['type']))
        ds = context.get_datasource(datasource['name'])
        for generator_info in ds.list_generators():
            print('    generator: {0:s} ({1:s})'.format(generator_info['name'], generator_info['type']))
            generator = ds.get_generator(generator_info['name'])
            data_asset_names = generator.get_available_data_asset_names()
            if not data_asset_names:
                display(HTML("""
<p>
No data assets found in this data source.
</p>
<p>
Read about how generators derive data assets from data sources: <a href="https://great-expectations.readthedocs.io/en/latest/how_to_add_data_source.html">Data assets</a>
</p>
"""))
                continue
            for data_asset_name in data_asset_names:
                print('    data asset: {0:s}. (Use this as an arg to get_batch)'.format(data_asset_name))
    #TODO: add expectation suite names (existing)
|
"""
Defines the preset values in the mimic api.
"""
from __future__ import absolute_import, division, unicode_literals
get_presets = {"loadbalancers": {"lb_building": "On create load balancer, keeps the load balancer in "
"building state for given seconds",
"lb_error_state": "Puts the LB in error state, and such an LB can only"
"be deleted",
"lb_pending_update": "Changes the load balancer to PENDING-UPDATE"
"state for the given number of seconds, any action"
"other than delete is performed on the server",
"lb_pending_delete": "Changes the load balancer to PENDING-DELETE"
"state for the given seconds, when deleted"},
"servers": {"create_server_failure": "{\"message\": \"given message\","
"\"code\": given code}",
"delete_server_failure": "{\"code\": given code,"
"\"times\": returns given code that many times}",
"invalid_image_ref": ["INVALID-IMAGE-ID", "1111", "image_ends_with_Z"],
"invalid_flavor_ref": ["INVALID-FLAVOR-ID", "8888", "-4", "1"],
"server_error": "sets server state to error on create",
"server_building": "sets the server to be in building state for given time"
" in seconds"},
"identity": {
# On ``validate_token`` the tokens listed below
# result in 'monitoring-service-admin' impersonator role.
"maas_admin_roles": [
"this_is_an_impersonator_token",
"this_is_an_impersonator_token_also",
"impersonate_watson",
"impersonate_creator",
"this_is_an_impersonator_token_also_2",
"impersonate_foo_token"],
# On ``validate_token`` the tokens listed below
# result in 'racker' impersonator role.
"racker_token": ["this_is_a_racker_token"],
# Tenants with user observer role
"observer_role": ["09876"],
# Tenants with user creator role
"creator_role": ["09090"],
# Tenants with user admin role
"admin_role": ["9999"],
# Tenants with this token result in a 401 when validating the token
"token_fail_to_auth": ["never-cache-this-and-fail-to-auth"],
# Users presenting these tokens have contact IDs that correspond
# to presets in the Valkyrie plugin...
"non_dedicated_observer": ["OneTwo"],
"non_dedicated_admin": ["ThreeFour"],
"non_dedicated_impersonator": ["ThreeFourImpersonator"],
"non_dedicated_racker": ["ThreeFourRacker"],
"dedicated_full_device_permission_holder": ["HybridOneTwo"],
"dedicated_account_permission_holder": ["HybridThreeFour"],
"dedicated_impersonator": ["HybridThreeFourImpersonator"],
"dedicated_racker": ["HybridOneTwoRacker"],
"dedicated_limited_device_permission_holder": ["HybridFiveSix"],
"dedicated_non_permission_holder": ["HybridSevenEight"],
"dedicated_quasi_user_impersonator": ["HybridNineZero"]}}
|
# DO NOT EDIT! This file is automatically generated
import typing
from commercetools.helpers import RemoveEmptyValuesMixin
from commercetools.platform.models.tax_category import (
TaxCategory,
TaxCategoryDraft,
TaxCategoryPagedQueryResponse,
TaxCategoryUpdate,
TaxCategoryUpdateAction,
)
from commercetools.typing import OptionalListStr
from . import abstract, traits
class _TaxCategoryQuerySchema(
    traits.ExpandableSchema,
    traits.SortableSchema,
    traits.PagingSchema,
    traits.QuerySchema,
):
    """Request-parameter schema for querying tax categories (expand/sort/paging/where)."""
    pass
class _TaxCategoryUpdateSchema(traits.ExpandableSchema, traits.VersionedSchema):
    """Request-parameter schema for tax category updates (expand + version)."""
    pass
class _TaxCategoryDeleteSchema(traits.VersionedSchema, traits.ExpandableSchema):
    """Request-parameter schema for tax category deletes (version + expand)."""
    pass
class TaxCategoryService(abstract.AbstractService):
    """Tax Categories define how products are to be taxed in different countries."""
    # NOTE(review): this file is generated ("DO NOT EDIT" header) — only
    # comments/docstrings are added here; code is left byte-identical.
    def get_by_id(self, id: str, *, expand: OptionalListStr = None) -> TaxCategory:
        """Fetch a tax category by its ID; *expand* selects reference expansion."""
        params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
        return self._client._get(
            endpoint=f"tax-categories/{id}", params=params, response_class=TaxCategory
        )
    def get_by_key(self, key: str, *, expand: OptionalListStr = None) -> TaxCategory:
        """Fetch a tax category by its user-defined key."""
        params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
        return self._client._get(
            endpoint=f"tax-categories/key={key}",
            params=params,
            response_class=TaxCategory,
        )
    def query(
        self,
        *,
        expand: OptionalListStr = None,
        sort: OptionalListStr = None,
        limit: int = None,
        offset: int = None,
        with_total: bool = None,
        where: OptionalListStr = None,
        predicate_var: typing.Dict[str, str] = None,
    ) -> TaxCategoryPagedQueryResponse:
        """Tax Categories define how products are to be taxed in different
        countries.
        """
        params = self._serialize_params(
            {
                "expand": expand,
                "sort": sort,
                "limit": limit,
                "offset": offset,
                "with_total": with_total,
                "where": where,
                "predicate_var": predicate_var,
            },
            _TaxCategoryQuerySchema,
        )
        return self._client._get(
            endpoint="tax-categories",
            params=params,
            response_class=TaxCategoryPagedQueryResponse,
        )
    def create(
        self, draft: TaxCategoryDraft, *, expand: OptionalListStr = None
    ) -> TaxCategory:
        """Tax Categories define how products are to be taxed in different
        countries.
        """
        params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
        return self._client._post(
            endpoint="tax-categories",
            params=params,
            data_object=draft,
            response_class=TaxCategory,
        )
    def update_by_id(
        self,
        id: str,
        version: int,
        actions: typing.List[TaxCategoryUpdateAction],
        *,
        expand: OptionalListStr = None,
        force_update: bool = False,
    ) -> TaxCategory:
        """Apply update *actions* to the tax category with this ID at *version*."""
        params = self._serialize_params({"expand": expand}, _TaxCategoryUpdateSchema)
        update_action = TaxCategoryUpdate(version=version, actions=actions)
        return self._client._post(
            endpoint=f"tax-categories/{id}",
            params=params,
            data_object=update_action,
            response_class=TaxCategory,
            force_update=force_update,
        )
    def update_by_key(
        self,
        key: str,
        version: int,
        actions: typing.List[TaxCategoryUpdateAction],
        *,
        expand: OptionalListStr = None,
        force_update: bool = False,
    ) -> TaxCategory:
        """Apply update *actions* to the tax category with this key at *version*."""
        params = self._serialize_params({"expand": expand}, _TaxCategoryUpdateSchema)
        update_action = TaxCategoryUpdate(version=version, actions=actions)
        return self._client._post(
            endpoint=f"tax-categories/key={key}",
            params=params,
            data_object=update_action,
            response_class=TaxCategory,
            force_update=force_update,
        )
    def delete_by_id(
        self,
        id: str,
        version: int,
        *,
        expand: OptionalListStr = None,
        force_delete: bool = False,
    ) -> TaxCategory:
        """Delete the tax category with this ID at *version*."""
        params = self._serialize_params(
            {"version": version, "expand": expand}, _TaxCategoryDeleteSchema
        )
        return self._client._delete(
            endpoint=f"tax-categories/{id}",
            params=params,
            response_class=TaxCategory,
            force_delete=force_delete,
        )
    def delete_by_key(
        self,
        key: str,
        version: int,
        *,
        expand: OptionalListStr = None,
        force_delete: bool = False,
    ) -> TaxCategory:
        """Delete the tax category with this key at *version*."""
        params = self._serialize_params(
            {"version": version, "expand": expand}, _TaxCategoryDeleteSchema
        )
        return self._client._delete(
            endpoint=f"tax-categories/key={key}",
            params=params,
            response_class=TaxCategory,
            force_delete=force_delete,
        )
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import io
import itertools
import json
import string
import unittest
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.json import read_json, ReadOptions, ParseOptions
def generate_col_names():
    """Yield spreadsheet-style column names: 'a'..'z', then 'aa', 'ab', ..."""
    alphabet = string.ascii_lowercase
    for single in alphabet:
        yield single
    for pair in itertools.product(alphabet, repeat=2):
        yield ''.join(pair)
def make_random_json(num_cols=2, num_rows=10, linesep='\r\n'):
    """Build (encoded JSON-lines bytes, expected pyarrow Table) of random ints.

    The RandomState seed is fixed so output is deterministic.
    """
    rng = np.random.RandomState(42)
    arr = rng.randint(0, 1000, size=(num_cols, num_rows))
    col_names = list(itertools.islice(generate_col_names(), num_cols))
    lines = [
        json.dumps(OrderedDict((name, int(value))
                               for name, value in zip(col_names, row)))
        for row in arr.T
    ]
    data = linesep.join(lines).encode()
    columns = [pa.array(col, type=pa.int64()) for col in arr]
    return data, pa.Table.from_arrays(columns, col_names)
def test_read_options():
    """ReadOptions: defaults, attribute mutation, and constructor kwargs."""
    options = ReadOptions()
    assert options.block_size > 0
    options.block_size = 12345
    assert options.block_size == 12345
    assert options.use_threads is True
    options.use_threads = False
    assert options.use_threads is False
    # Constructor keyword arguments behave like attribute assignment.
    options = ReadOptions(block_size=1234, use_threads=False)
    assert options.block_size == 1234
    assert options.use_threads is False
def test_parse_options():
    """ParseOptions: defaults, schema assignment and field-behavior validation."""
    options = ParseOptions()
    assert options.newlines_in_values is False
    assert options.explicit_schema is None
    options.newlines_in_values = True
    assert options.newlines_in_values is True
    schema = pa.schema([pa.field('foo', pa.int32())])
    options.explicit_schema = schema
    assert options.explicit_schema == schema
    assert options.unexpected_field_behavior == "infer"
    # All three documented modes round-trip through the attribute.
    for value in ["ignore", "error", "infer"]:
        options.unexpected_field_behavior = value
        assert options.unexpected_field_behavior == value
    with pytest.raises(ValueError):
        options.unexpected_field_behavior = "invalid-value"
class BaseTestJSONRead:
    """Shared read_json() tests; subclasses supply read_json (serial/threaded)."""
    def read_bytes(self, b, **kwargs):
        # Wrap raw bytes in an Arrow buffer and delegate to the subclass reader.
        return self.read_json(pa.py_buffer(b), **kwargs)
    def check_names(self, table, names):
        # Assert the table has exactly these column names, in order.
        assert table.num_columns == len(names)
        assert [c.name for c in table.columns] == names
    def test_file_object(self):
        data = b'{"a": 1, "b": 2}\n'
        expected_data = {'a': [1], 'b': [2]}
        bio = io.BytesIO(data)
        table = self.read_json(bio)
        assert table.to_pydict() == expected_data
        # Text files not allowed
        sio = io.StringIO(data.decode())
        with pytest.raises(TypeError):
            self.read_json(sio)
    def test_block_sizes(self):
        rows = b'{"a": 1}\n{"a": 2}\n{"a": 3}'
        read_options = ReadOptions()
        parse_options = ParseOptions()
        for data in [rows, rows + b'\n']:
            for newlines_in_values in [False, True]:
                parse_options.newlines_in_values = newlines_in_values
                # A block too small to hold a full row must raise cleanly.
                read_options.block_size = 4
                with pytest.raises(ValueError,
                                   match="try to increase block size"):
                    self.read_bytes(data, read_options=read_options,
                                    parse_options=parse_options)
                # Validate reader behavior with various block sizes.
                # There used to be bugs in this area.
                for block_size in range(9, 20):
                    read_options.block_size = block_size
                    table = self.read_bytes(data, read_options=read_options,
                                            parse_options=parse_options)
                    assert table.to_pydict() == {'a': [1, 2, 3]}
    def test_no_newline_at_end(self):
        rows = b'{"a": 1,"b": 2, "c": 3}\n{"a": 4,"b": 5, "c": 6}'
        table = self.read_bytes(rows)
        assert table.to_pydict() == {
            'a': [1, 4],
            'b': [2, 5],
            'c': [3, 6],
        }
    def test_simple_ints(self):
        # Infer integer columns
        rows = b'{"a": 1,"b": 2, "c": 3}\n{"a": 4,"b": 5, "c": 6}\n'
        table = self.read_bytes(rows)
        schema = pa.schema([('a', pa.int64()),
                            ('b', pa.int64()),
                            ('c', pa.int64())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': [1, 4],
            'b': [2, 5],
            'c': [3, 6],
        }
    def test_simple_varied(self):
        # Infer various kinds of data
        rows = (b'{"a": 1,"b": 2, "c": "3", "d": false}\n'
                b'{"a": 4.0, "b": -5, "c": "foo", "d": true}\n')
        table = self.read_bytes(rows)
        schema = pa.schema([('a', pa.float64()),
                            ('b', pa.int64()),
                            ('c', pa.string()),
                            ('d', pa.bool_())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': [1.0, 4.0],
            'b': [2, -5],
            'c': ["3", "foo"],
            'd': [False, True],
        }
    def test_simple_nulls(self):
        # Infer various kinds of data, with nulls
        rows = (b'{"a": 1, "b": 2, "c": null, "d": null, "e": null}\n'
                b'{"a": null, "b": -5, "c": "foo", "d": null, "e": true}\n'
                b'{"a": 4.5, "b": null, "c": "nan", "d": null,"e": false}\n')
        table = self.read_bytes(rows)
        schema = pa.schema([('a', pa.float64()),
                            ('b', pa.int64()),
                            ('c', pa.string()),
                            ('d', pa.null()),
                            ('e', pa.bool_())])
        assert table.schema == schema
        assert table.to_pydict() == {
            'a': [1.0, None, 4.5],
            'b': [2, -5, None],
            'c': [None, "foo", "nan"],
            'd': [None, None, None],
            'e': [None, True, False],
        }
    def test_empty_lists(self):
        # ARROW-10955: Infer list(null)
        rows = b'{"a": []}'
        table = self.read_bytes(rows)
        schema = pa.schema([('a', pa.list_(pa.null()))])
        assert table.schema == schema
        assert table.to_pydict() == {'a': [[]]}
    def test_empty_rows(self):
        # Rows with no fields produce a zero-column, two-row table.
        rows = b'{}\n{}\n'
        table = self.read_bytes(rows)
        schema = pa.schema([])
        assert table.schema == schema
        assert table.num_columns == 0
        assert table.num_rows == 2
    def test_reconcile_accross_blocks(self):
        # ARROW-12065: reconciling inferred types across blocks
        first_row = b'{ }\n'
        read_options = ReadOptions(block_size=len(first_row))
        for next_rows, expected_pylist in [
            (b'{"a": 0}', [None, 0]),
            (b'{"a": []}', [None, []]),
            (b'{"a": []}\n{"a": [[1]]}', [None, [], [[1]]]),
            (b'{"a": {}}', [None, {}]),
            (b'{"a": {}}\n{"a": {"b": {"c": 1}}}',
             [None, {"b": None}, {"b": {"c": 1}}]),
        ]:
            table = self.read_bytes(first_row + next_rows,
                                    read_options=read_options)
            expected = {"a": expected_pylist}
            assert table.to_pydict() == expected
            # Check that the issue was exercised
            assert table.column("a").num_chunks > 1
    def test_explicit_schema_with_unexpected_behaviour(self):
        # infer by default
        rows = (b'{"foo": "bar", "num": 0}\n'
                b'{"foo": "baz", "num": 1}\n')
        schema = pa.schema([
            ('foo', pa.binary())
        ])
        opts = ParseOptions(explicit_schema=schema)
        table = self.read_bytes(rows, parse_options=opts)
        assert table.schema == pa.schema([
            ('foo', pa.binary()),
            ('num', pa.int64())
        ])
        assert table.to_pydict() == {
            'foo': [b'bar', b'baz'],
            'num': [0, 1],
        }
        # ignore the unexpected fields
        opts = ParseOptions(explicit_schema=schema,
                            unexpected_field_behavior="ignore")
        table = self.read_bytes(rows, parse_options=opts)
        assert table.schema == pa.schema([
            ('foo', pa.binary()),
        ])
        assert table.to_pydict() == {
            'foo': [b'bar', b'baz'],
        }
        # raise error
        opts = ParseOptions(explicit_schema=schema,
                            unexpected_field_behavior="error")
        with pytest.raises(pa.ArrowInvalid,
                           match="JSON parse error: unexpected field"):
            self.read_bytes(rows, parse_options=opts)
    def test_small_random_json(self):
        data, expected = make_random_json(num_cols=2, num_rows=10)
        table = self.read_bytes(data)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()
    def test_stress_block_sizes(self):
        # Test a number of small block sizes to stress block stitching
        data_base, expected = make_random_json(num_cols=2, num_rows=100)
        read_options = ReadOptions()
        parse_options = ParseOptions()
        for data in [data_base, data_base.rstrip(b'\r\n')]:
            for newlines_in_values in [False, True]:
                parse_options.newlines_in_values = newlines_in_values
                for block_size in [22, 23, 37]:
                    read_options.block_size = block_size
                    table = self.read_bytes(data, read_options=read_options,
                                            parse_options=parse_options)
                    assert table.schema == expected.schema
                    if not table.equals(expected):
                        # Better error output
                        assert table.to_pydict() == expected.to_pydict()
class TestSerialJSONRead(BaseTestJSONRead, unittest.TestCase):
    """Run the shared JSON-read tests with threading disabled."""

    def read_json(self, *args, **kwargs):
        options = kwargs.setdefault('read_options', ReadOptions())
        options.use_threads = False
        result = read_json(*args, **kwargs)
        result.validate(full=True)
        return result
class TestParallelJSONRead(BaseTestJSONRead, unittest.TestCase):
    """Run the shared JSON-read tests with threading enabled."""

    def read_json(self, *args, **kwargs):
        options = kwargs.setdefault('read_options', ReadOptions())
        options.use_threads = True
        result = read_json(*args, **kwargs)
        result.validate(full=True)
        return result
|
"""
This module automatically converts sorted vardict output to standard .vcf format.
Author: Nick Veltmaat
Date: 19-11-2021
"""
import pandas as pd
import glob
import sys
if len(sys.argv) != 3:
print("Usage:\t" + sys.argv[0] + "\t<input_sorted_vardict_file_path>\t<output_path_filename>")
exit(0)
file = glob.glob(str(sys.argv[1]))
data = pd.read_csv(file[0], sep='\t', header=None)
df = data.rename(columns={1: '#CHROM', 3: 'POS', 5:"REF", 6:"ALT"})
df[7] = 'DP=' + df[7].astype(str)
df[8] = 'ADP=' + df[8].astype(str)
df[9] = 'RFW=' + df[9].astype(str)
df[10] = 'RREV=' + df[10].astype(str)
df[11] = 'AFW=' + df[11].astype(str)
df[12] = 'AREV=' + df[12].astype(str)
df[13] = 'GT=' + df[13].astype(str)
df[14] = 'AF=' + df[14].astype(str)
df[15] = 'BIAS=' + df[15].astype(str).str.replace(';','|')
df[16] = 'PMEAN=' + df[16].astype(str)
df[17] = 'PSTD=' + df[17].astype(str)
df[18] = 'QMean=' + df[18].astype(str)
df[19] = 'QStd=' + df[19].astype(str)
df[20] = 'MQ=' + df[20].astype(str)
df[21] = 'SN=' + df[21].astype(str)
df[22] = 'HiAF=' + df[22].astype(str)
df[23] = 'ExAF=' + df[23].astype(str)
df[24] = 'SHIFT3=' + df[24].astype(str)
df[25] = 'MSI=' + df[25].astype(str)
df[26] = 'MSINT=' + df[26].astype(str)
df[27] = 'NM=' + df[27].astype(str)
df[28] = 'HiCnt=' + df[28].astype(str)
df[29] = 'HiCov=' + df[29].astype(str)
df[30] = '5pFS=' + df[30].astype(str)
df[31] = '3pFS=' + df[31].astype(str)
df[32] = 'Seg=' + df[32].astype(str)
df[33] = 'VarType=' + df[33].astype(str)
df = df.drop([0, 2, 4, 34, 35], axis=1)
df['INFO'] = df[df.columns[4:]].apply( lambda x: ';'.join(x.astype(str)), axis=1)
df = df.filter(['#CHROM', 'POS', 'REF', 'ALT', 'INFO'])
df['ID'], df['QUAL'], df['FILTER'] = ['.', '.', '.']
df = df[['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']]
file1 = open(str(sys.argv[2]),"w",newline='')
file1.write('''##fileformat=VCFv4.3\n''')
file1.write('''##INFO=<ID=DP,Number=1,Type=Integer,Description="Depth, total Coverage">\n''')
file1.write('''##INFO=<ID=ADP,Number=1,Type=Integer,Description="No. of reads supporting alternative allele">\n''')
file1.write('''##INFO=<ID=RFW,Number=1,Type=Integer,Description="No. of reads in forward orientation supporting reference allele">\n''')
file1.write('''##INFO=<ID=RREV,Number=1,Type=Integer,Description="No. of reads in reverse orientation supporting reference allele">\n''')
file1.write('''##INFO=<ID=AFW,Number=1,Type=Integer,Description="No. of reads in forward orientation supporting alternative allele">\n''')
file1.write('''##INFO=<ID=AREV,Number=1,Type=Integer,Description="No. of reads in reverse orientation supporting alternative allele">\n''')
file1.write('''##INFO=<ID=GT,Number=1,Type=Flag,Description="Genotype">\n''')
file1.write('''##INFO=<ID=AF,Number=1,Type=Float,Description="Allele frequency in fraction [0-1]">\n''')
file1.write('''##INFO=<ID=BIAS,Number=1,Type=Flag,Description="Whether there’s strand bias. It consists two numbers. First number is for reference allele. 2nd number is for alternative allele. Numbers are 0-2. 0 means not enough to determine bias. 1 means only one orientation observed. 2 means both orientations observed. 2:1 would indicate strand bias, but not 1:1.">\n''')
file1.write('''##INFO=<ID=PMEAN,Number=1,Type=Float,Description="The mean position in reads of alternative allele. Smaller number would suggest false positives">\n''')
file1.write('''##INFO=<ID=PSTD,Number=1,Type=Integer,Description="Indicate whether the position in reads are the same. 0 means they are the same, 1 mean they are different">\n''')
file1.write('''##INFO=<ID=QMean,Number=1,Type=Float,Description="The mean base quality for alternative allele">\n''')
file1.write('''##INFO=<ID=QStd,Number=1,Type=Integer,Description="Indicate whether the base quality in reads are the same. 0 means they are the same, 1 mean they are different">\n''')
file1.write('''##INFO=<ID=MQ,Number=1,Type=Float,Description="The mean mapping quality for reads supporting alternative allele">\n''')
file1.write('''##INFO=<ID=SN,Number=1,Type=Flag,Description="Signal to noise ratio. The higher the number (>1.5), the more reliable the calls">\n''')
file1.write('''##INFO=<ID=HiAF,Number=1,Type=Float,Description="Allele frequency if only high base quality reads are used">\n''')
file1.write('''##INFO=<ID=ExAF,Number=1,Type=Float,Description="Extra allele frequency recovered from local realignment">\n''')
file1.write('''##INFO=<ID=SHIFT3,Number=1,Type=Integer,Description="No. of bases the Indel can be shifted 3’ with equivalent alignment">\n''')
file1.write('''##INFO=<ID=MSI,Number=1,Type=Integer,Description="Whether there’s microsatellite in sequence context">\n''')
file1.write('''##INFO=<ID=MSINT,Number=1,Type=Integer,Description="Number of bp per unit for MSI. 1 would indicate homopolymer, 2 for di-nucleotide repeat, and so on… ">\n''')
file1.write('''##INFO=<ID=NM,Number=1,Type=Float,Description="Mean number of mismatches in the reads (excluding indels) supporting alternative allele">\n''')
file1.write('''##INFO=<ID=HiCnt,Number=1,Type=Integer,Description="No. of reads with high base quality">\n''')
file1.write('''##INFO=<ID=HiCov,Number=1,Type=Integer,Description="No. of coverage with high base quality">\n''')
file1.write('''##INFO=<ID=5pFS,Number=1,Type=Flag,Description="20bp flanking the variants at 5’">\n''')
file1.write('''##INFO=<ID=3pFS,Number=1,Type=Flag,Description="20bp flanking the variants at 3’">\n''')
file1.write('''##INFO=<ID=Seg,Number=1,Type=Float,Description="The genomic segment variant is called">\n''')
file1.write('''##INFO=<ID=VarType,Number=1,Type=Flag,Description="The type of variants. Values are: SNV, MNV, Insertion, Deletion, and Complex">\n''')
for i in df['#CHROM'].unique():
file1.write("##contig=<ID="+str(i)+">\n")
# file1.write(df.to_string(index = False, justify='left', colsp))
file1.write(df.to_csv(index = False, sep='\t'))
file1.close()
|
import director_abstract
def is_new_style_class(cls):
    # Classic (old-style) Python 2 classes lack __class__; new-style ones
    # always expose it.
    return getattr(cls, "__class__", None) is not None
class MyFoo(director_abstract.Foo):
    """Director subclass providing the Python override of ping()."""
    def __init__(self):
        director_abstract.Foo.__init__(self)
    def ping(self):
        # Overrides the abstract C++ ping(); pong() composes this result.
        return "MyFoo::ping()"
# Direct call must use the Python override; pong()'s result embeds ping()'s
# output, showing the override is reachable through the base class too.
a = MyFoo()
if a.ping() != "MyFoo::ping()":
    raise RuntimeError, a.ping()
if a.pong() != "Foo::pong();MyFoo::ping()":
    raise RuntimeError, a.pong()
class MyExample1(director_abstract.Example1):
    """Override Color() to return the first component."""
    def Color(self, r, g, b):
        return r
class MyExample2(director_abstract.Example2):
    """Override Color() to return the second component."""
    def Color(self, r, g, b):
        return g
class MyExample3(director_abstract.Example3_i):
    """Override Color() to return the third component."""
    def Color(self, r, g, b):
        return b
# The three overrides return r, g and b respectively, so dispatching
# get_color through each director instance must yield 1, 2 and 3.
me1 = MyExample1()
if director_abstract.Example1_get_color(me1, 1, 2, 3) != 1:
    raise RuntimeError
if is_new_style_class(MyExample2):
    MyExample2_static = MyExample2
else:
    MyExample2_static = MyExample2(0, 0)
me2 = MyExample2(1, 2)
if MyExample2_static.get_color(me2, 1, 2, 3) != 2:
    raise RuntimeError
if is_new_style_class(MyExample3):
    MyExample3_static = MyExample3
else:
    MyExample3_static = MyExample3()
me3 = MyExample3()
if MyExample3_static.get_color(me3, 1, 2, 3) != 3:
    raise RuntimeError
# Instantiating the abstract bases directly must fail; error stays 1 (and
# triggers RuntimeError) only if the constructor unexpectedly succeeds.
error = 1
try:
    me1 = director_abstract.Example1()
except:
    error = 0
if (error):
    raise RuntimeError
error = 1
try:
    me2 = director_abstract.Example2()
except:
    error = 0
if (error):
    raise RuntimeError
error = 1
try:
    me3 = director_abstract.Example3_i()
except:
    error = 0
if (error):
    raise RuntimeError
# Accessing the wrapped A.f attribute must not raise.
try:
    f = director_abstract.A.f
except:
    raise RuntimeError
|
"""Support for Vallox ventilation units."""
import ipaddress
import logging
from vallox_websocket_api import PROFILE as VALLOX_PROFILE, Vallox
from vallox_websocket_api.constants import vlxDevConstants
from vallox_websocket_api.exceptions import ValloxApiException
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from .const import (
DEFAULT_FAN_SPEED_AWAY,
DEFAULT_FAN_SPEED_BOOST,
DEFAULT_FAN_SPEED_HOME,
DEFAULT_NAME,
DOMAIN,
METRIC_KEY_PROFILE_FAN_SPEED_AWAY,
METRIC_KEY_PROFILE_FAN_SPEED_BOOST,
METRIC_KEY_PROFILE_FAN_SPEED_HOME,
SIGNAL_VALLOX_STATE_UPDATE,
STATE_PROXY_SCAN_INTERVAL,
)
_LOGGER = logging.getLogger(__name__)
# YAML configuration: vallox: {host: <ip address>, name: <optional string>}.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string),
                vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Profiles a user may actively select via the set_profile service.
PROFILE_TO_STR_SETTABLE = {
    VALLOX_PROFILE.HOME: "Home",
    VALLOX_PROFILE.AWAY: "Away",
    VALLOX_PROFILE.BOOST: "Boost",
    VALLOX_PROFILE.FIREPLACE: "Fireplace",
}
STR_TO_PROFILE = {v: k for (k, v) in PROFILE_TO_STR_SETTABLE.items()}
# Profiles the unit may report include two extra read-only states.
PROFILE_TO_STR_REPORTABLE = {
    **{VALLOX_PROFILE.NONE: "None", VALLOX_PROFILE.EXTRA: "Extra"},
    **PROFILE_TO_STR_SETTABLE,
}
ATTR_PROFILE = "profile"
ATTR_PROFILE_FAN_SPEED = "fan_speed"
SERVICE_SCHEMA_SET_PROFILE = vol.Schema(
    {vol.Required(ATTR_PROFILE): vol.All(cv.string, vol.In(STR_TO_PROFILE))}
)
# Fan speed is expressed as a percentage and clamped to 0..100.
SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED = vol.Schema(
    {
        vol.Required(ATTR_PROFILE_FAN_SPEED): vol.All(
            vol.Coerce(int), vol.Clamp(min=0, max=100)
        )
    }
)
SERVICE_SET_PROFILE = "set_profile"
SERVICE_SET_PROFILE_FAN_SPEED_HOME = "set_profile_fan_speed_home"
SERVICE_SET_PROFILE_FAN_SPEED_AWAY = "set_profile_fan_speed_away"
SERVICE_SET_PROFILE_FAN_SPEED_BOOST = "set_profile_fan_speed_boost"
# Maps each service name to the ValloxServiceHandler method and its schema.
SERVICE_TO_METHOD = {
    SERVICE_SET_PROFILE: {
        "method": "async_set_profile",
        "schema": SERVICE_SCHEMA_SET_PROFILE,
    },
    SERVICE_SET_PROFILE_FAN_SPEED_HOME: {
        "method": "async_set_profile_fan_speed_home",
        "schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
    },
    SERVICE_SET_PROFILE_FAN_SPEED_AWAY: {
        "method": "async_set_profile_fan_speed_away",
        "schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
    },
    SERVICE_SET_PROFILE_FAN_SPEED_BOOST: {
        "method": "async_set_profile_fan_speed_boost",
        "schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED,
    },
}
async def async_setup(hass, config):
    """Set up the client and boot the platforms."""
    domain_conf = config[DOMAIN]
    client = Vallox(domain_conf.get(CONF_HOST))
    state_proxy = ValloxStateProxy(hass, client)
    service_handler = ValloxServiceHandler(client, state_proxy)
    hass.data[DOMAIN] = {
        "client": client,
        "state_proxy": state_proxy,
        "name": domain_conf.get(CONF_NAME),
    }
    # One shared dispatcher handles every service; the schema validates input.
    for service_name, details in SERVICE_TO_METHOD.items():
        hass.services.async_register(
            DOMAIN, service_name, service_handler.async_handle,
            schema=details["schema"],
        )
    # The vallox hardware expects quite strict timings for websocket
    # requests. Timings that machines with less processing power, like
    # Raspberries, cannot live up to during the busy start phase of Home
    # Asssistant. Hence, async_add_entities() for fan and sensor in respective
    # code will be called with update_before_add=False to intentionally delay
    # the first request, increasing chance that it is issued only when the
    # machine is less busy again.
    for platform in ("sensor", "fan"):
        hass.async_create_task(async_load_platform(hass, platform, DOMAIN, {}, config))
    async_track_time_interval(hass, state_proxy.async_update, STATE_PROXY_SCAN_INTERVAL)
    return True
class ValloxStateProxy:
    """Cache of Vallox metrics and profile shared by all entities.

    Reduces websocket API traffic: entities read from the cache, which is
    refreshed periodically by async_update().
    """

    def __init__(self, hass, client):
        """Initialize the proxy with an empty, invalid cache."""
        self._hass = hass
        self._client = client
        self._valid = False
        self._profile = None
        self._metric_cache = {}

    def fetch_metric(self, metric_key):
        """Return the cached value for a known metric key."""
        _LOGGER.debug("Fetching metric key: %s", metric_key)
        if not self._valid:
            raise OSError("Device state out of sync.")
        if metric_key not in vars(vlxDevConstants):
            raise KeyError(f"Unknown metric key: {metric_key}")
        return self._metric_cache[metric_key]

    def get_profile(self):
        """Return the cached profile as a display string."""
        _LOGGER.debug("Returning profile")
        if not self._valid:
            raise OSError("Device state out of sync.")
        return PROFILE_TO_STR_REPORTABLE[self._profile]

    async def async_update(self, event_time):
        """Refresh the cache from the unit and notify entities."""
        _LOGGER.debug("Updating Vallox state cache")
        try:
            self._metric_cache = await self._client.fetch_metrics()
            self._profile = await self._client.get_profile()
        except (OSError, ValloxApiException) as err:
            _LOGGER.error("Error during state cache update: %s", err)
            self._valid = False
        else:
            self._valid = True
        # Entities listen for this signal and re-read the cache.
        async_dispatcher_send(self._hass, SIGNAL_VALLOX_STATE_UPDATE)
class ValloxServiceHandler:
    """Services implementation.

    Each service method returns True on success / False on failure so that
    async_handle knows whether to refresh the cached device state.
    """

    def __init__(self, client, state_proxy):
        """Store the API client and the shared state proxy."""
        self._client = client
        self._state_proxy = state_proxy

    async def async_set_profile(self, profile: str = "Home") -> bool:
        """Set the ventilation profile."""
        _LOGGER.debug("Setting ventilation profile to: %s", profile)
        try:
            await self._client.set_profile(STR_TO_PROFILE[profile])
            return True
        except (OSError, ValloxApiException) as err:
            _LOGGER.error("Error setting ventilation profile: %s", err)
            return False

    async def _async_set_fan_speed(
        self, metric_key, profile_name: str, fan_speed: int
    ) -> bool:
        # Shared implementation for the three per-profile fan-speed services;
        # the rendered log/error messages match the previous per-method ones.
        _LOGGER.debug("Setting %s fan speed to: %d%%", profile_name, fan_speed)
        try:
            await self._client.set_values({metric_key: fan_speed})
            return True
        except (OSError, ValloxApiException) as err:
            _LOGGER.error(
                "Error setting fan speed for %s profile: %s", profile_name, err
            )
            return False

    async def async_set_profile_fan_speed_home(
        self, fan_speed: int = DEFAULT_FAN_SPEED_HOME
    ) -> bool:
        """Set the fan speed in percent for the Home profile."""
        return await self._async_set_fan_speed(
            METRIC_KEY_PROFILE_FAN_SPEED_HOME, "Home", fan_speed
        )

    async def async_set_profile_fan_speed_away(
        self, fan_speed: int = DEFAULT_FAN_SPEED_AWAY
    ) -> bool:
        """Set the fan speed in percent for the Away profile."""
        return await self._async_set_fan_speed(
            METRIC_KEY_PROFILE_FAN_SPEED_AWAY, "Away", fan_speed
        )

    async def async_set_profile_fan_speed_boost(
        self, fan_speed: int = DEFAULT_FAN_SPEED_BOOST
    ) -> bool:
        """Set the fan speed in percent for the Boost profile."""
        return await self._async_set_fan_speed(
            METRIC_KEY_PROFILE_FAN_SPEED_BOOST, "Boost", fan_speed
        )

    async def async_handle(self, service):
        """Dispatch a service call."""
        method = SERVICE_TO_METHOD.get(service.service)
        # BUG FIX: .get() returns None for an unknown service; subscripting it
        # previously raised TypeError instead of logging a clean error.
        if method is None:
            _LOGGER.error("Unknown service: %s", service.service)
            return
        params = service.data.copy()
        if not hasattr(self, method["method"]):
            _LOGGER.error("Service not implemented: %s", method["method"])
            return
        result = await getattr(self, method["method"])(**params)
        # Force state_proxy to refresh device state, so that updates are
        # propagated to platforms.
        if result:
            await self._state_proxy.async_update(None)
|
'''
Created on Jul 12, 2020
@author: willg
'''
import Room
import UserDataProcessing
from discord.utils import escape_markdown, escape_mentions
from collections import defaultdict
from typing import List
import TableBot
# Module debug flag (not read anywhere in this file's visible scope).
DEBUGGING = False

# Default MKW score matrix: scoreMatrix[numRacers - 1][place - 1] is the
# number of points awarded for finishing in `place` in a race that had
# `numRacers` players (rows = 1..12 racers, columns = 1st..12th place).
scoreMatrix = [
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [15, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [15, 8, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [15, 9, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0],
    [15, 9, 5, 2, 1, 0, 0, 0, 0, 0, 0, 0],
    [15, 10, 6, 3, 1, 0, 0, 0, 0, 0, 0, 0],
    [15, 10, 7, 5, 3, 1, 0, 0, 0, 0, 0, 0],
    [15, 11, 8, 6, 4, 2, 1, 0, 0, 0, 0, 0],
    [15, 11, 8, 6, 4, 3, 2, 1, 0, 0, 0, 0],
    [15, 12, 10, 8, 6, 4, 3, 2, 1, 0, 0, 0],
    [15, 12, 10, 8, 6, 5, 4, 3, 2, 1, 0, 0],
    [15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1, 0]
]

# Per-server overrides of the score matrix, keyed by Discord server id;
# same layout as scoreMatrix.
alternate_Matrices = {
    771417753843925023: [
        [15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [15, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [15, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [15, 11, 7, 3, 0, 0, 0, 0, 0, 0, 0, 0],
        [15, 11, 8, 5, 3, 0, 0, 0, 0, 0, 0, 0],
        [15, 11, 9, 6, 4, 3, 0, 0, 0, 0, 0, 0],
        [15, 12, 10, 7, 5, 4, 3, 0, 0, 0, 0, 0],
        [15, 13, 10, 8, 6, 4, 3, 2, 0, 0, 0, 0],
        [15, 13, 10, 8, 7, 6, 4, 3, 2, 0, 0, 0],
        [15, 13, 11, 9, 8, 6, 5, 4, 3, 2, 0, 0],
        [15, 13, 12, 10, 8, 7, 6, 5, 3, 2, 1, 0],
        [15, 13, 12, 10, 8, 7, 6, 5, 3, 2, 1, 0]
    ]}
def print_scores(fc_score, fc_player):
    """Print each player's name, friend code, and score, highest score first."""
    ranked = sorted(fc_score.items(), key=lambda entry: entry[1], reverse=True)
    for fc, score in ranked:
        line = fc_player[fc] + " (" + fc + "): " + str(score)
        print(line)
#Calculates the scores from the start race to the end race (eg startRace = 1 and endRace = 4 would be GP1)
def calculateScoresDCs(curRoom:Room.Room, startRace=1, endRace=12, missingRacePts=3, server_id=None):
    """Return {fc: [score per race]} for races startRace..endRace (1-indexed,
    inclusive). Players missing from a race get missingRacePts unless a manual
    DC entry with value 'on' says they score 0 for that race. Short score
    lists are padded with zeros at the end."""
    #disconnections = curRoom.getMissingOnRace()
    fc_score = {}
    fc_player = curRoom.getFCPlayerListStartEnd(startRace, endRace)
    for fc in fc_player:
        fc_score[fc] = []
    #If the races completed is less than the start race, no one has scored anything yet - That's in the future!
    if len(curRoom.getRaces()) < startRace:
        return fc_score
    #Iterating over the slice - no, this isn't an error. Check how slicing works, this won't go out of bounds.
    for raceNum, race in enumerate(curRoom.getRaces()[startRace-1:endRace], startRace):
        mkwxNumRacers = race.numRacers()
        if mkwxNumRacers != len(fc_player.keys()):
            #Someone is missing. Need to give them the specified DC points.
            raceFCs = race.getFCs()
            for fc in fc_player:
                # Players manually marked 'on' (DC'd on results) still count
                # toward the room size used to index the score matrix.
                if raceNum in curRoom.dc_on_or_before:
                    if fc in curRoom.dc_on_or_before[raceNum]:
                        if curRoom.dc_on_or_before[raceNum][fc] == 'on':
                            mkwxNumRacers += 1
                if fc not in raceFCs:
                    was_in_manual_dcs = False
                    if raceNum in curRoom.dc_on_or_before:
                        if fc in curRoom.dc_on_or_before[raceNum]:
                            was_in_manual_dcs = True
                            # 'on' entries score 0 for the race; any other
                            # manual entry scores missingRacePts.
                            points_to_get = 0
                            if curRoom.dc_on_or_before[raceNum][fc] == 'on':
                                points_to_get = 0
                            else:
                                points_to_get = missingRacePts
                            fc_score[fc].append( points_to_get )
                    # No manual DC entry: default missing-race points.
                    if not was_in_manual_dcs:
                        fc_score[fc].append(missingRacePts)
        # Manual room-size override for this race, clamped to the 12-row matrix.
        if raceNum in curRoom.forcedRoomSize:
            mkwxNumRacers = curRoom.forcedRoomSize[raceNum]
        if mkwxNumRacers > 12:
            mkwxNumRacers = 12
        for placement in race.getPlacements():
            placement_score = 0
            if placement.place <= 12: #Only get people's score if their place is 12 or less
                # Servers may define their own score matrix.
                if server_id in alternate_Matrices:
                    placement_score = alternate_Matrices[server_id][mkwxNumRacers-1][placement.place-1]
                else:
                    placement_score = scoreMatrix[mkwxNumRacers-1][placement.place-1]
            fc_score[placement.player.FC].append( placement_score )
    #Fill awkward sized arrays with 0 so every player has endRace-startRace+1 entries
    for fc in fc_score:
        difference = endRace-(startRace-1) - len(fc_score[fc])
        if difference > 0:
            for _ in range(difference):
                fc_score[fc].append(0)
    return fc_score
def calculateGPScoresDCS(GPNumber, curRoom, missingRacePts=3, server_id=None):
    """Score a single 4-race GP (GP 1 covers races 1-4, GP 2 races 5-8, ...)."""
    first_race = 4 * (GPNumber - 1) + 1
    last_race = 4 * GPNumber
    return calculateScoresDCs(curRoom, first_race, last_race, missingRacePts, server_id)
def chunk_list(to_chunk: List, n):
    """Yield successive n-sized chunks from the given list."""
    start = 0
    while start < len(to_chunk):
        yield to_chunk[start:start + n]
        start += n
#Takes a GPs list and resizes into a new GP size
def resizeGPsInto(GPs, new_size_GP):
    """Re-bucket per-player race scores from a list of GP dicts into GPs of
    new_size_GP races each.

    GPs is a list of {fc: [race scores]} dicts. Returns a new list of the same
    shape where each dict covers new_size_GP races. Players with fewer
    recorded races than the longest player get empty lists for trailing GPs
    (the previous code raised IndexError in that case).
    """
    # Flatten each player's scores across all GPs, preserving race order.
    total_scores = defaultdict(list)
    for gp_scores in GPs:
        for fc, scores in gp_scores.items():
            total_scores[fc].extend(scores)
    if not total_scores:
        return []
    # Re-chunk each player's flat list into new_size_GP-sized sections.
    player_chunks = {
        fc: [scores[i:i + new_size_GP] for i in range(0, len(scores), new_size_GP)]
        for fc, scores in total_scores.items()
    }
    # The output has as many GPs as the player with the most chunks.
    num_gps = max(len(chunks) for chunks in player_chunks.values())
    new_gps = [{} for _ in range(num_gps)]
    for fc, chunks in player_chunks.items():
        for gp_index, new_gp in enumerate(new_gps):
            # BUG FIX: the original condition was `gp_index <= len(...)`,
            # which indexed one past the end of the chunk list whenever
            # players had unequal numbers of scores.
            new_gp[fc] = chunks[gp_index] if gp_index < len(chunks) else []
    return new_gps
def get_war_table_DCS(channel_bot:TableBot.ChannelBot, use_lounge_otherwise_mii=True, use_miis=False, lounge_replace=None, server_id=None, missingRacePts=3, discord_escape=False, step=None, up_to_race=None):
    """Build the war table text and the sorted per-team score data.

    Returns (table_str, scores_by_team): table_str is the "#title ..." table
    markup, scores_by_team is a list of
    (team_tag, [(fc, [display_str, total_score, per_race_scores])]) pairs,
    best team first.
    """
    war = channel_bot.getWar()
    room = channel_bot.getRoom()
    if step is None:
        step = channel_bot.get_race_size()
    numGPs = war.getNumberOfGPS()
    GPs = []
    use_lounge_names = lounge_replace
    fc_did = UserDataProcessing.fc_discordId
    did_lounge = UserDataProcessing.discordId_lounges
    # Score every 4-race GP, including disconnection handling.
    for x in range(numGPs):
        GPs.append(calculateGPScoresDCS(x+1, room, missingRacePts, server_id))
    fcs_players = room.getFCPlayerListStartEnd(1, numGPs*4)
    # fc -> [display/score string, running total, flat per-race score list]
    FC_table_str = {}
    for fc, player in fcs_players.items():
        # Resolve the display name from mii name / lounge name / FC per flags.
        name = ""
        if player.strip() == "":
            name = "no name"
        if use_lounge_otherwise_mii:
            name = player
            if fc in fc_did and fc_did[fc][0] in did_lounge:
                name = did_lounge[fc_did[fc][0]]
        else:
            if not use_miis and not use_lounge_names:
                name = fc
            elif not use_miis and use_lounge_names:
                if not fc in fc_did or not fc_did[fc][0] in did_lounge:
                    name = player + " / No Discord"
                else:
                    name = did_lounge[fc_did[fc][0]]
            elif use_miis and not use_lounge_names:
                name = player
            elif use_miis and use_lounge_names:
                name = player
                discord = "No Discord"
                if fc in fc_did and fc_did[fc][0] in did_lounge:
                    discord = did_lounge[fc_did[fc][0]]
                name = name + " / " + discord
        # Manual name changes and sub-in annotations override the above.
        if fc in room.getNameChanges():
            name = room.getNameChanges()[fc]
        if room.fc_subbed_in(fc):
            name = room.get_sub_string(name, fc)
        if discord_escape:
            name = escape_mentions(escape_markdown(name))
        FC_table_str[fc] = [name + " ", 0, []]
        #add flag, if the FC has a flag set
        if fc in UserDataProcessing.fc_discordId:
            discord_id_number = UserDataProcessing.fc_discordId[fc][0]
            if discord_id_number in UserDataProcessing.discordId_flags:
                FC_table_str[fc][0] += "[" + UserDataProcessing.discordId_flags[discord_id_number] + "] "
    # Apply manual GP edits and sub-out score overrides.
    for GPnum, GP_scores in enumerate(GPs, 1):
        for fc in FC_table_str:
            gp_amount = [0, 0, 0, 0]
            editAmount = war.getEditAmount(fc, GPnum)
            if editAmount is not None:
                # A manual edit replaces the whole GP with a single amount.
                gp_amount = [editAmount, 0, 0, 0]
            else:
                if fc in GP_scores.keys():
                    gp_amount = GP_scores[fc]
                for gp_race_num in range(1, 5):
                    _, subout_old_score = room.get_sub_out_for_subbed_in_fc(fc, ((GPnum-1)*4)+gp_race_num)
                    if subout_old_score is not None:
                        gp_amount[gp_race_num-1] = subout_old_score
            GP_scores[fc] = gp_amount
    #after GP scores have been determined, if `up_to_race` has been set, set all races after `up_to_race` to 0 pts
    if up_to_race:
        up_to_race = min(up_to_race, len(room.races)) #`up_to_race` cannot be greater than the maximum number of races
        gp_start = int(up_to_race/4) #GP where first race needs to be reset to 0
        first_gp_index_start = up_to_race%4 #race in first GP that needs to be reset to 0 (cutoff between races that are kept and races that are reset to 0)
        for indx, gp_scores in enumerate(GPs[gp_start:]):
            race_start = first_gp_index_start if indx==0 else 0
            for _, player_scores in gp_scores.items():
                player_scores[race_start:] = [0] * (4-race_start)
    # Re-bucket into sections of `step` races unless already 4-race GPs.
    resizedGPs = GPs if step == 4 else resizeGPsInto(GPs, step)
    for GPnum, GP_scores in enumerate(resizedGPs, 1):
        for fc, player in FC_table_str.items():
            section_amount = sum(GP_scores[fc])
            FC_table_str[fc][0] += f"{section_amount}|"
            FC_table_str[fc][1] += section_amount
            FC_table_str[fc][2].extend(GP_scores[fc])
    for fc in FC_table_str.keys():
        FC_table_str[fc][0] = FC_table_str[fc][0].strip("|")
    # Player penalties: subtract from totals and show on the player's row.
    for fc, amount in room.getPlayerPenalities().items():
        if fc in FC_table_str:
            to_add = amount * -1
            FC_table_str[fc][1] += to_add
            if to_add >= 0:
                FC_table_str[fc][0] += "|" + str(to_add)
            else:
                FC_table_str[fc][0] += str(to_add)
    #build table string
    numRaces = up_to_race if up_to_race else min( (len(room.races), war.getNumberOfGPS()*4) )
    table_str = "#title " + war.getTableWarName(numRaces) + "\n"
    curTeam = None
    teamCounter = 0
    is_ffa = war.playersPerTeam == 1
    if is_ffa:
        table_str += "FFA\n"
    FC_table_str_items = sorted(FC_table_str.items(), key=lambda t: war.getTeamForFC(t[0]))
    scores_by_team = defaultdict(list)
    for fc, player_data in FC_table_str_items:
        scores_by_team[war.getTeamForFC(fc)].append((fc, player_data))
    def player_score(player_data):
        # player_data = [display_str, total_score, per_race_scores]
        return player_data[1]
    def team_score(all_players, team_tag):
        total_score = 0
        for fc, player_data in all_players:
            total_score += player_score(player_data)
        if team_tag in war.getTeamPenalities():
            # BUG FIX: this used to subtract war.getTeamPenalities()[curTeam],
            # but curTeam is still None when team_score runs (inside the sort
            # below), so any war with a team penalty looked up the wrong key.
            # The penalty being applied belongs to team_tag.
            total_score -= war.getTeamPenalities()[team_tag]
        return total_score
    # Sort teams by (score, tag) descending, then players within each team.
    scores_by_team = sorted(scores_by_team.items(), key=lambda t: (team_score(t[1], t[0]), t[0]), reverse=True)
    for _, team_players in scores_by_team:
        team_players.sort(key=lambda pd:player_score(pd[1]), reverse=True)
    for team_tag, team_players in scores_by_team:
        for fc, player_data in team_players:
            player_scores_str = player_data[0]
            if not is_ffa:
                if team_tag != curTeam:
                    # New team section: emit the previous team's penalty line
                    # (if any), then the team header with optional hex color.
                    if curTeam in war.getTeamPenalities() and war.getTeamPenalities()[curTeam] > 0:
                        table_str += "\nPenalty -" + str(war.getTeamPenalities()[curTeam]) + "\n"
                    curTeam = war.getTeamForFC(fc)
                    teamHex = ""
                    if war.teamColors is not None:
                        if teamCounter < len(war.teamColors):
                            teamHex = " " + war.teamColors[teamCounter]
                    table_str += "\n" + curTeam + teamHex + "\n"
                    teamCounter += 1
            table_str += player_scores_str + "\n"
        if not is_ffa:
            # NOTE(review): this prints a team's penalty right after its
            # players, and the team-change branch above may print it again
            # when the next team starts — confirm whether the duplicate
            # penalty line is intended before changing it.
            if team_tag in war.getTeamPenalities():
                table_str += "Penalty -" + str(war.getTeamPenalities()[war.getTeamForFC(fc)]) + "\n"
    return table_str, scores_by_team
def get_race_scores_for_fc(friend_code: str, channel_bot: TableBot.ChannelBot, use_lounge_otherwise_mii=True, use_miis=False, lounge_replace=None, server_id=None, missingRacePts=3, discord_escape=False):
    """Return the individual race score list for friend_code, or None if the
    friend code is not in the table."""
    _, teams = get_war_table_DCS(
        channel_bot,
        use_lounge_otherwise_mii,
        use_miis,
        lounge_replace,
        server_id,
        missingRacePts,
        discord_escape,
        step=1,
    )
    for _, members in teams:
        for member_fc, member_data in members:
            if member_fc == friend_code:
                # member_data = [display_str, total_score, per_race_scores]
                return member_data[2]
    return None
# Display-name overrides for team tags (tag -> full team name shown in tables).
team_tag_mapping = {"λρ":"Apocalypse"}
def format_sorted_data_for_gsc(scores_by_team, team_penalties):
    """Build the Grand Star Cup table text for a two-team war.

    Assumes scores_by_team contains at least two team tags; the fourth
    "GP" column is the overall total and is worth double.
    """
    # Per tag: [GP1 total, GP2 total, GP3 total, overall total].
    gsc_tag_scores = defaultdict(lambda: [0, 0, 0, 0])
    for tag, players in scores_by_team:
        for _, (_, player_overall, score_by_race) in players:
            tag_totals = gsc_tag_scores[tag]
            tag_totals[3] += player_overall
            # Sum races in 4-race chunks (at most the first 12 races).
            for chunk_start in range(0, min(len(score_by_race), 12), 4):
                tag_totals[chunk_start // 4] += sum(score_by_race[chunk_start:chunk_start + 4])
    # Team penalties only reduce the overall total.
    for tag, penalty in team_penalties.items():
        if tag in gsc_tag_scores:
            gsc_tag_scores[tag][3] -= penalty
    tags = [tag for tag in gsc_tag_scores]
    first_team_tag = tags[0]
    second_team_tag = tags[1]
    first_team_tag_altered = team_tag_mapping.get(first_team_tag, first_team_tag)
    second_team_tag_altered = team_tag_mapping.get(second_team_tag, second_team_tag)
    # Award 2 points to each section's winner, 1 each on a tie; the overall
    # column (index 3) is worth double.
    gsc_team_scores = {first_team_tag: [0, 0, 0, 0],
                       second_team_tag: [0, 0, 0, 0]}
    section_pairs = zip(gsc_tag_scores[first_team_tag], gsc_tag_scores[second_team_tag])
    for gp_index, (first_team_score, second_team_score) in enumerate(section_pairs):
        multiplier = 2 if gp_index == 3 else 1
        if first_team_score > second_team_score:
            gsc_team_scores[first_team_tag][gp_index] = 2
        elif second_team_score > first_team_score:
            gsc_team_scores[second_team_tag][gp_index] = 2
        else:
            gsc_team_scores[first_team_tag][gp_index] = 1
            gsc_team_scores[second_team_tag][gp_index] = 1
        gsc_team_scores[first_team_tag][gp_index] *= multiplier
        gsc_team_scores[second_team_tag][gp_index] *= multiplier
    first_team_gps_text = "|".join(str(s) for s in gsc_team_scores[first_team_tag])
    second_team_gps_text = "|".join(str(s) for s in gsc_team_scores[second_team_tag])
    gsc_table_text = f"""#title Grand Star Cup
{first_team_tag}
{first_team_tag_altered} {first_team_gps_text}
{second_team_tag}
{second_team_tag_altered} {second_team_gps_text}"""
    return gsc_table_text
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
#
# python-twoauth [oauth.py]
# - Hirotaka Kawata <info@techno-st.net>
# - http://www.techno-st.net/wiki/python-twoauth
#
#
# Copyright (c) 2009-2010 Hirotaka Kawata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# OAuth Module for Twitter
import time
import random
import string
import urllib, urllib2
import httplib
import urlparse
import hmac, hashlib
import cgi
import cStringIO
class oauth(object):
    """OAuth 1.0a client for the Twitter API.

    NOTE(review): legacy Python 2 module — it relies on urllib2, httplib,
    urlparse, cgi.parse_qs, dict.iteritems/itervalues, xrange and
    str.encode("base64"), none of which exist on Python 3.
    """

    # Alphabet used when generating the random oauth_nonce value.
    _randchars = string.ascii_letters + string.digits

    def __init__(self, ckey, csecret, atoken = "", asecret = "",
                 site = "https://api.twitter.com/"):
        """Store consumer/access credentials and derive the OAuth endpoint URLs."""
        # Request Token URL
        self._reqt_url = site + 'oauth/request_token'
        # Authorize URL
        self._authorize_url = site + 'oauth/authorize'
        # Authenticate URL
        self._authenticate_url = site + 'oauth/authenticate'
        # Access Token URL
        self._acct_url = site + 'oauth/access_token'
        # Consumer Key, Secret
        self.ckey = ckey
        self.csecret = csecret
        # Access Key, Secret
        self.atoken = atoken
        self.asecret = asecret
        random.seed()

    # Get Request Token
    def request_token(self, callback_url = None):
        """Fetch a request token; returns the parsed token-parameter dict."""
        # initialize OAuth parameters
        oauth_params = self._init_params()
        # a request-token call must not carry an oauth_token parameter
        del oauth_params["oauth_token"]
        if callback_url:
            # add callback
            oauth_params["oauth_callback"] = callback_url
        # get OAuth header
        auth_header = self.oauth_header(
            self._reqt_url, oauth_params = oauth_params)
        # send request
        req = urllib2.Request(self._reqt_url)
        req.add_header("Authorization", auth_header)
        resp = urllib2.urlopen(req)
        # Parse Token Parameters: cgi.parse_qs maps each key to a list,
        # so flatten to the first value.
        token_info = cgi.parse_qs(resp.read())
        for p in token_info.keys():
            token_info[p] = token_info[p][0]
        return token_info

    # Get Authorize URL
    def authorize_url(self, token_info):
        """Return the user-facing authorization URL for a request token."""
        return "%s?%s=%s" % (
            self._authorize_url, "oauth_token", token_info["oauth_token"])

    # Get Authenticate URL
    def authenticate_url(self, token_info):
        """Return the "Sign in with Twitter" authentication URL."""
        return "%s?%s=%s" % (
            self._authenticate_url, "oauth_token", token_info["oauth_token"])

    # Get Access Token
    def access_token(self, token_info, pin):
        """Exchange a request token plus PIN/verifier for an access token."""
        # set request token information
        token = token_info["oauth_token"]
        secret = token_info["oauth_token_secret"]
        # initialize OAuth parameters
        oauth_params = self._init_params(token)
        oauth_params["oauth_verifier"] = pin
        # get OAuth header (signed with the request-token secret)
        auth_header = self.oauth_header(self._acct_url, secret = secret,
                                        oauth_params = oauth_params)
        # send request
        req = urllib2.Request(self._acct_url)
        req.add_header("Authorization", auth_header)
        resp = urllib2.urlopen(req)
        # Parse Access Token (flatten parse_qs lists as above)
        token_info = cgi.parse_qs(resp.read())
        for p in token_info.keys():
            token_info[p] = token_info[p][0]
        # set token and secret to instance if not set
        if not self.atoken and not self.asecret:
            self.atoken = token_info["oauth_token"]
            self.asecret = token_info["oauth_token_secret"]
        return token_info

    # calculate oauth_signature
    def oauth_signature(self, url, method = "GET", secret = "", *params):
        """Return the HMAC-SHA1 signature for url/method over the merged
        parameter dicts, keyed by "consumer_secret&token_secret"."""
        sigparams = {}
        for d in params: sigparams.update(d)
        # Generate Signature Base String (parameters sorted by key)
        plist = ["%s=%s" % (k, v) for k, v in sorted(sigparams.items())]
        pstr = "&".join(plist)
        msg = "%s&%s&%s" % (method, self._oquote(url), self._oquote(pstr))
        # Calculate Signature
        h = hmac.new("%s&%s" % (self.csecret, secret), msg, hashlib.sha1)
        sig = h.digest().encode("base64").strip()
        return sig

    # Return Authorization Header String
    def oauth_header(self, url, method = "GET", params = {},
                     secret = "", oauth_params = None, realm = ""):
        """Build the value for an "Authorization: OAuth ..." header.

        NOTE(review): the mutable {} defaults are shared across calls; this
        is only safe while no caller mutates them.
        """
        # initialize OAuth parameters if no given oauth_params
        if oauth_params == None:
            oauth_params = self._init_params()
        # Encode oauth_params for OAuth format
        values = map(self._oquote, oauth_params.itervalues())
        enc_oauth_params = dict(zip(oauth_params.keys(), values))
        # Encode params for OAuth format
        keys = map(self._oquote, params.iterkeys())
        values = map(self._oquote, params.itervalues())
        enc_params = dict(zip(keys, values))
        # get oauth_signature
        sig = self.oauth_signature(url, method, secret, enc_oauth_params, enc_params)
        oauth_params["oauth_signature"] = sig
        # quote OAuth format
        plist = ['%s="%s"' % (self._oquote(k), self._oquote(v))
                 for k, v in oauth_params.iteritems()]
        h = 'realm="%s",%s' % (realm, ",".join(plist))
        return "OAuth %s" % (h)

    # Return urllib2.Request Object for OAuth
    def oauth_request(self, url, method = "GET", params = {}):
        """Create a signed urllib2.Request for a GET or POST call."""
        # create urllib2.Request
        if method == "GET":
            if params:
                req = urllib2.Request("%s?%s" % (url, urllib.urlencode(params)))
            else:
                req = urllib2.Request(url)
        elif method == "POST":
            req = urllib2.Request(url, urllib.urlencode(params))
        else:
            # unsupported HTTP method; note this is a bare `raise` with no
            # active exception, which itself raises TypeError on Python 2
            raise
        # set OAuth header
        req.add_header("Authorization",
                       self.oauth_header(url, method, params, self.asecret))
        return req

    # Return httplib connection (useful for methods urllib2 lacks, e.g. DELETE)
    def oauth_http_request(self, url, method = "GET", params = {}, header = {}):
        """Issue a signed request via httplib and return the connection."""
        urlp = urlparse.urlparse(url)
        if urlp.scheme == "https":
            conn = httplib.HTTPSConnection(urlp.netloc)
        else:
            conn = httplib.HTTPConnection(urlp.netloc)
        header["Authorization"] = self.oauth_header(url, method, params, self.asecret)
        if method == "GET":
            path = "%s?%s" % (urlp.path, urllib.urlencode(params))
            conn.request(method, path, headers = header)
        else:
            conn.request(method, urlp.path, urllib.urlencode(params), header)
        return conn

    # Get random string (for oauth_nonce)
    def _rand_str(self, n):
        return ''.join(random.choice(self._randchars) for i in xrange(n))

    # Initialize OAuth parameters
    def _init_params(self, token = None):
        """Return the standard OAuth parameter dict (token defaults to self.atoken)."""
        if token == None:
            token = self.atoken
        params = {
            "oauth_consumer_key": self.ckey,
            "oauth_signature_method": "HMAC-SHA1",
            "oauth_timestamp": str(int(time.time())),
            "oauth_nonce": self._rand_str(42),
            "oauth_version": "1.0",
            "oauth_token" : token
        }
        return params

    # quote string for OAuth format
    def _oquote(self, s):
        # percent-encode everything except the OAuth unreserved set "-._~"
        return urllib.quote(str(s), "-._~")
|
# Generated by Django 2.1.3 on 2019-04-26 15:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated field alterations (Django 2.1.3, generated 2019-04-26).

    NOTE(review): the `secretnote.expireon` default below is the literal
    datetime.date(2019, 5, 3) — the model's default was evaluated once at
    generation time, so this migration freezes that date.
    """

    dependencies = [
        ('models', '0027_auto_20190425_1032'),
    ]

    # Length / verbose-name / default adjustments across several models.
    operations = [
        migrations.AlterField(
            model_name='client',
            name='address',
            field=models.CharField(blank=True, default='', max_length=160, null=True, verbose_name='Client Address'),
        ),
        migrations.AlterField(
            model_name='client',
            name='name',
            field=models.CharField(max_length=80, verbose_name='Client Name'),
        ),
        migrations.AlterField(
            model_name='computer',
            name='computername',
            field=models.CharField(max_length=40, verbose_name='Computer Name'),
        ),
        migrations.AlterField(
            model_name='computer',
            name='model',
            field=models.CharField(blank=True, default='', max_length=60, null=True),
        ),
        migrations.AlterField(
            model_name='computer',
            name='serialnumber',
            field=models.CharField(blank=True, default='', max_length=60, null=True),
        ),
        migrations.AlterField(
            model_name='domain',
            name='admin',
            field=models.CharField(default='administrator', max_length=30, verbose_name='Domain admin username:'),
        ),
        migrations.AlterField(
            model_name='domain',
            name='domainnamelong',
            field=models.CharField(max_length=70, verbose_name='Long domain name (.local):'),
        ),
        migrations.AlterField(
            model_name='domain',
            name='domainnameshort',
            field=models.CharField(max_length=50, verbose_name='Short domain name:'),
        ),
        migrations.AlterField(
            model_name='othernetworkequipment',
            name='equipmenttype',
            field=models.CharField(max_length=50, verbose_name='Equipment Type'),
        ),
        migrations.AlterField(
            model_name='othernetworkequipment',
            name='manufacturer',
            field=models.CharField(blank=True, default='', max_length=40, null=True, verbose_name='Manufacturer'),
        ),
        migrations.AlterField(
            model_name='othernetworkequipment',
            name='model',
            field=models.CharField(blank=True, default='', max_length=40, null=True, verbose_name='Model'),
        ),
        migrations.AlterField(
            model_name='othernetworkequipment',
            name='serialnumber',
            field=models.CharField(blank=True, default='', max_length=50, null=True, verbose_name='Serial number'),
        ),
        migrations.AlterField(
            model_name='person',
            name='firstname',
            field=models.CharField(max_length=60, verbose_name='First Name'),
        ),
        migrations.AlterField(
            model_name='person',
            name='lastname',
            field=models.CharField(max_length=60, verbose_name='Last Name'),
        ),
        migrations.AlterField(
            model_name='printer',
            name='manufacturer',
            field=models.CharField(choices=[('H', 'HP'), ('S', 'Samsung'), ('B', 'Brothers'), ('C', 'Canon'), ('X', 'Xerox'), ('D', 'Dymo'), ('O', 'Other')], default='H', max_length=1),
        ),
        migrations.AlterField(
            model_name='printer',
            name='model',
            field=models.CharField(blank=True, default='', max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='printer',
            name='printername',
            field=models.CharField(max_length=50, verbose_name='Printer Name'),
        ),
        migrations.AlterField(
            model_name='printer',
            name='serialnumber',
            field=models.CharField(blank=True, default='', max_length=40, null=True),
        ),
        migrations.AlterField(
            model_name='router',
            name='firmwareversion',
            field=models.CharField(blank=True, default='', max_length=60, null=True, verbose_name='Firware version'),
        ),
        migrations.AlterField(
            model_name='router',
            name='model',
            field=models.CharField(blank=True, default='', max_length=40, null=True),
        ),
        migrations.AlterField(
            model_name='router',
            name='serialnumber',
            field=models.CharField(blank=True, default='', max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='router',
            name='settingslink',
            field=models.CharField(blank=True, default='', max_length=130, null=True, verbose_name='Link to settings'),
        ),
        migrations.AlterField(
            model_name='secretnote',
            name='expireon',
            field=models.DateField(blank=True, default=datetime.date(2019, 5, 3), null=True, verbose_name='Note expires on'),
        ),
        migrations.AlterField(
            model_name='secretnote',
            name='subject',
            field=models.CharField(max_length=150, verbose_name='Subject*'),
        ),
        migrations.AlterField(
            model_name='systemupdates',
            name='author',
            field=models.CharField(blank=True, max_length=120, null=True, verbose_name='Update author'),
        ),
        migrations.AlterField(
            model_name='systemupdates',
            name='title',
            field=models.CharField(max_length=160, verbose_name='Title'),
        ),
        migrations.AlterField(
            model_name='systemupdates',
            name='version',
            field=models.CharField(max_length=50, verbose_name='Version'),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='companyname',
            field=models.CharField(max_length=90, verbose_name='Company name*'),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='resolvedby',
            field=models.CharField(blank=True, default=None, max_length=120, null=True, verbose_name='Resolved by'),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='title',
            field=models.CharField(max_length=160, verbose_name='Subject*'),
        ),
    ]
|
class ModelWrapper:
    """Callable that turns a list of JSON dicts into model instances."""

    def __init__(self, modelCls, connection):
        """Remember the model class and the connection injected into each instance."""
        self.modelCls = modelCls
        self.connection = connection

    def __call__(self, json):
        """Instantiate self.modelCls once per element of the JSON list."""
        wrapped = []
        for element in json:
            wrapped.append(self.modelCls(connection=self.connection, **element))
        return wrapped
|
from decimal import Decimal
from typing import List, Dict
import pandas as pd
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.connector.in_flight_order_base import InFlightOrderBase
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.event.events import OrderType, TradeType, TradeFee
# Sentinel "no price specified" value used as the default order price.
s_decimal_NaN = Decimal("nan")
class MockExchange(ExchangeBase):
    """Minimal ExchangeBase test double.

    Most abstract members are stubbed with `pass` (they return None); only
    the quoted prices, readiness flag, and account balances carry real state.
    """

    def __init__(self):
        super(MockExchange, self).__init__()
        # Fixed quotes returned by get_price().
        self._buy_price = Decimal(1)
        self._sell_price = Decimal(1)

    @property
    def buy_price(self) -> Decimal:
        # Price returned for buy-side queries.
        return self._buy_price

    @buy_price.setter
    def buy_price(self, price: Decimal):
        self._buy_price = price

    @property
    def sell_price(self) -> Decimal:
        # Price returned for sell-side queries.
        return self._sell_price

    @sell_price.setter
    def sell_price(self, price: Decimal):
        self._sell_price = price

    @property
    def status_dict(self) -> Dict[str, bool]:
        # Stub: returns None.
        pass

    @property
    def in_flight_orders(self) -> Dict[str, InFlightOrderBase]:
        # Stub: returns None.
        pass

    async def cancel_all(self, timeout_seconds: float) -> List[CancellationResult]:
        # Stub: no orders to cancel.
        pass

    def stop_tracking_order(self, order_id: str):
        # Stub: nothing tracked.
        pass

    @property
    def order_books(self) -> Dict[str, OrderBook]:
        # Stub: returns None.
        pass

    @property
    def limit_orders(self) -> List[LimitOrder]:
        # Stub: returns None.
        pass

    async def get_active_exchange_markets(self) -> pd.DataFrame:
        # Stub: returns None.
        pass

    def c_stop_tracking_order(self, order_id):
        # Stub for the Cython-level hook.
        pass

    def buy(self, trading_pair: str, amount: Decimal, order_type=OrderType.MARKET, price: Decimal = s_decimal_NaN,
            **kwargs) -> str:
        # Stub: no order is placed.
        pass

    def sell(self, trading_pair: str, amount: Decimal, order_type=OrderType.MARKET, price: Decimal = s_decimal_NaN,
             **kwargs) -> str:
        # Stub: no order is placed.
        pass

    def cancel(self, trading_pair: str, client_order_id: str):
        # Stub: nothing to cancel.
        pass

    def get_order_book(self, trading_pair: str) -> OrderBook:
        # Stub: returns None.
        pass

    def get_fee(self, base_currency: str, quote_currency: str, order_type: OrderType, order_side: TradeType,
                amount: Decimal, price: Decimal = s_decimal_NaN) -> TradeFee:
        # Stub: returns None.
        pass

    # Backing field for the `ready` property; tests flip it directly via the setter.
    _ready = False

    @property
    def ready(self):
        return self._ready

    @ready.setter
    def ready(self, status: bool):
        self._ready = status

    def get_price(self, trading_pair: str, is_buy_price: bool) -> Decimal:
        # Return the fixed buy or sell quote regardless of trading pair.
        return self.buy_price if is_buy_price else self.sell_price

    def update_account_balance(self, asset_balance: Dict[str, Decimal]):
        # Add the given deltas onto the (possibly empty) total balances.
        if not self._account_balances:
            self._account_balances = {}
        for asset, balance in asset_balance.items():
            self._account_balances[asset] = self._account_balances.get(asset, Decimal(0)) + balance

    def update_account_available_balance(self, asset_balance: Dict[str, Decimal]):
        # Same as update_account_balance, but for available (unlocked) funds.
        if not self._account_available_balances:
            self._account_available_balances = {}
        for asset, balance in asset_balance.items():
            self._account_available_balances[asset] = self._account_available_balances.get(asset, Decimal(0)) + balance
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Notebook-exported script: plots RFC page counts by standardization status
# over time for one protocol (default "dns"), reading data/<name>_camel.csv
# and writing two stacked-area PNGs next to it.
import sys
from IPython.display import set_matplotlib_formats
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import matplotlib
import math
import numpy as np
import scipy as sp
import seaborn as sns
import scipy.signal as sps
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [9.5, 5]
import pandas

# Protocol name; may be overridden by the first CLI argument.
specific = "dns"
args = sys.argv[1:]
if len(args) >= 1:
    specific = args[0]

# Load the whitespace-delimited page-count table; TIME is epoch seconds.
data=pandas.read_csv("data/" + specific + "_camel.csv", delim_whitespace=True)
data["TIME"]=pandas.to_datetime(data["TIME"],infer_datetime_format=True, unit='s', utc=True)
# NOTE(review): set_index returns a new frame and the result is discarded, so
# this line has no effect. Later code keeps indexing the TIME column, which
# would break if the index were actually set — confirm intent before "fixing".
data.set_index("TIME")

# Ensure every status column exists so the stackplots below never KeyError.
for k in ["INTERNET_STANDARD", "PROPOSED_STANDARD", "EXPERIMENTAL", "HISTORIC", "BEST_CURRENT_PRACTICE", "OBSOLETED", "UNKNOWN"]:
    if k not in data:
        data[k] = 0

# Full plot: all seven status categories.
plt.figure()
plt.stackplot(data["TIME"],
              data["INTERNET_STANDARD"],
              data["PROPOSED_STANDARD"],
              data["EXPERIMENTAL"],
              data["HISTORIC"],
              data["BEST_CURRENT_PRACTICE"],
              data["OBSOLETED"],
              data["UNKNOWN"],
              labels=["Internet Standard", "Proposed Standard", "Experimental", "Historic", "Best Current Practice", "Obsoleted", "Unknown"])
plt.legend(loc=2)
plt.grid()
plt.xlabel("Year")
plt.ylabel("Pages")
plt.title("Number of RFC pages covering " + specific.upper() + " over time")
plt.savefig("data/" + specific + "_camel-plot.png", dpi=300)

# "Clean" plot: only the currently-relevant categories.
plt.figure()
plt.stackplot(data["TIME"],
              data["INTERNET_STANDARD"],
              data["PROPOSED_STANDARD"],
              data["BEST_CURRENT_PRACTICE"],
              data["UNKNOWN"],
              labels=["Internet Standard", "Proposed Standard", "Best Current Practice", "Unknown"])
plt.legend(loc=2)
plt.grid()
plt.xlabel("Year")
plt.ylabel("Pages")
plt.title("Number of RFC pages covering " + specific.upper() + " over time")
plt.savefig("data/" + specific + "_camel-plot-clean.png", dpi=300)
|
"""Common functions and constants."""
from SPARQLWrapper import SPARQLWrapper, JSON
LKIF_OUTPUT_FILE = 'data/lkif_hierarchy.json'
YAGO_OUTPUT_FILE = 'data/yago_hierarchy.json'
OUTPUT_FILE = 'data/ontology.json'
LKIF_TO_YAGO_MAPPING = {
'Hohfeldian_Power': ['wordnet_legal_power_105198427'],
'Potestative_Right': ['wordnet_right_113341756'],
'Immunity': ['wordnet_exemption_100213903'],
'Legal_Document': [
'wordnet_mandate_106556481',
'wordnet_legal_document_106479665',
],
'Regulation': [
'wordnet_legal_code_106667792',
'wordnet_law_106532330',
'wordnet_law_108441203',
'wordnet_legislative_act_106564387',
'wordnet_legislation_106535222',
],
'Contract': ['wordnet_contract_106520944'],
'Treaty': ['wordnet_treaty_106773434'],
'Code_of_Conduct': ['wordnet_code_of_conduct_105668095'],
'Directive': [
'wordnet_directive_107170080',
'wordnet_pronouncement_106727616',
],
'Decree': ['wordnet_decree_106539770'],
'International_Agreement': ['wordnet_written_agreement_106771653'],
'Legal_Doctrine': [
'wikicat_Legal_doctrines_and_principles',
'wordnet_common_law_108453722',
],
'Precedent': ['wordnet_case_law_106535035'],
'Resolution': ['wordnet_resolution_106511874'],
'Proclamation': ['wordnet_proclamation_101266491'],
'Right': [
'wordnet_right_105174653',
'wordnet_right_104850341',
],
'Disallowed': ['wordnet_prohibition_106541820'],
'Permission': ['wordnet_permission_106689297'],
'Obligation': [
'wordnet_obligation_106773150',
'wordnet_duty_101129920',
],
'Legislative_Body': [
'wordnet_legislature_108163273',
'wordnet_court_108329453',
],
'Society': ['wordnet_association_108049401'],
'Co-operative': ['wordnet_cooperative_101100877'],
'Company': ['wordnet_company_108058098'],
'Limited_Company': ['wordnet_limited_company_108185211'],
'Corporation': ['wordnet_corporation_108059412'],
'Foundation': ['wordnet_foundation_108406486'],
'Delegation': ['wordnet_delegating_101140839'],
'Legal_Speech_Act': ['wordnet_pleading_106559365'],
'Decision': ['wordnet_decision_105838176'],
'Professional_Legal_Role': [
'wordnet_judge_110225219',
'wordnet_judiciary_108166187',
'wordnet_lawyer_110249950'
],
'Natural_Person': ['wordnet_person_100007846'],
'Statute': ['wordnet_legislative_act_106564387'],
'Code': [
'wordnet_legislative_act_106564387',
'wordnet_legislation_106535222',
'wordnet_law_106532330',
'wordnet_law_108441203'
],
}
YAGO_TO_LKIF_MAPPING = {
'wordnet_legal_power_105198427': ['Hohfeldian_Power'],
'wordnet_right_113341756': ['Potestative_Right'],
'wordnet_exemption_100213903': ['Immunity'],
'wordnet_legal_document_106479665': ['Legal_Document'],
'wordnet_legal_code_106667792': ['Regulation', 'Code'],
'wordnet_law_106532330': ['Regulation', 'Code'],
'wordnet_law_108441203': ['Regulation', 'Code'],
'wordnet_legislative_act_106564387': ['Regulation', 'Code', 'Statute'],
'wordnet_legislation_106535222': ['Regulation', 'Code'],
'wordnet_contract_106520944': ['Contract'],
'wordnet_treaty_106773434': ['Treaty'],
'wordnet_code_of_conduct_105668095': ['Code_of_Conduct'],
'wordnet_directive_107170080': ['Directive'],
'wordnet_pronouncement_106727616': ['Directive'],
'wordnet_decree_106539770': ['Decree'],
'wordnet_written_agreement_106771653': ['International_Agreement'],
'wordnet_mandate_106556481': ['Legal_Document'],
'wikicat_Legal_doctrines_and_principles': ['Legal_Doctrine'],
'wordnet_case_law_106535035': ['Precedent'],
'wordnet_common_law_108453722': ['Legal_Doctrine'],
'wordnet_resolution_106511874': ['Resolution'],
'wordnet_proclamation_101266491': ['Proclamation'],
'wordnet_right_105174653': ['Right'],
'wordnet_right_104850341': ['Right'],
'wordnet_prohibition_106541820': ['Disallowed'],
'wordnet_permission_106689297': ['Permission'],
'wordnet_obligation_106773150': ['Obligation'],
'wordnet_duty_101129920': ['Obligation'],
'wordnet_legislature_108163273': ['Legislative_Body'],
'wordnet_court_108329453': ['Legislative_Body'],
'wordnet_association_108049401': ['Society'],
'wordnet_cooperative_101100877': ['Co-operative'],
'wordnet_company_108058098': ['Company'],
'wordnet_limited_company_108185211': ['Limited_Company'],
'wordnet_corporation_108059412': ['Corporation'],
'wordnet_foundation_108406486': ['Foundation'],
'wordnet_delegating_101140839': ['Delegation'],
'wordnet_pleading_106559365': ['Legal_Speech_Act'],
'wordnet_decision_105838176': ['Decision'],
'wordnet_judge_110225219': ['Professional_Legal_Role'],
'wordnet_judiciary_108166187': ['Professional_Legal_Role'],
'wordnet_lawyer_110249950': ['Professional_Legal_Role'],
'wordnet_person_100007846': ['Natural_Person']
}
def query_sparql(query, endpoint):
    """Run a query against a SPARQL endpoint.

    Returns:
        A double list with only the values of each requested variable in
        the query. The first row in the result contains the name of the
        variables.
    """
    client = SPARQLWrapper(endpoint)
    client.setReturnFormat(JSON)
    client.setQuery(query)
    response = client.query().convert()
    variables = response['head']['vars']
    # One output row per binding, columns ordered like `variables`.
    value_rows = [
        [binding[variable]['value'] for variable in variables]
        for binding in response['results']['bindings']
    ]
    return [variables] + value_rows
|
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
from pybnn.bohamiann import Bohamiann
from pybnn.util.layers import AppendLayer
def vapor_pressure(x, a, b, c, *args):
    """Vapor-pressure-style basis curve for learning-curve modeling.

    a, b, c are expected in [-1, 1] (tanh outputs) and are rescaled to
    small positive ranges; extra positional args are ignored.
    NOTE(review): torch.log(x) is unguarded, so x is assumed strictly
    positive — confirm against the caller.
    """
    # Rescale: a -> [0, 1], b and c -> [0, 0.1].
    a_scaled = (a + 1) / 2
    b_scaled = (b + 1) / 2 / 10
    c_scaled = (c + 1) / 2 / 10
    decay = torch.exp(-a_scaled - b_scaled / (x + 1e-5) - c_scaled * torch.log(x))
    # Subtract the x-independent offset term.
    return decay - torch.exp(a_scaled + b_scaled)
def log_func(t, a, b, c, *args):
    """Logarithmic-growth basis curve for learning-curve modeling.

    a, b, c are expected in [-1, 1] and rescaled to [0, 5], [0, 1] and
    [0, 10] respectively; extra positional args are ignored.
    """
    slope = (a + 1) / 2 * 5
    stretch = (b + 1) / 2
    offset = (c + 1) / 2 * 10
    # 1e-10 keeps the log argument positive when stretch * t is zero.
    return (offset + slope * torch.log(stretch * t + 1e-10)) / 10.
def hill_3(x, a, b, c, *args):
    """Hill-equation basis curve (3 parameters) for learning-curve modeling.

    a, b, c are expected in [-1, 1]; a scales the amplitude, b the
    exponent, c the half-saturation constant. Extra args are ignored.
    """
    amplitude = (a + 1) / 2
    exponent = (b + 1) / 2
    half_sat = (c + 1) / 2 / 100
    # 1e-5 keeps the base of the power strictly positive.
    return amplitude * (1. / ((half_sat / x + 1e-5) ** exponent + 1.))
def bf_layer(theta, t):
    """Evaluate all three basis functions from a packed parameter tensor.

    theta carries 9 columns: 3 parameters for each of vapor_pressure,
    log_func and hill_3 (in that order). Returns the curves stacked
    along dim 1.
    """
    curves = [
        vapor_pressure(t, theta[:, 0], theta[:, 1], theta[:, 2]),
        log_func(t, theta[:, 3], theta[:, 4], theta[:, 5]),
        hill_3(t, theta[:, 6], theta[:, 7], theta[:, 8]),
    ]
    return torch.stack(curves, dim=1)
def get_lc_net_architecture(input_dimensionality: int) -> torch.nn.Module:
    """Build the LC-Net module used for learning-curve extrapolation.

    The last input column is treated as the (normalized) training time t;
    the remaining columns are configuration features. Three parametric
    basis curves are blended by learned weights and shifted by a learned
    asymptote, and an AppendLayer attaches the noise estimate.
    """
    class LearningCurveNet(nn.Module):
        def __init__(self, n_inputs, n_hidden=50):
            super(LearningCurveNet, self).__init__()
            # Feature trunk: three tanh-activated hidden layers.
            self.fc1 = nn.Linear(n_inputs - 1, n_hidden)
            self.fc2 = nn.Linear(n_hidden, n_hidden)
            self.fc3 = nn.Linear(n_hidden, n_hidden)
            # Heads: 9 basis parameters (3 per curve), 3 mixing weights,
            # 1 asymptotic value, plus the appended noise column.
            self.theta_layer = nn.Linear(n_hidden, 9)
            self.weight_layer = nn.Linear(n_hidden, 3)
            self.asymptotic_layer = nn.Linear(n_hidden, 1)
            self.sigma_layer = AppendLayer(noise=1e-3)

        def forward(self, input):
            # Split configuration features from the time column.
            features = input[:, :-1]
            t = input[:, -1]
            hidden = torch.tanh(self.fc1(features))
            hidden = torch.tanh(self.fc2(hidden))
            hidden = torch.tanh(self.fc3(hidden))
            # Basis parameters squashed into [-1, 1], as the curves expect.
            theta = torch.tanh(self.theta_layer(hidden))
            basis = bf_layer(theta, t)
            mix = torch.softmax(self.weight_layer(hidden), -1)
            residual = torch.tanh(torch.sum(basis * mix, dim=(1,), keepdim=True))
            asymptote = torch.sigmoid(self.asymptotic_layer(hidden))
            # Mean prediction plus the noise estimate appended by sigma_layer.
            return self.sigma_layer(residual + asymptote)

    return LearningCurveNet(n_inputs=input_dimensionality)
class LCNet(Bohamiann):
    """Bohamiann wrapper configured with the LC-Net architecture.

    Inputs are normalized (with a custom rule, below); outputs are not.
    """

    def __init__(self, **kwargs) -> None:
        super(LCNet, self).__init__(get_network=get_lc_net_architecture,
                                    normalize_input=True,
                                    normalize_output=False,
                                    **kwargs)

    @staticmethod
    def normalize_input(x, m=None, s=None):
        """Z-score every column except the last (the time/epoch index).

        m and s default to the per-column mean and std of x; passing them
        in lets callers reuse training statistics. Returns the normalized
        copy together with the statistics used.
        """
        if m is None:
            m = np.mean(x, axis=0)
        if s is None:
            s = np.std(x, axis=0)
        normalized = deepcopy(x)
        # Last column is deliberately left untouched.
        normalized[:, :-1] = (x[:, :-1] - m[:-1]) / s[:-1]
        return normalized, m, s
|
#!/usr/bin/env python
import numpy as np
#from matplotlib import pyplot as plt
from numpy.random import *
def main():
    """Simulate a velocity-form (incremental) PID loop on a unit plant.

    The plant is modeled as y = M (the output equals the control signal);
    the controller steps the output toward `goal` for `t` iterations.
    Prints the final control value (as the original script did) and
    returns (x_list, y_list) so the trajectory can be inspected.
    """
    M = 1.00    # current control output
    M1 = 0.00   # previous control output
    e = 0.00    # current error
    e1 = 0.00   # error one step back
    e2 = 0.00   # error two steps back
    Kp = 0.10
    Ki = 0.10
    Kd = 0.10
    t = 100     # number of simulation steps
    goal = 50.00
    x_list = []
    y_list = []
    x_list.append(0)
    y_list.append(0.00)
    for i in range(1, t):
        M1 = M
        # BUGFIX: shift the error history oldest-first. The original did
        # `e1 = e; e2 = e1`, which made e2 == e1 on every step and
        # degenerated the derivative term (e-e1)-(e1-e2) into (e-e1).
        e2 = e1
        e1 = e
        e = goal - y_list[i - 1]
        # Velocity-form PID update: delta = Kp*de + Ki*e + Kd*d2e.
        M = M1 + Kp * (e - e1) + Ki * e + Kd * ((e - e1) - (e1 - e2))
        y_list.append(M)
        x_list.append(i)
    # print() is valid on both Python 2 and 3; the original `print M`
    # statement was Python-2-only syntax.
    print(M)
    return x_list, y_list


if __name__ == "__main__":
    main()
|
import re
from streamlink.compat import urljoin
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HLSStream

# Player-settings cookie value: asks NRK's site to expose HLS links for
# both on-demand ("odm") and live playback on desktop.
COOKIE_PARAMS = (
    "devicetype=desktop&"
    "preferred-player-odm=hlslink&"
    "preferred-player-live=hlslink"
)

# Extracts the program/series id from the URL path.
_id_re = re.compile(r"/(?:program|direkte|serie/[^/]+)/([^/]+)")
# Matches tv.nrk.no / radio.nrk.no; group 1 is the service ("tv"/"radio").
_url_re = re.compile(r"https?://(tv|radio).nrk.no/")
# Finds the apiBaseUrl value embedded in the page's JavaScript.
_api_baseurl_re = re.compile(r'''apiBaseUrl:\s*["'](?P<baseurl>[^"']+)["']''')

# Page schema: scrape for apiBaseUrl; yields the URL string, or None when
# the pattern is absent.
_schema = validate.Schema(
    validate.transform(_api_baseurl_re.search),
    validate.any(
        None,
        validate.all(
            validate.get("baseurl"),
            validate.url(
                scheme="http"
            )
        )
    )
)

# Media-element schema: the JSON must carry an HTTP URL ending in .m3u8.
_mediaelement_schema = validate.Schema({
    "mediaUrl": validate.url(
        scheme="http",
        path=validate.endswith(".m3u8")
    )
})
class NRK(Plugin):
    """Streamlink plugin for NRK TV and Radio (tv.nrk.no / radio.nrk.no)."""

    @classmethod
    def can_handle_url(cls, url):
        # FIX: first parameter of a classmethod is conventionally `cls`,
        # not `self` (behavior unchanged).
        return _url_re.match(url)

    def _get_streams(self):
        # Get the stream type from the url (tv/radio).
        stream_type = _url_re.match(self.url).group(1).upper()
        cookie = {
            "NRK_PLAYER_SETTINGS_{0}".format(stream_type): COOKIE_PARAMS
        }

        # Construct API URL for this program.
        baseurl = http.get(self.url, cookies=cookie, schema=_schema)
        if baseurl is None:
            # FIX: the schema may validate to None when the page carries no
            # apiBaseUrl; previously this crashed in urljoin. Returning
            # nothing signals "no streams found".
            return
        id_match = _id_re.search(self.url)
        if id_match is None:
            # FIX: likewise, an unrecognized path previously raised
            # AttributeError on .group(1).
            return
        program_id = id_match.group(1)

        # Extract media URL.
        json_url = urljoin(baseurl, "mediaelement/{0}".format(program_id))
        res = http.get(json_url, cookies=cookie)
        media_element = http.json(res, schema=_mediaelement_schema)
        media_url = media_element["mediaUrl"]

        return HLSStream.parse_variant_playlist(self.session, media_url)


__plugin__ = NRK
|
# coding=utf-8
import argparse
import sys
import sdl2.ext
from conf import *
from textbuffer import TextBuffer
from textarea import TextArea
from statusbar import StatusBar
from state import Editor, EditState
def res(s):
    """argparse type callable: parse "x,y" into a tuple of ints.

    Raises argparse.ArgumentTypeError for malformed input so argparse
    reports a clean usage error.
    """
    try:
        # FIX: return a tuple instead of `map(int, ...)` — under Python 3
        # a bare map object is a lazy iterator and cannot be used as a
        # window size (it is not indexable and drains on first use).
        return tuple(int(part) for part in s.split(','))
    except ValueError:
        # FIX: catch only the parse failure; the original bare `except:`
        # also swallowed KeyboardInterrupt/SystemExit.
        raise argparse.ArgumentTypeError("Resolution must be in x,y format.")
def get_args():
    """Parse the command line: an optional file to edit and -r/--resolution.

    Returns (window_size, filename); window_size falls back to the
    WINDOW_SIZE constant from conf when no -r flag is given.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("file", nargs="?", help="file to edit")
    parser.add_argument("-r", "--resolution", type=res,
                        help="size of window in x,y format")
    parsed = parser.parse_args()
    window_size = parsed.resolution if parsed.resolution else WINDOW_SIZE
    return window_size, parsed.file
def run():
    """Entry point: initialize SDL2, build the editor UI, run the edit loop.

    Returns 0 on normal exit so callers can pass it to sys.exit().
    """
    (window_size, f) = get_args()
    tb = TextBuffer()
    if f:
        # Pre-load the buffer with the file named on the command line.
        tb.load(f)
    sdl2.ext.init()
    window = sdl2.ext.Window(WINDOW_TITLE, size=window_size)
    window.show()
    factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
    font_img = factory.from_image(FONT_IMG_PATH)
    # Make the font sheet's background color transparent via a color key.
    sdl2.SDL_SetColorKey(font_img.surface, sdl2.SDL_TRUE,
        sdl2.SDL_MapRGB(font_img.surface.format, COLOR_KEY_R, COLOR_KEY_G, COLOR_KEY_B))
    font = sdl2.ext.BitmapFont(font_img, FONT_SIZE, mapping=FONT_MAPPING)
    # Status bar plus text area; the text area reserves room at the bottom
    # for the status bar (sb.height added to its bottom border).
    sb = StatusBar(window, tb, font, STATUS_BAR_COLOR, (STATUS_BAR_BORDER_TOP,
                                                        STATUS_BAR_BORDER_RIGHT,
                                                        STATUS_BAR_BORDER_BOTTOM,
                                                        STATUS_BAR_BORDER_LEFT))
    ta = TextArea(window, font, tb, (TEXT_AREA_BORDER_TOP,
                                     TEXT_AREA_BORDER_RIGHT,
                                     sb.height + TEXT_AREA_BORDER_BOTTOM,
                                     TEXT_AREA_BORDER_LEFT),
                                     TEXT_AREA_LINE_SPACING)
    state = EditState(Editor(None, window, ta, sb))
    sdl2.SDL_StartTextInput()
    # Main loop: each state.update consumes pending SDL events and returns
    # the next state; None signals quit.
    while True:
        start = sdl2.SDL_GetTicks()
        state = state.update(sdl2.ext.get_events())
        if state is None:
            break
        # Frame limiter: sleep off the remainder of the frame budget.
        # NOTE(review): 1000 / FRAMES_PER_SECOND is float division under
        # Python 3 while SDL_Delay takes an integer — confirm the intended
        # interpreter version.
        ticks = sdl2.SDL_GetTicks() - start
        if ticks < 1000 / FRAMES_PER_SECOND:
            sdl2.SDL_Delay((1000 / FRAMES_PER_SECOND) - ticks)
    sdl2.SDL_StopTextInput()
    sdl2.ext.quit()
    return 0


if __name__ == "__main__":
    sys.exit(run())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.