hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cbe9a1720b8af76b0bda0a31d14b5c3804ae9d47 | 735 | py | Python | approvaltests/reporters/introduction_reporter.py | SwamyDev/ApprovalTests.Python | f857f18420e8d8a6859e17d128f47c143bc4e588 | [
"Apache-2.0"
] | null | null | null | approvaltests/reporters/introduction_reporter.py | SwamyDev/ApprovalTests.Python | f857f18420e8d8a6859e17d128f47c143bc4e588 | [
"Apache-2.0"
] | null | null | null | approvaltests/reporters/introduction_reporter.py | SwamyDev/ApprovalTests.Python | f857f18420e8d8a6859e17d128f47c143bc4e588 | [
"Apache-2.0"
] | null | null | null | from approvaltests.core.reporter import Reporter
from approvaltests.reporters.python_native_reporter import PythonNativeReporter
class IntroductionReporter(Reporter):
    """Fallback reporter used when no diff tool was detected.

    Prints a short introduction pointing at the reporters tutorial and then
    delegates the actual reporting to ``PythonNativeReporter``.
    """

    def report(self, received_path: str, approved_path: str) -> bool:
        intro = self.get_text()
        print(intro)
        native = PythonNativeReporter()
        return native.report(received_path, approved_path)

    def get_text(self):
        """Return the introductory message shown before the native report."""
        return '''
Welcome to ApprovalTests!
No DiffReporters have been detected on this system.
To learn more, visit [Introduction to Reporters](https://github.com/approvals/ApprovalTests.Python/blob/main/docs/tutorial/intro-to-reporters.md)
'''

    def __str__(self):
        return type(self).__name__

    __repr__ = __str__
992a2416ddd5a2f449853822e6934429d3296283 | 554 | py | Python | saw/parsers/paragraphs.py | diNard/Saw | 7ed8cbfa243e638eac77cf83e72907be3d19fa1e | [
"MIT"
] | 3 | 2015-01-13T14:33:02.000Z | 2015-06-18T04:53:16.000Z | saw/parsers/paragraphs.py | diNard/Saw | 7ed8cbfa243e638eac77cf83e72907be3d19fa1e | [
"MIT"
] | null | null | null | saw/parsers/paragraphs.py | diNard/Saw | 7ed8cbfa243e638eac77cf83e72907be3d19fa1e | [
"MIT"
] | null | null | null | from saw.parsers.sentences import Parser, Sentences
class Paragraphs(Parser):
    """Splits raw text into paragraph chunks separated by newline runs."""

    _child_class = Sentences

    @classmethod
    def parse(cls, text):
        """Return an alternating list of newline-run lists and stripped lines."""
        parsed = []
        pending = []
        for raw_line in text.split("\n"):
            stripped = raw_line.strip()
            if stripped != '':
                # Flush the accumulated newlines, then the content line.
                parsed.append(pending)
                parsed.append(stripped)
                pending = []
            pending.append("\n")
        # split() yields one element per newline plus a trailing piece, so the
        # last appended "\n" never existed in the input; drop it.
        pending.pop()
        parsed.append(pending)
        return parsed
| 25.181818 | 57 | 0.512635 |
4d81509866fa11c4d5af7348f4ae12bdd4e0c80c | 18,697 | py | Python | parser/fase2/team28/models/instructions/DML/dml_instr.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 3 | 2020-12-19T03:48:51.000Z | 2021-10-06T03:42:30.000Z | parser/fase2/team28/models/instructions/DML/dml_instr.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | null | null | null | parser/fase2/team28/models/instructions/DML/dml_instr.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 1 | 2022-02-16T04:25:00.000Z | 2022-02-16T04:25:00.000Z | import pandas as pd
from models.instructions.shared import Instruction
from models.instructions.DDL.table_inst import CreateTB
from models.instructions.DML.special_functions import loop_list
from controllers.type_checker import TypeChecker
from controllers.symbol_table import SymbolTable
from controllers.error_controller import ErrorController
from controllers.data_controller import DataController
from models.instructions.shared import Where, putVarValues
from models.instructions.DML.special_functions import storage_columns, storage_table
from controllers.three_address_code import ThreeAddressCode
from models.Other.funcion import Funcion
from storageManager import jsonMode as j
'''
Lenguaje de Manipulación de Datos (DML) =======================================================================================================================
'''
class Insert(Instruction):
    '''
    INSERT takes three parameters:
    1. table to insert into
    2. columns to insert into (may be empty: values go into every column)
    3. values to insert
    '''

    def __init__(self, table, arr_columns, arr_values, tac, line, column):
        # line/column keep the source position for error reporting.
        self.table = table
        self.arr_columns = arr_columns
        self.arr_values = arr_values
        self._tac = tac
        self.line = line
        self.column = column

    def __repr__(self):
        return str(vars(self))

    def compile(self, environment):
        """Emit three-address code that re-parses this INSERT at runtime."""
        database_id = SymbolTable().useDatabase
        # Compile any function calls that appear among the values first.
        temps_array = []
        for value in self.arr_values:
            if isinstance(value, Funcion):
                temps_array.append(value.compile(environment))
        new_val = putVarValues(self._tac, temps_array, environment)
        temp = ''
        if new_val == self._tac: # No substitution happened: wrap the raw SQL in a temporal.
            temp = ThreeAddressCode().newTemp()
            if database_id is not None:
                ThreeAddressCode().addCode(f"{temp} = \"USE {database_id}; {new_val}\"")
            else:
                ThreeAddressCode().addCode(f"{temp} = \"{new_val}\"")
        else:
            temp = new_val
        # The generated code defers execution by calling parse() on the SQL string.
        temp1 = ThreeAddressCode().newTemp()
        ThreeAddressCode().addCode(f"{temp1} = parse({temp})")
        return temp1

    def process(self, instruction):
        """Validate the row against the table schema and perform the insert."""
        if self.arr_columns == None:
            # Only values were given; all columns must be covered.
            # (Arity and types are re-checked in validateValues.)
            vals_insert = []
            for column in self.arr_values:
                val = column.process(instruction)
                vals_insert.append(val.value)
            # print(vals_insert)
            if self.validateValues(vals_insert):
                pass
            else:
                return
            DataController().insert(self.table.alias, vals_insert, self.line, self.column)
        else:
            if len(self.arr_columns) == len(self.arr_values):
                dic = {}
                for i in range(len(self.arr_columns)):
                    id_col = self.arr_columns[i].alias
                    if id_col in dic:
                        desc = f'Columna {id_col} ya definida'
                        ErrorController().add(29, 'Execution', desc, self.line, self.column)
                        return None
                    else:
                        dic[id_col] = self.arr_values[i].process(
                            instruction).value
                # Fetch the target table metadata.
                database_id = SymbolTable().useDatabase
                table_tp = TypeChecker().searchTable(database_id, self.table.alias)
                headers = TypeChecker().searchColumnHeadings(table_tp)
                checker = CreateTB(None, None, None, None)
                # Validate the column names the user provided.
                for key in dic:
                    if not key in headers:
                        desc = f'Nombre de columna invalido, {key}'
                        ErrorController().add(26, 'Execution', desc, self.line, self.column)
                        return None
                for name_col in headers:
                    column = TypeChecker().searchColumn(table_tp, name_col).__dict__
                    if not name_col in dic: # Column omitted: fall back to DEFAULT / NULL.
                        if column['_default'] is not None:
                            if isinstance(column['_default'], str):
                                dic[name_col] = column['_default'].replace(
                                    "\'", "")
                            else:
                                dic[name_col] = column['_default']
                        else:
                            dic[name_col] = None
                            if column['_notNull'] == True:
                                desc = f'Columna {name_col} no puede ser null'
                                ErrorController().add(28, 'Execution', desc, self.line, self.column)
                                return None
                            else:
                                dic[name_col] = None
                    else: # Column provided: validate the value's type.
                        is_correct = checker.validateType(
                            column['_dataType'], dic.get(name_col), False)
                        if not is_correct:
                            desc = f'Valor no valido para la columna {name_col}'
                            ErrorController().add(9, 'Execution', desc, self.line, self.column)
                            return None
                        # Validate the CHECK constraint, if any.
                        if not realizeCheck(column, dic, self.line, self.column):
                            return None
                # TODO: add WHERE support, validate UNIQUE and validate CHECK
                ordered_vals = []
                for name_col in headers:
                    ordered_vals.append(dic.get(name_col))
                print(ordered_vals)
                DataController().insert(self.table.alias, ordered_vals,
                                        0, 1)  # Row/column numbers hard-coded here.
            else:
                desc = "Error Datos incompletos"
                ErrorController().add(28, 'Execution', desc, self.line, self.column)
                return None

    def validateValues(self, array_values: list):
        """Check array_values against the table's arity, column types and CHECKs."""
        database_id = SymbolTable().useDatabase
        table_tp = TypeChecker().searchTable(database_id, self.table.alias)
        headers = TypeChecker().searchColumnHeadings(table_tp)
        if len(headers) != len(array_values):
            desc = "Error Datos incompletos"
            ErrorController().add(28, 'Execution', desc, self.line, self.column)
            return False
        checker = CreateTB(None, None, None, None)
        dic = dict(zip(headers, array_values))
        for index, name_col in enumerate(headers):
            column = TypeChecker().searchColumn(table_tp, name_col).__dict__
            is_correct = checker.validateType(
                column['_dataType'], array_values[index], False)
            if not is_correct:
                desc = f'Valor no valido para la columna {name_col}'
                ErrorController().add(9, 'Execution', desc, self.line, self.column)
                return False
            if not realizeCheck(column, dic, self.line, self.column):
                return False
        return True
class Update(Instruction):
    '''
    UPDATE takes three parameters:
    1. table to update
    2. array of columns with the value to assign (ColumnVal[])
    3. array with all the OPTIONAL parameters (e.g. WHERE)
    '''

    def __init__(self, table, arr_columns_vals, params, tac, line, column):
        self.table = table
        self.arr_columns_vals = arr_columns_vals
        self.params = params
        self._tac = tac
        self.line = line
        self.column = column

    def __repr__(self):
        return str(vars(self))

    def compile(self, instrucction):
        """Emit three-address code that re-parses this UPDATE at runtime."""
        # Generate the C3D (three-address code) wrapper around the raw SQL.
        temp = ThreeAddressCode().newTemp()
        database_id = SymbolTable().useDatabase
        if database_id is not None:
            ThreeAddressCode().addCode(f"{temp} = \"USE {database_id}; {self._tac}\"")
        else:
            ThreeAddressCode().addCode(f"{temp} = \"{self._tac}\"")
        # The generated code defers execution by calling parse() on the SQL string.
        temp1 = ThreeAddressCode().newTemp()
        ThreeAddressCode().addCode(f"{temp1} = parse({temp})")
        return temp1

    def process(self, instruction):
        """Validate the assignments and update the matching rows."""
        # Fetch the table being updated.
        database_id = SymbolTable().useDatabase
        table_tp = TypeChecker().searchTable(database_id, self.table)
        table_cont = DataController().extractTable(self.table, self.line, self.column)
        headers = TypeChecker().searchColumnHeadings(table_tp)
        table_update = pd.DataFrame(table_cont)
        tuplas = []  # t[0] = column name, t[1] = value to assign
        for column in self.arr_columns_vals:
            tuplas.append(column.process(instruction))
        d = {}
        d_col_names = {}
        # Validate the column names the user provided.
        for t in tuplas:
            if not t[0] in headers:
                desc = f'Nombre de columna invalido, {t[0]}'
                ErrorController().add(26, 'Execution', desc, self.line, self.column)
                return None
            else:
                d[headers.index(t[0])] = t[1].value
                d_col_names[t[0]] = t[1].value
        # Validate the value types against the column definitions.
        print(d_col_names)
        checker = CreateTB(None, None, None, None)
        for key in list(d_col_names.keys()):
            column = TypeChecker().searchColumn(table_tp, key).__dict__
            is_correct = checker.validateType(
                column['_dataType'], d_col_names.get(key), False)
            if not is_correct:
                desc = f'Valor no valido para la columna {key}'
                ErrorController().add(9, 'Execution', desc, self.line, self.column)
                return None
            if not realizeCheck(column, d_col_names, self.line, self.column):
                return None
        # No params given: change every row in the table.
        if self.params == None:
            pk_col_name = TypeChecker().searchColPrimaryKey(table_tp)
            if pk_col_name == []:  # No primary key: address rows by index.
                pk_list = range(len(table_update.index))
                print(pk_list)
                for pk in pk_list:
                    DataController().update(self.table, d, [
                        pk], self.line, self.column)
            else:
                list_pks = []
                for col in pk_col_name:
                    list_pks.append(col.name)
                table_update.columns = headers
                pk_list = table_update[list_pks].values.tolist()
                print(pk_list)
                for pk in pk_list:
                    DataController().update(self.table, d, [
                        pk], self.line, self.column)
        else:
            # NOTE(review): `is not list` compares identity against the list
            # type itself, so this is True for any normal value; the intent
            # was probably `not isinstance(self.params, list)` -- confirm.
            if self.params is not list:
                self.params = [self.params]
            for option in self.params:
                if isinstance(option, Where):
                    table_update.columns = headers
                    storage_columns(table_cont, headers,
                                    self.line, self.column)
                    storage_table(table_cont, headers, self.table,
                                  self.line, self.column)
                    table_result = option.process(
                        instruction, table_update, self.table)
                    pk_col_name = TypeChecker().searchColPrimaryKey(table_tp)
                    if pk_col_name == []:  # No primary key: address rows by index.
                        pk_list = table_result.index.to_list()
                        print(pk_list)
                        for pk in pk_list:
                            if type(pk) is list:
                                DataController().update(self.table, d, pk, self.line, self.column)
                            else:
                                DataController().update(self.table, d, [pk], self.line, self.column)
                    else:
                        table_result.columns = headers
                        list_pks = []
                        for col in pk_col_name:
                            list_pks.append(col.name)
                        pk_list = table_result[list_pks].values.tolist()
                        print(pk_list)
                        for pk in pk_list:
                            if type(pk) is list:
                                DataController().update(self.table, d, pk, self.line, self.column)
                            else:
                                DataController().update(self.table, d, [pk], self.line, self.column)
        return None
class ColumnVal(Instruction):
    """A single `column = value` assignment inside an UPDATE.

    Holds the column node (whose `alias` is the target column name) and the
    expression node producing the value to assign.
    """

    def __init__(self, column, value):
        self.column = column
        self.value = value
        self._tac = ''

    def __repr__(self):
        return str(vars(self))

    def process(self, instruction):
        # Evaluate the value expression and pair it with the column name.
        name = self.column.alias
        evaluated = self.value.process(instruction)
        return [name, evaluated]
class Opt1(Instruction):
    '''
    Records whether an ALIAS and/or an asterisk (*) was supplied (true || false)
    '''

    def __init__(self, isAsterisco, alias):
        self.isAsterisco = isAsterisco
        self.alias = alias
        self._tac = ''

    def __repr__(self):
        return str(vars(self))

    def process(self, instrucction):
        # Nothing to evaluate for this option node.
        pass
class Delete(Instruction):
    '''
    DELETE receives the table to delete from and an array with all the
    OPTIONAL parameters.
    Options available in the DELETE params array:
    opt1 = ASTERISK SQLALIAS || ASTERISK || SQLALIAS
    opt2 = USING
    opt3 = WHERE
    opt4 = RETURNING
    '''

    def __init__(self, table, params, tac, line, column):
        self.table = table
        self.params = params
        self._tac = tac
        self.line = line
        self.column = column

    def __repr__(self):
        return str(vars(self))

    def compile(self, environment):
        """Emit three-address code that re-parses this DELETE at runtime."""
        database_id = SymbolTable().useDatabase
        # Compile any function calls that appear among the params first.
        temps_array = []
        if self.params is not None:
            for value in self.params:
                if isinstance(value, Funcion):
                    temps_array.append(value.compile(environment))
        new_val = None
        # NOTE(review): temps_array is always a list, never None, so the
        # else branch below is dead code -- confirm intent.
        if temps_array is not None:
            new_val = putVarValues(self._tac, temps_array, environment)
        else:
            new_val = self._tac
        temp = ''
        if new_val == self._tac: # No substitution happened: wrap the raw SQL in a temporal.
            temp = ThreeAddressCode().newTemp()
            if database_id is not None:
                ThreeAddressCode().addCode(f"{temp} = \"USE {database_id}; {new_val}\"")
            else:
                ThreeAddressCode().addCode(f"{temp} = \"{new_val}\"")
        else:
            temp = new_val
        # The generated code defers execution by calling parse() on the SQL string.
        temp1 = ThreeAddressCode().newTemp()
        ThreeAddressCode().addCode(f"{temp1} = parse({temp})")
        return temp1

    def process(self, instrucction):
        """Delete matching rows (all rows when no WHERE option is given)."""
        # Fetch the table being deleted from.
        database_id = SymbolTable().useDatabase
        table_tp = TypeChecker().searchTable(database_id, self.table)
        table_cont = DataController().extractTable(self.table, self.line, self.column)
        headers = TypeChecker().searchColumnHeadings(table_tp)
        table_delete = pd.DataFrame(table_cont)
        if self.params == None:
            pk_col_name = TypeChecker().searchColPrimaryKey(table_tp)
            if pk_col_name == []:  # No primary key: address rows by index.
                pk_list = table_delete.index.tolist()
                print(pk_list)
                for pk in pk_list:
                    DataController().delete(self.table, pk, self.line, self.column)
            else:
                table_delete.columns = headers
                list_pks = []
                for col in pk_col_name:
                    list_pks.append(col.name)
                pk_list = table_delete[list_pks].values.tolist()
                print(pk_list)
                for pk in pk_list:
                    DataController().delete(self.table, pk, self.line, self.column)
        else:
            for option in self.params:
                if isinstance(option, Where):
                    table_delete.columns = headers
                    storage_columns(table_cont, headers,
                                    self.line, self.column)
                    storage_table(table_cont, headers, self.table,
                                  self.line, self.column)
                    table_result = option.process(
                        instrucction, table_delete, self.table)
                    pk_col_name = TypeChecker().searchColPrimaryKey(table_tp)
                    if pk_col_name == []:  # No primary key: address rows by index.
                        pk_list = table_result.index.to_list()
                        print(pk_list)
                        for pk in pk_list:
                            DataController().delete(self.table, pk, self.line, self.column)
                    else:
                        table_result.columns = headers
                        list_pks = []
                        for col in pk_col_name:
                            list_pks.append(col.name)
                        pk_list = table_result[list_pks].values.tolist()
                        print(pk_list)
                        for pk in pk_list:
                            DataController().delete(self.table, pk, self.line, self.column)
                    break
        return None
def realizeCheck(column: dict, dic: dict, line, pos_column):
    """Validate a column's CHECK constraint against the candidate row values.

    Args:
        column: column metadata dict; column['_check'] is [] when the column
            has no CHECK, otherwise a dict with a '_condition_check' string.
        dic: mapping of column name -> candidate value, used as the namespace
            in which the condition is evaluated. Left unmodified.
        line, pos_column: source position forwarded to the error controller.

    Returns:
        True when there is no CHECK or the condition holds; False otherwise
        (after registering the execution error).
    """
    check = column['_check']
    if check == []:
        return True  # no CHECK constraint on this column
    condition = check['_condition_check']
    # SECURITY: eval() of a stored expression is dangerous if the condition
    # can originate from untrusted input; kept for parser compatibility.
    # Bugfix: pass `dic` as locals with separate empty globals so eval() does
    # not inject '__builtins__' into the caller's dict (the old
    # eval(condition, dic) mutated `dic`, which callers keep using).
    if not eval(condition, {}, dic):
        desc = f'Valor no cumple la condicion {condition} del check'
        ErrorController().add(9, 'Execution', desc, line, pos_column)
        return False
    return True
| 39.696391 | 163 | 0.537573 |
a7381f35bbe93e3d5685bb64e2ea719b02b36076 | 18,153 | py | Python | benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/13-sender_receiver_17.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/13-sender_receiver_17.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/13-sender_receiver_17.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import FrozenSet
from collections import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
    """Declare the (current, next) constant pair for state variable `name`."""
    assert not name.startswith("_"), name
    curr = msat_make_constant(
        menv, msat_declare_function(menv, name, c_type))
    nxt = msat_make_constant(
        menv, msat_declare_function(menv, name_next(name), c_type))
    return curr, nxt
def make_enum(menv, v_name: str, enum_size: int):
    """Encode an enum of `enum_size` values over ceil(log2) boolean constants.

    Returns (bool_var_pairs, val_preds, x_val_preds): val_preds[i] is the
    conjunction of literals selecting value i, x_val_preds[i] its primed copy.
    """
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    b_vars = [tuple(decl_consts(menv, "{}{}".format(v_name, idx), bool_type))
              for idx in range(num_bits)]
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        # Literal per bit: the variable itself for '1', its negation for '0'.
        # bit_val is reversed so bit 0 pairs with b_vars[0].
        lits = []
        for idx, c in enumerate(reversed(bit_val)):
            if c == '1':
                lits.append(b_vars[idx])
            else:
                lits.append((msat_make_not(menv, b_vars[idx][0]),
                             msat_make_not(menv, b_vars[idx][1])))
        pred, x_pred = lits[0]
        for cur_lit, x_lit in lits[1:]:
            pred = msat_make_and(menv, pred, cur_lit)
            x_pred = msat_make_and(menv, x_pred, x_lit)
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Build arg0 - arg1 as arg0 + (arg1 * -1)."""
    neg_arg1 = msat_make_times(menv, arg1, msat_make_number(menv, "-1"))
    return msat_make_plus(menv, arg0, neg_arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """arg0 < arg1, expressed as not(arg0 >= arg1)."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """arg0 >= arg1, via leq with swapped arguments."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """arg0 > arg1, expressed as not(arg0 <= arg1)."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """arg0 -> arg1, encoded as (not arg0) | arg1."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the symbols required to diverge: the real-valued `delta`."""
    real_type = msat_get_rational_type(menv)
    delta = msat_make_constant(
        menv, msat_declare_function(menv, delta_name, real_type))
    return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                  msat_term, msat_term):
    """Build the sender/receiver timed transition system and its LTL property.

    Returns (curr2next term map, init formula, trans formula, LTL property).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    int_type = msat_get_integer_type(menv)
    real_type = msat_get_rational_type(menv)
    # Shared channels: receiver-to-sender and sender-to-receiver.
    r2s, x_r2s = decl_consts(menv, "r2s", int_type)
    s2r, x_s2r = decl_consts(menv, "s2r", int_type)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
    receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
    curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
    for comp in [sender, receiver]:
        for s, x_s in comp.symb2next.items():
            curr2next[s] = x_s
    zero = msat_make_number(menv, "0")
    init = msat_make_and(menv, receiver.init, sender.init)
    trans = msat_make_and(menv, receiver.trans, sender.trans)
    # invar delta >= 0
    init = msat_make_and(menv, init,
                         msat_make_geq(menv, delta, zero))
    trans = msat_make_and(menv, trans,
                          msat_make_geq(menv, x_delta, zero))
    # delta > 0 -> (r2s' = r2s & s2r' = s2r)
    lhs = msat_make_gt(menv, delta, zero)
    rhs = msat_make_and(menv,
                        msat_make_equal(menv, x_r2s, r2s),
                        msat_make_equal(menv, x_s2r, s2r))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # Property: (G F !s.stutter) -> G (s.wait_ack -> F s.send)
    lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
    rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
                                    enc.make_F(sender.send)))
    ltl = msat_make_impl(menv, lhs, rhs)
    return TermMap(curr2next), init, trans, ltl
class Module:
    """Base class for the components of the timed transition system.

    Holds the init/trans formulas (both start as `true`) and the map from
    current-state symbols to their primed (next-state) versions.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        self.symb2next = {}
        true = msat_make_true(menv)
        self.init = true
        self.trans = true

    def _symb(self, v_name, v_type):
        """Declare a (curr, next) constant pair prefixed with the module name."""
        return decl_consts(self.menv,
                           "{}_{}".format(self.name, v_name), v_type)

    def _enum(self, v_name: str, enum_size: int):
        """Declare a module-scoped enum of `enum_size` values."""
        return make_enum(self.menv,
                         "{}_{}".format(self.name, v_name), enum_size)
class Sender(Module):
    """Sender component: alternates between sending and waiting for an ack.

    State: location bit (send / wait_ack), event bit (move / stutter),
    integer msg_id, real-valued timeout and clock c.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        int_type = msat_get_integer_type(menv)
        real_type = msat_get_rational_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        evt, x_evt = self._symb("evt", bool_type)
        msg_id, x_msg_id = self._symb("msg_id", int_type)
        timeout, x_timeout = self._symb("timeout", real_type)
        c, x_c = self._symb("c", real_type)
        # evt encodes move (True) vs stutter (False).
        self.move = evt
        self.stutter = msat_make_not(menv, evt)
        self.x_move = x_evt
        self.x_stutter = msat_make_not(menv, x_evt)
        # loc encodes send (True) vs wait_ack (False).
        self.send = loc
        self.wait_ack = msat_make_not(menv, loc)
        self.x_send = x_loc
        self.x_wait_ack = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
                          timeout: x_timeout, c: x_c}
        zero = msat_make_number(menv, "0")
        one = msat_make_number(menv, "1")
        base_timeout = one
        # send & c = 0 & msg_id = 0
        self.init = msat_make_and(menv,
                                  msat_make_and(menv, self.send,
                                                msat_make_equal(menv, c,
                                                                zero)),
                                  msat_make_equal(menv, msg_id, zero))
        # invar: wait_ack -> c <= timeout
        self.init = msat_make_and(
            menv, self.init,
            msat_make_impl(menv, self.wait_ack,
                           msat_make_leq(menv, c, timeout)))
        self.trans = msat_make_impl(menv, self.x_wait_ack,
                                    msat_make_leq(menv, x_c, x_timeout))
        # delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
        # c' = c + delta & out_c' = out_c
        lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_iff(menv, x_loc, loc),
                          msat_make_equal(menv, x_msg_id, msg_id)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_timeout, timeout),
                          msat_make_equal(menv, x_c,
                                          msat_make_plus(menv, c, delta))))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Discrete transitions fire only when moving with delta = 0.
        disc_t = msat_make_and(menv, self.move,
                               msat_make_equal(menv, delta, zero))
        # (send & send') ->
        # (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_send))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id, msg_id),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (send & wait_ack') ->
        # (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_wait_ack))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id,
                                          msat_make_plus(menv, msg_id, one)),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (c' = 0 & out_c' = out_c &
        # (wait_ack' <-> (in_c != msg_id & c > timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs_iff = msat_make_and(menv,
                                msat_make_not(menv,
                                              msat_make_equal(menv, in_c,
                                                              msg_id)),
                                msat_make_geq(menv, c, timeout))
        rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
        rhs = msat_make_and(menv,
                            msat_make_and(menv,
                                          msat_make_equal(menv, x_c, zero),
                                          msat_make_equal(menv, x_out_c,
                                                          out_c)),
                            rhs_iff)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & wait_ack') -> (timeout' > timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack,
                                          self.x_wait_ack))
        rhs = msat_make_gt(menv, x_timeout, timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs = msat_make_iff(menv, self.x_send,
                            msat_make_and(menv,
                                          msat_make_equal(menv, in_c, msg_id),
                                          msat_make_lt(menv, c, timeout)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & send') -> (timeout' = base_timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack, self.x_send))
        rhs = msat_make_equal(menv, x_timeout, base_timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
    """Receiver component: waits for a new message and acknowledges it.

    State: a single location bit (wait / work).
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        # loc encodes wait (True) vs work (False).
        self.wait = loc
        self.work = msat_make_not(menv, loc)
        self.x_wait = x_loc
        self.x_work = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc}
        zero = msat_make_number(menv, "0")
        # wait
        self.init = self.wait
        # delta > 0 -> loc' = loc & out_c' = out_c
        lhs = msat_make_gt(menv, delta, zero)
        rhs = msat_make_and(menv,
                            msat_make_iff(menv, x_loc, loc),
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_impl(menv, lhs, rhs)
        # Discrete transitions fire only when delta = 0.
        disc_t = msat_make_equal(menv, delta, zero)
        # wait -> (wait' <-> in_c = out_c)
        lhs = msat_make_and(menv, disc_t, self.wait)
        rhs = msat_make_iff(menv, self.x_wait,
                            msat_make_equal(menv, in_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & wait') -> (out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_wait))
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & work') -> out_c' = in_c
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_work))
        rhs = msat_make_equal(menv, x_out_c, in_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # work -> out_c' = out_c
        lhs = msat_make_and(menv, disc_t, self.work)
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Build the set of (deliberately imperfect) Hint objects for this model.

    Each hint pairs a region predicate with a progress condition over the
    next-state symbols.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    delta = mgr.Symbol(delta_name, types.REAL)
    r2s = mgr.Symbol("r2s", types.INT)
    # NOTE(review): s2r is declared with the name "r2s", so it aliases the
    # symbol above. This file lives in a "wrong_hints" benchmark directory,
    # so this may be intentional -- confirm before changing.
    s2r = mgr.Symbol("r2s", types.INT)
    s_l = mgr.Symbol("s_l", types.BOOL)
    s_evt = mgr.Symbol("s_evt", types.BOOL)
    s_msg_id = mgr.Symbol("s_msg_id", types.INT)
    s_timeout = mgr.Symbol("s_timeout", types.REAL)
    s_c = mgr.Symbol("s_c", types.REAL)
    r_l = mgr.Symbol("r_l", types.BOOL)
    symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
                       r_l])
    x_delta = symb_to_next(mgr, delta)
    x_r2s = symb_to_next(mgr, r2s)
    x_s2r = symb_to_next(mgr, s2r)
    x_s_l = symb_to_next(mgr, s_l)
    x_s_evt = symb_to_next(mgr, s_evt)
    x_s_msg_id = symb_to_next(mgr, s_msg_id)
    x_s_timeout = symb_to_next(mgr, s_timeout)
    x_s_c = symb_to_next(mgr, s_c)
    x_r_l = symb_to_next(mgr, r_l)
    res = []
    r0 = mgr.Real(0)
    r1 = mgr.Real(1)
    i0 = mgr.Int(0)
    i1 = mgr.Int(1)
    # delta stays at 0.
    loc0 = Location(env, mgr.Equals(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r0))
    hint = Hint("h_delta0", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_l alternates between the two locations.
    loc0 = Location(env, s_l)
    loc0.set_progress(1, mgr.Not(x_s_l))
    loc1 = Location(env, mgr.Not(s_l))
    loc1.set_progress(0, x_s_l)
    hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    # r_l stays true.
    loc0 = Location(env, r_l)
    loc0.set_progress(0, x_r_l)
    hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_c increases by 1 while non-negative.
    loc0 = Location(env, mgr.GE(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
    hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.GE(r2s, i0))
    loc0.set_progress(0, mgr.Equals(x_r2s, i1))
    hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.GE(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r1))
    hint = Hint("h_delta1", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.Equals(r2s, i0))
    loc0.set_progress(0, mgr.Equals(x_r2s, i0))
    hint = Hint("h_r2s0", env, frozenset([r2s]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_msg_id keeps incrementing.
    loc0 = Location(env, mgr.GE(s_msg_id, i0))
    loc0.set_progress(0, mgr.Equals(x_s_msg_id, mgr.Plus(s_msg_id, i1)))
    hint = Hint("h_s_msg_id1", env, frozenset([s_msg_id]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, r_l)
    loc0.set_progress(1, mgr.Not(x_r_l))
    loc1 = Location(env, mgr.Not(r_l))
    loc1.set_progress(0, x_r_l)
    hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    loc0 = Location(env, s_evt)
    loc0.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, s_evt)
    loc0.set_progress(1, mgr.Not(x_s_evt))
    loc1 = Location(env, mgr.Not(s_evt))
    loc1.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    loc0 = Location(env, s_l)
    loc0.set_progress(0, x_s_l)
    hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.Equals(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i0))
    hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    return frozenset(res)
| 38.459746 | 89 | 0.574561 |
ffd053c02fb40ec61406ed8e2034edbbe04500b7 | 48,017 | py | Python | tensorflow/python/keras/engine/topology_test.py | ranbir/tensorflow | 46924b2f7bc4262b2c4b36841d393741113594ca | [
"Apache-2.0"
] | 5 | 2019-06-24T14:16:08.000Z | 2022-03-17T04:35:40.000Z | tensorflow/python/keras/engine/topology_test.py | ranbir/tensorflow | 46924b2f7bc4262b2c4b36841d393741113594ca | [
"Apache-2.0"
] | 1 | 2019-09-14T04:40:07.000Z | 2020-11-18T18:16:17.000Z | tensorflow/python/keras/engine/topology_test.py | ranbir/tensorflow | 46924b2f7bc4262b2c4b36841d393741113594ca | [
"Apache-2.0"
] | 1 | 2021-12-21T04:46:40.000Z | 2021-12-21T04:46:40.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#,============================================================================
"""Tests for layer graphs construction & handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer as input_layer_lib
from tensorflow.python.keras.engine import network as network_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
try:
import yaml # pylint:disable=g-import-not-at-top
except ImportError:
yaml = None
class TopologyConstructionTest(keras_parameterized.TestCase):
@test_util.run_deprecated_v1
def test_get_updates(self):
class MyLayer(keras.layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(1, 1),
'float32',
trainable=False)
self.b = self.add_variable('b',
(1, 1),
'float32',
trainable=False)
self.add_update(state_ops.assign_add(self.a, [[1.]],
name='unconditional_update'))
self.built = True
def call(self, inputs):
self.add_update(state_ops.assign_add(self.b, inputs,
name='conditional_update'),
inputs=True)
return inputs + 1
x1 = input_layer_lib.Input(shape=(1,))
layer = MyLayer()
_ = layer.apply(x1)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(x1)), 1)
self.assertEqual(len(layer.get_updates_for(None)), 1)
x2 = input_layer_lib.Input(shape=(1,))
y2 = layer.apply(x2)
self.assertEqual(len(layer.updates), 3)
self.assertEqual(len(layer.get_updates_for(x1)), 1)
self.assertEqual(len(layer.get_updates_for(x2)), 1)
self.assertEqual(len(layer.get_updates_for(None)), 1)
network = network_lib.Network(x2, y2)
self.assertEqual(len(network.updates), 2)
self.assertEqual(len(network.get_updates_for(x2)), 1)
self.assertEqual(len(network.get_updates_for(None)), 1)
x3 = input_layer_lib.Input(shape=(1,))
_ = layer.apply(x3)
self.assertEqual(len(network.updates), 2)
x4 = input_layer_lib.Input(shape=(1,))
_ = network(x4)
self.assertEqual(len(network.updates), 3)
self.assertEqual(len(network.get_updates_for(x2)), 1)
self.assertEqual(len(network.get_updates_for(x4)), 1)
self.assertEqual(len(network.get_updates_for(None)), 1)
network.add_update(state_ops.assign_add(layer.a, [[1]]))
self.assertEqual(len(network.updates), 4)
self.assertEqual(len(network.get_updates_for(None)), 2)
network.add_update(state_ops.assign_add(layer.b, x4), inputs=True)
self.assertEqual(len(network.updates), 5)
self.assertEqual(len(network.get_updates_for(x4)), 2)
@test_util.run_in_graph_and_eager_modes()
def test_get_updates_bn(self):
x1 = input_layer_lib.Input(shape=(1,))
layer = keras.layers.BatchNormalization()
_ = layer.apply(x1)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(x1)), 2)
self.assertEqual(len(layer.get_updates_for(None)), 0)
@test_util.run_deprecated_v1
def test_get_losses(self):
class MyLayer(keras.layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(1, 1),
'float32',
trainable=False)
self.b = self.add_variable('b',
(1, 1),
'float32',
trainable=False)
self.add_loss(math_ops.reduce_sum(self.a))
self.built = True
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs),
inputs=True)
return inputs + 1
x1 = input_layer_lib.Input(shape=(1,))
layer = MyLayer()
_ = layer.apply(x1)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(len(layer.get_losses_for(x1)), 1)
self.assertEqual(len(layer.get_losses_for(None)), 1)
x2 = input_layer_lib.Input(shape=(1,))
y2 = layer.apply(x2)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.get_losses_for(x1)), 1)
self.assertEqual(len(layer.get_losses_for(x2)), 1)
self.assertEqual(len(layer.get_losses_for(None)), 1)
network = network_lib.Network(x2, y2)
self.assertEqual(len(network.losses), 2)
self.assertEqual(len(network.get_losses_for(x1)), 0)
self.assertEqual(len(network.get_losses_for(x2)), 1)
self.assertEqual(len(network.get_losses_for(None)), 1)
x3 = input_layer_lib.Input(shape=(1,))
_ = layer.apply(x3)
self.assertEqual(len(network.losses), 2)
x4 = input_layer_lib.Input(shape=(1,))
_ = network(x4)
self.assertEqual(len(network.losses), 3)
self.assertEqual(len(network.get_losses_for(x2)), 1)
self.assertEqual(len(network.get_losses_for(x4)), 1)
self.assertEqual(len(network.get_losses_for(None)), 1)
network.add_loss(math_ops.reduce_sum(layer.a))
self.assertEqual(len(network.losses), 4)
self.assertEqual(len(network.get_losses_for(None)), 2)
network.add_loss(math_ops.reduce_sum(x4), inputs=True)
self.assertEqual(len(network.losses), 5)
self.assertEqual(len(network.get_losses_for(x4)), 2)
@test_util.run_in_graph_and_eager_modes()
def testTopologicalAttributes(self):
# test layer attributes / methods related to cross-layer connectivity.
a = input_layer_lib.Input(shape=(32,), name='input_a')
b = input_layer_lib.Input(shape=(32,), name='input_b')
# test input, output, input_shape, output_shape
test_layer = keras.layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertEqual(test_layer.input, a)
self.assertEqual(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
# test `get_*_at` methods
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertEqual(dense.get_input_at(0), a)
self.assertEqual(dense.get_input_at(1), b)
self.assertEqual(dense.get_output_at(0), a_2)
self.assertEqual(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
# Test invalid value for attribute retrieval.
with self.assertRaises(ValueError):
dense.get_input_at(2)
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.input
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.output
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.output_shape
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
a = input_layer_lib.Input(shape=(3, 32))
a = input_layer_lib.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
a = input_layer_lib.Input(shape=(3, 32))
a = input_layer_lib.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.output_shape
@test_util.run_in_graph_and_eager_modes()
def testTopologicalAttributesMultiOutputLayer(self):
class PowersLayer(keras.layers.Layer):
def call(self, inputs):
return [inputs**2, inputs**3]
x = input_layer_lib.Input(shape=(32,))
test_layer = PowersLayer()
p1, p2 = test_layer(x) # pylint: disable=not-callable
self.assertEqual(test_layer.input, x)
self.assertEqual(test_layer.output, [p1, p2])
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)])
@test_util.run_in_graph_and_eager_modes()
def testTopologicalAttributesMultiInputLayer(self):
class AddLayer(keras.layers.Layer):
def call(self, inputs):
assert len(inputs) == 2
return inputs[0] + inputs[1]
a = input_layer_lib.Input(shape=(32,))
b = input_layer_lib.Input(shape=(32,))
test_layer = AddLayer()
y = test_layer([a, b]) # pylint: disable=not-callable
self.assertEqual(test_layer.input, [a, b])
self.assertEqual(test_layer.output, y)
self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)])
self.assertEqual(test_layer.output_shape, (None, 32))
@test_util.run_deprecated_v1
def testBasicNetwork(self):
# minimum viable network
x = input_layer_lib.Input(shape=(32,))
dense = keras.layers.Dense(2)
y = dense(x)
network = network_lib.Network(x, y, name='dense_network')
# test basic attributes
self.assertEqual(network.name, 'dense_network')
self.assertEqual(len(network.layers), 2) # InputLayer + Dense
self.assertEqual(network.layers[1], dense)
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, dense.trainable_weights)
self.assertEqual(network.non_trainable_weights, dense.non_trainable_weights)
# test callability on Input
x_2 = input_layer_lib.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.shape.as_list(), [None, 2])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.shape.as_list(), [None, 2])
# test network `trainable` attribute
network.trainable = False
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, [])
self.assertEqual(network.non_trainable_weights,
dense.trainable_weights + dense.non_trainable_weights)
@test_util.run_in_graph_and_eager_modes
def test_trainable_weights(self):
a = keras.layers.Input(shape=(2,))
b = keras.layers.Dense(1)(a)
model = keras.models.Model(a, b)
weights = model.weights
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
model.trainable = True
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[1].trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
# sequential model
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=2))
weights = model.weights
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
model.trainable = True
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[0].trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
  @test_util.run_deprecated_v1
  def test_layer_call_arguments(self):
    """Extra `call` kwargs (here `training=True`) survive model reuse and a
    config round-trip."""
    # Test the ability to pass and serialize arguments to `call`.
    inp = keras.layers.Input(shape=(2,))
    x = keras.layers.Dense(3)(inp)
    x = keras.layers.Dropout(0.5)(x, training=True)
    model = keras.models.Model(inp, x)
    # Would be `dropout/cond/Merge` by default; `training=True` pins the
    # dropout branch, hence the `dropout/mul_1` op name asserted below.
    self.assertTrue(model.output.op.name.endswith('dropout/mul_1'))
    # Test that argument is kept when applying the model to fresh inputs.
    inp2 = keras.layers.Input(shape=(2,))
    out2 = model(inp2)
    self.assertTrue(out2.op.name.endswith('dropout/mul_1'))
    # Test that argument is kept after rebuilding the model from its config.
    config = model.get_config()
    model = keras.models.Model.from_config(config)
    self.assertTrue(model.output.op.name.endswith('dropout/mul_1'))
def test_node_construction(self):
# test basics
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
with self.assertRaises(ValueError):
_ = keras.layers.Input(shape=(32,), batch_shape=(10, 32))
with self.assertRaises(ValueError):
_ = keras.layers.Input(shape=(32,), unknown_kwarg=None)
self.assertListEqual(a.shape.as_list(), [None, 32])
a_layer, a_node_index, a_tensor_index = a._keras_history
b_layer, _, _ = b._keras_history
self.assertEqual(len(a_layer._inbound_nodes), 1)
self.assertEqual(a_tensor_index, 0)
node = a_layer._inbound_nodes[a_node_index]
self.assertEqual(node.outbound_layer, a_layer)
self.assertListEqual(node.inbound_layers, [])
self.assertListEqual(node.input_tensors, [a])
self.assertListEqual(node.input_shapes, [(None, 32)])
self.assertListEqual(node.output_tensors, [a])
self.assertListEqual(node.output_shapes, [(None, 32)])
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertEqual(len(dense._inbound_nodes), 2)
self.assertEqual(len(dense._outbound_nodes), 0)
self.assertEqual(dense._inbound_nodes[0].inbound_layers, a_layer)
self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)
self.assertEqual(dense._inbound_nodes[1].inbound_layers, b_layer)
self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)
self.assertEqual(dense._inbound_nodes[0].input_tensors, a)
self.assertEqual(dense._inbound_nodes[1].input_tensors, b)
# test layer properties
test_layer = keras.layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertListEqual(test_layer.kernel.shape.as_list(), [32, 16])
self.assertEqual(test_layer.input, a)
self.assertEqual(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
self.assertEqual(dense.get_input_at(0), a)
self.assertEqual(dense.get_input_at(1), b)
self.assertEqual(dense.get_output_at(0), a_2)
self.assertEqual(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
self.assertEqual(dense.get_input_mask_at(0), None)
self.assertEqual(dense.get_input_mask_at(1), None)
self.assertEqual(dense.get_output_mask_at(0), None)
self.assertEqual(dense.get_output_mask_at(1), None)
@test_util.run_in_graph_and_eager_modes()
def test_multi_input_layer(self):
with self.cached_session():
# test multi-input layer
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
self.assertListEqual(merged.shape.as_list(), [None, 16 * 2])
merge_layer, merge_node_index, merge_tensor_index = merged._keras_history
self.assertEqual(merge_node_index, 0)
self.assertEqual(merge_tensor_index, 0)
self.assertEqual(len(merge_layer._inbound_nodes), 1)
self.assertEqual(len(merge_layer._outbound_nodes), 0)
self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2)
self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2)
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
self.assertEqual(len(model.layers), 6)
output_shapes = model.compute_output_shape([(None, 32), (None, 32)])
self.assertListEqual(output_shapes[0].as_list(), [None, 64])
self.assertListEqual(output_shapes[1].as_list(), [None, 5])
self.assertListEqual(
model.compute_mask([a, b], [None, None]), [None, None])
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([l.name for l in model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in model._output_layers],
['dense_2', 'dense_3'])
# actually run model
fn = keras.backend.function(model.inputs, model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
# test get_source_inputs
self.assertListEqual(keras.engine.get_source_inputs(c), [a, b])
# serialization / deserialization
json_config = model.to_json()
recreated_model = keras.models.model_from_json(json_config)
recreated_model.compile('rmsprop', 'mse')
self.assertListEqual([l.name for l in recreated_model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in recreated_model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in recreated_model._output_layers],
['dense_2', 'dense_3'])
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
@test_util.run_deprecated_v1
def test_recursion(self):
with self.cached_session():
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
e = keras.layers.Input(shape=(32,), name='input_e')
f = keras.layers.Input(shape=(32,), name='input_f')
self.assertEqual(len(model.inputs), 2)
g, h = model([e, f])
self.assertEqual(len(model.inputs), 2)
self.assertEqual(g.name, 'model/dense_2/BiasAdd:0')
self.assertListEqual(g.shape.as_list(), c.shape.as_list())
self.assertListEqual(h.shape.as_list(), d.shape.as_list())
# test separate manipulation of different layer outputs
i = keras.layers.Dense(7, name='dense_4')(h)
final_model = keras.models.Model(
inputs=[e, f], outputs=[i, g], name='final')
self.assertEqual(len(final_model.inputs), 2)
self.assertEqual(len(final_model.outputs), 2)
self.assertEqual(len(final_model.layers), 4)
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([layer.name for layer in final_model.layers][2:],
['model', 'dense_4'])
self.assertListEqual(
model.compute_mask([e, f], [None, None]), [None, None])
self.assertListEqual(
final_model.compute_output_shape([(10, 32), (10, 32)]), [(10, 7),
(10, 64)])
# run recursive model
fn = keras.backend.function(final_model.inputs, final_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
# test serialization
model_config = final_model.get_config()
recreated_model = keras.models.Model.from_config(model_config)
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
@test_util.run_in_graph_and_eager_modes()
def test_multi_input_multi_output_recursion(self):
with self.cached_session():
# test multi-input multi-output
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
_, n = model([j, k])
o = keras.layers.Input(shape=(32,), name='input_o')
p = keras.layers.Input(shape=(32,), name='input_p')
q, _ = model([o, p])
self.assertListEqual(n.shape.as_list(), [None, 5])
self.assertListEqual(q.shape.as_list(), [None, 64])
s = keras.layers.concatenate([n, q], name='merge_nq')
self.assertListEqual(s.shape.as_list(), [None, 64 + 5])
# test with single output as 1-elem list
multi_io_model = keras.models.Model([j, k, o, p], [s])
fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test with single output as tensor
multi_io_model = keras.models.Model([j, k, o, p], s)
fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test serialization
model_config = multi_io_model.get_config()
recreated_model = keras.models.Model.from_config(model_config)
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
config = model.get_config()
keras.models.Model.from_config(config)
model.summary()
json_str = model.to_json()
keras.models.model_from_json(json_str)
if yaml is not None:
yaml_str = model.to_yaml()
keras.models.model_from_yaml(yaml_str)
  @test_util.run_in_graph_and_eager_modes()
  def test_invalid_graphs(self):
    """Model construction rejects malformed input/output specifications."""
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')
    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = keras.layers.concatenate([a_2, b_2], name='merge')
    c = keras.layers.Dense(64, name='dense_2')(merged)
    d = keras.layers.Dense(5, name='dense_3')(c)
    model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
    # Case 1: an input that is not an Input tensor (here `j` is rebound to
    # the output of a Dense layer) must be rejected.
    j = keras.layers.Input(shape=(32,), name='input_j')
    j = keras.layers.Dense(32)(j)
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    with self.assertRaises(Exception):
      keras.models.Model([j, k], [m, n])
    # Case 2: disconnected graph — the outputs depend on `k`, which is not
    # listed among the model inputs — must be rejected.
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    with self.assertRaises(Exception):
      keras.models.Model([j], [m, n])
    # Case 3: redundant outputs are tolerated — no exception expected.
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    keras.models.Model([j, k], [m, n, n])
    # Case 4: redundant inputs must be rejected.
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    with self.assertRaises(Exception):
      keras.models.Model([j, k, j], [m, n])
    # Case 5: garbage as outputs (the literal 0) must be rejected.
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    with self.assertRaises(Exception):
      keras.models.Model([j, k], [m, n, 0])
@test_util.run_deprecated_v1
def test_raw_tf_compatibility(self):
# test calling layers/models on TF tensors
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
self.assertEqual(len(model.inputs), 2)
m, n = model([j, k])
self.assertEqual(len(model.inputs), 2)
tf_model = keras.models.Model([j, k], [m, n])
j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
m_tf, n_tf = tf_model([j_tf, k_tf])
self.assertListEqual(m_tf.shape.as_list(), [None, 64])
self.assertListEqual(n_tf.shape.as_list(), [None, 5])
# test merge
keras.layers.concatenate([j_tf, k_tf], axis=1)
keras.layers.add([j_tf, k_tf])
# test tensor input
x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)
keras.layers.InputLayer(input_tensor=x)
x = keras.layers.Input(tensor=x)
keras.layers.Dense(2)(x)
@test_util.run_in_graph_and_eager_modes()
def test_basic_masking(self):
a = keras.layers.Input(shape=(10, 32), name='input_a')
b = keras.layers.Masking()(a)
model = keras.models.Model(a, b)
self.assertEqual(model.output_mask.shape.as_list(), [None, 10])
@test_util.run_deprecated_v1
def testMaskingSingleInput(self):
class MaskedLayer(keras.layers.Layer):
def call(self, inputs, mask=None):
if mask is not None:
return inputs * mask
return inputs
def compute_mask(self, inputs, mask=None):
return array_ops.ones_like(inputs)
if context.executing_eagerly():
a = constant_op.constant([2] * 32)
mask = constant_op.constant([0, 1] * 16)
a._keras_mask = mask
b = MaskedLayer().apply(a)
self.assertTrue(hasattr(b, '_keras_mask'))
self.assertAllEqual(
self.evaluate(array_ops.ones_like(mask)),
self.evaluate(getattr(b, '_keras_mask')))
self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b))
else:
x = input_layer_lib.Input(shape=(32,))
y = MaskedLayer()(x) # pylint: disable=not-callable
network = network_lib.Network(x, y)
# test callability on Input
x_2 = input_layer_lib.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.shape.as_list(), [None, 32])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.shape.as_list(), [None, 32])
  @test_util.run_deprecated_v1
  def test_activity_regularization_with_model_composition(self):
    """Activity-regularization losses propagate through a nested model."""
    def reg(x):
      # The regularization penalty is simply the sum of the layer's outputs.
      return math_ops.reduce_sum(x)
    net_a_input = input_layer_lib.Input((2,))
    net_a = net_a_input
    net_a = keras.layers.Dense(2, kernel_initializer='ones',
                               use_bias=False,
                               activity_regularizer=reg)(net_a)
    model_a = keras.Model([net_a_input], [net_a])
    net_b_input = input_layer_lib.Input((2,))
    net_b = model_a(net_b_input)
    model_b = keras.Model([net_b_input], [net_b])
    # loss=None: the evaluated loss is exclusively the activity
    # regularization collected from the inner model.
    model_b.compile(optimizer='sgd', loss=None)
    x = np.ones((1, 2))
    loss = model_b.evaluate(x)
    # All-ones kernel, no bias, input of ones: each of the 2 output units is
    # 1 + 1 = 2, so the summed regularization loss is 2 + 2 = 4.
    self.assertEqual(loss, 4.)
@keras_parameterized.run_all_keras_modes
def test_layer_sharing_at_heterogenous_depth(self):
x_val = np.random.random((10, 5))
x = input_layer_lib.Input(shape=(5,))
a = keras.layers.Dense(5, name='A')
b = keras.layers.Dense(5, name='B')
output = a(b(a(b(x))))
m = keras.models.Model(x, output)
m.run_eagerly = testing_utils.should_run_eagerly()
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = keras.models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
@keras_parameterized.run_all_keras_modes
def test_layer_sharing_at_heterogenous_depth_with_concat(self):
input_shape = (16, 9, 3)
input_layer = input_layer_lib.Input(shape=input_shape)
a = keras.layers.Dense(3, name='dense_A')
b = keras.layers.Dense(3, name='dense_B')
c = keras.layers.Dense(3, name='dense_C')
x1 = b(a(input_layer))
x2 = a(c(input_layer))
output = keras.layers.concatenate([x1, x2])
m = keras.models.Model(inputs=input_layer, outputs=output)
m.run_eagerly = testing_utils.should_run_eagerly()
x_val = np.random.random((10, 16, 9, 3))
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = keras.models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
@keras_parameterized.run_all_keras_modes
def test_explicit_training_argument(self):
a = keras.layers.Input(shape=(2,))
b = keras.layers.Dropout(0.5)(a)
base_model = keras.models.Model(a, b)
a = keras.layers.Input(shape=(2,))
b = base_model(a, training=False)
model = keras.models.Model(a, b)
x = np.ones((100, 2))
y = np.ones((100, 2))
model.compile(
optimizer='sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0) # In inference mode, output is equal to input.
a = keras.layers.Input(shape=(2,))
b = base_model(a, training=True)
model = keras.models.Model(a, b)
preds = model.predict(x)
self.assertEqual(np.min(preds), 0.) # At least one unit was dropped.
@keras_parameterized.run_all_keras_modes
def test_multi_output_model_with_none_masking(self):
def func(x):
return [x * 0.2, x * 0.3]
def output_shape(input_shape):
return [input_shape, input_shape]
i = keras.layers.Input(shape=(3, 2, 1))
o = keras.layers.Lambda(function=func, output_shape=output_shape)(i)
self.assertEqual(keras.backend.int_shape(o[0]), (None, 3, 2, 1))
self.assertEqual(keras.backend.int_shape(o[1]), (None, 3, 2, 1))
o = keras.layers.add(o)
model = keras.Model(i, o)
model.run_eagerly = testing_utils.should_run_eagerly()
i2 = keras.layers.Input(shape=(3, 2, 1))
o2 = model(i2)
model2 = keras.Model(i2, o2)
model2.run_eagerly = testing_utils.should_run_eagerly()
x = np.random.random((4, 3, 2, 1))
out = model2.predict(x)
assert out.shape == (4, 3, 2, 1)
self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4)
@keras_parameterized.run_all_keras_modes
def test_constant_initializer_with_numpy(self):
initializer = keras.initializers.Constant(np.ones((3, 2)))
model = keras.models.Sequential()
model.add(
keras.layers.Dense(2, input_shape=(3,), kernel_initializer=initializer))
model.add(keras.layers.Dense(3))
model.compile(
loss='mse',
optimizer='sgd',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
json_str = model.to_json()
keras.models.model_from_json(json_str)
if yaml is not None:
yaml_str = model.to_yaml()
keras.models.model_from_yaml(yaml_str)
  def test_subclassed_error_if_init_not_called(self):
    """A subclassed Network that skips super().__init__() fails fast."""
    class MyNetwork(network_lib.Network):
      def __init__(self):
        # Deliberately omits the super().__init__() call; assigning layers
        # to an attribute should then raise a helpful RuntimeError.
        self._foo = [keras.layers.Dense(10), keras.layers.Dense(10)]
    with self.assertRaisesRegexp(RuntimeError, 'forgot to call'):
      MyNetwork()
class DeferredModeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def testSimpleNetworkBuilding(self):
inputs = input_layer_lib.Input(shape=(32,))
if context.executing_eagerly():
self.assertEqual(inputs.dtype.name, 'float32')
self.assertEqual(inputs.shape.as_list(), [None, 32])
x = keras.layers.Dense(2)(inputs)
if context.executing_eagerly():
self.assertEqual(x.dtype.name, 'float32')
self.assertEqual(x.shape.as_list(), [None, 2])
outputs = keras.layers.Dense(4)(x)
network = network_lib.Network(inputs, outputs)
self.assertIsInstance(network, network_lib.Network)
if context.executing_eagerly():
# It should be possible to call such a network on EagerTensors.
inputs = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
outputs = network(inputs)
self.assertEqual(outputs.shape.as_list(), [10, 4])
@test_util.run_in_graph_and_eager_modes()
def testMultiIONetworkBuilding(self):
input_a = input_layer_lib.Input(shape=(32,))
input_b = input_layer_lib.Input(shape=(16,))
a = keras.layers.Dense(16)(input_a)
class AddLayer(keras.layers.Layer):
def call(self, inputs):
return inputs[0] + inputs[1]
c = AddLayer()([a, input_b]) # pylint: disable=not-callable
c = keras.layers.Dense(2)(c)
network = network_lib.Network([input_a, input_b], [a, c])
if context.executing_eagerly():
a_val = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
b_val = constant_op.constant(
np.random.random((10, 16)).astype('float32'))
outputs = network([a_val, b_val])
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].shape.as_list(), [10, 16])
self.assertEqual(outputs[1].shape.as_list(), [10, 2])
class DefaultShapeInferenceBehaviorTest(keras_parameterized.TestCase):
    """Tests shape inference defaults for custom layers/models without an
    explicit `compute_output_shape` implementation.
    """

    def _testShapeInference(self, model, input_shape, expected_output_shape):
        # Helper: run one predict() pass and compare the output shape.
        input_value = np.random.random(input_shape)
        output_value = model.predict(input_value)
        self.assertEqual(output_value.shape, expected_output_shape)

    @test_util.run_in_graph_and_eager_modes()
    def testSingleInputCase(self):
        """Single-input custom layer: inferred output shape is (batch, 4)."""

        class LayerWithOneInput(keras.layers.Layer):

            def build(self, input_shape):
                self.w = array_ops.ones(shape=(3, 4))

            def call(self, inputs):
                return keras.backend.dot(inputs, self.w)

        inputs = input_layer_lib.Input(shape=(3,))
        layer = LayerWithOneInput()

        if context.executing_eagerly():
            self.assertEqual(
                layer.compute_output_shape((None, 3)).as_list(), [None, 4])
            # As a side-effect, compute_output_shape builds the layer.
            self.assertTrue(layer.built)
            # We can still query the layer's compute_output_shape with compatible
            # input shapes.
            self.assertEqual(
                layer.compute_output_shape((6, 3)).as_list(), [6, 4])

        outputs = layer(inputs)
        model = keras.Model(inputs, outputs)
        self._testShapeInference(model, (2, 3), (2, 4))

    @test_util.run_in_graph_and_eager_modes()
    def testMultiInputOutputCase(self):
        """Custom layer with two inputs and two outputs of shape (batch, 4)."""

        class MultiInputOutputLayer(keras.layers.Layer):

            def build(self, input_shape):
                self.w = array_ops.ones(shape=(3, 4))

            def call(self, inputs):
                a = keras.backend.dot(inputs[0], self.w)
                b = a + inputs[1]
                return [a, b]

        input_a = input_layer_lib.Input(shape=(3,))
        input_b = input_layer_lib.Input(shape=(4,))
        output_a, output_b = MultiInputOutputLayer()([input_a, input_b])
        model = keras.Model([input_a, input_b], [output_a, output_b])
        output_a_val, output_b_val = model.predict(
            [np.random.random((2, 3)), np.random.random((2, 4))])
        self.assertEqual(output_a_val.shape, (2, 4))
        self.assertEqual(output_b_val.shape, (2, 4))

    @test_util.run_in_graph_and_eager_modes()
    def testTrainingArgument(self):
        """Shape inference still works for layers taking a `training` arg."""

        class LayerWithTrainingArg(keras.layers.Layer):

            def build(self, input_shape):
                self.w = array_ops.ones(shape=(3, 4))

            def call(self, inputs, training):
                return keras.backend.dot(inputs, self.w)

        inputs = input_layer_lib.Input(shape=(3,))
        outputs = LayerWithTrainingArg()(inputs, training=False)
        model = keras.Model(inputs, outputs)
        self._testShapeInference(model, (2, 3), (2, 4))

    @test_util.run_in_graph_and_eager_modes()
    def testNoneInShape(self):
        """Subclassed model can be built with None (unknown) spatial dims."""

        class Model(keras.Model):

            def __init__(self):
                super(Model, self).__init__()
                self.conv1 = keras.layers.Conv2D(8, 3)
                self.pool = keras.layers.GlobalAveragePooling2D()
                self.fc = keras.layers.Dense(3)

            def call(self, x):
                x = self.conv1(x)
                x = self.pool(x)
                x = self.fc(x)
                return x

        model = Model()
        model.build(tensor_shape.TensorShape((None, None, None, 1)))
        self.assertTrue(model.built, 'Model should be built')
        self.assertTrue(model.weights,
                        'Model should have its weights created as it '
                        'has been built')
        sample_input = array_ops.ones((1, 10, 10, 1))
        output = model(sample_input)
        self.assertEqual(output.shape, (1, 3))

    @test_util.run_in_graph_and_eager_modes()
    def testNoneInShapeWithCompoundModel(self):
        """Same as testNoneInShape but with a model nested inside a model."""

        class BasicBlock(keras.Model):

            def __init__(self):
                super(BasicBlock, self).__init__()
                self.conv1 = keras.layers.Conv2D(8, 3)
                self.pool = keras.layers.GlobalAveragePooling2D()
                self.dense = keras.layers.Dense(3)

            def call(self, x):
                x = self.conv1(x)
                x = self.pool(x)
                x = self.dense(x)
                return x

        class CompoundModel(keras.Model):

            def __init__(self):
                super(CompoundModel, self).__init__()
                self.block = BasicBlock()

            def call(self, x):
                x = self.block(x)  # pylint: disable=not-callable
                return x

        model = CompoundModel()
        model.build(tensor_shape.TensorShape((None, None, None, 1)))
        self.assertTrue(model.built, 'Model should be built')
        self.assertTrue(model.weights,
                        'Model should have its weights created as it '
                        'has been built')
        sample_input = array_ops.ones((1, 10, 10, 1))
        output = model(sample_input)  # pylint: disable=not-callable
        self.assertEqual(output.shape, (1, 3))

    @test_util.run_in_graph_and_eager_modes()
    def testNoneInShapeWithFunctinalAPI(self):
        """Build with None dims when a subclassed model is used functionally."""

        class BasicBlock(keras.Model):
            # Inherting from keras.layers.Layer since we are calling this layer
            # inside a model created using functional API.

            def __init__(self):
                super(BasicBlock, self).__init__()
                self.conv1 = keras.layers.Conv2D(8, 3)

            def call(self, x):
                x = self.conv1(x)
                return x

        input_layer = keras.layers.Input(shape=(None, None, 1))
        x = BasicBlock()(input_layer)
        x = keras.layers.GlobalAveragePooling2D()(x)
        output_layer = keras.layers.Dense(3)(x)

        model = keras.Model(inputs=input_layer, outputs=output_layer)
        model.build(tensor_shape.TensorShape((None, None, None, 1)))
        self.assertTrue(model.built, 'Model should be built')
        self.assertTrue(model.weights,
                        'Model should have its weights created as it '
                        'has been built')
        sample_input = array_ops.ones((1, 10, 10, 1))
        output = model(sample_input)
        self.assertEqual(output.shape, (1, 3))

    @keras_parameterized.run_all_keras_modes
    def test_sequential_as_downstream_of_masking_layer(self):
        """A Sequential wrapped in TimeDistributed propagates the mask from an
        upstream Masking layer (mask checked in graph mode only).
        """
        inputs = keras.layers.Input(shape=(3, 4))
        x = keras.layers.Masking(mask_value=0., input_shape=(3, 4))(inputs)

        s = keras.Sequential()
        s.add(keras.layers.Dense(5, input_shape=(4,)))

        x = keras.layers.wrappers.TimeDistributed(s)(x)
        model = keras.Model(inputs=inputs, outputs=x)
        model.compile(
            optimizer='rmsprop',
            loss='mse',
            run_eagerly=testing_utils.should_run_eagerly())

        model_input = np.random.randint(
            low=1, high=5, size=(10, 3, 4)).astype('float32')
        # Zero out a triangular region so some timesteps are fully masked.
        for i in range(4):
            model_input[i, i:, :] = 0.
        model.fit(model_input,
                  np.random.random((10, 3, 5)), epochs=1, batch_size=6)

        if not context.executing_eagerly():
            # Note: this doesn't work in eager due to DeferredTensor/ops
            # compatibility issue.
            mask_outputs = [model.layers[1].compute_mask(model.layers[1].input)]
            mask_outputs += [model.layers[2].compute_mask(
                model.layers[2].input, mask_outputs[-1])]
            func = keras.backend.function([model.input], mask_outputs)
            mask_outputs_val = func([model_input])
            self.assertAllClose(mask_outputs_val[0], np.any(model_input, axis=-1))
            self.assertAllClose(mask_outputs_val[1], np.any(model_input, axis=-1))
class GraphUtilsTest(test.TestCase):
    """Tests for keras.utils.tf_utils graph-traversal helpers."""

    @test_util.run_deprecated_v1
    def testGetReachableFromInputs(self):
        """get_reachable_from_inputs returns all tensors/ops downstream of
        the given seed tensors.
        """
        with self.cached_session():
            pl_1 = array_ops.placeholder(shape=None, dtype='float32')
            pl_2 = array_ops.placeholder(shape=None, dtype='float32')
            pl_3 = array_ops.placeholder(shape=None, dtype='float32')
            x_1 = pl_1 + pl_2
            x_2 = pl_2 * 2
            x_3 = pl_3 + 1
            x_4 = x_1 + x_2
            x_5 = x_3 * pl_1

            self.assertEqual(
                keras.utils.tf_utils.get_reachable_from_inputs([pl_1]),
                {pl_1, x_1, x_4, x_5, x_1.op, x_4.op, x_5.op})
            self.assertEqual(
                keras.utils.tf_utils.get_reachable_from_inputs([pl_1, pl_2]),
                {pl_1, pl_2, x_1, x_2, x_4, x_5, x_1.op, x_2.op, x_4.op, x_5.op})
            self.assertEqual(
                keras.utils.tf_utils.get_reachable_from_inputs([pl_3]),
                {pl_3, x_3, x_5, x_3.op, x_5.op})
            self.assertEqual(
                keras.utils.tf_utils.get_reachable_from_inputs([x_3]),
                {x_3, x_5, x_5.op})
@test_util.run_all_in_graph_and_eager_modes
class NestedNetworkTest(test.TestCase):
    """Tests for Networks whose inputs/outputs are nested structures."""

    def test_nested_inputs_network(self):
        """Dict-structured inputs survive a get_config/from_config roundtrip."""
        inputs = {'x1': keras.Input(shape=(1,)), 'x2': keras.Input(shape=(1,))}
        outputs = keras.layers.Add()([inputs['x1'], inputs['x2']])
        network = keras.engine.network.Network(inputs, outputs)

        network = keras.engine.network.Network.from_config(network.get_config())

        # NOTE(review): the call keys ('x', 'y') do not match the input keys
        # ('x1', 'x2') above — confirm this is intentional (flat-order call).
        result_tensor = network({
            'x': array_ops.ones((1, 1), 'float32'),
            'y': array_ops.ones((1, 1), 'float32')
        })
        result = self.evaluate(result_tensor)
        self.assertAllEqual(result, [[2.]])

        # TODO(b/122726584): Investigate why concrete batch is flaky in some builds.
        output_shape = network.compute_output_shape({
            'x1': (None, 1),
            'x2': (None, 1)
        })
        self.assertListEqual(output_shape.as_list(), [None, 1])

    def test_nested_outputs_network(self):
        """Dict-structured outputs survive a config roundtrip and keep keys."""
        inputs = keras.Input(shape=(1,))
        outputs = {
            'x+x': keras.layers.Add()([inputs, inputs]),
            'x*x': keras.layers.Multiply()([inputs, inputs])
        }

        network = keras.engine.network.Network(inputs, outputs)

        network = keras.engine.network.Network.from_config(network.get_config())

        result_tensor = network(array_ops.ones((1, 1), 'float32'))
        result = self.evaluate(result_tensor)
        self.assertAllEqual(result['x+x'], [[2.]])
        self.assertAllEqual(result['x*x'], [[1.]])

        output_shape = network.compute_output_shape((None, 1))
        self.assertListEqual(output_shape['x+x'].as_list(), [None, 1])
        self.assertListEqual(output_shape['x*x'].as_list(), [None, 1])

    def test_nested_network_inside_network(self):
        """An inner network with nested IO can be composed inside another."""
        inner_inputs = {
            'x1': keras.Input(shape=(1,)),
            'x2': keras.Input(shape=(1,))
        }
        inner_outputs = {
            'x1+x2':
                keras.layers.Add()([inner_inputs['x1'], inner_inputs['x2']]),
            'x1*x2':
                keras.layers.Multiply()([inner_inputs['x1'], inner_inputs['x2']])
        }
        inner_network = keras.engine.network.Network(inner_inputs, inner_outputs)

        inputs = [keras.Input(shape=(1,)), keras.Input(shape=(1,))]
        middle = inner_network({'x1': inputs[0], 'x2': inputs[1]})
        outputs = keras.layers.Add()([middle['x1+x2'], middle['x1*x2']])
        network = keras.engine.network.Network(inputs, outputs)

        network = keras.engine.network.Network.from_config(network.get_config())

        # Computes: `(x1+x2) + (x1*x2)`
        result_tensor = network(
            [array_ops.ones((1, 1), 'float32'),
             array_ops.ones((1, 1), 'float32')])
        result = self.evaluate(result_tensor)
        self.assertAllEqual(result, [[3.]])

        output_shape = network.compute_output_shape([(None, 1), (None, 1)])
        self.assertListEqual(output_shape.as_list(), [None, 1])

    @test_util.run_in_graph_and_eager_modes
    def test_updates_with_direct_call(self):
        """BatchNorm updates are tracked when the model is called directly."""
        inputs = keras.Input(shape=(10,))
        x = keras.layers.BatchNormalization()(inputs)
        x = keras.layers.Dense(10)(x)
        model = keras.Model(inputs, x)

        ph = keras.backend.placeholder(shape=(10, 10))
        model(ph)

        self.assertLen(model.get_updates_for(ph), 2)
        self.assertLen(model.get_updates_for(None), 0)
if __name__ == '__main__':
    # Run the test suite when executed as a script.
    test.main()
| 36.851113 | 80 | 0.661141 |
6311a5dd14eccfd038b2d5948f26adab0101cb9b | 14,590 | py | Python | src/jinja2/sandbox.py | Alramiro4/jinja | d73a10bafd7d50f0dd96090dcd1ddd9012ef7139 | [
"BSD-3-Clause"
] | 1 | 2021-11-25T07:01:35.000Z | 2021-11-25T07:01:35.000Z | src/jinja2/sandbox.py | Alramiro4/jinja | d73a10bafd7d50f0dd96090dcd1ddd9012ef7139 | [
"BSD-3-Clause"
] | null | null | null | src/jinja2/sandbox.py | Alramiro4/jinja | d73a10bafd7d50f0dd96090dcd1ddd9012ef7139 | [
"BSD-3-Clause"
] | null | null | null | """A sandbox layer that ensures unsafe operations cannot be performed.
Useful when the template itself comes from an untrusted source.
"""
import operator
import types
from _string import formatter_field_name_split # type: ignore
from collections import abc
from collections import deque
from string import Formatter
from typing import FrozenSet
from typing import Set
from markupsafe import EscapeFormatter
from markupsafe import Markup
from .environment import Environment
from .exceptions import SecurityError
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: Unsafe function attributes.
UNSAFE_FUNCTION_ATTRIBUTES: Set = set()
#: Unsafe method attributes. Function attributes are unsafe for methods too.
UNSAFE_METHOD_ATTRIBUTES: Set = set()
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
_mutable_spec = (
(
abc.MutableSet,
frozenset(
[
"add",
"clear",
"difference_update",
"discard",
"pop",
"remove",
"symmetric_difference_update",
"update",
]
),
),
(
abc.MutableMapping,
frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
),
(
abc.MutableSequence,
frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
),
(
deque,
frozenset(
[
"append",
"appendleft",
"clear",
"extend",
"extendleft",
"pop",
"popleft",
"remove",
"rotate",
]
),
),
)
def inspect_format_method(callable):
    """Return the format string when *callable* is a ``format``/``format_map``
    method bound to a ``str`` instance, otherwise ``None``.

    Used by the sandbox to intercept ``"...".format(...)`` calls.
    """
    if not isinstance(
        callable, (types.MethodType, types.BuiltinMethodType)
    ) or callable.__name__ not in ("format", "format_map"):
        return None

    obj = callable.__self__

    if isinstance(obj, str):
        return obj
    # Bound to something other than a plain str: not interceptable here.
    return None
def safe_range(*args):
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.

    Accepts the same positional arguments as the builtin :func:`range` and
    raises :exc:`OverflowError` instead of producing an oversized range.
    """
    rng = range(*args)

    if len(rng) > MAX_RANGE:
        raise OverflowError(
            "Range too big. The sandbox blocks ranges larger than"
            f" MAX_RANGE ({MAX_RANGE})."
        )

    return rng
def unsafe(f):
    """Marks a function or method as unsafe.

    The sandbox refuses to call anything whose ``unsafe_callable`` attribute
    is true (see ``SandboxedEnvironment.is_safe_callable``).

    .. code-block: python

        @unsafe
        def delete(self):
            pass
    """
    f.unsafe_callable = True
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute. For
    example this function returns `True` for the `func_code` attribute of
    python objects. This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    if isinstance(obj, types.FunctionType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, types.MethodType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        if attr == "mro":
            return True
    elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        # Code/traceback/frame objects expose interpreter internals; every
        # attribute on them is considered internal.
        return True
    elif isinstance(obj, types.GeneratorType):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
        if attr in UNSAFE_COROUTINE_ATTRIBUTES:
            return True
    elif hasattr(types, "AsyncGeneratorType") and isinstance(
        obj, types.AsyncGeneratorType
    ):
        if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
            return True
    # Dunder attributes are always treated as internal.
    return attr.startswith("__")
def modifies_known_mutable(obj, attr):
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) or the corresponding ABCs would modify it
    if called.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object, ``False`` is returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    for typespec, unsafe in _mutable_spec:
        if isinstance(obj, typespec):
            return attr in unsafe

    return False
class SandboxedEnvironment(Environment):
    """The sandboxed environment. It works like the regular environment but
    tells the compiler to generate sandboxed code. Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised. However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """

    sandboxed = True

    #: default callback table for the binary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
        "//": operator.floordiv,
        "**": operator.pow,
        "%": operator.mod,
    }

    #: default callback table for the unary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table = {"+": operator.pos, "-": operator.neg}

    #: a set of binary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops: FrozenSet = frozenset()

    #: a set of unary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops: FrozenSet = frozenset()

    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime. If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator. The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.

        The following unary operators are interceptable: ``+`` and ``-``

        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.

        .. versionadded:: 2.6
        """
        return False

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # Expose the length-limited range to templates instead of builtin range.
        self.globals["range"] = safe_range
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access. Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith("_") or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable. Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True. Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (
            getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
        )

    def call_binop(self, context, operator, left, right):
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context, operator, arg):
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Fall back to attribute access for string subscripts so that
            # ``obj['attr']`` works like ``obj.attr`` in templates.
            if isinstance(argument, str):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)

        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute. The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Fall back to item access when the attribute does not exist.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)

        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined(
            f"access to attribute {attribute!r} of"
            f" {obj.__class__.__name__!r} object is unsafe.",
            name=attribute,
            obj=obj,
            exc=SecurityError,
        )

    def format_string(self, s, args, kwargs, format_func=None):
        """If a format call is detected, then this is routed through this
        method so that our safety sandbox can be used for it.
        """
        if isinstance(s, Markup):
            formatter = SandboxedEscapeFormatter(self, s.escape)
        else:
            formatter = SandboxedFormatter(self)

        if format_func is not None and format_func.__name__ == "format_map":
            if len(args) != 1 or kwargs:
                raise TypeError(
                    "format_map() takes exactly one argument"
                    f" {len(args) + (kwargs is not None)} given"
                )

            kwargs = args[0]
            args = None

        rv = formatter.vformat(s, args, kwargs)
        return type(s)(rv)

    def call(__self, __context, __obj, *args, **kwargs):  # noqa: B902
        """Call an object from sandboxed code."""
        fmt = inspect_format_method(__obj)

        if fmt is not None:
            return __self.format_string(fmt, args, kwargs, __obj)

        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError(f"{__obj!r} is not safely callable")

        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj, attr, value):
        # Reject anything the base sandbox rejects, then additionally block
        # mutating methods on the known mutable container types.
        if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
            return False

        return not modifies_known_mutable(obj, attr)
class SandboxedFormatterMixin:
    """Mixin for :class:`string.Formatter` subclasses that routes attribute
    and item lookups inside format fields through the sandboxed environment's
    ``getattr``/``getitem`` safety checks.
    """

    def __init__(self, env):
        self._env = env

    def get_field(self, field_name, args, kwargs):
        """Resolve a format field, applying the sandbox to each access step.

        Returns ``(object, used_key)`` like ``Formatter.get_field``.
        """
        first, rest = formatter_field_name_split(field_name)
        obj = self.get_value(first, args, kwargs)

        for is_attr, i in rest:
            if is_attr:
                obj = self._env.getattr(obj, i)
            else:
                obj = self._env.getitem(obj, i)

        return obj, first
class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
    """Sandboxed :class:`string.Formatter` for plain (unescaped) strings."""

    def __init__(self, env):
        SandboxedFormatterMixin.__init__(self, env)
        Formatter.__init__(self)
class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
    """Sandboxed markupsafe ``EscapeFormatter`` for :class:`Markup` strings."""

    def __init__(self, env, escape):
        SandboxedFormatterMixin.__init__(self, env)
        EscapeFormatter.__init__(self, escape)
d6dce68e689e1505f1c0922aa730a0a8a9966b64 | 4,319 | py | Python | src/tasks/advent_of_code_21/day4.py | JNMaree/solvdoku | d7cbce8618b5a94db8781d88cf3db102e728f4f6 | [
"Unlicense"
] | null | null | null | src/tasks/advent_of_code_21/day4.py | JNMaree/solvdoku | d7cbce8618b5a94db8781d88cf3db102e728f4f6 | [
"Unlicense"
] | 2 | 2021-07-04T21:01:17.000Z | 2021-11-29T21:12:23.000Z | src/tasks/advent_of_code_21/day4.py | JNMaree/solvdoku | d7cbce8618b5a94db8781d88cf3db102e728f4f6 | [
"Unlicense"
] | null | null | null | import numpy as np
class BingoBoard:
    """A square bingo board.

    ``array[i][j]`` holds ``[number, marked_flag]`` where ``marked_flag`` is
    0 (undrawn) or 1 (drawn). ``check`` stays True until the board wins;
    callers use it to skip boards that already have bingo.
    """

    # Board side length (boards are size x size).
    size = 5

    def __init__(self, boardtext) -> None:
        """Parse whitespace-separated numbers (row-major) into the board."""
        # Instance attributes (the original class-level mutable `array = []`
        # default was removed: shared mutable class attributes are a footgun).
        self.check = True
        self.array = np.zeros((self.size, self.size, 2))

        numbers = boardtext.split()
        pos = 0
        for i in range(self.size):
            for j in range(self.size):
                self.array[i][j][0] = int(numbers[pos])
                pos += 1

    def __str__(self) -> str:
        """Render as 'number|marked' cells, one board row per line."""
        out = ""
        for i in range(self.size):
            for j in range(self.size):
                out += f"{int(self.array[i][j][0]):2}|{int(self.array[i][j][1]):1} "
            out += '\n'
        return out

    def is_bingo(self) -> bool:
        """Return True (and clear ``check``) if any full row or column is marked."""
        for i in range(self.size):
            for j in range(self.size):
                if self.array[i][j][1] == 1:
                    # Count marks down column j and across row i.
                    col_marks = 0
                    row_marks = 0
                    for x in range(self.size):
                        if self.array[x][j][1] == 1:
                            col_marks += 1
                    for y in range(self.size):
                        if self.array[i][y][1] == 1:
                            row_marks += 1
                    if row_marks == self.size or col_marks == self.size:
                        self.check = False
                        return True
        return False

    def draw_number(self, number):
        """Mark *number* on the board if present (at most one cell per board)."""
        for i in range(self.size):
            for j in range(self.size):
                if self.array[i][j][0] == number:
                    self.array[i][j][1] = 1
                    return

    def sum_unmarked(self) -> int:
        """Return the sum of all numbers not yet marked."""
        unmarked = 0
        for i in range(self.size):
            for j in range(self.size):
                if self.array[i][j][1] == 0:
                    unmarked += self.array[i][j][0]
        return unmarked
def main():
    """Solve Advent of Code 2021 day 4: score the first (part 1) and the
    last (part 2) bingo board to win.
    """
    # Part 1 ----------------------------------------------------------
    draw_list = []
    board_list = []

    # Read file input: first line is the comma-separated draw order, then
    # blank-line-separated 5x5 boards.
    relative_path = 'src/tasks/advent_of_code_21/day4_input.txt'
    with open(relative_path, 'r') as f:
        draw_list = f.readline().strip()
        draw_list = draw_list.split(',')
        line = f.readline()
        line = f.readline()
        board = ''
        ct = 0
        # NOTE(review): a trailing board is only appended when a blank line
        # follows it — confirm the input file ends with a blank line.
        while line:
            if line != "\n":
                board += line.strip() + " "
            else:
                board_list.append(BingoBoard(board))
                board = ''
            line = f.readline()
            ct += 1
    print(f"{len(draw_list)} draws:{draw_list}")

    # Work through draw list checking for win conditions
    win_number = 0
    unmarked_sum = 0
    win_board = None
    win_condition = False
    for i in range(len(draw_list)):
        for b in board_list:
            b.draw_number(int(draw_list[i]))
            if b.is_bingo():
                win_number = int(draw_list[i])
                unmarked_sum = b.sum_unmarked()
                win_board = b
                win_condition = True
                break
        if win_condition:
            break

    # Calculate score of first winning bingo board
    score = unmarked_sum * win_number
    print(f"winning board:\n{win_board}")
    print(f"unmarked_sum:{int(unmarked_sum)}")
    print(f"winning_number:{int(win_number)}")
    print(f"score:{int(score)}")

    # Part 2 ----------------------------------------------------------
    # Keep drawing until every board has won; the last winner's score counts.
    print("\nPart2:")
    number_of_boards = len(board_list)
    win_count = 0
    draw = 0
    while win_count < (number_of_boards) and draw < len(draw_list):
        for b in board_list:
            if b.check:  # If board not yet complete
                b.draw_number(int(draw_list[draw]))
                if b.is_bingo():
                    win_number = int(draw_list[draw])
                    unmarked_sum = b.sum_unmarked()
                    win_board = b
                    win_count += 1
        draw += 1

    score = unmarked_sum * win_number
    print(f"winning board:\n{win_board}")
    print(f"unmarked_sum:{int(unmarked_sum)}")
    print(f"winning_number:{int(win_number)}")
    print(f"score:{int(score)}")
if __name__ == "__main__":
main()
| 31.297101 | 89 | 0.477194 |
bce24c505fc5823b2f508f52ed4e846fd64494ab | 2,229 | py | Python | setup.py | dotsdl/test_package_a | 33e50168e95b80f2d5de16be6787bff43e287493 | [
"MIT"
] | null | null | null | setup.py | dotsdl/test_package_a | 33e50168e95b80f2d5de16be6787bff43e287493 | [
"MIT"
] | null | null | null | setup.py | dotsdl/test_package_a | 33e50168e95b80f2d5de16be6787bff43e287493 | [
"MIT"
] | null | null | null | """
test_package_a
The first of two pages to test implicit namespaces
"""
import sys
from setuptools import setup, find_namespace_packages
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except:
long_description = "\n".join(short_description[2:])
setup(
# Self-descriptive entries which should always be present
name='test_package_a',
author='OFF Core Devs',
author_email='info@openforcefield.org',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='MIT',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_namespace_packages(include=['test_org.*']),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Additional entries you may want simply uncomment the lines you want and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
| 37.15 | 118 | 0.703454 |
04a03a0b3ecb32dfdd4945dbba17fc813c0fe83d | 14,157 | py | Python | envs/flatland/observations/custom_graph_obs.py | netceteragroup/Flatland-Challenge | 4292e8aa778d264d025ad6d32926840864b22a21 | [
"MIT"
] | 4 | 2021-01-15T10:49:33.000Z | 2021-12-31T08:11:35.000Z | envs/flatland/observations/custom_graph_obs.py | netceteragroup/Flatland-Challenge | 4292e8aa778d264d025ad6d32926840864b22a21 | [
"MIT"
] | null | null | null | envs/flatland/observations/custom_graph_obs.py | netceteragroup/Flatland-Challenge | 4292e8aa778d264d025ad6d32926840864b22a21 | [
"MIT"
] | null | null | null | import numpy as np
import gym
from flatland.core.env_observation_builder import ObservationBuilder
from flatland.core.env_prediction_builder import PredictionBuilder
from flatland.core.grid.grid_utils import coordinate_to_position
from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.rail_env import RailEnvActions
from flatland.envs.rail_env_shortest_paths import get_valid_move_actions_
from flatland.envs.observations import GlobalObsForRailEnv
from typing import Tuple, Dict, List, Set, NamedTuple, Optional
from .segment_graph import Graph
from envs.flatland.observations import Observation, register_obs
# Features = NamedTuple('Features', [#("num_agents", int),
# # ("width", int),
# # ("height", int),
# # ("max_num_cities", int),
# # ("time_tick", int),
# ("num_active_agents", int),
# ("num_ready_agents", int),
# ("deadlock_in_segment", int),
# ("is_next_switch_usable", int),
# ("shortest_path_length", int),
# ("distance_left", int),
# ("distance_forward", int),
# ("distance_right", int),
# ("number_agents_same_dir_on_shortest", int),
# ("number_agents_opp_dir_on_shortest", int),
# # ("alternative_path_dist", int),
# # ("number_agents_same_dir_on_alternative", int),
# # ("number_agents_opp_dir_on_alternative", int),
# # ("number_of_switches_on_shortest_path", int),
# ("potential_deadlock_left", int),
# ("potential_deadlock_forward", int),
# ("potential_deadlock_right", int),
# # ("betweenness_switch_same_dir", float),
# # ("betweenness_switch_opp_dir_avg", float),
# # ("closeness_switch_same_dir", float),
# # ("closeness_switch_opp_dir_avg", float),
# # ("betweenness_shortest", float),
# # ("betweenness_alternative", float),
# # ("closeness_shortest", float),
# # ("closeness_alternative", float),
# ("is_on_switch", int),
# ("dist_agent_same_dir", int),
# ("dist_agent_opposite_dir", int),
# ("dist_agent_same_dir_alternative", int),
# ("dist_agent_opposite_dir_alternative", int),
# ("dist_to_switch", int),
# ("deadlock_on_segment_with_unusable_switches", int),
# ("priority", int),
# ("agent_status", int),
# ])
# Per-agent feature vector produced by the graph observation builder.
# Field order defines the observation vector layout (see observation_space).
Features = NamedTuple('Features', [
    ("agent_shortest_path_ind", int),
    ("priority", int),
    ("agent_status", int),
])
@register_obs("graphobs")
class GraphObservartion(Observation):
def __init__(self, config=None) -> None:
super().__init__(config)
self._builder = CustomGraphObservation()
def builder(self) -> ObservationBuilder:
return self._builder
def observation_space(self) -> gym.Space:
return gym.spaces.Box(low=-1, high=np.inf, shape=(len(Features._fields),))
class CustomGraphObservation(ObservationBuilder):
def __init__(self):
    # No extra state at construction; everything is (re)built in reset().
    super().__init__()
def reset(self):
    """Rebuild the segment graph and cached episode statistics.

    Called by flatland at the start of each episode, after ``self.env``
    has been (re)assigned.
    """
    self.graph = Graph(self.env)
    self.num_agents = len(self.env.agents)
    self.width = self.env.width
    self.height = self.env.height
    # Heuristic: roughly one city per 10 agents, minimum 2 — presumably
    # mirrors the env generator's configuration; TODO confirm.
    self.max_num_cities = (self.num_agents // 10) + 2
    self.time_tick = -1
    self.num_active_agents = len(
        [agent for agent in self.env.agents if agent.status == RailAgentStatus.ACTIVE])
    self.num_ready_agents = len(
        [agent for agent in self.env.agents if agent.status == RailAgentStatus.READY_TO_DEPART])
    # Sentinel values meaning "not yet computed".
    self.dist_next_switch = -1
    self.shortest_path_length = -1
def get(self, handle: int = 0, segment_deadlock=0):
#
#
# self.graph.draw_graph(self.graph.graph)
# self.graph.get_AgentInfo(handle)
# print(handle, self.graph.segment_deadlock(handle))
#
#
# self.graph.draw_graph(self.graph.graph)
# self.graph.get_AgentInfo(handle)
# print(handle, self.graph.segment_deadlock(handle))
# print(handle, results)
# print('===')
# shortest_path_dist, number_agents_same_dir_on_shortest, \
# number_agents_opp_dir_on_shortest, alternative_path_dist, number_agents_same_dir_on_alternative, \
# number_agents_opp_dir_on_alternative, number_of_switches_on_shortest_path, betweenness_shortest, \
# betweenness_alternative, closeness_shortest, closeness_alternative, dist_agent_same_dir, \
# dist_agent_opposite_dir, dist_agent_same_dir_alternative, dist_agent_opposite_dir_alternative \
# = self.graph.compute_shortest_path(handle)
# print(handle, dist_agent_same_dir, dist_agent_opposite_dir, dist_agent_same_dir_alternative,
# dist_agent_opposite_dir_alternative)
# print(handle, self.graph.compute_shortest_path(handle))
# betweenness_switch_same_dir, betweenness_switch_opp_dir_avg = self.graph.get_centrality_for_next_node(handle,
# centrality="betweenness")
# closeness_switch_same_dir, closeness_switch_opp_dir_avg = self.graph.get_centrality_for_next_node(handle,
#centrality="closeness")
# dist_to_switch = self.graph.dist_to_switch(handle)
# deadlock_on_segment_with_unusable_switches = self.graph.check_if_unusable_switches_cause_deadlock(handle)
priority = self.graph.priorities.get(handle, 0)
agent_status = self.graph.get_agent_status(handle)
# is_next_switch_usable = 1 if self.graph.check_if_next_switch_is_usable(handle) else 0
# results, deadlocks = self.graph.all_paths_from_switch(handle)
# potential_deadlock_left, potential_deadlock_forward, potential_deadlock_right = deadlocks
# print(handle, results, deadlocks, is_next_switch_usable, shortest_path_dist, alternative_path_dist, priority)
# deadlock_in_segment = 1 if self.graph.segment_deadlock(handle) else 0
# is_on_switch = 1 if self.graph.is_on_switch(handle) else 0
# prev_priority = self.graph.prev_priorities[handle] if handle in self.graph.prev_priorities.keys() else 0
# print(handle, shortest_path_dist, deadlock_in_segment, results[0], results[1], results[2],
# potential_deadlock_left, potential_deadlock_forward, potential_deadlock_right, is_on_switch)
# dist_shortest_alt = np.partition(results, 2)
# shortest_path_dist = dist_shortest_alt[0]
# alternative_path_dist = dist_shortest_alt[1]
# if not is_on_switch or (is_on_switch and not is_next_switch_usable):
# results[1] = shortest_path_dist
if agent_status == RailAgentStatus.DONE or agent_status == RailAgentStatus.DONE_REMOVED:
min_path_ind = 3
else:
dist, visited, parent, start, best_path, end_node = self.graph.shortest_paths[handle]
seg_with_station = [end_nodes for end_nodes in self.graph.agents[handle].EndEdges if
self.graph.agents[handle].CurrentNode in end_nodes]
if len(seg_with_station) == 1:
segment = self.graph.graph[seg_with_station[0][0]][seg_with_station[0][1]]["segment"]
min_path_ind = self.graph.get_next_direction_from_given_direction_on_switch(segment[0][2],
segment[1][2])
else:
if end_node == 0 or end_node is None:
min_path_ind = 2
else:
path = self.graph._construct_shortest_path(parent, end_node)
extended_path = self.graph._get_extended_shortest_path(self.graph.agents[handle], path)
min_path_ind = self.graph.get_next_direction_from_given_direction_on_switch(self.graph.agents[handle].Agent.direction,
extended_path[0][2])
# print(handle, potential_deadlock_left, potential_deadlock_forward, potential_deadlock_right)
# out = Features(#num_agents=self.num_agents,
# # width=self.width,
# # height=self.height,
# # max_num_cities=self.max_num_cities,
# # time_tick=self.time_tick,
# num_active_agents=self.num_active_agents,
# num_ready_agents=self.num_ready_agents,
# deadlock_in_segment=segment_deadlock,
# is_next_switch_usable=is_next_switch_usable,
# shortest_path_length=shortest_path_dist,
# distance_left=results[0],
# distance_forward=results[1],
# distance_right=results[2],
# number_agents_same_dir_on_shortest=number_agents_same_dir_on_shortest,
# number_agents_opp_dir_on_shortest=number_agents_opp_dir_on_shortest,
# # alternative_path_dist=alternative_path_dist,
# # number_agents_same_dir_on_alternative=number_agents_same_dir_on_alternative,
# # number_agents_opp_dir_on_alternative=number_agents_opp_dir_on_alternative,
# # number_of_switches_on_shortest_path=number_of_switches_on_shortest_path,
# potential_deadlock_left=potential_deadlock_left,
# potential_deadlock_forward=potential_deadlock_forward,
# potential_deadlock_right=potential_deadlock_right,
# # betweenness_switch_same_dir=betweenness_switch_same_dir,
# # betweenness_switch_opp_dir_avg=betweenness_switch_opp_dir_avg,
# # closeness_switch_same_dir=closeness_switch_same_dir,
# # closeness_switch_opp_dir_avg=closeness_switch_opp_dir_avg,
# # betweenness_shortest=betweenness_shortest,
# # betweenness_alternative=betweenness_alternative,
# # closeness_shortest=closeness_shortest,
# # closeness_alternative=closeness_alternative,
# is_on_switch=is_on_switch,
# dist_agent_same_dir=dist_agent_same_dir,
# dist_agent_opposite_dir=dist_agent_opposite_dir,
# dist_agent_same_dir_alternative=dist_agent_same_dir_alternative,
# dist_agent_opposite_dir_alternative=dist_agent_opposite_dir_alternative,
# dist_to_switch=dist_to_switch,
# deadlock_on_segment_with_unusable_switches=deadlock_on_segment_with_unusable_switches,
# priority=priority,
# agent_status=agent_status,
# )
out = Features(
agent_shortest_path_ind=min_path_ind,
priority=priority,
agent_status=agent_status,
)
# print(handle, potential_deadlock_left, potential_deadlock_forward, potential_deadlock_right, out[7], dist_to_switch)
# print(handle, deadlock_on_segment_with_unusable_switches)
# print(handle, deadlocks, betweenness_shortest, betweenness_alternative, closeness_shortest, closeness_alternative)
# i = input()
# print(out)
out = np.array(out)
# print(handle, results, self.graph.check_if_next_switch_is_usable(handle))
# print('=============================================')
return out
def get_many(self, handles: Optional[List[int]] = None):
self.num_active_agents = len([agent for agent in self.env.agents if agent.status == RailAgentStatus.ACTIVE])
self.num_ready_agents = len(
[agent for agent in self.env.agents if agent.status == RailAgentStatus.READY_TO_DEPART])
self.time_tick += 1
for handle in handles:
self.graph.update_agent(handle)
observations = {}
deadlocks = []
if handles is None:
handles = []
for h in handles:
deadlocks.append(1 if self.graph.segment_deadlock(h) else 0)
self.graph.compute_shortest_paths_for_all_agents()
self.graph.calculate_priorities()
for h in handles:
observations[h] = self.get(h, deadlocks[h])
return observations
| 55.517647 | 138 | 0.579431 |
9d716c5c32c36f4e7761f0cb188e9fc1268396fa | 26,905 | py | Python | test/functional/wallet_bumpfee.py | chrisandrewca/namecoin-core | 2f2bfb323e7e19512697039753c0675dacbb6dec | [
"MIT"
] | null | null | null | test/functional/wallet_bumpfee.py | chrisandrewca/namecoin-core | 2f2bfb323e7e19512697039753c0675dacbb6dec | [
"MIT"
] | null | null | null | test/functional/wallet_bumpfee.py | chrisandrewca/namecoin-core | 2f2bfb323e7e19512697039753c0675dacbb6dec | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
import io
from test_framework.blocktools import add_witness_commitment, create_block, create_coinbase, send_to_witness
from test_framework.messages import BIP125_SEQUENCE_NUMBER, CTransaction
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
hex_str_to_bytes,
)
# Passphrase used to encrypt node1's wallet in run_test (test_locked_wallet_fails).
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
# Fee rates (in BTC per 1000 vbytes)
INSUFFICIENT = 0.00001000
ECONOMICAL = 0.00150000
NORMAL = 0.00250000
HIGH = 0.00500000
TOO_HIGH = 1.00000000
class BumpFeeTest(BitcoinTestFramework):
    """Test driver: funds a two-node setup and runs every bumpfee test case."""
    def set_test_params(self):
        # node0 ("peer_node") gets -walletrbf=0, node1 ("rbf_node") -walletrbf=1,
        # so only node1 creates BIP 125 replaceable transactions by default.
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [[
            "-walletrbf={}".format(i),
            "-mintxfee=0.00002",
            "-addresstype=bech32",
        ] for i in range(self.num_nodes)]
    def skip_test_if_missing_module(self):
        # bumpfee is a wallet RPC; skip entirely when built without wallet.
        self.skip_if_no_wallet()
    def run_test(self):
        # Encrypt wallet for test_locked_wallet_fails test
        self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 utxos of 0.001 BTC (100,000 satoshis each)
        self.log.info("Mining blocks...")
        peer_node.generate(110)
        self.sync_all()
        for _ in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.001)
        self.sync_all()
        peer_node.generate(1)
        self.sync_all()
        assert_equal(rbf_node.getbalance(), Decimal("0.025"))
        self.log.info("Running tests")
        dest_address = peer_node.getnewaddress()
        test_invalid_parameters(rbf_node, dest_address)
        test_simple_bumpfee_succeeds(self, "default", rbf_node, peer_node, dest_address)
        test_simple_bumpfee_succeeds(self, "fee_rate", rbf_node, peer_node, dest_address)
        test_feerate_args(self, rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(self, rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(self, peer_node, dest_address)
        test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address)
        test_dust_to_fee(self, rbf_node, dest_address)
        test_watchonly_psbt(self, peer_node, rbf_node, dest_address)
        test_rebumping(self, rbf_node, dest_address)
        test_rebumping_not_replaceable(self, rbf_node, dest_address)
        test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address)
        test_bumpfee_metadata(self, rbf_node, dest_address)
        test_locked_wallet_fails(self, rbf_node, dest_address)
        test_change_script_match(self, rbf_node, dest_address)
        test_settxfee(self, rbf_node, dest_address)
        test_maxtxfee_fails(self, rbf_node, dest_address)
        # These tests wipe out a number of utxos that are expected in other tests
        test_small_output_with_feerate_succeeds(self, rbf_node, dest_address)
        test_no_more_inputs_fails(self, rbf_node, dest_address)
def test_invalid_parameters(node, dest_address):
    """bumpfee must reject malformed option dicts: bad estimate_mode types and
    values, and confTarget given together with its conf_target alias."""
    txid = spend_one_input(node, dest_address)
    # invalid estimate mode
    assert_raises_rpc_error(-8, "Invalid estimate_mode parameter", node.bumpfee, txid, {
        "estimate_mode": "moo",
    })
    assert_raises_rpc_error(-3, "Expected type string", node.bumpfee, txid, {
        "estimate_mode": 38,
    })
    assert_raises_rpc_error(-3, "Expected type string", node.bumpfee, txid, {
        "estimate_mode": {
            "foo": "bar",
        },
    })
    assert_raises_rpc_error(-8, "Invalid estimate_mode parameter", node.bumpfee, txid, {
        "estimate_mode": Decimal("3.141592"),
    })
    # confTarget and conf_target
    assert_raises_rpc_error(-8, "confTarget and conf_target options should not both be set", node.bumpfee, txid, {
        "confTarget": 123,
        "conf_target": 456,
    })
def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
    """Happy path: bumpfee/psbtbumpfee replace a tx (with default options or an
    explicit fee_rate, selected by *mode*), the replacement propagates to both
    mempools, and the wallet links old/new records via replaced_by_txid and
    replaces_txid."""
    self.log.info('Test simple bumpfee: {}'.format(mode))
    rbfid = spend_one_input(rbf_node, dest_address)
    rbftx = rbf_node.gettransaction(rbfid)
    self.sync_mempools((rbf_node, peer_node))
    assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
    if mode == "fee_rate":
        bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": NORMAL})
        bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL})
    else:
        bumped_psbt = rbf_node.psbtbumpfee(rbfid)
        bumped_tx = rbf_node.bumpfee(rbfid)
    assert_equal(bumped_tx["errors"], [])
    assert bumped_tx["fee"] > -rbftx["fee"]
    assert_equal(bumped_tx["origfee"], -rbftx["fee"])
    assert "psbt" not in bumped_tx
    assert_equal(bumped_psbt["errors"], [])
    assert bumped_psbt["fee"] > -rbftx["fee"]
    assert_equal(bumped_psbt["origfee"], -rbftx["fee"])
    assert "psbt" in bumped_psbt
    # check that bumped_tx propagates, original tx was evicted and has a wallet conflict
    self.sync_mempools((rbf_node, peer_node))
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert bumped_tx["txid"] in peer_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    assert rbfid not in peer_node.getrawmempool()
    oldwtx = rbf_node.gettransaction(rbfid)
    assert len(oldwtx["walletconflicts"]) > 0
    # check wallet transaction replaces and replaced_by values
    bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
    assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_feerate_args(self, rbf_node, peer_node, dest_address):
    """fee_rate option validation: mutually exclusive with conf_target/confTarget,
    rejects removed totalFee, out-of-range and too-low/too-high values."""
    self.log.info('Test fee_rate args')
    rbfid = spend_one_input(rbf_node, dest_address)
    self.sync_mempools((rbf_node, peer_node))
    assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
    assert_raises_rpc_error(-8, "conf_target can't be set with fee_rate. Please provide either a confirmation target in blocks for automatic fee estimation, or an explicit fee rate.", rbf_node.bumpfee, rbfid, {"fee_rate": NORMAL, "confTarget": 1})
    assert_raises_rpc_error(-3, "Unexpected key totalFee", rbf_node.bumpfee, rbfid, {"totalFee": NORMAL})
    assert_raises_rpc_error(-8, "conf_target can't be set with fee_rate. Please provide either a confirmation target in blocks for automatic fee estimation, or an explicit fee rate.", rbf_node.bumpfee, rbfid, {"fee_rate":0.00001, "confTarget": 1})
    # Bumping to just above minrelay should fail to increase total fee enough, at least
    assert_raises_rpc_error(-8, "Insufficient total fee", rbf_node.bumpfee, rbfid, {"fee_rate": INSUFFICIENT})
    assert_raises_rpc_error(-3, "Amount out of range", rbf_node.bumpfee, rbfid, {"fee_rate": -1})
    assert_raises_rpc_error(-4, "is too high (cannot be higher than", rbf_node.bumpfee, rbfid, {"fee_rate": TOO_HIGH})
def test_segwit_bumpfee_succeeds(self, rbf_node, dest_address):
    """bumpfee must work on a replaceable tx whose input is a segwit output."""
    self.log.info('Test that segwit-sourcing bumpfee works')
    # Create a transaction with segwit output, then create an RBF transaction
    # which spends it, and make sure bumpfee can be called on it.
    segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
    segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress(address_type='p2sh-segwit'))
    segwitid = send_to_witness(
        use_p2wsh=False,
        node=rbf_node,
        utxo=segwit_in,
        pubkey=segwit_out["pubkey"],
        encode_p2sh=False,
        amount=Decimal("0.0009"),
        sign=True)
    # Hand-built replaceable spend of the segwit utxo (opt-in via sequence).
    rbfraw = rbf_node.createrawtransaction([{
        'txid': segwitid,
        'vout': 0,
        "sequence": BIP125_SEQUENCE_NUMBER
    }], {dest_address: Decimal("0.0005"),
         rbf_node.getrawchangeaddress(): Decimal("0.0003")})
    rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
    rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
    assert rbfid in rbf_node.getrawmempool()
    bumped_tx = rbf_node.bumpfee(rbfid)
    assert bumped_tx["txid"] in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(self, peer_node, dest_address):
    """bumpfee must refuse a transaction that did not opt into BIP 125
    (peer_node runs with -walletrbf=0)."""
    self.log.info('Test that we cannot replace a non RBF transaction')
    not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
    assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address):
    """bumpfee must refuse a tx that spends inputs the wallet does not own."""
    self.log.info('Test that it cannot bump fee if non-owned inputs are included')
    # here, the rbftx has a peer_node coin and then adds a rbf_node input
    # Note that this test depends upon the RPC code checking input ownership prior to change outputs
    # (since it can't use fundrawtransaction, it lacks a proper change output)
    utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
    inputs = [{
        "txid": utxo["txid"],
        "vout": utxo["vout"],
        "address": utxo["address"],
        "sequence": BIP125_SEQUENCE_NUMBER
    } for utxo in utxos]
    output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
    rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
    # Each wallet signs only its own input, so both must sign in turn.
    signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
    signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
    rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
    assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
                            rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address):
    """bumpfee must refuse a tx that already has an in-wallet descendant spend."""
    self.log.info('Test that fee cannot be bumped when it has descendant')
    # parent is send-to-self, so we don't have to check which output is change when creating the child tx
    parent_id = spend_one_input(rbf_node, rbf_node_address)
    tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
    tx = rbf_node.signrawtransactionwithwallet(tx)
    rbf_node.sendrawtransaction(tx["hex"])
    assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_with_feerate_succeeds(self, rbf_node, dest_address):
    """Repeatedly bump until the fee exceeds the non-destination value of the
    original input; the wallet must then add extra inputs while keeping the
    original input in the final transaction."""
    self.log.info('Testing small output with feerate bump succeeds')
    # Make sure additional inputs exist
    rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
    rbfid = spend_one_input(rbf_node, dest_address)
    input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
    assert_equal(len(input_list), 1)
    original_txin = input_list[0]
    self.log.info('Keep bumping until transaction fee out-spends non-destination value')
    tx_fee = 0
    while True:
        input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
        new_item = list(input_list)[0]
        # While the fee still fits, the single original input must be unchanged.
        assert_equal(len(input_list), 1)
        assert_equal(original_txin["txid"], new_item["txid"])
        assert_equal(original_txin["vout"], new_item["vout"])
        rbfid_new_details = rbf_node.bumpfee(rbfid)
        rbfid_new = rbfid_new_details["txid"]
        raw_pool = rbf_node.getrawmempool()
        assert rbfid not in raw_pool
        assert rbfid_new in raw_pool
        rbfid = rbfid_new
        tx_fee = rbfid_new_details["fee"]
        # Total value from input not going to destination
        if tx_fee > Decimal('0.00050000'):
            break
    # input(s) have been added
    final_input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
    assert_greater_than(len(final_input_list), 1)
    # Original input is in final set
    assert [txin for txin in final_input_list
            if txin["txid"] == original_txin["txid"]
            and txin["vout"] == original_txin["vout"]]
    rbf_node.generatetoaddress(1, rbf_node.getnewaddress())
    assert_equal(rbf_node.gettransaction(rbfid)["confirmations"], 1)
def test_dust_to_fee(self, rbf_node, dest_address):
    """When the bumped change output would become dust, it must be dropped and
    its value added to the fee."""
    self.log.info('Test that bumped output that is dust is dropped to fee')
    rbfid = spend_one_input(rbf_node, dest_address)
    fulltx = rbf_node.getrawtransaction(rbfid, 1)
    # The DER formatting used by Bitcoin to serialize ECDSA signatures means that signatures can have a
    # variable size of 70-72 bytes (or possibly even less), with most being 71 or 72 bytes. The signature
    # in the witness is divided by 4 for the vsize, so this variance can take the weight across a 4-byte
    # boundary. Thus expected transaction size (p2wpkh, 1 input, 2 outputs) is 140-141 vbytes, usually 141.
    if not 140 <= fulltx["vsize"] <= 141:
        raise AssertionError("Invalid tx vsize of {} (140-141 expected), full tx: {}".format(fulltx["vsize"], fulltx))
    # Bump with fee_rate of 0.00350250 BTC per 1000 vbytes to create dust.
    # Expected fee is 141 vbytes * fee_rate 0.00350250 BTC / 1000 vbytes = 0.00049385 BTC.
    # or occasionally 140 vbytes * fee_rate 0.00350250 BTC / 1000 vbytes = 0.00049035 BTC.
    # Dust should be dropped to the fee, so actual bump fee is 0.00050000 BTC.
    bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": 0.00350250})
    full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
    assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
    assert_equal(len(fulltx["vout"]), 2)
    assert_equal(len(full_bumped_tx["vout"]), 1)  # change output is eliminated
    assert_equal(full_bumped_tx["vout"][0]['value'], Decimal("0.00050000"))
def test_settxfee(self, rbf_node, dest_address):
    """bumpfee must honor the wallet paytxfee set via settxfee, and settxfee
    itself must respect minrelay/wallet-min/-maxtxfee bounds."""
    self.log.info('Test settxfee')
    assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
    assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
    # check that bumpfee reacts correctly to the use of settxfee (paytxfee)
    rbfid = spend_one_input(rbf_node, dest_address)
    requested_feerate = Decimal("0.00250000")
    rbf_node.settxfee(requested_feerate)
    bumped_tx = rbf_node.bumpfee(rbfid)
    actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
    # Assert that the difference between the requested feerate and the actual
    # feerate of the bumped transaction is small.
    assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
    rbf_node.settxfee(Decimal("0.00000000"))  # unset paytxfee
    # check that settxfee respects -maxtxfee
    self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
    assert_raises_rpc_error(-8, "txfee cannot be more than wallet max tx fee", rbf_node.settxfee, Decimal('0.00003'))
    self.restart_node(1, self.extra_args[1])
    # Re-unlock: restarting the node relocked the encrypted wallet.
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
def test_maxtxfee_fails(self, rbf_node, dest_address):
    """bumpfee must fail when the bumped fee would exceed -maxtxfee."""
    self.log.info('Test that bumpfee fails when it hits -maxtxfee')
    # size of bumped transaction (p2wpkh, 1 input, 2 outputs): 141 vbytes
    # expected bump fee of 141 vbytes * 0.00200000 BTC / 1000 vbytes = 0.00002820 BTC
    # which exceeds maxtxfee and is expected to raise
    self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
    rbfid = spend_one_input(rbf_node, dest_address)
    assert_raises_rpc_error(-4, "Unable to create transaction. Fee exceeds maximum configured by -maxtxfee", rbf_node.bumpfee, rbfid)
    self.restart_node(1, self.extra_args[1])
    # Re-unlock after the restart relocked the encrypted wallet.
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
def test_watchonly_psbt(self, peer_node, rbf_node, dest_address):
    """psbtbumpfee on a watch-only wallet must return an unsigned PSBT that a
    separate signing wallet (same keys, private) can complete and broadcast."""
    self.log.info('Test that PSBT is returned for bumpfee in watchonly wallets')
    priv_rec_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0/*)#rweraev0"
    pub_rec_desc = rbf_node.getdescriptorinfo(priv_rec_desc)["descriptor"]
    priv_change_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/*)#j6uzqvuh"
    pub_change_desc = rbf_node.getdescriptorinfo(priv_change_desc)["descriptor"]
    # Create a wallet with private keys that can sign PSBTs
    rbf_node.createwallet(wallet_name="signer", disable_private_keys=False, blank=True)
    signer = rbf_node.get_wallet_rpc("signer")
    assert signer.getwalletinfo()['private_keys_enabled']
    result = signer.importmulti([{
        "desc": priv_rec_desc,
        "timestamp": 0,
        "range": [0,1],
        "internal": False,
        "keypool": False # Keys can only be imported to the keypool when private keys are disabled
    },
    {
        "desc": priv_change_desc,
        "timestamp": 0,
        "range": [0, 0],
        "internal": True,
        "keypool": False
    }])
    assert_equal(result, [{'success': True}, {'success': True}])
    # Create another wallet with just the public keys, which creates PSBTs
    rbf_node.createwallet(wallet_name="watcher", disable_private_keys=True, blank=True)
    watcher = rbf_node.get_wallet_rpc("watcher")
    assert not watcher.getwalletinfo()['private_keys_enabled']
    result = watcher.importmulti([{
        "desc": pub_rec_desc,
        "timestamp": 0,
        "range": [0, 10],
        "internal": False,
        "keypool": True,
        "watchonly": True
    }, {
        "desc": pub_change_desc,
        "timestamp": 0,
        "range": [0, 10],
        "internal": True,
        "keypool": True,
        "watchonly": True
    }])
    assert_equal(result, [{'success': True}, {'success': True}])
    funding_address1 = watcher.getnewaddress(address_type='bech32')
    funding_address2 = watcher.getnewaddress(address_type='bech32')
    peer_node.sendmany("", {funding_address1: 0.001, funding_address2: 0.001})
    peer_node.generate(1)
    self.sync_all()
    # Create single-input PSBT for transaction to be bumped
    psbt = watcher.walletcreatefundedpsbt([], {dest_address: 0.0005}, 0, {"feeRate": 0.00001}, True)['psbt']
    psbt_signed = signer.walletprocesspsbt(psbt=psbt, sign=True, sighashtype="ALL", bip32derivs=True)
    psbt_final = watcher.finalizepsbt(psbt_signed["psbt"])
    original_txid = watcher.sendrawtransaction(psbt_final["hex"])
    assert_equal(len(watcher.decodepsbt(psbt)["tx"]["vin"]), 1)
    # Bump fee, obnoxiously high to add additional watchonly input
    bumped_psbt = watcher.psbtbumpfee(original_txid, {"fee_rate": HIGH})
    assert_greater_than(len(watcher.decodepsbt(bumped_psbt['psbt'])["tx"]["vin"]), 1)
    assert "txid" not in bumped_psbt
    assert_equal(bumped_psbt["origfee"], -watcher.gettransaction(original_txid)["fee"])
    assert not watcher.finalizepsbt(bumped_psbt["psbt"])["complete"]
    # Sign bumped transaction
    bumped_psbt_signed = signer.walletprocesspsbt(psbt=bumped_psbt["psbt"], sign=True, sighashtype="ALL", bip32derivs=True)
    bumped_psbt_final = watcher.finalizepsbt(bumped_psbt_signed["psbt"])
    assert bumped_psbt_final["complete"]
    # Broadcast bumped transaction
    bumped_txid = watcher.sendrawtransaction(bumped_psbt_final["hex"])
    assert bumped_txid in rbf_node.getrawmempool()
    assert original_txid not in rbf_node.getrawmempool()
    rbf_node.unloadwallet("watcher")
    rbf_node.unloadwallet("signer")
def test_rebumping(self, rbf_node, dest_address):
    """A tx that was already bumped cannot be bumped again, but its successor can."""
    self.log.info('Test that re-bumping the original tx fails, but bumping successor works')
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
    assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"fee_rate": NORMAL})
    rbf_node.bumpfee(bumped["txid"], {"fee_rate": NORMAL})
def test_rebumping_not_replaceable(self, rbf_node, dest_address):
    """A bump created with replaceable=False cannot itself be bumped."""
    self.log.info('Test that re-bumping non-replaceable fails')
    rbfid = spend_one_input(rbf_node, dest_address)
    bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL, "replaceable": False})
    assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
                            {"fee_rate": NORMAL})
def test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address):
    """Outputs of both sides of a replacement pair must be unspendable while
    unconfirmed; the replaced tx's output becomes spendable once mined."""
    self.log.info('Test that unconfirmed outputs from bumped txns are not spendable')
    rbfid = spend_one_input(rbf_node, rbf_node_address)
    rbftx = rbf_node.gettransaction(rbfid)["hex"]
    assert rbfid in rbf_node.getrawmempool()
    bumpid = rbf_node.bumpfee(rbfid)["txid"]
    assert bumpid in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    # check that outputs from the bump transaction are not spendable
    # due to the replaces_txid check in CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
    # submit a block with the rbf tx to clear the bump tx out of the mempool,
    # then invalidate the block so the rbf tx will be put back in the mempool.
    # This makes it possible to check whether the rbf tx outputs are
    # spendable before the rbf tx is confirmed.
    block = submit_block_with_tx(rbf_node, rbftx)
    # Can not abandon conflicted tx
    assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
    rbf_node.invalidateblock(block.hash)
    # Call abandon to make sure the wallet doesn't attempt to resubmit
    # the bump tx and hope the wallet does not rebroadcast before we call.
    rbf_node.abandontransaction(bumpid)
    assert bumpid not in rbf_node.getrawmempool()
    assert rbfid in rbf_node.getrawmempool()
    # check that outputs from the rbf tx are not spendable before the
    # transaction is confirmed, due to the replaced_by_txid check in
    # CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
    # check that the main output from the rbf tx is spendable after confirmed
    rbf_node.generate(1)
    assert_equal(
        sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
            if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(self, rbf_node, dest_address):
    """The comment/to metadata of the original tx must carry over to the bump."""
    self.log.info('Test that bumped txn metadata persists to new txn record')
    # Sanity check the precondition, then mine enough for a 49 BTC send.
    assert(rbf_node.getbalance() < 49)
    rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
    rbfid = rbf_node.sendtoaddress(dest_address, 49, "comment value", "to value")
    bumped_tx = rbf_node.bumpfee(rbfid)
    bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
    assert_equal(bumped_wtx["comment"], "comment value")
    assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(self, rbf_node, dest_address):
    """bumpfee must fail with a passphrase error while the wallet is locked."""
    self.log.info('Test that locked wallet cannot bump txn')
    rbfid = spend_one_input(rbf_node, dest_address)
    rbf_node.walletlock()
    assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
                            rbf_node.bumpfee, rbfid)
    # Unlock again so subsequent test cases can keep using the wallet.
    rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
def test_change_script_match(self, rbf_node, dest_address):
    """Successive bumps must reuse the original change address rather than
    generating a new one each time."""
    self.log.info('Test that the same change addresses is used for the replacement transaction when possible')
    def get_change_address(tx):
        # All output addresses of *tx* that the wallet marks as change.
        tx_details = rbf_node.getrawtransaction(tx, 1)
        txout_addresses = [txout['scriptPubKey']['addresses'][0] for txout in tx_details["vout"]]
        return [address for address in txout_addresses if rbf_node.getaddressinfo(address)["ischange"]]
    # Check that there is only one change output
    rbfid = spend_one_input(rbf_node, dest_address)
    change_addresses = get_change_address(rbfid)
    assert_equal(len(change_addresses), 1)
    # Now find that address in each subsequent tx, and no other change
    bumped_total_tx = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
    assert_equal(change_addresses, get_change_address(bumped_total_tx['txid']))
    bumped_rate_tx = rbf_node.bumpfee(bumped_total_tx["txid"])
    assert_equal(change_addresses, get_change_address(bumped_rate_tx['txid']))
def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")):
    """Spend one of the wallet's 0.001 BTC utxos in a BIP 125 replaceable tx.

    Sends 0.0005 BTC to *dest_address* plus a change output of *change_size*
    (omitted when change_size <= 0, leaving the remainder as fee).
    Returns the txid of the broadcast transaction.
    """
    utxo = next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000"))
    # Opt into replaceability by setting the BIP 125 sequence on the input.
    tx_input = {**utxo, "sequence": BIP125_SEQUENCE_NUMBER}
    outputs = {dest_address: Decimal("0.00050000")}
    if change_size > 0:
        outputs[node.getrawchangeaddress()] = change_size
    raw_tx = node.createrawtransaction([tx_input], outputs)
    signed_tx = node.signrawtransactionwithwallet(raw_tx)
    return node.sendrawtransaction(signed_tx["hex"])
def submit_block_with_tx(node, tx):
    """Mine a block on top of *node*'s current tip containing the raw tx hex
    *tx*, submit it, and return the block object."""
    ctx = CTransaction()
    ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
    tip = node.getbestblockhash()
    height = node.getblockcount() + 1
    block_time = node.getblockheader(tip)["mediantime"] + 1
    block = create_block(int(tip, 16), create_coinbase(height), block_time)
    block.vtx.append(ctx)
    block.rehash()
    # Merkle root and witness commitment must be recomputed after adding the tx,
    # and the PoW solved last.
    block.hashMerkleRoot = block.calc_merkle_root()
    add_witness_commitment(block)
    block.solve()
    node.submitblock(block.serialize().hex())
    return block
def test_no_more_inputs_fails(self, rbf_node, dest_address):
    """bumpfee must fail with "Insufficient funds" when no confirmed outputs
    remain to pay the higher fee."""
    self.log.info('Test that bumpfee fails when there are no available confirmed outputs')
    # feerate rbf requires confirmed outputs when change output doesn't exist or is insufficient
    rbf_node.generatetoaddress(1, dest_address)
    # spend all funds, no change output
    rbfid = rbf_node.sendtoaddress(rbf_node.getnewaddress(), rbf_node.getbalance(), "", "", True)
    assert_raises_rpc_error(-4, "Unable to create transaction. Insufficient funds", rbf_node.bumpfee, rbfid)
if __name__ == "__main__":
BumpFeeTest().main()
| 48.477477 | 247 | 0.722579 |
b267f6f733e4a1a46004840f5fcd6fa5d0bf17b0 | 697 | py | Python | academic/schools/models.py | sebastiansalazar123/final | 85e9b822919d946b250070ac63504b2270e59ad1 | [
"MIT"
] | null | null | null | academic/schools/models.py | sebastiansalazar123/final | 85e9b822919d946b250070ac63504b2270e59ad1 | [
"MIT"
] | null | null | null | academic/schools/models.py | sebastiansalazar123/final | 85e9b822919d946b250070ac63504b2270e59ad1 | [
"MIT"
] | null | null | null | from django.db import models
from django.db.models.base import Model
class student(models.Model):
    """A student record (name, short code, creation timestamp).

    NOTE(review): class name is lower-case; Django convention is PascalCase
    (``Student``), but renaming would touch migrations and FK references.
    """
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    # Short institutional identifier for the student.
    code = models.CharField(max_length=10)
    # NOTE(review): no auto_now_add — callers must set this explicitly; confirm intended.
    created_at = models.DateTimeField()
class subject(models.Model):
    """A course subject (name, code, course identifier, creation timestamp)."""
    name = models.CharField(max_length=10)
    code = models.CharField(max_length=10)
    # Course the subject belongs to; stored as a short code (max 3 chars).
    course = models.CharField(max_length=3)
    # NOTE(review): no auto_now_add — callers must set this explicitly; confirm intended.
    created_at = models.DateTimeField()
class subject1(models.Model):
    """Association (join) table linking a student to a subject via two foreign keys.

    NOTE(review): the name 'subject1' is uninformative — something like
    'Enrollment' would be clearer, but renaming requires a migration.
    """
    id_student = models.ForeignKey(student, on_delete=models.CASCADE)
    id_subject = models.ForeignKey(subject, on_delete=models.CASCADE)
    created_at = models.DateTimeField()
| 33.190476 | 69 | 0.748924 |
c097e6e512d909a5e03ed522563a214aab76d920 | 2,130 | py | Python | jazzpos/forms.py | AhmadManzoor/jazzpos | 7b771095b8df52d036657f33f36a97efb575d36c | [
"MIT"
] | 5 | 2015-12-05T15:39:51.000Z | 2020-09-16T20:14:29.000Z | jazzpos/forms.py | AhmadManzoor/jazzpos | 7b771095b8df52d036657f33f36a97efb575d36c | [
"MIT"
] | null | null | null | jazzpos/forms.py | AhmadManzoor/jazzpos | 7b771095b8df52d036657f33f36a97efb575d36c | [
"MIT"
] | 2 | 2019-11-23T17:47:46.000Z | 2022-01-14T11:05:21.000Z | from django import forms
from xpos.forms import RequestModelForm
from jazzpos.models import Customer, Patient, Treatment, StoreSettings
# Accepted input format(s) for date fields: day-month-year, e.g. "31-12-2020".
# Kept as a tuple because Django's input_formats expects an iterable of formats.
DATE_FORMAT = (
    "%d-%m-%Y",
)
class CustomerForm(RequestModelForm):
    """Create/update form for Customer records."""

    # NOTE(review): 'refferer' looks like a misspelling of 'referrer'; the field
    # name is part of the rendered form's interface, so confirm before renaming.
    refferer = forms.IntegerField(required=False)

    def __init__(self, *args, **kwargs):
        super(CustomerForm, self).__init__(*args, **kwargs)
        #self.fields['address'].label = 'Alamat'

    class Meta:
        model = Customer
        widgets = {
            'address': forms.Textarea(attrs={'rows': 3, 'cols': 80, }),
            'notes': forms.Textarea(attrs={'rows': 3, 'cols': 80, }),
        }
class PatientForm(RequestModelForm):
    """Create/update form for Patient records; customizes the date-of-birth field."""

    def __init__(self, *args, **kwargs):
        super(PatientForm, self).__init__(*args, **kwargs)
        # 'Tarikh lahir' is Malay for "date of birth" (user-facing label; do not translate in code).
        self.fields['dob'].label = 'Tarikh lahir'
        # Accept and render dates as day-month-year.
        self.fields['dob'].input_formats = DATE_FORMAT
        self.fields['dob'].widget = forms.DateInput(format="%d-%m-%Y")

    class Meta:
        model = Patient
        exclude = ('customer', 'rcno', 'old_dob', 'nota_penting',)
        widgets = {
            'treatment_history': forms.Textarea(attrs={'rows': 3, 'cols': 80, }),
            'notes': forms.Textarea(attrs={'rows': 3, 'cols': 80, }),
        }
class TreatmentForm(RequestModelForm):
    """Create/update form for Treatment records; stamps the acting user on save."""

    def save(self, commit=True):
        # Record which user created/edited the treatment.
        self.instance.uid = self.request.user.id
        # NOTE(review): this sets 'store' on the FORM, not on self.instance,
        # and 'store' is in Meta.exclude — confirm the store is persisted elsewhere.
        self.store = self.request.store
        return super(TreatmentForm, self).save(commit=commit)

    class Meta:
        model = Treatment
        exclude = ('nid', 'uid', 'type', 'created', 'modified', 'store',)
        widgets = {
            'patient': forms.HiddenInput,
            'notes': forms.Textarea(attrs={'rows': 5, 'cols': 80, }),
            'symptom': forms.Textarea(attrs={'rows': 5, 'cols': 80, }),
            'diagnosis': forms.Textarea(attrs={'rows': 5, 'cols': 80, }),
            'remedy': forms.Textarea(attrs={'rows': 5, 'cols': 80, }),
        }
class StoreSettingsForm(forms.ModelForm):
    """Edit form for StoreSettings; 'name' and 'store' are fixed by the caller, hence hidden."""

    class Meta:
        model = StoreSettings
        widgets = {
            'name': forms.HiddenInput,
            'store': forms.HiddenInput,
        }
| 32.769231 | 81 | 0.582629 |
0d63ac227ea8ad83d3318947c7f8856cf24089d4 | 8,211 | py | Python | atomicswap/ecdsa.py | y-chan/atomicswap-qt | 5bab6d301177aaf7487236597f75efb1172e6450 | [
"MIT"
] | 14 | 2020-01-12T07:43:31.000Z | 2020-09-10T23:50:53.000Z | atomicswap/ecdsa.py | atomicswap-network/atomicswap-qt | 4379deda0840ca305d8cc20b0337a7d3517eb6a3 | [
"MIT"
] | 12 | 2020-01-12T10:33:58.000Z | 2020-02-06T04:35:27.000Z | atomicswap/ecdsa.py | atomicswap-network/atomicswap-qt | 4379deda0840ca305d8cc20b0337a7d3517eb6a3 | [
"MIT"
] | 4 | 2020-01-28T03:17:32.000Z | 2021-08-09T22:14:40.000Z | # Copyright (c) 2010-2020 The Go Authors
# Copyright (c) 2014-2020 The btcsuite developers
# Copyright (c) 2019-2020 The atomicswap-qt developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import IntEnum
from hashlib import sha256
from typing import Tuple
import hmac
class Secp256k1(IntEnum):
    """Domain parameters of the secp256k1 curve (y^2 = x^3 + 7 over GF(p)).

    Implemented as an IntEnum so each constant behaves like a plain int in
    the modular arithmetic below.
    """
    # Field prime.
    p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f
    # Group order of the base point G.
    n = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
    # Curve equation constant b (a is 0 for secp256k1).
    b = 0x0000000000000000000000000000000000000000000000000000000000000007
    # Base-point coordinates.
    gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
    gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
    bitsize = 256
    # presumably the exponent for modular square roots (valid since p % 4 == 3);
    # unused in this chunk — TODO confirm.
    q = (p + 1) // 4
    # Cofactor.
    h = 1
    # n >> 1; used to enforce low-S signatures.
    half_order = n >> 1
class Signature:
    """An ECDSA signature pair (r, s)."""

    def __init__(self, r: int, s: int):
        self.r = r
        self.s = s

    def signature_serialize(self) -> bytes:
        """Return the DER encoding of this signature, normalizing s to its low form."""
        # Enforce low-S: if s is above half the group order, use n - s instead.
        low_s = self.s if self.s <= Secp256k1.half_order else Secp256k1.n - self.s

        r_enc = canonicalize_int(self.r)
        s_enc = canonicalize_int(low_s)

        # Each value is an ASN.1 INTEGER: tag 0x02, length, payload.
        body = b"\x02" + bytes([len(r_enc)]) + r_enc
        body += b"\x02" + bytes([len(s_enc)]) + s_enc

        # Outer SEQUENCE: tag 0x30, length of the two INTEGER elements.
        return b"\x30" + bytes([len(body)]) + body
def canonicalize_int(val: int) -> bytes:
    """Return the minimal big-endian DER INTEGER payload for non-negative *val*.

    Zero encodes as a single zero byte, and a zero byte is prepended when
    the high bit of the first byte is set so the value cannot be read as
    a negative two's-complement number.
    """
    # Minimal byte length is (bit_length + 7) // 8; this replaces the old
    # hex-string-length heuristic that relied on catching OverflowError
    # from to_bytes() to correct an undersized buffer.
    b = val.to_bytes((val.bit_length() + 7) // 8, "big")
    if len(b) == 0:
        # val == 0 -> empty encoding; DER requires at least one byte.
        b = bytes(1)
    if b[0] & 0x80 != 0:
        # High bit set -> pad so the integer stays non-negative.
        b = bytes(1) + b
    return b
def sign_rfc6979(priv_key: int, in_hash: bytes) -> Signature:
    """Produce a deterministic (RFC 6979) low-S ECDSA signature of *in_hash*.

    Raises Exception if the derived r or s is zero (vanishingly unlikely).
    """
    n = Secp256k1.n
    half_order = Secp256k1.half_order
    # Deterministic nonce k derived from the key and message hash.
    k = nonce_rfc6979(priv_key, in_hash)
    inv = mod_inv(k, n)
    # Encode k as big-endian bytes for scalar_base_mult. Using the hex-digit
    # count as the BYTE count over-allocates (~2x), but the extra leading zero
    # bytes do not change the scalar value, so the result is unaffected.
    # The except branch is effectively dead: a buffer of one byte per hex
    # digit always fits the value.
    try:
        k_bytes = k.to_bytes(len(hex(k)[2:]), "big")
    except Exception:
        k_bytes = k.to_bytes(len(hex(k)), "big")
    # r is the x-coordinate of k*G, reduced mod n.
    r, _ = scalar_base_mult(k_bytes)
    r %= n
    if r == 0:
        raise Exception("Calculated R is zero!")
    e = hash_to_int(in_hash)
    # s = k^-1 * (e + r*d) mod n.
    s = ((priv_key * r + e) * inv) % n
    # Normalize to the low-S form so signatures are canonical.
    if s > half_order:
        s = n - s
    if s == 0:
        raise Exception("Calculated S is zero!")
    return Signature(r, s)
# https://tools.ietf.org/html/rfc6979#section-3.2
def nonce_rfc6979(priv_key: int, in_hash: bytes) -> int:
    """Derive the deterministic ECDSA nonce k per RFC 6979 section 3.2 (HMAC-SHA256).

    Returns an integer in [1, n-1]; loops (steps H1-H3) until the candidate
    falls in range.
    """
    q = Secp256k1.n
    x = priv_key
    alg = sha256
    qlen = q.bit_length()
    holen = alg().digest_size
    # Octet length of q, rounded up.
    rolen = (qlen + 7) >> 3
    # Step A material: int2octets(x) || bits2octets(h1).
    bx = [int2octets(x, rolen), bits2octets(in_hash, rolen)]
    # Step B
    v = b"\x01" * holen
    # Step C
    k = b"\x00" * holen
    # Step D: K = HMAC_K(V || 0x00 || int2octets(x) || bits2octets(h1))
    k = hmac.new(k, digestmod=alg)
    k.update(v + b"\x00")
    for i in bx:
        k.update(i)
    k = k.digest()
    # Step E
    v = hmac.new(k, v, alg).digest()
    # Step F: same as D but with separator byte 0x01.
    k = hmac.new(k, digestmod=alg)
    k.update(v + b"\x01")
    for i in bx:
        k.update(i)
    k = k.digest()
    # Step G
    v = hmac.new(k, v, alg).digest()
    # Step H: generate candidates until one lies in [1, q-1].
    while True:
        # Step H1
        t = b""
        # Step H2: concatenate HMAC outputs until we have rolen bytes.
        while len(t) < rolen:
            v = hmac.new(k, v, alg).digest()
            t += v
        # Step H3: convert to an integer; accept if in range, else re-key.
        secret = hash_to_int(t)
        if 1 <= secret < q:
            return secret
        k = hmac.new(k, v + b"\x00", alg).digest()
        v = hmac.new(k, v, alg).digest()
def egcd(a: int, b: int) -> Tuple[int, int, int]:
    """Extended Euclid: return (g, x, y) such that a*x + b*y == g == gcd(a, b)."""
    if a == 0:
        # Base case: gcd(0, b) = b = 0*0 + b*1.
        return b, 0, 1
    g, s, t = egcd(b % a, a)
    # Back-substitute: coefficients for (b % a, a) become coefficients for (a, b).
    return g, t - (b // a) * s, s
def mod_inv(a: int, m: int):
    """Return the multiplicative inverse of a modulo m; raises if gcd(a, m) != 1."""
    g, x, _ = egcd(a, m)
    if g == 1:
        return x % m
    raise Exception("modular inverse does not exist")
def hash_to_int(v: bytes) -> int:
    """Convert hash bytes to an integer truncated to the curve bit size (RFC 6979 bits2int)."""
    max_bytes = (Secp256k1.bitsize + 7) // 8
    # Keep only the leftmost bitsize-worth of bytes.
    data = v[:max_bytes] if len(v) > max_bytes else v
    value = int.from_bytes(data, "big")
    # Drop any bits beyond bitsize by right-shifting the surplus.
    surplus_bits = 8 * len(data) - Secp256k1.bitsize
    return value >> surplus_bits if surplus_bits > 0 else value
# https://tools.ietf.org/html/rfc6979#section-2.3.3
def int2octets(v: int, rolen: int) -> bytes:
    """Encode non-negative integer *v* as exactly *rolen* big-endian bytes.

    Left-pads with zero bytes when the value is short, and keeps only the
    low-order *rolen* bytes when it is long (RFC 6979 section 2.3.3).
    """
    # Minimal encoding via bit_length; replaces the old hex-string-length
    # heuristic that relied on catching OverflowError from to_bytes().
    out = v.to_bytes((v.bit_length() + 7) // 8, "big")
    if len(out) < rolen:
        return bytes(rolen - len(out)) + out
    if len(out) > rolen:
        return out[len(out) - rolen:]
    return out
# https://tools.ietf.org/html/rfc6979#section-2.3.4
def bits2octets(v: bytes, rolen: int) -> bytes:
    """Convert hash bytes to a rolen-byte string of the value reduced mod n (RFC 6979 2.3.4)."""
    z1 = hash_to_int(v)
    # One conditional subtraction suffices because z1 < 2 * n here.
    reduced = z1 if z1 < Secp256k1.n else z1 - Secp256k1.n
    return int2octets(reduced, rolen)
def scalar_base_mult(k: bytes) -> Tuple[int, int]:
    """Compute k*G for big-endian scalar bytes *k*; returns affine (x, y).

    Classic MSB-first double-and-add over Jacobian coordinates; the
    accumulator starts at the point at infinity (Z == 0).
    """
    px, py, pz = 0, 0, 0
    for byte in k:
        for bit in range(7, -1, -1):
            px, py, pz = double_jacobian(px, py, pz)
            if (byte >> bit) & 1:
                px, py, pz = add_jacobian(Secp256k1.gx, Secp256k1.gy, 1, px, py, pz)
    return affine_from_jacobian(px, py, pz)
def add_jacobian(x1: int, y1: int, z1: int, x2: int, y2: int, z2: int) -> Tuple[int, int, int]:
    """Add two secp256k1 points in Jacobian coordinates (add-2007-bl formulas).

    Z == 0 marks the point at infinity. When both inputs are the same point
    the addition formulas degenerate (H == 0 and r == 0), so we fall back to
    point doubling.
    """
    if z1 == 0:
        return x2, y2, z2
    if z2 == 0:
        return x1, y1, z1
    z1z1 = (z1 ** 2) % Secp256k1.p
    z2z2 = (z2 ** 2) % Secp256k1.p
    u1 = (x1 * z2z2) % Secp256k1.p
    u2 = (x2 * z1z1) % Secp256k1.p
    h = u2 - u1
    x_equal = h == 0
    if h < 0:
        h += Secp256k1.p
    i = (h << 1) ** 2
    j = h * i
    s1 = (y1 * z2 * z2z2) % Secp256k1.p
    s2 = (y2 * z1 * z1z1) % Secp256k1.p
    r = s2 - s1
    if r < 0:
        r += Secp256k1.p
    y_equal = r == 0
    if x_equal and y_equal:
        # BUG FIX: previously called double_jacobian(x1, x2, x3) with the
        # zero-initialized x3, which yielded the point at infinity instead of
        # doubling the input point. Doubling must use the full point (x1, y1, z1).
        return double_jacobian(x1, y1, z1)
    r = r << 1
    v = u1 * i
    x3 = (r ** 2 - (j + v * 2)) % Secp256k1.p
    v -= x3
    s1 = (s1 * j) << 1
    y3 = (r * v - s1) % Secp256k1.p
    z3 = (((z1 + z2) ** 2 - (z1z1 + z2z2)) * h) % Secp256k1.p
    return x3, y3, z3
def double_jacobian(x: int, y: int, z: int) -> Tuple[int, int, int]:
    """Double a point in Jacobian coordinates (dbl-2009-l formulas).

    Valid only for curves with a == 0, which holds for secp256k1. A zero
    input (Z == 0, the point at infinity) maps to Z3 == 0.
    """
    a = x ** 2                                   # A = X^2
    b = y ** 2                                   # B = Y^2
    c = b ** 2                                   # C = B^2
    d = ((x + b) ** 2 - (a + c)) * 2             # D = 2*((X+B)^2 - A - C)
    e = 3 * a                                    # E = 3*A  (a == 0, no a*Z^4 term)
    f = e ** 2                                   # F = E^2
    x3 = (f - (2 * d)) % Secp256k1.p             # X3 = F - 2*D
    y3 = (e * (d - x3) - (8 * c)) % Secp256k1.p  # Y3 = E*(D - X3) - 8*C
    z3 = (y * z * 2) % Secp256k1.p               # Z3 = 2*Y*Z
    return x3, y3, z3
def affine_from_jacobian(x: int, y: int, z: int) -> Tuple[int, int]:
    """Map Jacobian (X, Y, Z) to affine (X/Z^2, Y/Z^3) mod p; Z == 0 yields (0, 0)."""
    if z == 0:
        # Point at infinity.
        return 0, 0
    p = Secp256k1.p
    inv = mod_inv(z, p)
    inv2 = inv * inv
    inv3 = inv2 * inv
    return (x * inv2) % p, (y * inv3) % p
def pubkey_from_privkey(privkey: bytes) -> bytes:
    """Derive the 33-byte compressed SEC public key for *privkey* (big-endian scalar bytes)."""
    x, y = scalar_base_mult(privkey)
    # Compressed-key prefix encodes the parity of y: 0x02 for even, 0x03 for odd.
    prefix = 0x02 | (y & 1)
    # x < p < 2**256, so it always fits in 32 big-endian bytes. This replaces
    # the old hex-string-length heuristic and the manual zero-padding loop,
    # which produced the same 1 + 32 byte layout.
    return prefix.to_bytes(1, "big") + x.to_bytes(32, "big")
| 26.659091 | 95 | 0.564974 |
9476586f3c52229ac088f68a10578e5bf129d58f | 296 | py | Python | build/repo/root_dir/finder.py | fifoforlifo/nstd | 68f5b370e10b2d1e078027ecbc040b7eaa9e08aa | [
"BSL-1.0"
] | null | null | null | build/repo/root_dir/finder.py | fifoforlifo/nstd | 68f5b370e10b2d1e078027ecbc040b7eaa9e08aa | [
"BSL-1.0"
] | null | null | null | build/repo/root_dir/finder.py | fifoforlifo/nstd | 68f5b370e10b2d1e078027ecbc040b7eaa9e08aa | [
"BSL-1.0"
] | null | null | null | # This file is intentionally in a directory that is not located in sys.path.
# That causes the python runtime to return an absolute path for __file__.
import os
def get_root_dir():
    """Return the repository root: three directory levels above this file.

    This file lives at <root>/build/repo/root_dir/, so stepping up three
    levels lands on the repository root.
    """
    abs_dir = os.path.dirname(__file__)
    # BUG FIX: the old code concatenated `abs_dir + "../../../.."` with no
    # path separator; it only produced the right answer because normpath
    # treated the fused "root_dir.." as one discardable segment, effectively
    # going up three levels. Make the three-level ascent explicit.
    return os.path.normpath(os.path.join(abs_dir, "..", "..", ".."))
| 29.6 | 76 | 0.706081 |
49b712cd9d0438fa641c910d7378efd29aea5763 | 3,409 | py | Python | nameko/nameko/cli/commands.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | [
"MIT"
] | 2 | 2017-07-11T18:56:27.000Z | 2017-07-28T14:01:12.000Z | nameko/cli/commands.py | appetito/nameko | 960f2864c476d40469cbc0412927000517f739d4 | [
"Apache-2.0"
] | 1 | 2017-07-28T13:53:41.000Z | 2017-07-31T15:30:40.000Z | nameko/nameko/cli/commands.py | Alecto3-D/testable-greeter | 09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78 | [
"MIT"
] | null | null | null | """Commands are defined in here, with imports inline, to avoid triggering
imports from other subcommands (e.g. `run` will cause an eventlet monkey-patch
which we don't want for `shell`)
"""
from .actions import FlagAction
class Command(object):
    """Abstract base class for nameko CLI sub-commands.

    Subclasses set :attr:`name` and implement :meth:`init_parser` and
    :meth:`main`; they are discovered via ``Command.__subclasses__()`` at
    the bottom of this module.
    """
    # Sub-command name as typed on the command line (e.g. "run").
    name = None

    @staticmethod
    def init_parser(parser):
        # Subclasses add their arguments to *parser* and return it.
        raise NotImplementedError  # pragma: no cover

    @staticmethod
    def main(args):
        # import inline to avoid triggering imports from other subcommands
        raise NotImplementedError  # pragma: no cover
class Backdoor(Command):
    """Connect to a nameko backdoor.

    If a backdoor is running this will connect to a remote shell. The
    runner is generally available as `runner`.
    """
    name = 'backdoor'

    @staticmethod
    def init_parser(parser):
        parser.add_argument(
            'target', metavar='[host:]port',
            help="(host and) port to connect to",
        )
        parser.add_argument(
            '--rlwrap', dest='rlwrap', action=FlagAction,
            help='Use rlwrap')
        # NOTE(review): no argument above defines 'feature'; confirm this
        # default is still consumed anywhere.
        parser.set_defaults(feature=True)
        return parser

    @staticmethod
    def main(args):
        # Imported lazily so other subcommands don't pull in backdoor deps.
        from .backdoor import main
        main(args)
class Run(Command):
    """Run nameko services. Given a python path to a module containing one or
    more nameko services, will host and run them. By default this will try to
    find classes that look like services (anything with nameko entrypoints),
    but a specific service can be specified via
    ``nameko run module:ServiceClass``.
    """
    name = 'run'

    @staticmethod
    def init_parser(parser):
        parser.add_argument(
            'services', nargs='+',
            metavar='module[:service class]',
            help='python path to one or more service classes to run')
        parser.add_argument(
            '--config', default='',
            help='The YAML configuration file')
        parser.add_argument(
            '--broker', default='pyamqp://guest:guest@localhost',
            help='RabbitMQ broker url')
        parser.add_argument(
            '--backdoor-port', type=int,
            help='Specify a port number to host a backdoor, which can be'
            ' connected to for an interactive interpreter within the running'
            ' service process using `nameko backdoor`.')
        return parser

    @staticmethod
    def main(args):
        # Imported lazily: per the module docstring, importing run triggers
        # an eventlet monkey-patch that other subcommands must avoid.
        from .run import main
        main(args)
class Shell(Command):
    """Launch an interactive python shell for working with remote nameko
    services.

    This is a regular interactive interpreter, with a special module ``n``
    added to the built-in namespace, providing ``n.rpc`` and
    ``n.dispatch_event``.
    """
    name = 'shell'

    # Supported interpreter front-ends for the --interface option.
    SHELLS = ['bpython', 'ipython', 'plain']

    @classmethod
    def init_parser(cls, parser):
        parser.add_argument(
            '--broker', default='pyamqp://guest:guest@localhost',
            help='RabbitMQ broker url')
        parser.add_argument(
            '--interface', choices=cls.SHELLS,
            help='Specify an interactive interpreter interface.')
        parser.add_argument(
            '--config', default='',
            help='The YAML configuration file')
        return parser

    @staticmethod
    def main(args):
        # Imported lazily: the shell must not trigger the eventlet
        # monkey-patch performed by the run subcommand's imports.
        from .shell import main
        main(args)
# Collect every Command subclass defined above; relies on all subclasses
# being defined before this line executes.
commands = Command.__subclasses__()  # pylint: disable=E1101
| 28.173554 | 78 | 0.624523 |
a1b1a75f67a498a33198e4b92ee3bd1c492497f9 | 1,136 | py | Python | vigil/migrations/0014_auto_20180228_1321.py | inuitwallet/vigil | 0367237edf96587c4101b909a7d748ba215b309a | [
"MIT"
] | null | null | null | vigil/migrations/0014_auto_20180228_1321.py | inuitwallet/vigil | 0367237edf96587c4101b909a7d748ba215b309a | [
"MIT"
] | 8 | 2020-06-06T06:34:55.000Z | 2021-09-22T19:43:55.000Z | vigil/migrations/0014_auto_20180228_1321.py | inuitwallet/vigil | 0367237edf96587c4101b909a7d748ba215b309a | [
"MIT"
] | null | null | null | # Generated by Django 2.0.2 on 2018-02-28 13:21
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.0.2).

    Renames AlertChannel.text to .message, drops .data, and adds alert_id,
    priority and title. Applied migrations are frozen history — do not
    hand-edit.
    """

    dependencies = [
        ('vigil', '0013_auto_20180227_1700'),
    ]

    operations = [
        migrations.RenameField(
            model_name='alertchannel',
            old_name='text',
            new_name='message',
        ),
        migrations.RemoveField(
            model_name='alertchannel',
            name='data',
        ),
        migrations.AddField(
            model_name='alertchannel',
            name='alert_id',
            field=models.UUIDField(default=uuid.uuid4),
        ),
        migrations.AddField(
            model_name='alertchannel',
            name='priority',
            field=models.CharField(blank=True, choices=[('LOW', 'Low'), ('MEDIUM', 'Medium'), ('HIGH', 'High'), ('URGENT', 'Urgent'), ('EMERGENCY', 'Emergency')], max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='alertchannel',
            name='title',
            field=models.CharField(blank=True, max_length=500, null=True),
        ),
    ]
| 29.128205 | 190 | 0.558099 |
7d1eb34d3b91b3e75466c80eb0b4e969e82302be | 34,176 | py | Python | test/functional/rpc_fundrawtransaction.py | kadirada/wtf | 00a6d423ec2b9776f51eb8dddeaf6a96050564f3 | [
"MIT"
] | null | null | null | test/functional/rpc_fundrawtransaction.py | kadirada/wtf | 00a6d423ec2b9776f51eb8dddeaf6a96050564f3 | [
"MIT"
] | null | null | null | test/functional/rpc_fundrawtransaction.py | kadirada/wtf | 00a6d423ec2b9776f51eb8dddeaf6a96050564f3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
    """Return the first unspent entry whose 'amount' equals *amount*; raise if none exists."""
    match = next((entry for entry in listunspent if entry['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
    def set_test_params(self):
        # Four nodes starting from a fresh (empty) regtest chain.
        self.num_nodes = 4
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        # Wallet functionality is required; skip when built without it.
        self.skip_if_no_wallet()
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
watchonly_vout = find_vout_for_address(self.nodes[0], watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid ragecoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
# Again lock the watchonly UTXO or nodes[0] may spend it, because
# lockunspent is memory-only and thus lost on restart
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 1*min_relay_tx_fee}) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
# Standard functional-test entry point: run the suite when executed directly.
if __name__ == '__main__':
    RawTransactionsTest().main()
| 45.146631 | 223 | 0.574731 |
bd7072639522561c0c07baea58ef7bc83e0ae898 | 6,944 | py | Python | h/db/types.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | h/db/types.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | h/db/types.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | """Custom SQLAlchemy types for use with the Annotations API database."""
import base64
import binascii
import uuid
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.exc import DontWrapMixin
# A magic byte (expressed as two hexadecimal nibbles) which we use to expand a
# 15-byte ElasticSearch flake ID into a 16-byte UUID.
#
# The UUID specification defines UUIDs as taking the form
#
# xxxxxxxx-xxxx-Mxxx-Nxxx-xxxxxxxxxxxx
#
# in the canonical hexadecimal representation. M and N represent the UUID
# version and variant fields respectively. The four bits M can take values {1,
# 2, 3, 4, 5} in specified UUID types, and the first three bits of N can take
# the values {8, 9, 0xa, 0xb} in specified UUID types.
#
# In order to expand a 15-byte ElasticSearch flake ID into a value that can be
# stored in the UUID field, we insert the magic nibbles 0xe, 0x5 into the
# version and variant fields respectively. These values are disjoint with any
# specified UUID so the resulting UUID can be distinguished from those
# generated by, for example, PostgreSQL's uuid_generate_v1mc(), and mapped back
# to a 20-char ElasticSearch flake ID.
ES_FLAKE_MAGIC_BYTE = ["e", "5"]  # [version nibble, variant nibble] inserted into flake-ID UUIDs; see comment above
class InvalidUUID(Exception, DontWrapMixin):
    """Raised when a value cannot be interpreted as a valid encoded UUID.

    Inherits SQLAlchemy's DontWrapMixin so the exception is re-raised as-is
    instead of being wrapped in a StatementError by the DBAPI layer.
    """

    pass
class URLSafeUUID(types.TypeDecorator):
    """
    Expose UUIDs as URL-safe base64-encoded strings.

    Fields decorated with this type decorator use PostgreSQL UUID fields for
    storage, but expose URL-safe strings in the application.

    This type decorator will handle the transformation between any UUID and a
    URL-safe, base64-encoded string version of that UUID (which will be 22
    characters long). In addition, it will transparently map post-v1.4
    ElasticSearch flake IDs (which are 20 characters long and map to 15 bytes
    of data).
    """

    # Stored in the DB as PostgreSQL's native UUID column type.
    impl = postgresql.UUID
    # The conversion is stateless, so compiled expressions may be cached.
    cache_ok = True

    def process_bind_param(self, value, dialect):
        # Application -> database: URL-safe string becomes a hex UUID.
        return self.url_safe_to_hex(value)

    def process_result_value(self, value, dialect):
        # Database -> application: hex UUID becomes a URL-safe string.
        return self.hex_to_url_safe(value)

    @staticmethod
    def url_safe_to_hex(value):
        """
        Return the hex version of the given URL-safe UUID.

        Converts UUID's from the application-level URL-safe format to the hex
        format that's used internally in the DB.
        """
        if value is None:
            return None
        return _get_hex_from_urlsafe(value)

    @staticmethod
    def hex_to_url_safe(value):
        """
        Return the URL-safe version of the given hex-format UUID.

        Converts UUID's from the database-internal hex format to the URL-safe
        format that's used in the application.
        """
        if value is None:
            return None
        # Normalise any accepted hex representation to the canonical 32-char form.
        hexstring = uuid.UUID(value).hex
        return _get_urlsafe_from_hex(hexstring)
class AnnotationSelectorJSONB(types.TypeDecorator):
    """
    Special type for the Annotation selector column.

    It transparently escapes NULL (\u0000) bytes to \\u0000 when writing to the
    database, and the other way around when reading from the database, but
    only on the prefix/exact/suffix fields in a TextQuoteSelector.
    """

    impl = postgresql.JSONB

    def process_bind_param(self, value, dialect):
        # Escape NUL bytes on the way in (PostgreSQL JSONB cannot store \u0000).
        return _transform_quote_selector(value, _escape_null_byte)

    def process_result_value(self, value, dialect):
        # Restore the original NUL bytes on the way out.
        return _transform_quote_selector(value, _unescape_null_byte)
def _get_hex_from_urlsafe(value):
    """
    Convert a URL-safe base 64 ID to a hex UUID.

    Accepts either a 22-char base64 string (a full 16-byte UUID) or a 20-char
    base64 string (a 15-byte ElasticSearch flake ID, expanded to a UUID by
    inserting the two magic nibbles).

    :type value: unicode
    :rtype: unicode
    :raises InvalidUUID: if value is not a str or not a valid encoded UUID
    """

    def _fail():
        raise InvalidUUID("{0!r} is not a valid encoded UUID".format(value))

    if not isinstance(value, str):
        raise InvalidUUID("`value` is {}, expected str".format(type(value)))

    bytestr = value.encode()

    if len(bytestr) == 22:
        # 22-char inputs represent 16 bytes of data, which when normally
        # base64-encoded would have two bytes of padding on the end, so we add
        # that back before decoding.
        try:
            data = _must_b64_decode(bytestr + b"==", expected_size=16)
        except (TypeError, binascii.Error):
            _fail()
        return binascii.hexlify(data).decode()

    if len(bytestr) == 20:
        # 20-char inputs represent 15 bytes of data, which requires no padding
        # corrections.
        try:
            data = _must_b64_decode(bytestr, expected_size=15)
        except (TypeError, binascii.Error):
            _fail()
        hexstring = binascii.hexlify(data).decode()
        # These are ElasticSearch flake IDs, so to convert them into UUIDs we
        # insert the magic nibbles at the appropriate points. See the comments
        # on ES_FLAKE_MAGIC_BYTE for details.
        return (
            hexstring[0:12]
            + ES_FLAKE_MAGIC_BYTE[0]
            + hexstring[12:15]
            + ES_FLAKE_MAGIC_BYTE[1]
            + hexstring[15:30]
        )

    # Fallthrough: we must have a received a string of invalid length
    _fail()
def _get_urlsafe_from_hex(value):
    """
    Convert a hex UUID to a URL-safe base 64 ID.

    UUIDs carrying the flake-ID magic nibbles are shrunk back to their
    original 20-char ElasticSearch flake ID form; all other UUIDs become
    22-char base64 strings (padding stripped).

    :type value: unicode
    :rtype: unicode
    """
    # Validate and normalise hex string
    hexstring = uuid.UUID(hex=value).hex

    is_flake_id = (
        hexstring[12] == ES_FLAKE_MAGIC_BYTE[0]
        and hexstring[16] == ES_FLAKE_MAGIC_BYTE[1]
    )

    if is_flake_id:
        # The hex representation of the flake ID is simply the UUID without the
        # two magic nibbles.
        data = binascii.unhexlify(hexstring[0:12] + hexstring[13:16] + hexstring[17:32])
        return base64.urlsafe_b64encode(data).decode()

    # Encode UUID bytes and strip two bytes of padding
    data = binascii.unhexlify(hexstring)
    return base64.urlsafe_b64encode(data)[:-2].decode()
def _must_b64_decode(data, expected_size=None):
result = base64.urlsafe_b64decode(data)
if expected_size is not None and len(result) != expected_size:
raise TypeError("incorrect data size")
return result
def _transform_quote_selector(selectors, transform_func):
if selectors is None:
return None
if not isinstance(selectors, list):
return selectors
for selector in selectors:
if not isinstance(selector, dict):
continue
if not selector.get("type") == "TextQuoteSelector":
continue
if "prefix" in selector:
selector["prefix"] = transform_func(selector["prefix"])
if "exact" in selector:
selector["exact"] = transform_func(selector["exact"])
if "suffix" in selector:
selector["suffix"] = transform_func(selector["suffix"])
return selectors
def _escape_null_byte(s):
if s is None:
return s
return s.replace("\u0000", "\\u0000")
def _unescape_null_byte(s):
if s is None:
return s
return s.replace("\\u0000", "\u0000")
| 31.139013 | 88 | 0.673963 |
ad5fabd716cf1bd5b20b452d4b6db525438fa08d | 405 | py | Python | 1684-count-the-number-of-consistent-strings/1684-count-the-number-of-consistent-strings.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | [
"MIT"
] | 2 | 2021-12-05T14:29:06.000Z | 2022-01-01T05:46:13.000Z | 1684-count-the-number-of-consistent-strings/1684-count-the-number-of-consistent-strings.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | [
"MIT"
] | null | null | null | 1684-count-the-number-of-consistent-strings/1684-count-the-number-of-consistent-strings.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | [
"MIT"
] | null | null | null | class Solution:
def countConsistentStrings(self, allowed: str, words: List[str]) -> int:
result=0
for word in words:
cur = set(c for c in word)
checked=False
for each in cur:
if each not in allowed:
checked=True
break
if not checked:
result+=1
return result | 31.153846 | 76 | 0.479012 |
20dccc838b334bc2334df71150e45565e259a599 | 1,978 | py | Python | sdk/ml/azure-ai-ml/azure/ai/ml/entities/_endpoint/_endpoint_helpers.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/ml/azure-ai-ml/azure/ai/ml/entities/_endpoint/_endpoint_helpers.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/ml/azure-ai-ml/azure/ai/ml/entities/_endpoint/_endpoint_helpers.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import re
from azure.ai.ml.constants import OnlineEndpointConfigurations
from azure.ai.ml._ml_exceptions import ErrorCategory, ErrorTarget, ValidationException
def validate_endpoint_or_deployment_name(name: str, is_deployment: bool = False) -> None:
    """Validates that the name of an endpoint or deployment is:
    1. Between 3 and 32 characters long (inclusive of both ends of the range)
    2. Follows the following regex pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$

    Raises a user-error ValidationException when either rule is violated.
    """
    # Tailor the message wording and error target to whether a deployment or
    # an endpoint name is being validated.
    type_str = "a deployment" if is_deployment else "an endpoint"
    target = ErrorTarget.DEPLOYMENT if is_deployment else ErrorTarget.ENDPOINT
    if (
        len(name) < OnlineEndpointConfigurations.MIN_NAME_LENGTH
        or len(name) > OnlineEndpointConfigurations.MAX_NAME_LENGTH
    ):
        msg = f"The name for {type_str} must be at least 3 and at most 32 characters long (inclusive of both limits)."
        raise ValidationException(
            message=msg, target=target, no_personal_data_message=msg, error_category=ErrorCategory.USER_ERROR
        )
    if not re.match(OnlineEndpointConfigurations.NAME_REGEX_PATTERN, name):
        msg = f"The name for {type_str} must start with an upper- or lowercase letter and only consist of '-'s and alphanumeric characters."
        raise ValidationException(
            message=msg, target=target, no_personal_data_message=msg, error_category=ErrorCategory.USER_ERROR
        )
def validate_identity_type_defined(identity: object) -> None:
    """Raise a user-error ValidationException when an identity object is
    provided without an explicit ``type`` attribute (e.g. missing from the
    supplied YAML). A falsy/absent identity is accepted unchanged.
    """
    if identity and not identity.type:
        msg = "Identity type not found in provided yaml file."
        raise ValidationException(
            message=msg,
            target=ErrorTarget.ENDPOINT,
            no_personal_data_message=msg,
            error_category=ErrorCategory.USER_ERROR,
        )
| 47.095238 | 140 | 0.674924 |
54ccff3afe407e5e6e80c7081881656ee6a08585 | 14,294 | py | Python | translate/train_bead_cv.py | shenghh2015/segmentation_models | 473c528c724f62ff38ac127747dd8babb7de6b85 | [
"MIT"
] | null | null | null | translate/train_bead_cv.py | shenghh2015/segmentation_models | 473c528c724f62ff38ac127747dd8babb7de6b85 | [
"MIT"
] | null | null | null | translate/train_bead_cv.py | shenghh2015/segmentation_models | 473c528c724f62ff38ac127747dd8babb7de6b85 | [
"MIT"
] | null | null | null | import os
import cv2
from skimage import io
import sys
# import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
from natsort import natsorted
# sys.path.append('../')
import segmentation_models_v1 as sm
from segmentation_models_v1 import Unet, Linknet, PSPNet, FPN, AtUnet, ResUnet
sm.set_framework('tf.keras')
from helper_function import plot_history_flu2, save_phase_fl_history, plot_flu_prediction, plot_set_prediction
from helper_function import save_history_for_callback, plot_history_for_callback
from helper_function import precision, recall, f1_score, calculate_psnr, calculate_pearsonr
from sklearn.metrics import confusion_matrix
def str2bool(value):
    """Interpret a command-line flag string as a boolean.

    Any capitalisation of the literal word 'true' maps to True; every other
    string maps to False.
    """
    normalized = value.lower()
    return normalized == 'true'
def generate_folder(folder_name):
    """Create *folder_name* (including any missing parents) if it does not exist.

    Replaces the original ``os.system('mkdir -p …')`` shell-out with
    ``os.makedirs(..., exist_ok=True)``: portable across platforms, no
    subprocess overhead, and immune to shell quoting/injection problems
    with unusual path names. Idempotent, like ``mkdir -p``.
    """
    os.makedirs(folder_name, exist_ok=True)
def read_txt(txt_dir):
    """Read a text file and return its lines with surrounding whitespace stripped.

    Opens the file read-only ('r' instead of the original 'r+', which
    needlessly required write permission and would fail on read-only split
    files), iterates the handle directly instead of readlines(), and drops
    the redundant ``lines = []`` pre-initialisation.

    :param txt_dir: path to the text file (one sample name per line)
    :return: list of stripped lines
    """
    with open(txt_dir, 'r') as f:
        return [line.strip() for line in f]
# ---------------------------------------------------------------------------
# Command-line configuration for a bead-image translation training run.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=str, default = '0')
parser.add_argument("--docker", type=str2bool, default = True)
parser.add_argument("--net_type", type=str, default = 'Unet') #Unet, Linknet, PSPNet, FPN
parser.add_argument("--backbone", type=str, default = 'efficientnetb0')
parser.add_argument("--dataset", type=str, default = 'bead_128x128')
parser.add_argument("--zmax", type=int, default = 150)
parser.add_argument("--subset", type=str, default = 'train')
parser.add_argument("--epoch", type=int, default = 12)
parser.add_argument("--dim", type=int, default = 128)
parser.add_argument("--rot", type=float, default = 0)
parser.add_argument("--scale", type=float, default = 100)
parser.add_argument("--train", type=int, default = None)
parser.add_argument("--act_fun", type=str, default = 'relu')
parser.add_argument("--loss", type=str, default = 'mse')
parser.add_argument("--batch_size", type=int, default = 6)
parser.add_argument("--lr", type=float, default = 5e-4)
parser.add_argument("--decay", type=float, default = 0.8)
parser.add_argument("--fold", type=str, default = '3')
parser.add_argument("--version", type=int, default = 1)
parser.add_argument("--cv", type=int, default = 1)
parser.add_argument("--run", type=int, default = 1)
parser.add_argument("--pre_train", type=str2bool, default = True)
args = parser.parse_args()
print(args)
## screen the fl1
# Encode every hyper-parameter into the model name so a run (and its
# checkpoint folder) can be identified from the artifacts alone.
model_name = 'Cor-bead-net-{}-bone-{}-pre-{}-epoch-{}-batch-{}-lr-{}-dim-{}-train-{}-rot-{}-set-{}-subset-{}-loss-{}-act-{}-scale-{}-decay-{}-zmax-{}-fold-{}-cv-{}-v-{}-run-{}'.format(args.net_type, args.backbone, args.pre_train,\
    args.epoch, args.batch_size, args.lr, args.dim, args.train, args.rot, args.dataset, args.subset, args.loss, args.act_fun, args.scale, args.decay, args.zmax, args.fold, args.cv, args.version, args.run)
print(model_name)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# Dataset location differs between docker and local execution.
DATA_DIR = '/data/datasets/{}'.format(args.dataset) if args.docker else './datasets/{}'.format(args.dataset)
train_dim = args.dim
x_dir = DATA_DIR+'/images'; y_dir = DATA_DIR+'/masks'
fold = args.fold
cv = args.cv
version = args.version
# Cross-validation split files: version 1 uses cross_valid_<fold>/, later
# versions use cross_valid_<fold>_v<version>/.
if version < 2:
    train_file = DATA_DIR+'/cross_valid_{}/train_{}.txt'.format(fold, cv)
    test_file = DATA_DIR+'/cross_valid_{}/valid_{}.txt'.format(fold, cv)
else:
    train_file = DATA_DIR+'/cross_valid_{}_v{}/train_{}.txt'.format(fold, version, cv)
    test_file = DATA_DIR+'/cross_valid_{}_v{}/valid_{}.txt'.format(fold, version, cv)
train_sample_names = read_txt(train_file)
test_sample_names = read_txt(test_file)
# NOTE(review): the condition below is always truthy ('bead_128x128' is a
# non-empty string literal); presumably
# `args.dataset in ('bead_float', 'bead_128x128')` was intended — confirm.
# As written, val_dim is unconditionally 128, which matches the bead datasets.
if args.dataset == 'bead_float' or 'bead_128x128':
    val_dim = 128
# if args.dataset == 'neuron_wbx1' or args.dataset == 'neuron_trn_tst':
# 	val_dim = 1792
# elif args.dataset == 'spheroids_dataset_x1':
# 	val_dim = 1792
# elif args.dataset == 'neuron_wbx2':
# 	val_dim = 896
print(x_dir)
print(y_dir)
# classes for data loading and preprocessing
class Dataset:
    """CamVid Dataset. Read images, apply augmentation and preprocessing transformations.

    Args:
        images_dir (str): path to images folder
        masks_dir (str): path to segmentation masks folder
        class_values (list): values of classes to extract from segmentation mask
        augmentation (albumentations.Compose): data transfromation pipeline
            (e.g. flip, scale, etc.)
        preprocessing (albumentations.Compose): data preprocessing
            (e.g. noralization, shape manipulation, etc.)
    """

    def __init__(
        self,
        images_dir,
        masks_dir,
        sample_names,
        z_range = [0,250],  # NOTE(review): mutable default; safe here as it is only read
        scale = 1.0,
        nb_data=None,
        augmentation=None,
        preprocessing=None,
    ):
        #self.ids = os.listdir(images_dir)
        id_list = natsorted(os.listdir(images_dir))
        # Keep only files whose sample name is in this split and whose z-slice
        # falls inside z_range; file names look like "<sample>_z<k>.npy".
        z_set = ['z{}.npy'.format(z) for z in range(z_range[0], z_range[1])]
        self.ids = [id for id in id_list if id.split('_')[0] in sample_names and id.split('_')[1] in z_set]
        self.images_fps = [os.path.join(images_dir, image_id) for image_id in self.ids]
        self.masks_fps = [os.path.join(masks_dir, image_id) for image_id in self.ids]
        print('Load files: image {}, fl2:{}'.format(len(self.images_fps),len(self.masks_fps)))
        self.scale = scale
        self.augmentation = augmentation
        self.preprocessing = preprocessing

    def __getitem__(self, i):
        # load image and fl1 or fl2 or both
        #image = io.imread(self.images_fps[i])
        #mask = np.expand_dims(io.imread(self.masks_fps[i])/255.*self.scale, axis = -1)
        # Inputs are .npy arrays: image rescaled to [0, 255], mask by `scale`.
        image = np.load(self.images_fps[i]) * 255.
        mask = np.expand_dims(np.load(self.masks_fps[i]) * self.scale , axis = -1)

        # apply augmentations
        if self.augmentation:
            sample = self.augmentation(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']

        # apply preprocessing
        if self.preprocessing:
            sample = self.preprocessing(image=image, mask=mask)
            image, mask = sample['image'], sample['mask']

        return image, mask

    def __len__(self):
        return len(self.ids)
class Dataloder(tf.keras.utils.Sequence):
    """Load data from dataset and form batches

    Args:
        dataset: instance of Dataset class for image loading and preprocessing.
        batch_size: Integet number of images in batch.
        shuffle: Boolean, if `True` shuffle image indexes each epoch.
    """

    def __init__(self, dataset, batch_size=1, shuffle=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.indexes = np.arange(len(dataset))

        self.on_epoch_end()

    def __getitem__(self, i):
        # collect batch data
        start = i * self.batch_size
        stop = (i + 1) * self.batch_size
        data = []
        for j in range(start, stop):
            data.append(self.dataset[j])

        # transpose list of lists
        batch = [np.stack(samples, axis=0) for samples in zip(*data)]
        return (batch[0], batch[1])

    def __len__(self):
        """Denotes the number of batches per epoch"""
        # Floor division: a trailing partial batch is dropped.
        return len(self.indexes) // self.batch_size

    def on_epoch_end(self):
        """Callback function to shuffle indexes each epoch"""
        if self.shuffle:
            self.indexes = np.random.permutation(self.indexes)
import albumentations as A
# define heavy augmentations
def get_training_augmentation(dim, rot = 0):
    """Training-time augmentation: random horizontal flip plus pad-and-crop
    to a dim x dim patch.

    NOTE(review): `rot` is currently unused because the ShiftScaleRotate
    transform below is commented out — confirm this is intentional.
    """
    train_transform = [

        A.HorizontalFlip(p=0.5),
        #
        # A.ShiftScaleRotate(scale_limit=0.5, rotate_limit=rot, shift_limit=0.1, p=1, border_mode=0),
        #
        A.PadIfNeeded(min_height=dim, min_width=dim, always_apply=True, border_mode=0),
        A.RandomCrop(height=dim, width=dim, always_apply=True),
        #
        # A.IAAAdditiveGaussianNoise(p=0.2),
        # A.IAAPerspective(p=0.5),
        #
        # A.OneOf(
        #     [
        #         A.CLAHE(p=1),
        #         A.RandomBrightness(p=1),
        #         A.RandomGamma(p=1),
        #     ],
        #     p=0.9,
        # ),
        #
        # A.OneOf(
        #     [
        #         A.IAASharpen(p=1),
        #         A.Blur(blur_limit=3, p=1),
        #         A.MotionBlur(blur_limit=3, p=1),
        #     ],
        #     p=0.9,
        # ),
        #
        # A.OneOf(
        #     [
        #         A.RandomContrast(p=1),
        #         A.HueSaturationValue(p=1),
        #     ],
        #     p=0.9,
        # ),
        # A.Lambda(mask=round_clip_0_1)
    ]
    return A.Compose(train_transform)
def get_validation_augmentation(dim = 128):
    """Add paddings to make image shape divisible by 32"""
    # Validation only pads (no random transforms), so evaluation is deterministic.
    test_transform = [
        A.PadIfNeeded(dim, dim)
        # A.PadIfNeeded(384, 480)
    ]
    return A.Compose(test_transform)
def get_preprocessing(preprocessing_fn):
    """Construct preprocessing transform

    Args:
        preprocessing_fn (callbale): data normalization function
            (can be specific for each pretrained neural network)

    Return:
        transform: albumentations.Compose

    """
    # Only the image is normalised; the mask passes through untouched.
    _transform = [
        A.Lambda(image=preprocessing_fn),
    ]
    return A.Compose(_transform)
## create models
# Hyper-parameters pulled from the parsed CLI arguments.
BACKBONE = args.backbone
BATCH_SIZE = args.batch_size
LR = args.lr
EPOCHS = args.epoch

# processing configuration
preprocess_input = sm.get_preprocessing(BACKBONE)

# define network parameters
n_classes = 1  # single output channel (regression target)
activation = '{}'.format(args.act_fun)

#create model
net_func = globals()[args.net_type]  # resolve Unet/Linknet/PSPNet/FPN/... by name
encoder_weights='imagenet' if args.pre_train else None
model = net_func(BACKBONE, encoder_weights=encoder_weights, classes=n_classes, activation=activation)

# define optomizer
optim = tf.keras.optimizers.Adam(LR)
# NOTE(review): `loss` is only bound when --loss is 'mse'; any other value
# would raise NameError at model.compile() below — confirm intended.
if args.loss == 'mse':
    loss = tf.keras.losses.MSE
from tensorflow.keras import backend as K
def pearson(y_true, y_pred):
    """Pearson correlation coefficient between y_true and y_pred.

    Used as a Keras metric (monitored as 'val_pearson' by the checkpoint
    callback); returns a scalar tensor, nominally in [-1, 1].
    """
    x = y_true
    y = y_pred
    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x-mx, y-my
    r_num = K.sum(tf.multiply(xm,ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    # Guard against division by zero when either input has zero variance
    # (e.g. an all-constant batch); K.epsilon() keeps the metric finite.
    r = r_num / (r_den + K.epsilon())
    return r
# metrics: PSNR bounded by args.scale, plus the Pearson correlation above
metrics = [sm.metrics.PSNR(max_val=args.scale), pearson]
# compile keras model with defined optimizer, loss and metrics
model.compile(optim, loss, metrics)
# Dataset for train images
train_dataset = Dataset(
    x_dir,
    y_dir,
    sample_names = train_sample_names,
    z_range = [0,args.zmax],
    scale = args.scale,
    nb_data=args.train,
    augmentation=get_training_augmentation(train_dim, args.rot),
    preprocessing=get_preprocessing(preprocess_input),
)
# Dataset for validation images (padding only, no random augmentation)
valid_dataset = Dataset(
    x_dir,
    y_dir,
    sample_names = test_sample_names,
    z_range = [0,args.zmax],
    scale = args.scale,
    augmentation=get_validation_augmentation(val_dim),
    preprocessing=get_preprocessing(preprocess_input),
)
train_dataloader = Dataloder(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataloader = Dataloder(valid_dataset, batch_size=1, shuffle=False)
# peek at the first batch to log shapes and value ranges
print(train_dataloader[0][0].shape)
print(train_dataloader[0][1].shape)
print(train_dataloader[0][0].min(), train_dataloader[0][0].max())
print(train_dataloader[0][1].min(), train_dataloader[0][1].max())
# check shapes for errors
assert train_dataloader[0][0].shape == (BATCH_SIZE, train_dim, train_dim, 3)
assert train_dataloader[0][1].shape == (BATCH_SIZE, train_dim, train_dim, n_classes)
model_folder = '/data/2d_models/{}/{}'.format(args.dataset, model_name) if args.docker else './2d_models/{}/{}'.format(args.dataset, model_name)
generate_folder(model_folder)
def concat_tile(im_list_2d):
    """Tile a 2-D list of images: concatenate each row horizontally,
    then stack the rows vertically into a single image."""
    tiled_rows = [cv2.hconcat(row) for row in im_list_2d]
    return cv2.vconcat(tiled_rows)
def save_images(file_name, vols):
    # Tile a stack of 2-D slices into one montage PNG for quick inspection.
    # Channel 1 is used when a second channel exists, otherwise channel 0.
    vols = vols[:,:,:,1] if vols.shape[-1] >= 2 else vols[:,:,:,0]
    shp = vols.shape
    ls, lx, ly = shp
    # Downsample each slice to roughly 128x128 by strided indexing.
    # NOTE(review): int(lx/128) is 0 when lx < 128, making the stride
    # invalid — assumes slices are at least 128x128; confirm.
    sx, sy = int(lx/128), int(ly/128)
    vols = vols[:,::sx,::sy]
    slice_list, rows = [], []
    for si in range(vols.shape[0]):
        slice = vols[si,:,:]
        # Draw a white 1-pixel border around each slice.
        slice[0, :] = 255
        slice[:, 0] = 255
        slice[:, -1] = 255
        slice[-1, :] = 255
        rows.append(slice)
        # Start a new montage row after every 8 slices.
        # NOTE(review): the group containing the very last slice is never
        # appended (the `not si == ...` guard), so a trailing row of tiles
        # is dropped — confirm this is intended (it keeps rows equal-length).
        if si%8 == 7 and not si == vols.shape[0]-1:
            slice_list.append(rows)
            rows = []
    save_img = concat_tile(slice_list)
    cv2.imwrite(file_name, save_img)
class HistoryPrintCallback(tf.keras.callbacks.Callback):
    """Keras callback that accumulates per-epoch logs and, every 5th epoch,
    saves training-history plots plus image/ground-truth/prediction montage
    PNGs for the validation set into the module-level `model_folder`."""
    def __init__(self):
        super(HistoryPrintCallback, self).__init__()
        # Maps each logged metric name to its list of per-epoch values.
        self.history = {}
    def on_epoch_end(self, epoch, logs=None):
        # Accumulate every metric Keras reports for this epoch.
        if logs:
            for key in logs.keys():
                if epoch == 0:
                    self.history[key] = []
                self.history[key].append(logs[key])
        # Every 5th epoch: refresh history plots and dump snapshot montages.
        if epoch%5 == 0:
            plot_history_for_callback(model_folder+'/train_history.png', self.history)
            save_history_for_callback(model_folder, self.history)
            img_vols, gt_vols, pr_vols = [],[],[]
            # Sample ~64 evenly spaced validation items.
            for i in range(0, len(valid_dataset),int(len(valid_dataset)/64)):
                img_vols.append(np.load(valid_dataloader.dataset.images_fps[i]))
                gt_vols.append(valid_dataloader[i][1])
                pr_vols.append(self.model.predict(valid_dataloader[i]))
            img_vols = np.stack(img_vols, axis = 0)
            gt_vols = np.concatenate(gt_vols, axis = 0)
            pr_vols = np.concatenate(pr_vols, axis = 0)
            save_images(model_folder+'/epoch-{}-img.png'.format(epoch), np.uint8(img_vols))
            save_images(model_folder+'/epoch-{}-gt.png'.format(epoch), gt_vols/args.scale*255)
            save_images(model_folder+'/epoch-{}-pr.png'.format(epoch), pr_vols/args.scale*255)
    # if epoch%5 == 0:
    #     plot_history_for_callback(model_folder+'/train_history.png', logs)
# define callbacks for learning rate scheduling and best checkpoints saving
# (checkpoints keep the weights with the highest validation Pearson score)
callbacks = [
    tf.keras.callbacks.ModelCheckpoint(model_folder+'/best_model-{epoch:03d}.h5', monitor='val_pearson', save_weights_only=True, save_best_only=True, mode='max'),
    tf.keras.callbacks.ReduceLROnPlateau(factor=args.decay),
    HistoryPrintCallback(),
]
# train model
# NOTE(review): fit_generator is deprecated in TF2 in favor of model.fit —
# confirm the TensorFlow version pinned for this project.
history = model.fit_generator(
    train_dataloader,
    steps_per_epoch=len(train_dataloader),
    epochs=EPOCHS,
    callbacks=callbacks,
    validation_data=valid_dataloader,
    validation_steps=len(valid_dataloader),
)
5cee99c9c34a27ca9fba6f099499e7f8c6a1f639 | 2,066 | py | Python | drs4Calibration/drs4Calibration_version_1/config.py | fact-project/DrsTemperatureCalibration | 3702ee390c16cf2c5930d4a0f24c1354d036d645 | [
"MIT"
] | null | null | null | drs4Calibration/drs4Calibration_version_1/config.py | fact-project/DrsTemperatureCalibration | 3702ee390c16cf2c5930d4a0f24c1354d036d645 | [
"MIT"
] | null | null | null | drs4Calibration/drs4Calibration_version_1/config.py | fact-project/DrsTemperatureCalibration | 3702ee390c16cf2c5930d4a0f24c1354d036d645 | [
"MIT"
] | null | null | null | from drs4Calibration.drs4Calibration_version_1.constants import NRCHID, NRCELL, ROI, NRTEMPSENSOR
# data_collection .h5 stuff
class data_collection_config:
    """Column layout of the DRS4 data-collection HDF5 file."""
    # One column per recorded quantity.
    column_names = ['TimeBaseline', 'TempBaseline', 'Baseline',
                    'TimeGain', 'TempGain', 'Gain', 'GainStd']
    # Storage dtype per column.
    column_dtype = {'TimeBaseline': 'float32',
                    'TempBaseline': 'float32',
                    'Baseline': 'float32',
                    'TimeGain': 'float32',
                    'TempGain': 'float32',
                    'Gain': 'float32',
                    'GainStd': 'float16'}
    # Number of values stored per row in each column (sized from the
    # hardware constants: channels x cells [x region of interest]).
    column_length = {'TimeBaseline': 1,
                     'TempBaseline': NRTEMPSENSOR,
                     'Baseline': NRCHID*NRCELL*ROI,
                     'TimeGain': 1,
                     'TempGain': NRTEMPSENSOR,
                     'Gain': NRCHID*NRCELL,
                     'GainStd': NRCHID*NRCELL}
class fit_value_config:
    """DRS value kinds for which fit values are computed."""
    drs_value_types = ['Baseline', 'Gain']
class interval_indice_config:
    """Configuration of calibration intervals between hardware changes."""
    # hardware_boundaries
    #
    # 20.05.2014 Camera repair, Replacement of Capacitors
    # 26.5.2015 Replacement FAD board (crate 2, board 0)
    #
    # See also 'https://trac.fact-project.org/wiki/Protected/SystemChanges'
    hardware_boundaries = (['2014-05-20 12',
                            '2015-05-26 12'])
    # There are two reasons to compute no mask for Baseline values:
    # 1. No sufficient standard deviation of the Baseline mean exists.
    # 2. The Baseline mask does not fit in RAM.
    # All Gain-values with a larger error (std dev of the mean)
    # than the 'CutOffErrorFactor' multiplied with the mean of the error
    # from all collected Gain-values for one capacitor will not be used for the fit.
    cut_off_error_factor = {'Gain': 2}
    # Values stored per capacitor cell for each DRS value type.
    drs_values_per_cell = {'Baseline': ROI,
                           'Gain': 1}
    # Physical units ('1' = dimensionless).
    value_units = {'Baseline': 'mV',
                   'Gain': '1'}
    nrCellsPerChid = {'Baseline': NRCELL,
                      'Gain': NRCELL}
| 33.868852 | 97 | 0.556631 |
34009cae6ff47c5b706b22ff6f0f01f7d7be62af | 10,085 | py | Python | python/helpers/pydev/_pydev_runfiles/pydev_runfiles_parallel.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 349 | 2019-05-07T00:15:12.000Z | 2022-03-10T15:05:08.000Z | python/helpers/pydev/_pydev_runfiles/pydev_runfiles_parallel.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 1,095 | 2018-03-01T00:50:11.000Z | 2019-05-06T17:44:15.000Z | python/helpers/pydev/_pydev_runfiles/pydev_runfiles_parallel.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 53 | 2018-03-01T00:33:57.000Z | 2019-05-05T00:50:23.000Z | import unittest
from _pydev_imps._pydev_saved_modules import thread
try:
import Queue
except:
import queue as Queue #@UnresolvedImport
from _pydev_runfiles import pydev_runfiles_xml_rpc
import time
import os
import threading
import sys
#=======================================================================================================================
# flatten_test_suite
#=======================================================================================================================
def flatten_test_suite(test_suite, ret):
    """Recursively append every TestCase contained in `test_suite` to `ret`."""
    if isinstance(test_suite, unittest.TestCase):
        ret.append(test_suite)
    elif isinstance(test_suite, unittest.TestSuite):
        for child in test_suite._tests:
            flatten_test_suite(child, ret)
#=======================================================================================================================
# execute_tests_in_parallel
#=======================================================================================================================
def execute_tests_in_parallel(tests, jobs, split, verbosity, coverage_files, coverage_include):
    '''
    @param tests: list(PydevTestSuite)
        A list with the suites to be run

    @param split: str
        Either 'module' or the number of tests that should be run in each batch

    @param coverage_files: list(file)
        A list with the files that should be used for giving coverage information (if empty, coverage information
        should not be gathered).

    @param coverage_include: str
        The pattern that should be included in the coverage.

    @return: bool
        Returns True if the tests were actually executed in parallel. If the tests were not executed because only 1
        should be used (e.g.: 2 jobs were requested for running 1 test), False will be returned and no tests will be
        run.
        It may also return False if in debug mode (in which case, multi-processes are not accepted)
    '''
    # Never fork worker processes while the debugger is attached.
    try:
        from _pydevd_bundle.pydevd_comm import get_global_debugger
        if get_global_debugger() is not None:
            return False
    except:
        pass #Ignore any error here.
    #This queue will receive the tests to be run. Each entry in a queue is a list with the tests to be run together When
    #split == 'tests', each list will have a single element, when split == 'module', each list will have all the tests
    #from a given module.
    tests_queue = []
    queue_elements = []
    if split == 'module':
        # Group flattened test cases by their (file, module) key.
        module_to_tests = {}
        for test in tests:
            lst = []
            flatten_test_suite(test, lst)
            for test in lst:
                key = (test.__pydev_pyfile__, test.__pydev_module_name__)
                module_to_tests.setdefault(key, []).append(test)
        for key, tests in module_to_tests.items():
            queue_elements.append(tests)
        if len(queue_elements) < jobs:
            #Don't create jobs we will never use.
            jobs = len(queue_elements)
    elif split == 'tests':
        # One batch per individual test case.
        for test in tests:
            lst = []
            flatten_test_suite(test, lst)
            for test in lst:
                queue_elements.append([test])
        if len(queue_elements) < jobs:
            #Don't create jobs we will never use.
            jobs = len(queue_elements)
    else:
        raise AssertionError('Do not know how to handle: %s' % (split,))
    # Encode each batch as "filename|ClassName.testName" strings for the
    # client processes.
    for test_cases in queue_elements:
        test_queue_elements = []
        for test_case in test_cases:
            try:
                test_name = test_case.__class__.__name__+"."+test_case._testMethodName
            except AttributeError:
                #Support for jython 2.1 (__testMethodName is pseudo-private in the test case)
                test_name = test_case.__class__.__name__+"."+test_case._TestCase__testMethodName
            test_queue_elements.append(test_case.__pydev_pyfile__+'|'+test_name)
        tests_queue.append(test_queue_elements)
    # With fewer than 2 jobs there is nothing to parallelize.
    if jobs < 2:
        return False
    sys.stdout.write('Running tests in parallel with: %s jobs.\n' %(jobs,))
    queue = Queue.Queue()
    for item in tests_queue:
        queue.put(item, block=False)
    # One provider (XML-RPC server feeding batches) + one client
    # (subprocess runner) per job.
    providers = []
    clients = []
    for i in range(jobs):
        test_cases_provider = CommunicationThread(queue)
        providers.append(test_cases_provider)
        test_cases_provider.start()
        port = test_cases_provider.port
        if coverage_files:
            clients.append(ClientThread(i, port, verbosity, coverage_files.pop(0), coverage_include))
        else:
            clients.append(ClientThread(i, port, verbosity))
    for client in clients:
        client.start()
    # Poll until every client subprocess has finished.
    client_alive = True
    while client_alive:
        client_alive = False
        for client in clients:
            #Wait for all the clients to exit.
            if not client.finished:
                client_alive = True
                time.sleep(.2)
                break
    for provider in providers:
        provider.shutdown()
    return True
#=======================================================================================================================
# CommunicationThread
#=======================================================================================================================
class CommunicationThread(threading.Thread):
    """Daemon thread hosting an XML-RPC server that hands test batches to
    client subprocesses and relays their result notifications back to the
    main pydev test-runner RPC channel."""
    def __init__(self, tests_queue):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.queue = tests_queue
        # Set to True once the queue has been drained.
        self.finished = False
        from _pydev_bundle.pydev_imports import SimpleXMLRPCServer
        # This is a hack to patch slow socket.getfqdn calls that
        # BaseHTTPServer (and its subclasses) make.
        # See: http://bugs.python.org/issue6085
        # See: http://www.answermysearches.com/xmlrpc-server-slow-in-python-how-to-fix/2140/
        try:
            import BaseHTTPServer
            def _bare_address_string(self):
                host, port = self.client_address[:2]
                return '%s' % host
            BaseHTTPServer.BaseHTTPRequestHandler.address_string = _bare_address_string
        except:
            pass
        # End hack.
        # Create server
        from _pydev_bundle import pydev_localhost
        # Port 0 lets the OS pick a free port; it is read back below.
        server = SimpleXMLRPCServer((pydev_localhost.get_localhost(), 0), logRequests=False)
        server.register_function(self.GetTestsToRun)
        server.register_function(self.notifyStartTest)
        server.register_function(self.notifyTest)
        server.register_function(self.notifyCommands)
        self.port = server.socket.getsockname()[1]
        self.server = server
    def GetTestsToRun(self, job_id):
        '''
        @param job_id:

        @return: list(str)
            Each entry is a string in the format: filename|Test.testName
        '''
        try:
            ret = self.queue.get(block=False)
            return ret
        except: #Any exception getting from the queue (empty or not) means we finished our work on providing the tests.
            self.finished = True
            return []
    def notifyCommands(self, job_id, commands):
        #Batch notification.
        for command in commands:
            getattr(self, command[0])(job_id, *command[1], **command[2])
        return True
    def notifyStartTest(self, job_id, *args, **kwargs):
        # Forward the notification to the main runner's RPC channel.
        pydev_runfiles_xml_rpc.notifyStartTest(*args, **kwargs)
        return True
    def notifyTest(self, job_id, *args, **kwargs):
        pydev_runfiles_xml_rpc.notifyTest(*args, **kwargs)
        return True
    def shutdown(self):
        # Newer SimpleXMLRPCServer exposes shutdown(); otherwise fall back
        # to the flag checked by the handle_request() loop in run().
        if hasattr(self.server, 'shutdown'):
            self.server.shutdown()
        else:
            self._shutdown = True
    def run(self):
        if hasattr(self.server, 'shutdown'):
            self.server.serve_forever()
        else:
            self._shutdown = False
            while not self._shutdown:
                self.server.handle_request()
#=======================================================================================================================
# Client
#=======================================================================================================================
class ClientThread(threading.Thread):
    """Daemon thread launching one worker subprocess that connects back to
    a CommunicationThread on `port` and runs the test batches it is given."""
    def __init__(self, job_id, port, verbosity, coverage_output_file=None, coverage_include=None):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.port = port
        self.job_id = job_id
        self.verbosity = verbosity
        # Set to True when the subprocess exits (polled by the coordinator).
        self.finished = False
        self.coverage_output_file = coverage_output_file
        self.coverage_include = coverage_include
    def _reader_thread(self, pipe, target):
        # Copy subprocess output byte-by-byte to this process's stream.
        while True:
            target.write(pipe.read(1))
    def run(self):
        try:
            from _pydev_runfiles import pydev_runfiles_parallel_client
            #TODO: Support Jython:
            #
            #For jython, instead of using sys.executable, we should use:
            #r'D:\bin\jdk_1_5_09\bin\java.exe',
            #'-classpath',
            #'D:/bin/jython-2.2.1/jython.jar',
            #'org.python.util.jython',
            args = [
                sys.executable,
                pydev_runfiles_parallel_client.__file__,
                str(self.job_id),
                str(self.port),
                str(self.verbosity),
            ]
            if self.coverage_output_file and self.coverage_include:
                args.append(self.coverage_output_file)
                args.append(self.coverage_include)
            import subprocess
            # NOTE(review): the `if False:` branch (piped stdout/stderr with
            # reader threads) is dead code kept for reference — confirm it
            # can be removed.
            if False:
                proc = subprocess.Popen(args, env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                thread.start_new_thread(self._reader_thread,(proc.stdout, sys.stdout))
                thread.start_new_thread(target=self._reader_thread,args=(proc.stderr, sys.stderr))
            else:
                proc = subprocess.Popen(args, env=os.environ, shell=False)
                proc.wait()
        finally:
            # Always signal completion, even if launching the worker failed.
            self.finished = True
| 33.956229 | 122 | 0.564799 |
a215be314677815d71f66745f772820b9b5c373e | 1,741 | py | Python | arrays/lonely_number.py | kandarpck/leetcode | d2ffcccede5d1543aea48f18a39cdbd3d83e3ed8 | [
"MIT"
] | null | null | null | arrays/lonely_number.py | kandarpck/leetcode | d2ffcccede5d1543aea48f18a39cdbd3d83e3ed8 | [
"MIT"
] | null | null | null | arrays/lonely_number.py | kandarpck/leetcode | d2ffcccede5d1543aea48f18a39cdbd3d83e3ed8 | [
"MIT"
] | null | null | null | from collections import Counter
class Solution(object):
    """Find the element occurring exactly once in a list where every other
    element occurs exactly twice, using several alternative strategies."""
    def singleNumber(self, nums):
        """
        Brute-force pairwise comparison.
        Time N^2
        Space 1
        :type nums: List[int]
        :rtype: int
        """
        for idx, candidate in enumerate(nums):
            paired = False
            for other_idx, other in enumerate(nums):
                if other_idx == idx:
                    continue
                if candidate == other:
                    paired = True
            if not paired:
                return candidate
    def singleNumberCounter(self, nums):
        """
        Count occurrences and return the least common element.
        Time N
        Space N (the Counter holds every distinct value)
        :type nums: List[int]
        :rtype: int
        """
        frequencies = Counter(nums)
        least_common = frequencies.most_common()[-1]
        return least_common[0]
    def singleNumberDuplicates(self, nums):
        """
        Track values seen an odd number of times so far.
        Time N^2 (list membership test per element)
        Space N
        :return:
        """
        unpaired = []
        for value in nums:
            if value in unpaired:
                unpaired.remove(value)
            else:
                unpaired.append(value)
        return unpaired.pop()
    def singleNumberMath(self, nums):
        """
        2 * sum(unique values) - sum(all values) isolates the lone value.
        Time: N
        Space: N
        :return:
        """
        unique_total = sum(set(nums))
        return 2 * unique_total - sum(nums)
    def singleNumberXOR(self, nums):
        """
        XOR-fold the list: paired values cancel, leaving the lone value.
        Time: N
        Space: 1
        :param nums:
        :return:
        """
        accumulator = 0
        for value in nums:
            accumulator = accumulator ^ value
        return accumulator
if __name__ == '__main__':
    # Demo: every strategy should print the same lone value (3).
    solver = Solution()
    sample = [1, 1, 9, 3, 4, 2, 9, 2, 4]
    for approach in (solver.singleNumber,
                     solver.singleNumberCounter,
                     solver.singleNumberDuplicates,
                     solver.singleNumberMath,
                     solver.singleNumberXOR):
        print(approach(sample))
| 22.61039 | 54 | 0.482481 |
54cb3723404f368024fe7de356bad316c11edad3 | 5,729 | py | Python | SRcode/AttentionLayers.py | robotic-vision-lab/Attention-With-Varying-Receptive-Fields-Network | e151216ca029dc72ab93e03d6bcacd69161d1c25 | [
"MIT"
] | 1 | 2021-12-10T12:50:39.000Z | 2021-12-10T12:50:39.000Z | SRcode/AttentionLayers.py | robotic-vision-lab/Attention-With-Varying-Receptive-Fields-Network | e151216ca029dc72ab93e03d6bcacd69161d1c25 | [
"MIT"
] | null | null | null | SRcode/AttentionLayers.py | robotic-vision-lab/Attention-With-Varying-Receptive-Fields-Network | e151216ca029dc72ab93e03d6bcacd69161d1c25 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import tensorflow.keras as keras
from tensorflow.keras import datasets, layers, models, regularizers
from pdb import set_trace as trace
class Channel_Attention(layers.Layer):
    """
    Channel Attention Layer

    Global-average-pools the spatial dimensions, squeezes the channel
    vector through a 1x1 conv bottleneck (relu, then sigmoid), and
    re-weights the input channels with the resulting scores.
    """
    def __init__(self, filters, reduction=1, name = None):
        super(Channel_Attention, self).__init__()
        # Bottleneck: filters -> filters // reduction -> filters.
        self.conv1=layers.Conv2D(filters // reduction, kernel_size = (1,1), name =
                'conv2d-1', activation='relu', padding = 'same')
        self.conv2=layers.Conv2D(filters, kernel_size = (1,1), name =
                'conv2d-2', activation='sigmoid', padding = 'same')
    def call(self, x):
        skip_conn=tf.identity(x,name = 'identity')
        channel=x.get_shape()[-1]
        # Global average pool over H, W -> (batch, 1, 1, channels).
        x=tf.reshape(tf.reduce_mean(input_tensor = x, axis = [1,2]),
                (-1,1,1,channel))
        x=self.conv1(x)
        x=self.conv2(x)
        # Gate the original input by the per-channel sigmoid scores.
        x=tf.multiply(skip_conn,x)
        return x
class Scalar_CA(layers.Layer):
    """
    Channel Attention Layer, Returns vector of scalars

    Global-average-pools the input, runs the pooled channel vector through
    a 1x1 conv bottleneck (relu, then sigmoid) and projects it to 2 values
    with a Dense layer.  Unlike Channel_Attention, the input is not
    re-weighted; the 2-element score vector itself is returned.
    """
    def __init__(self, filters, reduction=1, name = None):
        super(Scalar_CA, self).__init__()
        self.conv1=layers.Conv2D(filters // reduction, kernel_size = (1,1), name =
                'conv2d-1', activation='relu', padding = 'same')
        # Fixed: this layer was also named 'conv2d-1', colliding with conv1;
        # renamed to 'conv2d-2' to keep sub-layer names unique (and match
        # Channel_Attention's naming).
        self.conv2=layers.Conv2D(filters, kernel_size = (1,1), name =
                'conv2d-2', activation='sigmoid', padding = 'same')
        self.dense1=layers.Dense(2)
    def call(self, x):
        channel=x.get_shape()[-1]
        # Global average pool over H, W -> (batch, 1, 1, channels).
        x=tf.reshape(tf.reduce_mean(input_tensor = x, axis = [1,2]),
                (-1,1,1,channel))
        x=self.conv1(x)
        x=self.conv2(x)
        # Flatten back to (batch, channels) before the final projection.
        x=tf.reshape(x, (-1,channel))
        x=self.dense1(x)
        return x
class Scale_Attention(layers.Layer):
    """
    Spatial attention: a 7x7 convolution produces a single-channel local
    attention map, which is expanded back to `filters` channels by a 1x1
    bottleneck (linear, then sigmoid) and used to gate the input.
    """
    def __init__(self, filters, reduction=2):
        super(Scale_Attention, self).__init__()
        self.local_attention=layers.Conv2D(1, kernel_size = (7),
                name='merge2d-1', activation = 'linear',
                padding='same',
                strides=1)
        self.conv1=layers.Conv2D(filters // reduction, kernel_size = (1,1), name =
                'conv2d-1', activation='linear', padding = 'same')
        # Fixed: this layer was also named 'conv2d-1', colliding with conv1;
        # renamed to 'conv2d-2' to keep sub-layer names unique.
        self.conv2=layers.Conv2D(filters, kernel_size = (1,1), name =
                'conv2d-2', activation='sigmoid', padding = 'same')
    def call(self, x):
        skip_conn=tf.identity(x,name = 'identity')
        a=self.local_attention(x)
        x=self.conv1(a)
        x=self.conv2(x)
        # Element-wise gating of the original input by the attention mask.
        return tf.multiply(skip_conn,x)
class SOCA(layers.Layer):
    """
    Second Order Channel Attention Layer
    Based on:
    https://github.com/daitao/SAN/edit/master/TrainCode/model/MPNCOV/python/MPNCOV.py

    Builds a channel covariance matrix over a center crop of the input,
    normalizes it with a Newton-Schulz iteration, and derives per-channel
    attention weights from it.
    """
    def __init__(self, filters, reduction=1, input_shape = (48,48)):
        super(SOCA, self).__init__()
        # Two 3x3 convs map the pooled covariance statistics to channel
        # attention weights in (0, 1).
        self.conv_du=models.Sequential()
        self.conv_du.add(layers.Conv2D(filters // reduction,
            padding ='same', activation = 'relu', kernel_size = (3,3)))
        self.conv_du.add(layers.Conv2D(filters, padding =
            'same', activation='sigmoid', kernel_size = (3,3)))
        h,w=input_shape[0], input_shape[1]
        h,w=min(80,h) , min( 80, w)
        # NOTE(review): the crop size computed here (bounded by 80) is
        # overwritten in call() with a bound of 48 — confirm which is intended.
        self.crop=layers.experimental.preprocessing.CenterCrop(h,w, name=None)
    def normalizeCov(self, x, iterN):
        # Newton-Schulz iteration approximating the matrix square root of
        # the trace-normalized covariance, followed by channel pooling.
        batchSize, c=tf.shape(x)[0], x.shape[-1]
        h,w=x.shape[1], x.shape[2]
        I3=3 * tf.eye(c,c, batch_shape = (batchSize,))
        # normA = trace(x): summing x * 3I and scaling by 1/3 picks out the
        # diagonal sum per batch element.
        normA =tf.multiply( (1/3), tf.reduce_sum(tf.multiply(x,I3), axis= [1,2]))
        A=x / tf.reshape(normA, (batchSize,1,1))
        Y=tf.zeros((batchSize, c,c))
        Z=tf.eye(c,c, batch_shape = [batchSize])
        ZY=0.5 * (I3 - A)
        Y=tf.matmul(A,ZY)
        Z=ZY
        for i in range(1, iterN -1):
            ZY=0.5 * (I3 - tf.matmul(Z,Y))
            Y=tf.matmul(Y, ZY)
            Z=tf.matmul(ZY, Z)
        ZY=0.5 * tf.matmul(Y, I3 - tf.matmul(Z,Y))
        # Undo the trace normalization.
        y=ZY * tf.math.sqrt(tf.reshape(normA, (batchSize,1,1)))
        # Mean over the last matrix axis -> one statistic per channel.
        y=tf.reshape(tf.reduce_mean(y, axis = -1), (batchSize,1,1,c))
        return self.conv_du(y)
    def call(self, x):
        self.crop.target_height=min(48,x.shape[1])
        self.crop.target_width=min(48,x.shape[2])
        x_sub=self.crop(x)
        x_sub=tf.expand_dims(x_sub, axis = -1)
        # NOTE(review): h1/w1 are never used — presumably leftovers.
        h1 ,w1=200,200
        ### compute covariance matrix
        h,w, c=x_sub.shape[1], x_sub.shape[2] ,x_sub.shape[3]
        batch_size=tf.shape(x)[0]
        M=(h * w )
        x_sub=tf.reshape(x_sub, (batch_size, c,M))
        Minv= tf.cast(1/M, dtype = tf.float32)
        # Centering matrix I_hat = (1/M)(I - (1/M) * ones).
        I_hat=tf.multiply(Minv, tf.eye(M) - tf.multiply(Minv, tf.ones((M,M))))
        cov=tf.matmul(tf.matmul(x_sub,I_hat), tf.transpose(x_sub, perm= [0,2,1]))
        y_cov=self.normalizeCov(cov, 5)
        return y_cov * x
| 45.110236 | 90 | 0.500611 |
b37715b618acd59cac49d7aa38dc281b6724c602 | 1,159 | py | Python | cogs/anime.py | kach17/discord-bot | 6a77748332802e4bfb84f57713363b59d97aa679 | [
"MIT"
] | 3 | 2021-05-21T13:48:13.000Z | 2022-02-01T05:43:51.000Z | cogs/anime.py | kach17/Discord-bot-python | 6a77748332802e4bfb84f57713363b59d97aa679 | [
"MIT"
] | null | null | null | cogs/anime.py | kach17/Discord-bot-python | 6a77748332802e4bfb84f57713363b59d97aa679 | [
"MIT"
] | 3 | 2021-05-22T18:49:39.000Z | 2022-02-23T08:30:34.000Z | import discord
from discord.ext import commands
import requests
class AnimeCog(commands.Cog):
    """Discord commands that look up anime/manga titles via the Jikan API."""
    def __init__(self, bot):
        self.bot = bot

    async def _send_top_result(self, ctx, query, category, extra_label, extra_key):
        # Shared lookup for both commands: fetch the single best Jikan match
        # for `category` ('anime' or 'manga') and post it as a rich embed.
        # `extra_label`/`extra_key` carry the category-specific field
        # (episodes vs. chapters), preserving each command's exact output.
        results = requests.get(f"https://api.jikan.moe/v3/search/{category}?q={query}&limit=1")
        for i in results.json()["results"]:
            embed = discord.Embed(
                title=f'{i["title"]}',
                url=f'{i["url"]}',
                description=f'{i["synopsis"]}\n\n**Rating: ** {i["score"]}\n{extra_label} {i[extra_key]}',
                color=0x5800db)
            embed.set_image(url=f'{i["image_url"]}')
            await ctx.send(embed=embed)

    @commands.command(name="anime")
    async def anime(self, ctx, *, arg):
        """Look up an anime by name and post its details."""
        await self._send_top_result(ctx, str(arg), "anime", "**Episodes:**", "episodes")

    @commands.command(name="manga")
    async def manga(self, ctx, *, arg):
        """Look up a manga by name and post its details."""
        await self._send_top_result(ctx, str(arg), "manga", "**Chapters: **", "chapters")
def setup(bot):
    # discord.py extension entry point: register this cog on the bot.
    bot.add_cog(AnimeCog(bot))
| 38.633333 | 176 | 0.639344 |
3012ff58e280c08bf101b9eb5d6c1947877e1086 | 225 | py | Python | terrascript/influxdb/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/influxdb/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/influxdb/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # terrascript/influxdb/r.py
import terrascript
class influxdb_database(terrascript.Resource):
    """Terraform resource ``influxdb_database`` (generated stub)."""
    pass
class influxdb_user(terrascript.Resource):
    """Terraform resource ``influxdb_user`` (generated stub)."""
    pass
class influxdb_continuous_query(terrascript.Resource):
    """Terraform resource ``influxdb_continuous_query`` (generated stub)."""
    pass
| 15 | 54 | 0.791111 |
ff97518e0b1c2659201fe2e7ace80ae37b872cb2 | 1,260 | py | Python | bridges/icon/bridge/tests/test_merkle_parts.py | vutting4221/bandchain | 47d6cbb2923d9e8651a3ff03ae1d979458067c5d | [
"Apache-2.0"
] | 251 | 2018-08-03T04:07:16.000Z | 2022-03-27T09:12:38.000Z | bridges/icon/bridge/tests/test_merkle_parts.py | vutting4221/bandchain | 47d6cbb2923d9e8651a3ff03ae1d979458067c5d | [
"Apache-2.0"
] | 2,935 | 2018-08-03T08:59:20.000Z | 2022-03-25T23:44:55.000Z | bridges/icon/bridge/tests/test_merkle_parts.py | vutting4221/bandchain | 47d6cbb2923d9e8651a3ff03ae1d979458067c5d | [
"Apache-2.0"
] | 57 | 2018-08-16T08:10:04.000Z | 2022-03-30T06:16:11.000Z | from ..utils import merkle_part
from tbears.libs.scoretest.score_test_case import ScoreTestCase
class TestBlockHeaderMerkleParts(ScoreTestCase):
    """Checks merkle_part.get_block_header against a known block fixture."""

    def setUp(self):
        super().setUp()

    def test_get_block_header(self):
        # Six 32-byte header parts, concatenated in order.
        part_hexes = [
            "32fa694879095840619f5e49380612bd296ff7e950eafb66ff654d99ca70869e",
            "4BAEF831B309C193CC94DCF519657D832563B099A6F62C6FA8B7A043BA4F3B3B",
            "5E1A8142137BDAD33C3875546E42201C050FBCCDCF33FFC15EC5B60D09803A25",
            "004209A161040AB1778E2F2C00EE482F205B28EFBA439FCB04EA283F619478D9",
            "6E340B9CFFB37A989CA544E6BB780A2C78901D3FB33738768511A30617AFA01D",
            "0CF1E6ECE60E49D19BB57C1A432E805F39BB4F65C366741E4F03FA54FBD90714",
        ]
        data = b''.join(bytes.fromhex(part) for part in part_hexes)
        app_hash = bytes.fromhex(
            "1CCD765C80D0DC1705BB7B6BE616DAD3CF2E6439BB9A9B776D5BD183F89CA141")
        block_header = merkle_part.get_block_header(data, app_hash, 381837)
        self.assertEqual(
            block_header.hex(),
            "a35617a81409ce46f1f820450b8ad4b217d99ae38aaa719b33c4fc52dca99b22"
        )
| 37.058824 | 88 | 0.680159 |
cc0a3e06c921a720839160700fa1cfda7495a8d4 | 61,838 | py | Python | tests/pmap_test.py | joschkabraun/jax | 4f3011f3204dc9b36ed4d9de883ea31c1a6bad40 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-04-06T11:42:04.000Z | 2021-04-06T11:42:04.000Z | tests/pmap_test.py | jg8610/jax | 2a9c2d22cfbaae1c6255e47be0afa7c370910ea3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/pmap_test.py | jg8610/jax | 2a9c2d22cfbaae1c6255e47be0afa7c370910ea3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import itertools as it
import gc
import os
from random import shuffle
from typing import Optional, cast
from unittest import SkipTest
import warnings
import weakref
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jax import test_util as jtu
from jax import tree_util
from jax import lax
from jax import random
from jax.abstract_arrays import ShapedArray
from jax.api import (pmap, soft_pmap, jit, vmap, jvp, grad, make_jaxpr,
linearize, device_put)
from jax.lib import xla_bridge
from jax.util import prod, safe_map
from jax.interpreters import pxla
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
# Previous value of XLA_FLAGS, captured by setUpModule and restored by
# tearDownModule (None when the variable was unset).
prev_xla_flags = None
# TODO(jakevdp): move the following to test_util.py
compatible_shapes = [[(3,)], [(3, 4), (3, 1), (1, 4)], [(2, 3, 4), (2, 1, 4)]]
def all_bdims(*shapes):
  """Yield every combination of batch dims (or None) across `shapes`,
  skipping the combination where every entry is None."""
  axis_choices = [[None, *range(len(shape) + 1)] for shape in shapes]
  for combo in it.product(*axis_choices):
    if any(axis is not None for axis in combo):
      yield combo
def add_bdim(bdim_size, bdim, shape):
  """Return `shape` with `bdim_size` inserted at axis `bdim` (no-op if None)."""
  if bdim is None:
    return tuple(shape)
  dims = list(shape)
  dims.insert(bdim, bdim_size)
  return tuple(dims)
def slicer(x, bdim):
  """Return a function picking index i of x along bdim (constant if bdim is None)."""
  if bdim is None:
    return lambda _: x
  def pick(i):
    return lax.index_in_dim(x, i, bdim, keepdims=False)
  return pick
def args_slicer(args, bdims):
  """Return a function that slices every arg at index i along its bdim."""
  pickers = safe_map(slicer, args, bdims)
  def pick_all(i):
    return [pick(i) for pick in pickers]
  return pick_all
# Run all tests with 8 CPU devices.
def setUpModule():
  """Force 8 virtual CPU devices unless the user already configured one."""
  global prev_xla_flags
  prev_xla_flags = os.getenv("XLA_FLAGS")
  flags = prev_xla_flags or ""
  # Don't override a user-specified device count, or other XLA flags.
  if "xla_force_host_platform_device_count" not in flags:
    os.environ["XLA_FLAGS"] = (flags +
                               " --xla_force_host_platform_device_count=8")
  # Drop cached backends so the new CPU backend picks up the env var.
  xla_bridge.get_backend.cache_clear()
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
  """Restore the pre-test XLA_FLAGS value and reset cached backends."""
  if prev_xla_flags is not None:
    os.environ["XLA_FLAGS"] = prev_xla_flags
  else:
    del os.environ["XLA_FLAGS"]
  xla_bridge.get_backend.cache_clear()
# Test decorator silencing the "soft_pmap is an experimental..." warning.
ignore_soft_pmap_warning = partial(
    jtu.ignore_warning, message="soft_pmap is an experimental.*")
class PmapTest(jtu.JaxTestCase):
  def _getMeshShape(self, device_mesh_shape):
    """Resolve a device mesh shape against the available device count.

    A -1 entry means "infer this dimension" (numpy reshape semantics).
    Raises SkipTest when the requested mesh cannot be formed from the
    available devices.
    """
    device_count = xla_bridge.device_count()
    if any(size == -1 for size in device_mesh_shape):
      try:
        # Let numpy infer the -1 dimension; raises if incompatible.
        return np.arange(device_count).reshape(device_mesh_shape).shape
      except ValueError as err:
        msg = "device mesh shape {} not compatible with device count {}"
        raise SkipTest(msg.format(device_mesh_shape, device_count)) from err
    else:
      if device_count % prod(device_mesh_shape):
        msg = "device mesh size {} does not divide available device count {}"
        raise SkipTest(msg.format(prod(device_mesh_shape), device_count))
      else:
        return device_mesh_shape
def testBasic(self):
f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i')
shape = (xla_bridge.device_count(), 4)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
expected = x - np.sum(x, 0)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testMean(self):
f = pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
shape = (xla_bridge.device_count(), 4)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
expected = x - np.broadcast_to(np.mean(x, 0), x.shape)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testGather(self):
f = pmap(lambda x: lax.all_gather(x, 'i'), axis_name='i')
shape = (xla_bridge.device_count(), 4)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
expected = np.array([x] * xla_bridge.device_count())
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
  def testTrees(self):
    """Collectives applied over pytrees (dicts of arrays) match NumPy."""
    # all_to_all with split/concat both on axis 0.
    ptranspose = lambda x, axis_name: lax.all_to_all(x, axis_name, 0, 0)
    def protate(x, axis_name):
      # Cyclic shift across devices via ppermute.
      n = lax.psum(1, axis_name)
      return lax.ppermute(x, axis_name, [(i, (i + 1) % n) for i in range(n)])
    tree_f = lambda f: partial(tree_util.tree_map, f)
    jax_f = lambda p: pmap(lambda x: p(x, 'i'), 'i')
    # NumPy reference: reduce over axis 0, broadcast back to input shape.
    np_f = lambda p: tree_f(lambda x: np.broadcast_to(p(x, 0), x.shape))
    np_transpose = tree_f(np.transpose)
    np_rotate = tree_f(lambda x: np.concatenate([x[-1:], x[:-1]]))
    n = xla_bridge.device_count()
    x = {'a': np.arange(1 * n * n, 2 * n * n).reshape([n, n]),
         'b': np.arange(2 * n * n, 3 * n * n).reshape([n, n]),
         'c': np.arange(4 * n * n, 5 * n * n).reshape([n, n])}
    assert_allclose = partial(tree_util.tree_multimap,
                              partial(self.assertAllClose, check_dtypes=False))
    assert_allclose(jax_f(lax.pmax)(x), np_f(np.max)(x))
    assert_allclose(jax_f(lax.pmin)(x), np_f(np.min)(x))
    assert_allclose(jax_f(lax.psum)(x), np_f(np.sum)(x))
    assert_allclose(jax_f(lax.pmean)(x), np_f(np.mean)(x))
    if jtu.device_under_test() not in ("cpu", "gpu"):
      # NOTE: all-to-all and ppermute only supported on TPU.
      assert_allclose(jax_f(ptranspose)(x), np_transpose(x))
      assert_allclose(jax_f(protate)(x), np_rotate(x))
def testCollectivesWithTreesOfDifferentDtypes(self):
n = len(jax.devices())
x = {'a': np.arange(1 * n * n, 2 * n * n, dtype=np.float32).reshape([n, n]),
'b': np.arange(2 * n * n, 3 * n * n, dtype=np.int32).reshape([n, n]),
'c': np.arange(4 * n * n, 5 * n * n, dtype=np.float32).reshape([n, n]),
'd': np.arange(6 * n * n, 7 * n * n, dtype=np.int32).reshape([n, n])}
tree_f = lambda f: partial(tree_util.tree_map, f)
jax_f = lambda p: pmap(lambda x: p(x, 'i'), 'i')
np_f = lambda p: tree_f(lambda x: np.broadcast_to(p(x, 0), x.shape))
assert_allclose = partial(tree_util.tree_multimap,
partial(self.assertAllClose, check_dtypes=False))
assert_allclose(jax_f(lax.pmax)(x), np_f(np.max)(x))
assert_allclose(jax_f(lax.pmin)(x), np_f(np.min)(x))
assert_allclose(jax_f(lax.psum)(x), np_f(np.sum)(x))
assert_allclose(jax_f(lax.pmean)(x), np_f(np.mean)(x))
def testComplexPsum(self):
  """psum works on complex64 inputs (float32 pairs viewed as complex)."""
  f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i')

  # 4 * 2 float32 values per device view as 4 complex64 values.
  shape = (xla_bridge.device_count(), 4 * 2)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape).view(np.complex64)
  expected = x - np.sum(x, 0)

  ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)

def testNestedBasic(self):
  """Nested pmap with psum over both inner and outer axis names."""
  f = lambda x: lax.psum(lax.psum(x, 'i'), 'j')
  f = pmap(pmap(f, 'i'), 'j')

  def sum_and_broadcast(x, axis):
    # Reference for a psum over one mapped axis: sum then repeat.
    return np.repeat(np.sum(x, axis, keepdims=True), x.shape[axis], axis)

  shape = (xla_bridge.device_count(), 1, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = f(x)
  expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1)
  self.assertAllClose(ans, expected, check_dtypes=False)

def testMismatchedAxisSizes(self):
  """pmap raises when mapped arguments disagree on the mapped axis size."""
  n = xla_bridge.device_count()
  f = pmap(lambda x, y: x + y)
  self.assertRaisesRegex(
      ValueError,
      "pmap got inconsistent sizes for array axes to be mapped",
      lambda: f(np.random.randn(n), np.random.randn(n - 1)))
@parameterized.named_parameters(
    {"testcase_name": "_mesh={}".format(device_mesh_shape).replace(" ", ""),
     "device_mesh_shape": device_mesh_shape}
    for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testNestedShardingAndStacking(self, device_mesh_shape):
  """Nested identity pmap preserves values and shape on various meshes."""
  mesh_shape = self._getMeshShape(device_mesh_shape)

  f = lambda x: x
  f = pmap(pmap(f, 'i'), 'j')

  shape = mesh_shape + (4,)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = f(x)
  expected = x
  self.assertEqual(ans.shape, expected.shape)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testPartiallyMapped(self):
  """in_axes=None broadcasts an argument instead of sharding it."""
  f = pmap(lambda x, y: x, in_axes=(None, 0))
  g = pmap(lambda x, y: x - lax.psum(y, 'i'), axis_name='i', in_axes=(None, 0))

  mesh_shape = (xla_bridge.device_count(),)
  shape = mesh_shape + (4,)
  x = np.array(3., dtype=np.float32)
  y = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  f_expected = np.broadcast_to(x, mesh_shape)
  f_ans = f(x, y)
  self.assertAllClose(f_ans, f_expected)
  self.assertIsInstance(f_ans, pxla.ShardedDeviceArray)
  # the output is actually replicated (has the same values in each device buffer)
  # but out_axes is implicitly 0, so we shouldn't have replication in the
  # sharding spec.
  self.assertEmpty(f_ans.sharding_spec.replication_factors)

  g_expected = np.broadcast_to(x - np.sum(y, 0, keepdims=True), shape)
  g_ans = g(x, y)
  self.assertAllClose(g_ans, g_expected)
  self.assertIsInstance(g_ans, pxla.ShardedDeviceArray)
  self.assertEmpty(g_ans.sharding_spec.replication_factors)

@parameterized.named_parameters(
    {"testcase_name": "_mesh={}".format(device_mesh_shape).replace(" ", ""),
     "device_mesh_shape": device_mesh_shape}
    for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testPartiallyMappedNested(self, device_mesh_shape):
  """in_axes=None threads a broadcast arg through two pmap levels."""
  mesh_shape = self._getMeshShape(device_mesh_shape)

  f = pmap(lambda x, y: x - lax.psum(y, 'i'), axis_name='i', in_axes=(None, 0))
  f = pmap(f, axis_name='j', in_axes=(None, 0))

  x = 3.
  y = np.arange(prod(mesh_shape), dtype=np.float32).reshape(mesh_shape)
  expected = np.broadcast_to(x - np.sum(y, 1, keepdims=True), mesh_shape)

  ans = f(x, y)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testJvpAndPartialEval(self):
  """linearize of a pmapped function produces the correct tangent map."""
  @partial(pmap, axis_name='i')
  def f(x):
    return jnp.sin(x)

  def splitjvp(x):
    _, jvp = linearize(f, x)
    return jvp(jnp.ones_like(x))

  shape = (xla_bridge.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  # d/dx sin(x) applied to an all-ones tangent is cos(x).
  expected = np.cos(x)

  ans = splitjvp(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
  make_jaxpr(splitjvp)(x)  # doesn't crash

def testGradBasic(self):
  """grad through pmap matches grad of the unmapped computation."""
  @partial(pmap, axis_name='i')
  def f(x):
    return jnp.sin(x)

  shape = (xla_bridge.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = grad(lambda x: jnp.sum(jnp.sin(x)))(x)
  expected = grad(lambda x: jnp.sum(f(x)))(x)
  self.assertAllClose(ans, expected, check_dtypes=False)

def testGradOfPsum(self):
  """Forward- and reverse-mode derivatives of psum are numerically sound."""
  @partial(pmap, axis_name='i')
  def f(x):
    return lax.psum(x, axis_name='i')

  shape = (jax.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  jtu.check_grads(f, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, eps=1.)
def testGradOfJvp(self):
  """grad of a linearized pmapped function matches grad of a plain jvp."""
  @partial(pmap, axis_name='i')
  def f(x):
    return jnp.sin(x)

  def splitjvp(x):
    # `jvp` here is the local tangent map returned by linearize, shadowing
    # the top-level jax.jvp inside this function only.
    _, jvp = linearize(f, x)
    return jvp(jnp.ones_like(x))

  fun = lambda x: jnp.sum(jvp(jnp.sin, (x,), (jnp.ones_like(x),))[1])

  shape = (xla_bridge.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  ans = grad(lambda x: jnp.sum(splitjvp(x)))(x)
  expected = grad(fun)(x)
  self.assertAllClose(ans, expected)
def testTwoArgsGrad(self):
def f(x, y):
return lax.psum(5. * jnp.cos(x) * jnp.sin(y), 'i')
f = pmap(f, 'i')
def g(x, y):
tot = jnp.sum(5. * jnp.cos(x) * jnp.sin(y))
return tot * jnp.ones_like(x) # broadcast to map like pjit does
shape = (xla_bridge.device_count(), 4)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
y = 4 + x
ans = grad(lambda x, y: jnp.sum(g(x, y)))(x, y)
expected = grad(lambda x, y: jnp.sum(g(x, y)))(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
    {"testcase_name": "_mesh={}".format(device_mesh_shape).replace(" ", ""),
     "device_mesh_shape": device_mesh_shape}
    for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testNestedWithClosure(self, device_mesh_shape):
  """Nested pmap with closed-over values agrees with the vmap analogue."""
  mesh_shape = self._getMeshShape(device_mesh_shape)

  @partial(pmap, axis_name='i')
  def test_fun(x):
    y = jnp.sum(jnp.sin(x))

    @partial(pmap, axis_name='j')
    def g(z):
      # Closes over both the outer argument x and the intermediate y.
      return 3. * jnp.exp(jnp.sin(x).sum() * jnp.cos(y) * jnp.tan(z))

    return grad(lambda w: jnp.sum(g(w)))(x)

  @vmap
  def baseline_fun(x):
    y = jnp.sum(jnp.sin(x))

    @vmap
    def g(z):
      return 3. * jnp.exp(jnp.sin(x).sum() * jnp.cos(y) * jnp.tan(z))

    return grad(lambda w: jnp.sum(g(w)))(x)

  shape = mesh_shape + (4,)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = grad(lambda x: jnp.sum(test_fun(x)))(x)
  expected = grad(lambda x: jnp.sum(baseline_fun(x)))(x)
  self.assertAllClose(ans, expected, atol=1e-3)
def testShardedDeviceArrays(self):
  """ShardedDeviceArrays round-trip through pmap, jit, and device moves."""
  f = lambda x: 2 * x
  f = pmap(f, axis_name='i')

  shape = (xla_bridge.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  # test that we can pass in and out ShardedDeviceArrays
  y = f(x)
  self.assertIsInstance(y, jnp.ndarray)
  self.assertIsInstance(y, pxla.ShardedDeviceArray)
  self.assertAllClose(y, 2 * x, check_dtypes=False)
  z = f(y)
  self.assertIsInstance(z, pxla.ShardedDeviceArray)
  self.assertAllClose(z, 2 * 2 * x, check_dtypes=False)

  # test that we can pass in a regular DeviceArray
  y = f(device_put(x))
  self.assertIsInstance(y, pxla.ShardedDeviceArray)
  self.assertAllClose(y, 2 * x, check_dtypes=False)

  # test that we can pass a ShardedDeviceArray to a regular jit computation
  z = y + y
  self.assertAllClose(z, 2 * 2 * x, check_dtypes=False)

  # test that we can handle device movement on dispatch
  y.device_buffers = y.device_buffers[::-1]
  z = f(y)
  self.assertAllClose(z, 2 * 2 * x[::-1], check_dtypes=False)

  # test that the repr doesn't crash
  repr(z)

# Tests edge cases in lax._reshape_sharded_device_array
@parameterized.named_parameters(
    {"testcase_name": "_in={}_out={}".format(in_shape, out_shape)
     .replace(" ", ""),
     "in_shape": in_shape, "out_shape": out_shape}
    for in_shape, out_shape in [
        [(1,1), (1,)], [(1,), (1,1)], [(1,), ()], [(4,7), (2,2,7)]
    ])
def testShardedDeviceArrayReshape(self, in_shape, out_shape):
  """Reshaping a ShardedDeviceArray matches reshaping the host array."""
  if xla_bridge.device_count() < max(in_shape[:1] + out_shape[:1]):
    raise SkipTest("not enough devices")

  x = np.arange(prod(in_shape)).reshape(in_shape)
  sharded_x = pmap(lambda x: x)(x)
  self.assertAllClose(sharded_x.reshape(out_shape), x.reshape(out_shape),
                      check_dtypes=False)
def testPsumMultiple(self):
  """psum over a tuple of axis names reduces across both pmap levels."""
  f = lambda x: lax.psum(x, ('i', 'j'))
  f = pmap(pmap(f, 'i'), 'j')

  def sum_and_broadcast(x, axis):
    return np.repeat(np.sum(x, axis, keepdims=True), x.shape[axis], axis)

  device_count = xla_bridge.device_count()
  num_pairs, ragged = divmod(device_count, 2)
  if num_pairs > 1 and not ragged:
    # Use a genuine 2D device mesh when the device count allows it.
    shape = (num_pairs, 2, 4)
  else:
    shape = (device_count, 1, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  ans = f(x)
  expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1)
  self.assertAllClose(ans, expected, check_dtypes=False)

def testPsumReplicaGroups(self):
  """psum with axis_index_groups reduces within each half independently."""
  replicas = xla_bridge.device_count()
  if replicas % 2 != 0:
    raise SkipTest
  # Split the devices into two equal groups of consecutive indices.
  axis_index_groups = np.arange(replicas).reshape(
      2, replicas // 2).tolist()
  f = lambda x: x - lax.psum(x, 'i', axis_index_groups=axis_index_groups)
  f = pmap(f, 'i')

  shape = (replicas, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  def sum_helper(a):
    return np.broadcast_to(a.sum(0, keepdims=True),
                           (replicas // 2, x.shape[1]))
  expected_psum_1 = sum_helper(x[:replicas // 2])
  expected_psum_2 = sum_helper(x[replicas // 2:])
  expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 0)
  expected = x - expected_psum

  ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testNestedPmapReplicaGroups(self):
  """axis_index_groups inside nested pmaps, on either nesting level."""
  replicas = xla_bridge.device_count()
  if replicas % 4 != 0:
    raise SkipTest
  # Groups refer to the inner axis (size replicas // 2), split in half.
  axis_index_groups = np.arange(replicas // 2).reshape(
      2, replicas // 4).tolist()
  f = lambda x: x - lax.psum(x, 'i', axis_index_groups=axis_index_groups)
  f1 = pmap(pmap(f, 'i'), 'j')
  f2 = pmap(lambda x: pmap(f, 'i')(x) + 1., 'j')  # "imperfectly nested" case
  f3 = pmap(pmap(f, 'j'), 'i')

  shape = (2, replicas // 2, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  def sum_helper_f1(a):
    return np.broadcast_to(a.sum(1, keepdims=True),
                           (shape[0], shape[1] // 2, shape[2]))
  expected_psum_1 = sum_helper_f1(x[:, :replicas // 4])
  expected_psum_2 = sum_helper_f1(x[:, replicas // 4:])
  expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 1)
  expected = x - expected_psum

  ans = f1(x)
  self.assertAllClose(ans, expected)

  expected = x - expected_psum + 1.
  ans = f2(x)
  self.assertAllClose(ans, expected)

  # f3 maps 'i' on the outside, so the grouped reduction is over axis 0.
  shape = (replicas // 2, 2, 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  def sum_helper_f3(a):
    return np.broadcast_to(a.sum(0, keepdims=True),
                           (shape[0] // 2, shape[1], shape[2]))
  expected_psum_1 = sum_helper_f3(x[:replicas // 4])
  expected_psum_2 = sum_helper_f3(x[replicas // 4:])
  expected_psum = np.concatenate([expected_psum_1, expected_psum_2], 0)
  expected = x - expected_psum

  ans = f3(x)
  self.assertAllClose(ans, expected)

def testAxisGroups(self):
  """xla.axis_groups partitions replica ids per axis of an AxisEnv."""
  axis_env = xla.AxisEnv(8, ('i', 'j'), (4, 2))
  groups = xla.axis_groups(axis_env, 'i')
  self.assertEqual(groups, ((0, 2, 4, 6), (1, 3, 5, 7)))

  groups = xla.axis_groups(axis_env, 'j')
  self.assertEqual(groups, ((0, 1), (2, 3), (4, 5), (6, 7)))

  groups = xla.axis_groups(axis_env, ('i', 'j'))
  self.assertEqual(groups, ((0, 1, 2, 3, 4, 5, 6, 7,),))

  groups = xla.axis_groups(axis_env, ('j', 'i'))
  self.assertEqual(len(groups), 1)
  self.assertEqual((tuple(sorted(groups[0])),),
                   ((0, 1, 2, 3, 4, 5, 6, 7,),))  # order doesn't matter
@jtu.skip_on_devices("cpu", "gpu")
def testCollectivePermute(self):
  """ppermute with a cyclic permutation rolls data across devices."""
  device_count = xla_bridge.device_count()
  rotation = [(i, (i + 1) % device_count) for i in range(device_count)]
  f = lambda x: lax.ppermute(x, perm=rotation, axis_name='i')
  f = pmap(f, 'i')

  x = jnp.arange(4 * device_count).reshape((device_count, 4))
  ans = f(x)
  expected = np.roll(x, shift=1, axis=0)
  self.assertAllClose(ans, expected, check_dtypes=False)

@jtu.skip_on_devices("cpu", "gpu")
def testCollectivePermuteGrad(self):
  """Gradient of a non-cyclic shift permutes cotangents the opposite way."""
  device_count = xla_bridge.device_count()
  shift_right = [(i, (i + 1)) for i in range(device_count - 1)]
  f = lambda x: lax.ppermute(x, perm=shift_right, axis_name='i')
  y = np.pi + np.arange(device_count, dtype=np.float32)
  g = lambda x: jnp.sum(y * pmap(f, 'i')(x))

  x = np.arange(device_count, dtype=np.float32)
  ans = grad(g)(x)
  # The last device sends nothing, so its cotangent slot is zero.
  expected = np.concatenate([np.pi + np.arange(1, device_count), [0]])
  self.assertAllClose(ans, expected, check_dtypes=False)

@jtu.skip_on_devices("cpu", "gpu")
def testCollectivePermuteCyclicGrad(self):
  """Gradient of a cyclic shift is the inverse cyclic shift."""
  device_count = xla_bridge.device_count()
  shift_right = [(i, (i + 1) % device_count) for i in range(device_count)]
  f = lambda x: lax.ppermute(x, perm=shift_right, axis_name='i')
  y = np.pi + np.arange(device_count, dtype=np.float32)
  g = lambda x: jnp.sum(y * pmap(f, 'i')(x))

  x = np.arange(device_count, dtype=np.float32)
  ans = grad(g)(x)
  expected = np.roll(np.pi + np.arange(device_count), 1)
  self.assertAllClose(ans, expected, check_dtypes=False)

@jtu.skip_on_devices("cpu")
def testCollectivePermuteCyclicWithPShuffle(self):
  """pshuffle expresses the same cyclic rotation via source indices."""
  device_count = xla_bridge.device_count()
  values = np.arange(device_count)
  shift_right = [(i - 1) % device_count for i in range(device_count)]
  f = lambda x: lax.pshuffle(x, perm=shift_right, axis_name='i')
  expected = np.roll(values, -1)
  ans = np.asarray(pmap(f, "i")(values))
  self.assertAllClose(ans, expected, check_dtypes=False)

@jtu.skip_on_devices("cpu")
def testPShuffleWithBadPerm(self):
  """pshuffle rejects a perm list that is not a true permutation."""
  device_count = xla_bridge.device_count()
  bad_perm = list(range(device_count))
  bad_perm[0] = 1  # index 1 now appears twice; not a permutation
  f = lambda x: lax.pshuffle(x, perm=bad_perm, axis_name='i')
  g = lambda: pmap(f, "i")(np.arange(device_count))
  self.assertRaisesRegex(
      AssertionError,
      "Given `perm` does not represent a real permutation: \\[1.*\\]", g)

@jtu.skip_on_devices("cpu", "gpu")
def testPpermuteWithZipObject(self):
  # https://github.com/google/jax/issues/1703
  """ppermute accepts a zip object (any iterable of pairs) as perm."""
  num_devices = xla_bridge.device_count()
  perm = [num_devices - 1] + list(range(num_devices - 1))
  f = pmap(
      lambda x: lax.ppermute(x, "i", zip(range(num_devices), perm)), "i")
  result = f(jnp.arange(num_devices, dtype=jnp.float32))
  expected = jnp.asarray(perm, dtype=jnp.float32)
  self.assertAllClose(result, expected)
@jtu.skip_on_devices("cpu", "gpu")
def testRule30(self):
  # This is a test of collective_permute implementing a simple halo exchange
  # to run a rule 30 simulation: https://en.wikipedia.org/wiki/Rule_30
  # Halo exchange should be useful in spatially-sharded convolutions and in
  # other simulations.
  device_count = xla_bridge.device_count()

  def send_right(x, axis_name):
    # Cyclic shift of halo cells toward higher device indices.
    left_perm = [(i, (i + 1) % device_count) for i in range(device_count)]
    return lax.ppermute(x, perm=left_perm, axis_name=axis_name)

  def send_left(x, axis_name):
    # Cyclic shift of halo cells toward lower device indices.
    left_perm = [((i + 1) % device_count, i) for i in range(device_count)]
    return lax.ppermute(x, perm=left_perm, axis_name=axis_name)

  def update_board(board):
    # Rule 30: new cell = left XOR (center OR right).
    left = board[:-2]
    right = board[2:]
    center = board[1:-1]
    return lax.bitwise_xor(left, lax.bitwise_or(center, right))

  @partial(pmap, axis_name='i')
  def step(board_slice):
    # Exchange one-cell halos with both neighbors, then apply the rule.
    left, right = board_slice[:1], board_slice[-1:]
    right, left = send_left(left, 'i'), send_right(right, 'i')
    enlarged_board_slice = jnp.concatenate([left, board_slice, right])
    return update_board(enlarged_board_slice)

  board = np.zeros(40, dtype=bool)
  board[board.shape[0] // 2] = True
  reshaped_board = board.reshape((device_count, -1))

  boards = []
  def print_board(board):
    boards.append(''.join('*' if x else ' ' for x in board.ravel()))

  print_board(reshaped_board)
  for _ in range(20):
    reshaped_board = step(reshaped_board)
    print_board(reshaped_board)

  ans = '\n'.join(boards)
  expected = '\n'.join((
      '                    *                   ',
      '                   ***                  ',
      '                  **  *                 ',
      '                 ** ****                ',
      '                **  *   *               ',
      '               ** **** ***              ',
      '              **  *    *  *             ',
      '             ** ****  ******            ',
      '            **  *   ***     *           ',
      '           ** **** **  *   ***          ',
      '          **  *    * **** **  *         ',
      '         ** ****  ** *    * ****        ',
      '        **  *   ***  ** **** *   *      ',
      '       ** **** **  *** *    ** ***      ',
      '      **  *    * ***   *** **  *  *     ',
      '     ** ****  ** *  * **   * ******     ',
      '    **  *   ***  **** ** *** *     *    ',
      '   ** **** **  ***    *  *   *    ***   ',
      '  **  *    * ***  *  ******* **  **  *  ',
      ' ** ****  ** *  ******      *  *** ****',
      ' *  *   ***  ****     *    ******  *   ',
      ))
  print(ans)
  self.assertEqual(ans, expected)
@jtu.skip_on_devices("cpu", "gpu")
def testReduceMax(self):
  """Subtracting pmax across devices matches np.max over the mapped axis."""
  device_shape = (xla_bridge.device_count(), 4)
  inputs = np.arange(prod(device_shape), dtype=np.float32).reshape(device_shape)
  subtract_max = pmap(lambda v: v - lax.pmax(v, 'i'), axis_name='i')
  result = subtract_max(inputs)
  reference = inputs - np.max(inputs, 0)
  self.assertAllClose(result, reference, check_dtypes=False)

@jtu.skip_on_devices("cpu", "gpu")
def testReduceMin(self):
  """Subtracting pmin across devices matches np.min over the mapped axis."""
  device_shape = (xla_bridge.device_count(), 4)
  inputs = np.arange(prod(device_shape), dtype=np.float32).reshape(device_shape)
  subtract_min = pmap(lambda v: v - lax.pmin(v, 'i'), axis_name='i')
  result = subtract_min(inputs)
  reference = inputs - np.min(inputs, 0)
  self.assertAllClose(result, reference, check_dtypes=False)
def testDeviceCountError(self):
  """pmap raises when the mapped axis exceeds the available devices."""
  device_count = xla_bridge.device_count()

  f = pmap(lambda x: x)
  x = jnp.arange(device_count + 1)
  self.assertRaisesRegex(ValueError, ".*requires.*replicas", lambda: f(x))

  f = pmap(lambda x: x)
  x = np.ones((device_count + 1, 10))
  self.assertRaisesRegex(ValueError, ".*requires.*replicas", lambda: f(x))

  f = pmap(lambda x: pmap(lambda x: x)(x))
  # Nesting multiplies the required replica count past what's available.
  x = np.ones((device_count, 2, 10))
  self.assertRaisesRegex(ValueError, ".*requires.*replicas", lambda: f(x))

def testPmapConstant(self):
  """A constant-only pmap output avoids compilation; mixed outputs don't."""
  device_count = xla_bridge.device_count()
  f = pmap(lambda x: 3)
  x = jnp.arange(device_count)
  with jtu.count_jit_and_pmap_compiles() as count:
    ans = f(x)
  # Output independent of the input: no XLA compilation expected.
  self.assertEqual(count[0], 0)
  expected = np.repeat(3, device_count)
  self.assertAllClose(ans, expected, check_dtypes=False)

  f = pmap(lambda x: (x, 3))
  x = np.arange(device_count)
  with jtu.count_jit_and_pmap_compiles() as count:
    _, ans = f(x)
  # The non-constant component forces one compilation.
  self.assertEqual(count[0], 1)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testPmapConstantDevices(self):
  """Constant outputs are replicated onto the explicit device list."""
  if xla_bridge.device_count() == 1:
    raise SkipTest("this test requires multiple devices")

  devices = xla_bridge.devices()[:-1]
  shuffle(devices)  # device order should not matter
  f = pmap(lambda x: 3, devices=devices)
  x = jnp.arange(len(devices))
  with jtu.count_jit_and_pmap_compiles() as count:
    ans = f(x)
  self.assertEqual(count[0], 0)
  expected = np.repeat(3, len(devices))
  self.assertAllClose(ans, expected, check_dtypes=False)

  # Test that 'ans' was properly replicated across devices.
  self.assertEqual([b.device() for b in ans.device_buffers], devices)

def testPmapConstantError(self):
  """Replicating a constant fails cleanly when devices are insufficient."""
  device_count = xla_bridge.device_count()
  f = pmap(lambda x: 3)
  x = jnp.arange(device_count + 1)
  self.assertRaisesRegex(
      ValueError, r"Cannot replicate across \d+ replicas because only \d+ "
      r"local devices are available.", lambda: f(x))

  f = pmap(lambda x: 3, devices=[xla_bridge.devices()[0]])
  x = jnp.arange(2)
  self.assertRaisesRegex(
      ValueError, "Cannot replicate across 2 replicas because only 1 "
      "local devices are available.", lambda: f(x))
def testNestedPmapConstant(self):
  """Constant outputs of a nested pmap keep the nested sharding layout."""
  if xla_bridge.device_count() == 1:
    raise SkipTest("this test requires multiple devices")

  f = pmap(pmap(lambda x: 3))
  shape = (2, xla_bridge.device_count() // 2, 3)
  x = jnp.arange(prod(shape)).reshape(shape)
  with jtu.count_jit_and_pmap_compiles() as count:
    ans = f(x)
  self.assertEqual(count[0], 0)
  expected = 3 * np.ones(shape[:2])
  self.assertAllClose(ans, expected, check_dtypes=False)

  # Test that 'ans' was properly replicated across devices.
  expected_sharded = pmap(pmap(lambda x: x))(expected)
  self.assertEqual([b.device() for b in ans.device_buffers],
                   [b.device() for b in expected_sharded.device_buffers])

  f = pmap(pmap(lambda x: (x, 3)))
  x_sharded, ans = f(x)
  self.assertAllClose(ans, expected, check_dtypes=False)
  self.assertEqual([b.device() for b in ans.device_buffers],
                   [b.device() for b in x_sharded.device_buffers])

def testNestedPmapConstantDevices(self):
  # NOTE: the unconditional skip below deliberately disables the rest of
  # this test until nested pmaps with explicit devices are implemented.
  raise SkipTest("Nested pmaps with devices not yet implemented")

  if xla_bridge.device_count() < 6:
    raise SkipTest("this test requires >= 6 devices")

  devices = xla_bridge.devices()[:-2]
  shuffle(devices)
  f = pmap(pmap(lambda x: 3), devices=devices)
  shape = (2, len(devices) // 2, 3)
  x = jnp.arange(prod(shape)).reshape(shape)
  with jtu.count_jit_and_pmap_compiles() as count:
    ans = f(x)
  self.assertEqual(count[0], 0)
  expected = 3 * np.ones(shape[:2])
  self.assertAllClose(ans, expected, check_dtypes=False)

  # Test that 'ans' was properly replicated across devices.
  expected_sharded = pmap(pmap(lambda x: x), devices=devices)(expected)
  self.assertEqual([b.device() for b in ans.device_buffers],
                   [b.device() for b in expected_sharded.device_buffers])

def testNestedPmapConstantError(self):
  """Nested constant replication fails cleanly with too few devices."""
  f = pmap(pmap(lambda x: 3))
  shape = (2, xla_bridge.device_count() // 2 + 1, 3)
  x = jnp.arange(prod(shape)).reshape(shape)
  self.assertRaisesRegex(
      ValueError, r"Cannot replicate across \d+ replicas because only \d+ "
      r"local devices are available.", lambda: f(x))

  if xla_bridge.device_count() > 1:
    f = pmap(pmap(lambda x: 3), devices=xla_bridge.devices()[:-1])
    shape = (2, xla_bridge.device_count() // 2, 3)
    x = jnp.arange(prod(shape)).reshape(shape)
    self.assertRaisesRegex(
        ValueError, r"Cannot replicate across \d+ replicas because only \d+ "
        r"local devices are available.", lambda: f(x))
def testCollectiveConstant(self):
  """psum of the constant 1 yields the axis size on every device."""
  device_count = xla_bridge.device_count()
  f = pmap(lambda x: lax.psum(1, 'i'), 'i')
  x = jnp.arange(device_count)
  ans = f(x)
  expected = np.repeat(device_count, device_count)
  self.assertAllClose(ans, expected, check_dtypes=False)

def testCollectiveConstantNested(self):
  """psum of 1 resolves per-axis sizes correctly in nested pmaps."""
  device_count = xla_bridge.device_count()

  @partial(pmap, axis_name='i')
  def f(x):
    @partial(pmap, axis_name='j')
    def g(y):
      a = lax.psum(1, 'i')
      b = lax.psum(1, 'j')
      c = lax.psum(1, ('i', 'j'))
      return a, b, c

    return g(x)

  shape = (device_count, 1, 4)
  x = jnp.arange(prod(shape)).reshape(shape)
  a, b, c = f(x)

  self.assertEqual(a.shape, shape[:-1])
  self.assertEqual(b.shape, shape[:-1])
  self.assertEqual(c.shape, shape[:-1])

  self.assertEqual(a.ravel()[0], device_count)
  self.assertEqual(b.ravel()[0], 1)
  self.assertEqual(c.ravel()[0], device_count * 1)

def testAxisIndex(self):
  """axis_index exposes each device's position along the mapped axis."""
  device_count = xla_bridge.device_count()
  f = pmap(lambda x: x + pxla.axis_index('i'), 'i')
  x = jnp.ones(device_count)
  ans = f(x)
  expected = 1 + np.arange(device_count)
  self.assertAllClose(ans, expected, check_dtypes=False)
def testVmapOfPmap(self):
  """vmap over a pmapped identity preserves values."""
  device_count = xla_bridge.device_count()
  f0 = lambda x: x
  f1 = pmap(f0, axis_name='i')
  ax = np.random.randn(2, device_count, 50, 60)
  bx = vmap(f1)(ax)
  self.assertAllClose(ax, bx, check_dtypes=False)

def testVmapOfPmap2(self):
  """vmap over a pmap that consumes PRNG keys works (regression check)."""
  N_DEVICES = xla_bridge.device_count()
  keys = random.split(random.PRNGKey(1), 13)  # [13, 2]

  @pmap
  def g(key):
    _ = random.normal(key, ())
    return 0.

  @vmap
  def s(keys):
    keys = jnp.broadcast_to(keys, (N_DEVICES,) + keys.shape)
    return g(keys)

  ans = s(keys)  # doesn't crash
  self.assertEqual(ans.shape, (13, N_DEVICES))

def testVmapOfPmap3(self):
  # https://github.com/google/jax/issues/3399
  device_count = xla_bridge.device_count()
  if device_count < 2:
    raise SkipTest("test requires at least two devices")

  def map_version(qs, pts):
    return jax.lax.map(lambda x: func(x, pts), qs)

  def vmap_version(qs, pts):
    return jax.vmap(func, in_axes=(0, None))(qs, pts)

  def func(q, pts):
    # Broadcasts q (via in_axes=None) through an inner pmap.
    q_from_pmap = jax.pmap(lambda x, y: y, in_axes=(0, None))(pts, q)
    return q, q_from_pmap

  pts = jnp.ones(device_count)
  qs = jnp.asarray(((0,0), (3,3), (2,2)))

  _, expected = map_version(qs, pts)
  _, ans = vmap_version(qs, pts)
  self.assertAllClose(ans, expected, check_dtypes=False)

def testVmapOfPmapNonLeadingAxis(self):
  """vmap with non-leading in/out axes over a pmapped identity."""
  device_count = xla_bridge.device_count()
  f0 = lambda x: x
  f1 = pmap(f0, axis_name='i')
  ax = np.random.randn(device_count, 2, 50, 60)
  bx = vmap(f1, in_axes=2, out_axes=2)(ax)
  self.assertAllClose(ax, bx, check_dtypes=False)
def testVmapOfPmapTuple(self):
device_count = xla_bridge.device_count()
f0 = lambda *x: x
f1 = pmap(f0, axis_name='i')
ax = np.random.randn(device_count, 2, 50, 60)
ay = np.random.randn(device_count, 30, 2)
az1 = np.random.randn(device_count, 20)
az2 = np.random.randn(2, device_count, 20)
bx, by, bz = vmap(f1, in_axes=(1, 2, (None, 0)), out_axes=(1, 2, 0))(ax, ay, (az1, az2))
self.assertAllClose(ax, bx, check_dtypes=False)
self.assertAllClose(ay, by, check_dtypes=False)
bz1, bz2 = bz
expected_bz1 = np.broadcast_to(az1, (2,) + az1.shape)
self.assertAllClose(expected_bz1, bz1, check_dtypes=False)
self.assertAllClose(bz2, bz2, check_dtypes=False)
@jtu.skip_on_devices("gpu")
def testPswapaxes(self):
  """lax.pswapaxes exchanges the mapped axis with a local axis."""
  device_count = xla_bridge.device_count()
  # TODO: AllToAll not yet implemented on XLA:CPU
  if jtu.device_under_test() == "cpu":
    device_count = 1
  shape = (device_count, 3, device_count, 5)
  x = np.arange(prod(shape)).reshape(shape)

  ans = pmap(lambda x: lax.pswapaxes(x, 'i', 1), axis_name='i')(x)
  expected = np.swapaxes(x, 0, 2)
  self.assertAllClose(ans, expected, check_dtypes=False)

def testReshardInput(self):
  """pmap re-shards an input whose existing sharding doesn't match."""
  if xla_bridge.device_count() < 6:
    raise SkipTest("testReshardInput requires 6 devices")
  # Manually construct a ShardedDeviceArray with the wrong sharding for the
  # subsequent pmap
  shard_shape = (3,2)
  shard = jnp.arange(jnp.prod(shard_shape)).reshape(shard_shape)
  bufs = [xla.device_put(shard, d) for d in xla_bridge.devices()[:4]]
  aval = ShapedArray((6,4), shard.dtype)
  # 2x2 sharding over a (6,4) array — not the 1D leading-axis sharding
  # that pmap expects, so dispatch must re-shard.
  sharding_spec = pxla.ShardingSpec(
      shards_per_axis=(2, 2),
      is_axis_materialized=(True, True),
      replication_factors=[])
  arr = pxla.ShardedDeviceArray(aval, sharding_spec, bufs)

  r = pmap(lambda x: x + 1)(arr)
  self.assertAllClose(r, arr + 1)
  self.assertEqual(len(r.device_buffers), 6)
@ignore_soft_pmap_warning()
def testSoftPmapPsum(self):
  """soft_pmap psum works when the axis exceeds the device count."""
  n = 4 * xla_bridge.device_count()
  def f(x):
    return x / lax.psum(x, 'i')
  ans = soft_pmap(f, 'i')(jnp.ones(n))
  expected = np.ones(n) / n
  self.assertAllClose(ans, expected, check_dtypes=False)

@ignore_soft_pmap_warning()
def testSoftPmapAxisIndex(self):
  """axis_index under soft_pmap enumerates all logical axis positions."""
  n = 4 * xla_bridge.device_count()
  def f(x):
    return x * lax.axis_index('i')
  ans = soft_pmap(f, 'i')(2 * jnp.ones(n))
  expected = 2 * np.arange(n)
  self.assertAllClose(ans, expected, check_dtypes=False)

@ignore_soft_pmap_warning()
def testSoftPmapOfJit(self):
  """soft_pmap composes with an inner jit."""
  n = 4 * xla_bridge.device_count()
  def f(x):
    return 3 * x
  ans = soft_pmap(jit(f), 'i')(np.arange(n))
  expected = 3 * np.arange(n)
  self.assertAllClose(ans, expected, check_dtypes=False)

@ignore_soft_pmap_warning()
def testSoftPmapNested(self):
  """Nested soft_pmaps combine axis indices consistently."""
  n = 4 * xla_bridge.device_count()

  @partial(soft_pmap, axis_name='i')
  @partial(soft_pmap, axis_name='j')
  def f(x):
    i_size = lax.psum(1, 'i')
    return x + lax.axis_index('i') + i_size * lax.axis_index('j')

  ans = f(jnp.zeros((n, n)))
  expected = np.arange(n ** 2).reshape(n, n).T
  self.assertAllClose(ans, expected, check_dtypes=False)

@ignore_soft_pmap_warning()
def testGradOfSoftPmap(self):
  """grad flows through a soft_pmapped axis_index computation."""
  n = 4 * xla_bridge.device_count()

  @partial(soft_pmap, axis_name='i')
  def f(x):
    return x * lax.axis_index('i')

  ans = grad(lambda x: jnp.sum(f(x)))(jnp.zeros((n, n)))
  expected = np.repeat(np.arange(n)[:, None], n, axis=1)
  self.assertAllClose(ans, expected, check_dtypes=False)
@ignore_soft_pmap_warning()
def testSoftPmapDevicePersistence(self):
  """soft_pmap outputs stay on-device across calls, falling back when needed."""
  device_count = xla_bridge.device_count()
  shape = (2 * 2 * device_count, 2, 3)

  # check that we can maintain device persistence across calls
  x = np.arange(prod(shape)).reshape(shape)
  x = soft_pmap(lambda x: x)(x)
  self.assertIsInstance(x, pxla.ShardedDeviceArray)
  x._npy_value = np.float32(np.nan)  # can't be coerced to ndarray for xfer
  x = soft_pmap(lambda x: x)(x)  # doesn't crash
  self.assertIsInstance(x, pxla.ShardedDeviceArray)

  # check that we don't crash when we can't maintain device persistence
  x = np.arange(prod(shape)).reshape(shape)
  x = soft_pmap(lambda x: x)(x)
  self.assertIsInstance(x, pxla.ShardedDeviceArray)
  y = x.reshape(device_count, -1)
  self.assertIsInstance(y, xla.DeviceArray)  # should have forced collection
  soft_pmap(lambda x: x)(y)  # doesn't crash
  z = x + 2
  self.assertIsInstance(z, xla.DeviceArray)  # should have forced collection
  x._npy_value = np.float32(np.nan)  # can't be coerced to ndarray for xfer
  self.assertRaisesRegex(
      RuntimeError,
      '.*does not match host shape or layout of computation parameter 0.*',
      lambda: x + 2)

  # check that different axis merges aren't a problem
  x = np.arange(prod(shape)).reshape(shape)
  x = soft_pmap(lambda x: x)(x)
  self.assertIsInstance(x, pxla.ShardedDeviceArray)
  x = x.reshape(2 * device_count, 2, 2, 3)  # axis merge of the wrong size
  self.assertIsInstance(x, xla.DeviceArray)  # should have forced collection

def testSoftPmapAllToAll(self):
  raise SkipTest("the underlying code here is broken")  # TODO(mattjj)
  n = 4 * xla_bridge.device_count()
  def f(x):
    return lax.all_to_all(x, 'i', 0, 0)
  ans = soft_pmap(f, 'i')(jnp.arange(n ** 2).reshape(n, n))
  expected = np.arange(n ** 2).reshape(n, n).T
  self.assertAllClose(ans, expected, check_dtypes=False)
def testShardedDeviceArrayBlockUntilReady(self):
  """block_until_ready on a ShardedDeviceArray is a safe no-op-like call."""
  x = np.arange(xla_bridge.device_count())
  x = pmap(lambda x: x)(x)
  x.block_until_ready()  # doesn't crash

def testJitPmapComposition(self):
  """jit-of-pmap and pmap-of-jit both match the reference psum."""
  f = lambda x: x - lax.psum(x, 'i')

  shape = (xla_bridge.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  expected = x - np.sum(x, 0)

  ans = jit(pmap(f, 'i'))(x)
  self.assertAllClose(ans, expected, check_dtypes=False)

  ans = pmap(jit(f), 'i')(x)
  self.assertAllClose(ans, expected, check_dtypes=False)

def testMakeJaxprOfOpenSpmd(self):
  """make_jaxpr traces a function with a free axis name without crashing."""
  f = lambda x: x - lax.psum(x, 'i')
  shape = (xla_bridge.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
  make_jaxpr(f)(x)  # doesn't crash

def testCompositionWithJitTwice(self):
  """jit around jit-around-pmap with a closed-over traced value works."""
  @jit
  def f(x):
    y = 2 * x

    @jit
    def g(z):
      # pmap closes over the outer jit's intermediate y.
      return pmap(lambda x: x * y)(z)

    return g(x)

  f(np.arange(1.).reshape((1, 1)))  # doesn't crash
def testIssue1065(self):
  # from https://github.com/google/jax/issues/1065
  """fori_loop calling a pmapped function inside jit does not crash."""
  device_count = xla_bridge.device_count()
  def multi_step_pmap(state, count):
    @partial(pmap, axis_name='x')
    @jit
    def exchange_and_multi_step(state):
      return state

    @jit
    def time_evolution(state):
      return lax.fori_loop(0, count, lambda i, s: exchange_and_multi_step(s), state)

    return time_evolution(state)

  multi_step_pmap(jnp.zeros((device_count,)), count=1)

def testShardedDeviceArrayGetItem(self):
  """Indexing a ShardedDeviceArray returns the right per-shard values."""
  f = lambda x: 2 * x
  f = pmap(f, axis_name='i')

  shape = (xla_bridge.device_count(), 4)
  x = np.arange(prod(shape), dtype=np.float32).reshape(shape)

  y = f(x)
  self.assertIsInstance(y, jnp.ndarray)
  self.assertIsInstance(y, pxla.ShardedDeviceArray)

  z = y[0]  # doesn't crash
  self.assertAllClose(z, 2 * x[0], check_dtypes=False)
def testPostProcessMap(self):
  # TODO(mattjj): this fails with multiple devices (unless we add a jit)
  # because we assume eager ops (like scan here) can't require more than 1
  # replica.
  raise SkipTest("need eager multi-replica support")  # TODO(mattjj)

  # test came from https://github.com/google/jax/issues/1369
  nrep = xla_bridge.device_count()

  def pmvm(a, b):
    a = a.reshape((nrep, -1, a.shape[1]))
    func = pmap(lambda z: jnp.dot(z, b))
    return func(a).reshape(b.shape)

  n = nrep * 2
  rng = np.random.RandomState(0)
  a = rng.randn(n, n)
  b = rng.randn(n)

  iters = jnp.arange(5)
  def body(carry, i):
    return pmvm(a, carry), i
  ans, _ = lax.scan(body, b, iters)

  expected = np.linalg.matrix_power(a, 5).dot(b)
  self.assertAllClose(ans, expected, check_dtypes=False)

def testManyArgs(self):
  """pmap handles a function over a wide (500-element) argument row."""
  @pmap
  def f(args_list):
    return sum(args_list)

  vals = list(range(500))
  ndevices = xla_bridge.device_count()
  self.assertAllClose(f(jnp.array([vals] * ndevices)),
                      jnp.array([sum(vals)] * ndevices))
def testPostProcessMap2(self):
# code from https://github.com/google/jax/issues/2787
def vv(x, y):
"""Vector-vector multiply"""
return jnp.dot(x, y)
def distributed_matrix_vector(x, y):
"""Matrix vector multiply. First batch it and then row by row"""
fv = lambda z: lax.map(lambda j: vv(j, y), z)
res = pmap(fv)(x.reshape((jax.device_count(), -1) + tuple(x.shape[1:])))
res = res.reshape(res.shape[0] * res.shape[1], *res.shape[2:])
return res
key = random.PRNGKey(1)
x = random.normal(key, (80, 50))
batched_mvm = vmap(lambda b: distributed_matrix_vector(x, b), in_axes=0)
y = random.normal(key, (10, 50, 1))
result = batched_mvm(y)
expected = jnp.einsum('ij,njk->nik', x, y)
tol = 1e-1 if jtu.device_under_test() == "tpu" else 1e-3
self.assertAllClose(result, expected, check_dtypes=False, atol=tol, rtol=tol)
def testAxisIndexRemat(self):
  """Regression test: axis_index inside a rematerialized function.

  https://github.com/google/jax/issues/2716
  """
  num_devices = len(jax.devices())

  def sample(key):
    # Fold the per-device axis index into the key so every device draws an
    # independent sample.
    per_device_key = random.fold_in(key, jax.lax.axis_index('i'))
    return random.bernoulli(per_device_key, p=0.5)

  keys = random.split(random.PRNGKey(0), num_devices)
  jax.pmap(jax.remat(sample), axis_name='i')(keys)  # doesn't crash
def testPmapMapVmapCombinations(self):
  # https://github.com/google/jax/issues/2822
  def vv(x, y):
    """Vector-vector multiply"""
    return jnp.dot(x, y)

  def matrix_vector(x, y, parallel=True):
    """Matrix vector multiply. First batch it and then row by row"""
    fv = lambda z: lax.map(lambda j: vv(j, y), z)
    if parallel:
      # split leading axis in two
      new_x = x.reshape((jax.device_count(), -1, *x.shape[1:]))
      # apply map
      new_res = pmap(fv)(new_x)
      # reshape back out
      res = new_res.reshape(x.shape[0], *new_res.shape[2:])
    else:
      res = fv(x)
    return res

  x = random.normal(random.PRNGKey(1), (80, 5))
  y = random.normal(random.PRNGKey(1), (10, 5))
  # All four composition strategies must agree on the result.
  result1 = vmap(lambda b: matrix_vector(x, b, True))(y)        # vmap + pmap
  result2 = lax.map(lambda b: matrix_vector(x, b, False), y)    # map + map
  result3 = lax.map(lambda b: matrix_vector(x, b, True), y)     # map + pmap
  result4 = jnp.stack([matrix_vector(x, b, False) for b in y])  # none + map
  self.assertAllClose(result1, result2, check_dtypes=False, atol=1e-3, rtol=1e-3)
  self.assertAllClose(result1, result3, check_dtypes=False, atol=1e-3, rtol=1e-3)
  self.assertAllClose(result1, result4, check_dtypes=False, atol=1e-3, rtol=1e-3)
def testPmapAxisNameError(self):
  """psum over an axis name the surrounding pmap does not bind must raise.

  https://github.com/google/jax/issues/3120
  """
  data = np.arange(4)[np.newaxis, :]

  def reduce_over_batch(x):
    return jax.lax.psum(x, axis_name='batch')

  # The pmap below binds no axis name, so 'batch' is unbound.
  with self.assertRaisesRegex(NameError, "unbound axis name: batch"):
    jax.pmap(reduce_over_batch)(data)
def testPsumOnBooleanDtype(self):
  # https://github.com/google/jax/issues/3123
  # Boolean inputs to psum/pmean should be reduced arithmetically (as 0/1)
  # rather than raising an error.
  n = xla_bridge.device_count()
  if n > 1:
    x = jnp.array([True, False])
    out = pmap(lambda x: jax.lax.psum(x, 'i'), 'i')(x)
    self.assertEqual(list(out), [1, 1])
    out = pmap(lambda x: jax.lax.pmean(x, 'i'), 'i')(x)
    self.assertEqual(list(out), [1/2, 1/2])
  else:
    x = jnp.array([True])
    out = pmap(lambda x: jax.lax.psum(x, 'i'), 'i')(x)
    self.assertEqual(list(out), [1])
    out = pmap(lambda x: jax.lax.pmean(x, 'i'), 'i')(x)
    self.assertEqual(list(out), [1])
def testPsumWithNoAxisDoesntLeakFunctions(self):
  # Verify via a weak reference that pmap's internal caches do not keep a
  # strong reference to the mapped function after the caller drops it.
  x = jnp.ones((1, 1024), dtype=np.float32)
  f = lambda _: x
  w = weakref.ref(f)
  g = pmap(f)
  g(np.ones((1,), dtype=np.float32)).block_until_ready()
  del f, g
  gc.collect()
  # 'f' should not be alive at this point; in particular the pmap cache must
  # not keep it alive.
  self.assertTrue(w() is None)
def testJitOfPmapWarningMessage(self):
  device_count = xla_bridge.device_count()

  if device_count == 1:
    raise SkipTest("test requires at least two devices")

  def foo(x): return x

  with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    jit(pmap(foo))(jnp.arange(device_count))

    # jit-of-pmap is usually a performance mistake; a warning naming the
    # offending function should be emitted.
    self.assertGreaterEqual(len(w), 1)
    self.assertIn("The jitted function foo includes a pmap",
                  str(w[-1].message))
def testPsumZeroCotangents(self):
  # https://github.com/google/jax/issues/3651
  def loss(params, meta_params):
    (net, mpo) = params
    return meta_params * mpo * net

  def inner(meta_params, params):
    grads = jax.grad(loss)(params, meta_params)
    # psum of a gradient pytree in which one cotangent is symbolically zero;
    # this used to crash (see the issue above).
    grads = lax.psum(grads, axis_name="i")
    net_grads, mpo_grads = grads
    net = params[0] + net_grads
    mpo = params[1]
    return mpo * net

  def outer(params):
    meta_params = jnp.array(4.0)
    return jax.grad(inner)(meta_params, params)

  params = (jnp.array([2.0]), jnp.array([3.0]))
  jax.pmap(outer, axis_name='i')(params)  # doesn't crash

  f = jax.pmap(outer, axis_name='i')
  jtu.check_grads(f, (params,), 2, ["fwd", "rev"], 1e-3, 1e-3)
class VmapOfPmapTest(jtu.JaxTestCase):
  """Systematic tests of vmap composed over pmap for varied batch axes."""

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": f"{shapes}_{vmap_bdims}_{pmap_bdims}",
       "shapes": shapes, "vmap_bdims": vmap_bdims, "pmap_bdims": pmap_bdims}
      for shape_group in compatible_shapes
      for num_args in range(1, 4)
      for shapes in it.combinations_with_replacement(shape_group, num_args)
      for vmap_bdims in all_bdims(*shapes)
      for pmap_bdims in it.product([0, None], repeat=num_args)
      # At least one argument must actually be mapped by pmap.
      if not all(bd is None for bd in pmap_bdims)
  ))
  def testVmapOfPmap(self, shapes, vmap_bdims, pmap_bdims):
    vmapped_size = 3
    pmapped_size = xla_bridge.device_count()
    rng = jtu.rand_default(self.rng())

    def fun(*args):
      return sum(args)

    # Insert the pmap batch axis first, then the vmap batch axis, so the
    # final argument shapes agree with the in_axes specifications below.
    final_shapes = map(partial(add_bdim, vmapped_size), vmap_bdims,
                       map(partial(add_bdim, pmapped_size), pmap_bdims, shapes))
    args = [rng(shape, jnp.float32) for shape in final_shapes]
    args_slice = args_slicer(args, vmap_bdims)
    ans = vmap(pmap(fun, in_axes=pmap_bdims), vmap_bdims)(*args)
    # Reference: slice out each vmap batch element and apply fun directly.
    expected = np.stack([fun(*args_slice(i)) for i in range(vmapped_size)])
    self.assertAllClose(ans, expected)
class PmapWithDevicesTest(jtu.JaxTestCase):
  """Tests of pmap's explicit `devices` argument and related interactions."""

  def testAllDevices(self):
    f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i',
             devices=xla_bridge.devices())
    shape = (xla_bridge.device_count(), 4)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    expected = x - np.sum(x, 0)
    ans = f(x)
    self.assertAllClose(ans, expected)

  def testOneDevice(self):
    if xla_bridge.device_count() == 1:
      raise SkipTest("this test requires multiple devices")

    # Two single-device pmaps pinned to different devices must both give the
    # same (correct) result.
    d0 = xla_bridge.devices()[0]
    d1 = xla_bridge.devices()[1]
    f = lambda x: jnp.dot(x, x.T)
    f0 = pmap(f, devices=[d0])
    f1 = pmap(f, devices=[d1])
    x = np.random.rand(1, 1000, 1000)
    r0 = f0(x)
    r1 = f1(x)
    expected = np.expand_dims(np.dot(x.squeeze(), x.squeeze().T), 0)
    self.assertAllClose(r0, expected, atol=1e-6, rtol=1e-3)
    self.assertAllClose(r1, expected, atol=1e-6, rtol=1e-3)

  def testNoDevicesError(self):
    f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i', devices=[])
    shape = (xla_bridge.device_count(), 4)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    with self.assertRaisesRegex(
        ValueError, "'devices' argument to pmap must be non-empty, or None."):
      f(x)

  def testBadAxisSizeError(self):
    if xla_bridge.device_count() == 1:
      raise SkipTest("this test requires multiple devices")

    f = pmap(lambda x: lax.psum(x, 'i'), axis_name='i',
             devices=xla_bridge.devices())
    # Too few elements along the mapped axis.
    with self.assertRaisesRegex(
        ValueError, r"Leading axis size of input to pmapped function must "
        r"equal the number of local devices passed to pmap. Got axis_size=1, "
        r"num_local_devices=\d."):
      f(jnp.ones(1))

    # Too many elements along the mapped axis.
    with self.assertRaisesRegex(
        ValueError, r"Leading axis size of input to pmapped function must "
        r"equal the number of local devices passed to pmap. Got axis_size=\d, "
        r"num_local_devices=\d."):
      f(jnp.ones(xla_bridge.device_count() + 1))

  def testNestedPmaps(self):
    if xla_bridge.device_count() % 2 != 0:
      raise SkipTest

    # Devices specified in outer pmap are OK
    @partial(pmap, axis_name='i', devices=xla_bridge.devices())
    def foo(x):
      @partial(pmap, axis_name='j')
      def bar(y):
        return lax.psum(y, 'j')
      return bar(x)

    x = jnp.ones((xla_bridge.device_count() // 2, 2))
    ans = foo(x)
    expected = x * 2
    self.assertAllClose(ans, expected)

  def testNestedPmapsError(self):
    # Devices specified in inner pmap not OK
    @partial(pmap, axis_name='i')
    def foo(x):
      @partial(pmap, axis_name='j', devices=xla_bridge.devices())
      def bar(y):
        return lax.psum(y, 'j')
      return bar(x)

    with self.assertRaisesRegex(
        ValueError,
        "Nested pmap with explicit devices argument."):
      foo(jnp.ones((xla_bridge.device_count(), 1)))

  def testJitInPmap(self):
    @partial(pmap, axis_name='i', devices=xla_bridge.devices())
    def foo(x):
      @jit
      def bar(y):
        return y + 1
      return lax.psum(bar(x), 'i')

    ndevices = xla_bridge.device_count()
    ans = foo(jnp.ones((ndevices, 1)))
    # Each device contributes 1 + 1 = 2; psum over all devices.
    expected = np.ones((ndevices, 1), dtype=jnp.float_) * ndevices * 2
    self.assertAllClose(ans, expected)

  def testPmapInJit(self):
    @jit
    def foo(x):
      @partial(pmap, axis_name='i', devices=xla_bridge.devices())
      def bar(y):
        return lax.psum(y, 'i')
      return bar(x)

    ndevices = xla_bridge.device_count()
    ans = foo(jnp.ones((ndevices, 1)))
    expected = np.ones((ndevices, 1), dtype=jnp.float_) * ndevices
    self.assertAllClose(ans, expected)

  def testGradBasic(self):
    @partial(pmap, axis_name='i', devices=xla_bridge.devices())
    def f(x):
      return jnp.sin(x)

    shape = (xla_bridge.device_count(), 4)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    # Gradient through the pmapped function must match the plain gradient.
    ans = grad(lambda x: jnp.sum(jnp.sin(x)))(x)
    expected = grad(lambda x: jnp.sum(f(x)))(x)
    self.assertAllClose(ans, expected, check_dtypes=False)

  def testPmapStaticArgnums(self):
    @partial(pmap, axis_name='i', static_broadcasted_argnums=1)
    def f(x, y):
      return jnp.sin(x + y)

    shape = (xla_bridge.device_count(), 4)
    x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
    y = np.arange(4, dtype=np.float32)
    ans = f(x, y)
    # The static argument is broadcast to every device rather than sharded.
    expected = np.sin(x + y[None])
    self.assertAllClose(ans, expected, check_dtypes=False)
class ShardedDeviceArrayTest(jtu.JaxTestCase):

  def testThreadsafeIndexing(self):
    # Index the same ShardedDeviceArray concurrently from several threads and
    # check every thread reads the correct shard.
    # NOTE(skye): I picked these values to be big enough to cause interesting
    # execution overlap, but small enough to not use too much memory. YMMV.
    shape = (8, 8000, 1000)

    if jax.device_count() < shape[0]:
      raise SkipTest(f"requires {shape[0]} devices")

    x = jnp.arange(jnp.prod(shape)).reshape(shape)
    sharded_x = pmap(lambda x: x)(x)

    num_threads = 10
    futures = []
    expected = []
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
      for i in range(num_threads):
        idx = i % shape[0]
        # Mix together different kinds of indices
        if i % 2 == 0:
          idx = slice(idx, idx + 1)
        # Use the "kwarg trick" to work around late-binding closures. See
        # https://docs.python-guide.org/writing/gotchas/#late-binding-closures.
        futures.append(executor.submit(
            lambda idx=idx: [sharded_x[idx] for _ in range(10)][0]))
        expected.append(x[idx])
      actual = [f.result() for f in futures]
    self.assertAllClose(actual, expected, check_dtypes=False)
class SpecToIndicesTest(jtu.JaxTestCase):
  """Checks pxla.spec_to_indices output for a variety of ShardingSpecs."""

  def testShardsPerAxis(self):
    shape = (4, 8)
    spec = pxla.ShardingSpec(shards_per_axis=(2, 2),
                             is_axis_materialized=(True, True),
                             replication_factors=[])
    # A 2x2 grid of contiguous blocks, enumerated row-major.
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((slice(0,2), slice(0,4)),
                      (slice(0,2), slice(4,8)),
                      (slice(2,4), slice(0,4)),
                      (slice(2,4), slice(4,8))))

  def testUnshardedAxis(self):
    shape = (4, 8)
    spec = pxla.ShardingSpec(shards_per_axis=(2, 1),
                             is_axis_materialized=(True, True),
                             replication_factors=[])
    # Trailing unsharded axes are omitted from the per-shard indices.
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     (slice(0,2), (slice(2,4))))

  def testNoSharding(self):
    shape = (4, 8)
    spec = pxla.ShardingSpec(shards_per_axis=(1, 1),
                             is_axis_materialized=(True, True),
                             replication_factors=[])
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     (slice(None),))

  def testUnmaterializedAxis(self):
    # An unmaterialized sharded axis is indexed by an integer, dropping the
    # axis from each shard.
    shape = (4, 8)
    spec = pxla.ShardingSpec(shards_per_axis=(4, 1),
                             is_axis_materialized=(False, True),
                             replication_factors=[])
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     (0, 1, 2, 3))

    shape = (2, 2)
    spec = pxla.ShardingSpec(shards_per_axis=(1, 2),
                             is_axis_materialized=(True, False),
                             replication_factors=[])
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((slice(None), 0),
                      (slice(None), 1)))

  def testReplicationAfterUnsharded(self):
    shape = (2, 8)
    spec = pxla.ShardingSpec(shards_per_axis=(2, 1),
                             is_axis_materialized=(False, True),
                             replication_factors=[(3, 2)])
    # Each shard index appears 3 times due to the replication factor.
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     (0, 0, 0, 1, 1, 1))

  def testReplicationPosition2(self):
    shape = (2, 8)
    spec = pxla.ShardingSpec(shards_per_axis=(2, 2),
                             is_axis_materialized=(False, True),
                             replication_factors=[(3, 2)])
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((0, slice(0, 4)), (0, slice(0, 4)), (0, slice(0, 4)),
                      (0, slice(4, 8)), (0, slice(4, 8)), (0, slice(4, 8)),
                      (1, slice(0, 4)), (1, slice(0, 4)), (1, slice(0, 4)),
                      (1, slice(4, 8)), (1, slice(4, 8)), (1, slice(4, 8))))

  def testReplicationPosition1(self):
    # The position of the replication factor controls where the repetition
    # occurs in the enumeration order.
    shape = (2, 8)
    spec = pxla.ShardingSpec(shards_per_axis=(2, 2),
                             is_axis_materialized=(False, True),
                             replication_factors=[(3, 1)])
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((0, slice(0, 4)), (0, slice(4, 8)),
                      (0, slice(0, 4)), (0, slice(4, 8)),
                      (0, slice(0, 4)), (0, slice(4, 8)),
                      (1, slice(0, 4)), (1, slice(4, 8)),
                      (1, slice(0, 4)), (1, slice(4, 8)),
                      (1, slice(0, 4)), (1, slice(4, 8))))

  def testReplicationPosition0(self):
    shape = (2, 8)
    spec = pxla.ShardingSpec(shards_per_axis=(2, 1),
                             is_axis_materialized=(False, True),
                             replication_factors=[(3, 0)])
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     (0, 1, 0, 1, 0, 1))

  def testMultipleReplications(self):
    shape = (2, 7, 4)
    spec = pxla.ShardingSpec(shards_per_axis=(2, 1, 2),
                             is_axis_materialized=(False, True, True),
                             replication_factors=[(3, 0), (2, 0), (2, 2)])
    self.assertEqual(
        pxla.spec_to_indices(shape, spec),
        ((0, slice(None), slice(0, 2)), (0, slice(None), slice(2, 4)),
         (0, slice(None), slice(0, 2)), (0, slice(None), slice(2, 4)),
         (1, slice(None), slice(0, 2)), (1, slice(None), slice(2, 4)),
         (1, slice(None), slice(0, 2)), (1, slice(None), slice(2, 4))) * 3 * 2)

  def testReplicatedScalar(self):
    shape = ()
    spec = pxla.ShardingSpec(shards_per_axis=(),
                             is_axis_materialized=(),
                             replication_factors=[(3, 0)])
    # A scalar replicated 3 times: one empty index per replica.
    self.assertEqual(pxla.spec_to_indices(shape, spec),
                     ((), (), ()))
def _spec_str(spec):
  """Render a ShardingSpec compactly for use in generated test-case names."""
  fields = (spec.shards_per_axis, spec.is_axis_materialized,
            spec.replication_factors)
  return "({},{},{})".format(*fields)
class ShardArgsTest(jtu.JaxTestCase):

  # These are argument "factories" referenced at class-definition time by the
  # parameterized decorator below, so they deliberately take no `self`.
  def numpy_array(x):
    return x

  def device_array(x):
    return jax.device_put(x)

  # TODO(skye): add coverage for ShardedDeviceArrays

  @parameterized.named_parameters(
      {"testcase_name":
       f"_shape={shape}_spec={_spec_str(spec)}_arg={make_arg.__name__}"
       .replace(" ", ""),
       "shape": shape, "spec": spec, "make_arg": make_arg}
      for make_arg in [numpy_array, device_array]
      for shape, spec in [
          # pmap(in_axes=0)
          [(4, 8), pxla.ShardingSpec(shards_per_axis=(4, 1),
                                     is_axis_materialized=(False, True),
                                     replication_factors=[])],
          # pmap(in_axes=1)
          [(2, 2), pxla.ShardingSpec(shards_per_axis=(1, 2),
                                     is_axis_materialized=(True, False),
                                     replication_factors=[])],
          # unsharded
          [(4, 8), pxla.ShardingSpec(shards_per_axis=(1, 1),
                                     is_axis_materialized=(True, True),
                                     replication_factors=[])],
          # partitioned, 1 axis
          [(4, 8), pxla.ShardingSpec(shards_per_axis=(2, 1),
                                     is_axis_materialized=(True, True),
                                     replication_factors=[])],
          # partitioned, 2 axes
          [(4, 8), pxla.ShardingSpec(shards_per_axis=(2, 2),
                                     is_axis_materialized=(True, True),
                                     replication_factors=[])],
          # partitioned + sharding
          [(2, 8), pxla.ShardingSpec(shards_per_axis=(2, 2),
                                     is_axis_materialized=(False, True),
                                     replication_factors=[])],
          # replication + sharding
          [(2, 8), pxla.ShardingSpec(shards_per_axis=(2, 1),
                                     is_axis_materialized=(False, True),
                                     replication_factors=[(3, 2)])],
          # replication, no sharding
          [(2, 8), pxla.ShardingSpec(shards_per_axis=(1, 1),
                                     is_axis_materialized=(True, True),
                                     replication_factors=[(3, 2)])],
          # multiple replicated axes
          [(1, 8), pxla.ShardingSpec(shards_per_axis=(1, 2),
                                     is_axis_materialized=(False, True),
                                     replication_factors=[(2, 0), (2, 1)])],
          # replicated scalar
          [(), pxla.ShardingSpec(shards_per_axis=(),
                                 is_axis_materialized=(),
                                 replication_factors=[(2, 0), (3, 0)])]
      ])
  def testShardArgs(self, shape, spec, make_arg):
    indices = pxla.spec_to_indices(shape, spec)
    nshards = len(indices)
    if jax.device_count() < nshards:
      raise SkipTest
    x = np.arange(np.prod(shape)).reshape(shape)
    arg = make_arg(x)
    bufs = pxla.shard_args(jax.devices()[:nshards],
                           [indices], [arg])
    self.assertEqual(len(bufs), nshards)
    # Each shard must hold exactly the slice of x selected by its index.
    for buf, idx in zip(bufs, indices):
      self.assertEqual(len(buf), 1)
      self.assertAllClose(buf[0].to_py(), x[idx], check_dtypes=False)
# Standard absltest entry point, using JAX's test loader.
if __name__ == '__main__':
  absltest.main(testLoader=jtu.JaxTestLoader())
| 36.353909 | 92 | 0.615867 |
00ae96317bc3cb28b1bad4cae154b83219576d0f | 30,589 | py | Python | ufedmm/cvlib.py | craabreu/ufedmm | 0c3cb7730d7fbc7dcaddcd2f9c86324ad6457be5 | [
"MIT"
] | 5 | 2020-04-22T14:51:39.000Z | 2021-11-26T23:28:46.000Z | ufedmm/cvlib.py | craabreu/ufedmm | 0c3cb7730d7fbc7dcaddcd2f9c86324ad6457be5 | [
"MIT"
] | 9 | 2020-04-23T16:40:56.000Z | 2021-09-17T18:20:51.000Z | ufedmm/cvlib.py | craabreu/ufedmm | 0c3cb7730d7fbc7dcaddcd2f9c86324ad6457be5 | [
"MIT"
] | 4 | 2020-04-20T19:06:31.000Z | 2022-03-06T20:05:48.000Z | """
.. module:: cvlib
:platform: Unix, Windows
:synopsis: A collection of custom collective variables
.. moduleauthor:: Charlles Abreu <abreu@eq.ufrj.br>
.. _Context: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.Context.html
.. _CustomCVForce: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomCVForce.html
.. _CustomIntegrator: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomIntegrator.html
.. _Force: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.Force.html
.. _NonbondedForce: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.NonbondedForce.html
.. _System: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.System.html
.. _coordination: https://www.plumed.org/doc-v2.6/user-doc/html/_c_o_o_r_d_i_n_a_t_i_o_n.html
.. _PLUMED: https://www.plumed.org
"""
import re
import itertools
import math
from collections import namedtuple
from simtk import openmm, unit
from ufedmm.ufedmm import _standardized
# Per-particle nonbonded parameters imported from an OpenMM NonbondedForce.
_ParamTuple = namedtuple('_ParamTuple', 'charge sigma epsilon')
class SquareRadiusOfGyration(openmm.CustomBondForce):
    """
    The square of the radius of gyration of a group of atoms, defined as:

    .. math::
        R_g^2 = \\frac{1}{n^2} \\sum_i \\sum_{j>i} r_{i,j}^2,

    where :math:`n` is the number of atoms in the group and :math:`r_{i,j}` is the distance between
    atoms `i` and `j`.

    Parameters
    ----------
    group : list(int)
        The indices of the atoms in the group.

    Example
    -------
        >>> import ufedmm
        >>> from ufedmm import cvlib
        >>> model = ufedmm.AlanineDipeptideModel()
        >>> RgSq = cvlib.SquareRadiusOfGyration(range(model.system.getNumParticles()))
        >>> RgSq.setForceGroup(1)
        >>> model.system.addForce(RgSq)
        4
        >>> context = openmm.Context(model.system, openmm.CustomIntegrator(0))
        >>> context.setPositions(model.positions)
        >>> context.getState(getEnergy=True, groups={1}).getPotentialEnergy()._value
        0.08711416289256209

    """

    def __init__(self, group):
        # Each "bond" contributes r^2/n^2; summing over all i<j pairs gives
        # the squared radius of gyration of the group.
        super().__init__(f'r^2/{len(group)**2}')
        self.setUsesPeriodicBoundaryConditions(False)
        # One bond term per unordered atom pair in the group.
        for i, j in itertools.combinations(group, 2):
            self.addBond(i, j)
class RadiusOfGyration(openmm.CustomCVForce):
    """
    The radius of gyration of a group of atoms, defined as:

    .. math::
        R_g = \\frac{1}{n} \\sqrt{\\sum_i \\sum_{j>i} r_{i,j}^2},

    where :math:`n` is the number of atoms in the group and :math:`r_{i,j}` is the distance between
    atoms `i` and `j`.

    Parameters
    ----------
    group : list(int)
        The indices of the atoms in the group.

    Example
    -------
        >>> import ufedmm
        >>> from ufedmm import cvlib
        >>> model = ufedmm.AlanineDipeptideModel()
        >>> Rg = cvlib.RadiusOfGyration(range(model.system.getNumParticles()))
        >>> Rg.setForceGroup(1)
        >>> model.system.addForce(Rg)
        4
        >>> context = openmm.Context(model.system, openmm.CustomIntegrator(0))
        >>> context.setPositions(model.positions)
        >>> context.getState(getEnergy=True, groups={1}).getPotentialEnergy()._value
        0.2951510848575048

    """

    def __init__(self, group):
        # Inner force accumulates the sum of squared pair distances over all
        # i<j pairs in the group.
        RgSq = openmm.CustomBondForce('r^2')
        RgSq.setUsesPeriodicBoundaryConditions(False)
        for i, j in itertools.combinations(group, 2):
            RgSq.addBond(i, j)
        # Rg = sqrt(sum of r_ij^2)/n, evaluated as a collective variable.
        super().__init__(f'sqrt(RgSq)/{len(group)}')
        self.addCollectiveVariable('RgSq', RgSq)
class CoordinationNumber(openmm.CustomNonbondedForce):
    """
    A continuos approximation for the number of neighbor pairs among atoms of two groups,
    defined as:

    .. math::
        N_{\\mathbf{g}_1, \\mathbf{g}_2}(\\mathbf{r}) =
            \\sum_{i \\in \\mathbf{g}_1} \\sum_{j \\in \\mathbf{g}_2}
                S\\left(\\frac{r_{i,j}}{r_0}-1\\right) F_n \\left(\\frac{r_{i,j}}{r_0}\\right)

    where :math:`r_0` is a threshold distance and :math:`r_{ij}` is the distance between atoms
    :math:`i \\in \\mathbf{g}_1` and :math:`j \\in \\mathbf{g}_2`.
    The function :math:`F_n(x)` is a continuous step function defined as

    .. math::
        F_n(x) = \\frac{1}{1+x^n}

    where :math:`n` is a sharpness parameter. With :math:`n = 6` (default), this is the same
    function defined in :cite:`Iannuzzi_2003`.
    It is also a special case with :math:`d_0 = 0` and :math:`m=2n` of the coordination_ collective
    variable defined in PLUMED_.
    It has the following shape for varying `n` values:

    .. image::
        figures/coordination_number.png
        :align: center

    Besides, :math:`S(x)` is a switching function given by

    .. math::
        S(x) = \\begin{cases}
                   1 & x < 0 \\\\
                   1-6x^5+15x^4-10x^3 & 0 \\leq x \\leq 1 \\\\
                   0 & x > 1
               \\end{cases}

    Thus, the amount summed up for each atom pair decays smoothly to zero throughout the interval
    :math:`r_{i,j} \\in [r_0, 2 r_0]`, meaning that :math:`2 r_0` is an actual cutoff distance.

    .. warning::
        If the two specified atom groups share atoms, each pair `i,j` among these atoms will be
        counted only once.

    Parameters
    ----------
    system : openmm.System
        The system for which this collective variable will be computed.
    group1 : list(int)
        The indices of the atoms in the first group.
    group2 : list(int)
        The indices of the atoms in the second group.

    Keyword Args
    ------------
    n : int or float, default=6
        Exponent that controls the sharpness of the sigmoidal function.
    r0 : unit.Quantity, default=4*unit.angstroms
        The threshold distance, which is also half the actual cutoff distance.

    Example
    -------
        >>> import ufedmm
        >>> from ufedmm import cvlib
        >>> from simtk.openmm import app
        >>> model = ufedmm.AlanineDipeptideModel()
        >>> carbons = [atom.index for atom in model.topology.atoms() if atom.element == app.element.carbon]
        >>> oxygens = [atom.index for atom in model.topology.atoms() if atom.element == app.element.carbon]
        >>> N = cvlib.CoordinationNumber(model.system, carbons, oxygens)
        >>> N.setForceGroup(1)
        >>> model.system.addForce(N)
        4
        >>> context = openmm.Context(model.system, openmm.CustomIntegrator(0))
        >>> context.setPositions(model.positions)
        >>> context.getState(getEnergy=True, groups={1}).getPotentialEnergy()._value
        9.461968630563433

    """

    def __init__(self, system, group1, group2, n=6, r0=4*unit.angstroms):
        # Pair term is F_n(r/r0) = 1/(1+(r/r0)^n).
        super().__init__(f'1/(1+(r/r0)^{n})')
        if system.usesPeriodicBoundaryConditions():
            self.setNonbondedMethod(openmm.CustomNonbondedForce.CutoffPeriodic)
        else:
            self.setNonbondedMethod(openmm.CustomNonbondedForce.CutoffNonPeriodic)
        # Every system particle must be registered, even those outside the
        # interaction groups (required by CustomNonbondedForce).
        for i in range(system.getNumParticles()):
            self.addParticle([])
        self.addGlobalParameter('r0', r0)
        # OpenMM's switching function damps the pair term smoothly between
        # r0 and 2*r0, making 2*r0 the effective cutoff.
        self.setUseSwitchingFunction(True)
        self.setSwitchingDistance(r0)
        self.setCutoffDistance(2*r0)
        self.setUseLongRangeCorrection(False)
        self.addInteractionGroup(group1, group2)
class HelixAngleContent(openmm.CustomAngleForce):
    """
    Fractional alpha-helix content of a sequence of residues in a protein chain based on the
    angles between consecutive alpha-carbon atoms, defined as follows:

    .. math::
        \\alpha_\\theta(r_M,\\cdots,r_N) = \\frac{1}{N-M-1} \\sum_{i=M+1}^{N-1} F_n\\left(
            \\frac{\\theta(\\mathrm{C}_\\alpha^{i-1},\\mathrm{C}_\\alpha^i,\\mathrm{C}_\\alpha^{i+1})
            - \\theta_\\mathrm{ref}}{\\theta_\\mathrm{tol}}\\right)

    where :math:`\\theta(\\mathrm{C}_\\alpha^{i-1},\\mathrm{C}_\\alpha^i,\\mathrm{C}_\\alpha^{i+1})`
    is the angle between three consecutive alpha-carbon atoms, :math:`\\theta_\\mathrm{ref}` is the
    reference value of this angle, and :math:`\\theta_\\mathrm{tol}` is the tolerance threshold
    around this reference.
    The function :math:`F_n(x)` is defined as in :class:`CoordinationNumber`, but only even integer
    values are accepted for `n`.

    Parameters
    ----------
    topology : openmm.app.Topology
        The topology of the system for which this collective variable will be computed.
    first, last : int
        The indices of the first and last residues involved in the alpha helix.

    Keyword Args
    ------------
    n : even integer, default=6
        Exponent that controls the sharpness of the sigmoidal function.
    theta_ref : unit.Quantity, default=88*unit.degrees
        The reference value of the alpha carbon angle in the alpha helix.
    theta_tol : unit.Quantity, default=15*unit.degrees
        The tolerance for the deviation from the alpha carbon angle.

    Raises
    ------
    ValueError
        If the selected residues do not belong to a single chain, or if `n`
        is not an even integer.

    """

    def __init__(self, topology, first, last,
                 n=6, theta_ref=88*unit.degrees, theta_tol=15*unit.degrees):
        residues = [r for r in topology.residues() if first <= r.index <= last]
        # Fixed: both error messages previously named the class
        # "AngleHelixContent" instead of "HelixAngleContent".
        if len(set(r.chain.index for r in residues)) > 1:
            raise ValueError('HelixAngleContent requires all residues in a single chain')
        if n % 2 != 0:
            raise ValueError("HelixAngleContent requires n to be an even integer number")
        # Each angle contributes F_n((theta-theta_ref)/theta_tol), normalized
        # by the number of consecutive alpha-carbon triples (last-first-1).
        super().__init__(f'1/({last-first-1}*(1+x^{n})); x=(theta - theta_ref)/theta_tol')
        self.addGlobalParameter('theta_ref', theta_ref)
        self.addGlobalParameter('theta_tol', theta_tol)
        alpha_carbons = [atom.index for r in residues for atom in r.atoms() if atom.name == 'CA']
        # One angle term per consecutive alpha-carbon triple.
        for i, j, k in zip(alpha_carbons[0:-2], alpha_carbons[1:-1], alpha_carbons[2:]):
            self.addAngle(i, j, k, [])
class HelixHydrogenBondContent(openmm.CustomBondForce):
    """
    Fractional alpha-helix content of a sequence of residues in a protein chain based on the
    hydrogen bonds between oxygen atoms and H-N groups located four residues apart, defined as
    follows:

    .. math::
        \\alpha_\\mathrm{hb}(r_M,\\cdots,r_N) = \\frac{1}{N-M-2} \\sum_{i=M+2}^{N-2} F_n\\left(
            \\frac{d(\\mathrm{O}^{i-2}, \\mathrm{H}^{i+2})}{d_0}\\right)

    where :math:`d(\\mathrm{O}^{i-2}, \\mathrm{H}^{i+2})` is the distance between the oxygen and
    hydrogen atoms and :math:`d_0` is the threshold distance for characterizing a hydrogen bond.
    The function :math:`F_n(x)` is defined as in :class:`CoordinationNumber`.

    Parameters
    ----------
    topology : openmm.app.Topology
        The topology of the system for which this collective variable will be computed.
    first, last : int
        The indices of the first and last residues involved in the alpha helix.

    Keyword Args
    ------------
    n : int or float, default=6
        Exponent that controls the sharpness of the sigmoidal function.
    d0 : unit.Quantity, default=3.3*unit.angstroms
        The threshold distance, which is also half the actual cutoff distance.

    """

    def __init__(self, topology, first, last, n=6, d0=3.3*unit.angstroms):
        residues = [r for r in topology.residues() if first <= r.index <= last]
        if len(set(r.chain.index for r in residues)) > 1:
            raise ValueError('HelixHydrogenBondContent requires all residues in a single chain')
        # Each O..H "bond" contributes F_n(r/d0), normalized by the number of
        # pairs (last-first-2).
        super().__init__(f'1/({last-first-2}*(1+x^{n})); x=r/d0')
        self.addGlobalParameter('d0', d0)
        # Backbone amide hydrogens and carbonyl oxygens, including common
        # terminal-atom name variants.
        reH = re.compile('\\b(H|1H|HN1|HT1|H1|HN)\\b')
        reO = re.compile('\\b(O|OCT1|OC1|OT1|O1)\\b')
        oxygens = [atom.index for r in residues for atom in r.atoms() if re.match(reO, atom.name)]
        hydrogens = [atom.index for r in residues for atom in r.atoms() if re.match(reH, atom.name)]
        # NOTE(review): this pairs the oxygen of residue i with the hydrogen
        # of residue i+3, whereas the class docstring describes an i -> i+4
        # pairing -- confirm which offset is intended.
        for i, j in zip(oxygens[:-3], hydrogens[3:]):
            self.addBond(i, j, [])
class HelixRamachandranContent(openmm.CustomTorsionForce):
    """
    Fractional alpha-helix content of a sequence of residues in a protein chain based on the
    Ramachandran dihedral angles, defined as follows:

    .. math::
        \\alpha_{\\phi,\\psi}(r_M,\\cdots,r_N) = \\frac{1}{2(N-M-1)} \\sum_{i=M+1}^{N-1} \\Bigg[
            F_n\\left(
                \\frac{\\phi(\\mathrm{C}^{i-1},\\mathrm{N}^i,\\mathrm{C}_\\alpha^i, \\mathrm{C}^i)
                - \\phi_\\mathrm{ref}}{\\phi_\\mathrm{tol}}
            \\right) + \\\\
            F_n\\left(
                \\frac{\\psi(\\mathrm{N}^i,\\mathrm{C}_\\alpha^i, \\mathrm{C}^i, \\mathrm{N}^{i+1})
                - \\psi_\\mathrm{ref}}{\\psi_\\mathrm{tol}}
            \\right)
        \\Bigg]

    where :math:`\\phi(\\mathrm{C}^{i-1},\\mathrm{N}^i,\\mathrm{C}_\\alpha^i, \\mathrm{C}^i)` and
    :math:`\\psi(\\mathrm{N}^i,\\mathrm{C}_\\alpha^i, \\mathrm{C}^i, \\mathrm{N}^{i+1})` are the
    Ramachandran dihedral angles, :math:`\\phi_\\mathrm{ref}` and :math:`\\psi_\\mathrm{ref}` are
    their reference values in an alpha helix, and :math:`\\phi_\\mathrm{tol}` and
    :math:`\\psi_\\mathrm{tol}` are the threshold tolerances around these refenrences.
    The function :math:`F_n(x)` is defined as in :class:`CoordinationNumber`, but only even integer
    values are accepted for `n`.

    Default values are the overall average alpha-helix dihedral angles and their dispersions
    reported in :cite:`Hovmoller_2002`.

    Parameters
    ----------
    topology : openmm.app.Topology
        The topology of the system for which this collective variable will be computed.
    first, last : int
        The indices of the first and last residues involved in the alpha helix.

    Keyword Args
    ------------
    n : even integer, default=6
        Exponent that controls the sharpness of the sigmoidal function.
    phi_ref : unit.Quantity, default=-63.8*unit.degrees
        The reference value of the Ramachandran :math:`\\phi` dihedral angle in the alpha helix.
    phi_tol : unit.Quantity, default=25*unit.degrees
        The tolerance for the deviation from the Ramachandran :math:`\\phi` dihedral angle.
    psi_ref : unit.Quantity, default=-41.1*unit.degrees
        The reference value of the Ramachandran :math:`\\psi` dihedral angle in the alpha helix.
    psi_tol : unit.Quantity, default=25*unit.degrees
        The tolerance for the deviation from the Ramachandran :math:`\\psi` dihedral angle.

    """

    def __init__(self, topology, first, last, n=6,
                 phi_ref=-63.8*unit.degrees, phi_tol=25*unit.degrees,
                 psi_ref=-41.1*unit.degrees, psi_tol=25*unit.degrees):
        residues = [r for r in topology.residues() if first <= r.index <= last]
        if len(set(r.chain.index for r in residues)) > 1:
            raise ValueError('HelixRamachandranContent requires all residues in a single chain')
        # NOTE(review): 2*(last-first) torsions are added below, matching this
        # normalization factor; the docstring's sum implies 2*(last-first-1)
        # terms -- confirm which count is intended.
        super().__init__(f'1/({2*(last-first)}*(1+x^{n})); x=(theta - theta_ref)/theta_tol')
        self.addPerTorsionParameter('theta_ref')
        self.addPerTorsionParameter('theta_tol')
        C = [atom.index for r in residues for atom in r.atoms() if atom.name == 'C']
        N = [atom.index for r in residues for atom in r.atoms() if atom.name == 'N']
        CA = [atom.index for r in residues for atom in r.atoms() if atom.name == 'CA']
        # phi(i): C(i-1)-N(i)-CA(i)-C(i)
        for i, j, k, l in zip(C[:-1], N[1:], CA[1:], C[1:]):
            self.addTorsion(i, j, k, l, [phi_ref, phi_tol])
        # psi(i): N(i)-CA(i)-C(i)-N(i+1)
        for i, j, k, l in zip(N[:-1], CA[:-1], C[:-1], N[1:]):
            self.addTorsion(i, j, k, l, [psi_ref, psi_tol])

    def atom_indices(self):
        """
        Returns
        -------
        phi_indices : list of tuples
            The indices of the atoms in the :math:`\\phi` dihedrals.
        psi_indices : list of tuples
            The indices of the atoms in the :math:`\\psi` dihedrals.

        """
        # phi torsions occupy the first half of the torsion list and psi
        # torsions the second half (matching the order they were added).
        N = self.getNumTorsions()//2
        phi_indices = []
        psi_indices = []
        for index in range(N):
            i, j, k, l, parameters = self.getTorsionParameters(index)
            phi_indices.append((i, j, k, l))
            i, j, k, l, parameters = self.getTorsionParameters(index + N)
            psi_indices.append((i, j, k, l))
        return phi_indices, psi_indices
class _InOutForce(openmm.CustomNonbondedForce):
    """
    An abstract class for In/Out-force collective variables.
    """

    def _import_properties(self, nbforce):
        # Mirror the cutoff/switching configuration of the source force and
        # replicate its exceptions as exclusions of this force.
        self.setNonbondedMethod(self.CutoffPeriodic)
        self.setCutoffDistance(nbforce.getCutoffDistance())
        self.setUseSwitchingFunction(nbforce.getUseSwitchingFunction())
        self.setSwitchingDistance(nbforce.getSwitchingDistance())
        for index in range(nbforce.getNumExceptions()):
            i, j, _, _, _ = nbforce.getExceptionParameters(index)
            self.addExclusion(i, j)

    def _get_parameters(self, nbforce):
        # Collect per-particle (charge, sigma, epsilon), stripped of units.
        parameters = []
        for i in range(nbforce.getNumParticles()):
            charge, sigma, epsilon = [p/p.unit for p in nbforce.getParticleParameters(i)]
            # sigma is irrelevant when epsilon == 0; use 1.0 so that a zero
            # sigma does not propagate through the combination rules below.
            parameters.append(_ParamTuple(charge, sigma if epsilon != 0.0 else 1.0, epsilon))
        return parameters

    def _update_nonbonded_force(self, group, nbforce, parameters, pbc_for_exceptions):
        # Turn every in-group pair that is not already an exception into an
        # explicit exception, so the source force stops computing it as a
        # regular nonbonded interaction.
        internal_exception_pairs = []
        for index in range(nbforce.getNumExceptions()):
            i, j, _, _, epsilon = nbforce.getExceptionParameters(index)
            i_in_group, j_in_group = i in group, j in group
            if i_in_group and j_in_group:
                internal_exception_pairs.append(set([i, j]))
            elif (i_in_group or j_in_group):
                # Cross-group (in/out) exceptions are not supported.
                raise ValueError("No exceptions are allowed for in-group/out-group interactions")
        for i, j in itertools.combinations(group, 2):
            if set([i, j]) not in internal_exception_pairs:
                # Lorentz-Berthelot combination rules for the new exception.
                chargeprod = parameters[i].charge*parameters[j].charge
                sigma = (parameters[i].sigma + parameters[j].sigma)/2
                epsilon = unit.sqrt(parameters[i].epsilon*parameters[j].epsilon)
                nbforce.addException(i, j, chargeprod, sigma, epsilon)
        if pbc_for_exceptions:
            nbforce.setExceptionsUsePeriodicBoundaryConditions(True)
class InOutLennardJonesForce(_InOutForce):
    """
    Lennard-Jones (LJ) interactions between the atoms of a specified group and all other atoms
    in the system, referred to as in/out LJ interactions. All LJ parameters are imported from a
    provided NonbondedForce_ object, which is then modified so that all in-group interactions are
    treated as exceptions and all atoms of the group are removed from regular LJ interactions.

    .. note::
        No exceptions which involve in/out atom pairs are allowed.

    Warnings
    --------
    side effect:
        The constructor of this class modifies the passed NonbondedForce_ object.

    The model equation is:

    .. math::
        V_\\mathrm{LJ}(\\mathbf{r}) = \\sum_{i \\in \\mathcal{G}} \\sum_{j \\notin \\mathcal{G}}
            \\epsilon_{ij} u_\\mathrm{LJ}\\left(\\frac{r_{ij}}{\\sigma_{ij}}\\right)

    where :math:`\\mathcal{G}` is the specified group and

    .. math::
        u_\\mathrm{LJ}(x) = 4(x^{-12} - x^{-6})

    Parameters
    ----------
    group : list of int
        The atoms in the specified group.
    nbforce : openmm.NonbondedForce
        The NonbondedForce_ object from which the atom parameters are imported.

    Keyword Args
    ------------
    pbc_for_exceptions : bool, default=False
        Whether to consider periodic boundary conditions for exceptions in the NonbondedForce_
        object. This might be necessary if the specified group contains several detached
        molecules or one long molecule.
    softcore : bool, default=False
        Whether to include a softcore version :cite:`Beutler_1994` of the Lennard-Jones
        potential. In this case, a global variable `lambda_vdw` is added to the constructed
        object.
    keep_charges : bool, default=True
        Whether to keep the charges of the solute atoms. Otherwise, the charges of all solute
        atoms will be set to zero.

    Raises
    ------
    ValueError:
        Raised if there are any exceptions in the NonbondedForce_ object involving
        cross-group (i.e. in/out) atom pairs.
    """
    def __init__(self, group, nbforce, pbc_for_exceptions=False, softcore=False, keep_charges=True):
        # Here the auxiliary variable is x = (r/sigma)^6, so the usual LJ form
        # 4*(s^-12 - s^-6) with s = r/sigma becomes 4/x^2 - 4/x.
        u_LJ = '4/x^2-4/x'
        definitions = ['sigma=(sigma1+sigma2)/2', 'epsilon=sqrt(epsilon1*epsilon2)']
        if softcore:
            # Beutler et al. softcore: shifting x by (1-lambda_vdw)/2 removes the
            # r -> 0 singularity whenever lambda_vdw < 1.
            equations = [f'lambda_vdw*epsilon*({u_LJ}); x=(r/sigma)^6+(1-lambda_vdw)/2']
        else:
            equations = [f'epsilon*({u_LJ}); x=(r/sigma)^6']
        super().__init__(';'.join(equations + definitions))
        if softcore:
            self.addGlobalParameter('lambda_vdw', 1.0)
        parameters = self._get_parameters(nbforce)
        self.addPerParticleParameter('sigma')
        self.addPerParticleParameter('epsilon')
        for parameter in parameters:
            self.addParticle([parameter.sigma, parameter.epsilon])
        # Move all in-group pairs of nbforce into exceptions, copy cutoff
        # settings/exclusions, and restrict this force to in/out pairs only.
        self._update_nonbonded_force(group, nbforce, parameters, pbc_for_exceptions)
        self._import_properties(nbforce)
        self.addInteractionGroup(set(group), set(range(nbforce.getNumParticles())) - set(group))
        self.setUseLongRangeCorrection(nbforce.getUseDispersionCorrection())
        # Remove the group atoms from the regular LJ interactions of nbforce
        # (sigma=1, epsilon=0), optionally zeroing their charges as well.
        for i in group:
            charge = parameters[i].charge if keep_charges else 0.0
            nbforce.setParticleParameters(i, charge, 1.0, 0.0)
    def capped_version(self, m=2):
        """
        Returns a capped (Buelens-Grubmüller-type) version of the in/out Lennard-Jones force.

        The model equation is

        .. math::
            V_\\mathrm{BG}(\\mathbf{r}) = \\sum_{i \\in \\mathcal{G}} \\sum_{j \\notin \\mathcal{G}}
                \\epsilon_{ij} u_\\mathrm{BG}\\left(\\frac{r_{ij}}{\\sigma_{ij}}\\right)

        where

        .. math::
            u_\\mathrm{BG}(x) = \\left\\{ \\begin{array}{ccc}
                u_\\mathrm{cap}(x) & \\mathrm{if} & x < 1 \\\\
                4(x^{-12} - x^{-6}) & \\mathrm{if} & x \\geq 1
            \\end{array}\\right.

        with

        .. math::
            u_\\mathrm{cap}(x) = \\left\\{ \\begin{array}{ccc}
                126 x^4 - 176 x^3 + 50 & \\mathrm{if} & m = 2 \\\\
                \\frac{-4340 x^6 + 10944 x^5 - 7200 x^4 + 596}{5} & \\mathrm{if} & m = 3 \\\\
                \\frac{43365 x^8 - 155880 x^7 + 191065 x^6 - 80472 x^5 + 1922}{35} & \\mathrm{if} & m = 4
            \\end{array}\\right.

        Keyword Args
        ------------
        m : int, default=2
            The highest order of derivatives to be zero at :math:`r=0` and to match the
            Lennard-Jones values at :math:`r=\\sigma`. Valid options are 2, 3, and 4.

        Raises
        ------
        ValueError:
            Raised if an invalid `m` keyword value is passed.
        """
        # Unlike __init__, here x = r/sigma directly, hence the explicit exponents.
        u_LJ = '4/x^12-4/x^6'
        if m == 2:
            u_cap = '126*x^4-176*x^3+50'
        elif m == 3:
            u_cap = '(-4340*x^6+10944*x^5-7200*x^4+596)/5'
        elif m == 4:
            u_cap = '(43365*x^8-155880*x^7+191065*x^6-80472*x^5+1922)/35'
        else:
            # Fixed: the previous message was copied docstring text
            # ("Raised if an invalid `m` keyword value is passed.").
            raise ValueError(f"Invalid m={m}; valid options are 2, 3, and 4")
        definitions = ['x=r/sigma', 'sigma=(sigma1+sigma2)/2', 'epsilon=sqrt(epsilon1*epsilon2)']
        # select(step(1-x), a, b) evaluates to a when x < 1 and b otherwise.
        equations = [f'epsilon*select(step(1-x),{u_cap},{u_LJ})'] + definitions
        force = openmm.CustomNonbondedForce(';'.join(equations))
        force.addPerParticleParameter('sigma')
        force.addPerParticleParameter('epsilon')
        for index in range(self.getNumParticles()):
            force.addParticle(self.getParticleParameters(index))
        for index in range(self.getNumExclusions()):
            force.addExclusion(*self.getExclusionParticles(index))
        force.setNonbondedMethod(force.CutoffPeriodic)
        force.setCutoffDistance(self.getCutoffDistance())
        force.setUseSwitchingFunction(self.getUseSwitchingFunction())
        force.setSwitchingDistance(self.getSwitchingDistance())
        force.setUseLongRangeCorrection(self.getUseLongRangeCorrection())
        force.addInteractionGroup(*self.getInteractionGroupParameters(0))
        return force
class InOutCoulombForce(_InOutForce):
    """
    Cut-off, pairwise Coulomb interactions between the atoms of a specified group and all other atoms
    in the system, referred to as in/out Coulomb interactions. All charges are imported from a
    provided NonbondedForce_ object, which is then modified so that all in-group interactions are
    treated as exceptions and all charges of the group atoms are scaled by a newly created Context_
    global parameter whose default value is 0.0.
    .. note::
        No exceptions which involve in/out atom pairs are allowed.
    Warnings
    --------
    side effect:
        The constructor of this class modifies the passed NonbondedForce_ object.
    The model equation is
    .. math::
        V_\\mathrm{coul}(\\mathbf{r}) = \\sum_{i \\in \\mathcal{G}} \\sum_{j \\notin \\mathcal{G}}
            \\frac{q_i q_j}{4 \\pi \\epsilon_0 r_c} u\\left(\\frac{r}{r_c}\\right)
    where :math:`\\mathcal{G}` is the specified group and the function :math:`u(x)` can be
    chosen from a number of different styles:
    1. Shifted:
    .. math::
        u(x) = \\frac{1}{x} - 1
    2. Shifted-force:
    .. math::
        u(x) = \\frac{1}{x} + x - 2
    3. Conductor Reaction-field (default):
    .. math::
        u(x) = \\frac{1}{x} + \\frac{x^2}{2} - \\frac{3}{2}
    4. Reaction-field (with finite dielectric constant :math:`\\epsilon`):
    .. math::
        u(x) = \\frac{1}{x} + \\frac{(\\epsilon-1)x^2-3\\epsilon}{2\\epsilon+1}
    5. Damped:
    .. math::
        u(x) = \\frac{\\mathrm{erfc}(\\alpha_c x)}{x}
    6. Damped-shifted-force (DSF), with :math:`\\alpha_c = \\alpha r_c`:
    .. math::
        u(x) = \\frac{\\mathrm{erfc}(\\alpha_c x)}{x} - \\mathrm{erfc}(\\alpha_c) +
            \\left[\\mathrm{erfc}(\\alpha_c) +
            \\frac{2\\alpha_c e^{-\\alpha_c^2}}{\\sqrt{\\pi}}\\right]\\left(x - 1\\right)
    Parameters
    ----------
    group : list of int
        The atoms in the specified group.
    nbforce : openmm.NonbondedForce
        The NonbondedForce_ object from which the atom charges are imported.
    Keyword Args
    ------------
    style : str, default='conductor-reaction-field'
        The style of cutoff electrostatic potential to be used. Valid options are `shifted`,
        `shifted-force`, `reaction-field`, `conductor-reaction-field`, `damped`, and
        `damped-shifted-force`.
    damping_coefficient : float or unit.Quantity, default=0.2/unit.angstroms
        The damping coefficient :math:`\\alpha` in inverse distance unit.
    scaling_parameter_name : str, default='inOutCoulombScaling'
        A Context_ global parameter whose value will multiply, in the passed NonbondedForce_
        object, the charge parameters of all atoms in the specified group.
    pbc_for_exceptions : bool, default=False
        Whether to consider periodic boundary conditions for exceptions in the NonbondedForce_
        object. This might be necessary if the specified group contains several detached
        molecules or one long molecule.
    Raises
    ------
    ValueError:
        Raised if there are any exceptions in the NonbondedForce_ object involving
        cross-group (i.e. in/out) atom pairs.
    ValueError:
        Raised if an invalid `style` keyword value is passed.
    """
    def __init__(self, group, nbforce, style='conductor-reaction-field',
                 damping_coefficient=0.2/unit.angstroms,
                 scaling_parameter_name='inOutCoulombScaling', pbc_for_exceptions=False):
        # Work with unitless values; rc is the cutoff distance.
        rc = _standardized(nbforce.getCutoffDistance())
        alpha_c = _standardized(damping_coefficient)*rc
        # 138.935485 is the Coulomb constant 1/(4*pi*eps0) in OpenMM's
        # kJ/mol * nm / e^2 units; dividing by rc matches the docstring prefactor.
        prefix = f'{138.935485/rc}*charge1*charge2'
        if style == 'shifted':
            u_C = '1/x - 1'
        elif style == 'shifted-force':
            u_C = '1/x + x - 2'
        elif style == 'conductor-reaction-field':
            # Limit of the reaction-field case for epsilon -> infinity.
            u_C = '1/x + x^2/2 - 3/2'
        elif style == 'reaction-field':
            epsilon = nbforce.getReactionFieldDielectric()
            krf = (epsilon - 1)/(2*epsilon + 1)
            crf = 3*epsilon/(2*epsilon + 1)
            u_C = f'1/x + {krf}*x^2 - {crf}'
        elif style == 'damped':
            u_C = f'erfc({alpha_c}*x)/x'
        elif style == 'damped-shifted-force':
            A = math.erfc(alpha_c)
            B = A + 2*alpha_c*math.exp(-alpha_c**2)/math.sqrt(math.pi)
            # NOTE(review): the docstring's DSF formula expands to
            # erfc(a*x)/x + B*x - (A+B), but the code uses coefficients
            # {A+B} and {2*A+B}, i.e. an extra A*(x-1) term (still zero at
            # x = 1, but with nonzero force at the cutoff) — confirm which
            # form is intended.
            u_C = f'erfc({alpha_c}*x)/x + {A+B}*x - {2*A+B}'
        else:
            raise ValueError("Invalid cutoff electrostatic style")
        super().__init__(f'{prefix}*({u_C}); x=r/{rc}')
        parameters = self._get_parameters(nbforce)
        # If a charge offset driven by the scaling parameter already exists for
        # atom i, remember its index and use the offset's charge as the atom's
        # effective charge.
        offset_index = {}
        for index in range(nbforce.getNumParticleParameterOffsets()):
            variable, i, charge, _, _ = nbforce.getParticleParameterOffset(index)
            if variable == scaling_parameter_name:
                offset_index[i] = index
                parameters[i] = _ParamTuple(charge, parameters[i].sigma, parameters[i].epsilon)
        self.addPerParticleParameter('charge')
        for parameter in parameters:
            self.addParticle([parameter.charge])
        self._update_nonbonded_force(group, nbforce, parameters, pbc_for_exceptions)
        self._import_properties(nbforce)
        self.addInteractionGroup(set(group), set(range(nbforce.getNumParticles())) - set(group))
        self.setUseLongRangeCorrection(False)
        # The map object is consumed by this single membership test below.
        global_vars = map(nbforce.getGlobalParameterName, range(nbforce.getNumGlobalParameters()))
        if scaling_parameter_name not in global_vars:
            nbforce.addGlobalParameter(scaling_parameter_name, 0.0)
        # Zero the direct charges of the group atoms and re-introduce them as
        # parameter offsets, so that the global scaling parameter controls them.
        for i in group:
            charge, sigma, epsilon = parameters[i]
            nbforce.setParticleParameters(i, 0.0, sigma, epsilon)
            input = [scaling_parameter_name, i, charge, 0.0, 0.0]
            if i in offset_index:
                nbforce.setParticleParameterOffset(offset_index[i], *input)
            else:
                nbforce.addParticleParameterOffset(*input)
| 42.781818 | 114 | 0.61836 |
79e141a42699ecad6802794e90e705086ca94a8a | 442 | py | Python | configs/mmedit/super-resolution/super-resolution_tensorrt_static-256x256.py | zhiqwang/mmdeploy | 997d111a6f4ca9624ab3b36717748e6ce002037d | [
"Apache-2.0"
] | 746 | 2021-12-27T10:50:28.000Z | 2022-03-31T13:34:14.000Z | configs/mmedit/super-resolution/super-resolution_tensorrt_static-256x256.py | zhiqwang/mmdeploy | 997d111a6f4ca9624ab3b36717748e6ce002037d | [
"Apache-2.0"
] | 253 | 2021-12-28T05:59:13.000Z | 2022-03-31T18:22:25.000Z | configs/mmedit/super-resolution/super-resolution_tensorrt_static-256x256.py | zhiqwang/mmdeploy | 997d111a6f4ca9624ab3b36717748e6ce002037d | [
"Apache-2.0"
] | 147 | 2021-12-27T10:50:33.000Z | 2022-03-30T10:44:20.000Z | _base_ = ['./super-resolution_static.py', '../../_base_/backends/tensorrt.py']
# Export the ONNX model with a fixed 256x256 spatial input.
onnx_config = dict(input_shape=[256, 256])
backend_config = dict(
    # 1 GiB (1 << 30 bytes) of TensorRT builder workspace.
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Static-shape engine: min/opt/max shapes are identical
                    # (batch 1, 3 channels, 256x256).
                    min_shape=[1, 3, 256, 256],
                    opt_shape=[1, 3, 256, 256],
                    max_shape=[1, 3, 256, 256])))
    ])
| 34 | 78 | 0.529412 |
684b2c14f8780372c2bfad5bb49cf62b13c3352a | 251 | py | Python | app/__init__.py | configuresystems/restful-api-with-flask | b6e4da905446fac8f899653a6f7a5408d6419fc4 | [
"MIT"
] | 2 | 2015-05-07T18:39:12.000Z | 2016-07-01T20:06:06.000Z | app/__init__.py | configuresystems/restful-api-with-flask | b6e4da905446fac8f899653a6f7a5408d6419fc4 | [
"MIT"
] | null | null | null | app/__init__.py | configuresystems/restful-api-with-flask | b6e4da905446fac8f899653a6f7a5408d6419fc4 | [
"MIT"
] | 2 | 2016-03-02T05:33:51.000Z | 2021-02-24T02:28:26.000Z | from flask import Flask
# Create the central Flask application object for this package.
app = Flask(__name__)
# Load application settings (debug flags, constants, etc.) from the
# DevConfiguration class defined in config.py.
app.config.from_object('config.DevConfiguration')
# Deliberately imported at the bottom (standard Flask pattern): `app` must
# already exist when the views module — which presumably imports it — is
# loaded, otherwise a circular import would occur.
from app import views
| 20.916667 | 54 | 0.780876 |
7ff45e31459b990dafb43dac35cf539aaf965455 | 8,033 | py | Python | solver.py | chenbys/MPRM | 26ae0797c15c095a7712823c899fbeef1df99274 | [
"MIT"
] | null | null | null | solver.py | chenbys/MPRM | 26ae0797c15c095a7712823c899fbeef1df99274 | [
"MIT"
] | null | null | null | solver.py | chenbys/MPRM | 26ae0797c15c095a7712823c899fbeef1df99274 | [
"MIT"
] | null | null | null | from losses import multilabel_soft_margin_loss
from model import fc_resnet50, finetune
from prm.prm import peak_response_mapping, prm_visualize
from optims import sgd_optimizer
import shutil
import time, os
import torch
import numpy as np
from typing import Tuple, List, Union, Dict, Iterable
from torch.autograd import Variable
import matplotlib.pyplot as plt
from scipy.misc import imresize
import torch.nn as nn
image_size = 448
class Solver(object):
    """
    Training/inference driver for a Peak Response Mapping (PRM) model built on
    an fc_resnet50 backbone. Handles optimizer setup, checkpointing, training
    loops, and visual inference with matplotlib.
    """
    def __init__(self, config):
        """Initialize configurations."""
        self.basebone = fc_resnet50(20, True)
        self.model = nn.DataParallel(peak_response_mapping(self.basebone, **config['model']))
        self.criterion = multilabel_soft_margin_loss
        self.max_epoch = config['max_epoch']
        self.cuda = (config['device'] == 'cuda')
        # Select which parameter groups to fine-tune (and their settings).
        self.params = finetune(self.model, **config['finetune'])
        # print(self.params)
        self.optimizer = sgd_optimizer(self.params, **config['optimizer'])
        # Effectively disables LR updates (never reached).
        self.lr_update_step = 999999
        self.lr = config['optimizer']['lr']
        # Directory where checkpoints are saved/restored.
        self.snapshot = config['snapshot']
        if self.cuda:
            self.model.to('cuda')
    def print_network(self, model, name):
        """Print out the network information."""
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(model)
        print(name)
        print("The number of parameters: {}".format(num_params))
    def restore_model(self, resume_epoch):
        """Restore the trained generator and discriminator."""
        print('Loading the trained models from step %d ...' % (resume_epoch))
        # File name matches save_checkpoint's 'prm_' prefix + '_%d_' pattern.
        model_path = os.path.join(self.snapshot, 'prm__%d_checkpoint.pth.tar' % (resume_epoch))
        checkpoint = torch.load(model_path)
        # NOTE(review): start_epoch is computed but the return is commented
        # out, so callers cannot actually resume from it.
        start_epoch = checkpoint['epoch']
        # strict=False: tolerate missing/unexpected keys in the state dict.
        self.model.load_state_dict(checkpoint['state_dict'], False)
        self.lr = checkpoint['lr']
        # return start_epoch + 1
    def update_lr(self, lr):
        """Set the learning rate of every optimizer parameter group to `lr`."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr
    def count_parameters(self, model):
        """Return the number of trainable (requires_grad) parameters of `model`."""
        return sum(p.numel() for p in model.parameters() if p.requires_grad)
    def save_checkpoint(self, state, path, prefix, epoch, filename='checkpoint.pth.tar'):
        """Save `state` as '<path>/<prefix>_<epoch>_<filename>' and copy it to '<prefix>_latest'."""
        prefix_save = os.path.join(path, prefix)
        name = '%s_%d_%s' % (prefix_save, epoch, filename)
        torch.save(state, name)
        shutil.copyfile(name, '%s_latest.pth.tar' % (prefix_save))
    def train(self, train_data_loader, train_logger, val_data_loader=None, val_logger=None, resume_iters=0):
        """
        Run the full training loop, checkpointing once per epoch.

        NOTE(review): `resume_iters`, `val_data_loader` and `val_logger` are
        currently unused (the resume logic is commented out below).
        """
        # torch.manual_seed(999)
        # Start training from scratch or resume training.
        # start_epoch = 0
        # if resume_iters:
        #     start_epoch = self.restore_model(resume_iters)
        # Start training.
        print('Start training...')
        since = time.time()
        self.model.train()  # Set model to training mode
        for epoch in range(self.max_epoch):
            average_loss = 0.
            for iteration, (inp, tar) in enumerate(train_data_loader):
                # Debug probe: print one backbone weight to verify it updates.
                if iteration % 50 == 0:
                    print(self.basebone.features[0].weight[0][0][0])
                if self.cuda:
                    inp = Variable(inp.cuda())
                    tar = Variable(tar.cuda())
                else:
                    inp = Variable(inp)
                    tar = Variable(tar)
                _output = self.model(inp)
                loss = self.criterion(_output, tar, difficult_samples=True)
                # average_loss accumulates the running sum; it is divided by the
                # iteration count when printed/averaged.
                average_loss += loss.item()
                print('trainning loss at (epoch %d, iteration %d) = %4f' % (
                    epoch + 1, iteration, average_loss / (iteration + 1)))
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            #################### LOGGING #############################
            lr = self.optimizer.param_groups[0]['lr']
            train_logger.add_scalar('lr', lr, epoch)
            # NOTE(review): this logs the LAST batch's loss for the epoch, not
            # the epoch average (which is in `average_loss`).
            train_logger.add_scalar('loss', loss, epoch)
            # NOTE(review): 'error' stores the epoch's summed (not averaged) loss.
            self.save_checkpoint({'arch' : 'prm',
                                  'lr' : self.lr,
                                  'epoch' : epoch,
                                  'state_dict': self.model.state_dict(),
                                  'error' : average_loss},
                                 self.snapshot, 'prm_', epoch)
            print('training %d epoch,loss is %.4f' % (epoch + 1, average_loss))
        # TO-DO: modify learning rates.
        time_elapsed = time.time() - since
        print('train phrase completed in %.0fm %.0fs' % (time_elapsed // 60, time_elapsed % 60))
    def inference(self, input_var, raw_img, epoch=0, proposals=None):
        """
        Restore the checkpoint of `epoch`, run classification, extract visual
        cues (class/peak response maps), and — when `proposals` are given —
        retrieve instance masks; everything is visualized with matplotlib.

        NOTE(review): scipy.misc.imresize (used below) was removed in modern
        SciPy — this requires a pinned old SciPy version or a migration.
        """
        self.restore_model(epoch)
        plt.figure(figsize=(5, 5))
        plt.imshow(raw_img)
        self.model.eval()
        # print(input_var)
        # Pascal VOC 2007/2012 class names, in label-index order.
        class_names = [
            'aeroplane', 'bicycle', 'bird', 'boat',
            'bottle', 'bus', 'car', 'cat', 'chair',
            'cow', 'diningtable', 'dog', 'horse',
            'motorbike', 'person', 'pottedplant',
            'sheep', 'sofa', 'train', 'tvmonitor']
        print('Object categories: ' + ', '.join(class_names))
        print('Object categories in the image:')
        confidence = self.model(input_var)
        # Positive confidence is treated as "class present".
        for idx in range(len(class_names)):
            if confidence.data[0, idx] > 0:
                print('[class_idx: %d] %s (%.2f)' % (idx, class_names[idx], confidence[0, idx]))
        # Visual cue extraction
        self.model.inference()
        visual_cues = self.model(input_var, peak_threshold=30)
        # print(visual_cues)
        if visual_cues is None:
            print('No class peak response detected')
        else:
            confidence, class_response_maps, class_peak_responses, peak_response_maps = visual_cues
            # Visualize the class response map of the top-scoring class.
            _, class_idx = torch.max(confidence, dim=1)
            class_idx = class_idx.item()
            num_plots = 2 + len(peak_response_maps)
            print(num_plots, ' numplots')
            f, axarr = plt.subplots(1, num_plots, figsize=(num_plots * 4, 4))
            axarr[0].imshow(imresize(raw_img, (image_size, image_size), interp='bicubic'))
            axarr[0].set_title('Image')
            axarr[0].axis('off')
            axarr[1].imshow(class_response_maps[0, class_idx].cpu(), interpolation='bicubic')
            axarr[1].set_title('Class Response Map ("%s")' % class_names[class_idx])
            axarr[1].axis('off')
            # Peak response maps, sorted by their peak value (last element).
            for idx, (prm, peak) in enumerate(
                    sorted(zip(peak_response_maps, class_peak_responses), key=lambda v: v[-1][-1])):
                axarr[idx + 2].imshow(prm.cpu(), cmap=plt.cm.jet)
                axarr[idx + 2].set_title('Peak Response Map ("%s")' % (class_names[peak[1].item()]))
                axarr[idx + 2].axis('off')
        # Weakly supervised instance segmentation
        # predict instance masks via proposal retrieval
        instance_list = self.model(input_var, retrieval_cfg=dict(proposals=proposals, param=(0.95, 1e-5, 0.8)))
        # visualization
        if instance_list is None:
            print('No object detected')
        else:
            # peak response maps are merged if they select similar proposals
            vis = prm_visualize(instance_list, class_names=class_names)
            f, axarr = plt.subplots(1, 3, figsize=(12, 5))
            axarr[0].imshow(imresize(raw_img, (image_size, image_size), interp='bicubic'))
            axarr[0].set_title('Image')
            axarr[0].axis('off')
            axarr[1].imshow(vis[0])
            axarr[1].set_title('Prediction')
            axarr[1].axis('off')
            axarr[2].imshow(vis[1])
            axarr[2].set_title('Peak Response Maps')
            axarr[2].axis('off')
        plt.show()
    def validation(self, data_loader, test_logger, inference_epoch=0):
        """Placeholder for a validation loop — not implemented yet."""
        # to-do
        pass
| 38.620192 | 111 | 0.574505 |
d74d14ec4f303f9d49b91305627c790201c98a50 | 6,439 | py | Python | test/import_config_test.py | PalmerBSocrata/socrata-py | 794a41bcbe50c7556c670681f5e38cb2366f8dcc | [
"Apache-2.0"
] | null | null | null | test/import_config_test.py | PalmerBSocrata/socrata-py | 794a41bcbe50c7556c670681f5e38cb2366f8dcc | [
"Apache-2.0"
] | null | null | null | test/import_config_test.py | PalmerBSocrata/socrata-py | 794a41bcbe50c7556c670681f5e38cb2366f8dcc | [
"Apache-2.0"
] | null | null | null | import unittest
from socrata import Socrata
from socrata.authorization import Authorization
from test.auth import auth, TestCase
import uuid
from socrata.operations.utils import SocrataException
class ImportConfigTest(TestCase):
    """
    Integration tests for Socrata import configs (create / list / lookup /
    upload / update / delete). Each test creates a uniquely-named config;
    `Socrata(auth)` presumably talks to a real (test) Socrata domain, so these
    tests require network access and valid credentials from test.auth.
    """
    def test_create_config(self):
        # Unique name avoids collisions between test runs on the same domain.
        name = "some_config %s" % str(uuid.uuid4())
        p = Socrata(auth)
        (ok, config) = p.configs.create(name, "replace")
        self.assertTrue(ok, config)
        self.assertEqual(config.attributes['name'], name)
    def test_create_config_with_non_defaults(self):
        name = "some_config %s" % str(uuid.uuid4())
        p = Socrata(auth)
        (ok, config) = p.configs.create(
            name,
            "replace",
            parse_options = {
                "encoding": "utf8",
                "header_count": 2,
                "column_header": 2
            },
            columns = [
                {
                    "field_name": "foo",
                    "display_name": "Foo is the display name",
                    "transform_expr": "to_number(`foo`)"
                }
            ]
        )
        self.assertTrue(ok, config)
        self.assertEqual(config.attributes['name'], name)
        # The server fills in the remaining parse options with defaults.
        self.assertEqual(config.attributes['parse_options'], {
            "encoding": "utf8",
            "header_count": 2,
            "column_header": 2,
            "quote_char": '"',
            "parse_source": True,
            "column_separator": ",",
            "remove_empty_rows": True,
            "trim_whitespace": True
        })
        # Likewise, column attributes are completed with server defaults.
        self.assertEqual(config.attributes['columns'], [
            {
                "field_name": "foo",
                "display_name": "Foo is the display name",
                "transform_expr": "to_number(`foo`)",
                "format": {},
                "description": "",
                "is_primary_key": None,
                "flags": []
            }
        ])
    def test_list_operations(self):
        p = Socrata(auth)
        name = "some_config %s" % str(uuid.uuid4())
        (ok, config) = p.configs.create(name, "replace")
        self.assertTrue(ok, config)
        (ok, configs) = p.configs.list()
        # Assert there's some config on this domain where the
        # name is what we want
        self.assertTrue(any([
            config.attributes['name'] == name
            for config in configs
        ]))
    def test_lookup_config(self):
        p = Socrata(auth)
        name = "some_config %s" % str(uuid.uuid4())
        (ok, config) = p.configs.create(name, "replace")
        self.assertTrue(ok, config)
        (ok, config) = p.configs.lookup(name)
        self.assertTrue(ok, config)
        self.assertEqual(config.attributes['name'], name)
    def test_upload_to_config(self):
        p = Socrata(auth)
        name = "some_config %s" % str(uuid.uuid4())
        (ok, config) = p.configs.create(name, "replace")
        self.assertTrue(ok, config)
        p = Socrata(auth)
        # Upload a file-like object through the previously created config.
        with open('test/fixtures/simple.csv', 'rb') as my_file:
            (rev, job) = p.using_config(name, self.view).csv(my_file)
            self.assertEqual(rev.attributes['action']['type'], 'replace')
            self.assertTrue(job.attributes['created_at'])
    def test_config_not_found(self):
        p = Socrata(auth)
        # Uploading through a non-existent config name must raise.
        with open('test/fixtures/simple.csv', 'rb') as my_file:
            with self.assertRaises(SocrataException):
                (rev, job) = p.using_config("nope", self.view).csv(my_file)
    def test_source_to_config(self):
        p = Socrata(auth)
        name = "some_config %s" % str(uuid.uuid4())
        (ok, config) = p.configs.create(name, "replace")
        self.assertTrue(ok, config)
        p = Socrata(auth)
        # A raw CSV string (instead of a file object) also works, as long as a
        # filename is supplied.
        (rev, job) = p.using_config(name, self.view).csv(
            """a,b,c
1,2,3
4,5,6
7,8,9
""",
            filename = "abc.csv"
        )
        self.assertEqual(rev.attributes['action']['type'], 'replace')
        self.assertTrue(job.attributes['created_at'])
    def test_show_config(self):
        p = Socrata(auth)
        name = "some_config %s" % str(uuid.uuid4())
        (ok, config) = p.configs.create(name, "replace")
        self.assertTrue(ok, config)
        (ok, config) = config.show()
        self.assertTrue(ok, config)
    def test_delete_config(self):
        p = Socrata(auth)
        name = "some_config %s" % str(uuid.uuid4())
        (ok, config) = p.configs.create(name, "replace")
        self.assertTrue(ok, config)
        (ok, _) = config.delete()
        self.assertTrue(ok)
        # After deletion, show() must fail.
        (ok, _) = config.show()
        self.assertFalse(ok)
    def test_update_config(self):
        p = Socrata(auth)
        name = "some_config %s" % str(uuid.uuid4())
        (ok, config) = p.configs.create(name, "replace")
        self.assertTrue(ok, config)
        columns = [
            {
                "field_name": "foo",
                "display_name": "Foo is the display name",
                "transform_expr": "to_number(`foo`)",
                "format": {},
                "description": "",
                "is_primary_key": None,
                "flags": []
            }
        ]
        (ok, config) = config.update({
            'data_action': 'update',
            'columns': columns
        })
        self.assertTrue(ok, config)
        self.assertEqual(config.attributes["data_action"], "update")
        self.assertEqual(config.attributes["columns"], columns)
    def test_update_config_using_builder(self):
        p = Socrata(auth)
        name = "some_config %s" % str(uuid.uuid4())
        (ok, config) = p.configs.create(name, "replace")
        self.assertTrue(ok, config)
        # NOTE(review): `columns` is built here but never passed to the builder
        # below — likely leftover from copying test_update_config.
        columns = [
            {
                "field_name": "foo",
                "display_name": "Foo is the display name",
                "transform_expr": "to_number(`foo`)"
            }
        ]
        # Fluent builder API: queue several parse-option changes, then run().
        (ok, config) = config\
            .change_parse_option('header_count').to(2)\
            .change_parse_option('encoding').to('utf16')\
            .change_parse_option('column_header').to(2)\
            .run()
        self.assertTrue(ok, config)
        parse_options = config.attributes['parse_options']
        self.assertEqual(parse_options['header_count'], 2)
        self.assertEqual(parse_options['column_header'], 2)
        self.assertEqual(parse_options['encoding'], 'utf16')
| 32.034826 | 75 | 0.533623 |
fb133bf124cfc37f7f2cd720533dc81ca0feb91f | 6,207 | py | Python | cqw2_calibrate/algae_rates_constants.py | rmlz/cqw2calibratio | 23605b40c1b093351fc131e36b1cb941c98530cf | [
"MIT"
] | null | null | null | cqw2_calibrate/algae_rates_constants.py | rmlz/cqw2calibratio | 23605b40c1b093351fc131e36b1cb941c98530cf | [
"MIT"
] | null | null | null | cqw2_calibrate/algae_rates_constants.py | rmlz/cqw2calibratio | 23605b40c1b093351fc131e36b1cb941c98530cf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 16:30:52 2019
@author: Ramon Barros
CE-QUAL-W2 Calibration Tool v0.0.1
MODEL ALGAE GROUPS RATES & CONSTANTS
paramcontrol(name, calibrate, value, low, high, guess)
name = Parameter or setting name,
calibrate = boolean, True if the parameter must be calibrated (value will be not used)
value = if calibrate = False, value will be inputed to the parameter field
low = minimum value for calibration purposes
high = maximum value for calibration purposes
guess = optimum guess for calibration purposes'''
"""
from cqw2_calibrate.paramcontrol import paramcontrol
###############################################################################
#////////////////// ALGAE GROUPS RATES & CONSTANTS ////////////////////////////
###############################################################################
algae_rates_constants = {
    # --- Respiration and nutrient rates ---
    'number_of_algal_groups' : paramcontrol('NAL', False," 0",1,1,1),
    'algal_growth_rate': [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AG_1', False," 2.5",1,1,1),
            ],
    'algal_darkrespiration_rate': [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AR_1', False," 0.04",1,1,1),
            ],
    'algal_excretion_rate': [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AE_1', False," 0.04",1,1,1),
            ],
    'algal_mortality_rate': [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AM_1', False," 0.01",1,1,1),
            ],
    'algal_settling_rate' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AS_1', False," 0.01",1,1,1),
            ],
    'algal_halfsaturation_P' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AHSP_1', False," 0.003",1,1,1),
            ],
    'algal_halfsaturation_N' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AHSN_1', False," 0.01",1,1,1),
            ],
    'algal_halfsaturation_SI' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AHSSI_1', False," 0",1,1,1),
            ],
    'algal_light_saturation' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('ASAT_1', False," 75",1,1,1),
            ],
    # --- Algal temperature rate multipliers (CE-QUAL-W2 AT1..AT4 / AK1..AK4) ---
    'algal_lower_temp_growth' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AT1_1', False," 5",1,1,1),
            ],
    'algal_lower_temp_max_growth': [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AT2_1', False," 15",1,1,1),
            ],
    'algal_upper_temp_max_growth' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AT3_1', False," 25",1,1,1),
            ],
    'algal_upper_temp_growth' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AT4_1', False," 30",1,1,1),
            ],
    'algal_fraction_algal_growth_T1' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AK1_1', False," 0.01",1,1,1),
            ],
    'algal_fraction_algal_growth_T2' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AK2_1', False," 0.9",1,1,1),
            ],
    'algal_fraction_algal_growth_T3' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AK3_1', False," 0.99",1,1,1),
            ],
    'algal_fraction_algal_growth_T4' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('AK4_1', False," 0.1",1,1,1),
            ],
    # --- Algae stoichiometry ---
    'algal_fraction_P' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('ALGP_1', False," 0.005",1,1,1),
            ],
    'algal_fraction_N' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('ALGN_1', False," 0.08",1,1,1),
            ],
    'algal_fraction_C' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('ALGC_1', False," 0.45",1,1,1),
            ],
    'algal_fraction_Si' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('ALGSI_1', False," 0",1,1,1),
            ],
    'algal_chlorophyll_algae_ratio' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('ACHLA_1', False," 100",1,1,1),
            ],
    'algal_fraction_algae_lost_to_POM' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('ALPOM_1', False," 0.8",1,1,1),
            ],
    'algal_ammonia_equation' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('ANEQN_1', False," 1",1,1,1),
            ],
    'algal_ammonia_halfsat_coeff' : [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('ANPR_1', False," 0.001",1,1,1),
            ],
    # NOTE(review): the two keys below look swapped relative to their card
    # names — elsewhere in this file 'AG' means growth and 'AR' respiration,
    # yet the '..._growth' key carries card 'O2AR_1' and '..._respiration'
    # carries 'O2AG_1'. Confirm against the CE-QUAL-W2 user manual.
    'algal_oxygen_equiv_om_algal_growth': [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('O2AR_1', False," 1.1",1,1,1),
            ],
    'algal_oxygen_equiv_om_algal_respiration': [ #The number of entries must be equal to the number_of_algal_groups!
            paramcontrol('O2AG_1', False," 1.4",1,1,1),
            ],
} | 55.419643 | 120 | 0.571613 |
6487f18e83c29a9b8ecac6b2faf2328ee130c1ef | 4,585 | py | Python | app/core/models.py | Akshay-ch-dj/RESTapi-app-series | 0a7ca3384e3d8d6b2d1c746b23f95426988a9bf9 | [
"MIT"
] | null | null | null | app/core/models.py | Akshay-ch-dj/RESTapi-app-series | 0a7ca3384e3d8d6b2d1c746b23f95426988a9bf9 | [
"MIT"
] | null | null | null | app/core/models.py | Akshay-ch-dj/RESTapi-app-series | 0a7ca3384e3d8d6b2d1c746b23f95426988a9bf9 | [
"MIT"
] | null | null | null | import uuid
import os
from django.db import models
# The base classes need to use when customizing or overriding Django User model
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
# To add timezone based on settings UTC
from django.utils import timezone
# To add images to api, helper function, instance-creates the path
def series_image_file_path(instance, filename):
    """Build a unique upload path for a new series image file.

    The original file name is discarded — only its extension is kept — and a
    random UUID4 is substituted, so uploaded files can never collide.
    ``instance`` is required by Django's ``upload_to`` signature but unused.
    """
    extension = filename.rsplit('.', 1)[-1]
    unique_name = f"{uuid.uuid4()}.{extension}"
    return os.path.join('uploads/series/', unique_name)
# Custom manager-default django behaviour changed, usname replaced with Email
class UserManager(BaseUserManager):
    """Model manager that creates users keyed by email instead of username."""

    def create_user(self, email, password=None, **extra_fields):
        """Create, save and return a regular user.

        Additional model fields may be supplied via ``extra_fields``.
        Raises ``ValueError`` when no email address is given.
        """
        if not email:
            raise ValueError("User must have an email address")
        # normalize_email() lower-cases the domain half of the address.
        normalized = self.normalize_email(email)
        user = self.model(email=normalized, **extra_fields)
        # set_password() stores a salted hash, never the raw password.
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """Create, save and return a superuser (staff + superuser flags set)."""
        user = self.create_user(email, password)
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
# customizing the django default user model, need to configure in settings.py
# Before the first migrate, put:- "AUTH_USER_MODEL = 'core.User'"
class User(AbstractBaseUser, PermissionsMixin):
    """
    Custom User model that uses 'email' instead of 'username'
    """
    # Email doubles as the login identifier, hence unique=True.
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    # Accounts are active by default but can be deactivated instead of deleted.
    is_active = models.BooleanField(default=True)
    # Non-staff by default; staff users can access the Django admin site.
    is_staff = models.BooleanField(default=False)
    # Attach the custom manager so create_user()/create_superuser() use email.
    objects = UserManager()
    # Tell Django's auth framework to authenticate by email, not username.
    USERNAME_FIELD = 'email'
    def __str__(self):
        """Return the user's email address as the string representation."""
        return self.email
# For the series app 'Tag' model
# For the series app 'Tag' model
class Tag(models.Model):
    """
    Tag that can be attached to a series.
    """
    name = models.CharField(max_length=255)
    # Owning user.  Referenced through settings.AUTH_USER_MODEL rather than
    # the User class directly, per Django best practice; deleting the user
    # cascades to their tags.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    def __str__(self):
        """Return the tag's name."""
        return self.name
# For the series app 'Character' model
# For the series app 'Character' model
class Character(models.Model):
    """
    A character appearing in a series.
    """
    name = models.CharField(max_length=255)
    # Owning user; deleting the user cascades to their characters.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    def __str__(self):
        """Return the character's name."""
        return self.name
# Real Series MODEL
# Main Series model
class Series(models.Model):
    """
    A tracked series, owned by a user, with tags, characters and an image.
    """
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    title = models.CharField(max_length=255)
    start_date = models.DateTimeField(default=timezone.now)
    # NOTE(review): the meaning of this boolean (finished? airing?) is not
    # visible here -- confirm against the serializers/views.
    status = models.BooleanField(default=False)
    # NOTE(review): no bounds are enforced on watch_rate; confirm range.
    watch_rate = models.IntegerField()
    # 4 digits with 2 decimal places, i.e. values up to 99.99.
    rating = models.DecimalField(max_digits=4, decimal_places=2)
    # 'blank=True' makes this CharField optional in forms/validation.
    link = models.CharField(max_length=255, blank=True)
    # Tags and characters attach as many-to-many relationships.
    characters = models.ManyToManyField('Character')
    tags = models.ManyToManyField('Tag')
    # Uploaded images are renamed to a random UUID by series_image_file_path.
    image = models.ImageField(null=True, upload_to=series_image_file_path)
    def __str__(self):
        """Return the series title."""
        return self.title
| 33.467153 | 79 | 0.686369 |
880648aa5963abbca3e9c5a3040fa310340c7305 | 5,222 | py | Python | docs/source/conf.py | Coldog2333/Financial-NLP | 237d10d0984e8d27ff30ccca9f4be141d585c727 | [
"Apache-2.0"
] | 114 | 2018-08-13T07:11:43.000Z | 2022-03-16T03:53:51.000Z | docs/source/conf.py | Coldog2333/Financial-NLP | 237d10d0984e8d27ff30ccca9f4be141d585c727 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | Coldog2333/Financial-NLP | 237d10d0984e8d27ff30ccca9f4be141d585c727 | [
"Apache-2.0"
] | 26 | 2018-10-14T07:12:20.000Z | 2021-01-30T16:23:20.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Financial-NLP documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 30 23:34:21 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.mathjax',
              'sphinx.ext.viewcode',
              'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
# NOTE: '.templates' is a hidden directory (non-standard; the usual name
# is '_templates') -- kept as-is because the directory layout matches it.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Financial-NLP'
copyright = '2018, Junfeng Jiang, Jiahao Li'
author = 'Junfeng Jiang, Jiahao Li'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#
# Fixed: Sphinx expects a locale code here.  The previous value 'chinese'
# is not a recognized code, so Sphinx silently fell back to English.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# NOTE: '.static' is a hidden directory (the conventional name is '_static').
html_static_path = ['.static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Financial-NLPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Financial-NLP.tex', 'Financial-NLP Documentation',
     'Junfeng Jiang, Jiahao Li', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'financial-nlp', 'Financial-NLP Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the Texinfo description below is still the
# sphinx-quickstart placeholder text; consider filling it in.
texinfo_documents = [
    (master_doc, 'Financial-NLP', 'Financial-NLP Documentation',
     author, 'Financial-NLP', 'One line description of project.',
     'Miscellaneous'),
]
| 30.360465 | 79 | 0.682114 |
019203c382d57dabd5def1bb2269ab916296e6f2 | 397 | py | Python | microservices/landing/gcd/app/app.py | Rishab2707/Cloud-Computing-Hack3 | 0528b08b205c1b93035f915e05cd09b56ae08cf6 | [
"MIT"
] | null | null | null | microservices/landing/gcd/app/app.py | Rishab2707/Cloud-Computing-Hack3 | 0528b08b205c1b93035f915e05cd09b56ae08cf6 | [
"MIT"
] | null | null | null | microservices/landing/gcd/app/app.py | Rishab2707/Cloud-Computing-Hack3 | 0528b08b205c1b93035f915e05cd09b56ae08cf6 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_restful import Resource,Api
import math
# WSGI application object for the GCD microservice.
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source; load it from
# configuration or an environment variable for any non-demo deployment.
app.secret_key = 'thisisjustarandomstring'
# REST API wrapper; resources are registered on it below.
api=Api(app)
class Gcd(Resource):
    """REST resource returning the greatest common divisor of two path args."""

    def get(self, n1, n2):
        # Path parameters arrive as strings; convert before computing.
        a, b = int(n1), int(n2)
        return math.gcd(a, b)
# Route: GET /<n1>/<n2> responds with gcd(n1, n2).
api.add_resource(Gcd, '/<n1>/<n2>')
if __name__ == '__main__':
    # NOTE(review): debug=True together with host="0.0.0.0" exposes the
    # Werkzeug debugger on all interfaces -- development settings only.
    app.run(
        debug=True,
        port=5055,
        host="0.0.0.0"
    )
| 18.045455 | 42 | 0.627204 |
4c26e9ef152e4600ea37aade504dc1c01d75183b | 9,706 | py | Python | docassemble_webapp/setup.py | Partnervine/docassemble | 05a154d4788ada27ad220a0d95456b0b0a26c46b | [
"MIT"
] | null | null | null | docassemble_webapp/setup.py | Partnervine/docassemble | 05a154d4788ada27ad220a0d95456b0b0a26c46b | [
"MIT"
] | null | null | null | docassemble_webapp/setup.py | Partnervine/docassemble | 05a154d4788ada27ad220a0d95456b0b0a26c46b | [
"MIT"
] | null | null | null | import os
import sys
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Used to load README.md for the package's long description.  The
    original implementation leaked the file handle (open().read() with no
    close); a context manager now closes it deterministically.
    """
    # NOTE(review): consider passing encoding='utf-8' for reproducible
    # builds on platforms whose default encoding is not UTF-8.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
install_requires = [
'docassemble==1.2.96',
'docassemble.base==1.2.96',
'docassemble.demo==1.2.96',
"3to2==1.1.1",
"airtable-python-wrapper==0.15.2",
"alembic==1.6.2",
"aloe==0.2.0",
"amqp==5.0.6",
"ansicolors==1.1.8",
"asn1crypto==1.4.0",
"astunparse==1.6.3",
"atomicwrites==1.4.0",
"attrs==21.2.0",
"azure-common==1.1.27",
"azure-core==1.13.0",
"azure-identity==1.5.0",
"azure-keyvault-secrets==4.2.0",
"azure-nspkg==3.0.2",
"azure-storage-blob==12.8.1",
"Babel==2.9.1",
"bcrypt==3.2.0",
"beautifulsoup4==4.9.3",
"bidict==0.21.2",
"billiard==3.6.4.0",
"bleach==3.3.0",
"blinker==1.4",
"boto3==1.17.71",
"boto==2.49.0",
"botocore==1.20.71",
"cachetools==4.2.2",
"celery==5.0.5",
"certifi==2020.12.5",
"cffi==1.14.5",
"chardet==4.0.0",
"click-didyoumean==0.0.3",
"click-plugins==1.1.1",
"click-repl==0.1.6",
"click==7.1.2",
"colorama==0.4.4",
"configparser==5.0.2",
"convertapi==1.4.0",
"crayons==0.4.0",
"cryptography==3.4.7",
"da-pkg-resources==0.0.1",
"dnspython==1.16.0",
"Docassemble-Flask-User==0.6.24",
"Docassemble-Pattern==3.6.2",
"docassemble-textstat==0.7.1",
"docassemblekvsession==0.6",
"docopt==0.6.2",
"docutils==0.17.1",
"docxcompose==1.3.2",
"docxtpl==0.11.5",
"email-validator==1.1.2",
"et-xmlfile==1.1.0",
"eventlet==0.31.0",
"Flask-Babel==2.0.0",
"Flask-Cors==3.0.10",
"Flask-Login==0.5.0",
"Flask-Mail==0.9.1",
"Flask-SocketIO==5.0.1",
"Flask-SQLAlchemy==2.4.4",
"Flask-WTF==0.14.3",
"Flask==1.1.2",
"future==0.18.2",
"gcs-oauth2-boto-plugin==2.7",
"geographiclib==1.50",
"geopy==2.1.0",
"gherkin-official==4.1.3",
"google-api-core==1.26.3",
"google-api-python-client==2.3.0",
"google-auth-httplib2==0.1.0",
"google-auth-oauthlib==0.4.4",
"google-auth==1.30.0",
"google-cloud-core==1.5.0",
"google-cloud-storage==1.38.0",
"google-cloud-translate==3.1.0",
"google-crc32c==1.1.2",
"google-i18n-address==2.4.0",
"google-reauth==0.1.1",
"google-resumable-media==1.2.0",
"googleapis-common-protos==1.53.0",
"greenlet==1.1.0",
"grpcio==1.37.1",
"gspread==3.7.0",
"guess-language-spirit==0.5.3",
"httplib2==0.19.1",
"humanize==3.5.0",
"Hyphenate==1.1.0",
"idna==2.10",
"importlib-metadata==4.0.1",
"importlib-resources==5.1.2",
"iniconfig==1.1.1",
"iso8601==0.1.14",
"isodate==0.6.0",
"itsdangerous==2.0.0",
"jdcal==1.4.1",
"jeepney==0.6.0",
"jellyfish==0.6.1",
"Jinja2==3.0.0",
"jmespath==0.10.0",
"joblib==1.0.1",
"keyring==23.0.1",
"kombu==5.0.2",
"libcst==0.3.18",
"links-from-link-header==0.1.0",
"lxml==4.6.3",
"Mako==1.1.4",
"Marisol==0.3.0",
"Markdown==3.3.4",
"MarkupSafe==2.0.0",
"mdx-smartypants==1.5.1",
"minio==7.0.3",
"monotonic==1.6",
"msal-extensions==0.3.0",
"msal==1.11.0",
"msrest==0.6.21",
"mypy-extensions==0.4.3",
"namedentities==1.5.2",
"netifaces==0.10.9",
"nltk==3.5",
"nose==1.3.7",
"num2words==0.5.10",
"numpy==1.19.4",
"oauth2client==4.1.3",
"oauthlib==3.1.0",
"openpyxl==3.0.7",
"ordered-set==4.0.2",
"packaging==20.9",
"pandas==1.2.4",
"passlib==1.7.4",
"pathlib==1.0.1",
"pdfminer.six==20201018",
"phonenumbers==8.12.22",
"Pillow==8.2.0",
"pip==20.1.1",
"pkginfo==1.7.0",
"pluggy==0.13.1",
"ply==3.11",
"portalocker==1.7.1",
"prompt-toolkit==3.0.18",
"proto-plus==1.18.1",
"protobuf==3.16.0",
"psutil==5.8.0",
"psycopg2-binary==2.8.6",
"py==1.10.0",
"pyasn1-modules==0.2.8",
"pyasn1==0.4.8",
"pycountry==20.7.3",
"pycparser==2.20",
"pycryptodome==3.10.1",
"pycryptodomex==3.10.1",
"pycurl==7.43.0.6",
"Pygments==2.9.0",
"PyJWT==1.7.1",
"PyLaTeX==1.4.1",
"pyOpenSSL==20.0.1",
"pyotp==2.6.0",
"pyparsing==2.4.7",
"PyPDF2==1.26.0",
"pyPdf==1.13",
"pypdftk==0.5",
"pypng==0.0.20",
"PySocks==1.7.1",
"pytest==6.2.4",
"python-dateutil==2.8.1",
"python-docx==0.8.10",
"python-editor==1.0.4",
"python-engineio==4.1.0",
"python-http-client==3.3.2",
"python-ldap==3.3.1",
"python-socketio==5.2.1",
"pytz==2021.1",
"pyu2f==0.1.5",
"PyNaCl==1.4.0",
"PyYAML==5.4.1",
"pyzbar==0.1.8",
"qrcode==6.1",
"rauth==0.7.3",
"readme-renderer==29.0",
"redis==3.5.3",
"regex==2021.4.4",
"reportlab==3.3.0",
"repoze.lru==0.7",
"requests-oauthlib==1.3.0",
"requests-toolbelt==0.9.1",
"requests==2.25.1",
"retry-decorator==1.1.1",
"rfc3339==6.2",
"rfc3986==1.5.0",
"rsa==4.7.2",
"ruamel.yaml.clib==0.2.2",
"ruamel.yaml==0.17.4",
"s3transfer==0.4.2",
"s4cmd==2.1.0",
"scikit-learn==0.24.2",
"scipy==1.5.4",
"SecretStorage==3.3.1",
"selenium==3.141.0",
"sendgrid==6.7.0",
"simplekv==0.14.1",
"six==1.16.0",
"sklearn==0.0",
"SocksiPy-branch==1.1",
"sortedcontainers==2.3.0",
"soupsieve==2.2.1",
"SQLAlchemy==1.4.15",
"starkbank-ecdsa==1.1.0",
"tailer==0.4.1",
"telnyx==1.4.0",
"threadpoolctl==2.1.0",
"titlecase==2.0.0",
"toml==0.10.2",
"tqdm==4.60.0",
"twilio==6.58.0",
"twine==3.4.1",
"typing-extensions==3.10.0.0",
"typing-inspect==0.6.0",
"tzlocal==2.1",
"ua-parser==0.10.0",
"uritemplate==3.0.1",
"urllib3==1.26.5",
"us==2.0.2",
"user-agents==2.2.0",
"uWSGI==2.0.19.1",
"vine==5.0.0",
"wcwidth==0.2.5",
"webdriver-manager==3.4.1",
"webencodings==0.5.1",
"Werkzeug==2.0.0",
"WTForms==2.3.3",
"xfdfgen==0.4",
"xlrd==2.0.1",
"XlsxWriter==1.4.3",
"xlwt==1.3.0",
"zipp==3.4.1"
]
setup(name='docassemble.webapp',
version='1.2.96',
python_requires='>=3.8',
description=('The web application components of the docassemble system.'),
long_description=read("README.md"),
long_description_content_type='text/markdown',
author='Jonathan Pyle',
author_email='jhpyle@gmail.com',
license='MIT',
url='https://docassemble.org',
packages=find_packages(),
namespace_packages = ['docassemble'],
install_requires = install_requires,
zip_safe = False,
package_data={'docassemble.webapp': ['alembic.ini', os.path.join('alembic', '*'), os.path.join('alembic', 'versions', '*'), os.path.join('data', '*.*'), os.path.join('data', 'static', '*.*'), os.path.join('data', 'static', 'favicon', '*.*'), os.path.join('data', 'questions', '*.*'), os.path.join('templates', 'base_templates', '*.html'), os.path.join('templates', 'flask_user', '*.html'), os.path.join('templates', 'flask_user', 'emails', '*.*'), os.path.join('templates', 'pages', '*.html'), os.path.join('templates', 'pages', '*.xml'), os.path.join('templates', 'pages', '*.js'), os.path.join('templates', 'users', '*.html'), os.path.join('static', 'app', '*.*'), os.path.join('static', 'yamlmixed', '*.*'), os.path.join('static', 'sounds', '*.*'), os.path.join('static', 'examples', '*.*'), os.path.join('static', 'fontawesome', 'js', '*.*'), os.path.join('static', 'office', '*.*'), os.path.join('static', 'bootstrap-fileinput', 'img', '*'), os.path.join('static', 'img', '*'), os.path.join('static', 'bootstrap-fileinput', 'themes', 'fas', '*'), os.path.join('static', 'bootstrap-fileinput', 'js', 'locales', '*'), os.path.join('static', 'bootstrap-fileinput', 'js', 'plugins', '*'), os.path.join('static', 'bootstrap-slider', 'dist', '*.js'), os.path.join('static', 'bootstrap-slider', 'dist', 'css', '*.css'), os.path.join('static', 'bootstrap-fileinput', 'css', '*.css'), os.path.join('static', 'bootstrap-fileinput', 'js', '*.js'), os.path.join('static', 'bootstrap-fileinput', 'themes', 'fa', '*.js'), os.path.join('static', 'bootstrap-fileinput', 'themes', 'fas', '*.js'), os.path.join('static', 'bootstrap-combobox', 'css', '*.css'), os.path.join('static', 'bootstrap-combobox', 'js', '*.js'), os.path.join('static', 'bootstrap-fileinput', '*.md'), os.path.join('static', 'bootstrap', 'js', '*.*'), os.path.join('static', 'bootstrap', 'css', '*.*'), os.path.join('static', 'labelauty', 'source', '*.*'), os.path.join('static', 'codemirror', 'lib', '*.*'), os.path.join('static', 
'codemirror', 'addon', 'search', '*.*'), os.path.join('static', 'codemirror', 'addon', 'display', '*.*'), os.path.join('static', 'codemirror', 'addon', 'scroll', '*.*'), os.path.join('static', 'codemirror', 'addon', 'dialog', '*.*'), os.path.join('static', 'codemirror', 'addon', 'edit', '*.*'), os.path.join('static', 'codemirror', 'addon', 'hint', '*.*'), os.path.join('static', 'codemirror', 'mode', 'yaml', '*.*'), os.path.join('static', 'codemirror', 'mode', 'markdown', '*.*'), os.path.join('static', 'codemirror', 'mode', 'javascript', '*.*'), os.path.join('static', 'codemirror', 'mode', 'css', '*.*'), os.path.join('static', 'codemirror', 'mode', 'python', '*.*'), os.path.join('static', 'codemirror', 'mode', 'htmlmixed', '*.*'), os.path.join('static', 'codemirror', 'mode', 'xml', '*.*'), os.path.join('static', 'codemirror', 'keymap', '*.*'), os.path.join('static', 'areyousure', '*.js'), os.path.join('static', 'popper', '*.*'), os.path.join('static', 'popper', 'umd', '*.*'), os.path.join('static', 'popper', 'esm', '*.*'), os.path.join('static', '*.html')]},
)
| 36.904943 | 3,071 | 0.54523 |
525d57859c92defcef78f0c6daf1b2b7eb40e5b4 | 4,841 | py | Python | examples/CooperativeSearch/psaltlib/LMCP/py/afrl/cmasi/VehicleAction.py | GaloisInc/salty | f410659b3399ad7c527513e67b06dff0332d823e | [
"BSD-3-Clause"
] | 13 | 2017-02-15T21:56:46.000Z | 2022-03-23T12:59:26.000Z | examples/VIP-Escort/lmcp/py/afrl/cmasi/VehicleAction.py | GaloisInc/salty | f410659b3399ad7c527513e67b06dff0332d823e | [
"BSD-3-Clause"
] | 17 | 2016-07-21T10:47:23.000Z | 2020-08-07T13:26:21.000Z | examples/CooperativeSearch/psaltlib/LMCP/py/afrl/cmasi/VehicleAction.py | GaloisInc/salty | f410659b3399ad7c527513e67b06dff0332d823e | [
"BSD-3-Clause"
] | 2 | 2019-06-11T11:59:40.000Z | 2022-02-09T12:48:39.000Z | #! /usr/bin/python
import sys, struct
import xml.dom.minidom
from lmcp import LMCPObject
## ===============================================================================
## Authors: AFRL/RQQA
## Organization: Air Force Research Laboratory, Aerospace Systems Directorate, Power and Control Division
##
## Copyright (c) 2017 Government of the United State of America, as represented by
## the Secretary of the Air Force. No copyright is claimed in the United States under
## Title 17, U.S. Code. All Other Rights Reserved.
## ===============================================================================
## This file was auto-created by LmcpGen. Modifications will be overwritten.
class VehicleAction(LMCPObject.LMCPObject):
    """LMCP message ``afrl.cmasi.VehicleAction`` (CMASI series, type 7).

    Carries a list of associated task IDs (int64).

    NOTE: this class was generated by LmcpGen; prefer regenerating over
    hand-editing, since modifications will be overwritten.  The edits here
    (identity-comparison fix, builtin shadowing) should also be made in the
    generator template.
    """

    def __init__(self):
        self.LMCP_TYPE = 7
        self.SERIES_NAME = "CMASI"
        self.FULL_LMCP_TYPE_NAME = "afrl.cmasi.VehicleAction"
        # Series name turned into a long for quick comparisons.
        self.SERIES_NAME_ID = 4849604199710720000
        self.SERIES_VERSION = 3
        # Message fields
        self.AssociatedTaskList = []   # int64

    def pack(self):
        """
        Packs the object data and returns a buffer that contains all of the
        serialized members (big-endian: uint16 length prefix, then int64s).
        """
        buffer = bytearray()
        buffer.extend(LMCPObject.LMCPObject.pack(self))
        buffer.extend(struct.pack(">H", len(self.AssociatedTaskList)))
        for x in self.AssociatedTaskList:
            buffer.extend(struct.pack(">q", x))
        return buffer

    def unpack(self, buffer, _pos):
        """
        Unpacks data from a bytearray and sets class members.
        Returns the new read position.
        """
        _pos = LMCPObject.LMCPObject.unpack(self, buffer, _pos)
        _arraylen = struct.unpack_from(">H", buffer, _pos)[0]
        _pos += 2
        self.AssociatedTaskList = [None] * _arraylen
        if _arraylen > 0:
            self.AssociatedTaskList = struct.unpack_from(
                ">" + repr(_arraylen) + "q", buffer, _pos)
            _pos += 8 * _arraylen
        return _pos

    def unpackFromXMLNode(self, el, seriesFactory):
        """Populate members from a DOM element."""
        LMCPObject.LMCPObject.unpackFromXMLNode(self, el, seriesFactory)
        for e in el.childNodes:
            if e.nodeType == xml.dom.Node.ELEMENT_NODE:
                if e.localName == "AssociatedTaskList" and len(e.childNodes) > 0:
                    self.AssociatedTaskList = []
                    for c in e.childNodes:
                        if c.nodeType == xml.dom.Node.ELEMENT_NODE:
                            self.AssociatedTaskList.append(
                                int(c.childNodes[0].nodeValue))
        return

    def unpackFromDict(self, d, seriesFactory):
        """Populate members from a dict produced by toDict()."""
        LMCPObject.LMCPObject.unpackFromDict(self, d, seriesFactory)
        for key in d:
            if key == "AssociatedTaskList":
                self.AssociatedTaskList = []
                for c in d[key]:
                    self.AssociatedTaskList.append(c)
        return

    def get_AssociatedTaskList(self):
        return self.AssociatedTaskList

    def toString(self):
        """
        Returns a string representation of all variables.
        """
        buf = LMCPObject.LMCPObject.toString(self)
        buf += "From VehicleAction:\n"
        buf += "AssociatedTaskList = " + str(self.AssociatedTaskList) + "\n"
        return buf

    def toDict(self):
        m = {}
        self.toDictMembers(m)
        d = {}
        series = "CMASI"
        # Fixed: the generated code compared string literals with 'is'
        # ('"CMASI" is None'), which is always False and raises a
        # SyntaxWarning on modern CPython; use a real emptiness check.
        if not series:  # this should never happen for generated code
            d["datatype"] = str("DEBUG_PROBLEM_HERE" + "/VehicleAction")
            d["datastring"] = str(m)
        else:
            d['datatype'] = str("CMASI" + "/VehicleAction")
            d['datastring'] = str(m)
        return d

    def toDictMembers(self, d):
        LMCPObject.LMCPObject.toDictMembers(self, d)
        d['AssociatedTaskList'] = []
        for x in self.AssociatedTaskList:
            d['AssociatedTaskList'].append(x)
        return

    def getLMCPType(self):
        return self.LMCP_TYPE

    def getSeriesName(self):
        return self.SERIES_NAME

    def getSeriesNameID(self):
        return self.SERIES_NAME_ID

    def getSeriesVersion(self):
        return self.SERIES_VERSION

    def toXMLStr(self, ws):
        # Fixed: the local was named 'str', shadowing the builtin.
        xml_buf = ws + '<VehicleAction Series="CMASI" >\n'
        xml_buf += self.toXMLMembersStr(ws + " ")
        xml_buf += ws + "</VehicleAction>\n"
        return xml_buf

    def toXMLMembersStr(self, ws):
        buf = ""
        buf += LMCPObject.LMCPObject.toXMLMembersStr(self, ws)
        buf += ws + "<AssociatedTaskList>\n"
        for x in self.AssociatedTaskList:
            buf += ws + "<int64>" + str(x) + "</int64>\n"
        buf += ws + "</AssociatedTaskList>\n"
        return buf
| 32.489933 | 105 | 0.578393 |
84c77df6b0eef5ca7363577167147b232bbabb98 | 50,801 | py | Python | lib/spack/spack/environment.py | williamfgc/spack | c8c795e7dbde22dc47c9ae285a4dd59004b115b1 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | lib/spack/spack/environment.py | williamfgc/spack | c8c795e7dbde22dc47c9ae285a4dd59004b115b1 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | lib/spack/spack/environment.py | williamfgc/spack | c8c795e7dbde22dc47c9ae285a4dd59004b115b1 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
import sys
import shutil
import copy
import socket
import ruamel.yaml
import six
from ordereddict_backport import OrderedDict
import llnl.util.filesystem as fs
import llnl.util.tty as tty
from llnl.util.tty.color import colorize
import spack.error
import spack.hash_types as ht
import spack.repo
import spack.schema.env
import spack.spec
import spack.util.spack_json as sjson
import spack.config
from spack.filesystem_view import YamlFilesystemView
from spack.util.environment import EnvironmentModifications
import spack.architecture as architecture
from spack.spec import Spec
from spack.spec_list import SpecList, InvalidSpecConstraintError
from spack.variant import UnknownVariantError
#: environment variable used to indicate the active environment
spack_env_var = 'SPACK_ENV'
#: currently activated environment
_active_environment = None
#: path where environments are stored in the spack tree
env_path = os.path.join(spack.paths.var_path, 'environments')
#: Name of the input yaml file for an environment
manifest_name = 'spack.yaml'
#: Name of the input yaml file for an environment
lockfile_name = 'spack.lock'
#: Name of the directory where environments store repos, logs, views
env_subdir_name = '.spack-env'
#: default spack.yaml file to put in new environments
default_manifest_yaml = """\
# This is a Spack Environment file.
#
# It describes a set of packages to be installed, along with
# configuration settings.
spack:
# add package specs to the `specs` list
specs:
-
view: true
"""
#: regex for validating enviroment names
valid_environment_name_re = r'^\w[\w-]*$'
#: version of the lockfile format. Must increase monotonically.
lockfile_format_version = 2
#: legal first keys in the spack.yaml manifest file
env_schema_keys = ('spack', 'env')
# Magic names
# The name of the standalone spec list in the manifest yaml
user_speclist_name = 'specs'
# The name of the default view (the view loaded on env.activate)
default_view_name = 'default'
# Default behavior to link all packages into views (vs. only root packages)
default_view_link = 'all'
def valid_env_name(name):
    # Truthy (an re.Match) when *name* matches valid_environment_name_re:
    # a word character first, then only word characters and '-'.
    # Returns None otherwise, so callers use it in boolean context.
    return re.match(valid_environment_name_re, name)
def validate_env_name(name):
    """Return *name* unchanged, or raise ValueError if it is not a legal
    environment name."""
    if valid_env_name(name):
        return name
    raise ValueError((
        "'%s': names must start with a letter, and only contain "
        "letters, numbers, _, and -.") % name)
def activate(
        env, use_env_repo=False, add_view=True, shell='sh', prompt=None
):
    """Activate an environment.

    To activate an environment, we add its configuration scope to the
    existing Spack configuration, and we set active to the current
    environment.

    Arguments:
        env (Environment): the environment to activate
        use_env_repo (bool): use the packages exactly as they appear in the
            environment's repository
        add_view (bool): generate commands to add view to path variables
        shell (string): One of `sh`, `csh`.
        prompt (string): string to add to the user's prompt, or None

    Returns:
        cmds (str): shell commands to activate the environment
    """
    global _active_environment

    _active_environment = env
    prepare_config_scope(_active_environment)
    if use_env_repo:
        spack.repo.path.put_first(_active_environment.repo)

    # Fixed typo in the debug message ("environmennt").
    tty.debug("Using environment '%s'" % _active_environment.name)

    # Construct the commands to run
    cmds = ''
    if shell == 'csh':
        # TODO: figure out how to make color work for csh
        cmds += 'setenv SPACK_ENV %s;\n' % env.path
        cmds += 'alias despacktivate "spack env deactivate";\n'
        if prompt:
            cmds += 'if (! $?SPACK_OLD_PROMPT ) '
            cmds += 'setenv SPACK_OLD_PROMPT "${prompt}";\n'
            cmds += 'set prompt="%s ${prompt}";\n' % prompt
    else:
        # Only colorize the prompt when the terminal advertises color.
        if os.getenv('TERM') and 'color' in os.getenv('TERM') and prompt:
            prompt = colorize('@G{%s} ' % prompt, color=True)

        cmds += 'export SPACK_ENV=%s;\n' % env.path
        cmds += "alias despacktivate='spack env deactivate';\n"
        if prompt:
            # Save the user's prompt once so deactivate can restore it.
            cmds += 'if [ -z ${SPACK_OLD_PS1+x} ]; then\n'
            cmds += '    if [ -z ${PS1+x} ]; then\n'
            cmds += "        PS1='$$$$';\n"
            cmds += '    fi;\n'
            cmds += '    export SPACK_OLD_PS1="${PS1}";\n'
            cmds += 'fi;\n'
            cmds += 'export PS1="%s ${PS1}";\n' % prompt

    if add_view and default_view_name in env.views:
        cmds += env.add_default_view_to_shell(shell)

    return cmds
def deactivate(shell='sh'):
    """Undo any configuration or repo settings modified by ``activate()``.

    Arguments:
        shell (string): One of `sh`, `csh`. Shell style to use.

    Returns:
        (string): shell commands for `shell` to undo environment variables
    """
    global _active_environment

    if not _active_environment:
        return

    deactivate_config_scope(_active_environment)

    # use _repo so we only remove if a repo was actually constructed
    if _active_environment._repo:
        spack.repo.path.remove(_active_environment._repo)

    cmds = ''
    if shell == 'csh':
        cmds += 'unsetenv SPACK_ENV;\n'
        cmds += 'if ( $?SPACK_OLD_PROMPT ) '
        cmds += 'set prompt="$SPACK_OLD_PROMPT" && '
        cmds += 'unsetenv SPACK_OLD_PROMPT;\n'
        cmds += 'unalias despacktivate;\n'
    else:
        cmds += 'if [ ! -z ${SPACK_ENV+x} ]; then\n'
        cmds += 'unset SPACK_ENV; export SPACK_ENV;\n'
        cmds += 'fi;\n'
        cmds += 'unalias despacktivate;\n'
        # Restore the prompt saved by activate(), if any.
        cmds += 'if [ ! -z ${SPACK_OLD_PS1+x} ]; then\n'
        cmds += '    if [ "$SPACK_OLD_PS1" = \'$$$$\' ]; then\n'
        cmds += '        unset PS1; export PS1;\n'
        cmds += '    else\n'
        cmds += '        export PS1="$SPACK_OLD_PS1";\n'
        cmds += '    fi;\n'
        cmds += '    unset SPACK_OLD_PS1; export SPACK_OLD_PS1;\n'
        cmds += 'fi;\n'

    if default_view_name in _active_environment.views:
        cmds += _active_environment.rm_default_view_from_shell(shell)

    # Fixed typo in the debug message ("environmennt").
    tty.debug("Deactivated environment '%s'" % _active_environment.name)
    _active_environment = None

    return cmds
def find_environment(args):
    """Find active environment from args, spack.yaml, or environment variable.

    This is called in ``spack.main`` to figure out which environment to
    activate.

    Check for an environment in this order:
        1. via ``spack -e ENV`` or ``spack -D DIR`` (arguments)
        2. as a spack.yaml file in the current directory, or
        3. via a path in the SPACK_ENV environment variable.

    If an environment is found, read it in. If not, return None.

    Arguments:
        args (Namespace): argparse namespace with command arguments

    Returns:
        (Environment): a found environment, or ``None``
    """
    # try arguments
    env = getattr(args, 'env', None)
    # treat env as a name
    if env:
        if exists(env):
            return read(env)
    else:
        # if env was not specified, see if there is a directory; otherwise,
        # look at env_dir (env and env_dir are mutually exclusive)
        env = getattr(args, 'env_dir', None)
        # if no argument, look for a manifest file
        if not env:
            if os.path.exists(manifest_name):
                env = os.getcwd()
        # if no env, env_dir, or manifest try the environment
        if not env:
            env = os.environ.get(spack_env_var)
            # nothing was set; there's no active environment
            if not env:
                return None
    # if we get here, env isn't the name of a spack environment; it has
    # to be a path to an environment, or there is something wrong.
    if is_env_dir(env):
        return Environment(env)
    raise SpackEnvironmentError('no environment in %s' % env)
def get_env(args, cmd_name, required=False):
    """Used by commands to get the active environment.

    This first checks for an ``env`` argument, then looks at the
    ``active`` environment.  We check args first because Spack's
    subcommand arguments are parsed *after* the ``-e`` and ``-D``
    arguments to ``spack``.  So there may be an ``env`` argument that is
    *not* the active environment, and we give it precedence.

    This is used by a number of commands for determining whether there is
    an active environment.

    If an environment is not found *and* is required, print an error
    message that says the calling command *needs* an active environment.

    Arguments:
        args (Namespace): argparse namespace with command arguments
        cmd_name (str): name of calling command
        required (bool): if ``True``, raise an exception when no environment
            is found; if ``False``, just return ``None``

    Returns:
        (Environment): if there is an arg or active environment
    """
    # try argument first
    env = getattr(args, 'env', None)
    if env:
        if exists(env):
            return read(env)
        elif is_env_dir(env):
            return Environment(env)
        else:
            raise SpackEnvironmentError('no environment in %s' % env)
    # try the active environment. This is set by find_environment() (above)
    if _active_environment:
        return _active_environment
    elif not required:
        return None
    else:
        # required but nothing active: hard error with usage hints
        tty.die(
            '`spack %s` requires an environment' % cmd_name,
            'activate an environment first:',
            '    spack env activate ENV',
            'or use:',
            '    spack -e ENV %s ...' % cmd_name)
def _root(name):
    """Non-validating version of root(), to be used internally."""
    # Deliberately skips name validation; root() is the public entry point.
    return os.path.join(env_path, name)
def root(name):
    """Get the root directory for an environment by name."""
    # validate_env_name() raises on an illegal name and returns the
    # (unchanged) name otherwise, so it can be composed directly.
    return _root(validate_env_name(name))
def exists(name):
    """Whether an environment with this name exists or not."""
    # An illegal name can never correspond to an environment directory.
    return bool(valid_env_name(name)) and os.path.isdir(root(name))
def active(name):
    """True if the named environment is active."""
    if _active_environment:
        return name == _active_environment.name
    # Preserve the original and-expression's value when nothing is active
    # (returns the falsy _active_environment itself, e.g. None).
    return _active_environment
def is_env_dir(path):
    """Whether a directory contains a spack environment."""
    manifest = os.path.join(path, manifest_name)
    return os.path.isdir(path) and os.path.exists(manifest)
def read(name):
    """Get an environment with the supplied name."""
    validate_env_name(name)
    if exists(name):
        return Environment(root(name))
    raise SpackEnvironmentError("no such environment '%s'" % name)
def create(name, init_file=None, with_view=None):
    """Create and return a new named environment managed by Spack.

    Raises:
        SpackEnvironmentError: if an environment of that name already exists.
    """
    validate_env_name(name)
    if exists(name):
        raise SpackEnvironmentError(
            "'%s': environment already exists" % name)
    return Environment(root(name), init_file, with_view)
def config_dict(yaml_data):
    """Return the configuration-scope section of a parsed spack.yaml."""
    section_key = spack.config.first_existing(yaml_data, env_schema_keys)
    return yaml_data[section_key]
def all_environment_names():
    """Return a sorted list of names of environments that currently exist."""
    # Listing is read-only: report nothing rather than creating env_path.
    if not os.path.exists(env_path):
        return []
    return [
        entry for entry in sorted(os.listdir(env_path))
        if valid_env_name(entry) and os.path.exists(
            os.path.join(_root(entry), manifest_name))
    ]
def all_environments():
    """Yield every named Environment, in sorted name order."""
    for env_name in all_environment_names():
        yield read(env_name)
def validate(data, filename=None):
    """Validate parsed environment data against the env schema.

    Raises:
        spack.config.ConfigFormatError: on schema violations, annotated
            with the offending line number from the round-trip YAML node.
    """
    import jsonschema
    try:
        spack.schema.Validator(spack.schema.env.schema).validate(data)
    except jsonschema.ValidationError as err:
        raise spack.config.ConfigFormatError(
            err, data, filename, err.instance.lc.line + 1)
def _read_yaml(str_or_file):
    """Parse YAML (round-trip style, preserving comments) and validate it."""
    parsed = ruamel.yaml.load(str_or_file, ruamel.yaml.RoundTripLoader)
    validate(parsed, getattr(str_or_file, 'name', None))
    return parsed
def _write_yaml(data, str_or_file):
    """Validate ``data`` then dump it as YAML, keeping comments and order."""
    validate(data, getattr(str_or_file, 'name', None))
    ruamel.yaml.dump(
        data, str_or_file,
        Dumper=ruamel.yaml.RoundTripDumper, default_flow_style=False)
def _eval_conditional(string):
    """Evaluate a ``when:`` conditional with a restricted variable scope."""
    arch = architecture.Arch(
        architecture.platform(), 'default_os', 'default_target')
    # Names visible to the evaluated expression.
    scope = {
        'target': str(arch.target),
        'os': str(arch.os),
        'platform': str(arch.platform),
        'arch': str(arch),
        'architecture': str(arch),
        're': re,
        'env': os.environ,
        'hostname': socket.gethostname(),
    }
    # NOTE(review): eval() runs manifest-supplied code. The globals dict
    # above limits what is directly reachable, but this is not a sandbox;
    # manifests must be trusted input.
    return eval(string, scope)
class ViewDescriptor(object):
    """Describes a filesystem view associated with an environment.

    Attributes:
        root (str): directory the view is rooted at
        projections (dict): projection configuration passed to the view
        select (list): spec constraints to include in the view (all if empty)
        exclude (list): spec constraints to keep out of the view
        link (str): 'all' to link every environment spec, otherwise roots only
    """

    def __init__(self, root, projections=None, select=None, exclude=None,
                 link=default_view_link):
        # None sentinels instead of mutable default arguments, so every
        # instance gets its own fresh containers.
        self.root = root
        self.projections = {} if projections is None else projections
        self.select = [] if select is None else select
        self.select_fn = lambda x: any(x.satisfies(s) for s in self.select)
        self.exclude = [] if exclude is None else exclude
        self.exclude_fn = lambda x: not any(x.satisfies(e)
                                            for e in self.exclude)
        self.link = link

    def __eq__(self, other):
        # Value equality is required so Environment.write() can recognize a
        # default-configured view; without it, ``==`` fell back to object
        # identity and the comparisons there never matched.
        return all([self.root == other.root,
                    self.projections == other.projections,
                    self.select == other.select,
                    self.exclude == other.exclude,
                    self.link == other.link])

    def to_dict(self):
        """Serialize this descriptor, omitting settings at their defaults."""
        ret = {'root': self.root}
        if self.projections:
            ret['projections'] = self.projections
        if self.select:
            ret['select'] = self.select
        if self.exclude:
            ret['exclude'] = self.exclude
        if self.link != default_view_link:
            ret['link'] = self.link
        return ret

    @staticmethod
    def from_dict(d):
        """Inverse of to_dict(): build a descriptor from a plain dict."""
        return ViewDescriptor(d['root'],
                              d.get('projections', {}),
                              d.get('select', []),
                              d.get('exclude', []),
                              d.get('link', default_view_link))

    def view(self):
        """Instantiate the filesystem view object for this descriptor."""
        return YamlFilesystemView(self.root, spack.store.layout,
                                  ignore_conflicts=True,
                                  projections=self.projections)

    def regenerate(self, all_specs, roots):
        """Rebuild the view to reflect currently installed specs.

        Arguments:
            all_specs: every spec in the environment (used if link == 'all')
            roots: just the environment's root specs
        """
        specs_for_view = []
        specs = all_specs if self.link == 'all' else roots
        for spec in specs:
            # The view does not store build deps, so if we want it to
            # recognize environment specs (which do store build deps), then
            # they need to be stripped.
            if spec.concrete:  # Do not link unconcretized roots
                specs_for_view.append(spec.copy(deps=('link', 'run')))
        if self.select:
            specs_for_view = list(filter(self.select_fn, specs_for_view))
        if self.exclude:
            specs_for_view = list(filter(self.exclude_fn, specs_for_view))
        installed_specs_for_view = set(s for s in specs_for_view
                                       if s.package.installed)
        view = self.view()
        view.clean()
        specs_in_view = set(view.get_all_specs())
        tty.msg("Updating view at {0}".format(self.root))
        # Remove what is linked but no longer wanted, then add the rest.
        rm_specs = specs_in_view - installed_specs_for_view
        view.remove_specs(*rm_specs, with_dependents=False)
        add_specs = installed_specs_for_view - specs_in_view
        view.add_specs(*add_specs, with_dependencies=False)
class Environment(object):
    """A Spack environment: user specs, their concretizations, and views.

    An Environment is rooted at a directory holding a ``spack.yaml``
    manifest (user specs and configuration) and, once concretized, a
    ``spack.lock`` lockfile with the fully concrete specs. It also manages
    any filesystem views configured for the environment.
    """
    def __init__(self, path, init_file=None, with_view=None):
        """Create a new environment.
        The environment can be optionally initialized with either a
        spack.yaml or spack.lock file.
        Arguments:
            path (str): path to the root directory of this environment
            init_file (str or file object): filename or file object to
                initialize the environment
            with_view (str or bool): whether a view should be maintained for
                the environment. If the value is a string, it specifies the
                path to the view.
        """
        self.path = os.path.abspath(path)
        self.clear()
        if init_file:
            # Initialize from the given file: a lockfile yields concrete
            # specs (with a default manifest); anything else is a manifest.
            with fs.open_if_filename(init_file) as f:
                if hasattr(f, 'name') and f.name.endswith('.lock'):
                    self._read_manifest(default_manifest_yaml)
                    self._read_lockfile(f)
                    self._set_user_specs_from_lockfile()
                else:
                    self._read_manifest(f)
        else:
            default_manifest = not os.path.exists(self.manifest_path)
            if default_manifest:
                # No manifest, use default yaml
                self._read_manifest(default_manifest_yaml)
            else:
                with open(self.manifest_path) as f:
                    self._read_manifest(f)
            if os.path.exists(self.lock_path):
                with open(self.lock_path) as f:
                    read_lock_version = self._read_lockfile(f)
                if default_manifest:
                    # No manifest, set user specs from lockfile
                    self._set_user_specs_from_lockfile()
                if read_lock_version == 1:
                    # Keep a copy of the v1 lockfile before it gets
                    # rewritten in the newer format on the next write().
                    tty.debug(
                        "Storing backup of old lockfile {0} at {1}".format(
                            self.lock_path, self._lock_backup_v1_path))
                    shutil.copy(self.lock_path, self._lock_backup_v1_path)
        if with_view is False:
            self.views = {}
        elif with_view is True:
            self.views = {
                default_view_name: ViewDescriptor(self.view_path_default)}
        elif isinstance(with_view, six.string_types):
            self.views = {default_view_name: ViewDescriptor(with_view)}
        # If with_view is None, then defer to the view settings determined by
        # the manifest file
    def _read_manifest(self, f):
        """Read manifest file and set up user specs."""
        self.yaml = _read_yaml(f)
        self.spec_lists = OrderedDict()
        # Build named spec lists from 'definitions', honoring any 'when'
        # conditionals; entries sharing a name are merged in order.
        for item in config_dict(self.yaml).get('definitions', []):
            entry = copy.deepcopy(item)
            when = _eval_conditional(entry.pop('when', 'True'))
            assert len(entry) == 1
            if when:
                name, spec_list = next(iter(entry.items()))
                user_specs = SpecList(name, spec_list, self.spec_lists.copy())
                if name in self.spec_lists:
                    self.spec_lists[name].extend(user_specs)
                else:
                    self.spec_lists[name] = user_specs
        spec_list = config_dict(self.yaml).get(user_speclist_name)
        user_specs = SpecList(user_speclist_name, [s for s in spec_list if s],
                              self.spec_lists.copy())
        self.spec_lists[user_speclist_name] = user_specs
        enable_view = config_dict(self.yaml).get('view')
        # enable_view can be boolean, string, or None
        if enable_view is True or enable_view is None:
            self.views = {
                default_view_name: ViewDescriptor(self.view_path_default)}
        elif isinstance(enable_view, six.string_types):
            self.views = {default_view_name: ViewDescriptor(enable_view)}
        elif enable_view:
            self.views = dict((name, ViewDescriptor.from_dict(values))
                              for name, values in enable_view.items())
        else:
            self.views = {}
    @property
    def user_specs(self):
        """SpecList of the primary (non-definition) user specs."""
        return self.spec_lists[user_speclist_name]
    def _set_user_specs_from_lockfile(self):
        """Copy user_specs from a read-in lockfile."""
        self.spec_lists = {
            user_speclist_name: SpecList(
                user_speclist_name,
                [str(s) for s in self.concretized_user_specs]
            )
        }
    def clear(self):
        """Reset all in-memory state to that of an empty environment."""
        self.spec_lists = {user_speclist_name: SpecList()}  # specs from yaml
        self.concretized_user_specs = []  # user specs from last concretize
        self.concretized_order = []  # roots of last concretize, in order
        self.specs_by_hash = {}  # concretized specs by hash
        self.new_specs = []  # write packages for these on write()
        self._repo = None  # RepoPath for this env (memoized)
        self._previous_active = None  # previously active environment
    @property
    def internal(self):
        """Whether this environment is managed by Spack."""
        return self.path.startswith(env_path)
    @property
    def name(self):
        """Human-readable representation of the environment.
        This is the path for directory environments, and just the name
        for named environments.
        """
        if self.internal:
            return os.path.basename(self.path)
        else:
            return self.path
    @property
    def active(self):
        """True if this environment is currently active."""
        return _active_environment and self.path == _active_environment.path
    @property
    def manifest_path(self):
        """Path to spack.yaml file in this environment."""
        return os.path.join(self.path, manifest_name)
    @property
    def lock_path(self):
        """Path to spack.lock file in this environment."""
        return os.path.join(self.path, lockfile_name)
    @property
    def _lock_backup_v1_path(self):
        """Path to backup of v1 lockfile before conversion to v2"""
        return self.lock_path + '.backup.v1'
    @property
    def env_subdir_path(self):
        """Path to directory where the env stores repos, logs, views."""
        return os.path.join(self.path, env_subdir_name)
    @property
    def repos_path(self):
        """Path to the environment's bundled package repositories."""
        return os.path.join(self.path, env_subdir_name, 'repos')
    @property
    def log_path(self):
        """Path to the directory holding per-spec build log links."""
        return os.path.join(self.path, env_subdir_name, 'logs')
    @property
    def view_path_default(self):
        # default path for environment views
        return os.path.join(self.env_subdir_path, 'view')
    @property
    def repo(self):
        """Memoized RepoPath over this environment's repos directory."""
        if self._repo is None:
            self._repo = make_repo_path(self.repos_path)
        return self._repo
    def included_config_scopes(self):
        """List of included configuration scopes from the environment.
        Scopes are listed in the YAML file in order from highest to
        lowest precedence, so configuration from earlier scope will take
        precedence over later ones.
        This routine returns them in the order they should be pushed onto
        the internal scope stack (so, in reverse, from lowest to highest).
        """
        scopes = []
        # load config scopes added via 'include:', in reverse so that
        # highest-precedence scopes are last.
        includes = config_dict(self.yaml).get('include', [])
        for i, config_path in enumerate(reversed(includes)):
            # allow paths to contain environment variables
            config_path = config_path.format(**os.environ)
            # treat relative paths as relative to the environment
            if not os.path.isabs(config_path):
                config_path = os.path.join(self.path, config_path)
                config_path = os.path.normpath(os.path.realpath(config_path))
            if os.path.isdir(config_path):
                # directories are treated as regular ConfigScopes
                config_name = 'env:%s:%s' % (
                    self.name, os.path.basename(config_path))
                scope = spack.config.ConfigScope(config_name, config_path)
            else:
                # files are assumed to be SingleFileScopes
                base, ext = os.path.splitext(os.path.basename(config_path))
                config_name = 'env:%s:%s' % (self.name, base)
                scope = spack.config.SingleFileScope(
                    config_name, config_path, spack.schema.merged.schema)
            scopes.append(scope)
        return scopes
    def env_file_config_scope_name(self):
        """Name of the config scope of this environment's manifest file."""
        return 'env:%s' % self.name
    def env_file_config_scope(self):
        """Get the configuration scope for the environment's manifest file."""
        config_name = self.env_file_config_scope_name()
        return spack.config.SingleFileScope(config_name,
                                            self.manifest_path,
                                            spack.schema.env.schema,
                                            [env_schema_keys])
    def config_scopes(self):
        """A list of all configuration scopes for this environment."""
        return self.included_config_scopes() + [self.env_file_config_scope()]
    def destroy(self):
        """Remove this environment from Spack entirely."""
        shutil.rmtree(self.path)
    def update_stale_references(self, from_list=None):
        """Iterate over spec lists updating references.

        Only lists defined after ``from_list`` can reference it, so only
        those are rebuilt.
        """
        if not from_list:
            from_list = next(iter(self.spec_lists.keys()))
        index = list(self.spec_lists.keys()).index(from_list)
        # spec_lists is an OrderedDict, all list entries after the modified
        # list may refer to the modified list. Update stale references
        for i, (name, speclist) in enumerate(
            list(self.spec_lists.items())[index + 1:], index + 1
        ):
            new_reference = dict((n, self.spec_lists[n])
                                 for n in list(self.spec_lists.keys())[:i])
            speclist.update_reference(new_reference)
    def add(self, user_spec, list_name=user_speclist_name):
        """Add a single user_spec (non-concretized) to the Environment
        Returns:
            (bool): True if the spec was added, False if it was already
            present and did not need to be added
        """
        spec = Spec(user_spec)
        if list_name not in self.spec_lists:
            raise SpackEnvironmentError(
                'No list %s exists in environment %s' % (list_name, self.name)
            )
        if list_name == user_speclist_name:
            # Specs in the primary list must be named, known packages.
            if not spec.name:
                raise SpackEnvironmentError(
                    'cannot add anonymous specs to an environment!')
            elif not spack.repo.path.exists(spec.name):
                raise SpackEnvironmentError('no such package: %s' % spec.name)
        list_to_change = self.spec_lists[list_name]
        existing = str(spec) in list_to_change.yaml_list
        if not existing:
            list_to_change.add(str(spec))
            self.update_stale_references(list_name)
        return bool(not existing)
    def remove(self, query_spec, list_name=user_speclist_name, force=False):
        """Remove specs from an environment that match a query_spec"""
        query_spec = Spec(query_spec)
        list_to_change = self.spec_lists[list_name]
        matches = []
        if not query_spec.concrete:
            matches = [s for s in list_to_change if s.satisfies(query_spec)]
        if not matches:
            # concrete specs match against concrete specs in the env
            specs_hashes = zip(
                self.concretized_user_specs, self.concretized_order)
            matches = [
                s for s, h in specs_hashes
                if query_spec.dag_hash() == h
            ]
        if not matches:
            raise SpackEnvironmentError(
                "Not found: {0}".format(query_spec))
        old_specs = set(self.user_specs)
        for spec in matches:
            if spec in list_to_change:
                list_to_change.remove(spec)
        self.update_stale_references(list_name)
        # If force, update stale concretized specs
        # Only check specs removed by this operation
        new_specs = set(self.user_specs)
        for spec in old_specs - new_specs:
            if force and spec in self.concretized_user_specs:
                i = self.concretized_user_specs.index(spec)
                del self.concretized_user_specs[i]
                dag_hash = self.concretized_order[i]
                del self.concretized_order[i]
                del self.specs_by_hash[dag_hash]
    def concretize(self, force=False):
        """Concretize user_specs in this environment.
        Only concretizes specs that haven't been concretized yet unless
        force is ``True``.
        This only modifies the environment in memory. ``write()`` will
        write out a lockfile containing concretized specs.
        Arguments:
            force (bool): re-concretize ALL specs, even those that were
               already concretized
        Returns:
            List of specs that have been concretized. Each entry is a tuple of
            the user spec and the corresponding concretized spec.
        """
        if force:
            # Clear previously concretized specs
            self.concretized_user_specs = []
            self.concretized_order = []
            self.specs_by_hash = {}
        # keep any concretized specs whose user specs are still in the manifest
        old_concretized_user_specs = self.concretized_user_specs
        old_concretized_order = self.concretized_order
        old_specs_by_hash = self.specs_by_hash
        self.concretized_user_specs = []
        self.concretized_order = []
        self.specs_by_hash = {}
        for s, h in zip(old_concretized_user_specs, old_concretized_order):
            if s in self.user_specs:
                concrete = old_specs_by_hash[h]
                self._add_concrete_spec(s, concrete, new=False)
        # Concretize any new user specs that we haven't concretized yet
        concretized_specs = []
        for uspec, uspec_constraints in zip(
                self.user_specs, self.user_specs.specs_as_constraints):
            if uspec not in old_concretized_user_specs:
                concrete = _concretize_from_constraints(uspec_constraints)
                self._add_concrete_spec(uspec, concrete)
                concretized_specs.append((uspec, concrete))
        return concretized_specs
    def install(self, user_spec, concrete_spec=None, **install_args):
        """Install a single spec into an environment.
        This will automatically concretize the single spec, but it won't
        affect other as-yet unconcretized specs.
        """
        spec = Spec(user_spec)
        if self.add(spec):
            concrete = concrete_spec if concrete_spec else spec.concretized()
            self._add_concrete_spec(spec, concrete)
        else:
            # spec might be in the user_specs, but not installed.
            # TODO: Redo name-based comparison for old style envs
            spec = next(s for s in self.user_specs if s.satisfies(user_spec))
            concrete = self.specs_by_hash.get(spec.build_hash())
            if not concrete:
                concrete = spec.concretized()
                self._add_concrete_spec(spec, concrete)
        self._install(concrete, **install_args)
    def _install(self, spec, **install_args):
        # Install one concrete spec and link its build log into log_path.
        spec.package.do_install(**install_args)
        # Make sure log directory exists
        log_path = self.log_path
        fs.mkdirp(log_path)
        with fs.working_dir(self.path):
            # Link the resulting log file into logs dir
            build_log_link = os.path.join(
                log_path, '%s-%s.log' % (spec.name, spec.dag_hash(7)))
            if os.path.lexists(build_log_link):
                os.remove(build_log_link)
            os.symlink(spec.package.build_log_path, build_log_link)
    @property
    def default_view(self):
        """The default ViewDescriptor; raises if no default view is enabled."""
        if not self.views:
            raise SpackEnvironmentError(
                "{0} does not have a view enabled".format(self.name))
        if default_view_name not in self.views:
            raise SpackEnvironmentError(
                "{0} does not have a default view enabled".format(self.name))
        return self.views[default_view_name]
    def update_default_view(self, viewpath):
        """Move, create, or (with a falsy path) remove the default view."""
        name = default_view_name
        if name in self.views and self.default_view.root != viewpath:
            shutil.rmtree(self.default_view.root)
        if viewpath:
            if name in self.views:
                self.default_view.root = viewpath
            else:
                self.views[name] = ViewDescriptor(viewpath)
        else:
            self.views.pop(name, None)
    def regenerate_views(self):
        """Rebuild every configured view from the current concrete specs."""
        if not self.views:
            tty.debug("Skip view update, this environment does not"
                      " maintain a view")
            return
        specs = self._get_environment_specs()
        for view in self.views.values():
            view.regenerate(specs, self.roots())
    def _shell_vars(self):
        # (variable, paths) pairs rooted at the default view, used by the
        # shell activation helpers below. Empty if there is no default view.
        updates = [
            ('PATH', ['bin']),
            ('MANPATH', ['man', 'share/man']),
            ('ACLOCAL_PATH', ['share/aclocal']),
            ('LD_LIBRARY_PATH', ['lib', 'lib64']),
            ('LIBRARY_PATH', ['lib', 'lib64']),
            ('CPATH', ['include']),
            ('PKG_CONFIG_PATH', ['lib/pkgconfig', 'lib64/pkgconfig',
                                 'share/pkgconfig']),
            ('CMAKE_PREFIX_PATH', [''])
        ]
        path_updates = list()
        if default_view_name in self.views:
            for var, dirs in updates:
                paths = [os.path.join(self.default_view.root, x) for x in dirs]
                path_updates.append((var, paths))
        return path_updates
    def add_default_view_to_shell(self, shell):
        """Return shell code that prepends the default view's paths."""
        env_mod = EnvironmentModifications()
        for var, paths in self._shell_vars():
            for path in paths:
                env_mod.prepend_path(var, path)
        return env_mod.shell_modifications(shell)
    def rm_default_view_from_shell(self, shell):
        """Return shell code that removes the default view's paths."""
        env_mod = EnvironmentModifications()
        for var, paths in self._shell_vars():
            for path in paths:
                env_mod.remove_path(var, path)
        return env_mod.shell_modifications(shell)
    def _add_concrete_spec(self, spec, concrete, new=True):
        """Called when a new concretized spec is added to the environment.
        This ensures that all internal data structures are kept in sync.
        Arguments:
            spec (Spec): user spec that resulted in the concrete spec
            concrete (Spec): spec concretized within this environment
            new (bool): whether to write this spec's package to the env
                repo on write()
        """
        assert concrete.concrete
        # when a spec is newly concretized, we need to make a note so
        # that we can write its package to the env repo on write()
        if new:
            self.new_specs.append(concrete)
        # update internal lists of specs
        self.concretized_user_specs.append(spec)
        h = concrete.build_hash()
        self.concretized_order.append(h)
        self.specs_by_hash[h] = concrete
    def install_all(self, args=None):
        """Install all concretized specs in an environment."""
        for concretized_hash in self.concretized_order:
            spec = self.specs_by_hash[concretized_hash]
            # Parse cli arguments and construct a dictionary
            # that will be passed to Package.do_install API
            kwargs = dict()
            if args:
                spack.cmd.install.update_kwargs_from_args(args, kwargs)
            self._install(spec, **kwargs)
            if not spec.external:
                # Link the resulting log file into logs dir
                build_log_link = os.path.join(
                    self.log_path, '%s-%s.log' % (spec.name, spec.dag_hash(7)))
                if os.path.lexists(build_log_link):
                    os.remove(build_log_link)
                os.symlink(spec.package.build_log_path, build_log_link)
        self.regenerate_views()
    def all_specs_by_hash(self):
        """Map of hashes to spec for all specs in this environment."""
        # Note this uses dag-hashes calculated without build deps as keys,
        # whereas the environment tracks specs based on dag-hashes calculated
        # with all dependencies. This function should not be used by an
        # Environment object for management of its own data structures
        hashes = {}
        for h in self.concretized_order:
            specs = self.specs_by_hash[h].traverse(deptype=('link', 'run'))
            for spec in specs:
                hashes[spec.dag_hash()] = spec
        return hashes
    def all_specs(self):
        """Return all specs, even those a user spec would shadow."""
        return sorted(self.all_specs_by_hash().values())
    def all_hashes(self):
        """Return all hashes, even those a user spec would shadow."""
        return list(self.all_specs_by_hash().keys())
    def roots(self):
        """Specs explicitly requested by the user *in this environment*.
        Yields both added and installed specs that have user specs in
        `spack.yaml`.
        """
        concretized = dict(self.concretized_specs())
        for spec in self.user_specs:
            concrete = concretized.get(spec)
            yield concrete if concrete else spec
    def added_specs(self):
        """Specs that are not yet installed.
        Yields the user spec for non-concretized specs, and the concrete
        spec for already concretized but not yet installed specs.
        """
        concretized = dict(self.concretized_specs())
        for spec in self.user_specs:
            concrete = concretized.get(spec)
            if not concrete:
                yield spec
            elif not concrete.package.installed:
                yield concrete
    def concretized_specs(self):
        """Tuples of (user spec, concrete spec) for all concrete specs."""
        for s, h in zip(self.concretized_user_specs, self.concretized_order):
            yield (s, self.specs_by_hash[h])
    def removed_specs(self):
        """Tuples of (user spec, concrete spec) for all specs that will be
        removed on next concretize."""
        needed = set()
        for s, c in self.concretized_specs():
            if s in self.user_specs:
                for d in c.traverse():
                    needed.add(d)
        for s, c in self.concretized_specs():
            for d in c.traverse():
                if d not in needed:
                    yield d
    def _get_environment_specs(self, recurse_dependencies=True):
        """Returns the specs of all the packages in an environment.
        If these specs appear under different user_specs, only one copy
        is added to the list returned.
        """
        spec_list = list()
        for spec_hash in self.concretized_order:
            spec = self.specs_by_hash[spec_hash]
            specs = (spec.traverse(deptype=('link', 'run'))
                     if recurse_dependencies else (spec,))
            spec_list.extend(specs)
        return spec_list
    def _to_lockfile_dict(self):
        """Create a dictionary to store a lockfile for this environment."""
        concrete_specs = {}
        for spec in self.specs_by_hash.values():
            for s in spec.traverse():
                dag_hash_all = s.build_hash()
                if dag_hash_all not in concrete_specs:
                    spec_dict = s.to_node_dict(hash=ht.build_hash)
                    spec_dict[s.name]['hash'] = s.dag_hash()
                    concrete_specs[dag_hash_all] = spec_dict
        hash_spec_list = zip(
            self.concretized_order, self.concretized_user_specs)
        # this is the lockfile we'll write out
        data = {
            # metadata about the format
            '_meta': {
                'file-type': 'spack-lockfile',
                'lockfile-version': lockfile_format_version,
            },
            # users specs + hashes are the 'roots' of the environment
            'roots': [{
                'hash': h,
                'spec': str(s)
            } for h, s in hash_spec_list],
            # Concrete specs by hash, including dependencies
            'concrete_specs': concrete_specs,
        }
        return data
    def _read_lockfile(self, file_or_json):
        """Read a lockfile from a file or from a raw string."""
        lockfile_dict = sjson.load(file_or_json)
        self._read_lockfile_dict(lockfile_dict)
        return lockfile_dict['_meta']['lockfile-version']
    def _read_lockfile_dict(self, d):
        """Read a lockfile dictionary into this environment."""
        roots = d['roots']
        self.concretized_user_specs = [Spec(r['spec']) for r in roots]
        self.concretized_order = [r['hash'] for r in roots]
        json_specs_by_hash = d['concrete_specs']
        root_hashes = set(self.concretized_order)
        specs_by_hash = {}
        for dag_hash, node_dict in json_specs_by_hash.items():
            specs_by_hash[dag_hash] = Spec.from_node_dict(node_dict)
        for dag_hash, node_dict in json_specs_by_hash.items():
            for dep_name, dep_hash, deptypes in (
                    Spec.dependencies_from_node_dict(node_dict)):
                specs_by_hash[dag_hash]._add_dependency(
                    specs_by_hash[dep_hash], deptypes)
        # If we are reading an older lockfile format (which uses dag hashes
        # that exclude build deps), we use this to convert the old
        # concretized_order to the full hashes (preserving the order)
        old_hash_to_new = {}
        self.specs_by_hash = {}
        for _, spec in specs_by_hash.items():
            dag_hash = spec.dag_hash()
            build_hash = spec.build_hash()
            if dag_hash in root_hashes:
                old_hash_to_new[dag_hash] = build_hash
            if (dag_hash in root_hashes or build_hash in root_hashes):
                self.specs_by_hash[build_hash] = spec
        if old_hash_to_new:
            # Replace any older hashes in concretized_order with hashes
            # that include build deps
            self.concretized_order = [
                old_hash_to_new.get(h, h) for h in self.concretized_order]
    def write(self):
        """Writes an in-memory environment to its location on disk.
        This will also write out package files for each newly concretized spec.
        """
        # ensure path in var/spack/environments
        fs.mkdirp(self.path)
        if self.specs_by_hash:
            # ensure the prefix/.env directory exists
            fs.mkdirp(self.env_subdir_path)
            for spec in self.new_specs:
                for dep in spec.traverse():
                    if not dep.concrete:
                        raise ValueError('specs passed to environment.write() '
                                         'must be concrete!')
                    root = os.path.join(self.repos_path, dep.namespace)
                    repo = spack.repo.create_or_construct(root, dep.namespace)
                    pkg_dir = repo.dirname_for_package_name(dep.name)
                    fs.mkdirp(pkg_dir)
                    spack.repo.path.dump_provenance(dep, pkg_dir)
            self.new_specs = []
            # write the lock file last
            with fs.write_tmp_and_move(self.lock_path) as f:
                sjson.dump(self._to_lockfile_dict(), stream=f)
        else:
            if os.path.exists(self.lock_path):
                os.unlink(self.lock_path)
        # invalidate _repo cache
        self._repo = None
        # put any changes in the definitions in the YAML
        for name, speclist in self.spec_lists.items():
            if name == user_speclist_name:
                # The primary list is handled differently
                continue
            conf = config_dict(self.yaml)
            active_yaml_lists = [l for l in conf.get('definitions', [])
                                 if name in l and
                                 _eval_conditional(l.get('when', 'True'))]
            # Remove any specs in yaml that are not in internal representation
            for ayl in active_yaml_lists:
                # If it's not a string, it's a matrix. Those can't have changed
                # If it is a string that starts with '$', it's a reference.
                # Those also can't have changed.
                ayl[name][:] = [s for s in ayl.setdefault(name, [])
                                if (not isinstance(s, six.string_types)) or
                                s.startswith('$') or Spec(s) in speclist.specs]
            # Put the new specs into the first active list from the yaml
            new_specs = [entry for entry in speclist.yaml_list
                         if isinstance(entry, six.string_types) and
                         not any(entry in ayl[name]
                                 for ayl in active_yaml_lists)]
            list_for_new_specs = active_yaml_lists[0].setdefault(name, [])
            list_for_new_specs[:] = list_for_new_specs + new_specs
        # put the new user specs in the YAML.
        # This can be done directly because there can't be multiple definitions
        # nor when clauses for `specs` list.
        yaml_spec_list = config_dict(self.yaml).setdefault(user_speclist_name,
                                                           [])
        yaml_spec_list[:] = self.user_specs.yaml_list
        default_name = default_view_name
        if self.views and len(self.views) == 1 and default_name in self.views:
            path = self.default_view.root
            if self.default_view == ViewDescriptor(self.view_path_default):
                view = True
            elif self.default_view == ViewDescriptor(path):
                view = path
            else:
                view = dict((name, view.to_dict())
                            for name, view in self.views.items())
        elif self.views:
            view = dict((name, view.to_dict())
                        for name, view in self.views.items())
        else:
            view = False
        yaml_dict = config_dict(self.yaml)
        if view is not True:
            # The default case is to keep an active view inside of the
            # Spack environment directory. To avoid cluttering the config,
            # we omit the setting in this case.
            yaml_dict['view'] = view
        elif 'view' in yaml_dict:
            del yaml_dict['view']
        # if all that worked, write out the manifest file at the top level
        with fs.write_tmp_and_move(self.manifest_path) as f:
            _write_yaml(self.yaml, f)
        # TODO: for operations that just add to the env (install etc.) this
        # could just call update_view
        self.regenerate_views()
    def __enter__(self):
        # Activate this environment, remembering the previously active one
        # so __exit__ can restore it.
        self._previous_active = _active_environment
        activate(self)
        return
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore whatever environment (if any) was active before __enter__.
        deactivate()
        if self._previous_active:
            activate(self._previous_active)
def display_specs(concretized_specs):
    """Print the (user spec, concrete spec) pairs from concretization.

    Args:
        concretized_specs (list): tuples returned by
            ``Environment.concretize()``
    """
    def _render(spec):
        return spec.tree(
            recurse_dependencies=True,
            status_fn=spack.spec.Spec.install_status,
            hashlen=7, hashes=True)
    for user_spec, concrete_spec in concretized_specs:
        tty.msg('Concretized {0}'.format(user_spec))
        sys.stdout.write(_render(concrete_spec))
        print('')
def _concretize_from_constraints(spec_constraints):
    """Concretize a single spec from a list of Spec constraints.

    Exactly one constraint must be named; the anonymous ones are applied to
    it. Constraints that turn out to reference invalid dependencies or
    unknown variants are dropped and concretization is retried.

    NOTE(review): this mutates the caller's list (the named root spec is
    removed from ``spec_constraints``).
    """
    # Accept only valid constraints from list and concretize spec
    # Get the named spec even if out of order
    root_spec = [s for s in spec_constraints if s.name]
    if len(root_spec) != 1:
        m = 'The constraints %s are not a valid spec ' % spec_constraints
        m += 'concretization target. all specs must have a single name '
        m += 'constraint for concretization.'
        raise InvalidSpecConstraintError(m)
    spec_constraints.remove(root_spec[0])
    invalid_constraints = []
    while True:
        # Attach all anonymous constraints to one named spec
        s = root_spec[0].copy()
        for c in spec_constraints:
            if c not in invalid_constraints:
                s.constrain(c)
        try:
            return s.concretized()
        except spack.spec.InvalidDependencyError as e:
            # Drop the constraints responsible for the invalid deps and
            # retry; re-raise if not every invalid dep maps to a constraint.
            invalid_deps_string = ['^' + d for d in e.invalid_deps]
            invalid_deps = [c for c in spec_constraints
                            if any(c.satisfies(invd, strict=True)
                                   for invd in invalid_deps_string)]
            if len(invalid_deps) != len(invalid_deps_string):
                raise e
            invalid_constraints.extend(invalid_deps)
        except UnknownVariantError as e:
            # Same recovery strategy for constraints naming unknown variants.
            invalid_variants = e.unknown_variants
            inv_variant_constraints = [c for c in spec_constraints
                                       if any(name in c.variants
                                              for name in invalid_variants)]
            if len(inv_variant_constraints) != len(invalid_variants):
                raise e
            invalid_constraints.extend(inv_variant_constraints)
def make_repo_path(root):
    """Build a RepoPath from the repo subdirectories of an environment."""
    path = spack.repo.RepoPath()
    if os.path.isdir(root):
        for entry in os.listdir(root):
            repo_dir = os.path.join(root, entry)
            # Skip stray files; only directories can be repositories.
            if os.path.isdir(repo_dir):
                path.put_last(spack.repo.Repo(repo_dir))
    return path
def prepare_config_scope(env):
    """Push each of ``env``'s config scopes onto the global search path."""
    scopes = env.config_scopes()
    for scope in scopes:
        spack.config.config.push_scope(scope)
def deactivate_config_scope(env):
    """Remove ``env``'s config scopes from the global configuration path."""
    scopes = env.config_scopes()
    for scope in scopes:
        spack.config.config.remove_scope(scope.name)
class SpackEnvironmentError(spack.error.SpackError):
    """Base class for every error raised by Spack environment handling."""
| 36.521208 | 79 | 0.610145 |
24161eb3d69e56d34b2689adc4338e451594cc35 | 1,852 | py | Python | deepdrive_zero/experiments/intersection_2_agents_fine_tune_add_left_yield_from_scratch_resume2.py | shantanuwadnerkar/deepdrive-zero | 3134a5b092a53ff60e4207d7419fd6a19cb5a6e9 | [
"MIT"
] | null | null | null | deepdrive_zero/experiments/intersection_2_agents_fine_tune_add_left_yield_from_scratch_resume2.py | shantanuwadnerkar/deepdrive-zero | 3134a5b092a53ff60e4207d7419fd6a19cb5a6e9 | [
"MIT"
] | null | null | null | deepdrive_zero/experiments/intersection_2_agents_fine_tune_add_left_yield_from_scratch_resume2.py | shantanuwadnerkar/deepdrive-zero | 3134a5b092a53ff60e4207d7419fd6a19cb5a6e9 | [
"MIT"
] | null | null | null | import os
import sys
from deepdrive_zero.experiments import utils
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo_pytorch
import torch
# Experiment name is derived from this file's name (without the ".py" suffix).
experiment_name = os.path.basename(__file__)[:-3]
notes = """Attempting to speed up reduction in g-force, jerk, and lane
violations"""
# Environment configuration: a 2-agent intersection map with penalty/reward
# coefficients tuned for this run (original derivations kept inline below).
env_config = dict(
    env_name='deepdrive-2d-intersection-w-gs-allow-decel-v0',
    is_intersection_map=True,
    expect_normalized_action_deltas=False,
    jerk_penalty_coeff=6.7e-5,  # 2 * 0.20 / (60*100)
    gforce_penalty_coeff=0.12,  # 2 * 0.06
    collision_penalty_coeff=4,
    lane_penalty_coeff=0.04,  # 2 * 0.04
    speed_reward_coeff=0.50,
    end_on_harmful_gs=False,
    end_on_lane_violation=False,
    incent_win=True,
    constrain_controls=False,
    incent_yield_to_oncoming_traffic=True,
    physics_steps_per_observation=12,
)
# Actor-critic network: two 256-unit hidden layers with tanh activations.
net_config = dict(
    hidden_units=(256, 256),
    activation=torch.nn.Tanh
)
# Spinning Up hyperparameter grid; commented-out eg.add lines are kept as
# toggles from earlier runs.
eg = ExperimentGrid(name=experiment_name)
eg.add('env_name', env_config['env_name'], '', False)
# eg.add('seed', 0)
eg.add('resume', '/workspace/dd0-data-resume1/intersection_2_agents_fine_tune_add_left_yield_from_scratch_resume/intersection_2_agents_fine_tune_add_left_yield_from_scratch_resume_s0_2020_03-29_00-28.47')
# eg.add('reinitialize_optimizer_on_resume', True)
# eg.add('num_inputs_to_add', 0)
eg.add('pi_lr', 3e-6)
eg.add('vf_lr', 1e-5)
# eg.add('boost_explore', 5)
eg.add('epochs', 8000)
eg.add('steps_per_epoch', 32000)
eg.add('ac_kwargs:hidden_sizes', net_config['hidden_units'], 'hid')
eg.add('ac_kwargs:activation', net_config['activation'], '')
eg.add('notes', notes, '')
eg.add('run_filename', os.path.realpath(__file__), '')
eg.add('env_config', env_config, '')
def train():
    """Run PPO (PyTorch) over the configured experiment grid."""
    eg.run(ppo_pytorch)
if __name__ == '__main__':
    utils.run(train_fn=train, env_config=env_config, net_config=net_config)
1f6f1a4aa9eaf55ac2c96539fd3d835fe67d6c8b | 3,496 | py | Python | nlp/text_classification/cnn_multiclass/model.py | zhangyong2/tensorflow_nlp | 4cc3cc4abec27526336897f1c62cf904b48f2676 | [
"Apache-2.0"
] | 1 | 2019-01-30T08:39:24.000Z | 2019-01-30T08:39:24.000Z | nlp/text_classification/cnn_multiclass/model.py | zhangyong2/tensorflow_nlp | 4cc3cc4abec27526336897f1c62cf904b48f2676 | [
"Apache-2.0"
] | null | null | null | nlp/text_classification/cnn_multiclass/model.py | zhangyong2/tensorflow_nlp | 4cc3cc4abec27526336897f1c62cf904b48f2676 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
class CNNModel(object):
    """TextCNN multi-class classifier built on the TensorFlow 1.x graph API.

    Builds parallel convolutions with several filter heights over an
    embedded token sequence, max-pools each to a single value, concatenates,
    and classifies with a dropout + softmax head (Kim 2014 style).
    """
    def __init__(self, config):
        # config is expected to expose: sentence_length, vocab_size,
        # vector_size, img_h, filter_hs, num_filters, label_size,
        # num_epochs -- TODO confirm against the config class.
        self.config = config
        # Graph inputs: token-id matrix, integer labels, dropout keep prob.
        self.x_in = tf.placeholder(tf.int64, shape=[None, config.sentence_length])
        self.y_in = tf.placeholder(tf.int64, shape=[None])
        self.keep_prob = tf.placeholder(tf.float32)
        # Trainable word embeddings, uniformly initialized in [-1, 1].
        self.embeddings = tf.Variable(
            tf.random_uniform([config.vocab_size, config.vector_size], -1.0, 1.0))
        self.loss, self.accuracy, self.scores = self.build_model()
        self.global_step = tf.Variable(0)
        self.learning_rate = \
            tf.train.exponential_decay(1e-2, self.global_step, config.num_epochs, 0.99, staircase=True)  # learning rate decays over training
        self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\
            .minimize(self.loss, global_step=self.global_step)
    def conv2d(self, x, W):
        # 'VALID' padding: no zero padding, stride 1 in every dimension.
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
    def max_pool(self, x, filter_h):
        # Pool over the full remaining height so each filter yields one value.
        return tf.nn.max_pool(x, ksize=[1, self.config.img_h - filter_h + 1, 1, 1],
                              strides=[1, 1, 1, 1], padding='VALID')
    def build_model(self):
        """Assemble the graph; return (loss, accuracy, logits)."""
        config = self.config
        # Embedding layer===============================
        x_image_tmp = tf.nn.embedding_lookup(self.embeddings, self.x_in)
        x_image = tf.expand_dims(x_image_tmp, -1)  # add a single channel dim
        with tf.variable_scope('cnn_conv', reuse=None):
            # One (W, b, relu-conv) branch per filter height.
            h_conv = []
            for filter_h in config.filter_hs:
                filter_shape = [filter_h, config.vector_size, 1, config.num_filters]
                W_conv1 = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b_conv1 = tf.Variable(tf.constant(0.1, shape=[config.num_filters]), name="b")
                h_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1)
                h_conv.append(h_conv1)
            h_pool_output = []
            for h_conv1, filter_h in zip(h_conv, config.filter_hs):
                h_pool1 = self.max_pool(h_conv1, filter_h)  # output size: 1
                h_pool_output.append(h_pool1)
        l2_reg_lambda = 0.001
        num_filters_total = config.num_filters * len(config.filter_hs)
        # Concatenate the pooled branches along the channel axis and flatten.
        h_pool = tf.concat(h_pool_output, 3)
        h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
        h_drop = tf.nn.dropout(h_pool_flat, self.keep_prob)
        with tf.variable_scope('cnn_score', reuse=None):
            W = tf.Variable(tf.truncated_normal([num_filters_total, config.label_size], stddev=0.1))
            b = tf.Variable(tf.constant(0.1, shape=[config.label_size]), name="b")
            # L2 penalty applies to the classification head only.
            l2_loss = tf.nn.l2_loss(W) + tf.nn.l2_loss(b)
            scores = tf.nn.xw_plus_b(h_drop, W, b, name="scores")  # wx+b
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=scores, labels=self.y_in)
        loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
        prediction = tf.argmax(scores, 1)
        correct_prediction = tf.equal(prediction, self.y_in)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        return loss, accuracy, scores
    def predict_label(self, sess, labels, text):
        """Map encoded *text* rows to label names via argmax of the logits.

        *labels* is assumed to map label name -> id; it is inverted here.
        NOTE(review): returns a map iterator on Python 3, not a list --
        callers must iterate it exactly once.
        """
        x = np.array(text)
        feed = {self.x_in: x, self.keep_prob: 1.0}
        probs = sess.run([self.scores], feed_dict=feed)
        results = np.argmax(probs[0], 1)
        id2labels = dict(zip(labels.values(), labels.keys()))
        labels = map(id2labels.get, results)
        return labels
| 44.820513 | 112 | 0.62214 |
08119bee3121fbcd34e9d0bd1b13a5d9e6024168 | 170 | py | Python | Completed/python3/1486.py | zainkai/LeetCodeChallenges | 60645b895437bc1b56b88c48cdf22c38027285ec | [
"MIT"
] | 1 | 2020-07-01T05:33:30.000Z | 2020-07-01T05:33:30.000Z | Completed/python3/1486.py | zainkai/LeetCodeChallenges | 60645b895437bc1b56b88c48cdf22c38027285ec | [
"MIT"
] | null | null | null | Completed/python3/1486.py | zainkai/LeetCodeChallenges | 60645b895437bc1b56b88c48cdf22c38027285ec | [
"MIT"
] | null | null | null | class Solution:
def xorOperation(self, n: int, start: int) -> int:
res = start
for i in range(1,n):
res ^= start + 2*i
return res
| 24.285714 | 54 | 0.511765 |
2aa7b3c27a8949eb64dd63f70c39e944100ffa5c | 2,658 | py | Python | tests/test_TRADE_RulePolicy_TemplateNLG.py | Malavikka/ConvLab-2 | f2a0d251e4fab9e36e9d9f04df6308623d2d780c | [
"Apache-2.0"
] | 339 | 2020-03-04T09:43:22.000Z | 2022-03-26T17:27:38.000Z | tests/test_TRADE_RulePolicy_TemplateNLG.py | Malavikka/ConvLab-2 | f2a0d251e4fab9e36e9d9f04df6308623d2d780c | [
"Apache-2.0"
] | 122 | 2020-04-12T04:19:06.000Z | 2022-03-23T14:20:57.000Z | tests/test_TRADE_RulePolicy_TemplateNLG.py | Malavikka/ConvLab-2 | f2a0d251e4fab9e36e9d9f04df6308623d2d780c | [
"Apache-2.0"
] | 138 | 2020-02-18T16:48:04.000Z | 2022-03-26T17:27:43.000Z | # available NLU models
# from convlab2.nlu.svm.multiwoz import SVMNLU
from convlab2.nlu.jointBERT.multiwoz import BERTNLU
# from convlab2.nlu.milu.multiwoz import MILU
# available DST models
# from convlab2.dst.rule.multiwoz import RuleDST
# from convlab2.dst.mdbt.multiwoz import MDBT
# from convlab2.dst.sumbt.multiwoz import SUMBT
from convlab2.dst.trade.multiwoz import TRADE
# from convlab2.dst.comer.multiwoz import COMER
# available Policy models
from convlab2.policy.rule.multiwoz import RulePolicy
# from convlab2.policy.ppo.multiwoz import PPOPolicy
# from convlab2.policy.pg.multiwoz import PGPolicy
# from convlab2.policy.mle.multiwoz import MLEPolicy
# from convlab2.policy.gdpl.multiwoz import GDPLPolicy
# from convlab2.policy.vhus.multiwoz import UserPolicyVHUS
# from convlab2.policy.mdrg.multiwoz import MDRGWordPolicy
# from convlab2.policy.hdsa.multiwoz import HDSA
# from convlab2.policy.larl.multiwoz import LaRL
# available NLG models
from convlab2.nlg.template.multiwoz import TemplateNLG
from convlab2.nlg.sclstm.multiwoz import SCLSTM
# available E2E models
# from convlab2.e2e.sequicity.multiwoz import Sequicity
# from convlab2.e2e.damd.multiwoz import Damd
from convlab2.dialog_agent import PipelineAgent, BiSession
from convlab2.evaluator.multiwoz_eval import MultiWozEvaluator
from convlab2.util.analysis_tool.analyzer import Analyzer
from pprint import pprint
import random
import numpy as np
import torch
def set_seed(r_seed):
    """Seed Python's, NumPy's, and PyTorch's RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(r_seed)
def test_end2end():
    """Run a full analyzer pass: a TRADE-DST system agent versus a
    BERT-NLU rule-based simulated user on MultiWOZ.

    See the README.md of each model for more information.
    """
    # System side: TRADE consumes raw utterances, so no NLU is needed;
    # decisions come from the rule policy, rendered by template NLG.
    dst_for_sys = TRADE()
    policy_for_sys = RulePolicy()
    nlg_for_sys = TemplateNLG(is_user=False)
    sys_agent = PipelineAgent(None, dst_for_sys, policy_for_sys, nlg_for_sys, name='sys')
    # User side: BERT NLU trained on system utterances; the rule user
    # policy tracks its own state, so no separate DST is used.
    nlu_for_user = BERTNLU(mode='sys', config_file='multiwoz_sys_context.json',
                           model_file='https://convlab.blob.core.windows.net/convlab-2/bert_multiwoz_sys_context.zip')
    policy_for_user = RulePolicy(character='usr')
    nlg_for_user = TemplateNLG(is_user=True)
    user_agent = PipelineAgent(nlu_for_user, None, policy_for_user, nlg_for_user, name='user')
    analyzer = Analyzer(user_agent=user_agent, dataset='multiwoz')
    set_seed(20200202)
    analyzer.comprehensive_analyze(sys_agent=sys_agent, model_name='TRADE-RulePolicy-TemplateNLG', total_dialog=1000)
if __name__ == '__main__':
    test_end2end()
| 35.44 | 117 | 0.77088 |
ae6d4e3ed428fa7bda1fcfc2bf3037940a6de9d0 | 1,485 | py | Python | t_9_classes/t_9_6_private_variables/main.py | naokiur/Python-tutorial | 7b03dc8fd2e5992859fde00bfe2873b4fb7ca5e5 | [
"Apache-2.0"
] | null | null | null | t_9_classes/t_9_6_private_variables/main.py | naokiur/Python-tutorial | 7b03dc8fd2e5992859fde00bfe2873b4fb7ca5e5 | [
"Apache-2.0"
] | null | null | null | t_9_classes/t_9_6_private_variables/main.py | naokiur/Python-tutorial | 7b03dc8fd2e5992859fde00bfe2873b4fb7ca5e5 | [
"Apache-2.0"
] | null | null | null | class Mapping:
def __init__(self, iterable):
self.items_list = []
self.__update(iterable)
def update(self, iterable):
for item in iterable:
self.items_list.append(item)
__update = update
class MappingSubclass(Mapping):
    # noinspection PyMethodOverriding
    def update(self, keys, values):
        """Append (key, value) pairs.

        Provides a new two-argument signature for update(), but does not
        break Mapping.__init__(), which uses its private __update copy.
        """
        append = self.items_list.append
        for pair in zip(keys, values):
            append(pair)
first_list = range(1, 4)
mapping = Mapping(first_list)
print(mapping.items_list)
second_list = range(4, 7)
mapping.update(second_list)
print(mapping.items_list)
sub_mapping = MappingSubclass(first_list)
# The subclass overrides update() with a new signature, but Mapping.__init__
# calls its private (name-mangled) __update copy, so construction still
# populates items_list correctly.
print(sub_mapping.items_list)
sub_mapping.update(first_list, second_list)
print(sub_mapping.items_list)
class BreakMapping:
    """Like Mapping, but __init__ calls the *public* update() directly, so
    an incompatible subclass override breaks construction."""
    def __init__(self, iterable):
        self.item_list = []
        self.update(iterable)

    def update(self, iterable):
        """Append every element of *iterable* to item_list."""
        append = self.item_list.append
        for entry in iterable:
            append(entry)
class BreakSubMapping(BreakMapping):
    def update(self, keys, values):
        """Append (key, value) pairs -- an incompatible two-argument
        signature that breaks BreakMapping.__init__()."""
        append = self.item_list.append
        for pair in zip(keys, values):
            append(pair)
# Raises TypeError: BreakMapping.__init__ calls self.update(iterable), which
# BreakSubMapping overrides with an incompatible two-argument signature.
break_sub_mapping = BreakSubMapping(first_list)
print(break_sub_mapping.item_list)
| 26.517857 | 109 | 0.705051 |
9041b7c69ba0ab4e477e98fd9331ad258678c1f3 | 8,498 | py | Python | src/order_neighbors_3Angle.py | lyjspx/A-Novel-Protein-Structural-Structure | bd167ef7200e25657ddb58cb141ba5fa6a3351f1 | [
"MIT"
] | null | null | null | src/order_neighbors_3Angle.py | lyjspx/A-Novel-Protein-Structural-Structure | bd167ef7200e25657ddb58cb141ba5fa6a3351f1 | [
"MIT"
] | null | null | null | src/order_neighbors_3Angle.py | lyjspx/A-Novel-Protein-Structural-Structure | bd167ef7200e25657ddb58cb141ba5fa6a3351f1 | [
"MIT"
] | null | null | null | #import external modules
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.cluster import KMeans
from multiprocessing import Pool
#import internal modules
from src.utils import workingRoot
class NeighborProcessing:
    """Build clustered neighbor features from a per-residue neighbor CSV.

    The table is assumed to lay out each neighbor in 5 consecutive columns:
    (residue name, distance, angle, CA angle, C angle) -- inferred from the
    ``5*i`` .. ``5*i+4`` column arithmetic below; confirm against the CSV
    producer.
    """
    def __init__(self,fileName,workingRoot=workingRoot):
        self.filename = fileName
        self.workingRoot = workingRoot
        # Drop the first CSV column (presumably a row index -- verify).
        self.neighbors = pd.read_csv(workingRoot+fileName).iloc[:,1:]
        # residue name -> fitted KMeans model (filled by get_K_neighbor).
        self.kClusterDic = {}
        # top-M "<resName><cluster>" keys (filled by get_final_feature).
        self.importantClusters = []
        # Vectorized wrappers so the helpers broadcast over column Series.
        self.v_one_column = np.vectorize(self._one_column)
        self.v_norm_distance = np.vectorize(self._norm_distance)
    def get_similar_neighbor_in_each_sample(self,numNeigh=18,numberProcess=18):
        """For each of the first *numNeigh* neighbor slots, find (in
        parallel, one worker per slot) the most similar same-named neighbor
        in every sample, then return one long DataFrame with the reference
        columns (resName/resDis/resAng/resAngCa/resAngC) appended."""
        totalDis = []
        with Pool(processes=numberProcess) as pool:
            # Each task handles one 5-column neighbor slot (column offset 5*x).
            for i in pool.map(self._wrap, [5 * x for x in range(numNeigh)]):
                tempList = []
                for x in i:
                    # Flatten the nested per-row result into one flat record.
                    temp = [z for y in x for z in y]
                    tempList.append(temp)
                tempList = pd.DataFrame(tempList)
                tempList['order'] = list(range(tempList.shape[0]))
                totalDis.append(tempList)
        totalDis = pd.concat(totalDis)
        # 'numNum' is the neighbor-slot index; the res* columns are that
        # slot's reference values, raveled column-major ('F') to line up
        # with the row-wise concatenation above.
        totalDis['numNum'] = [i for i in range(numNeigh) for j in range(tempList.shape[0])]
        totalDis['resName'] = np.ravel(self.neighbors.iloc[:,[5*i for i in range(numNeigh)]],order='F')
        totalDis['resDis'] = np.ravel(self.neighbors.iloc[:, [5 * i + 1 for i in range(numNeigh)]], order='F')
        totalDis['resAng'] = np.ravel(self.neighbors.iloc[:, [5 * i + 2 for i in range(numNeigh)]], order='F')
        totalDis['resAngCa'] = np.ravel(self.neighbors.iloc[:, [5 * i + 3 for i in range(numNeigh)]], order='F')
        totalDis['resAngC'] = np.ravel(self.neighbors.iloc[:, [5 * i + 4 for i in range(numNeigh)]], order='F')
        return totalDis
    def get_group_dis(self,similarNeigh,numberProcess=1):
        """Return, per row, the summed normalized distance between every
        neighbor slot and the reference neighbor (the trailing res* columns).

        Missing values are replaced by sentinels: '21' for the name, 999
        for the numeric fields.
        NOTE(review): *numberProcess* is accepted but never used here.
        """
        similarNeigh["resName"] = similarNeigh["resName"].fillna('21')
        similarNeigh["resDis"] = similarNeigh["resDis"].fillna(999)
        similarNeigh["resAng"] = similarNeigh["resAng"].fillna(999)
        similarNeigh["resAngCa"] = similarNeigh["resAngCa"].fillna(999)
        similarNeigh["resAngC"] = similarNeigh["resAngC"].fillna(999)
        # 7 trailing bookkeeping columns; the rest are 5-wide neighbor slots.
        numNeigh = int((similarNeigh.shape[1]-7)/5)
        # NOTE(review): _norm_distance's parameter names read
        # (ang, angCa, angC, dis) but the columns passed here are ordered
        # (dis, ang, angCa, angC); the slot-vs-reference pairing is
        # consistent, only the parameter names are misleading.
        disMatrix = [self.v_norm_distance(similarNeigh.iloc[:,5*i+1],
                                          similarNeigh.iloc[:,5*i+2],
                                          similarNeigh.iloc[:,5*i+3],
                                          similarNeigh.iloc[:,5*i+4],
                                          similarNeigh.iloc[:,-4],
                                          similarNeigh.iloc[:,-3],
                                          similarNeigh.iloc[:,-2],
                                          similarNeigh.iloc[:,-1])
                     for i in range(numNeigh)]
        # Sum the per-slot distances row-wise.
        return [sum(i) for i in np.column_stack(disMatrix)]
    def get_K_neighbor(self,neighInfo,K=2):
        """Cluster rows per residue name with KMeans over the numeric
        neighbor columns, and tag each row with its cluster id."""
        numNeigh = int((neighInfo.shape[1] - 8) / 5)
        # Numeric columns only (skip each slot's name column at 5*i).
        colIndex = [j for i in range(numNeigh) for j in (5*i+1,5*i+2,5*i+3,5*i+4)]
        resSegment = []
        for res in neighInfo['resName'].unique():
            oneRes = neighInfo[neighInfo['resName'] == res].copy(deep=True)
            # Never ask for more clusters than samples - 1.
            kmeans = KMeans(n_clusters=min(oneRes.shape[0]-1,K)).fit(oneRes.iloc[:,colIndex])
            self.kClusterDic[res] = kmeans
            oneRes['Kmeans_cluster'] = kmeans.labels_
            resSegment.append(oneRes)
        result = pd.concat(resSegment)
        # Inflate near-zero distances so they sort last later on.
        # NOTE(review): chained assignment -- pandas may warn and, depending
        # on the version, silently not write back; consider .loc instead.
        result["total_dis"][result['total_dis'] < 1] = max(result['total_dis']) * 10
        return result
    def get_final_feature(self,clusteredNearDf:pd.DataFrame,M=4):
        """Pick the M (resName, cluster) pairs with the smallest total_dis
        and, for every sample, sample one matching row per pair to form a
        fixed-length feature vector ('unknown'/999 padding when absent)."""
        allClusters = clusteredNearDf.sort_values(by=['total_dis']).loc[:,
                      ['resName','Kmeans_cluster']].drop_duplicates().iloc[0:M,]
        self.importantClusters = [str(allClusters.iloc[i,0]) +\
                                  str(allClusters.iloc[i,1]) for i in range(M)]
        # 9 bookkeeping columns here (the 8 above plus Kmeans_cluster).
        numNeigh = int((clusteredNearDf.shape[1] - 9) / 5)
        finalFeature = []
        for i in range(numNeigh):
            oneSample = []
            oneData = clusteredNearDf[clusteredNearDf["order"]==i].copy(deep=True)
            # Composite key "<resName><cluster id>" identifies a cluster.
            oneData["cluster"] = oneData["resName"] + oneData["Kmeans_cluster"].astype('str')
            for keyCluster in self.importantClusters:
                if oneData[oneData['cluster'] == keyCluster].shape[0] == 0:
                    # No row in this cluster for this sample: sentinel padding.
                    oneSample.extend(['unknown',999,999,999,999])
                else:
                    # Randomly sample one matching row's 5 feature columns.
                    oneSample.extend(list(oneData[oneData['cluster']==
                                                  keyCluster].sample(1).iloc[:,[-8,-7,-6,-5,-4]].values[0]))
            finalFeature.append(oneSample)
        return finalFeature
    def _one_column(self,res_name, res_dis, res_angle, res_angleCa, res_angleC):
        """Apply _process_one_row to every row for one reference neighbor."""
        return self.neighbors.apply(lambda x: self._process_one_row(x, res_name,
                                                                    res_dis,
                                                                    res_angle,
                                                                    res_angleCa,
                                                                    res_angleC), axis=1)
    def _wrap(self,i):
        """Pool worker: process the 5-column neighbor slot starting at column i."""
        return self.v_one_column(self.neighbors.iloc[:,i],
                                 self.neighbors.iloc[:,i+1],
                                 self.neighbors.iloc[:,i+2],
                                 self.neighbors.iloc[:,i+3],
                                 self.neighbors.iloc[:,i+4])
    def _process_one_row(self,one_row_protein, res_name, res_dis, res_angle, res_angleCa, res_angleC):
        """Return the 5-value neighbor record in *one_row_protein* whose name
        equals *res_name* and whose numeric fields are closest (Euclidean)
        to the reference; sentinel record (21, 999, ...) when absent."""
        one_row = []
        length = sum(one_row_protein == res_name)
        if length < 1:
            # No same-named neighbor in this row: emit the sentinel record.
            one_row.append(21)
            one_row.append(999)
            one_row.append(999)
            one_row.append(999)
            one_row.append(999)
        elif length == 1:
            # Exactly one match: take its (name, dis, ang, angCa, angC).
            for index in one_row_protein[one_row_protein == res_name].index:
                index = int(index)
                one_row.append(one_row_protein[index - 1])
                one_row.append(one_row_protein[index])
                one_row.append(one_row_protein[index + 1])
                one_row.append(one_row_protein[index + 2])
                one_row.append(one_row_protein[index + 3])
        else:
            # Several matches: pick the one with minimal Euclidean distance
            # to the reference over the four numeric fields.
            candidate_index = []
            compare = []
            compare_candidate = []
            for index in one_row_protein[one_row_protein == res_name].index:
                index = int(index)
                candidate_index.append(index)
                compare.append(np.sqrt((one_row_protein[index] - res_dis)**2 \
                                       + (one_row_protein[index + 1] - res_angle)**2 \
                                       + (one_row_protein[index + 2] - res_angleCa)**2 \
                                       + (one_row_protein[index + 3] - res_angleC)**2))
                compare_candidate.append(one_row_protein[index - 1])
                compare_candidate.append(one_row_protein[index])
                compare_candidate.append(one_row_protein[index + 1])
                compare_candidate.append(one_row_protein[index + 2])
                compare_candidate.append(one_row_protein[index + 3])
            min_index = compare.index(min(compare))
            one_row.append(compare_candidate[min_index * 5])
            one_row.append(compare_candidate[min_index * 5 + 1])
            one_row.append(compare_candidate[min_index * 5 + 2])
            one_row.append(compare_candidate[min_index * 5 + 3])
            one_row.append(compare_candidate[min_index * 5 + 4])
        return one_row
    def _norm_distance(self,ang1, ang1Ca, ang1C, dis1, ang2, ang2Ca, ang2C, dis2):
        """Normalize each (value, reference) pair column-wise, then return
        the Euclidean distance between the two normalized 4-vectors."""
        normArray = preprocessing.normalize([[ang1, ang2],
                                             [ang1Ca, ang2Ca],
                                             [ang1C, ang2C],
                                             [dis1, dis2]], axis=0)
        totalDis = np.sqrt((normArray[0][0] - normArray[0][1]) ** 2 + \
                           (normArray[1][0] - normArray[1][1]) ** 2 + \
                           (normArray[2][0] - normArray[2][1]) ** 2 + \
                           (normArray[3][0] - normArray[3][1]) ** 2)
        return (totalDis)
ace980b445b470e6e67bda670f40cf5354d7a0ce | 33,841 | py | Python | modeling/model_net_search.py | HankKung/Dynamic-AutoDeepLab | 4150a19d632269f7ebcb63e92906a7f40e6a283b | [
"Apache-2.0"
] | 9 | 2020-02-12T07:20:42.000Z | 2021-10-16T06:36:19.000Z | modeling/model_net_search.py | HankKung/Distributed-AutoDeepLab | 4150a19d632269f7ebcb63e92906a7f40e6a283b | [
"Apache-2.0"
] | 2 | 2020-04-02T06:39:53.000Z | 2021-01-19T10:36:07.000Z | modeling/model_net_search.py | HankKung/Distributed-AutoDeepLab | 4150a19d632269f7ebcb63e92906a7f40e6a283b | [
"Apache-2.0"
] | 3 | 2020-02-28T22:15:34.000Z | 2021-08-05T07:26:03.000Z | import torch
import torch.nn as nn
import numpy as np
from modeling.genotypes import PRIMITIVES
import torch.nn.functional as F
from modeling.operations import *
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class Cell_fixed(nn.Module):
    """A NAS cell with a fixed (already-discretized) architecture.

    *cell* (stored as ``cell_arch``) is indexable as rows of
    (branch_index, op_index): op_index selects a PRIMITIVES entry, and
    branch_index selects which intermediate state feeds that op.
    The cell can consume up to three previous-layer features (from the
    scale below, the same scale, and the scale above) plus the
    previous-previous feature s0, and returns one concatenated output per
    provided input path.
    """
    def __init__(self,
                 B,
                 prev_prev_C,
                 prev_C_down,
                 prev_C_same,
                 prev_C_up,
                 C_out,
                 cell,
                 BatchNorm=nn.BatchNorm2d,
                 pre_preprocess_sample_rate=1):
        # B: number of intermediate nodes (blocks) in the cell.
        # prev_prev_C: channels of s0 (-1 disables the s0 preprocess).
        # prev_C_down/same/up: channels of the three optional s1 inputs
        #   (None disables the corresponding path).
        # pre_preprocess_sample_rate: spatial factor applied to s0
        #   (1 = keep, 0.5 = halve, 0.25 = quarter).
        super(Cell_fixed, self).__init__()
        eps = 1e-5
        momentum = 0.1
        self.B = B
        self.cell_arch = cell
        # One preprocess per possible s1 input, each projecting to C_out;
        # the "down" path also halves the spatial resolution.
        if prev_C_down is not None:
            self.preprocess_down = FactorizedReduce(
                prev_C_down, C_out, BatchNorm=BatchNorm, affine=False)
        if prev_C_same is not None:
            self.preprocess_same = ReLUConvBN(
                prev_C_same, C_out, 1, 1, 0, BatchNorm=BatchNorm, affine=False)
        if prev_C_up is not None:
            self.preprocess_up = ReLUConvBN(
                prev_C_up, C_out, 1, 1, 0, BatchNorm=BatchNorm, affine=False)
        self._ops = nn.ModuleList()
        # s0 preprocess choice depends on how much s0 must be downsampled.
        if prev_prev_C != -1:
            if pre_preprocess_sample_rate >= 1:
                self.pre_preprocess = ReLUConvBN(
                    prev_prev_C, C_out, 1, 1, 0, BatchNorm=BatchNorm, affine=False)
            elif pre_preprocess_sample_rate == 0.5:
                self.pre_preprocess = FactorizedReduce(
                    prev_prev_C, C_out, BatchNorm=BatchNorm, affine=False)
            elif pre_preprocess_sample_rate == 0.25:
                self.pre_preprocess = DoubleFactorizedReduce(
                    prev_prev_C, C_out, BatchNorm=BatchNorm, affine=False)
        # Instantiate exactly one op per selected (branch, primitive) row.
        for x in self.cell_arch:
            primitive = PRIMITIVES[x[1]]
            op = OPS[primitive](C_out, 1, BatchNorm, eps=eps, momentum=momentum, affine=False)
            self._ops.append(op)
    def scale_dimension(self, dim, scale):
        # Align-corners style scaling: maps size s to (s-1)*scale + 1.
        return int((float(dim) - 1.0) * scale + 1.0)
    def prev_feature_resize(self, prev_feature, mode):
        # NOTE(review): any mode other than 'down'/'up' would leave
        # feature_size_h/w unbound and raise; callers only pass 'up' here.
        if mode == 'down':
            feature_size_h = self.scale_dimension(prev_feature.shape[2], 0.5)
            feature_size_w = self.scale_dimension(prev_feature.shape[3], 0.5)
        elif mode == 'up':
            feature_size_h = self.scale_dimension(prev_feature.shape[2], 2)
            feature_size_w = self.scale_dimension(prev_feature.shape[3], 2)
        return F.interpolate(prev_feature, (feature_size_h, feature_size_w), mode='bilinear')
    def forward(self, s0, s1_down, s1_same, s1_up):
        """Run the cell once per provided s1 path.

        Returns a list with one concatenated feature map per non-None
        s1 input, in (down, same, up) order.
        """
        # Project each provided s1 input to C_out channels; the last one
        # processed defines the working spatial size for s0.
        if s1_down is not None:
            s1_down = self.preprocess_down(s1_down)
            size_h, size_w = s1_down.shape[2], s1_down.shape[3]
        if s1_same is not None:
            s1_same = self.preprocess_same(s1_same)
            size_h, size_w = s1_same.shape[2], s1_same.shape[3]
        if s1_up is not None:
            s1_up = self.prev_feature_resize(s1_up, 'up')
            s1_up = self.preprocess_up(s1_up)
            size_h, size_w = s1_up.shape[2], s1_up.shape[3]
        all_states = []
        if s0 is not None:
            # Upsample s0 only if it is smaller than the working size,
            # then project it to C_out channels.
            s0 = F.interpolate(s0, (size_h, size_w), mode='bilinear') if (
                s0.shape[2] < size_h) or (s0.shape[3] < size_w) else s0
            s0 = self.pre_preprocess(s0)
            if s1_down is not None:
                states_down = [s0, s1_down]
                all_states.append(states_down)
                del s1_down
            if s1_same is not None:
                states_same = [s0, s1_same]
                all_states.append(states_same)
                del s1_same
            if s1_up is not None:
                states_up = [s0, s1_up]
                all_states.append(states_up)
                del s1_up
        else:
            # No s0: use 0 as a placeholder first state so branch indexing
            # stays aligned with the two-input cell_arch numbering.
            if s1_down is not None:
                states_down = [0, s1_down]
                all_states.append(states_down)
            if s1_same is not None:
                states_same = [0, s1_same]
                all_states.append(states_same)
            if s1_up is not None:
                states_up = [0, s1_up]
                all_states.append(states_up)
        del s0
        final_concates = []
        for states in all_states:
            # DARTS-style expansion: each of the B nodes sums the outputs of
            # its selected ops (branch indices in cell_arch[:, 0]) applied to
            # earlier states, then joins the state list.
            offset = 0
            ops_index = 0
            for i in range(self.B):
                new_states = []
                for j, h in enumerate(states):
                    branch_index = offset + j
                    if branch_index in self.cell_arch[:, 0]:
                        new_state = self._ops[ops_index](h)
                        new_states.append(new_state)
                        ops_index += 1
                s = sum(new_states)
                offset += len(states)
                states.append(s)
            # The cell output is the concatenation of the B newest states.
            concat_feature = torch.cat(states[-self.B:], dim=1)
            final_concates.append(concat_feature)
        return final_concates
class Model_net_search (nn.Module) :
def __init__(self,
num_classes,
num_layers,
args,
C_index=5,
alphas=None):
super(Model_net_search, self).__init__()
cell = Cell_fixed
BatchNorm = SynchronizedBatchNorm2d if args.sync_bn == True else nn.BatchNorm2d
self.cells = nn.ModuleList()
self._num_layers = num_layers
self._num_classes = num_classes
self.C_index = C_index
self._initialize_alphas_betas()
self.alphas = alphas
B = args.B
F = args.F
f_initial = F * B
half_f_initial = int(f_initial / 2)
FB = F * B
self.dense_preprocess = nn.ModuleList()
for i in range(self._num_layers-2):
if i == 0:
self.dense_preprocess.append(nn.ModuleList())
self.dense_preprocess[0].append(ReLUConvBN(FB, F, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
self.dense_preprocess[0].append(ReLUConvBN(FB * 2, F * 2, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
self.dense_preprocess[0].append(FactorizedReduce(FB * 2, F * 4, BatchNorm=BatchNorm, affine=False))
self.dense_preprocess[0].append(DoubleFactorizedReduce(FB * 2, F * 8, BatchNorm=BatchNorm, affine=False))
elif i == 1:
self.dense_preprocess.append(nn.ModuleList())
self.dense_preprocess[1].append(ReLUConvBN(FB, F, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
self.dense_preprocess[1].append(ReLUConvBN(FB * 2, F * 2, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
self.dense_preprocess[1].append(ReLUConvBN(FB * 4, F * 4, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
self.dense_preprocess[1].append(FactorizedReduce(FB * 4, F * 8, BatchNorm=BatchNorm, affine=False))
else:
self.dense_preprocess.append(nn.ModuleList())
self.dense_preprocess[i].append(ReLUConvBN(FB, F, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
self.dense_preprocess[i].append(ReLUConvBN(FB * 2, F * 2, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
self.dense_preprocess[i].append(ReLUConvBN(FB * 4, F * 4, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
self.dense_preprocess[i].append(ReLUConvBN(FB * 8, F * 8, 1, 1, 0, BatchNorm=BatchNorm, affine=False))
self.stem0 = nn.Sequential(
nn.Conv2d(3, half_f_initial, 3, stride=2, padding=1, bias=False),
BatchNorm(half_f_initial),
)
self.stem1 = nn.Sequential(
nn.ReLU(),
nn.Conv2d(half_f_initial, f_initial, 3, stride=2, padding=1, bias=False),
BatchNorm(f_initial),
)
""" build the cells """
for i in range (self._num_layers):
if i == 0 :
cell1 = cell (B, half_f_initial,
None, f_initial, None,
F, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.5)
cell2 = cell (B, half_f_initial,
f_initial, None, None,
F * 2, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.25)
self.cells += [cell1]
self.cells += [cell2]
elif i == 1 :
cell1 = cell (B, f_initial,
None, FB, FB * 2,
F, alphas, BatchNorm=BatchNorm)
cell2 = cell (B, f_initial,
FB, FB * 2, None,
F * 2, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.5)
cell3 = cell (B, f_initial,
FB * 2, None, None,
F * 4, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.25)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
elif i == 2 :
cell1 = cell (B, FB,
None, FB, FB * 2,
F, alphas, BatchNorm=BatchNorm)
cell2 = cell (B, FB * 2,
FB, FB * 2, FB * 4,
F * 2, alphas, BatchNorm=BatchNorm)
cell3 = cell (B, FB * 2,
FB * 2, FB * 4, None,
F * 4, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.5)
cell4 = cell (B, FB * 2,
FB * 4, None, None,
F * 8, alphas, BatchNorm=BatchNorm, pre_preprocess_sample_rate=0.25)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
else:
cell1 = cell (B, F * (i-1),
None, FB, FB * 2,
F, alphas, BatchNorm=BatchNorm)
cell2 = cell (B, F * (i-1) * 2,
FB, FB * 2, FB * 4,
F * 2, alphas, BatchNorm=BatchNorm)
cell3 = cell (B, F * (i-1) * 4,
FB * 2, FB * 4, FB * 8,
F * 4, alphas, BatchNorm=BatchNorm)
cell4 = cell (B, F * (i-1) * 8,
FB * 4, FB * 8, None,
F * 8, alphas, BatchNorm=BatchNorm)
self.cells += [cell1]
self.cells += [cell2]
self.cells += [cell3]
self.cells += [cell4]
self.aspp_4 = ASPP (FB, self._num_classes, 24, 24, BatchNorm=BatchNorm) #96 / 4 as in the paper
self.aspp_8 = ASPP (FB * 2, self._num_classes, 12, 12, BatchNorm=BatchNorm) #96 / 8
self.aspp_16 = ASPP (FB * 4, self._num_classes, 6, 6, BatchNorm=BatchNorm) #96 / 16
self.aspp_32 = ASPP (FB * 8, self._num_classes, 3, 3, BatchNorm=BatchNorm) #96 / 32
self._init_weight()
    def forward (self, x) :
        """One forward pass of the architecture-search network.

        Normalizes the beta (path) weights with softmax, then walks the
        layer x downsample-level grid of cells, mixing each level's cell
        outputs by the normalized betas.  Every output point (layers listed
        in ``self.C_index`` plus the final layer) is decoded by the matching
        ASPP head; the per-level maps are upsampled to the input size and
        summed.  Returns a list with one prediction map per output point.
        """
        # Feature histories per downsample level (x4 .. x32) plus the
        # projected versions used for the dense cross-layer connections.
        level_4 = []
        level_8 = []
        level_16 = []
        level_32 = []
        level_4_dense = []
        level_8_dense = []
        level_16_dense = []
        level_32_dense = []
        C_output_4 = []
        C_output_8 = []
        C_output_16 = []
        C_output_32 = []
        temp = self.stem0(x)
        level_4.append (self.stem1(temp))
        count = 0
        # NOTE(review): entries of normalized_betas that the softmax loop
        # below never assigns keep their random init, and .half() assumes
        # fp16 execution -- confirm both are intentional.
        normalized_betas = torch.randn(12, 4, 3).cuda().half()
        """ Softmax on betas """
        for layer in range (len(self.betas)):
            if layer == 0:
                normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1) * (2/3)
            elif layer == 1:
                normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1) * (2/3)
                normalized_betas[layer][1] = F.softmax (self.betas[layer][1], dim=-1)
            elif layer == 2:
                normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1) * (2/3)
                normalized_betas[layer][1] = F.softmax (self.betas[layer][1], dim=-1)
                normalized_betas[layer][2] = F.softmax (self.betas[layer][2], dim=-1)
            else :
                normalized_betas[layer][0][1:] = F.softmax (self.betas[layer][0][1:], dim=-1) * (2/3)
                normalized_betas[layer][1] = F.softmax (self.betas[layer][1], dim=-1)
                normalized_betas[layer][2] = F.softmax (self.betas[layer][2], dim=-1)
                normalized_betas[layer][3][:2] = F.softmax (self.betas[layer][3][:2], dim=-1) * (2/3)
        for layer in range (self._num_layers) :
            # Layer 0: only the x4 and x8 levels exist yet.
            if layer == 0 :
                level4_new, = self.cells[count] (temp, None, level_4[-1], None)
                count += 1
                level8_new, = self.cells[count] (temp, level_4[-1], None, None)
                count += 1
                level4_new = normalized_betas[layer][0][1] * level4_new
                level8_new = normalized_betas[layer][0][2] * level8_new
                level_4.append (level4_new)
                level_8.append (level8_new)
                del temp
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level8_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level8_new))
            # Layer 1: the x16 level comes online.
            elif layer == 1 :
                level4_new_1, level4_new_2 = self.cells[count] (level_4[-2],
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2 = self.cells[count] (level_4[-2],
                                                                level_4[-1],
                                                                level_8[-1],
                                                                None)
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2
                count += 1
                level16_new, = self.cells[count] (level_4[-2],
                                                  level_8[-1],
                                                  None,
                                                  None)
                level16_new = normalized_betas[layer][1][2] * level16_new
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level16_new))
            # Layer 2: the x32 level comes online; emit an output if this
            # layer is a configured output point.
            elif layer == 2 :
                level4_new_1, level4_new_2 = self.cells[count] (level_4[-2],
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                count += 1
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (level_8[-2],
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2 = self.cells[count] (level_8[-2],
                                                                  level_8[-1],
                                                                  level_16[-1],
                                                                  None)
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2
                count += 1
                level32_new, = self.cells[count] (level_8[-2],
                                                  level_16[-1],
                                                  None,
                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level32_new))
                if 2 in self.C_index:
                    C_output_4.append(self.aspp_4(level_4[-1]))
                    C_output_8.append(self.aspp_8(level_8[-1]))
                    C_output_16.append(self.aspp_16(level_16[-1]))
                    C_output_32.append(self.aspp_32(level_32[-1]))
            # Layer 3 onward: cells additionally consume the concatenated
            # dense (cross-layer) features.
            elif layer == 3 :
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense[:-1], dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense[:-1], dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense[:-1], dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense[:-1], dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level32_new))
                if 3 in self.C_index:
                    C_output_4.append(self.aspp_4(level_4[-1]))
                    C_output_8.append(self.aspp_8(level_8[-1]))
                    C_output_16.append(self.aspp_16(level_16[-1]))
                    C_output_32.append(self.aspp_32(level_32[-1]))
            # Middle layer that is NOT an output point.
            elif layer not in self.C_index and layer < self._num_layers - 2:
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense[:-1], dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense[:-1], dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense[:-1], dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense[:-1], dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level32_new))
            # Middle layer that IS an output point: also run the ASPP heads.
            elif layer in self.C_index and layer < self._num_layers - 2:
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense[:-1], dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense[:-1], dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense[:-1], dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense[:-1], dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
                level_4_dense.append(self.dense_preprocess[layer][0](level4_new))
                level_8_dense.append(self.dense_preprocess[layer][1](level8_new))
                level_16_dense.append(self.dense_preprocess[layer][2](level16_new))
                level_32_dense.append(self.dense_preprocess[layer][3](level32_new))
                C_output_4.append(self.aspp_4(level_4[-1]))
                C_output_8.append(self.aspp_8(level_8[-1]))
                C_output_16.append(self.aspp_16(level_16[-1]))
                C_output_32.append(self.aspp_32(level_32[-1]))
            # Final layer: consume the FULL dense history (no [:-1] slice);
            # dense projections are no longer needed.
            elif layer == self._num_layers-1:
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense, dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense, dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense, dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense, dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
            # Second-to-last layer (falls through every case above).
            else :
                level4_new_1, level4_new_2 = self.cells[count] (torch.cat(level_4_dense[:-1], dim=1),
                                                                None,
                                                                level_4[-1],
                                                                level_8[-1])
                level4_new = normalized_betas[layer][0][1] * level4_new_1 + normalized_betas[layer][1][0] * level4_new_2
                count += 1
                level8_new_1, level8_new_2, level8_new_3 = self.cells[count] (torch.cat(level_8_dense[:-1], dim=1),
                                                                              level_4[-1],
                                                                              level_8[-1],
                                                                              level_16[-1])
                level8_new = normalized_betas[layer][0][2] * level8_new_1 + normalized_betas[layer][1][1] * level8_new_2 + normalized_betas[layer][2][0] * level8_new_3
                count += 1
                level16_new_1, level16_new_2, level16_new_3 = self.cells[count] (torch.cat(level_16_dense[:-1], dim=1),
                                                                                 level_8[-1],
                                                                                 level_16[-1],
                                                                                 level_32[-1])
                level16_new = normalized_betas[layer][1][2] * level16_new_1 + normalized_betas[layer][2][1] * level16_new_2 + normalized_betas[layer][3][0] * level16_new_3
                count += 1
                level32_new_1, level32_new_2 = self.cells[count] (torch.cat(level_32_dense[:-1], dim=1),
                                                                  level_16[-1],
                                                                  level_32[-1],
                                                                  None)
                level32_new = normalized_betas[layer][2][2] * level32_new_1 + normalized_betas[layer][3][1] * level32_new_2
                count += 1
                level_4.append (level4_new)
                level_8.append (level8_new)
                level_16.append (level16_new)
                level_32.append (level32_new)
            # Keep only the history the next layer actually reads
            # (cells use [-2]/[-1] early on, only [-1] later).
            if layer < 3:
                level_4 = level_4[-2:]
                level_8 = level_8[-2:]
                level_16 = level_16[-2:]
                level_32 = level_32[-2:]
            else:
                level_4 = level_4[-1:]
                level_8 = level_8[-1:]
                level_16 = level_16[-1:]
                level_32 = level_32[-1:]
        # The last layer always contributes an output.
        C_output_4.append(self.aspp_4(level_4[-1]))
        C_output_8.append(self.aspp_8(level_8[-1]))
        C_output_16.append(self.aspp_16(level_16[-1]))
        C_output_32.append(self.aspp_32(level_32[-1]))
        C_sum_maps = []
        upsample = nn.Upsample(size=x.size()[2:], mode='bilinear', align_corners=True)
        # One summed map per output point: len(C_index) intermediates + final.
        for c in range(len(self.C_index) +1):
            C_output_4[c] = upsample(C_output_4[c])
            C_output_8[c] = upsample(C_output_8[c])
            C_output_16[c] = upsample(C_output_16[c])
            C_output_32[c] = upsample(C_output_32[c])
            C_sum_maps.append(C_output_4[c] + C_output_8[c] + C_output_16[c] + C_output_32[c])
        return C_sum_maps
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
if m.affine != False:
m.weight.data.fill_(1)
m.bias.data.zero_()
def _initialize_alphas_betas(self):
betas = torch.tensor (1e-3*torch.randn(12, 4, 3).cuda(), requires_grad=True)
self._arch_parameters = [betas]
self._arch_param_names = ['betas']
[self.register_parameter(name, torch.nn.Parameter(param)) for name, param in zip(self._arch_param_names, self._arch_parameters)]
def arch_parameters (self) :
return [param for name, param in self.named_parameters() if name in self._arch_param_names]
def weight_parameters(self):
return [param for name, param in self.named_parameters() if name not in self._arch_param_names]
def main () :
    """Smoke-test entry point: build the search model and a dummy input batch."""
    model = Model_search (7, 12, None)
    # torch.ones already returns a Tensor; wrapping it in torch.tensor would
    # just copy it and emit a UserWarning.
    x = torch.ones (4, 3, 224, 224)
if __name__ == '__main__' :
    main ()
0d0b5e9c731802f81299326146512c4c71bf40db | 874 | py | Python | tests/test_dynamic_links.py | heykarimoff/firebase_dynamic_links | bed465e7d1706095af2f60948f3efdd0a9051875 | [
"MIT"
] | 12 | 2018-07-14T16:31:47.000Z | 2021-11-21T18:58:44.000Z | tests/test_dynamic_links.py | heykarimoff/firebase_dynamic_links | bed465e7d1706095af2f60948f3efdd0a9051875 | [
"MIT"
] | 2 | 2018-07-08T08:17:00.000Z | 2018-11-11T17:14:56.000Z | tests/test_dynamic_links.py | heykarimoff/firebase_dynamic_links | bed465e7d1706095af2f60948f3efdd0a9051875 | [
"MIT"
] | 2 | 2019-02-05T12:49:38.000Z | 2020-06-04T13:30:05.000Z | from firebase_dynamic_links import generate_long_link, generate_short_link
class MockFirebaseClient:
    """Minimal stand-in for the real Firebase client, used by the tests.

    It records the API key and simply echoes back whatever link is handed
    to ``shorten_link`` instead of hitting the network.
    """

    def __init__(self, api_key):
        self.api_key = api_key

    def shorten_link(self, long_link=None):
        # Identity behaviour lets tests verify exactly which link was sent.
        return long_link
def test_generate_short_link():
    """generate_short_link should build the link and delegate shortening to the client."""
    fake_client = MockFirebaseClient(api_key='fake_key')
    short_link = generate_short_link(
        client=fake_client,
        app_code='my_app_code',
        query_params={'isi': 'store_id'},
    )
    assert short_link == 'https://my_app_code.page.link/?isi=store_id'
def test_generate_long_link():
    """generate_long_link should assemble the dynamic link from app code and params."""
    long_link = generate_long_link(
        app_code='my_app_code',
        query_params={'isi': 'store_id'},
    )
    assert long_link == 'https://my_app_code.page.link/?isi=store_id'
| 24.971429 | 91 | 0.695652 |
43ecda716c460f7ede5b08fd1e16f8580e68e9c2 | 476 | py | Python | tests/conftest.py | glumia/pylaprof | d43a0e78453badef9b5620daa96c344a53cc4631 | [
"MIT"
] | 14 | 2021-11-14T17:36:54.000Z | 2022-03-20T00:03:07.000Z | tests/conftest.py | glumia/pylaprof | d43a0e78453badef9b5620daa96c344a53cc4631 | [
"MIT"
] | null | null | null | tests/conftest.py | glumia/pylaprof | d43a0e78453badef9b5620daa96c344a53cc4631 | [
"MIT"
] | null | null | null | import os
import tempfile
from unittest.mock import Mock
import pytest
@pytest.fixture
def tmpcwd():
    """Run test in a temporary directory."""
    cwd = os.getcwd()
    with tempfile.TemporaryDirectory(prefix="pylaprof-test") as tmpdir:
        os.chdir(tmpdir)
        try:
            yield
        finally:
            # Restore the original cwd BEFORE TemporaryDirectory cleanup runs:
            # removing the current working directory fails on Windows, and the
            # restore must happen even if the test body raised.
            os.chdir(cwd)
@pytest.fixture
def boto3_mock(monkeypatch):
    """Monkeypatch pylaprof's boto3 module.

    Replaces the ``boto3`` reference inside ``pylaprof`` with a Mock so tests
    never touch AWS; the Mock is returned so tests can assert on its calls.
    """
    mock = Mock()
    monkeypatch.setattr("pylaprof.boto3", mock)
    return mock
| 19.833333 | 71 | 0.684874 |
931a5915617125882fe7eb85000f8eaab4ad94d7 | 8,170 | py | Python | src/run.py | deligentfool/SIDE | 561fc6c5312906fd2073af043c2c17ec4ea3758d | [
"Apache-2.0"
] | 3 | 2021-12-22T10:28:28.000Z | 2022-03-16T10:55:51.000Z | src/run.py | deligentfool/SIDE | 561fc6c5312906fd2073af043c2c17ec4ea3758d | [
"Apache-2.0"
] | null | null | null | src/run.py | deligentfool/SIDE | 561fc6c5312906fd2073af043c2c17ec4ea3758d | [
"Apache-2.0"
] | null | null | null | import datetime
import os
import pprint
import time
import threading
import torch as th
from types import SimpleNamespace as SN
from utils.logging import Logger
from utils.timehelper import time_left, time_str
from os.path import dirname, abspath
from learners import REGISTRY as le_REGISTRY
from runners import REGISTRY as r_REGISTRY
from controllers import REGISTRY as mac_REGISTRY
from components.episode_buffer import ReplayBuffer
from components.transforms import OneHot
def run(_run, _config, _log):
    """Sacred entry point: sanity-check the config, set up logging/tensorboard,
    run the experiment via run_sequential, then join stray threads and
    hard-exit the process."""
    # check args sanity
    _config = args_sanity_check(_config, _log)
    args = SN(**_config)
    args.device = "cuda" if args.use_cuda else "cpu"
    # setup loggers
    logger = Logger(_log)
    _log.info("Experiment Parameters:")
    experiment_params = pprint.pformat(_config,
                                       indent=4,
                                       width=1)
    _log.info("\n\n" + experiment_params + "\n")
    # configure tensorboard logger
    unique_token = "{}__{}".format(args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    args.unique_token = unique_token
    if args.use_tensorboard:
        tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), "results", "tb_logs")
        tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
        logger.setup_tb(tb_exp_direc)
    # sacred is on by default
    logger.setup_sacred(_run)
    # Run and train
    run_sequential(args=args, logger=logger)
    # Clean up after finishing
    print("Exiting Main")
    print("Stopping all threads")
    # Give every non-main thread a short chance to finish before forcing exit.
    for t in threading.enumerate():
        if t.name != "MainThread":
            print("Thread {} is alive! Is daemon: {}".format(t.name, t.daemon))
            t.join(timeout=1)
            print("Thread joined")
    print("Exiting script")
    # Making sure framework really exits
    os._exit(os.EX_OK)
def evaluate_sequential(args, runner):
    """Run ``args.test_nepisode`` evaluation episodes, optionally save a
    replay, and close the environment."""
    episodes_left = args.test_nepisode
    while episodes_left > 0:
        runner.run(test_mode=True)
        episodes_left -= 1

    if args.save_replay:
        runner.save_replay()

    runner.close_env()
def run_sequential(args, logger):
    """Main training loop.

    Builds the episode runner, replay buffer, multi-agent controller and
    learner, optionally restores a checkpoint, then alternates between
    collecting episodes, training on sampled batches, periodic evaluation
    and periodic model checkpointing until ``args.t_max`` env steps.
    """
    # Init runner so we can get env info
    runner = r_REGISTRY[args.runner](args=args, logger=logger)
    # Set up schemes and groups here
    env_info = runner.get_env_info()
    args.n_agents = env_info["n_agents"]
    args.n_actions = env_info["n_actions"]
    args.state_shape = env_info["state_shape"]
    args.observation_shape = env_info["obs_shape"]
    # Default/Base scheme
    scheme = {
        "state": {"vshape": env_info["state_shape"]},
        "obs": {"vshape": env_info["obs_shape"], "group": "agents"},
        "actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
        "avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
        "reward": {"vshape": (1,)},
        "terminated": {"vshape": (1,), "dtype": th.uint8},
        "alive_allies": {"vshape": (env_info["n_agents"], env_info["n_agents"])},
        "visible_allies": {"vshape": (env_info["n_agents"], env_info["n_agents"] + env_info["n_enemies"])}
    }
    groups = {
        "agents": args.n_agents
    }
    preprocess = {
        "actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
    }
    # +1 timestep so the terminal transition fits in the buffer.
    buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
                          preprocess=preprocess,
                          device="cpu" if args.buffer_cpu_only else args.device)
    # Setup multiagent controller here
    mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
    # Give runner the scheme
    runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
    # Learner
    learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
    if args.use_cuda:
        learner.cuda()
    if args.checkpoint_path != "":
        timesteps = []
        timestep_to_load = 0
        if not os.path.isdir(args.checkpoint_path):
            logger.console_logger.info("Checkpoint directiory {} doesn't exist".format(args.checkpoint_path))
            return
        # Go through all files in args.checkpoint_path
        for name in os.listdir(args.checkpoint_path):
            full_name = os.path.join(args.checkpoint_path, name)
            # Check if they are dirs the names of which are numbers
            if os.path.isdir(full_name) and name.isdigit():
                timesteps.append(int(name))
        if args.load_step == 0:
            # choose the max timestep
            timestep_to_load = max(timesteps)
        else:
            # choose the timestep closest to load_step
            timestep_to_load = min(timesteps, key=lambda x: abs(x - args.load_step))
        model_path = os.path.join(args.checkpoint_path, str(timestep_to_load))
        logger.console_logger.info("Loading model from {}".format(model_path))
        learner.load_models(model_path)
        runner.t_env = timestep_to_load
        if args.evaluate or args.save_replay:
            evaluate_sequential(args, runner)
            return
    # start training
    episode = 0
    last_test_T = -args.test_interval - 1
    last_log_T = 0
    model_save_time = 0
    start_time = time.time()
    last_time = start_time
    logger.console_logger.info("Beginning training for {} timesteps".format(args.t_max))
    while runner.t_env <= args.t_max:
        # Run for a whole episode at a time
        episode_batch = runner.run(test_mode=False)
        buffer.insert_episode_batch(episode_batch)
        if buffer.can_sample(args.batch_size):
            episode_sample = buffer.sample(args.batch_size)
            # Truncate batch to only filled timesteps
            max_ep_t = episode_sample.max_t_filled()
            episode_sample = episode_sample[:, :max_ep_t]
            if episode_sample.device != args.device:
                episode_sample.to(args.device)
            learner.train(episode_sample, runner.t_env, episode)
        # Execute test runs once in a while
        n_test_runs = max(1, args.test_nepisode // runner.batch_size)
        if (runner.t_env - last_test_T) / args.test_interval >= 1.0:
            logger.console_logger.info("t_env: {} / {}".format(runner.t_env, args.t_max))
            logger.console_logger.info("Estimated time left: {}. Time passed: {}".format(
                time_left(last_time, last_test_T, runner.t_env, args.t_max), time_str(time.time() - start_time)))
            last_time = time.time()
            last_test_T = runner.t_env
            for _ in range(n_test_runs):
                runner.run(test_mode=True)
        if args.save_model and (runner.t_env - model_save_time >= args.save_model_interval or model_save_time == 0):
            model_save_time = runner.t_env
            save_path = os.path.join(args.local_results_path, "models", args.unique_token, str(runner.t_env))
            #"results/models/{}".format(unique_token)
            os.makedirs(save_path, exist_ok=True)
            logger.console_logger.info("Saving models to {}".format(save_path))
            # learner should handle saving/loading -- delegate actor save/load to mac,
            # use appropriate filenames to do critics, optimizer states
            learner.save_models(save_path)
        episode += args.batch_size_run
        if (runner.t_env - last_log_T) >= args.log_interval:
            logger.log_stat("episode", episode, runner.t_env)
            logger.print_recent_stats()
            last_log_T = runner.t_env
    runner.close_env()
    logger.console_logger.info("Finished Training")
def args_sanity_check(config, _log):
    """Normalise the experiment config in place and return it.

    Disables CUDA when no device is available and rounds ``test_nepisode``
    to a positive multiple of ``batch_size_run``.
    """
    # Fall back to CPU when CUDA was requested but is unavailable.
    if config["use_cuda"] and not th.cuda.is_available():
        config["use_cuda"] = False
        _log.warning("CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!")

    batch = config["batch_size_run"]
    episodes = config["test_nepisode"]
    if episodes < batch:
        episodes = batch
    else:
        episodes = (episodes // batch) * batch
    config["test_nepisode"] = episodes

    return config
| 35.064378 | 116 | 0.645043 |
380d4049e22cb3bf8a58ff951fe43ff85b5cc8ba | 1,888 | py | Python | contrib/devtools/check-doc.py | sarielsaz/LiteCoin | 4b827d08a0a45f7ed78f596c0fed78e059d35cf6 | [
"MIT"
] | null | null | null | contrib/devtools/check-doc.py | sarielsaz/LiteCoin | 4b827d08a0a45f7ed78f596c0fed78e059d35cf6 | [
"MIT"
] | null | null | null | contrib/devtools/check-doc.py | sarielsaz/LiteCoin | 4b827d08a0a45f7ed78f596c0fed78e059d35cf6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2015-2016 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
import sys
# Shell commands / regexes used to scan the source tree for command-line
# argument *usages* vs. their *documentation* (HelpMessageOpt).
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
# Every place a "-foo" argument is read (mapArgs/GetArg/...), tests excluded.
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
# Every place a "-foo" argument is documented via HelpMessageOpt.
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
# Capture groups extract the bare "-foo" name from each grep hit.
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd'])
def main():
    """Compare used vs. documented CLI args and exit non-zero if any used
    argument lacks documentation (the exit code is the count)."""
    used = check_output(CMD_GREP_ARGS, shell=True)
    docd = check_output(CMD_GREP_DOCS, shell=True)
    args_used = set(re.findall(REGEX_ARG,used))
    args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
    args_need_doc = args_used.difference(args_docd)
    args_unknown = args_docd.difference(args_used)
    # NOTE: Python 2 print statements -- this script has not been ported to py3.
    print "Args used : %s" % len(args_used)
    print "Args documented : %s" % len(args_docd)
    print "Args undocumented: %s" % len(args_need_doc)
    print args_need_doc
    print "Args unknown : %s" % len(args_unknown)
    print args_unknown
    # Non-zero exit (= number of undocumented args) makes CI fail.
    sys.exit(len(args_need_doc))
if __name__ == "__main__":
    main()
| 40.170213 | 259 | 0.68697 |
70443025976381f9ebfeba00ecb60062e533af9c | 671 | py | Python | tests/test_exponential.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 64 | 2020-03-18T12:11:22.000Z | 2022-03-31T08:19:18.000Z | tests/test_exponential.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 148 | 2020-05-14T06:14:11.000Z | 2022-03-26T15:02:31.000Z | tests/test_exponential.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 16 | 2020-05-31T00:53:44.000Z | 2022-03-23T13:20:57.000Z | import pyclesperanto_prototype as cle
import numpy as np
def test_exponential():
test = cle.push(np.asarray([
[0, 0, 0, 0, 0],
[0, 1, 1, 2, 0],
[0, 2, 2, 3, 0],
[0, 3, 3, 4, 0],
[0, 0, 0, 0, 0]
]))
reference = cle.push(np.asarray([
[1, 1, 1, 1, 1],
[1, 2.7182817, 2.7182817, 7.389056, 1],
[1, 7.389056, 7.389056, 20.085537, 1],
[1, 20.085537, 20.085537, 54.59815, 1],
[1, 1, 1, 1, 1]
]))
result = cle.create(test)
cle.exponential(test, result)
print(result)
a = cle.pull(result)
b = cle.pull(reference)
assert (np.allclose(a, b, atol=0.00001))
| 21.645161 | 47 | 0.499255 |
126ed19284e4c1eb7ed070885d368e881b418c92 | 97 | py | Python | surround/run_modes.py | ScottyB/surround | f9a86f5b5677de7bd5763d26de4a6f18c36f2a4d | [
"BSD-3-Clause"
] | 11 | 2019-04-11T22:53:00.000Z | 2021-02-12T07:42:39.000Z | surround/run_modes.py | ScottyB/surround | f9a86f5b5677de7bd5763d26de4a6f18c36f2a4d | [
"BSD-3-Clause"
] | 162 | 2019-04-08T22:49:36.000Z | 2021-09-08T03:28:28.000Z | surround/run_modes.py | ScottyB/surround | f9a86f5b5677de7bd5763d26de4a6f18c36f2a4d | [
"BSD-3-Clause"
] | 22 | 2019-04-11T01:16:39.000Z | 2022-01-03T11:45:36.000Z | from enum import Enum
class RunMode(Enum):
    """Execution modes for a Surround pipeline run."""
    BATCH_PREDICT = 1  # run prediction over a batch of inputs
    PREDICT = 2        # run prediction on a single input
    TRAIN = 3          # train the model
| 12.125 | 21 | 0.639175 |
1b43a34fc49cb48c5bc28183a1848205ebbabac9 | 531 | py | Python | pip_service_data_python/data/EntityV1Schema.py | pip-templates-services/pip-service-data-python | 74149496a4e02c6e7c14d5f4c9453bf196577a3c | [
"MIT"
] | null | null | null | pip_service_data_python/data/EntityV1Schema.py | pip-templates-services/pip-service-data-python | 74149496a4e02c6e7c14d5f4c9453bf196577a3c | [
"MIT"
] | null | null | null | pip_service_data_python/data/EntityV1Schema.py | pip-templates-services/pip-service-data-python | 74149496a4e02c6e7c14d5f4c9453bf196577a3c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from pip_services3_commons.convert import TypeCode
from pip_services3_commons.validate import ObjectSchema
class EntityV1Schema(ObjectSchema):
    """Validation schema for EntityV1: ``site_id`` is required, all other
    fields are optional strings."""

    def __init__(self):
        super().__init__()
        self.with_optional_property('id', TypeCode.String)
        self.with_required_property('site_id', TypeCode.String)
        # Remaining descriptive fields are all optional strings.
        for field_name in ('type', 'name', 'content'):
            self.with_optional_property(field_name, TypeCode.String)
| 33.1875 | 63 | 0.734463 |
db8a6216ec942089d36c5ebe903ad813556945e3 | 1,961 | py | Python | tests/test_2.py | rmcgibbo/covar | b4ce247f8a622f6cebf1156b3e392eadf2de1533 | [
"BSD-2-Clause"
] | 16 | 2015-10-01T03:50:30.000Z | 2022-02-02T15:03:41.000Z | tests/test_2.py | rmcgibbo/covar | b4ce247f8a622f6cebf1156b3e392eadf2de1533 | [
"BSD-2-Clause"
] | null | null | null | tests/test_2.py | rmcgibbo/covar | b4ce247f8a622f6cebf1156b3e392eadf2de1533 | [
"BSD-2-Clause"
] | 4 | 2019-07-02T04:54:41.000Z | 2021-08-08T11:41:21.000Z | from __future__ import division
import os.path
import numpy as np
import scipy.stats
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font',family='serif')
import matplotlib.pyplot as plt
from covar import cov_shrink_ss, cov_shrink_rblw
DIRNAME = os.path.dirname(os.path.realpath(__file__))
def test_1():
    """Visual regression test: plot ordered eigenvalue spectra of the sample
    covariance, cov_shrink_ss and cov_shrink_rblw estimators against the true
    covariance for several p/n ratios, saving the figure as test_2.png."""
    random = np.random.RandomState(0)
    p = 100
    # True covariance drawn from a Wishart distribution with a fixed seed.
    sigma = scipy.stats.wishart(scale=np.eye(p), seed=random).rvs()
    Ns = [int(x) for x in [p/10, p/2, 2*p, 10*p]]

    x = np.arange(p)

    plt.figure(figsize=(8,8))
    for i, N in enumerate(Ns):
        # Sample N observations from the true distribution and build the
        # three covariance estimates.
        X = random.multivariate_normal(mean=np.zeros(p), cov=sigma, size=N)
        S1 = np.cov(X.T)
        S2 = cov_shrink_ss(X)[0]
        S3 = cov_shrink_rblw(np.cov(X.T), len(X))[0]

        plt.subplot(3,2,i+1)
        plt.title('p/n = %.1f' % (p/N))
        plt.plot(x, sorted(np.linalg.eigvalsh(S2), reverse=True), 'b', lw=2, label='cov_shrink_ss')
        plt.plot(x, sorted(np.linalg.eigvalsh(S3), reverse=True), 'g', alpha=0.7, lw=2, label='cov_shrink_rblw')

        plt.plot(x, sorted(np.linalg.eigvalsh(sigma), reverse=True), 'k--', lw=2, label='true')
        plt.plot(x, sorted(np.linalg.eigvalsh(S1), reverse=True), 'r--', lw=2, label='sample covariance')
        if i == 1:
            plt.legend(fontsize=10)

        # plt.ylim(max(plt.ylim()[0], 1e-4), plt.ylim()[1])

    plt.figtext(.05, .05,
                """Ordered eigenvalues of the sample covariance matrix (red),
cov_shrink_ss()-estimated covariance matrix (blue),
cov_shrink_rblw()-estimated covariance matrix (green), and
true eigenvalues (dashed black). The data generated by sampling
from a p-variate normal distribution for p=100 and various
ratios of p/n. Note that for the larger value of p/n, the
cov_shrink_rblw() estimator is identical to the sample
covariance matrix.""")

    # plt.yscale('log')
    plt.ylabel('Eigenvalue')
    plt.tight_layout()
    plt.savefig('%s/test_2.png' % DIRNAME, dpi=300)
| 33.237288 | 112 | 0.653748 |
57df08faa4fd45f0fe4a681459fbe6e2beff2821 | 2,817 | py | Python | pyhpecw7/features/sflow_intf.py | HPENetworking/hpe-cw7-ansible | a7569b1dd21ad38a53d825eb4d4b2caf8ff6ea16 | [
"Apache-2.0"
] | 4 | 2022-01-10T21:02:00.000Z | 2022-03-09T03:05:22.000Z | pyhpecw7/features/sflow_intf.py | flycoolman/hpe-cw7-ansible | a7569b1dd21ad38a53d825eb4d4b2caf8ff6ea16 | [
"Apache-2.0"
] | null | null | null | pyhpecw7/features/sflow_intf.py | flycoolman/hpe-cw7-ansible | a7569b1dd21ad38a53d825eb4d4b2caf8ff6ea16 | [
"Apache-2.0"
] | 2 | 2022-01-10T21:03:07.000Z | 2022-01-20T09:11:44.000Z | """Manage sflow interface on HPCOM7 devices.
"""
from pyhpecw7.utils.xml.lib import *
from pyhpecw7.features.interface import Interface
import base64
import binascii
class Sflow(object):
    """Build and deploy per-interface sFlow CLI configuration on HPCOM7 devices."""

    def __init__(self, device, intf_name, collector, rate):
        """
        Args:
            device: connected HPCOM7 device used to stage or push CLI commands.
            intf_name: interface name, e.g. ``Ten-GigabitEthernet1/0/1``.
            collector: sFlow collector ID to bind flow sampling to.
            rate: sFlow packet sampling rate.
        """
        self.device = device
        self.intf_name = intf_name
        self.collector = collector
        self.rate = rate

    def _get_cmd(self, **SFLOW):
        """Return the CLI commands that enable sFlow on the interface."""
        index = SFLOW.get('intf_name')
        collector = SFLOW.get('collector')
        rate = SFLOW.get('rate')

        commands = []
        if index:
            commands.append('interface {0}'.format(index))
        if collector:
            commands.append('sflow flow collector {0}'.format(collector))
        if rate:
            commands.append('sflow sampling-rate {0}'.format(rate))
        return commands

    def _get_cmd_remove(self, **SFLOW):
        """Return the CLI commands that remove sFlow config from the interface."""
        index = SFLOW.get('intf_name')
        collector = SFLOW.get('collector')
        rate = SFLOW.get('rate')

        commands = []
        if index:
            commands.append('interface {0}'.format(index))
        # Bugfix: the undo commands were previously swapped -- a configured
        # collector removed the sampling rate and a configured rate removed
        # the collector.
        if collector:
            commands.append('undo sflow flow collector')
        if rate:
            commands.append('undo sflow sampling-rate')
        return commands

    def remove(self, stage=False, **SFLOW):
        """Stage (or immediately push) removal of the interface sFlow config."""
        return self._build_config_absent(state='absent', stage=stage, **SFLOW)

    def build(self, stage=False, **SFLOW):
        """Stage (or immediately push) the interface sFlow config."""
        return self._build_config_present(state='present', stage=stage, **SFLOW)

    def _deploy(self, commands, stage):
        """Send ``commands`` to the device, staged or live."""
        if stage:
            return self.device.stage_config(commands, 'cli_config')
        return self.device.cli_config(commands)

    def _build_config_present(self, state, stage=False, **SFLOW):
        """Deploy the 'present' config; returns the device result (staged)
        or a one-element list wrapping it (live), preserving the legacy API."""
        SFLOW['intf_name'] = self.intf_name
        SFLOW['rate'] = self.rate
        SFLOW['collector'] = self.collector
        result = True
        if state == 'present':
            commands = self._get_cmd(**SFLOW)
            if commands:
                result = self._deploy(commands, stage)
        if stage:
            return result
        return [result]

    def _build_config_absent(self, state, stage=False, **SFLOW):
        """Deploy the 'absent' (undo) config; same return convention as
        ``_build_config_present``."""
        SFLOW['intf_name'] = self.intf_name
        SFLOW['rate'] = self.rate
        SFLOW['collector'] = self.collector
        result = True
        if state == 'absent':
            commands = self._get_cmd_remove(**SFLOW)
            if commands:
                result = self._deploy(commands, stage)
        if stage:
            return result
        return [result]
84df14d35f6c234ff01604ae80bf30f444da952b | 20,518 | py | Python | gui/qt/installwizard.py | stashpayio/electrum-stash | a04e1fde408196e547cf80f8ce9d9391133bd865 | [
"MIT"
] | null | null | null | gui/qt/installwizard.py | stashpayio/electrum-stash | a04e1fde408196e547cf80f8ce9d9391133bd865 | [
"MIT"
] | null | null | null | gui/qt/installwizard.py | stashpayio/electrum-stash | a04e1fde408196e547cf80f8ce9d9391133bd865 | [
"MIT"
] | null | null | null | import sys
import os
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import electrum_dash
from electrum_dash import Wallet, WalletStorage
from electrum_dash.util import UserCancelled, InvalidPassword
from electrum_dash.base_wizard import BaseWizard
from electrum_dash.i18n import _
from seed_dialog import SeedLayout, KeysLayout
from network_dialog import NetworkChoiceLayout
from util import *
from password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electrum-DASH is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Dash addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
import math
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-DASH - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for base base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.connect(self, QtCore.SIGNAL('accept'), self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addLayout(inner_vbox)
hbox.setStretchFactor(inner_vbox, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum-dash.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-DASH wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
if path:
self.name_e.setText(path)
def on_filename(filename):
filename = unicode(filename)
path = os.path.join(wallet_folder, filename.encode('utf8'))
try:
self.storage = WalletStorage(path)
except IOError:
self.storage = None
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n.decode('utf8'))
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = unicode(self.pw_e.text())
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e), _('OK'))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e), _('OK'))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum-DASH 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electrum-DASH. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.show_warning(_('Your wallet was upgraded successfully'))
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.emit(QtCore.SIGNAL('synchronized'), msg)
self.connect(self, QtCore.SIGNAL('synchronized'), self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.emit(QtCore.SIGNAL('accept'))
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = map(lambda x: x[0], choices)
c_titles = map(lambda x: x[1], choices)
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(unicode(text)))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(unicode(line.text()).split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum-DASH communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum-DASH "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
| 37.856089 | 140 | 0.600253 |
5ffa03cd41b1cef2643f8771adc924fac26b5aa6 | 10,837 | py | Python | openstack_dashboard/dashboards/project/network_topology/views.py | Tehsmash/horizon | 8ffade099a3a437509dcdcf25d5b054e5c188b61 | [
"Apache-2.0"
] | 60 | 2015-03-09T14:31:46.000Z | 2021-12-12T19:22:31.000Z | openstack_dashboard/dashboards/project/network_topology/views.py | 2733284198/avos | becf7dd313fb8569581f985118c8367921c731ab | [
"Apache-2.0"
] | 7 | 2015-04-13T13:21:10.000Z | 2016-02-24T18:38:28.000Z | openstack_dashboard/dashboards/project/network_topology/views.py | 2733284198/avos | becf7dd313fb8569581f985118c8367921c731ab | [
"Apache-2.0"
] | 13 | 2015-03-09T17:26:26.000Z | 2020-02-22T19:19:14.000Z | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 NTT MCL Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse # noqa
from django.views.generic import TemplateView # noqa
from django.views.generic import View # noqa
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.network_topology.instances \
import tables as instances_tables
from openstack_dashboard.dashboards.project.network_topology.ports \
import tables as ports_tables
from openstack_dashboard.dashboards.project.network_topology.routers \
import tables as routers_tables
from openstack_dashboard.dashboards.project.instances import\
views as i_views
from openstack_dashboard.dashboards.project.instances.workflows import\
create_instance as i_workflows
from openstack_dashboard.dashboards.project.networks import\
views as n_views
from openstack_dashboard.dashboards.project.networks import\
workflows as n_workflows
from openstack_dashboard.dashboards.project.routers import\
views as r_views
class NTCreateRouterView(r_views.CreateView):
template_name = 'project/network_topology/create_router.html'
success_url = reverse_lazy("horizon:project:network_topology:index")
class NTCreateNetwork(n_workflows.CreateNetwork):
def get_success_url(self):
return reverse("horizon:project:network_topology:index")
def get_failure_url(self):
return reverse("horizon:project:network_topology:index")
class NTCreateNetworkView(n_views.CreateView):
workflow_class = NTCreateNetwork
class NTLaunchInstance(i_workflows.LaunchInstance):
success_url = "horizon:project:network_topology:index"
class NTLaunchInstanceView(i_views.LaunchInstanceView):
workflow_class = NTLaunchInstance
class InstanceView(i_views.IndexView):
table_class = instances_tables.InstancesTable
template_name = 'project/network_topology/iframe.html'
class RouterView(r_views.IndexView):
table_class = routers_tables.RoutersTable
template_name = 'project/network_topology/iframe.html'
class RouterDetailView(r_views.DetailView):
table_classes = (ports_tables.PortsTable, )
template_name = 'project/network_topology/iframe.html'
def get_interfaces_data(self):
pass
class NetworkTopologyView(TemplateView):
template_name = 'project/network_topology/index.html'
def _has_permission(self, policy):
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(policy, self.request)
return has_permission
def _quota_exceeded(self, quota):
usages = quotas.tenant_quota_usages(self.request)
available = usages[quota]['available']
return available <= 0
def get_context_data(self, **kwargs):
context = super(NetworkTopologyView, self).get_context_data(**kwargs)
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
context['launch_instance_allowed'] = self._has_permission(
(("compute", "compute:create"),))
context['instance_quota_exceeded'] = self._quota_exceeded('instances')
context['create_network_allowed'] = self._has_permission(
(("network", "create_network"),))
context['network_quota_exceeded'] = self._quota_exceeded('networks')
context['create_router_allowed'] = (
network_config.get('enable_router', True) and
self._has_permission((("network", "create_router"),)))
context['router_quota_exceeded'] = self._quota_exceeded('routers')
context['console_type'] = getattr(
settings, 'CONSOLE_TYPE', 'AUTO')
return context
class JSONView(View):
@property
def is_router_enabled(self):
network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
return network_config.get('enable_router', True)
def add_resource_url(self, view, resources):
tenant_id = self.request.user.tenant_id
for resource in resources:
if (resource.get('tenant_id')
and tenant_id != resource.get('tenant_id')):
continue
resource['url'] = reverse(view, None, [str(resource['id'])])
def _check_router_external_port(self, ports, router_id, network_id):
for port in ports:
if (port['network_id'] == network_id
and port['device_id'] == router_id):
return True
return False
def _get_servers(self, request):
# Get nova data
try:
servers, more = api.nova.server_list(request)
except Exception:
servers = []
console_type = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
if console_type == 'SPICE':
console = 'spice'
else:
console = 'vnc'
data = [{'name': server.name,
'status': server.status,
'console': console,
'task': getattr(server, 'OS-EXT-STS:task_state'),
'id': server.id} for server in servers]
self.add_resource_url('horizon:project:instances:detail', data)
return data
def _get_networks(self, request):
# Get neutron data
# if we didn't specify tenant_id, all networks shown as admin user.
# so it is need to specify the networks. However there is no need to
# specify tenant_id for subnet. The subnet which belongs to the public
# network is needed to draw subnet information on public network.
try:
neutron_networks = api.neutron.network_list_for_tenant(
request,
request.user.tenant_id)
except Exception:
neutron_networks = []
networks = [{'name': network.name,
'id': network.id,
'subnets': [{'cidr': subnet.cidr}
for subnet in network.subnets],
'router:external': network['router:external']}
for network in neutron_networks]
self.add_resource_url('horizon:project:networks:detail',
networks)
# Add public networks to the networks list
if self.is_router_enabled:
try:
neutron_public_networks = api.neutron.network_list(
request,
**{'router:external': True})
except Exception:
neutron_public_networks = []
my_network_ids = [net['id'] for net in networks]
for publicnet in neutron_public_networks:
if publicnet.id in my_network_ids:
continue
try:
subnets = [{'cidr': subnet.cidr}
for subnet in publicnet.subnets]
except Exception:
subnets = []
networks.append({
'name': publicnet.name,
'id': publicnet.id,
'subnets': subnets,
'router:external': publicnet['router:external']})
return sorted(networks,
key=lambda x: x.get('router:external'),
reverse=True)
def _get_routers(self, request):
if not self.is_router_enabled:
return []
try:
neutron_routers = api.neutron.router_list(
request,
tenant_id=request.user.tenant_id)
except Exception:
neutron_routers = []
routers = [{'id': router.id,
'name': router.name,
'status': router.status,
'external_gateway_info': router.external_gateway_info}
for router in neutron_routers]
self.add_resource_url('horizon:project:routers:detail', routers)
return routers
def _get_ports(self, request):
try:
neutron_ports = api.neutron.port_list(request)
except Exception:
neutron_ports = []
ports = [{'id': port.id,
'network_id': port.network_id,
'device_id': port.device_id,
'fixed_ips': port.fixed_ips,
'device_owner': port.device_owner,
'status': port.status}
for port in neutron_ports]
self.add_resource_url('horizon:project:networks:ports:detail',
ports)
return ports
def _prepare_gateway_ports(self, routers, ports):
# user can't see port on external network. so we are
# adding fake port based on router information
for router in routers:
external_gateway_info = router.get('external_gateway_info')
if not external_gateway_info:
continue
external_network = external_gateway_info.get(
'network_id')
if not external_network:
continue
if self._check_router_external_port(ports,
router['id'],
external_network):
continue
fake_port = {'id': 'gateway%s' % external_network,
'network_id': external_network,
'device_id': router['id'],
'fixed_ips': []}
ports.append(fake_port)
def get(self, request, *args, **kwargs):
data = {'servers': self._get_servers(request),
'networks': self._get_networks(request),
'ports': self._get_ports(request),
'routers': self._get_routers(request)}
self._prepare_gateway_ports(data['routers'], data['ports'])
json_string = json.dumps(data, ensure_ascii=False)
return HttpResponse(json_string, content_type='text/json')
| 38.429078 | 78 | 0.627295 |
b03f7608c2d5335a7663e44d0505ebe80f641106 | 8,063 | py | Python | wordle.py | CyanideCN/TaxolLib | 6d27c81c88a8f2c173a1c6f289e62f53b196290b | [
"MIT"
] | 1 | 2022-01-30T02:10:13.000Z | 2022-01-30T02:10:13.000Z | wordle.py | CyanideCN/TaxolLib | 6d27c81c88a8f2c173a1c6f289e62f53b196290b | [
"MIT"
] | null | null | null | wordle.py | CyanideCN/TaxolLib | 6d27c81c88a8f2c173a1c6f289e62f53b196290b | [
"MIT"
] | null | null | null | import random
import asyncio
import sqlite3
import datetime
import time
import uuid
from collections import defaultdict
from nonebot.typing import T_State
from nonebot.adapters import Bot, Event
from nonebot.adapters.cqhttp.message import MessageSegment, Message
import pandas as pd
# Word pools: CET-4 vocabulary for normal games, GRE vocabulary for hard
# mode.  Lines keep their trailing newline here; WordleSession.choose_word
# strips it when a word is picked.
with open('data/wordle_cet4.txt', 'r') as f:
    wordle_cet4 = f.readlines()
with open('data/wordle_gre.txt', 'r') as f:
    wordle_gre = f.readlines()
# Active games keyed by group id; at most one concurrent session per group.
wordle_session = dict()
class WordleDB(object):
    """Thin sqlite3 persistence layer for wordle scores and game records.

    Improvements over the original:
      * statements embedding caller-supplied values use bound parameters
        instead of string interpolation (avoids SQL injection / quoting
        bugs);
      * cursors are closed after read-only queries as well;
      * ``save_session`` honors ``_check_session_valid`` instead of
        discarding its result, so the same session id is never inserted
        twice.
    """

    def __init__(self, db_path='Wordle.db'):
        # ``db_path`` is new but defaulted, keeping the original no-arg
        # construction working.
        self.db = sqlite3.connect(db_path)

    def _get_user_list(self):
        """Return the distinct user ids present in UserRecord."""
        cursor = self.db.cursor()
        try:
            ret = cursor.execute('SELECT DISTINCT UserID FROM UserRecord')
            return [i[0] for i in ret]
        finally:
            cursor.close()

    def update_score(self, userid, score):
        """Add ``score`` to ``userid``'s total, creating the row if absent."""
        cursor = self.db.cursor()
        try:
            if userid not in self._get_user_list():
                cursor.execute(
                    'INSERT INTO UserRecord(UserID,TotalScore) VALUES(?,?)',
                    (userid, score))
            else:
                cursor.execute(
                    'SELECT TotalScore FROM UserRecord WHERE UserID=?',
                    (userid,))
                prev_score = cursor.fetchone()[0]
                cursor.execute(
                    'UPDATE UserRecord SET TotalScore=? WHERE UserID=?',
                    (prev_score + score, userid))
            self.db.commit()
        finally:
            cursor.close()

    def save_session(self, id, groupid, stime, word, etime, acount, winner, score):
        """Persist one finished game.

        ``stime`` is shifted by +8 hours (store local China time); a session
        id already present in WordleRecord is silently skipped.
        """
        stime = stime + datetime.timedelta(hours=8)
        if not self._check_session_valid(id):
            # Session already recorded — do not insert a duplicate row.
            return
        cmd = ('INSERT INTO WordleRecord(ID,Year,Month,Day,Hour,Minute,Second,'
               'GroupID,Word,ElapsedTime,AttemptCount,WinnerID,Score)'
               ' VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?)')
        cursor = self.db.cursor()
        try:
            cursor.execute(cmd, (id, stime.year, stime.month, stime.day,
                                 stime.hour, stime.minute, stime.second,
                                 groupid, word, etime, acount, winner, score))
            self.db.commit()
        finally:
            cursor.close()

    def _check_session_valid(self, id):
        """Return True when ``id`` is not yet present in WordleRecord."""
        cursor = self.db.cursor()
        try:
            cursor.execute('SELECT ID FROM WordleRecord WHERE ID=?', (id,))
            return cursor.fetchone() is None
        finally:
            cursor.close()

    def get_score(self):
        """Return a Series of total scores indexed by user id, descending."""
        cursor = self.db.cursor()
        try:
            rows = cursor.execute(
                'SELECT UserID, TotalScore FROM UserRecord '
                'ORDER BY TotalScore DESC').fetchall()
        finally:
            cursor.close()
        df = pd.DataFrame(rows, columns=['ID', 'Score'])
        ser = pd.Series(data=df['Score'].values, index=df['ID'])
        return ser.astype(int)

    def get_time_rank(self):
        """Return the 20 fastest wins as a Series (winner id -> seconds)."""
        cursor = self.db.cursor()
        try:
            rows = cursor.execute(
                'SELECT WinnerID, ElapsedTime FROM WordleRecord '
                'ORDER BY ElapsedTime ASC LIMIT 20').fetchall()
        finally:
            cursor.close()
        df = pd.DataFrame(rows, columns=['ID', 'Score'])
        ser = pd.Series(data=df['Score'].values, index=df['ID'])
        return ser.round(1)

    def get_wordle_stats(self, uid):
        """Return a DataFrame of (Time, Count, Word) for games won by ``uid``."""
        cursor = self.db.cursor()
        try:
            rows = cursor.execute(
                'SELECT ElapsedTime, AttemptCount, Word FROM WordleRecord '
                'WHERE WinnerID=?', (uid,)).fetchall()
        finally:
            cursor.close()
        return pd.DataFrame(rows, columns=['Time', 'Count', 'Word'])
# Module-wide database handle shared by the session/handler code below.
db = WordleDB()
class WordleSession(object):
    """State for one wordle round in a single group.

    ``status`` is 0 while the round is open and 1 once solved.
    ``process_input`` returns 0/1/2/3 status codes or a feedback string
    (see ``code_mapping`` for the codes' meanings).
    """

    # Per-letter feedback glyphs.
    CORRECT = '✔'
    MISSING = '❌'
    WRONG = '⭕'

    def __init__(self, group_id, difficulty):
        self.session_id = str(uuid.uuid1())
        self.group_id = group_id
        # Anything other than 1 (GRE pool) falls back to difficulty 0.
        self.difficulty = 1 if difficulty == 1 else 0
        self.status = 0
        self.attempt_count = 0
        self.answer = self.choose_word()
        self.player_resp_time = defaultdict(int)
        self.disclosed_position = list()

    def choose_word(self):
        """Pick a random word from the pool matching the difficulty."""
        pool = wordle_cet4 if self.difficulty != 1 else wordle_gre
        return random.choice(pool).strip()

    def process_input(self, uid, word):
        """Evaluate one guess; return a status code or a feedback string."""
        if len(word) != len(self.answer):
            return 1
        # Reject single-repeated-letter probes (brute-force detection).
        if len(set(word)) == 1:
            return 2
        now = time.time()
        self.attempt_count += 1
        marks = []
        for guessed, actual in zip(word, self.answer):
            if guessed == actual:
                marks.append(self.CORRECT)
            elif guessed in self.answer:
                marks.append(self.WRONG)
            else:
                marks.append(self.MISSING)
        feedback = ''.join(marks)
        if feedback == self.CORRECT * len(self.answer):
            self.status = 1
            return 0
        # Per-player throttle: one evaluated wrong guess every 6 seconds.
        if now < self.player_resp_time[uid] + 6:
            return 3
        self.player_resp_time[uid] = now
        return '不对哦~\n' + feedback

    def init_prompt(self):
        """Mark the round's start time and return the opening message."""
        self.start_time = datetime.datetime.utcnow()
        return f'游戏难度:{self.difficulty}\n本轮要猜的单词长度为{len(self.answer)}\n输入#加上单词即可参与游戏'

    def disclose_one(self):
        """Reveal one more letter (keeping at least 3 hidden); return the
        masked answer with '?' for undisclosed positions."""
        if len(self.disclosed_position) < len(self.answer) - 3:
            while True:
                pick = random.randint(0, len(self.answer) - 1)
                if pick not in self.disclosed_position:
                    self.disclosed_position.append(pick)
                    break
        shown = [
            ch if idx in self.disclosed_position else '?'
            for idx, ch in enumerate(self.answer)
        ]
        return ''.join(shown)

    def save_record(self, winner_id):
        """Score the round, persist it, and return the summary message."""
        total_time = (datetime.datetime.utcnow() - self.start_time).total_seconds()
        if self.status == 0:
            score = 0
        else:
            # Base score grows with difficulty and with words longer than
            # ten letters; a speed multiplier (clamped to [1, 5]) rewards
            # finishing well under two minutes.
            raw_score = (self.difficulty + 2) * 5 + max(len(self.answer) - 10, 0)
            time_scale = min(max(120 / total_time, 1), 5)
            score = int(raw_score * time_scale)
            db.update_score(winner_id, score)
        msg = f'总尝试时间: {int(total_time)}s 获得分数: {score}分'
        db.save_session(self.session_id, self.group_id, self.start_time,
                        self.answer, total_time, self.attempt_count,
                        winner_id, score)
        return msg
async def start_wordle(bot: Bot, event: Event, state: T_State):
    """Start a wordle round for the event's group and drive the hint timer.

    The message text selects difficulty: an int parses directly; other ASCII
    text falls back to difficulty 0; non-ASCII text re-raises the ValueError.
    """
    mode = event.get_plaintext().strip()
    group_id = event.group_id
    if group_id in wordle_session:
        return await bot.send(event, '上一局游戏尚未结束!')
    try:
        mode = int(mode)
        ws = WordleSession(group_id, mode)
    except ValueError:
        if mode.isascii():
            ws = WordleSession(group_id, 0)
        else:
            raise
    wordle_session[group_id] = ws
    await bot.send(event, ws.init_prompt())
    await asyncio.sleep(60)
    # Disclose one hint per minute until the word is solved or hints run out.
    for _ in range(len(ws.answer) - 2):
        await asyncio.sleep(60)
        if ws.status == 1:
            break
        dis = ws.disclose_one()
        if dis:
            await bot.send(event, f'提示:{dis}')
    if ws.status == 0:
        await bot.send(event, f'答案是{ws.answer}')
    # A winning guess may have already removed the session in play_wordle.
    if group_id in wordle_session:
        ws.save_record(None)
        del wordle_session[group_id]
# Maps the int status codes returned by WordleSession.process_input to the
# user-facing reply text (0 = win, 1 = wrong length, 2 = exhaustive guess,
# 3 = rate-limited).
code_mapping = {0:'恭喜你,猜对了!', 1:'单词长度不匹配', 2:'禁止穷举', 3:'你刷得太快啦'}
async def play_wordle(bot: Bot, event: Event, state: T_State):
    """Handle one '#word' guess message for the group's active session."""
    group_id = event.group_id
    uid = event.user_id
    ws = wordle_session.get(group_id, None)
    # Drop the leading '#' trigger character and normalize the guess to lower case.
    word = event.get_plaintext().strip()[1:].strip().lower()
    if ws and word.isalpha() and word.isascii():
        msg = ws.process_input(uid, word)
        if isinstance(msg, int):
            send_msg = code_mapping[msg]
            if msg == 0:
                # Winning guess: record the round and tear down the session.
                ret = ws.save_record(uid)
                send_msg += f' 答案是{ws.answer}' + '\n' + ret
                send_msg = Message(Message(MessageSegment.at(uid)).extend([send_msg]))
                del wordle_session[group_id]
            await bot.send(event, send_msg)
        else:
            # A non-int result is the per-letter feedback text for a wrong guess.
            await bot.send(event, msg)
0d851b9969a0eb090acbaf43f1bca3f829f1d840 | 4,970 | pyt | Python | example/Example_toolbox.pyt | alessioarena/xGIS | 7e80c0c183681db02f499679ab484db34271c2e8 | [
"MIT",
"Unlicense"
] | 2 | 2019-10-14T04:44:46.000Z | 2020-04-21T03:06:48.000Z | example/Example_toolbox.pyt | alessioarena/xGIS | 7e80c0c183681db02f499679ab484db34271c2e8 | [
"MIT",
"Unlicense"
] | null | null | null | example/Example_toolbox.pyt | alessioarena/xGIS | 7e80c0c183681db02f499679ab484db34271c2e8 | [
"MIT",
"Unlicense"
] | null | null | null | import os
import arcpy
import xgis
class Toolbox(object):
    """ArcGIS Python toolbox entry point exposing the tools in this module."""
    def __init__(self):
        self.label = "Example Toolbox"
        self.alias = "exampleToolbox"
        # List of tool classes associated with this toolbox
        self.tools = [AwesomeTool]
class AwesomeTool(object):
    """Geoprocessing tool that runs ``awesome_script.py`` externally via xgis.

    Builds a flat argv-style list of string arguments from the tool parameters
    and hands it to ``xgis.Executor``; on success the produced raster is
    published through the hidden derived output parameter.
    """

    def __init__(self):
        self.label = 'AwesomeTool'
        self.description = 'This tool will run an awesome script externally'
        self.canRunInBackground = True

    def getParameterInfo(self):
        """Define and return the tool's parameter list (arcpy.Parameter objects)."""
        parameters = []
        params0 = arcpy.Parameter(
            displayName='Input Raster',
            name='in_raster',
            datatype='GPRasterLayer',  # This will source the loaded rasters, or give you the option to specify one from disk
            parameterType='Required')
        parameters.append(params0)
        params1 = arcpy.Parameter(
            displayName='Region of Interest',
            name='in_shp',
            datatype='GPFeatureLayer',  # Same deal, but for features (like shapefiles)
            parameterType='Optional')  # This parameter is optional
        parameters.append(params1)
        params2 = arcpy.Parameter(
            displayName='Output Name',
            name='out_name',
            datatype='GPString',  # simple string
            parameterType='Required')
        params2.value = os.path.join(os.path.expanduser('~'), 'output_raster.tif')  # This will be the default value
        parameters.append(params2)
        params3 = arcpy.Parameter(
            displayName='Algorithm to use',
            name='algorithm',
            datatype='GPString',
            parameterType='Required')
        params3.filter.list = ['GAUSSIAN_MIXTURE', 'K_MEANS', 'SPECTRAL_CLUSTERING']  # to create a dropdown menu
        params3.value = 'K_MEANS'
        parameters.append(params3)
        params4 = arcpy.Parameter(
            displayName='Variables',
            name='vars',
            datatype='GPString',
            multiValue=True,  # To create a check list
            parameterType='Required')
        params4.filter.list = ['MEAN', 'MODE', 'MEDIAN', 'HISTOGRAM', 'TEXTURE']  # Values for the check list
        params4.value = ['MEAN', 'MODE']
        parameters.append(params4)
        params5 = arcpy.Parameter(
            displayName='Debug Mode',
            name='debug',
            datatype='GPBoolean',  # just a tick box
            parameterType='Required')
        params5.value = False
        parameters.append(params5)
        params6 = arcpy.Parameter(
            displayName='Result',
            name='result',
            datatype='DERasterDataset',
            parameterType='Derived',
            direction='Output')  # This parameter is hidden, and its value will be used at the end to automatically source and load the output raster
        parameters.append(params6)
        return parameters

    def isLicensed(self):  # optional
        """Always licensed; no extension check is performed."""
        return True

    def updateParameters(self, parameters):  # optional
        # this will automatically fill the output_path parameter using the same directory as the input
        # anything in this section will be run every time you modify one of the parameters
        if parameters[0].value and not parameters[0].hasBeenValidated:
            parameters[2].value = os.path.join(arcpy.Describe(parameters[0].value).path, 'output_raster.tif')
        return

    def updateMessages(self, parameters):  # optional
        # this will be run any time you modify a parameter
        # You can use specific methods to raise warnings or error in the graphic interface
        return

    def execute(self, parameters, messages):
        """Assemble the command line, run the external script, publish the output."""
        # Let's start building the list of arguments
        args = []
        args.append('awesome_script.py')
        # Mandatory arguments
        raster_path = arcpy.Describe(parameters[0].value).catalogPath
        args.append(raster_path)
        args.append(parameters[2].valueAsText)
        # optional arguments
        if parameters[1].value:
            roi_path = arcpy.Describe(parameters[1].value).catalogPath
            args.extend(['--roi', roi_path])
        args.extend(['--algorithm', parameters[3].valueAsText])
        args.extend(['--variables'] + str(parameters[4].valueAsText).split(';'))
        if parameters[5].value is not False:
            # BUG FIX: the original did args.append(['--debug']), nesting a list
            # inside the argv list; the flag must be a plain string element.
            args.append('--debug')
        # let's run this!
        proc = xgis.Executor(args, external_libs='external_libs', cwd=os.path.dirname(__file__))
        proc.run()
        # if the output file exists, assign it to the output parameter (this is hidden in the GUI)
        # The output parameter will be automatically loaded back in ArcMap, and used in ModelBuilder and other ESRI tools
        if os.path.isfile(parameters[2].valueAsText):
            arcpy.SetParameter(len(parameters) - 1, parameters[2].valueAsText)
        else:
            raise RuntimeError('Could not find the output file')
1601aec910856e449b6d0a1c5f5646f83fd75294 | 2,001 | py | Python | manoward/endorsementmgmt.py | chalbersma/manowar | 023a696f7ea0458e1c2ae9a18e40a9d09e824cc4 | [
"BSD-2-Clause"
] | 3 | 2019-02-16T03:14:11.000Z | 2020-05-28T23:14:23.000Z | manoward/endorsementmgmt.py | chalbersma/manowar | 023a696f7ea0458e1c2ae9a18e40a9d09e824cc4 | [
"BSD-2-Clause"
] | 4 | 2018-08-09T22:39:59.000Z | 2020-02-12T00:36:47.000Z | manoward/endorsementmgmt.py | chalbersma/manowar | 023a696f7ea0458e1c2ae9a18e40a9d09e824cc4 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
'''
Copyright 2018, VDMS
Licensed under the terms of the BSD 2-clause license. See LICENSE file for terms.
'''
# Validate Endorsements
import logging
from flask import abort
def process_endorsements(endorsements=[], restrictions=[], session_endorsements=[], session_restrictions=[], ignore_abort=False, **kwargs):
    '''
    Compare an endpoint's endorsements/restrictions against the session's.

    Restrictions win over endorsements: any matching restriction fails the
    check; otherwise any matching endorsement passes it; with no match at all
    the check fails with "No Matches". Unless ignore_abort is set, a failed
    check aborts the request with HTTP 403.
    Returns a (passed, matches) tuple.
    '''
    logger = logging.getLogger("endorsementmgmt.py")

    if ignore_abort is True or kwargs.get("do_abort", True) is False:
        logger.warning("Aborts are off (Likely because of Debug Mode).")

    # Intersect the endpoint's lists with the session's lists.
    matched_restrictions = [item for item in restrictions if item in session_restrictions]
    matched_endorsements = [item for item in endorsements if item in session_endorsements]

    if matched_restrictions:
        # Any matching restriction fails the check outright.
        result = (False, matched_restrictions)
    elif matched_endorsements:
        result = (True, matched_endorsements)
    else:
        # Neither endorsements nor restrictions matched.
        result = (False, "No Matches")

    if result[0] is False and ignore_abort is False:
        logger.info(
            "Dropping Session {}/{}".format(matched_endorsements, matched_restrictions))
        logger.debug(
            "Dropping Session with Endorsements/Restrictions {}/{}".format(endorsements, restrictions))
        abort(403)

    return result
| 33.35 | 139 | 0.693153 |
a4fba08b43fad703555d5894107cb541f053faa4 | 2,107 | py | Python | test/functional/test_framework/siphash.py | minerscore/Ritocoin | cf4e1570b2bab487b9a70f2e8cf6f98fb42fd4b7 | [
"MIT"
] | 18 | 2018-11-30T19:07:06.000Z | 2021-05-17T11:06:12.000Z | test/functional/test_framework/siphash.py | minerscore/Ritocoin | cf4e1570b2bab487b9a70f2e8cf6f98fb42fd4b7 | [
"MIT"
] | 1 | 2018-12-08T19:41:43.000Z | 2018-12-08T19:41:43.000Z | test/functional/test_framework/siphash.py | minerscore/Ritocoin | cf4e1570b2bab487b9a70f2e8cf6f98fb42fd4b7 | [
"MIT"
] | 17 | 2018-11-30T17:16:21.000Z | 2021-10-30T17:33:14.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Specialized SipHash-2-4 implementations.
This implements SipHash-2-4 for 256-bit integers.
"""
def rotl64(n, b):
    """Rotate the 64-bit integer *n* left by *b* bits."""
    shift = 64 - b
    low_mask = (1 << shift) - 1
    return (n >> shift) | ((n & low_mask) << b)
def siphash_round(v0, v1, v2, v3):
v0 = (v0 + v1) & ((1 << 64) - 1)
v1 = rotl64(v1, 13)
v1 ^= v0
v0 = rotl64(v0, 32)
v2 = (v2 + v3) & ((1 << 64) - 1)
v3 = rotl64(v3, 16)
v3 ^= v2
v0 = (v0 + v3) & ((1 << 64) - 1)
v3 = rotl64(v3, 21)
v3 ^= v0
v2 = (v2 + v1) & ((1 << 64) - 1)
v1 = rotl64(v1, 17)
v1 ^= v2
v2 = rotl64(v2, 32)
return (v0, v1, v2, v3)
def siphash256(k0, k1, h):
    """SipHash-2-4 of the 256-bit integer *h* under the 64-bit key pair (k0, k1)."""
    mask = (1 << 64) - 1

    def _rot(x, b):
        # 64-bit left rotation.
        return ((x << b) & mask) | (x >> (64 - b))

    def _round(v0, v1, v2, v3):
        # One SipHash compression round (same maths as siphash_round above).
        v0 = (v0 + v1) & mask
        v1 = _rot(v1, 13) ^ v0
        v0 = _rot(v0, 32)
        v2 = (v2 + v3) & mask
        v3 = _rot(v3, 16) ^ v2
        v0 = (v0 + v3) & mask
        v3 = _rot(v3, 21) ^ v0
        v2 = (v2 + v1) & mask
        v1 = _rot(v1, 17) ^ v2
        v2 = _rot(v2, 32)
        return v0, v1, v2, v3

    # Split the 256-bit message into four 64-bit words, low word first.
    words = [(h >> shift) & mask for shift in (0, 64, 128, 192)]

    v0 = 0x736f6d6570736575 ^ k0
    v1 = 0x646f72616e646f6d ^ k1
    v2 = 0x6c7967656e657261 ^ k0
    v3 = 0x7465646279746573 ^ k1

    # Absorb each message word plus the trailing tag with two rounds apiece:
    # xor into v3 before the rounds, into v0 after.
    for m in words + [0x2000000000000000]:
        v3 ^= m
        v0, v1, v2, v3 = _round(v0, v1, v2, v3)
        v0, v1, v2, v3 = _round(v0, v1, v2, v3)
        v0 ^= m

    # Finalization: flip v2 and run four more rounds.
    v2 ^= 0xFF
    for _ in range(4):
        v0, v1, v2, v3 = _round(v0, v1, v2, v3)
    return v0 ^ v1 ^ v2 ^ v3
| 31.924242 | 69 | 0.543427 |
9baa0dc65ffb6b0a5536c7d9cde733626e64dee5 | 81,857 | py | Python | pytype/pyi/parser_test.py | dertilo/pytype | 385e4e846a98d352143a3cbeb9dff12c4be850b2 | [
"Apache-2.0"
] | null | null | null | pytype/pyi/parser_test.py | dertilo/pytype | 385e4e846a98d352143a3cbeb9dff12c4be850b2 | [
"Apache-2.0"
] | null | null | null | pytype/pyi/parser_test.py | dertilo/pytype | 385e4e846a98d352143a3cbeb9dff12c4be850b2 | [
"Apache-2.0"
] | null | null | null | import hashlib
import sys
import textwrap
from pytype.pyi import parser
from pytype.pyi import parser_test_base
from pytype.pytd import pytd
from pytype.tests import test_base
import unittest
class ParseErrorTest(unittest.TestCase):
  """Tests for how parser.ParseError renders its message, location and caret."""
  def check(self, expected, *args, **kwargs):
    """Build ParseError(*args, **kwargs) and compare str() to dedented *expected*."""
    e = parser.ParseError(*args, **kwargs)
    self.assertMultiLineEqual(textwrap.dedent(expected).lstrip("\n"), str(e))
  def test_plain_error(self):
    self.check("""
        ParseError: my message""", "my message")
  def test_full_error(self):
    self.check("""
         File: "foo.py", line 123
           this is a test
                ^
        ParseError: my message""", "my message", line=123, filename="foo.py",
               text="this is a test", column=6)
  def test_indented_text(self):
    # Leading whitespace in *text* is stripped and the caret column adjusted,
    # so this renders identically to test_full_error.
    self.check("""
         File: "foo.py", line 123
           this is a test
                ^
        ParseError: my message""", "my message", line=123, filename="foo.py",
               text="          this is a test", column=16)
  def test_line_without_filename(self):
    self.check("""
         File: "None", line 1
        ParseError: my message""", "my message", line=1)
  def test_filename_without_line(self):
    self.check("""
         File: "foo.py", line None
        ParseError: my message""", "my message", filename="foo.py")
  def test_text_without_column(self):
    # Without a column, the offending text and caret are omitted entirely.
    self.check("""
        ParseError: my message""", "my message", text="this is a test")
  def test_column_without_text(self):
    self.check("    ParseError: my message", "my message", column=5)
class ParserTest(parser_test_base.ParserTestBase):
  """End-to-end checks that pyi source parses to the expected pytd output.

  Uses check() (source round-trips/normalizes to the expected pyi) and
  check_error() (source fails with the given line and message) from the base.
  """
  def test_syntax_error(self):
    self.check_error("123", 1, "Unexpected expression")
  def test_illegal_character(self):
    self.check_error("^", 1, "invalid syntax")
  def test_invalid_indentation(self):
    self.check_error("""
      class Foo:
        x = ... # type: int
       y""", 3, "unindent does not match")
  @unittest.skip("New parser does not support this")
  def test_type_on_next_line(self):
    self.check("""
      a = ...
      # type: int""",
               """
      a: int""")
  def test_constant(self):
    self.check("x = ...", "x: Any", "from typing import Any")
    self.check("x: str")
    self.check("x = 0", "x: int")
    self.check("x = 0.0", "x: float")
  @unittest.skip("Not checking invalid literals")
  def test_invalid_constant(self):
    self.check_error("x = 123", 1,
                     "Only '0' allowed as int literal")
    self.check("x = 0.0", "x: float")
    self.check_error("x = 12.3", 1,
                     "Only '0.0' allowed as float literal")
  def test_string_constant(self):
    self.check("x = b''", "x: bytes")
    self.check("x = u''", "x: unicode")
    self.check('x = b""', "x: bytes")
    self.check('x = u""', "x: unicode")
    self.check("x = ''", "x: str")
    self.check('x = ""', "x: str")
  @unittest.skip("We allow all strings.")
  def test_invalid_string_constant(self):
    self.check_error("x = b'x'", 1,
                     "Only '', b'', and u'' allowed as string literals")
    self.check_error("x = u'x'", 1,
                     "Only '', b'', and u'' allowed as string literals")
    self.check_error("x = 'x'", 1,
                     "Only '', b'', and u'' allowed as string literals")
  def test_constant_pep526(self):
    self.check("x : str", "x: str")
    self.check("x : str = ...", "x: str")
  def test_alias_or_constant(self):
    self.check("x = True", "x: bool")
    self.check("x = False", "x: bool")
    self.check("x = Foo")
    self.check("""
      class A:
        x = True""", """
      class A:
        x: bool
      """)
    self.check("""
      class A:
        x = ... # type: int
        y = x
        z = y""", """
      class A:
        x: int
        y: int
        z: int
      """)
  def test_method_aliases(self):
    self.check("""
      class A:
        def x(self) -> int: ...
        y = x
        z = y
        @classmethod
        def a(cls) -> str: ...
        b = a
        c = b""", """
      class A:
        def x(self) -> int: ...
        @classmethod
        def a(cls) -> str: ...
        def y(self) -> int: ...
        def z(self) -> int: ...
        @classmethod
        def b(cls) -> str: ...
        @classmethod
        def c(cls) -> str: ...
      """)
  def test_chained_assignment(self):
    self.check("""
      a = b = int
      """, """
      a = int
      b = int
      """)
  def test_multiple_assignment(self):
    self.check("""
      a, b = int, str
      """, """
      a = int
      b = str
      """)
    self.check("""
      (a, b) = (c, d) = int, str
      """, """
      a = int
      b = str
      c = int
      d = str
      """)
  def test_invalid_multiple_assignment(self):
    self.check_error("""
      a, b = int, str, bool
      """, 1, "Cannot unpack 2 values for multiple assignment")
    self.check_error("""
      a, b = int
      """, 1, "Cannot unpack 2 values for multiple assignment")
  def test_slots(self):
    self.check("""
      class A:
        __slots__ = ... # type: tuple
      """, """
      class A: ...
      """)
    self.check("""
      class A:
        __slots__ = ["foo", "bar", "baz"]
      """)
    self.check("""
      class A:
        __slots__ = []
      """)
    self.check_error("""
      __slots__ = ["foo", "bar"]
      """, 1, "__slots__ only allowed on the class level")
    self.check_error("""
      class A:
        __slots__ = ["foo", "bar"]
        __slots__ = ["foo", "bar", "baz"]
      """, 1, "Duplicate __slots__ declaration")
    self.check_error("""
      class A:
        __slots__ = ["foo", ?]
      """, 2, "invalid syntax")
    self.check_error("""
      class A:
        __slots__ = int
      """, 2, "__slots__ must be a list of strings")
  def test_nested_class(self):
    self.check("""
      class A:
        class B: ...
      """)
  def test_nested_class_alias(self):
    self.check("""
      class A:
        class B: ...
        C = A.B
      """, """
      from typing import Type
      class A:
        class B: ...
        C: Type[A.B]
      """)
  def test_nested_class_module_alias(self):
    self.check("""
      class A:
        class B: ...
      C = A.B
      """, """
      from typing import Type
      C: Type[A.B]
      class A:
        class B: ...
      """)
  def test_conditional_nested_class(self):
    self.check("""
      if sys.version_info < (3, 5):
        class A:
          class B: ...
      """, "")
  def test_import(self):
    self.check("import foo.bar.baz")
    self.check("import a as b")
    self.check("from foo.bar import baz")
    self.check("from foo.bar import baz as abc")
    self.check("from typing import NamedTuple, TypeVar", "")
    self.check("from foo.bar import *")
    self.check_error("from foo import * as bar", 1, "invalid syntax")
    self.check("from foo import a, b",
               "from foo import a\nfrom foo import b")
    self.check("from foo import (a, b)",
               "from foo import a\nfrom foo import b")
    self.check("from foo import (a, b, )",
               "from foo import a\nfrom foo import b")
  def test_from_import(self):
    ast = self.check("from foo import c\nclass Bar(c.X): ...",
                     parser_test_base.IGNORE)
    parent, = ast.Lookup("Bar").parents
    self.assertEqual(parent, pytd.NamedType("foo.c.X"))
  def test_duplicate_names(self):
    self.check_error("""
      def foo() -> int: ...
      foo = ... # type: int""",
                     None,
                     "Duplicate top-level identifier(s): foo")
    self.check_error("""
      from x import foo
      def foo() -> int: ...""",
                     None,
                     "Duplicate top-level identifier(s): foo")
    self.check_error("""
      X = ... # type: int
      class X: ...""",
                     None,
                     "Duplicate top-level identifier(s): X")
    self.check_error("""
      X = ... # type: int
      X = TypeVar('X')""",
                     None,
                     "Duplicate top-level identifier(s): X")
    # A function is allowed to appear multiple times.
    self.check("""
      def foo(x: int) -> int: ...
      def foo(x: str) -> str: ...""",
               """
      from typing import overload
      @overload
      def foo(x: int) -> int: ...
      @overload
      def foo(x: str) -> str: ...""")
    # @overload decorators should be properly round-tripped.
    self.check("""
      @overload
      def foo(x: int) -> int: ...
      @overload
      def foo(x: str) -> str: ...""", """
      from typing import overload
      @overload
      def foo(x: int) -> int: ...
      @overload
      def foo(x: str) -> str: ...""")
  def test_type(self):
    self.check("x: str")
    self.check("x = ... # type: (str)", "x: str")
    self.check("x: foo.bar.Baz", prologue="import foo.bar")
    self.check("x: nothing")
  @unittest.skip("TODO: add errors for these")
  def test_deprecated_type(self):
    self.check_error("x = ... # type: int and str and float",
                     1, "invalid syntax")
    self.check_error("x = ... # type: ?", 1, "invalid syntax")
    self.check("x = ... # type: int or str or float",
               1, "invalid syntax")
  def test_empty_union_or_intersection_or_optional(self):
    self.check_error("def f(x: typing.Union): ...", 1,
                     "Missing options to typing.Union")
    self.check_error("def f(x: typing.Intersection): ...", 1,
                     "Missing options to typing.Intersection")
    self.check_error("def f(x: typing.Optional): ...", 1,
                     "Missing options to typing.Optional")
  def test_optional_extra_parameters(self):
    self.check_error("def f(x: typing.Optional[int, str]): ...", 1,
                     "Too many options to typing.Optional")
  def test_alias_lookup(self):
    self.check("""
      from somewhere import Foo
      x = ... # type: Foo
      """, """
      import somewhere
      from somewhere import Foo
      x: somewhere.Foo""")
  def test_external_alias(self):
    self.check("""
      from somewhere import Foo
      class Bar:
        Baz = Foo
      """, """
      from somewhere import Foo
      from typing import Any
      class Bar:
        Baz: Any
      """)
  def test_same_named_alias(self):
    self.check("""
      import somewhere
      class Bar:
        Foo = somewhere.Foo
      """, """
      import somewhere
      from typing import Any
      class Bar:
        Foo: Any
      """)
  def test_type_params(self):
    ast = self.check("""
      from typing import TypeVar
      T = TypeVar('T')
      def func(x: T) -> T: ...""")
    # During parsing references to type paraemters are instances of NamedType.
    # They should be replaced by TypeParameter objects during post-processing.
    sig = ast.functions[0].signatures[0]
    self.assertIsInstance(sig.params[0].type, pytd.TypeParameter)
    self.assertIsInstance(sig.return_type, pytd.TypeParameter)
    # Check various illegal TypeVar arguments.
    self.check_error("T = TypeVar()", 1, "Missing arguments to TypeVar")
    self.check_error("T = TypeVar(*args)", 1, "Bad arguments to TypeVar")
    self.check_error("T = TypeVar(...)", 1, "Bad arguments to TypeVar")
    self.check_error("T = TypeVar('Q')", 1,
                     "TypeVar name needs to be 'Q' (not 'T')")
    self.check_error("T = TypeVar('T', covariant=True, int, float)", 1,
                     "positional argument follows keyword argument")
    self.check_error("T = TypeVar('T', rumpelstiltskin=True)", 1,
                     "Unrecognized keyword")
  def test_type_param_arguments(self):
    self.check("""
      from typing import List, TypeVar
      T = TypeVar('T', List[int], List[str])""")
    self.check("""
      from typing import List, TypeVar
      T = TypeVar('T', bound=List[str])""")
    # 'covariant' and 'contravariant' are ignored for now.
    self.check("""
      from typing import TypeVar
      T = TypeVar('T', str, unicode, covariant=True)""", """
      from typing import TypeVar
      T = TypeVar('T', str, unicode)""")
    self.check("""
      import other_mod
      from typing import TypeVar
      T = TypeVar('T', other_mod.A, other_mod.B)""")
  def test_typing_typevar(self):
    self.check("""
      import typing
      T = typing.TypeVar('T')
      """, """
      import typing
      from typing import TypeVar
      T = TypeVar('T')
      """)
  def test_error_formatting(self):
    src = """
      class Foo:
        this is not valid"""
    with self.assertRaises(parser.ParseError) as e:
      parser.parse_string(textwrap.dedent(src).lstrip(), filename="foo.py",
                          python_version=self.python_version)
    self.assertMultiLineEqual(textwrap.dedent("""
      File: "foo.py", line 2
        this is not valid
^
      ParseError: Unexpected expression
    """).strip("\n"), str(e.exception))
  def test_pep484_translations(self):
    ast = self.check("""
      x: None""")
    self.assertEqual(pytd.NamedType("NoneType"), ast.constants[0].type)
  def test_module_name(self):
    ast = self.check("x = ... # type: int",
                     "foo.x: int",
                     name="foo")
    self.assertEqual("foo", ast.name)
  def test_no_module_name(self):
    # If the name is not specified, it is a digest of the source.
    src = ""
    ast = self.check(src)
    self.assertEqual(hashlib.md5(src.encode()).hexdigest(), ast.name)
    src = "x: int"
    ast = self.check(src)
    self.assertEqual(hashlib.md5(src.encode()).hexdigest(), ast.name)
  def test_pep84_aliasing(self):
    # This should not be done for the typing module itself.
    self.check("x = ... # type: Hashable",
               "typing.x: Hashable",
               name="typing")
  def test_module_class_clash(self):
    ast = parser.parse_string(textwrap.dedent("""
      from bar import X
      class bar:
        X = ... # type: Any
      y = bar.X.Baz
      z = X.Baz
      """), name="foo", python_version=self.python_version)
    self.assertEqual("foo.bar.X.Baz", ast.Lookup("foo.y").type.name)
    self.assertEqual("bar.X.Baz", ast.Lookup("foo.z").type.name)
  def test_trailing_list_comma(self):
    self.check("""
      from typing import Any, Callable
      x: Callable[
        [
          int,
          int,
        ],
        Any,
      ]
      """, """
      from typing import Any, Callable
      x: Callable[[int, int], Any]
      """)
  def test_all(self):
    self.check("""
      __all__ = ['a']
      """, """
      from typing import List
      __all__: List[str]
      """)
class QuotedTypeTest(parser_test_base.ParserTestBase):
  """Tests for forward references written as quoted type strings."""
  def test_annotation(self):
    self.check("""
      class A: ...
      x: "A"
      y: "List[A]" = ...
      """, """
      x: A
      y: List[A]
      class A: ...
      """)
  def test_def(self):
    self.check("""
      def f(x: "int") -> "str": ...
      """, """
      def f(x: int) -> str: ...
      """)
  def test_subscript(self):
    # Quoted types are not allowed inside a subscript.
    self.check_error("x: List['int']", 1, "List['int'] not supported")
class HomogeneousTypeTest(parser_test_base.ParserTestBase):
  """Tests for parameterized types: Callable, Tuple, ellipsis, tuple literals."""
  def test_callable_parameters(self):
    self.check("""
      from typing import Callable
      x: Callable[[int, str], bool]""")
    self.check("""
      from typing import Callable
      x = ... # type: Callable[..., bool]""", """
      from typing import Callable
      x: Callable[..., bool]""")
    self.check("""
      from typing import Any, Callable
      x: Callable[Any, bool]""", """
      from typing import Callable
      x: Callable[..., bool]""")
    self.check("""
      from typing import Any, Callable
      x: Callable[[Any], bool]""")
    self.check("""
      from typing import Callable
      x: Callable[[], bool]""")
    self.check("""
      from typing import Callable
      x = ... # type: Callable[[int]]""", """
      from typing import Any, Callable
      x: Callable[[int], Any]""")
    self.check("""
      from typing import Callable
      x = ... # type: Callable[[], ...]""", """
      from typing import Any, Callable
      x: Callable[[], Any]""")
    self.check_error(
        "import typing\n\nx = ... # type: typing.Callable[int]", 3,
        "First argument to Callable must be a list of argument types")
    self.check_error(
        "import typing\n\nx = ... # type: typing.Callable[[], bool, bool]", 3,
        "Expected 2 parameters to Callable, got 3")
  def test_ellipsis(self):
    # B[T, ...] becomes B[T].
    self.check("from typing import List\n\nx = ... # type: List[int, ...]",
               "from typing import List\n\nx: List[int]")
    # Double ellipsis is not allowed.
    self.check_error("x = ... # type: List[..., ...]", 1,
                     "not supported")
    # Tuple[T] and Tuple[T, ...] are distinct.
    self.check("from typing import Tuple\n\nx = ... # type: Tuple[int]",
               "from typing import Tuple\n\nx: Tuple[int]")
    self.check("from typing import Tuple\n\nx = ... # type: Tuple[int, ...]",
               "from typing import Tuple\n\nx: Tuple[int, ...]")
  def test_tuple(self):
    self.check("""
      from typing import Tuple
      x = ... # type: Tuple[int, str]""",
               """
      from typing import Tuple
      x: Tuple[int, str]""")
    self.check("""
      from typing import Tuple
      x = ... # type: Tuple[int, str, ...]""",
               """
      from typing import Any, Tuple
      x: Tuple[int, str, Any]""")
  def test_empty_tuple(self):
    self.check("""
      from typing import Tuple
      def f() -> Tuple[()]: ...
      """, """
      from typing import Tuple
      def f() -> Tuple[()]: ...
      """)
  def test_simple(self):
    self.check("x: Foo[int, str]")
  def test_type_tuple(self):
    # A tuple literal of types is normalized to the plain 'tuple' type.
    self.check("x = (str, bytes)",
               "x: tuple")
    self.check("x = (str, bytes,)",
               "x: tuple")
    self.check("x = (str,)",
               "x: tuple")
    self.check("x = str,",
               "x: tuple")
class NamedTupleTest(parser_test_base.ParserTestBase):
@unittest.skip("Constructors in type annotations not supported")
def test_no_fields(self):
self.check("x = ... # type: NamedTuple('foo', [])", """
from typing import Any, Tuple, Type, TypeVar
x: namedtuple_foo_0
_Tnamedtuple_foo_0 = TypeVar('_Tnamedtuple_foo_0', bound=namedtuple_foo_0)
class namedtuple_foo_0(Tuple[()]):
__slots__ = []
_asdict: Any
__dict__: Any
_fields: Any
__getnewargs__: Any
__getstate__: Any
_make: Any
_replace: Any
def __new__(cls: Type[_Tnamedtuple_foo_0]) -> _Tnamedtuple_foo_0: ...
def __init__(self, *args, **kwargs) -> None: ...
""")
@unittest.skip("Constructors in type annotations not supported")
def test_multiple_fields(self):
expected = """
from typing import Any, Tuple, Type, TypeVar
x: namedtuple_foo_0
_Tnamedtuple_foo_0 = TypeVar('_Tnamedtuple_foo_0', bound=namedtuple_foo_0)
class namedtuple_foo_0(Tuple[int, str]):
__slots__ = ["a", "b"]
a: int
b: str
_asdict: Any
__dict__: Any
_fields: Any
__getnewargs__: Any
__getstate__: Any
_make: Any
_replace: Any
def __new__(cls: Type[_Tnamedtuple_foo_0], a: int, b: str) -> _Tnamedtuple_foo_0: ...
def __init__(self, *args, **kwargs) -> None: ...
"""
self.check("x = ... # type: NamedTuple('foo', [('a', int), ('b', str)])",
expected)
self.check("x = ... # type: NamedTuple('foo', [('a', int), ('b', str),])",
expected)
self.check("x = ... # type: NamedTuple('foo', [('a', int,), ('b', str),])",
expected)
# pylint: disable=line-too-long
@unittest.skip("Constructors in type annotations not supported")
def test_dedup_basename(self):
self.check("""
x = ... # type: NamedTuple('foo', [('a', int,)])
y = ... # type: NamedTuple('foo', [('b', str,)])""",
"""
from typing import Any, Tuple, Type, TypeVar
x: namedtuple_foo_0
y: namedtuple_foo_1
_Tnamedtuple_foo_0 = TypeVar('_Tnamedtuple_foo_0', bound=namedtuple_foo_0)
_Tnamedtuple_foo_1 = TypeVar('_Tnamedtuple_foo_1', bound=namedtuple_foo_1)
class namedtuple_foo_0(Tuple[int]):
__slots__ = ["a"]
a: int
_asdict: Any
__dict__: Any
_fields: Any
__getnewargs__: Any
__getstate__: Any
_make: Any
_replace: Any
def __new__(cls: Type[_Tnamedtuple_foo_0], a: int) -> _Tnamedtuple_foo_0: ...
def __init__(self, *args, **kwargs) -> None: ...
class namedtuple_foo_1(Tuple[str]):
__slots__ = ["b"]
b: str
_asdict: Any
__dict__: Any
_fields: Any
__getnewargs__: Any
__getstate__: Any
_make: Any
_replace: Any
def __new__(cls: Type[_Tnamedtuple_foo_1], b: str) -> _Tnamedtuple_foo_1: ...
def __init__(self, *args, **kwargs) -> None: ...
""")
# pylint: enable=line-too-long
def test_assign_namedtuple(self):
self.check("X = NamedTuple('X', [])", """
from typing import Any, Tuple, Type, TypeVar
X = namedtuple_X_0
_Tnamedtuple_X_0 = TypeVar('_Tnamedtuple_X_0', bound=namedtuple_X_0)
class namedtuple_X_0(Tuple[()]):
__slots__ = []
_asdict: Any
__dict__: Any
_fields: Any
__getnewargs__: Any
__getstate__: Any
_make: Any
_replace: Any
def __new__(cls: Type[_Tnamedtuple_X_0]) -> _Tnamedtuple_X_0: ...
def __init__(self, *args, **kwargs) -> None: ...
""")
def test_subclass_namedtuple(self):
self.check("class X(NamedTuple('X', [])): ...", """
from typing import Any, Tuple, Type, TypeVar
_Tnamedtuple_X_0 = TypeVar('_Tnamedtuple_X_0', bound=namedtuple_X_0)
class namedtuple_X_0(Tuple[()]):
__slots__ = []
_asdict: Any
__dict__: Any
_fields: Any
__getnewargs__: Any
__getstate__: Any
_make: Any
_replace: Any
def __new__(cls: Type[_Tnamedtuple_X_0]) -> _Tnamedtuple_X_0: ...
def __init__(self, *args, **kwargs) -> None: ...
class X(namedtuple_X_0): ...
""")
def test_trailing_comma(self):
self.check("""
from typing import NamedTuple
Foo = NamedTuple(
"Foo",
[
("a", int),
("b", str),
],
)
""", """
from typing import Any, Tuple, Type, TypeVar
Foo = namedtuple_Foo_0
_Tnamedtuple_Foo_0 = TypeVar('_Tnamedtuple_Foo_0', bound=namedtuple_Foo_0)
class namedtuple_Foo_0(Tuple[int, str]):
__slots__ = ["a", "b"]
a: int
b: str
_asdict: Any
__dict__: Any
_fields: Any
__getnewargs__: Any
__getstate__: Any
_make: Any
_replace: Any
def __new__(cls: Type[_Tnamedtuple_Foo_0], a: int, b: str) -> _Tnamedtuple_Foo_0: ...
def __init__(self, *args, **kwargs) -> None: ...
""")
def test_collections_trailing_comma(self):
self.check("""
from collections import namedtuple
Foo = namedtuple(
"Foo",
[
"a",
"b",
],
)
""", """
from collections import namedtuple
from typing import Any, Tuple, Type, TypeVar
Foo = namedtuple_Foo_0
_Tnamedtuple_Foo_0 = TypeVar('_Tnamedtuple_Foo_0', bound=namedtuple_Foo_0)
class namedtuple_Foo_0(Tuple[Any, Any]):
__slots__ = ["a", "b"]
a: Any
b: Any
_asdict: Any
__dict__: Any
_fields: Any
__getnewargs__: Any
__getstate__: Any
_make: Any
_replace: Any
def __new__(cls: Type[_Tnamedtuple_Foo_0], a, b) -> _Tnamedtuple_Foo_0: ...
def __init__(self, *args, **kwargs) -> None: ...
""")
def test_collections_namedtuple(self):
expected = """
from collections import namedtuple
from typing import Any, Tuple, Type, TypeVar
X = namedtuple_X_0
_Tnamedtuple_X_0 = TypeVar('_Tnamedtuple_X_0', bound=namedtuple_X_0)
class namedtuple_X_0(Tuple[Any]):
__slots__ = ["y"]
y: Any
_asdict: Any
__dict__: Any
_fields: Any
__getnewargs__: Any
__getstate__: Any
_make: Any
_replace: Any
def __new__(cls: Type[_Tnamedtuple_X_0], y) -> _Tnamedtuple_X_0: ...
def __init__(self, *args, **kwargs) -> None: ...
"""
self.check("""
from collections import namedtuple
X = namedtuple("X", ["y"])
""", expected)
self.check("""
from collections import namedtuple
X = namedtuple("X", ["y",])
""", expected)
  def test_typing_namedtuple_class(self):
    """A class inheriting from NamedTuple is rewritten.

    The generated tuple class carries the fields; the original class
    becomes a trivial subclass of it.
    """
    self.check("""
      from typing import NamedTuple
      class X(NamedTuple):
        y: int
        z: str
    """, """
      from typing import Any, Tuple, Type, TypeVar

      _Tnamedtuple_X_0 = TypeVar('_Tnamedtuple_X_0', bound=namedtuple_X_0)

      class namedtuple_X_0(Tuple[int, str]):
          __slots__ = ["y", "z"]
          y: int
          z: str
          _asdict: Any
          __dict__: Any
          _fields: Any
          __getnewargs__: Any
          __getstate__: Any
          _make: Any
          _replace: Any
          def __new__(cls: Type[_Tnamedtuple_X_0], y: int, z: str) -> _Tnamedtuple_X_0: ...
          def __init__(self, *args, **kwargs) -> None: ...

      class X(namedtuple_X_0): ...
    """)
  def test_typing_namedtuple_class_with_method(self):
    """Methods declared on a NamedTuple subclass stay on the subclass."""
    self.check("""
      from typing import NamedTuple
      class X(NamedTuple):
        y: int
        z: str
        def foo(self) -> None: ...
    """, """
      from typing import Any, Tuple, Type, TypeVar

      _Tnamedtuple_X_0 = TypeVar('_Tnamedtuple_X_0', bound=namedtuple_X_0)

      class namedtuple_X_0(Tuple[int, str]):
          __slots__ = ["y", "z"]
          y: int
          z: str
          _asdict: Any
          __dict__: Any
          _fields: Any
          __getnewargs__: Any
          __getstate__: Any
          _make: Any
          _replace: Any
          def __new__(cls: Type[_Tnamedtuple_X_0], y: int, z: str) -> _Tnamedtuple_X_0: ...
          def __init__(self, *args, **kwargs) -> None: ...

      class X(namedtuple_X_0):
          def foo(self) -> None: ...
    """)
  def test_typing_namedtuple_class_multi_inheritance(self):
    """Other bases are preserved when NamedTuple is replaced as a parent."""
    self.check("""
      from typing import NamedTuple
      class X(dict, NamedTuple):
        y: int
        z: str
    """, """
      from typing import Any, Tuple, Type, TypeVar

      _Tnamedtuple_X_0 = TypeVar('_Tnamedtuple_X_0', bound=namedtuple_X_0)

      class namedtuple_X_0(Tuple[int, str]):
          __slots__ = ["y", "z"]
          y: int
          z: str
          _asdict: Any
          __dict__: Any
          _fields: Any
          __getnewargs__: Any
          __getstate__: Any
          _make: Any
          _replace: Any
          def __new__(cls: Type[_Tnamedtuple_X_0], y: int, z: str) -> _Tnamedtuple_X_0: ...
          def __init__(self, *args, **kwargs) -> None: ...

      class X(dict, namedtuple_X_0): ...
    """)
  def test_multi_namedtuple_parent(self):
    """Inheriting from bare NamedTuple more than once is rejected."""
    self.check_error("""
      from typing import NamedTuple
      class X(NamedTuple, NamedTuple): ...
    """, 2, "cannot inherit from bare NamedTuple more than once")
  def test_redefine_namedtuple(self):
    """A locally defined class may shadow the name NamedTuple."""
    self.check("""
      class NamedTuple: ...
    """)
class FunctionTest(parser_test_base.ParserTestBase):
  """Tests for parsing function signatures, bodies and decorators."""

  def test_params(self):
    """Positional parameters, annotations, and default values."""
    self.check("def foo() -> int: ...")
    self.check("def foo(x) -> int: ...")
    self.check("def foo(x: int) -> int: ...")
    self.check("def foo(x: int, y: str) -> int: ...")
    # Default values can add type information.
    self.check("def foo(x = 123) -> int: ...",
               "def foo(x: int = ...) -> int: ...")
    self.check("def foo(x = 12.3) -> int: ...",
               "def foo(x: float = ...) -> int: ...")
    self.check("def foo(x = None) -> int: ...",
               "def foo(x = ...) -> int: ...")
    self.check("def foo(x = xyz) -> int: ...",
               "def foo(x = ...) -> int: ...")
    self.check("def foo(x = ...) -> int: ...",
               "def foo(x = ...) -> int: ...")
    # Defaults are ignored if a declared type is present.
    self.check("def foo(x: str = 123) -> int: ...",
               "def foo(x: str = ...) -> int: ...")
    self.check("def foo(x: str = None) -> int: ...",
               "def foo(x: str = ...) -> int: ...")
    # Allow but do not preserve a trailing comma in the param list.
    self.check("def foo(x: int, y: str, z: bool,) -> int: ...",
               "def foo(x: int, y: str, z: bool) -> int: ...")

  def test_star_params(self):
    """*args, **kwargs and keyword-only parameters."""
    self.check("def foo(*, x) -> str: ...")
    self.check("def foo(x: int, *args) -> str: ...")
    self.check("def foo(x: int, *args, key: int = ...) -> str: ...")
    self.check("def foo(x: int, *args: float) -> str: ...")
    self.check("def foo(x: int, **kwargs) -> str: ...")
    self.check("def foo(x: int, **kwargs: float) -> str: ...")
    self.check("def foo(x: int, *args, **kwargs) -> str: ...")
    # Various illegal uses of * args.
    self.check_error("def foo(*) -> int: ...", 1,
                     "named arguments must follow bare *")
    self.check_error("def foo(*x, *y) -> int: ...", 1, "invalid syntax")
    self.check_error("def foo(**x, *y) -> int: ...", 1, "invalid syntax")

  @unittest.skip("New parser does not support this syntax")
  def test_ellipsis_param(self):
    """Legacy bare `...` as a catch-all parameter (unsupported now)."""
    self.check("def foo(...) -> int: ...",
               "def foo(*args, **kwargs) -> int: ...")
    self.check("def foo(x: int, ...) -> int: ...",
               "def foo(x: int, *args, **kwargs) -> int: ...")
    self.check_error("def foo(..., x) -> int: ...", 1,
                     "ellipsis (...) must be last parameter")
    self.check_error("def foo(*, ...) -> int: ...", 1,
                     "ellipsis (...) not compatible with bare *")

  def test_typeignore(self):
    """`# type: ignore` comments are accepted and discarded."""
    self.check("def foo() -> int: # type: ignore\n ...",
               "def foo() -> int: ...")
    self.check("def foo() -> int: ... # type: ignore",
               "def foo() -> int: ...")
    self.check("def foo() -> int: pass # type: ignore",
               "def foo() -> int: ...")
    self.check("def foo(x) -> int: # type: ignore\n x=List[int]",
               "def foo(x) -> int:\n x = List[int]")
    self.check("""
      def foo(x: int, # type: ignore
              y: str) -> bool: ...""",
               "def foo(x: int, y: str) -> bool: ...")
    self.check("""
      class Foo:
        bar: str # type: ignore
    """, """
      class Foo:
          bar: str
    """)
    self.check("""
      class Foo:
        bar = ... # type: str # type: ignore
    """, """
      class Foo:
          bar: str
    """)
    self.check("""
      class Foo:
        bar: str = ... # type: ignore
    """, """
      class Foo:
          bar: str
    """)
    self.check("""
      def f( # type: ignore
        x: int) -> None: ...
    """, """
      def f(x: int) -> None: ...
    """)

  def test_typeignore_alias(self):
    """A method alias with `# type: ignore` becomes a real method."""
    self.check("""
      class Foo:
        def f(self) -> None: ...
        g = f # type: ignore
    """, """
      class Foo:
          def f(self) -> None: ...
          def g(self) -> None: ...
    """)

  def test_typeignore_slots(self):
    """`# type: ignore` on __slots__ is accepted and discarded."""
    self.check("""
      class Foo:
        __slots__ = ["a", "b"] # type: ignore
    """, """
      class Foo:
          __slots__ = ["a", "b"]
    """)

  def test_typeignore_errorcode(self):
    """mypy-style `# type: ignore[code]` comments are accepted."""
    self.check("""
      def f() -> None: ... # type: ignore[override]
      def g() -> None: ... # type: ignore[var-annotated]
      def h() -> None: ... # type: ignore[abstract, no-untyped-def]
    """, """
      def f() -> None: ...
      def g() -> None: ...
      def h() -> None: ...
    """)

  def test_decorators(self):
    """Handling of function decorators (overload, abstract, coroutine...)."""
    # These tests are a bit questionable because most of the decorators only
    # make sense for methods of classes. But this at least gives us some
    # coverage of the decorator logic. More sensible tests can be created once
    # classes are implemented.
    self.check("""
      @overload
      def foo() -> int: ...""",
               """
      def foo() -> int: ...""")
    # Accept and disregard type: ignore comments on a decorator
    self.check("""
      @overload
      def foo() -> int: ...
      @overload # type: ignore # unsupported signature
      def foo(bool) -> int: ...""",
               """
      from typing import overload

      @overload
      def foo() -> int: ...
      @overload
      def foo(bool) -> int: ...""")
    self.check("""
      @abstractmethod
      def foo() -> int: ...""",
               """
      @abstractmethod
      def foo() -> int: ...""")
    self.check("""
      @abc.abstractmethod
      def foo() -> int: ...""",
               """
      @abstractmethod
      def foo() -> int: ...""")
    self.check("""
      @staticmethod
      def foo() -> int: ...""")
    self.check("""
      @classmethod
      def foo() -> int: ...""")
    self.check("""
      @coroutine
      def foo() -> int: ...""")
    self.check("""
      @asyncio.coroutine
      def foo() -> int: ...""",
               """
      @coroutine
      def foo() -> int: ...""")
    self.check("""
      @asyncio.coroutine
      def foo() -> int: ...
      @coroutines.coroutine
      def foo() -> int: ...
      @coroutine
      def foo() -> str: ...""",
               """
      from typing import overload

      @coroutine
      @overload
      def foo() -> int: ...
      @coroutine
      @overload
      def foo() -> int: ...
      @coroutine
      @overload
      def foo() -> str: ...""")
    self.check_error("""
      def foo() -> str: ...
      @coroutine
      def foo() -> int: ...""",
                     None,
                     "Overloaded signatures for foo disagree on "
                     "coroutine decorators")
    self.check_error("""
      @property
      def foo(self) -> int: ...""",
                     None,
                     "Module-level functions with property decorators: foo")
    self.check_error("""
      @foo.setter
      def foo(self, x) -> int: ...""",
                     None,
                     "Module-level functions with property decorators: foo")
    self.check_error("""
      @classmethod
      @staticmethod
      def foo() -> int: ...""",
                     1,
                     "Too many decorators for foo")

  def test_type_check_only(self):
    """@type_check_only on a function is accepted and discarded."""
    self.check("""
      from typing import type_check_only
      @type_check_only
      def f() -> None: ...
    """, "def f() -> None: ...")

  def test_type_check_only_class(self):
    """@type_check_only on a class is accepted and discarded."""
    self.check("""
      from typing import type_check_only
      @type_check_only
      class Foo: ...
    """, "class Foo: ...")

  def test_decorated_class(self):
    """An arbitrary class decorator is preserved."""
    self.check("""
      @decorator
      class Foo: ...
    """)

  def test_multiple_class_decorators(self):
    """Multiple class decorators are preserved."""
    self.check("""
      @decorator1
      @decorator2
      class Foo: ...
    """)

  def test_bad_decorated_class(self):
    """Method-only decorators are rejected on classes."""
    self.check_error("""
      @classmethod
      class Foo: ...
    """, 1, "Unsupported class decorators: classmethod")

  def test_dataclass_decorator(self):
    """@dataclass fields keep annotations; defaults become implicit."""
    self.check("""
      from dataclasses import dataclass
      @dataclass
      class Foo:
        x: int
        y: str = ...
    """, """
      from dataclasses import dataclass

      @dataclass
      class Foo:
          x: int
          y: str
    """)

  def test_dataclass_default_error(self):
    """A dataclass field without a default may not follow one with."""
    self.check_error("""
      from dataclasses import dataclass
      @dataclass
      class Foo:
        x: int = ...
        y: str
    """, None, "non-default argument y follows default argument x")

  def test_empty_body(self):
    """All spellings of an empty function body normalize to `...`."""
    self.check("def foo() -> int: ...")
    self.check("def foo() -> int: ...",
               "def foo() -> int: ...")
    self.check("def foo() -> int: pass",
               "def foo() -> int: ...")
    self.check("""
      def foo() -> int:
        ...""",
               """
      def foo() -> int: ...""")
    self.check("""
      def foo() -> int:
        pass""",
               """
      def foo() -> int: ...""")
    self.check("""
      def foo() -> int:
        '''doc string'''""",
               """
      def foo() -> int: ...""")

  def test_mutators(self):
    """Assignments in a function body declare parameter mutations."""
    # Mutators.
    self.check("""
      def foo(x) -> int:
        x = int""")
    self.check_error("""
      def foo(x) -> int:
        y = int""", 1, "No parameter named 'y'")

  def test_mutator_from_annotation(self):
    """A parameterized `self` annotation turns into a self mutation."""
    self.check("""
      from typing import Generic, TypeVar
      T = TypeVar('T')
      class Foo(Generic[T]):
        def __init__(self: Foo[str]) -> None: ...
    """, """
      from typing import Generic, TypeVar

      T = TypeVar('T')

      class Foo(Generic[T]):
          def __init__(self) -> None:
              self = Foo[str]
    """)

  def test_exceptions(self):
    """`raise` statements in function bodies declare raised exceptions."""
    self.check("""
      def foo(x) -> int:
        raise Error""",
               """
      def foo(x) -> int:
          raise Error()""")
    self.check("""
      def foo(x) -> int:
        raise Error()""")
    self.check("""
      def foo() -> int:
        raise RuntimeError()
        raise TypeError()""")
    self.check("""
      def foo() -> int:
        raise Bar.Error()""", prologue="import Bar")

  def test_invalid_body(self):
    """Statements other than mutations/raises are rejected in bodies."""
    self.check_error("""
      def foo(x) -> int:
        a: str""", 1, "Unexpected statement in function body")

  def test_return(self):
    """A missing return annotation defaults to Any."""
    self.check("def foo() -> int: ...")
    self.check("def foo(): ...",
               "def foo() -> Any: ...",
               prologue="from typing import Any")

  def test_async(self):
    """`async def` is rewritten to return a Coroutine."""
    self.check("async def foo() -> int: ...",
               "def foo() -> Coroutine[Any, Any, int]: ...",
               prologue="from typing import Any, Coroutine")
class ClassTest(parser_test_base.ParserTestBase):
  """Tests for parsing class definitions."""

  def test_no_parents(self):
    """Classes with no parents normalize to `class Foo: ...`."""
    canonical = """
      class Foo: ...
    """
    self.check(canonical, canonical)
    self.check("""
      class Foo():
        pass
    """, canonical)

  def test_parents(self):
    """Parent classes are preserved."""
    self.check("""
      class Foo(Bar): ...
    """)
    self.check("""
      class Foo(Bar, Baz): ...
    """)

  def test_parent_remove_nothingtype(self):
    """The `nothing` pseudo-parent is removed from base lists."""
    self.check("""
      class Foo(nothing): ...
    """, """
      class Foo: ...
    """)
    self.check("""
      class Foo(Bar, nothing): ...
    """, """
      class Foo(Bar): ...
    """)

  def test_class_type_ignore(self):
    """`# type: ignore` on a class header is accepted and discarded."""
    canonical = """
      class Foo: # type: ignore
        pass
      class Bar(Foo): # type: ignore
        pass
    """
    self.check(canonical, """
      class Foo: ...

      class Bar(Foo): ...
    """)

  def test_metaclass(self):
    """Only the `metaclass` keyword is allowed in a class header."""
    self.check("""
      class Foo(metaclass=Meta): ...
    """)
    self.check("""
      class Foo(Bar, metaclass=Meta): ...
    """)
    self.check_error("""
      class Foo(badkeyword=Meta): ...
    """, 1, "Unexpected classdef kwarg 'badkeyword'")
    self.check_error("""
      class Foo(metaclass=Meta, Bar): ...
    """, 1, "positional argument follows keyword argument")

  def test_shadow_pep484(self):
    """A local class may shadow a PEP 484 name such as List."""
    self.check("""
      class List:
        def bar(self) -> List: ...
    """)

  def test_no_body(self):
    """All spellings of an empty class body normalize to `...`."""
    canonical = """
      class Foo: ...
    """
    # There are numerous ways to indicate an empty body.
    self.check(canonical, canonical)
    self.check("""
      class Foo(): pass
    """, canonical)
    self.check("""
      class Foo():
        pass
    """, canonical)
    self.check("""
      class Foo():
        ...
    """, canonical)
    # pylint: disable=g-inconsistent-quotes
    self.check('''\
      class Foo():
        """docstring"""
        ...
    ''', canonical)
    self.check('''\
      class Foo():
        """docstring"""
    ''', canonical)
    # Accept type: ignore with empty body
    self.check("""
      class Foo: ... # type: ignore
    """, canonical)
    self.check("""
      class Foo: # type: ignore
        pass
    """, canonical)

  def test_attribute(self):
    """Annotated class attributes are preserved."""
    self.check("""
      class Foo:
        a: int
    """)

  def test_method(self):
    """Methods are preserved."""
    self.check("""
      class Foo:
        def a(self, x: int) -> str: ...
    """)

  def test_property(self):
    """@property methods become Annotated attributes."""
    self.check("""
      class Foo:
        @property
        def a(self) -> int: ...
    """, """
      from typing import Annotated

      class Foo:
          a: Annotated[int, 'property']
    """)

  def test_duplicate_name(self):
    """Duplicate class-level identifiers are rejected (overloads excepted)."""
    self.check_error("""
      class Foo:
        bar = ... # type: int
        bar = ... # type: str
    """, 1, "Duplicate class-level identifier(s): bar")
    self.check_error("""
      class Foo:
        def bar(self) -> int: ...
        bar = ... # type: str
    """, 1, "Duplicate class-level identifier(s): bar")
    # Multiple method defs are ok (needed for variant signatures).
    self.check("""
      class Foo:
        @overload
        def x(self) -> int: ...
        @overload
        def x(self) -> str: ...
    """, """
      from typing import overload

      class Foo:
          @overload
          def x(self) -> int: ...
          @overload
          def x(self) -> str: ...
    """)

  def test_protocol_parent(self):
    """typing.Protocol may be used as a parent."""
    self.check("""
      from typing import Protocol
      class Foo(Protocol): ...
    """)

  def test_parameterized_protocol_parent(self):
    """Protocol[T] is split into Protocol plus Generic[T]."""
    self.check("""
      from typing import Protocol, TypeVar
      T = TypeVar('T')
      class Foo(Protocol[T]): ...
    """, """
      from typing import Generic, Protocol, TypeVar

      T = TypeVar('T')

      class Foo(Protocol, Generic[T]): ...
    """)

  def test_typing_extensions_parameterized_protocol(self):
    """typing_extensions.Protocol[T] maps to typing.Protocol."""
    self.check("""
      from typing import TypeVar
      from typing_extensions import Protocol
      T = TypeVar('T')
      class Foo(Protocol[T]): ...
    """, """
      import typing
      from typing import Generic, TypeVar
      from typing_extensions import Protocol

      T = TypeVar('T')

      class Foo(typing.Protocol, Generic[T]): ...
    """)

  def test_bad_typevar_in_mutation(self):
    """Mutations may not introduce type parameters unknown to the class."""
    self.check_error("""
      from typing import Generic, TypeVar
      S = TypeVar('S')
      T = TypeVar('T')
      U = TypeVar('U')
      V = TypeVar('V')
      class Foo(Generic[T]):
        def __init__(self, x: S):
          self = Generic[S, T, U, V]
    """, None, "Type parameter(s) {U, V}")

  def test_nested_class_typing_class_conflict(self):
    """A nested class shadows a typing import only within its class scope."""
    ast = parser.parse_string(textwrap.dedent("""
      from typing import Mapping
      class Foo:
        class Mapping: ...
      x: Mapping
    """).lstrip())
    x = ast.Lookup("x")
    self.assertEqual(x.type.name, "typing.Mapping")
class IfTest(parser_test_base.ParserTestBase):
  """Tests for `if sys.version_info ...` blocks at module level."""

  def test_if_true(self):
    """A true condition keeps the body."""
    self.check("""
      if sys.version_info >= (3, 5, 0):
        x = ... # type: int
    """, """
      x: int""")

  def test_if_false(self):
    """A false condition drops the body."""
    self.check("""
      if sys.version_info == (1, 2, 3):
        x = ... # type: int
    """, "")

  def test_else_used(self):
    """The else branch is used when the condition is false."""
    self.check("""
      if sys.version_info == (1, 2, 3):
        x = ... # type: int
      else:
        y = ... # type: str
    """, """
      y: str""")

  def test_else_ignored(self):
    """The else branch is dropped when the condition is true."""
    self.check("""
      if sys.version_info >= (3, 5, 0):
        x = ... # type: int
      else:
        y = ... # type: str
    """, """
      x: int""")

  def test_elif_used(self):
    """The first true elif branch is used."""
    self.check("""
      if sys.version_info == (1, 2, 3):
        x = ... # type: int
      elif sys.version_info >= (3, 5, 0):
        y = ... # type: float
      else:
        z = ... # type: str
    """, """
      y: float""")

  def test_elif_preempted(self):
    """A true if branch preempts later true elif branches."""
    self.check("""
      if sys.version_info > (1, 2, 3):
        x = ... # type: int
      elif sys.version_info >= (3, 5, 0):
        y = ... # type: float
      else:
        z = ... # type: str
    """, """
      x: int""")

  def test_elif_ignored(self):
    """All-false if/elif falls through to else."""
    self.check("""
      if sys.version_info == (1, 2, 3):
        x = ... # type: int
      elif sys.version_info == (4, 5, 6):
        y = ... # type: float
      else:
        z = ... # type: str
    """, """
      z: str""")

  def test_nested_if(self):
    """Nested version/platform conditions are evaluated recursively."""
    self.check("""
      if sys.version_info >= (2, 0):
        if sys.platform == "linux":
          a = ... # type: int
        else:
          b = ... # type: int
      else:
        if sys.platform == "linux":
          c = ... # type: int
        else:
          d = ... # type: int
    """, "a: int")

  def test_if_or(self):
    """`or` conditions are evaluated correctly."""
    self.check("""
      if sys.version_info >= (2, 0) or sys.version_info < (0, 0, 0):
        a = ... # type: int
      if sys.version_info < (0, 0, 0) or sys.version_info >= (2, 0):
        b = ... # type: int
      if sys.version_info < (0, 0, 0) or sys.version_info > (4,):
        c = ... # type: int
      if sys.version_info >= (2, 0) or sys.version_info >= (4, 7):
        d = ... # type: int
      if (sys.platform == "windows" or sys.version_info < (0,) or
          sys.version_info >= (3, 5)):
        e = ... # type: int
    """, """
      a: int
      b: int
      d: int
      e: int""")

  def test_if_and(self):
    """`and` conditions are evaluated correctly."""
    self.check("""
      if sys.version_info >= (3, 0) and sys.version_info < (4, 0):
        a = ... # type: int
      if sys.version_info >= (3, 0) and sys.version_info >= (4, 0):
        b = ... # type: int
    """, """
      a: int""")

  # The remaining tests verify that actions with side effects only take effect
  # within a true block.

  def test_conditional_import(self):
    """Imports inside a false branch are not processed."""
    self.check("""
      if sys.version_info >= (3, 5, 0):
        from foo import Processed
      else:
        from foo import Ignored
    """, "from foo import Processed")

  def test_conditional_alias_or_constant(self):
    """Aliases inside a false branch are not processed."""
    self.check("""
      if sys.version_info >= (3, 5, 0):
        x = Processed
      else:
        y = Ignored
    """, "x = Processed")

  def test_conditional_class(self):
    """Class definitions inside a false branch are not processed."""
    self.check("""
      if sys.version_info >= (3, 5, 0):
        class Processed: ...
      else:
        class Ignored: ...
    """, """
      class Processed: ...
    """)

  def test_conditional_class_registration(self):
    """Only classes in true branches are registered as local names."""
    # Class registration allows a local class name to shadow a PEP 484 name.
    # The only time this is noticeable is when the PEP 484 name is one of the
    # capitalized names that gets converted to lower case (i.e. List -> list).
    # In these cases a non-shadowed name would be converted to lower case, and
    # a properly shadowed name would remain capitalized. In the test below,
    # Dict should be registered, List should not be registered. Thus after
    # the "if" statement Dict refers to the local Dict class and List refers
    # to the PEP 484 list class.
    self.check("""
      from typing import List
      if sys.version_info >= (3, 5, 0):
        class Dict: ...
      else:
        class List: ...
      x = ... # type: Dict
      y = ... # type: List
    """, """
      x: Dict
      y: list

      class Dict: ...
    """)

  def test_conditional_typevar(self):
    """TypeVars inside a false branch are not defined."""
    # The legacy parser did not handle this correctly - typevars are added
    # regardless of any conditions.
    self.check("""
      if sys.version_info >= (3, 5, 0):
        T = TypeVar('T')
      else:
        F = TypeVar('F')
    """, """
      from typing import TypeVar

      T = TypeVar('T')""")
class ClassIfTest(parser_test_base.ParserTestBase):
  """Tests for `if` statements nested inside class bodies."""

  # These tests assume that IfTest has already covered the inner workings of
  # peer's functions. Instead, they focus on verifying that if statements
  # under a class allow things that normally appear in a class (constants,
  # functions), and disallow statements that aren't allowed in a class (import,
  # etc).

  def test_conditional_constant(self):
    """Constants are allowed inside a class-level if."""
    self.check("""
      class Foo:
        if sys.version_info == (3, 4, 0):
          x = ... # type: int
        elif sys.version_info >= (3, 5, 0):
          y = ... # type: str
        else:
          z = ... # type: float
    """, """
      class Foo:
          y: str
    """)

  def test_conditional_method(self):
    """Methods are allowed inside a class-level if."""
    self.check("""
      class Foo:
        if sys.version_info == (3, 4, 0):
          def a(self, x: int) -> str: ...
        elif sys.version_info >= (3, 5, 0):
          def b(self, x: int) -> str: ...
        else:
          def c(self, x: int) -> str: ...
    """, """
      class Foo:
          def b(self, x: int) -> str: ...
    """)

  def test_nested(self):
    """Nested if statements are allowed inside a class."""
    self.check("""
      class Foo:
        if sys.version_info > (3, 4, 0):
          if sys.version_info >= (3, 5, 0):
            def b(self, x: int) -> str: ...
    """, """
      class Foo:
          def b(self, x: int) -> str: ...
    """)

  def test_no_import(self):
    """Imports are not allowed inside a class-level if."""
    self.check_error("""
      class Foo:
        if sys.version_info > (3, 4, 0):
          import foo
    """, 3, "Import statements need to be at module level")

  def test_no_class(self):
    """Class definitions inside a false class-level if are dropped."""
    self.check("""
      class Foo:
        if sys.version_info <= (3, 4, 0):
          class Bar: ...
    """, """
      class Foo: ...
    """)

  def test_no_typevar(self):
    """TypeVars are not allowed inside a class-level if."""
    self.check_error("""
      class Foo:
        if sys.version_info > (3, 4, 0):
          T = TypeVar('T')
    """, 3, r"TypeVars need to be defined at module level")
class ConditionTest(parser_test_base.ParserTestBase):
  """Tests for evaluation of sys.version_info/sys.platform conditions."""

  def check_cond(self, condition, expected, **kwargs):
    """Check that `condition` keeps (expected=True) or drops the body.

    kwargs may override the simulated python `version` (default (3, 6, 5))
    and `platform`.
    """
    out = "x: int" if expected else ""
    if "version" not in kwargs:
      kwargs["version"] = (3, 6, 5)
    self.check("""
      if %s:
        x = ... # type: int
      """ % condition, out, **kwargs)

  def check_cond_error(self, condition, message):
    """Check that evaluating `condition` raises an error containing `message`."""
    self.check_error("""
      if %s:
        x = ... # type: int
      """ % condition, 1, message)

  def test_version_eq(self):
    """sys.version_info == tuple."""
    self.check_cond("sys.version_info == (3, 6, 4)", False)
    self.check_cond("sys.version_info == (3, 6, 5)", True)
    self.check_cond("sys.version_info == (3, 6, 6)", False)

  def test_version_ne(self):
    """sys.version_info != tuple."""
    self.check_cond("sys.version_info != (3, 6, 4)", True)
    self.check_cond("sys.version_info != (3, 6, 5)", False)
    self.check_cond("sys.version_info != (3, 6, 6)", True)

  def test_version_lt(self):
    """sys.version_info < tuple."""
    self.check_cond("sys.version_info < (3, 6, 4)", False)
    self.check_cond("sys.version_info < (3, 6, 5)", False)
    self.check_cond("sys.version_info < (3, 6, 6)", True)
    self.check_cond("sys.version_info < (3, 7, 0)", True)

  def test_version_le(self):
    """sys.version_info <= tuple."""
    self.check_cond("sys.version_info <= (3, 6, 4)", False)
    self.check_cond("sys.version_info <= (3, 6, 5)", True)
    self.check_cond("sys.version_info <= (3, 6, 6)", True)
    self.check_cond("sys.version_info <= (3, 7, 0)", True)

  def test_version_gt(self):
    """sys.version_info > tuple."""
    self.check_cond("sys.version_info > (3, 6, 0)", True)
    self.check_cond("sys.version_info > (3, 6, 4)", True)
    self.check_cond("sys.version_info > (3, 6, 5)", False)
    self.check_cond("sys.version_info > (3, 6, 6)", False)

  def test_version_ge(self):
    """sys.version_info >= tuple."""
    self.check_cond("sys.version_info >= (3, 6, 0)", True)
    self.check_cond("sys.version_info >= (3, 6, 4)", True)
    self.check_cond("sys.version_info >= (3, 6, 5)", True)
    self.check_cond("sys.version_info >= (3, 6, 6)", False)

  def test_version_item(self):
    """Indexing into sys.version_info."""
    self.check_cond("sys.version_info[0] == 3", True)

  def test_version_slice(self):
    """Slicing sys.version_info with all slice forms."""
    self.check_cond("sys.version_info[:] == (3, 6, 5)", True)
    self.check_cond("sys.version_info[:2] == (3, 6)", True)
    self.check_cond("sys.version_info[2:] == (5,)", True)
    self.check_cond("sys.version_info[0:1] == (3,)", True)
    self.check_cond("sys.version_info[::] == (3, 6, 5)", True)
    self.check_cond("sys.version_info[1::] == (6, 5)", True)
    self.check_cond("sys.version_info[:2:] == (3, 6)", True)
    self.check_cond("sys.version_info[::-2] == (5, 3)", True)
    self.check_cond("sys.version_info[1:3:] == (6, 5)", True)
    self.check_cond("sys.version_info[1::2] == (6,)", True)
    self.check_cond("sys.version_info[:2:2] == (3,)", True)
    self.check_cond("sys.version_info[3:1:-1] == (5,)", True)

  def test_version_shorter_tuples(self):
    """Comparisons against tuples shorter/longer than the version."""
    self.check_cond("sys.version_info == (3,)", True, version=(3, 0, 0))
    self.check_cond("sys.version_info == (3, 0)", True, version=(3, 0, 0))
    self.check_cond("sys.version_info == (3, 0, 0)", True, version=(3, 0, 0))
    self.check_cond("sys.version_info == (3,)", False, version=(3, 0, 1))
    self.check_cond("sys.version_info == (3, 0)", False, version=(3, 0, 1))
    self.check_cond("sys.version_info > (3,)", True, version=(3, 0, 1))
    self.check_cond("sys.version_info > (3, 0)", True, version=(3, 0, 1))
    self.check_cond("sys.version_info == (3, 0, 0)", True, version=(3,))
    self.check_cond("sys.version_info == (3, 0, 0)", True, version=(3, 0))

  def test_version_slice_shorter_tuples(self):
    """Sliced comparisons against shorter/longer tuples."""
    self.check_cond("sys.version_info[:2] == (3,)", True, version=(3, 0, 1))
    self.check_cond("sys.version_info[:2] == (3, 0)", True, version=(3, 0, 1))
    self.check_cond(
        "sys.version_info[:2] == (3, 0, 0)", True, version=(3, 0, 1))
    self.check_cond("sys.version_info[:2] == (3,)", False, version=(3, 1, 0))
    self.check_cond("sys.version_info[:2] == (3, 0)", False, version=(3, 1, 0))
    self.check_cond("sys.version_info[:2] > (3,)", True, version=(3, 1, 0))
    self.check_cond("sys.version_info[:2] > (3, 0)", True, version=(3, 1, 0))
    self.check_cond(
        "sys.version_info[:2] == (3, 0, 0)", True, version=(3,))
    self.check_cond(
        "sys.version_info[:2] == (3, 0, 0)", True, version=(3, 0))

  def test_version_error(self):
    """Invalid comparisons against sys.version_info are rejected."""
    self.check_cond_error('sys.version_info == "foo"',
                          "sys.version_info must be compared to a tuple of "
                          "integers")
    self.check_cond_error("sys.version_info == (1.2, 3)",
                          "sys.version_info must be compared to a tuple of "
                          "integers")
    self.check_cond_error("sys.version_info[0] == 2.0",
                          "an element of sys.version_info must be compared to "
                          "an integer")
    self.check_cond_error("sys.version_info[0] == (2,)",
                          "an element of sys.version_info must be compared to "
                          "an integer")
    self.check_cond_error("sys.version_info[:2] == (2.0, 7)",
                          "sys.version_info must be compared to a tuple of "
                          "integers")
    self.check_cond_error("sys.version_info[:2] == 2",
                          "sys.version_info must be compared to a tuple of "
                          "integers")
    self.check_cond_error("sys.version_info[42] == 42",
                          "tuple index out of range")

  def test_platform_eq(self):
    """sys.platform equality checks."""
    self.check_cond('sys.platform == "linux"', True)
    self.check_cond('sys.platform == "win32"', False)
    self.check_cond('sys.platform == "foo"', True, platform="foo")

  def test_platform_error(self):
    """Invalid comparisons against sys.platform are rejected."""
    self.check_cond_error("sys.platform == (1, 2, 3)",
                          "sys.platform must be compared to a string")
    self.check_cond_error('sys.platform < "linux"',
                          "sys.platform must be compared using == or !=")
    self.check_cond_error('sys.platform <= "linux"',
                          "sys.platform must be compared using == or !=")
    self.check_cond_error('sys.platform > "linux"',
                          "sys.platform must be compared using == or !=")
    self.check_cond_error('sys.platform >= "linux"',
                          "sys.platform must be compared using == or !=")

  def test_unsupported_condition(self):
    """Only sys.version_info and sys.platform conditions are supported."""
    self.check_cond_error("foo.bar == (1, 2, 3)",
                          "Unsupported condition: 'foo.bar'")
class PropertyDecoratorTest(parser_test_base.ParserTestBase):
  """Tests that cover _parse_signature_as_property()."""

  def test_property_with_type(self):
    """The getter's return type wins over setter parameter types."""
    expected = """
      from typing import Annotated

      class A:
          name: Annotated[str, 'property']
    """
    # The return type of @property is used for the property type.
    self.check("""
      class A:
        @property
        def name(self) -> str:...
    """, expected)
    self.check("""
      class A:
        @name.setter
        def name(self, value: str) -> None: ...
    """, """
      from typing import Annotated, Any

      class A:
          name: Annotated[Any, 'property']
    """)
    self.check("""
      class A:
        @property
        def name(self) -> str:...
        @name.setter
        def name(self, value: str) -> None: ...
    """, expected)
    self.check("""
      class A:
        @property
        def name(self) -> str:...
        @name.setter
        def name(self, value) -> None: ...
    """, expected)
    self.check("""
      class A:
        @property
        def name(self) -> str:...
        @name.setter
        def name(self, value: int) -> None: ...
    """, expected)

  def test_property_decorator_any_type(self):
    """A property with no annotations has type Any."""
    expected = """
      from typing import Annotated, Any

      class A:
          name: Annotated[Any, 'property']
    """
    self.check("""
      class A:
        @property
        def name(self): ...
    """, expected)
    self.check("""
      class A:
        @name.setter
        def name(self, value): ...
    """, expected)
    self.check("""
      class A:
        @name.deleter
        def name(self): ...
    """, expected)
    self.check("""
      class A:
        @name.setter
        def name(self, value): ...
        @name.deleter
        def name(self): ...
    """, expected)

  def test_property_decorator_bad_syntax(self):
    """Wrong arity or placement of property decorators is rejected."""
    self.check_error("""
      class A:
        @property
        def name(self, bad_arg): ...
    """, 1, "@property needs 1 param(s), got 2")
    self.check_error("""
      class A:
        @name.setter
        def name(self): ...
    """, 1, "@name.setter needs 2 param(s), got 1")
    self.check_error("""
      class A:
        @property
        @staticmethod
        def name(self): ...
    """, 2, "Too many decorators for name")
    self.check_error("""
      @property
      def name(self): ...
    """, None, "Module-level functions with property decorators: name")

  def test_property_clash(self):
    """Two getters for the same property are rejected."""
    self.check_error("""
      class A:
        @property
        def name(self) -> str: ...
        @property
        def name(self) -> int: ...
    """, 1, "Invalid property decorators for method `name`")
class MergeSignaturesTest(parser_test_base.ParserTestBase):
  """Tests for merging multiple signatures of the same function/method."""

  def test_property(self):
    """A lone @property getter becomes an Annotated attribute."""
    self.check("""
      class A:
        @property
        def name(self) -> str: ...
    """, """
      from typing import Annotated

      class A:
          name: Annotated[str, 'property']
    """)

  def test_method(self):
    """A single method is preserved as-is."""
    self.check("""
      class A:
        def name(self) -> str: ...
    """)

  def test_merged_method(self):
    """Two same-named defs merge into one function with two signatures."""
    ast = self.check("""
      def foo(x: int) -> str: ...
      def foo(x: str) -> str: ...""",
                     """
      from typing import overload

      @overload
      def foo(x: int) -> str: ...
      @overload
      def foo(x: str) -> str: ...""")
    self.assertEqual(len(ast.functions), 1)
    foo = ast.functions[0]
    self.assertEqual(len(foo.signatures), 2)

  def test_method_and_property_error(self):
    """Mixing a property and a plain method of the same name is an error."""
    self.check_error("""
      class A:
        @property
        def name(self): ...
        def name(self): ...
    """, 1, "Overloaded signatures for name disagree on decorators")

  def test_overloaded_signatures_disagree(self):
    """Overloads must agree on static/class-method decorators."""
    self.check_error("""
      class A:
        @staticmethod
        def foo(x: int): ...
        @classmethod
        def foo(x: str): ...
    """, 1, "Overloaded signatures for foo disagree on decorators")

  def test_classmethod(self):
    """@classmethod sets the method kind."""
    ast = self.check("""
      class A:
        @classmethod
        def foo(x: int) -> str: ...
    """)
    self.assertEqual("classmethod", ast.classes[0].methods[0].kind)

  def test_staticmethod(self):
    """@staticmethod sets the method kind."""
    ast = self.check("""
      class A:
        @staticmethod
        def foo(x: int) -> str: ...
    """)
    self.assertEqual("staticmethod", ast.classes[0].methods[0].kind)

  def test_new(self):
    """__new__ is implicitly a staticmethod."""
    ast = self.check("""
      class A:
        def __new__(self) -> A: ...
    """)
    self.assertEqual("staticmethod", ast.classes[0].methods[0].kind)

  def test_abstractmethod(self):
    """@abstractmethod marks the method abstract but keeps kind 'method'."""
    ast = self.check("""
      class A:
        @abstractmethod
        def foo(x: int) -> str: ...
    """)
    self.assertEqual("method", ast.Lookup("A").Lookup("foo").kind)
    self.assertEqual(True, ast.Lookup("A").Lookup("foo").is_abstract)

  def test_abstractmethod_manysignatures(self):
    """All-abstract overloads stay abstract after merging."""
    ast = self.check("""
      class A:
        @abstractmethod
        def foo(x: int) -> str: ...
        @abstractmethod
        def foo(x: int, y: int) -> str: ...
        @abstractmethod
        def foo(x: int, y: int, z: int) -> str: ...
    """, """
      from typing import overload

      class A:
          @abstractmethod
          @overload
          def foo(x: int) -> str: ...
          @abstractmethod
          @overload
          def foo(x: int, y: int) -> str: ...
          @abstractmethod
          @overload
          def foo(x: int, y: int, z: int) -> str: ...
    """)
    self.assertEqual("method", ast.Lookup("A").Lookup("foo").kind)
    self.assertEqual(True, ast.Lookup("A").Lookup("foo").is_abstract)

  def test_abstractmethod_conflict(self):
    """Overloads must agree on @abstractmethod."""
    self.check_error("""
      class A:
        @abstractmethod
        def foo(x: int) -> str: ...
        def foo(x: int, y: int) -> str: ...
    """, 1, "Overloaded signatures for foo disagree on "
            "abstractmethod decorators")
class AnyTest(parser_test_base.ParserTestBase):
  """Tests for parameterizing and aliasing typing.Any."""

  def test_generic_any(self):
    """Any[...] collapses to plain Any."""
    self.check("""
      from typing import Any
      x = ... # type: Any[int]""",
               """
      from typing import Any
      x: Any""")

  def test_generic_any_alias(self):
    """Aliases of Any stay Any, even when parameterized."""
    self.check("""
      from typing import Any
      Foo = Any
      Bar = Foo[int]
      x = ... # type: Bar[int, str]""",
               """
      from typing import Any
      Foo = Any
      Bar = Any
      x: Any""")
class CanonicalPyiTest(parser_test_base.ParserTestBase):
  """Tests for parser.canonical_pyi()."""

  def test_canonical_version(self):
    """Duplicate defs become overloads; defaults are normalized to `...`."""
    src = textwrap.dedent("""
      from typing import Any
      def foo(x: int = 0) -> Any: ...
      def foo(x: str) -> Any: ...
    """)
    expected = textwrap.dedent("""
      from typing import Any, overload

      @overload
      def foo(x: int = ...) -> Any: ...
      @overload
      def foo(x: str) -> Any: ...
    """).strip()
    self.assertMultiLineEqual(
        parser.canonical_pyi(src, self.python_version), expected)
class TypeMacroTest(parser_test_base.ParserTestBase):
  """Tests for parameterized type aliases ("type macros")."""

  def test_simple(self):
    """An alias with one TypeVar substitutes its parameter."""
    self.check("""
      from typing import List, TypeVar
      Alias = List[List[T]]
      T = TypeVar('T')
      S = TypeVar('S')
      def f(x: Alias[S]) -> S: ...
      def g(x: Alias[str]) -> str: ...""", """
      from typing import List, TypeVar

      Alias = List[List[T]]

      S = TypeVar('S')

      T = TypeVar('T')

      def f(x: List[List[S]]) -> S: ...
      def g(x: List[List[str]]) -> str: ...""")

  def test_partial_replacement(self):
    """Only the TypeVar slots of the alias are substituted."""
    self.check("""
      from typing import Dict, TypeVar
      DictAlias = Dict[int, V]
      V = TypeVar('V')
      def f(x: DictAlias[str]) -> None: ...""", """
      from typing import Dict, TypeVar

      DictAlias = Dict[int, V]

      V = TypeVar('V')

      def f(x: Dict[int, str]) -> None: ...""")

  def test_multiple_parameters(self):
    """An alias may have several TypeVar parameters."""
    self.check("""
      from typing import Dict, List, TypeVar
      Alias = List[Dict[K, V]]
      K = TypeVar('K')
      V = TypeVar('V')
      def f(x: Alias[K, V]) -> Dict[K, V]: ...""", """
      from typing import Dict, List, TypeVar

      Alias = List[Dict[K, V]]

      K = TypeVar('K')

      V = TypeVar('V')

      def f(x: List[Dict[K, V]]) -> Dict[K, V]: ...""")

  def test_no_parameters(self):
    """Using an alias unparameterized fills its TypeVars with Any."""
    self.check("""
      from typing import List, TypeVar
      Alias = List[List[T]]
      T = TypeVar('T')
      def f(x: Alias) -> None: ...""", """
      from typing import Any, List, TypeVar

      Alias = List[List[T]]

      T = TypeVar('T')

      def f(x: List[List[Any]]) -> None: ...""")

  def test_union(self):
    """Aliases of Union types substitute correctly."""
    self.check("""
      from typing import List, TypeVar, Union
      Alias = Union[List[T], List[S]]
      T = TypeVar('T')
      S = TypeVar('S')
      def f(x: Alias[S, T]) -> Union[S, T]: ...""", """
      from typing import List, TypeVar, Union

      Alias = Union[List[T], List[S]]

      S = TypeVar('S')

      T = TypeVar('T')

      def f(x: Union[List[S], List[T]]) -> Union[S, T]: ...""")

  def test_repeated_type_parameter(self):
    """A TypeVar used twice in the alias is substituted in both places."""
    self.check("""
      from typing import Dict, TypeVar
      Alias = Dict[T, T]
      T = TypeVar('T')
      def f(x: Alias[str]) -> None: ...""", """
      from typing import Dict, TypeVar

      Alias = Dict[T, T]

      T = TypeVar('T')

      def f(x: Dict[str, str]) -> None: ...""")

  def test_wrong_parameter_count(self):
    """Parameterizing with the wrong arity is an error."""
    self.check_error("""
      from typing import List, TypeVar
      Alias = List[List[T]]
      T = TypeVar('T')
      def f(x: Alias[T, T]) -> T: ...
    """, 4, "List[List[T]] expected 1 parameters, got 2")

  def test_anystr(self):
    """AnyStr works as an alias parameter."""
    self.check("""
      from typing import AnyStr, List
      Alias = List[AnyStr]
      def f(x: Alias[str]) -> None: ...
    """, """
      from typing import AnyStr, List

      Alias = List[AnyStr]

      def f(x: List[str]) -> None: ...
    """)
class ImportTypeIgnoreTest(parser_test_base.ParserTestBase):
  """Tests that `# type: ignore` on import lines is accepted and dropped."""

  def _parse(self, stub):
    """Dedents and parses a stub, returning the resulting AST."""
    return parser.parse_string(
        textwrap.dedent(stub), python_version=self.python_version)

  def test_import(self):
    self.check("""
        import mod # type: ignore
        def f(x: mod.attr) -> None: ...
    """, """
        import mod
        def f(x: mod.attr) -> None: ...""")

  def test_from_import(self):
    tree = self._parse("""
        from mod import attr # type: ignore
        def f(x: attr) -> None: ...
    """)
    self.assertTrue(tree.Lookup("attr"))
    self.assertTrue(tree.Lookup("f"))

  def test_relative_import(self):
    tree = self._parse("""
        from . import attr # type: ignore
        def f(x: attr) -> None: ...
    """)
    self.assertTrue(tree.Lookup("attr"))
    self.assertTrue(tree.Lookup("f"))

  def test_relative_import_parent(self):
    tree = self._parse("""
        from .. import attr # type: ignore
        def f(x: attr) -> None: ...
    """)
    self.assertTrue(tree.Lookup("attr"))
    self.assertTrue(tree.Lookup("f"))
class LiteralTest(parser_test_base.ParserTestBase):
  """Tests for typing.Literal support in pyi stubs."""

  def test_bool(self):
    self.check("""
        from typing import Literal
        x: Literal[False]
        y: Literal[True]
    """)

  def test_int(self):
    self.check("""
        from typing import Literal
        x: Literal[42]
    """)

  def test_string(self):
    self.check("""
        from typing import Literal
        x: Literal['x']
        y: Literal['']
    """)

  def test_bytestring(self):
    self.check("""
        from typing import Literal
        x: Literal[b'']
        y: Literal[b'']
        z: Literal[b'xyz']
    """)

  def test_unicodestring(self):
    self.check("""
        from typing import Literal
        y: Literal[u'']
        z: Literal[u'xyz']
    """)

  def test_none(self):
    # Literal[None] collapses to plain None.
    self.check("""
        from typing import Literal
        x: Literal[None]
    """, "x: None")

  def test_enum(self):
    # TODO(b/173742489): support enums.
    # Until then, enum-member literals degrade to Any.
    self.check("""
        import enum
        from typing import Literal
        x: Literal[Color.RED]
        class Color(enum.Enum):
          RED: str
    """, """
        import enum
        from typing import Any
        x: Any
        class Color(enum.Enum):
          RED: str
    """)

  def test_multiple_parameters(self):
    # A None member is factored out into Optional[...].
    self.check("""
        from typing import Literal
        x: Literal[True, 0, b"", u"", None]
    """, """
        from typing import Literal, Optional
        x: Optional[Literal[True, 0, b'', u'']]
    """)

  def test_stray_number(self):
    # Bare constants are only legal inside Literal[...].
    self.check_error("""
        from typing import Tuple
        x: Tuple[int, int, 0, int]
    """, 3, "Tuple[_, _, 0, _] not supported")

  def test_stray_string(self):
    self.check_error("""
        from typing import Tuple
        x: Tuple[str, str, '', str]
    """, 3, "Tuple[_, _, '', _] not supported")

  def test_stray_bytestring(self):
    self.check_error("""
        from typing import Tuple
        x: Tuple[str, b'', str, str]
    """, 3, "Tuple[_, b'', _, _] not supported")

  def test_stray_unicodestring(self):
    self.check_error("""
        from typing import Tuple
        x: Tuple[str, u'', str, str]
    """, 3, "Tuple[_, u'', _, _] not supported")

  def test_typing_extensions(self):
    # Literal imported from typing_extensions works the same as typing.
    self.check("""
        from typing_extensions import Literal
        x: Literal[42]
    """)

  def test_unnest(self):
    # Nested Literals (and aliases to Literals) are flattened.
    self.check("""
        from typing import Literal
        MyLiteralAlias = Literal[42]
        x: Literal[MyLiteralAlias, Literal[Literal[True]], None]
        y: Literal[1, Literal[2, Literal[3]]]
    """, """
        from typing import Literal, Optional
        MyLiteralAlias = Literal[42]
        x: Optional[Literal[42, True]]
        y: Literal[1, 2, 3]
    """)

  def test_bad_value(self):
    # Floats are not valid Literal parameters.
    self.check_error("""
        from typing import Literal
        x: Literal[0.0]
    """, 2, "Invalid type `float` in Literal[0.0].")
class TypedDictTest(parser_test_base.ParserTestBase):
  """Tests for TypedDict handling.

  Functional-form TypedDict assignments degrade to Dict[str, Any]; the
  class form keeps the TypedDict base but drops the `total` kwarg.
  """

  def test_assign(self):
    self.check("""
        from typing_extensions import TypedDict
        X = TypedDict('X', {})
    """, """
        from typing import Any, Dict
        from typing_extensions import TypedDict
        X = Dict[str, Any]
    """)

  def test_assign_with_items(self):
    # Field types are discarded; the whole thing becomes Dict[str, Any].
    self.check("""
        from typing_extensions import TypedDict
        X = TypedDict('X', {'a': int, 'b': str})
    """, """
        from typing import Any, Dict
        from typing_extensions import TypedDict
        X = Dict[str, Any]
    """)

  def test_assign_with_kwarg(self):
    self.check("""
        from typing_extensions import TypedDict
        X = TypedDict('X', {}, total=False)
    """, """
        from typing import Any, Dict
        from typing_extensions import TypedDict
        X = Dict[str, Any]
    """)

  def test_trailing_comma(self):
    # A trailing comma after the fields dict parses cleanly.
    self.check("""
        from typing_extensions import TypedDict
        X = TypedDict('X', {
          'a': int,
          'b': str,
        },)
    """, """
        from typing import Any, Dict
        from typing_extensions import TypedDict
        X = Dict[str, Any]
    """)

  def test_kwarg(self):
    # `total` is accepted (and dropped) only on TypedDict subclasses.
    self.check("""
        from typing import TypedDict
        class Foo(TypedDict, total=False): ...
    """, """
        from typing import TypedDict
        class Foo(TypedDict): ...
    """)
    self.check_error("""
        class Foo(object, total=False): ...
    """, 1, "'total' allowed as classdef kwarg only for TypedDict subclasses")

  def test_typing_extensions(self):
    self.check("""
        from typing_extensions import TypedDict
        class Foo(TypedDict, total=False): ...
    """, """
        import typing_extensions
        from typing_extensions import TypedDict
        class Foo(typing_extensions.TypedDict): ...
    """)

  def test_multiple_classdef_kwargs(self):
    # Only `total` is stripped; other classdef kwargs are preserved.
    self.check("""
        from typing import TypedDict
        class Foo(TypedDict, total=False, metaclass=Meta): ...
    """, """
        from typing import TypedDict
        class Foo(TypedDict, metaclass=Meta): ...
    """)
class NewTypeTest(parser_test_base.ParserTestBase):
  """Tests that typing.NewType is lowered to a generated subclass."""

  def test_basic(self):
    self.check("""
        from typing import NewType
        X = NewType('X', int)
    """, """
        X = newtype_X_0
        class newtype_X_0(int):
            def __init__(self, val: int) -> None: ...
    """)

  def test_fullname(self):
    # The fully qualified typing.NewType form is recognized too.
    self.check("""
        import typing
        X = typing.NewType('X', int)
    """, """
        import typing
        X = newtype_X_0
        class newtype_X_0(int):
            def __init__(self, val: int) -> None: ...
    """)
class MethodAliasTest(parser_test_base.ParserTestBase):
  """Tests for aliases to methods (e.g. `f1 = Foo.f`, `f2 = instance.f`).

  An alias via the class keeps the `self` parameter; an alias via an
  instance binds it away.
  """

  def test_normal_method(self):
    self.check("""
        class Foo:
          def f(self, x: int) -> None: ...
        _foo: Foo
        f1 = Foo.f
        f2 = _foo.f
    """, """
        _foo: Foo
        class Foo:
            def f(self, x: int) -> None: ...
        def f1(self, x: int) -> None: ...
        def f2(x: int) -> None: ...
    """)

  def test_classmethod(self):
    # Classmethod aliases never carry the cls parameter.
    self.check("""
        class Foo:
          @classmethod
          def f(cls, x: int) -> None: ...
        _foo: Foo
        f1 = Foo.f
        f2 = _foo.f
    """, """
        _foo: Foo
        class Foo:
            @classmethod
            def f(cls, x: int) -> None: ...
        def f1(x: int) -> None: ...
        def f2(x: int) -> None: ...
    """)

  def test_staticmethod(self):
    self.check("""
        class Foo:
          @staticmethod
          def f(x: int) -> None: ...
        _foo: Foo
        f1 = Foo.f
        f2 = _foo.f
    """, """
        _foo: Foo
        class Foo:
            @staticmethod
            def f(x: int) -> None: ...
        def f1(x: int) -> None: ...
        def f2(x: int) -> None: ...
    """)

  def test_nested_constant(self):
    # Aliasing through an attribute chain (Foo.foo.f) binds self away.
    self.check("""
        class Foo:
          foo: Foo
          def f(self, x: int) -> None: ...
        f = Foo.foo.f
    """, """
        class Foo:
            foo: Foo
            def f(self, x: int) -> None: ...
        def f(x: int) -> None: ...
    """)
class AnnotatedTest(parser_test_base.ParserTestBase):
  """Test typing.Annotated."""

  def test_annotated(self):
    """Plain string metadata round-trips unchanged."""
    self.check("""
        from typing import Annotated
        class Foo:
          x: Annotated[int, 'a', 'b', 'c']
    """)

  def test_annotated_from_extensions(self):
    """Annotated imported from typing_extensions behaves the same."""
    self.check("""
        from typing_extensions import Annotated
        class Foo:
          x: Annotated[int, 'a', 'b', 'c']
    """)

  def test_dict(self):
    """Names inside dict metadata are rendered as quoted strings."""
    self.check("""
        from typing_extensions import Annotated
        class Foo:
          x: Annotated[int, {'a': 'A', 'b': True, 'c': Foo}]
    """, """
        from typing_extensions import Annotated
        class Foo:
            x: Annotated[int, {'a': 'A', 'b': True, 'c': 'Foo'}]
    """)

  def test_call(self):
    """Call metadata is normalized into a tagged-dict representation."""
    self.check("""
        from typing_extensions import Annotated
        class Foo:
          x: Annotated[int, Deprecated("use new api")]
          y: Annotated[int, unit('s', exp=9)]
    """, """
        from typing_extensions import Annotated
        class Foo:
            x: Annotated[int, {'tag': 'Deprecated', 'reason': 'use new api'}]
            y: Annotated[int, {'tag': 'call', 'fn': 'unit', 'posargs': ('s',), 'kwargs': {'exp': 9}}]
    """)
class ErrorTest(test_base.UnitTest):
  """Test parser errors."""

  def test_filename(self):
    """ParseError messages include the file name being parsed."""
    stub = textwrap.dedent("""
        a: int
        a: int
    """)
    with self.assertRaisesRegex(parser.ParseError, "File.*foo.pyi"):
      parser.parse_pyi(stub, "foo.pyi", "foo", (3, 6))

  def test_lineno(self):
    """ParseError messages include the offending line number."""
    stub = textwrap.dedent("""
        class A:
          __slots__ = 0
    """)
    with self.assertRaisesRegex(parser.ParseError, "line 3"):
      parser.parse_pyi(stub, "foo.py", "foo", (3, 6))
class ParamsTest(test_base.UnitTest):
  """Test input parameter handling."""

  def test_feature_version(self):
    """_feature_version maps a version spec to a minor feature version."""
    minor = sys.version_info.minor
    for version, expected in (
        (3, minor),
        ((3,), minor),
        ((3, 7), 7),
        ((3, 8, 2), 8),
    ):
      self.assertEqual(parser._feature_version(version), expected)
class ParamSpecTest(parser_test_base.ParserTestBase):
  """Tests for ParamSpec handling.

  ParamSpec is not fully supported: in Callable[...] positions it is erased
  to `...`, and as a class type parameter it degrades to a plain TypeVar.
  """

  def test_from_typing(self):
    self.check("""
        from typing import Awaitable, Callable, ParamSpec, TypeVar
        P = ParamSpec('P')
        R = TypeVar('R')
        def f(x: Callable[P, R]) -> Callable[P, Awaitable[R]]: ...
    """, """
        from typing import Awaitable, Callable, TypeVar
        R = TypeVar('R')
        def f(x: Callable[..., R]) -> Callable[..., Awaitable[R]]: ...
    """)

  def test_from_typing_extensions(self):
    # Unlike the typing import, the typing_extensions import line survives.
    self.check("""
        from typing import Awaitable, Callable, TypeVar
        from typing_extensions import ParamSpec
        P = ParamSpec('P')
        R = TypeVar('R')
        def f(x: Callable[P, R]) -> Callable[P, Awaitable[R]]: ...
    """, """
        from typing import Awaitable, Callable, TypeVar
        from typing_extensions import ParamSpec
        R = TypeVar('R')
        def f(x: Callable[..., R]) -> Callable[..., Awaitable[R]]: ...
    """)

  def test_custom_generic(self):
    # A ParamSpec used as a class type parameter becomes a TypeVar.
    self.check("""
        from typing import Callable, Generic, ParamSpec, TypeVar
        P = ParamSpec('P')
        T = TypeVar('T')
        class X(Generic[T, P]):
          f: Callable[P, int]
          x: T
    """, """
        from typing import Callable, Generic, TypeVar
        P = TypeVar('P')
        T = TypeVar('T')
        class X(Generic[T, P]):
            f: Callable[..., int]
            x: T
    """)

  def test_use_custom_generic(self):
    self.check("""
        from typing import Callable, Generic, TypeVar
        from typing_extensions import ParamSpec
        _T = TypeVar('_T')
        _P = ParamSpec('_P')
        class Foo(Generic[_P, _T]): ...
        def f(x: Callable[_P, _T]) -> Foo[_P, _T]: ...
    """, """
        from typing import Any, Callable, Generic, TypeVar
        from typing_extensions import ParamSpec
        _P = TypeVar('_P')
        _T = TypeVar('_T')
        class Foo(Generic[_P, _T]): ...
        def f(x: Callable[..., _T]) -> Foo[Any, _T]: ...
    """)

  @test_base.skip("ParamSpec in custom generic classes not supported yet")
  def test_double_brackets(self):
    # Double brackets can be omitted when instantiating a class parameterized
    # with only a single ParamSpec.
    self.check("""
        from typing import Generic, ParamSpec
        P = ParamSpec('P')
        class X(Generic[P]): ...
        def f1(x: X[int, str]) -> None: ...
        def f2(x: X[[int, str]]) -> None: ...
    """, """
        from typing import Generic, ParamSpec
        P = ParamSpec('P')
        class X(Generic[P]): ...
        def f1(x: X[int, str]) -> None: ...
        def f2(x: X[int, str]) -> None: ...
    """)

  def test_paramspec_args(self):
    # P.args / P.kwargs annotations are dropped along with the ParamSpec.
    self.check("""
        from typing import Callable, ParamSpec, TypeVar
        P = ParamSpec('P')
        T = TypeVar('T')
        def f(x: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T: ...
    """, """
        from typing import Callable, TypeVar
        T = TypeVar('T')
        def f(x: Callable[..., T], *args, **kwargs) -> T: ...
    """)
class ConcatenateTest(parser_test_base.ParserTestBase):
  """Tests that typing.Concatenate is erased along with ParamSpec."""

  def test_from_typing(self):
    self.check("""
        from typing import Callable, Concatenate, ParamSpec, TypeVar
        P = ParamSpec('P')
        R = TypeVar('R')
        class X: ...
        def f(x: Callable[Concatenate[X, P], R]) -> Callable[P, R]: ...
    """, """
        from typing import Callable, TypeVar
        R = TypeVar('R')
        class X: ...
        def f(x: Callable[..., R]) -> Callable[..., R]: ...
    """)

  def test_from_typing_extensions(self):
    # typing_extensions imports are kept (and split one-per-line).
    self.check("""
        from typing import Callable, TypeVar
        from typing_extensions import Concatenate, ParamSpec
        P = ParamSpec('P')
        R = TypeVar('R')
        class X: ...
        def f(x: Callable[Concatenate[X, P], R]) -> Callable[P, R]: ...
    """, """
        from typing import Callable, TypeVar
        from typing_extensions import Concatenate
        from typing_extensions import ParamSpec
        R = TypeVar('R')
        class X: ...
        def f(x: Callable[..., R]) -> Callable[..., R]: ...
    """)
class UnionOrTest(parser_test_base.ParserTestBase):
  """Tests that PEP 604 `X | Y` syntax is normalized to Union/Optional."""

  def test_basic(self):
    self.check("""
        def f(x: int | str) -> None: ...
        def g(x: bool | str | float) -> None: ...
        def h(x: str | None) -> None: ...
    """, """
        from typing import Optional, Union
        def f(x: Union[int, str]) -> None: ...
        def g(x: Union[bool, str, float]) -> None: ...
        def h(x: Optional[str]) -> None: ...
    """)
class TypeGuardTest(parser_test_base.ParserTestBase):
  """Tests that TypeGuard[...] return annotations degrade to bool."""

  def test_typing_extensions(self):
    self.check("""
        from typing import List
        from typing_extensions import TypeGuard
        def f(x: List[object]) -> TypeGuard[List[str]]: ...
    """, """
        from typing import List
        from typing_extensions import TypeGuard
        def f(x: List[object]) -> bool: ...
    """)

  def test_typing(self):
    # The typing import of TypeGuard is dropped entirely.
    self.check("""
        from typing import List, TypeGuard
        def f(x: List[object]) -> TypeGuard[List[str]]: ...
    """, """
        from typing import List
        def f(x: List[object]) -> bool: ...
    """)
# Allow running this test module directly.
if __name__ == "__main__":
  unittest.main()
| 27.033355 | 99 | 0.531085 |
0eae9e630e125c6e03d64bb588ef7ebfdb5bf1a5 | 18,575 | py | Python | lpot/adaptor/tf_utils/quantize_graph/quantize_graph_conv.py | deb-intel/LPOTtest | f7b7524c733e581668d15192b69f9d9a7ca5222d | [
"Apache-2.0"
] | null | null | null | lpot/adaptor/tf_utils/quantize_graph/quantize_graph_conv.py | deb-intel/LPOTtest | f7b7524c733e581668d15192b69f9d9a7ca5222d | [
"Apache-2.0"
] | null | null | null | lpot/adaptor/tf_utils/quantize_graph/quantize_graph_conv.py | deb-intel/LPOTtest | f7b7524c733e581668d15192b69f9d9a7ca5222d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from .quantize_graph_common import QuantizeGraphHelper as helper
from .quantize_graph_base import QuantizeNodeBase
class FuseNodeStartWithConv2d(QuantizeNodeBase):
    """Rewrites fusion patterns that start with Conv2D/DepthwiseConv2dNative.

    Each supported pattern (e.g. Conv2D + BiasAdd + Relu) is replaced by the
    corresponding fused quantized op (e.g. QuantizedConv2DWithBiasAndRelu),
    with quantize/requantize/dequantize nodes inserted around it. Rewritten
    nodes are emitted into ``self.output_graph``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Try longer (more specific) patterns first so the largest fusion wins.
        self.sorted_patterns = sorted(self.patterns,
                                      key=lambda i: len(i),
                                      reverse=True)
        # Maps the concatenated op-name pattern to the fusion that applies it.
        self.fusion_mapping = {
            'Conv2DBiasAdd': self.apply_conv_biasadd_fusion,
            'Conv2DBiasAddAddNRelu': self.apply_conv_biasadd_addn_relu_fusion,
            'Conv2DBiasAddAddNRelu6': self.apply_conv_biasadd_addn_relu_fusion,
            'Conv2DBiasAddAddV2Relu': self.apply_conv_biasadd_addn_relu_fusion,
            'Conv2DBiasAddAddV2Relu6': self.apply_conv_biasadd_addn_relu_fusion,
            'Conv2DBiasAddAddRelu': self.apply_conv_biasadd_addn_relu_fusion,
            'Conv2DBiasAddRelu6': self.apply_conv_biasadd_relu_fusion,
            'Conv2DBiasAddRelu': self.apply_conv_biasadd_relu_fusion,
            'Conv2DAddRelu6': self.apply_conv_biasadd_relu_fusion,
            'Conv2DAddRelu': self.apply_conv_biasadd_relu_fusion,
            'DepthwiseConv2dNativeAddRelu6':
                self.apply_conv_biasadd_relu_fusion,
            'DepthwiseConv2dNativeBiasAddRelu6':
                self.apply_conv_biasadd_relu_fusion,
            'Conv2D': self.apply_conv_single_fusion,
            'DepthwiseConv2dNative': self.apply_conv_single_fusion,
        }

    def get_fusion_list(self):
        """Returns the names of all fusion patterns this class can apply."""
        return self.fusion_mapping.keys()

    def apply_conv_single_fusion(self, match_node_name):
        """Quantizes a standalone Conv2D/DepthwiseConv2dNative node.

        Args:
            match_node_name: matched node names; element 0 is the conv node.
        """
        skip_node_name = match_node_name[1:]
        matched_node = self.node_name_mapping[match_node_name[0]]
        _, normal_inputs = self._get_node_input(matched_node.node.name)
        weight_name = normal_inputs[1]
        # TODO this is workaround as the tf 2.1 doesn't support depthwise/conv s8
        # feature.
        if self.enable_s8 and not self._find_relu_node(matched_node.node):
            self.output_graph = self.input_graph
            return

        q_weights_name, q_weights_min_name, q_weights_max_name = \
            self._intel_cpu_quantize_weight_eightbit(matched_node.node.op,
                                                     self.node_name_mapping[weight_name].node,
                                                     self.per_channel)

        # Insert the quantized weights right after the (quantized) input.
        all_input_names = self._add_eightbit_prologue_nodes(matched_node.node.name)
        all_input_names = all_input_names[:1] + [q_weights_name] + all_input_names[1:]
        all_input_names.append(q_weights_min_name)
        all_input_names.append(q_weights_max_name)
        skip_node_name.append(weight_name)

        for _, node in enumerate(self.input_graph.node):
            if node.name in skip_node_name:
                self.logger.debug("skip node {}".format(node.name))
            elif node.name == match_node_name[0]:
                postfix = "_eightbit_quantized_depthwise_conv"
                if node.op == "Conv2D":
                    postfix = "_eightbit_quantized_conv"
                quantized_node_name = node.name + postfix
                if node.op == "Conv2D":
                    quantized_conv_node = helper.create_node(
                        "QuantizedConv2DPerChannel"
                        if self.per_channel else "QuantizedConv2D",
                        quantized_node_name, all_input_names)

                elif node.op == "DepthwiseConv2dNative":
                    quantized_conv_node = helper.create_node(
                        "QuantizedDepthwiseConv2D", quantized_node_name,
                        all_input_names)

                helper.copy_attr(quantized_conv_node, "strides",
                                 node.attr["strides"])
                helper.copy_attr(quantized_conv_node, "padding",
                                 node.attr["padding"])
                if node.op != 'DepthwiseConv2dNative' and "padding_list" in node.attr:
                    helper.copy_attr(quantized_conv_node, "padding_list",
                                     node.attr["padding_list"])
                helper.copy_attr(quantized_conv_node, "dilations",
                                 node.attr["dilations"])
                # quint8 input if a Relu feeds this conv (non-negative range),
                # otherwise signed qint8.
                input_data_type = dtypes.quint8 if self._find_relu_node(
                    node) else dtypes.qint8
                helper.set_attr_dtype(quantized_conv_node, "Tinput",
                                      input_data_type)
                helper.set_attr_dtype(quantized_conv_node, "Tfilter",
                                      dtypes.qint8)
                helper.set_attr_dtype(quantized_conv_node, "out_type",
                                      dtypes.qint32)
                self.add_output_graph_node(quantized_conv_node)
                quantize_down_name = self._add_quantize_down_nodes(
                    node, quantized_node_name, dtypes.qint8)
                self._intel_cpu_add_dequantize_result_node(
                    quantize_down_name, node.name, dtypes.qint8)

            else:
                new_node = node_def_pb2.NodeDef()
                new_node.CopyFrom(node)
                self.add_output_graph_node(new_node)

    def apply_conv_biasadd_relu_fusion(self, match_node_name):
        """Fuse the conv/biasadd/relu pattern.

        Emits a QuantizedConv2DWithBiasAndRelu (or the depthwise variant)
        in place of the three matched nodes.

        Arguments:
            match_node_name: matched node names in pattern order
                (conv, biasadd, relu).
        """
        skip_node_name = match_node_name[1:]
        matched_node = self.node_name_mapping[match_node_name[0]]
        control_inputs, normal_inputs = self._get_node_input(matched_node.node.name)
        weight_name = normal_inputs[1]

        q_weights_name, q_weights_min_name, q_weights_max_name = \
            self._intel_cpu_quantize_weight_eightbit(
                matched_node.node.op, self.node_name_mapping[weight_name].node, self.per_channel)

        all_input_names = self._add_eightbit_prologue_nodes(matched_node.node.name)
        all_input_names = all_input_names[:1] + [q_weights_name] + all_input_names[1:]
        all_input_names.append(q_weights_min_name)
        all_input_names.append(q_weights_max_name)
        skip_node_name.append(weight_name)

        for _, node in enumerate(self.input_graph.node):
            if node.name in skip_node_name:
                self.logger.debug("skip node {}".format(node.name))
            elif node.name == match_node_name[0]:
                self.logger.debug("apply_conv_biasadd_relu_fusion")
                postfix = "_eightbit_quantized_depthwise_conv"
                if node.op == "Conv2D":
                    postfix = "_eightbit_quantized_conv"
                quantized_node_name = node.name + postfix

                bias_node_name = self.node_name_mapping[match_node_name[1]].node.input[1]
                relu_node_name = match_node_name[2]
                # Relu6 caps the requantize range at 6.0 below.
                is_relu6 = self.node_name_mapping[relu_node_name].node.op == "Relu6"
                quantized_node_input_names = all_input_names[:2] + \
                    [bias_node_name] + all_input_names[2:] + control_inputs

                quantized_conv_node = helper.create_node(
                    "QuantizedConv2DWithBiasAndRelu" if node.op == "Conv2D"
                    else "QuantizedDepthwiseConv2DWithBiasAndRelu",
                    quantized_node_name, quantized_node_input_names)
                helper.copy_attr(quantized_conv_node, "strides", node.attr["strides"])
                helper.copy_attr(quantized_conv_node, "padding", node.attr["padding"])
                if node.op != 'DepthwiseConv2dNative' and "padding_list" in node.attr:
                    helper.copy_attr(quantized_conv_node, "padding_list",
                                     node.attr["padding_list"])
                helper.copy_attr(quantized_conv_node, "dilations", node.attr["dilations"])
                input_data_type = dtypes.quint8 if self._find_relu_node(
                    node) else dtypes.qint8
                helper.set_attr_dtype(quantized_conv_node, "Tinput", input_data_type)
                helper.set_attr_dtype(quantized_conv_node, "Tfilter", dtypes.qint8)
                helper.set_attr_dtype(quantized_conv_node, "out_type", dtypes.qint32)
                self.add_output_graph_node(quantized_conv_node)
                # Relu output is non-negative, so requantize to quint8.
                quantize_down_name = self._add_quantize_down_nodes(
                    node, quantized_node_name, dtypes.quint8, is_relu6)
                self._intel_cpu_add_dequantize_result_node(
                    quantize_down_name, relu_node_name)
            else:
                new_node = node_def_pb2.NodeDef()
                new_node.CopyFrom(node)
                self.add_output_graph_node(new_node)

    def apply_conv_biasadd_fusion(self, match_node_name):
        """Fuses conv + biasadd into QuantizedConv2DWithBias (no activation).

        Without a following Relu the output may be negative, so the result
        is requantized to signed qint8.
        """
        skip_node_name = match_node_name[1:]
        matched_node = self.node_name_mapping[match_node_name[0]]
        control_inputs, normal_inputs = self._get_node_input(
            matched_node.node.name)
        weight_name = normal_inputs[1]

        q_weights_name, q_weights_min_name, q_weights_max_name = \
            self._intel_cpu_quantize_weight_eightbit(
                matched_node.node.op, self.node_name_mapping[weight_name].node, self.per_channel)

        all_input_names = self._add_eightbit_prologue_nodes(matched_node.node.name)
        all_input_names = all_input_names[:1] + [q_weights_name] + all_input_names[1:]
        all_input_names.append(q_weights_min_name)
        all_input_names.append(q_weights_max_name)
        skip_node_name.append(weight_name)

        for _, node in enumerate(self.input_graph.node):
            if node.name in skip_node_name:
                pass
            elif node.name == match_node_name[0]:
                self.logger.debug("matched node {} with input {}".format(
                    node.name, node.input))

                self.logger.debug("apply_conv_biasadd_fusion")

                quantized_node_name = node.name + "_eightbit_quantized_conv"
                bias_node_name = self.node_name_mapping[
                    match_node_name[1]].node.input[1]
                quantized_node_input_names = all_input_names[:2] + [
                    bias_node_name
                ] + all_input_names[2:] + control_inputs

                quantized_conv_node = helper.create_node(
                    "QuantizedConv2DWithBias", quantized_node_name,
                    quantized_node_input_names)
                helper.copy_attr(quantized_conv_node, "strides",
                                 node.attr["strides"])
                helper.copy_attr(quantized_conv_node, "padding",
                                 node.attr["padding"])
                if "padding_list" in node.attr:
                    helper.copy_attr(quantized_conv_node, "padding_list",
                                     node.attr["padding_list"])
                helper.copy_attr(quantized_conv_node, "dilations",
                                 node.attr["dilations"])
                input_data_type = dtypes.quint8 if self._find_relu_node(
                    node) else dtypes.qint8
                helper.set_attr_dtype(quantized_conv_node, "Tinput",
                                      input_data_type)
                helper.set_attr_dtype(quantized_conv_node, "Tfilter",
                                      dtypes.qint8)
                helper.set_attr_dtype(quantized_conv_node, "out_type",
                                      dtypes.qint32)

                self.add_output_graph_node(quantized_conv_node)
                requantize_type = dtypes.qint8

                quantize_down_name = self._add_quantize_down_nodes(
                    node, quantized_node_name, requantize_type, False)
                self._intel_cpu_add_dequantize_result_node(
                    quantize_down_name, match_node_name[1], requantize_type)
            else:
                new_node = node_def_pb2.NodeDef()
                new_node.CopyFrom(node)
                self.add_output_graph_node(new_node)

    def apply_conv_biasadd_addn_relu_fusion(self, match_node_name):
        """Fuses conv + biasadd + (Add/AddN/AddV2) + relu.

        The residual-sum input of the Add node becomes the extra `summand`
        input of QuantizedConv2DWithBiasSumAndRelu.
        """
        skip_node_name = match_node_name[1:]
        matched_node = self.node_name_mapping[match_node_name[0]]
        control_inputs, normal_inputs = self._get_node_input(
            matched_node.node.name)
        weight_name = normal_inputs[1]
        # NOTE(review): weight_node is unused below — candidate for removal.
        weight_node = self.node_name_mapping[weight_name].node

        q_weights_name, q_weights_min_name, q_weights_max_name = \
            self._intel_cpu_quantize_weight_eightbit(
                matched_node.node.op, self.node_name_mapping[weight_name].node, self.per_channel)

        all_input_names = self._add_eightbit_prologue_nodes(matched_node.node.name)
        all_input_names = all_input_names[:1] + [q_weights_name] + all_input_names[1:]
        all_input_names.append(q_weights_min_name)
        all_input_names.append(q_weights_max_name)
        skip_node_name.append(weight_name)

        for _, node in enumerate(self.input_graph.node):
            if node.name in skip_node_name:
                self.logger.debug("skip node {}".format(node.name))
            elif node.name == match_node_name[0]:
                self.logger.debug("matched node {} with input {}".format(
                    node.name, node.input))

                self.logger.debug("apply_conv_biasadd_addn_relu_fusion")

                quantized_node_name = node.name + "_eightbit_quantized_conv"
                bias_node_name = self.node_name_mapping[
                    match_node_name[1]].node.input[1]
                relu_node_name = match_node_name[3]
                is_relu6 = self.node_name_mapping[
                    relu_node_name].node.op == "Relu6"
                # The Add node's *other* input (not the biasadd output) is the
                # residual summand.
                sum_index = 1 if match_node_name[1] == self.node_name_mapping[
                    match_node_name[2]].node.input[0] else 0
                quantized_node_input_names = all_input_names[:2] + [
                    bias_node_name
                ] + all_input_names[2:] + [
                    self.node_name_mapping[
                        match_node_name[2]].node.input[sum_index]
                ] + control_inputs

                quantized_conv_node = helper.create_node(
                    "QuantizedConv2DWithBiasSumAndRelu", quantized_node_name,
                    quantized_node_input_names)

                helper.copy_attr(quantized_conv_node, "strides",
                                 node.attr["strides"])
                helper.copy_attr(quantized_conv_node, "padding",
                                 node.attr["padding"])
                if "padding_list" in node.attr:
                    helper.copy_attr(quantized_conv_node, "padding_list",
                                     node.attr["padding_list"])
                helper.copy_attr(quantized_conv_node, "dilations",
                                 node.attr["dilations"])
                input_data_type = dtypes.quint8 if self._find_relu_node(
                    node) else dtypes.qint8
                helper.set_attr_dtype(quantized_conv_node, "Tinput",
                                      input_data_type)
                helper.set_attr_dtype(quantized_conv_node, "Tfilter",
                                      dtypes.qint8)
                helper.set_attr_dtype(quantized_conv_node, "out_type",
                                      dtypes.qint32)
                self.add_output_graph_node(quantized_conv_node)

                quantize_down_name = self._add_quantize_down_nodes(
                    node, quantized_node_name, dtypes.quint8, is_relu6)
                self._intel_cpu_add_dequantize_result_node(
                    quantize_down_name, relu_node_name)
            else:
                new_node = node_def_pb2.NodeDef()
                new_node.CopyFrom(node)
                self.add_output_graph_node(new_node)

    def get_longest_fuse(self):
        """Returns the longest matching (rule, node names) without rewriting."""
        self._get_op_list()

        matched_rule, matched_node_name = self._is_match(self.sorted_patterns)
        return matched_rule, matched_node_name

    def apply_the_transform(self):
        """Finds the longest pattern match and applies its fusion.

        Returns:
            The rewritten GraphDef, or the unmodified input graph when no
            pattern (or no known fusion) matches.
        """
        self._get_op_list()
        matched_rule, matched_node_name = self._is_match(self.sorted_patterns)
        if matched_node_name:
            self.output_graph = graph_pb2.GraphDef()
            fusion_name = ''.join(matched_rule)
            if fusion_name in self.fusion_mapping:
                if fusion_name.find('Conv2DAddRelu') != -1:
                    # The fused op only supports a 1-D (bias-like) Add
                    # operand; otherwise fall back to quantizing the conv
                    # alone.
                    for input_name in self.node_name_mapping[matched_node_name[1]].node.input:
                        input_node_name = helper.node_name_from_input(input_name)
                        if input_node_name != matched_node_name[0]:
                            add_const_input_node = self.node_name_mapping[input_node_name].node
                            add_node_content = tensor_util.MakeNdarray(
                                add_const_input_node.attr["value"].tensor)
                            if add_node_content.ndim != 1:
                                fusion_name = 'Conv2D'
                                matched_node_name = matched_node_name[:1]
                self.fusion_mapping[fusion_name](matched_node_name)
            else:
                self.logger.info("Unknown match {}".format(fusion_name))
                if self.remove_redundant_quant_flag:
                    self.input_graph = self.remove_redundant_quantization(self.input_graph)
                return self.input_graph

            self.input_graph = self.output_graph
            self._reset_output_node_maps()
            if self.remove_redundant_quant_flag:
                self.output_graph = self.remove_redundant_quantization(self.output_graph)
            return self.output_graph

        self.logger.debug("No more match, exit...")
        return self.input_graph
| 50.613079 | 97 | 0.612167 |
ee63bc96f7318941c3e4638fdf0fe076edf90f7d | 5,366 | py | Python | official/legacy/bert/run_squad.py | mcasanova1445/models | 37be0fdb4abccca633bb3199a4e6f3f71cd174d9 | [
"Apache-2.0"
] | 1 | 2022-03-19T18:05:56.000Z | 2022-03-19T18:05:56.000Z | official/legacy/bert/run_squad.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 8 | 2020-05-19T00:52:30.000Z | 2020-06-04T23:57:20.000Z | official/legacy/bert/run_squad.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 2 | 2021-10-07T04:47:04.000Z | 2021-12-18T04:18:19.000Z | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0 in TF 2.x."""
import json
import os
import time
# Import libraries
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
from official.legacy.bert import configs as bert_configs
from official.legacy.bert import run_squad_helper
from official.nlp.data import squad_lib as squad_lib_wp
from official.nlp.tools import tokenization
from official.utils.misc import keras_utils
flags.DEFINE_string('vocab_file', None,
                    'The vocabulary file that the BERT model was trained on.')

# More flags can be found in run_squad_helper.
run_squad_helper.define_common_squad_flags()

# Parsed command-line flags, shared by every function in this module.
FLAGS = flags.FLAGS
def train_squad(strategy,
                input_meta_data,
                custom_callbacks=None,
                run_eagerly=False,
                init_checkpoint=None,
                sub_model_export_name=None):
  """Run bert squad training.

  Args:
    strategy: tf.distribute.Strategy used for training.
    input_meta_data: dict of metadata about the input (from the preprocessed
      meta-data file).
    custom_callbacks: optional list of Keras callbacks.
    run_eagerly: whether to run the training loop eagerly.
    init_checkpoint: checkpoint to initialize from; falls back to
      FLAGS.init_checkpoint when not given.
    sub_model_export_name: name under which to export the sub model.
  """
  bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
  init_checkpoint = init_checkpoint or FLAGS.init_checkpoint
  run_squad_helper.train_squad(strategy, input_meta_data, bert_config,
                               custom_callbacks, run_eagerly, init_checkpoint,
                               sub_model_export_name=sub_model_export_name)
def predict_squad(strategy, input_meta_data):
  """Makes predictions for the squad dataset.

  Builds a WordPiece tokenizer from FLAGS.vocab_file and delegates to
  run_squad_helper.predict_squad.
  """
  bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  run_squad_helper.predict_squad(
      strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)
def eval_squad(strategy, input_meta_data):
  """Evaluate on the squad dataset.

  Returns:
    The eval metrics dict produced by run_squad_helper.eval_squad
    (includes a 'final_f1' entry, read by main()).
  """
  bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  eval_metrics = run_squad_helper.eval_squad(
      strategy, input_meta_data, tokenizer, bert_config, squad_lib_wp)
  return eval_metrics
def export_squad(model_export_path, input_meta_data):
  """Exports a trained model as a `SavedModel` for inference.

  Args:
    model_export_path: a string specifying the path to the SavedModel directory.
    input_meta_data: dictionary containing meta data about input and model.

  Raises:
    Export path is not specified, got an empty string or None.
  """
  bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
  run_squad_helper.export_squad(model_export_path, input_meta_data, bert_config)
def main(_):
  """Entry point: dispatches export / train / predict / eval per FLAGS.mode."""
  # Apply gin overrides before anything reads the configuration.
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
  with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
    input_meta_data = json.loads(reader.read().decode('utf-8'))
  # 'export_only' needs no distribution strategy; handle it and bail out early.
  if FLAGS.mode == 'export_only':
    export_squad(FLAGS.model_export_path, input_meta_data)
    return
  # Configures cluster spec for multi-worker distribution strategy.
  if FLAGS.num_gpus > 0:
    _ = distribute_utils.configure_cluster(FLAGS.worker_hosts, FLAGS.task_index)
  strategy = distribute_utils.get_distribution_strategy(
      distribution_strategy=FLAGS.distribution_strategy,
      num_gpus=FLAGS.num_gpus,
      all_reduce_alg=FLAGS.all_reduce_alg,
      tpu_address=FLAGS.tpu)
  # Substring matching lets combined modes like 'train_and_predict' work.
  if 'train' in FLAGS.mode:
    if FLAGS.log_steps:
      custom_callbacks = [keras_utils.TimeHistory(
          batch_size=FLAGS.train_batch_size,
          log_steps=FLAGS.log_steps,
          logdir=FLAGS.model_dir,
      )]
    else:
      custom_callbacks = None
    train_squad(
        strategy,
        input_meta_data,
        custom_callbacks=custom_callbacks,
        run_eagerly=FLAGS.run_eagerly,
        sub_model_export_name=FLAGS.sub_model_export_name,
    )
  if 'predict' in FLAGS.mode:
    predict_squad(strategy, input_meta_data)
  if 'eval' in FLAGS.mode:
    eval_metrics = eval_squad(strategy, input_meta_data)
    f1_score = eval_metrics['final_f1']
    logging.info('SQuAD eval F1-score: %f', f1_score)
    summary_dir = os.path.join(FLAGS.model_dir, 'summaries', 'eval')
    summary_writer = tf.summary.create_file_writer(summary_dir)
    with summary_writer.as_default():
      # TODO(lehou): write to the correct step number.
      tf.summary.scalar('F1-score', f1_score, step=0)
      summary_writer.flush()
    # Also write eval_metrics to json file.
    squad_lib_wp.write_to_json_files(
        eval_metrics, os.path.join(summary_dir, 'eval_metrics.json'))
    # NOTE(review): presumably gives remote filesystems time to finish the
    # writes before the process exits — confirm whether still needed.
    time.sleep(60)
if __name__ == '__main__':
flags.mark_flag_as_required('bert_config_file')
flags.mark_flag_as_required('model_dir')
app.run(main)
| 36.013423 | 80 | 0.745434 |
f3c7df145a1168ff3cce3a3d1f03a55cde28ce0e | 659 | py | Python | incompressible/__init__.py | mojiastonybrook/pyro2 | 050eacac7fcf4d5fc28e9fa356c445777fa7585c | [
"BSD-3-Clause"
] | null | null | null | incompressible/__init__.py | mojiastonybrook/pyro2 | 050eacac7fcf4d5fc28e9fa356c445777fa7585c | [
"BSD-3-Clause"
] | null | null | null | incompressible/__init__.py | mojiastonybrook/pyro2 | 050eacac7fcf4d5fc28e9fa356c445777fa7585c | [
"BSD-3-Clause"
] | null | null | null | """
The pyro solver for incompressible flow. This implements a second-order
approximate projection method. The general flow is:
* create the limited slopes of u and v (in both directions)
* get the advective velocities through a piecewise linear Godunov
method
* enforce the divergence constraint on the velocities through a
projection (the MAC projection)
* recompute the interface states using the new advective velocity
* update U in time to get the provisional velocity field
* project the final velocity to enforce the divergence constraint.
The projections are done using multigrid
"""
__all__ = ["simulation"]
from .simulation import *
| 26.36 | 73 | 0.783005 |
7ce8cafbaaba3efb2f0ea9ca9513e3936655d9fa | 9,768 | py | Python | train_b7_ns_aa_original_large_crop_100k.py | HyeRyeongSong/SWmaestro11_TrustNet_EfficientNet-B7_Ensemble | ddd89c4c022eafad215d0858fc2bcc17a8dca8a7 | [
"MIT"
] | 98 | 2020-06-08T20:00:49.000Z | 2022-03-30T10:16:25.000Z | train_b7_ns_aa_original_large_crop_100k.py | HyeRyeongSong/SWmaestro11_TrustNet_EfficientNet-B7_Ensemble | ddd89c4c022eafad215d0858fc2bcc17a8dca8a7 | [
"MIT"
] | 5 | 2020-06-22T12:41:00.000Z | 2022-03-15T14:44:22.000Z | train_b7_ns_aa_original_large_crop_100k.py | HyeRyeongSong/SWmaestro11_TrustNet_EfficientNet-B7_Ensemble | ddd89c4c022eafad215d0858fc2bcc17a8dca8a7 | [
"MIT"
] | 23 | 2020-06-12T19:07:56.000Z | 2021-11-19T02:44:29.000Z | import yaml
import os
import random
import tqdm
import numpy as np
from PIL import Image
import torch
from torch import distributions
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import ffmpeg
from albumentations import ImageOnlyTransform
from albumentations import SmallestMaxSize, PadIfNeeded, HorizontalFlip, Normalize, Compose, RandomCrop
from albumentations.pytorch import ToTensor
from efficientnet_pytorch import EfficientNet
from timm.data.transforms_factory import transforms_imagenet_train
from datasets import TrackPairDataset
from extract_tracks_from_videos import TRACK_LENGTH, TRACKS_ROOT
from generate_track_pairs import TRACK_PAIRS_FILE_NAME
SEED = 30
BATCH_SIZE = 8
TRAIN_INDICES = [9, 13, 17, 21, 25, 29, 33, 37]
INITIAL_LR = 0.005
MOMENTUM = 0.9
WEIGHT_DECAY = 1e-4
NUM_WORKERS = 8
NUM_WARMUP_ITERATIONS = 100
SNAPSHOT_FREQUENCY = 1000
OUTPUT_FOLDER_NAME = 'efficientnet-b7_ns_aa-original-mstd0.5_large_crop_100k'
SNAPSHOT_NAME_TEMPLATE = 'snapshot_{}.pth'
MAX_ITERS = 100000
FPS_RANGE = (15, 30)
SCALE_RANGE = (0.25, 1)
CRF_RANGE = (17, 40)
TUNE_VALUES = ['film', 'animation', 'grain', 'stillimage', 'fastdecode', 'zerolatency']
CROP_HEIGHT = 224
CROP_WIDTH = 192
PRETRAINED_WEIGHTS_PATH = 'external_data/noisy_student_efficientnet-b7.pth'
SNAPSHOTS_ROOT = 'snapshots'
LOGS_ROOT = 'logs'
class TrackTransform(object):
    """Randomized ffmpeg-based degradation for extracted face-track frames.

    `get_params` samples re-encoding parameters (or None to skip), and
    `__call__` re-encodes a directory of numbered PNG frames through libx264
    with those parameters, returning the decoded RGB frames.
    """

    def __init__(self, fps_range, scale_range, crf_range, tune_values):
        self.fps_range = fps_range
        self.scale_range = scale_range
        self.crf_range = crf_range
        self.tune_values = tune_values

    def get_params(self, src_fps, src_height, src_width):
        """Sample (fps, height, width, crf, tune), or None (50%) to skip.

        The sequence of `random` calls matches the original implementation so
        seeded runs stay reproducible.
        """
        if random.random() > 0.5:
            return None
        dst_fps = random.randrange(*self.fps_range) if random.random() > 0.5 else src_fps
        scale = random.uniform(*self.scale_range) if random.random() > 0.5 else 1.0
        # Force even dimensions; libx264 requires them for yuv420p.
        dst_height = round(scale * src_height) // 2 * 2
        dst_width = round(scale * src_width) // 2 * 2
        return (dst_fps, dst_height, dst_width,
                random.randrange(*self.crf_range),
                random.choice(self.tune_values))

    def __call__(self, track_path, src_fps, dst_fps, dst_height, dst_width, crf, tune):
        """Encode the PNG sequence to H.264 and decode it back to raw RGB.

        Returns a uint8 array of shape (num_frames, dst_height, dst_width, 3).
        """
        encoded, _ = (
            ffmpeg
            .input(os.path.join(track_path, '%d.png'), framerate=src_fps, start_number=0)
            .filter('fps', fps=dst_fps)
            .filter('scale', dst_width, dst_height)
            .output('pipe:', format='h264', vcodec='libx264', crf=crf, tune=tune)
            .run(capture_stdout=True, quiet=True)
        )
        decoded, _ = (
            ffmpeg
            .input('pipe:', format='h264')
            .output('pipe:', format='rawvideo', pix_fmt='rgb24')
            .run(capture_stdout=True, input=encoded, quiet=True)
        )
        return np.frombuffer(decoded, dtype=np.uint8).reshape(-1, dst_height, dst_width, 3)
class VisionTransform(ImageOnlyTransform):
    """Wraps a torchvision/timm-style transform for use in an albumentations
    pipeline.

    Args:
        transform: the wrapped callable.
        is_tensor: if True the image is passed through unchanged; otherwise it
            is converted to a PIL.Image first and back to a numpy array after.
        always_apply, p: standard albumentations probability controls.
    """

    def __init__(
        self, transform, is_tensor=True, always_apply=False, p=1.0
    ):
        super(VisionTransform, self).__init__(always_apply, p)
        self.transform = transform
        self.is_tensor = is_tensor

    def apply(self, image, **params):
        if self.is_tensor:
            return self.transform(image)
        else:
            return np.array(self.transform(Image.fromarray(image)))

    def get_transform_init_args_names(self):
        # Bug fix: the original returned ("transform") which is just the bare
        # string "transform" (parentheses without a comma do not make a tuple),
        # so albumentations' serialization would iterate its characters and
        # look up bogus attributes. Return a real tuple of init-arg names.
        return ("transform", "is_tensor")
def set_global_seed(seed):
    """Seed Python's `random`, NumPy and PyTorch (CPU and all CUDA devices)
    so that runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def prepare_cudnn(deterministic=None, benchmark=None):
    """Configure cuDNN's determinism and benchmark flags.

    Arguments left as None fall back to the CUDNN_DETERMINISTIC /
    CUDNN_BENCHMARK environment variables; both default to "True".
    """
    # https://pytorch.org/docs/stable/notes/randomness.html#cudnn
    if deterministic is None:
        deterministic = os.environ.get("CUDNN_DETERMINISTIC", "True") == "True"
    # https://discuss.pytorch.org/t/how-should-i-disable-using-cudnn-in-my-code/38053/4
    if benchmark is None:
        benchmark = os.environ.get("CUDNN_BENCHMARK", "True") == "True"
    torch.backends.cudnn.deterministic = deterministic
    torch.backends.cudnn.benchmark = benchmark
def main():
    """Train the EfficientNet-B7 real/fake classifier on track pairs."""
    # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
    # input and deprecated in PyYAML >= 5.1; config.yaml is local here, but
    # consider yaml.safe_load.
    with open('config.yaml', 'r') as f:
        config = yaml.load(f)
    set_global_seed(SEED)
    prepare_cudnn(deterministic=True, benchmark=True)
    # Single-logit head: the model regresses the "fakeness" probability.
    model = EfficientNet.from_name('efficientnet-b7', override_params={'num_classes': 1})
    state = torch.load(PRETRAINED_WEIGHTS_PATH, map_location=lambda storage, loc: storage)
    # Drop the pretrained classifier head; its shape does not match num_classes=1.
    state.pop('_fc.weight')
    state.pop('_fc.bias')
    res = model.load_state_dict(state, strict=False)
    assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
    model = model.cuda()
    normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # Only the middle (rand-augment) stage of the timm pipeline is reused.
    _, rand_augment, _ = transforms_imagenet_train((CROP_HEIGHT, CROP_WIDTH), auto_augment='original-mstd0.5',
                                                   separate=True)
    train_dataset = TrackPairDataset(os.path.join(config['ARTIFACTS_PATH'], TRACKS_ROOT),
                                     os.path.join(config['ARTIFACTS_PATH'], TRACK_PAIRS_FILE_NAME),
                                     TRAIN_INDICES,
                                     track_length=TRACK_LENGTH,
                                     track_transform=TrackTransform(FPS_RANGE, SCALE_RANGE, CRF_RANGE, TUNE_VALUES),
                                     image_transform=Compose([
                                         SmallestMaxSize(CROP_WIDTH),
                                         PadIfNeeded(CROP_HEIGHT, CROP_WIDTH),
                                         HorizontalFlip(),
                                         RandomCrop(CROP_HEIGHT, CROP_WIDTH),
                                         VisionTransform(rand_augment, is_tensor=False, p=0.5),
                                         normalize,
                                         ToTensor()
                                     ]), sequence_mode=False)
    print('Train dataset size: {}.'.format(len(train_dataset)))
    # Warmup phase optimizes only the freshly initialized head; afterwards the
    # full optimizer takes over the whole network.
    warmup_optimizer = torch.optim.SGD(model._fc.parameters(), INITIAL_LR, momentum=MOMENTUM,
                                       weight_decay=WEIGHT_DECAY, nesterov=True)
    full_optimizer = torch.optim.SGD(model.parameters(), INITIAL_LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY,
                                     nesterov=True)
    # Linear learning-rate decay to zero over MAX_ITERS.
    full_lr_scheduler = torch.optim.lr_scheduler.LambdaLR(full_optimizer,
                                                          lambda iteration: (MAX_ITERS - iteration) / MAX_ITERS)
    snapshots_root = os.path.join(config['ARTIFACTS_PATH'], SNAPSHOTS_ROOT, OUTPUT_FOLDER_NAME)
    os.makedirs(snapshots_root)
    log_root = os.path.join(config['ARTIFACTS_PATH'], LOGS_ROOT, OUTPUT_FOLDER_NAME)
    os.makedirs(log_root)
    writer = SummaryWriter(log_root)
    iteration = 0
    if iteration < NUM_WARMUP_ITERATIONS:
        print('Start {} warmup iterations'.format(NUM_WARMUP_ITERATIONS))
        # Freeze the backbone (eval mode keeps BatchNorm statistics fixed);
        # only the head is trainable during warmup.
        model.eval()
        model._fc.train()
        for param in model.parameters():
            param.requires_grad = False
        for param in model._fc.parameters():
            param.requires_grad = True
        optimizer = warmup_optimizer
    else:
        print('Start without warmup iterations')
        model.train()
        optimizer = full_optimizer
        max_lr = max(param_group["lr"] for param_group in full_optimizer.param_groups)
        writer.add_scalar('train/max_lr', max_lr, iteration)
    epoch = 0
    # Beta(0.5, 0.5) targets: the same coefficient mixes the images and serves
    # as the soft regression label.
    fake_prob_dist = distributions.beta.Beta(0.5, 0.5)
    while True:
        epoch += 1
        print('Epoch {} is in progress'.format(epoch))
        loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS, drop_last=True)
        for samples in tqdm.tqdm(loader):
            iteration += 1
            fake_input_tensor = torch.cat(samples['fake']).cuda()
            real_input_tensor = torch.cat(samples['real']).cuda()
            target_fake_prob = fake_prob_dist.sample((len(fake_input_tensor),)).float().cuda()
            fake_weight = target_fake_prob.view(-1, 1, 1, 1)
            # Pixel-space mixup between the paired real and fake crops.
            input_tensor = (1.0 - fake_weight) * real_input_tensor + fake_weight * fake_input_tensor
            pred = model(input_tensor).flatten()
            loss = F.binary_cross_entropy_with_logits(pred, target_fake_prob)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if iteration > NUM_WARMUP_ITERATIONS:
                full_lr_scheduler.step()
                max_lr = max(param_group["lr"] for param_group in full_optimizer.param_groups)
                writer.add_scalar('train/max_lr', max_lr, iteration)
            writer.add_scalar('train/loss', loss.item(), iteration)
            if iteration == NUM_WARMUP_ITERATIONS:
                print('Stop warmup iterations')
                # Unfreeze everything and switch to the full-network optimizer.
                model.train()
                for param in model.parameters():
                    param.requires_grad = True
                optimizer = full_optimizer
            if iteration % SNAPSHOT_FREQUENCY == 0:
                snapshot_name = SNAPSHOT_NAME_TEMPLATE.format(iteration)
                snapshot_path = os.path.join(snapshots_root, snapshot_name)
                print('Saving snapshot to {}'.format(snapshot_path))
                torch.save(model.state_dict(), snapshot_path)
            if iteration >= MAX_ITERS:
                print('Stop training due to maximum iteration exceeded')
                return
if __name__ == '__main__':
main()
| 37.860465 | 120 | 0.629709 |
7d6c79a0923031a9c5c48b30f33fcb364cfb28fa | 4,228 | py | Python | cvat/apps/annotation/pascal_voc.py | godlikejay/cvat | 50c40ba70dd8f890478068f29ce3de1057ec848c | [
"MIT"
] | 2 | 2020-01-10T08:50:50.000Z | 2020-01-23T06:11:11.000Z | cvat/apps/annotation/pascal_voc.py | godlikejay/cvat | 50c40ba70dd8f890478068f29ce3de1057ec848c | [
"MIT"
] | 29 | 2020-01-28T23:08:18.000Z | 2022-03-12T00:05:33.000Z | cvat/apps/annotation/pascal_voc.py | maitreyamaity/CVAT-SIM-TEST | 6b97145c8f4584d9ad40a4b6541424955e272e42 | [
"MIT"
] | 1 | 2020-03-04T15:43:35.000Z | 2020-03-04T15:43:35.000Z | # Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
# Annotation-format descriptor consumed by CVAT's annotation framework: it
# advertises one ZIP dumper and one ZIP loader, dispatched to the `dump` and
# `load` handler functions defined below.
format_spec = {
    "name": "PASCAL VOC",
    "dumpers": [
        {
            "display_name": "{name} {format} {version}",
            "format": "ZIP",
            "version": "1.0",
            "handler": "dump"
        },
    ],
    "loaders": [
        {
            "display_name": "{name} {format} {version}",
            "format": "ZIP",
            "version": "1.0",
            "handler": "load"
        },
    ],
}
def load(file_object, annotations):
    """Imports PASCAL VOC annotations from an uploaded archive.

    Extracts the archive referenced by `file_object` to a temp directory,
    walks it for .xml files, and registers every <object> bounding box with
    the CVAT `annotations` accumulator.
    """
    from pyunpack import Archive
    import os
    from tempfile import TemporaryDirectory

    def parse_xml_file(annotation_file):
        # One VOC XML file describes exactly one image/frame.
        import xml.etree.ElementTree as ET
        root = ET.parse(annotation_file).getroot()
        frame_number = annotations.match_frame(root.find('filename').text)

        for obj_tag in root.iter('object'):
            bbox_tag = obj_tag.find("bndbox")
            label = obj_tag.find('name').text
            xmin = float(bbox_tag.find('xmin').text)
            ymin = float(bbox_tag.find('ymin').text)
            xmax = float(bbox_tag.find('xmax').text)
            ymax = float(bbox_tag.find('ymax').text)
            # Optional VOC tags; default to 0 when absent.
            # NOTE(review): when present the value stays a *string* while the
            # fallback is the int 0 — confirm downstream tolerates the mix.
            truncated = obj_tag.find('truncated')
            truncated = truncated.text if truncated is not None else 0
            difficult = obj_tag.find('difficult')
            difficult = difficult.text if difficult is not None else 0

            annotations.add_shape(annotations.LabeledShape(
                type='rectangle',
                frame=frame_number,
                label=label,
                points=[xmin, ymin, xmax, ymax],
                occluded=False,
                attributes=[
                    annotations.Attribute('truncated', truncated),
                    annotations.Attribute('difficult', difficult),
                ],
            ))

    archive_file = getattr(file_object, 'name')
    with TemporaryDirectory() as tmp_dir:
        Archive(archive_file).extractall(tmp_dir)

        for dirpath, _, filenames in os.walk(tmp_dir):
            for _file in filenames:
                if '.xml' == os.path.splitext(_file)[1]:
                    parse_xml_file(os.path.join(dirpath, _file))
def dump(file_object, annotations):
    """Exports CVAT annotations as a ZIP of per-image PASCAL VOC XML files.

    Only rectangle shapes are exported (VOC supports axis-aligned boxes
    only); `truncated` and `difficult` shape attributes map to the
    corresponding VOC object flags.
    """
    from pascal_voc_writer import Writer
    import os
    from zipfile import ZipFile
    from tempfile import TemporaryDirectory

    with TemporaryDirectory() as out_dir:
        with ZipFile(file_object, 'w') as output_zip:
            for frame_annotation in annotations.group_by_frame():
                image_name = frame_annotation.name
                width = frame_annotation.width
                height = frame_annotation.height
                writer = Writer(image_name, width, height)
                # Blank out the writer's template fields — presumably to keep
                # server-side absolute paths out of the XML; confirm.
                writer.template_parameters['path'] = ''
                writer.template_parameters['folder'] = ''

                for shape in frame_annotation.labeled_shapes:
                    # Skip polygons/polylines/points: VOC has no equivalent.
                    if shape.type != "rectangle":
                        continue

                    label = shape.label
                    xtl = shape.points[0]
                    ytl = shape.points[1]
                    xbr = shape.points[2]
                    ybr = shape.points[3]

                    difficult = 0
                    truncated = 0
                    for attribute in shape.attributes:
                        if attribute.name == 'truncated' and 'true' == attribute.value.lower():
                            truncated = 1
                        elif attribute.name == 'difficult' and 'true' == attribute.value.lower():
                            difficult = 1

                    writer.addObject(
                        name=label,
                        xmin=xtl,
                        ymin=ytl,
                        xmax=xbr,
                        ymax=ybr,
                        truncated=truncated,
                        difficult=difficult,
                    )

                # One XML per image, named after the image's stem.
                anno_name = os.path.basename('{}.{}'.format(os.path.splitext(image_name)[0], 'xml'))
                anno_file = os.path.join(out_dir, anno_name)
                writer.save(anno_file)
                output_zip.write(filename=anno_file, arcname=anno_name)
35a074381429779418976b7a989eb96fdcb90160 | 15,482 | py | Python | tests/test_m2m.py | arjones6/elixir | b9c185dc03f087f9299a0f030e94eeafa1edd655 | [
"MIT"
] | null | null | null | tests/test_m2m.py | arjones6/elixir | b9c185dc03f087f9299a0f030e94eeafa1edd655 | [
"MIT"
] | null | null | null | tests/test_m2m.py | arjones6/elixir | b9c185dc03f087f9299a0f030e94eeafa1edd655 | [
"MIT"
] | null | null | null | """
test many to many relationships
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from sqlalchemy.orm import configure_mappers
from elixir import *
import elixir
#-----------
class TestManyToMany(object):
def setup(self):
metadata.bind = 'sqlite://'
def teardown(self):
cleanup_all(True)
def test_simple(self):
class A(Entity):
using_options(shortnames=True)
name = Field(String(60))
as_ = ManyToMany('A')
bs_ = ManyToMany('B')
class B(Entity):
using_options(shortnames=True)
name = Field(String(60))
as_ = ManyToMany('A')
setup_all(True)
configure_mappers()
# check m2m table was generated correctly
m2m_table = A.bs_.property.secondary
assert m2m_table.name in metadata.tables
# check column names
m2m_cols = m2m_table.columns
assert 'a_id' in m2m_cols
assert 'b_id' in m2m_cols
# check selfref m2m table column names were generated correctly
m2m_cols = A.as_.property.secondary.columns
assert 'as__id' in m2m_cols
assert 'inverse_id' in m2m_cols
# check the relationships work as expected
b1 = B(name='b1', as_=[A(name='a1')])
session.commit()
session.close()
a = A.query.one()
b = B.query.one()
assert a in b.as_
assert b in a.bs_
def test_table_kwargs(self):
class A(Entity):
bs_ = ManyToMany('B', table_kwargs={'info': {'test': True}})
class B(Entity):
as_ = ManyToMany('A')
setup_all(True)
configure_mappers()
assert A.bs_.property.secondary.info['test'] is True
def test_table_default_kwargs(self):
options_defaults['table_options'] = {'info': {'test': True}}
class A(Entity):
bs_ = ManyToMany('B')
class B(Entity):
as_ = ManyToMany('A')
setup_all(True)
configure_mappers()
options_defaults['table_options'] = {}
assert A.bs_.property.secondary.info['test'] is True
assert A.table.info['test'] is True
assert B.table.info['test'] is True
def test_custom_global_column_nameformat(self):
# this needs to be done before declaring the classes
elixir.options.M2MCOL_NAMEFORMAT = elixir.options.OLD_M2MCOL_NAMEFORMAT
class A(Entity):
bs_ = ManyToMany('B')
class B(Entity):
as_ = ManyToMany('A')
setup_all(True)
# revert to original format
elixir.options.M2MCOL_NAMEFORMAT = elixir.options.NEW_M2MCOL_NAMEFORMAT
# check m2m table was generated correctly
configure_mappers()
m2m_table = A.bs_.property.secondary
assert m2m_table.name in metadata.tables
# check column names
m2m_cols = m2m_table.columns
assert '%s_id' % A.table.name in m2m_cols
assert '%s_id' % B.table.name in m2m_cols
def test_alternate_column_formatter(self):
# this needs to be done before declaring the classes
elixir.options.M2MCOL_NAMEFORMAT = \
elixir.options.ALTERNATE_M2MCOL_NAMEFORMAT
class A(Entity):
as_ = ManyToMany('A')
bs_ = ManyToMany('B')
class B(Entity):
as_ = ManyToMany('A')
setup_all(True)
configure_mappers()
# revert to original format
elixir.options.M2MCOL_NAMEFORMAT = elixir.options.NEW_M2MCOL_NAMEFORMAT
# check m2m table column names were generated correctly
m2m_cols = A.bs_.property.secondary.columns
assert 'as__id' in m2m_cols
assert 'bs__id' in m2m_cols
# check selfref m2m table column names were generated correctly
m2m_cols = A.as_.property.secondary.columns
assert 'as__id' in m2m_cols
assert 'inverse_id' in m2m_cols
def test_upgrade_rename_col(self):
elixir.options.M2MCOL_NAMEFORMAT = elixir.options.OLD_M2MCOL_NAMEFORMAT
class A(Entity):
using_options(shortnames=True)
name = Field(String(20))
links_to = ManyToMany('A')
is_linked_from = ManyToMany('A')
bs_ = ManyToMany('B')
class B(Entity):
using_options(shortnames=True)
name = Field(String(20))
as_ = ManyToMany('A')
setup_all(True)
a = A(name='a1', links_to=[A(name='a2')])
session.commit()
session.close()
del A
del B
# do not drop the tables, that's the whole point!
cleanup_all()
# simulate a renaming of columns (as given by the migration aid)
# 'a_id1' to 'is_linked_from_id'.
# 'a_id2' to 'links_to_id'.
conn = metadata.bind.connect()
conn.execute("ALTER TABLE a_links_to__a_is_linked_from RENAME TO temp")
conn.execute("CREATE TABLE a_links_to__a_is_linked_from ("
"is_linked_from_id INTEGER NOT NULL, "
"links_to_id INTEGER NOT NULL, "
"PRIMARY KEY (is_linked_from_id, links_to_id), "
"CONSTRAINT a_fk1 FOREIGN KEY(is_linked_from_id) "
"REFERENCES a (id), "
"CONSTRAINT a_fk2 FOREIGN KEY(links_to_id) "
"REFERENCES a (id))")
conn.execute("INSERT INTO a_links_to__a_is_linked_from "
"(is_linked_from_id, links_to_id) "
"SELECT a_id1, a_id2 FROM temp")
conn.close()
# ...
elixir.options.M2MCOL_NAMEFORMAT = elixir.options.NEW_M2MCOL_NAMEFORMAT
# elixir.options.MIGRATION_TO_07_AID = True
class A(Entity):
using_options(shortnames=True)
name = Field(String(20))
links_to = ManyToMany('A')
is_linked_from = ManyToMany('A')
bs_ = ManyToMany('B')
class B(Entity):
using_options(shortnames=True)
name = Field(String(20))
as_ = ManyToMany('A')
setup_all()
a1 = A.get_by(name='a1')
assert len(a1.links_to) == 1
assert not a1.is_linked_from
a2 = a1.links_to[0]
assert a2.name == 'a2'
assert not a2.links_to
assert a2.is_linked_from == [a1]
def test_upgrade_local_colname(self):
elixir.options.M2MCOL_NAMEFORMAT = elixir.options.OLD_M2MCOL_NAMEFORMAT
class A(Entity):
using_options(shortnames=True)
name = Field(String(20))
links_to = ManyToMany('A')
is_linked_from = ManyToMany('A')
bs_ = ManyToMany('B')
class B(Entity):
using_options(shortnames=True)
name = Field(String(20))
as_ = ManyToMany('A')
setup_all(True)
a = A(name='a1', links_to=[A(name='a2')])
session.commit()
session.close()
del A
del B
# do not drop the tables, that's the whole point!
cleanup_all()
# ...
elixir.options.M2MCOL_NAMEFORMAT = elixir.options.NEW_M2MCOL_NAMEFORMAT
# elixir.options.MIGRATION_TO_07_AID = True
class A(Entity):
using_options(shortnames=True)
name = Field(String(20))
links_to = ManyToMany('A', local_colname='a_id1')
is_linked_from = ManyToMany('A', local_colname='a_id2')
bs_ = ManyToMany('B')
class B(Entity):
using_options(shortnames=True)
name = Field(String(20))
as_ = ManyToMany('A')
setup_all()
a1 = A.get_by(name='a1')
assert len(a1.links_to) == 1
assert not a1.is_linked_from
a2 = a1.links_to[0]
assert a2.name == 'a2'
assert not a2.links_to
assert a2.is_linked_from == [a1]
def test_manual_column_format(self):
class A(Entity):
using_options(tablename='aye')
name = Field(String(60))
bs_ = ManyToMany('B', column_format='%(entity)s_%(key)s')
class B(Entity):
using_options(tablename='bee')
name = Field(String(60))
as_ = ManyToMany('A', column_format='%(entity)s_%(key)s')
setup_all(True)
# check column names were generated correctly
configure_mappers()
m2m_cols = A.bs_.property.secondary.columns
assert 'a_id' in m2m_cols
assert 'b_id' in m2m_cols
# check the relationships work as expected
b1 = B(name='b1', as_=[A(name='a1')])
session.commit()
session.close()
a = A.query.one()
b = B.query.one()
assert a in b.as_
assert b in a.bs_
def test_multi_pk_in_target(self):
class A(Entity):
key1 = Field(Integer, primary_key=True, autoincrement=False)
key2 = Field(String(40), primary_key=True)
bs_ = ManyToMany('B')
class B(Entity):
name = Field(String(60))
as_ = ManyToMany('A')
setup_all(True)
b1 = B(name='b1', as_=[A(key1=10, key2='a1')])
session.commit()
session.close()
a = A.query.one()
b = B.query.one()
assert a in b.as_
assert b in a.bs_
def test_multi(self):
class A(Entity):
name = Field(String(100))
rel1 = ManyToMany('B')
rel2 = ManyToMany('B')
class B(Entity):
name = Field(String(20), primary_key=True)
setup_all(True)
b1 = B(name='b1')
a1 = A(name='a1', rel1=[B(name='b2'), b1],
rel2=[B(name='b3'), B(name='b4'), b1])
session.commit()
session.close()
a1 = A.query.one()
b1 = B.get_by(name='b1')
b2 = B.get_by(name='b2')
assert b1 in a1.rel1
assert b1 in a1.rel2
assert b2 in a1.rel1
def test_selfref(self):
class Person(Entity):
using_options(shortnames=True)
name = Field(String(30))
friends = ManyToMany('Person')
setup_all(True)
barney = Person(name="Barney")
homer = Person(name="Homer", friends=[barney])
barney.friends.append(homer)
session.commit()
session.close()
homer = Person.get_by(name="Homer")
barney = Person.get_by(name="Barney")
assert homer in barney.friends
assert barney in homer.friends
m2m_cols = Person.friends.property.secondary.columns
assert 'friends_id' in m2m_cols
assert 'inverse_id' in m2m_cols
def test_bidirectional_selfref(self):
class Person(Entity):
using_options(shortnames=True)
name = Field(String(30))
friends = ManyToMany('Person')
is_friend_of = ManyToMany('Person')
setup_all(True)
barney = Person(name="Barney")
homer = Person(name="Homer", friends=[barney])
barney.friends.append(homer)
session.commit()
session.close()
homer = Person.get_by(name="Homer")
barney = Person.get_by(name="Barney")
assert homer in barney.friends
assert barney in homer.friends
m2m_cols = Person.friends.property.secondary.columns
assert 'friends_id' in m2m_cols
assert 'is_friend_of_id' in m2m_cols
def test_has_and_belongs_to_many(self):
class A(Entity):
has_field('name', String(100))
has_and_belongs_to_many('bs', of_kind='B')
class B(Entity):
has_field('name', String(100), primary_key=True)
setup_all(True)
b1 = B(name='b1')
a1 = A(name='a1', bs=[B(name='b2'), b1])
a2 = A(name='a2', bs=[B(name='b3'), b1])
a3 = A(name='a3')
session.commit()
session.close()
a1 = A.get_by(name='a1')
a2 = A.get_by(name='a2')
a3 = A.get_by(name='a3')
b1 = B.get_by(name='b1')
b2 = B.get_by(name='b2')
assert b1 in a1.bs
assert b2 in a1.bs
assert b1 in a2.bs
assert not a3.bs
def test_local_and_remote_colnames(self):
class A(Entity):
using_options(shortnames=True)
key1 = Field(Integer, primary_key=True, autoincrement=False)
key2 = Field(String(40), primary_key=True)
bs_ = ManyToMany('B', local_colname=['foo', 'bar'],
remote_colname="baz")
class B(Entity):
using_options(shortnames=True)
name = Field(String(60))
as_ = ManyToMany('A', remote_colname=['foo', 'bar'],
local_colname="baz")
setup_all(True)
b1 = B(name='b1', as_=[A(key1=10, key2='a1')])
session.commit()
session.close()
a = A.query.one()
b = B.query.one()
assert a in b.as_
assert b in a.bs_
def test_manual_table_auto_joins(self):
from sqlalchemy import Table, Column, ForeignKey, ForeignKeyConstraint
a_b = Table('a_b', metadata,
Column('a_key1', Integer),
Column('a_key2', String(40)),
Column('b_id', None, ForeignKey('b.id')),
ForeignKeyConstraint(['a_key1', 'a_key2'],
['a.key1', 'a.key2']))
class A(Entity):
using_options(shortnames=True)
key1 = Field(Integer, primary_key=True, autoincrement=False)
key2 = Field(String(40), primary_key=True)
bs_ = ManyToMany('B', table=a_b)
class B(Entity):
using_options(shortnames=True)
name = Field(String(60))
as_ = ManyToMany('A', table=a_b)
setup_all(True)
b1 = B(name='b1', as_=[A(key1=10, key2='a1')])
session.commit()
session.close()
a = A.query.one()
b = B.query.one()
assert a in b.as_
assert b in a.bs_
def test_manual_table_manual_joins(self):
from sqlalchemy import Table, Column, and_
a_b = Table('a_b', metadata,
Column('a_key1', Integer),
Column('a_key2', String(40)),
Column('b_id', String(60)))
class A(Entity):
using_options(shortnames=True)
key1 = Field(Integer, primary_key=True, autoincrement=False)
key2 = Field(String(40), primary_key=True)
bs_ = ManyToMany('B', table=a_b,
primaryjoin=lambda: and_(A.key1 == a_b.c.a_key1,
A.key2 == a_b.c.a_key2),
secondaryjoin=lambda: B.id == a_b.c.b_id,
foreign_keys=[a_b.c.a_key1, a_b.c.a_key2,
a_b.c.b_id])
class B(Entity):
using_options(shortnames=True)
name = Field(String(60))
setup_all(True)
a1 = A(key1=10, key2='a1', bs_=[B(name='b1')])
session.commit()
session.close()
a = A.query.one()
b = B.query.one()
assert b in a.bs_
| 28.776952 | 79 | 0.556323 |
276e172d04846263afd6f01bdd05ebb4c74d8916 | 12,086 | py | Python | edgelm/fairseq/file_utils.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | 1 | 2021-11-07T00:30:05.000Z | 2021-11-07T00:30:05.000Z | edgelm/fairseq/file_utils.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | edgelm/fairseq/file_utils.py | guotao0628/DeepNet | 1ae74d8b44d715bf67c7d64a8efafff4b7c7937a | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with the local dataset cache.
This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_.
and `huggingface <https://github.com/huggingface>`_.
"""
import fnmatch
import json
import logging
import os
import shutil
import tarfile
import tempfile
from functools import partial, wraps
from hashlib import sha256
from io import open
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv(
"TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")
)
)
default_cache_path = os.path.join(torch_cache_home, "pytorch_fairseq")
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_FAIRSEQ_CACHE = Path(os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path))
except (AttributeError, ImportError):
PYTORCH_FAIRSEQ_CACHE = os.getenv("PYTORCH_FAIRSEQ_CACHE", default_cache_path)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def load_archive_file(archive_file):
    """Resolve `archive_file` (path or URL) via the cache and, if it is a
    tar archive rather than a directory, extract it in place.

    Returns the path to the resolved directory, or None when the name could
    not be resolved.
    """
    # redirect to the cache, if necessary
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=None)
    except EnvironmentError:
        logger.info(
            "Archive name '{}' was not found in archive name list. "
            "We assumed '{}' was a path or URL but couldn't find any file "
            "associated to this path or URL.".format(
                archive_file,
                archive_file,
            )
        )
        return None

    if resolved_archive_file == archive_file:
        logger.info("loading archive file {}".format(archive_file))
    else:
        logger.info(
            "loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file
            )
        )

    # Extract archive to temp dir and replace .tar.bz2 if necessary
    tempdir = None
    if not os.path.isdir(resolved_archive_file):
        tempdir = tempfile.mkdtemp()
        logger.info(
            "extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir
            )
        )
        # Compression mode is taken from the *original* name's extension
        # (e.g. "r:gz" for .gz), since the cached file name is a hash.
        ext = os.path.splitext(archive_file)[1][1:]
        # NOTE(review): tarfile.extractall performs no path-traversal
        # filtering; archives are assumed trusted here.
        with tarfile.open(resolved_archive_file, "r:" + ext) as archive:
            top_dir = os.path.commonprefix(archive.getnames())
            archive.extractall(tempdir)
        # Replace the cached archive file with its extracted top directory.
        os.remove(resolved_archive_file)
        shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
        shutil.rmtree(tempdir)

    return resolved_archive_file
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the URL's, delimited
    by a period.
    """
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename = "{}.{}".format(filename, sha256(etag.encode("utf-8")).hexdigest())
    return filename
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise EnvironmentError("file {} not found".format(cache_path))

    # The sidecar .json file records the origin URL and ETag.
    meta_path = cache_path + ".json"
    if not os.path.exists(meta_path):
        raise EnvironmentError("file {} not found".format(meta_path))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata["url"], metadata["etag"]
def cached_path_from_pm(url_or_filename):
    """
    Tries to cache the specified URL using PathManager class.
    Returns the cached path if success otherwise failure.
    """
    # fairseq may not be importable here; any failure means "no result".
    try:
        from fairseq.file_io import PathManager
    except Exception:
        return None
    try:
        return PathManager.get_local_path(url_or_filename)
    except Exception:
        return None
def cached_path(url_or_filename, cache_dir=None):
    """Resolve `url_or_filename` to a local file path.

    If it is a URL (http/https/s3), download the file into `cache_dir`
    (defaulting to ``PYTORCH_FAIRSEQ_CACHE``) and return the cached path.
    If it is an existing local path, return it unchanged.  For any other
    scheme, fall back to fairseq's PathManager before giving up.

    Raises:
        EnvironmentError: for a plain path that does not exist.
        ValueError: when the input is neither a URL nor a local path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    parsed = urlparse(url_or_filename)
    if parsed.scheme in ("http", "https", "s3"):
        # URL, so get it from the cache (downloading if necessary)
        return get_from_cache(url_or_filename, cache_dir)
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        return url_or_filename
    elif parsed.scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Unknown scheme: let PathManager try before failing.
        # (Renamed local from `cached_path` — it used to shadow this
        # function's own name.)
        local_path = cached_path_from_pm(url_or_filename)
        if local_path:
            return local_path
        # Something unknown
        raise ValueError(
            "unable to parse {} as a URL or as a local path".format(url_or_filename)
        )
def split_s3_path(url):
    """Split a full s3 path into its (bucket_name, key) components.

    Raises ``ValueError`` when the URL lacks either a bucket or a key.
    """
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    # urlparse leaves a single leading '/' on the key; drop it.
    s3_key = parsed.path[1:] if parsed.path.startswith("/") else parsed.path
    return parsed.netloc, s3_key
def s3_request(func):
    """Decorator mapping S3 404 ClientErrors to friendlier EnvironmentErrors.

    A missing object becomes ``EnvironmentError("file <url> not found")``;
    every other ClientError is re-raised untouched.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        # Imported lazily so botocore is only required when S3 is used.
        from botocore.exceptions import ClientError
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            code = int(exc.response["Error"]["Code"])
            if code != 404:
                raise
            raise EnvironmentError("file {} not found".format(url))
    return wrapper
@s3_request
def s3_etag(url):
    """Check ETag on S3 object."""
    # boto3 is imported lazily so fairseq does not require it unless an
    # s3:// URL is actually used.
    import boto3
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    # NOTE(review): e_tag appears to be fetched lazily by boto3 on
    # attribute access — confirm before relying on timing here.
    return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
    """Pull a file directly from S3."""
    # boto3 is imported lazily so it is only required for s3:// URLs.
    import boto3
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    # Streams the object body into the open file handle `temp_file`.
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def request_wrap_timeout(func, url):
    """Call `func` (a requests operation taking `timeout=`) with escalating timeouts.

    Retries on requests Timeout errors with the schedule 10/20/40/60/60
    seconds, logging each failed attempt; raises ``RuntimeError`` once every
    attempt has timed out.  `url` is used only for log/error messages.
    """
    import requests
    timeouts = (10, 20, 40, 60, 60)
    for attempt, timeout in enumerate(timeouts):
        try:
            return func(timeout=timeout)
        except requests.exceptions.Timeout as e:
            logger.warning(
                "Request for %s timed-out (attempt %d). Retrying with a timeout of %d secs",
                url,
                attempt,
                timeout,
                exc_info=e,
            )
    raise RuntimeError(f"Unable to fetch file {url}")
def http_get(url, temp_file):
    """Stream `url` over HTTP(S) into the open file `temp_file`.

    Shows a tqdm progress bar; retries timeouts via request_wrap_timeout.
    """
    import requests
    from tqdm import tqdm
    req = request_wrap_timeout(partial(requests.get, url, stream=True), url)
    content_length = req.headers.get("Content-Length")
    # Content-Length can be absent (e.g. chunked encoding); tqdm then runs
    # without a known total.
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        try:
            import requests
            response = request_wrap_timeout(
                partial(requests.head, url, allow_redirects=True), url
            )
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except RuntimeError:
            # request_wrap_timeout exhausted its retries: treat as offline
            # and fall back to the last cached copy below.
            etag = None
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # If we don't have a connection (etag is None) and can't identify the file
    # try to get the last downloaded one
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + ".*")
        # ".json" files are metadata sidecars, not cached payloads.
        matching_files = list(filter(lambda s: not s.endswith(".json"), matching_files))
        if matching_files:
            cache_path = os.path.join(cache_dir, matching_files[-1])
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, "wb") as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info("creating metadata file for %s", cache_path)
            meta = {"url": url, "etag": etag}
            meta_path = cache_path + ".json"
            with open(meta_path, "w") as meta_file:
                output_string = json.dumps(meta)
                meta_file.write(output_string)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
def read_set_from_file(filename):
    """
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line; trailing whitespace is
    stripped from each line.
    """
    with open(filename, "r", encoding="utf-8") as file_:
        return {line.rstrip() for line in file_}
def get_file_extension(path, dot=True, lower=True):
    """Return `path`'s extension, optionally without the leading dot and/or lowercased."""
    _, extension = os.path.splitext(path)
    if not dot:
        extension = extension[1:]
    return extension.lower() if lower else extension
| 32.664865 | 93 | 0.622621 |
1e63fc6e158164c55b43074d59af49c3d9a77371 | 2,663 | py | Python | tests/test_library_matching.py | iomega/spec2vec_gnps_data_analysis | 2695727edd25ed2a1760adbd48a11307b473faa2 | [
"Apache-2.0"
] | 16 | 2020-06-26T16:42:29.000Z | 2021-11-13T14:12:32.000Z | tests/test_library_matching.py | iomega/spec2vec_gnps_data_analysis | 2695727edd25ed2a1760adbd48a11307b473faa2 | [
"Apache-2.0"
] | 8 | 2020-06-17T11:58:48.000Z | 2021-10-07T13:26:48.000Z | tests/test_library_matching.py | iomega/spec2vec_gnps_data_analysis | 2695727edd25ed2a1760adbd48a11307b473faa2 | [
"Apache-2.0"
] | 7 | 2020-06-26T16:40:49.000Z | 2022-02-08T16:34:16.000Z | import os
import sys
import numpy as np
import pytest
from matchms import Spectrum
from spec2vec import Spec2Vec
from spec2vec import SpectrumDocument
#path_root = os.path.dirname(os.path.__file__)
path_root = os.path.dirname(os.getcwd())
sys.path.insert(0, os.path.join(path_root, "custom_functions"))
from custom_functions.library_search import library_matching
def test_library_matching():
spectrum_1 = Spectrum(mz=np.array([100, 150, 200.]),
intensities=np.array([0.7, 0.2, 0.1]),
metadata={'precursor_mz': 500.5})
spectrum_2 = Spectrum(mz=np.array([100, 140, 190.]),
intensities=np.array([0.4, 0.2, 0.1]),
metadata={'precursor_mz': 500.11})
spectrum_3 = Spectrum(mz=np.array([100, 140, 190.]),
intensities=np.array([0.3, 0.5, 0.2]),
metadata={'precursor_mz': 501.1})
spectrum_4 = Spectrum(mz=np.array([97.5, 137.5, 200.]),
intensities=np.array([0.8, 0.5, 0.4]),
metadata={'precursor_mz': 500.1})
documents_library = [SpectrumDocument(s) for s in [spectrum_1, spectrum_2, spectrum_3]]
documents_query = [SpectrumDocument(spectrum_4)]
found_matches = library_matching(documents_query, documents_library,
model=None,
presearch_based_on=["precursor_mz"],
include_scores=["cosine", "modcosine"],
ignore_non_annotated=False,
intensity_weighting_power=0.5,
allowed_missing_percentage=5.0,
cosine_tol=2.0,
mass_tolerance=2.0,
mass_tolerance_type="Dalton")
scores_cosine = found_matches[0].values[:,0]
expected_scores_cosine = np.array([0.05312127152597306, 0.0, 0.0])
scores_modcos = found_matches[0].values[:,2]
expected_scores_modcos = np.array([0.05312127152597306, 0.0, 0.7757282939050968])
assert list(scores_cosine) == [pytest.approx(x, 1e-6) for x in expected_scores_cosine], \
"Expected different scores."
assert list(scores_modcos) == [pytest.approx(x, 1e-6) for x in expected_scores_modcos], \
"Expected different mod. cosine scores."
assert np.all(found_matches[0].values[:,3] == np.array([1, 0, 2])), \
"Expected different number of matches"
assert np.all(found_matches[0].values[:,4]), "Expected all mass matches to be True"
| 52.215686 | 93 | 0.580173 |
f69ff551a4c2c8d053d8942ca2baf3f6b630b8c7 | 4,582 | py | Python | seqData.py | KristinHenry/BioInformatics | 5bfa495e07a7021f90d684d998a84456e1d95df6 | [
"MIT"
] | 1 | 2015-06-26T16:44:42.000Z | 2015-06-26T16:44:42.000Z | seqData.py | KristinHenry/BioInformatics | 5bfa495e07a7021f90d684d998a84456e1d95df6 | [
"MIT"
] | null | null | null | seqData.py | KristinHenry/BioInformatics | 5bfa495e07a7021f90d684d998a84456e1d95df6 | [
"MIT"
] | null | null | null | #
# Takes in a genetic sequence and outputs the amino acid sequence encoded
# Converts a genetic sequence into amino acid sequences (6 frames).
#
# ToDo: enable setting DNA or RNA input.
#
class seqData() :
def __init__ (self, infile=None) :
print "file: ", infile
if infile :
self.fasta = self.fastaDict()
self.file = infile
self.text = self.getText(infile)
print self.text
self.text = self.removeSpace(self.text)
print self.text
self.rna = self.dnaToRNA()
print self.rna
self.getFrames()
def getFrames(self):
self.frame1 = self.toFastaPos(0)
self.frame2 = self.toFastaPos(1)
self.frame3 = self.toFastaPos(2)
self.frame4 = self.toFastaNeg(0)
self.frame5 = self.toFastaNeg(1)
self.frame6 = self.toFastaNeg(2)
print(self.frame1)
print(self.frame2)
print(self.frame3)
print(self.frame4)
print(self.frame5)
print(self.frame6)
def getText(self, doc):
f = open(doc)
text = ''
for line in f.readlines():
text = text + line
#text = f.readlines()[0]
f.close()
return text
def removeSpace(self, txt):
text = ''
for c in txt:
if c != " " and c != "\n":
text = text + c
return text
def fastaDict(self):
dict = { 'uuu': 'F', 'uuc': 'F', 'uua': 'L', 'uug': 'L',
'cuu': 'L', 'cuc': 'L', 'cua': 'L', 'cug': 'L',
'auu': 'I', 'auc': 'I', 'aua': 'I', 'aug': 'M',
'guu': 'V', 'guc': 'V', 'gua': 'V', 'gug': 'V',
'ucu': 'S', 'ucc': 'S', 'uca': 'S', 'ucg': 'S',
'ccu': 'P', 'ccc': 'P', 'cca': 'P', 'ccg': 'P',
'acu': 'T', 'acc': 'T', 'aca': 'T', 'acg': 'T',
'gcu': 'A', 'gcc': 'A', 'gca': 'A', 'gcg': 'A',
'uau': 'Y', 'uac': 'Y', 'uaa': '-', 'uag': '-',
'cau': 'H', 'cac': 'H', 'caa': 'Q', 'cag': 'Q',
'aau': 'N', 'aac': 'N', 'aaa': 'K', 'aag': 'K',
'gau': 'D', 'gac': 'D', 'gaa': 'E', 'gag': 'E',
'ugu': 'C', 'ugc': 'C', 'uga': '-', 'ugg': 'W',
'cgu': 'R', 'cgc': 'R', 'cga': 'R', 'cgg': 'R',
'agu': 'S', 'agc': 'S', 'aga': 'R', 'agg': 'R',
'ggu': 'G', 'ggc': 'G', 'gga': 'G', 'ggg': 'G'}
return dict
def dnaToRNA(self):
out = ""
for c in self.text:
c = c.lower() # in case DNA input is capitalized
if c == 't':
c = 'u'
out += c
return out
def toFastaPos(self, start):
out = ""
i = start
while i < len(self.rna):
codon = self.rna[i:i+3]
if len(codon) == 3:
#print codon[2]
#print codon
out += self.fasta[codon]
#print out
i += 3
return out
def toFastaNeg(self, start):
out = ""
i = len(self.rna) - start
while i > 0:
anticodon = self.rna[i-3:i]
if len(anticodon) == 3:
codon = anticodon[2] + anticodon[1] + anticodon[0]
codon = self.compliment(codon)
out += self.fasta[codon]
i -= 3
return out
def compliment(self, codon):
out = ""
for c in codon:
if c == 'a':
out += 'u'
elif c == 'u':
out += 'a'
elif c == 'g':
out += 'c'
elif c == 'c':
out += 'g'
return out
def saveToFile(self):
f = open('seqOut.txt', 'wb')
f.write(self.text)
f.write(" \n")
f.write("FRAMES:\n ")
f.write('frame1: ' + self.frame1 + ' \n')
f.write('frame2: ' + self.frame2 + ' \n')
f.write('frame3: ' + self.frame3 + ' \n')
f.write('frame4: ' + self.frame4 + ' \n')
f.write('frame5: ' + self.frame5 + ' \n')
f.write('frame6: ' + self.frame6 + ' \n')
self.text;
f.close()
| 27.437126 | 73 | 0.380183 |
fa0f89f3aa2d0f2bea379482c9b1e755d13d80bf | 36,021 | py | Python | tensorflow/python/training/supervisor_test.py | joshz123/tensorflow | 7841ca029060ab78e221e757d4b1ee6e3e0ffaa4 | [
"Apache-2.0"
] | 57 | 2017-09-03T07:08:31.000Z | 2022-02-28T04:33:42.000Z | tensorflow/python/training/supervisor_test.py | sagol/tensorflow | 04f2870814d2773e09dcfa00cbe76a66a2c4de88 | [
"Apache-2.0"
] | 203 | 2019-06-14T23:53:10.000Z | 2022-02-10T02:27:23.000Z | tensorflow/python/training/supervisor_test.py | sagol/tensorflow | 04f2870814d2773e09dcfa00cbe76a66a2c4de88 | [
"Apache-2.0"
] | 66 | 2020-05-15T10:05:12.000Z | 2022-02-14T07:28:18.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for supervisor.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import time
import uuid
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import supervisor
def _summary_iterator(test_dir):
  """Reads events from the most recent events file in test_dir.

  Args:
    test_dir: Name of the test directory.

  Returns:
    A summary_iterator over the newest "event*" file (lexicographically
    last, which is also newest because filenames embed timestamps).
  """
  pattern = os.path.join(test_dir, "event*")
  newest_event_file = sorted(glob.glob(pattern))[-1]
  return summary_iterator.summary_iterator(newest_event_file)
class SupervisorTest(test.TestCase):
def _test_dir(self, test_name):
test_dir = os.path.join(self.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
return test_dir
def _wait_for_glob(self, pattern, timeout_secs, for_checkpoint=True):
"""Wait for a checkpoint file to appear.
Args:
pattern: A string.
timeout_secs: How long to wait for in seconds.
for_checkpoint: whether we're globbing for checkpoints.
"""
end_time = time.time() + timeout_secs
while time.time() < end_time:
if for_checkpoint:
if checkpoint_management.checkpoint_exists(pattern):
return
else:
if len(gfile.Glob(pattern)) >= 1:
return
time.sleep(0.05)
self.assertFalse(True, "Glob never matched any file: %s" % pattern)
  # This test does not test much.
  def testBasics(self):
    """Smoke test: a Supervisor can create a session and run a trivial op."""
    logdir = self._test_dir("basics")
    with ops.Graph().as_default():
      my_op = constant_op.constant(1.0)
      sv = supervisor.Supervisor(logdir=logdir)
      sess = sv.prepare_or_wait_for_session("")
      for _ in xrange(10):
        self.evaluate(my_op)
      sess.close()
      sv.stop()
  def testManagedSession(self):
    """managed_session stops the Supervisor when the block exits normally."""
    logdir = self._test_dir("managed_session")
    with ops.Graph().as_default():
      my_op = constant_op.constant(1.0)
      sv = supervisor.Supervisor(logdir=logdir)
      with sv.managed_session("") as sess:
        for _ in xrange(10):
          self.evaluate(my_op)
      # Supervisor has been stopped.
      self.assertTrue(sv.should_stop())
  def testManagedSessionUserError(self):
    """A user exception inside managed_session propagates and stops the Supervisor."""
    logdir = self._test_dir("managed_user_error")
    with ops.Graph().as_default():
      my_op = constant_op.constant(1.0)
      sv = supervisor.Supervisor(logdir=logdir)
      last_step = None
      with self.assertRaisesRegexp(RuntimeError, "failing here"):
        with sv.managed_session("") as sess:
          for step in xrange(10):
            last_step = step
            if step == 1:
              raise RuntimeError("failing here")
            else:
              self.evaluate(my_op)
      # Supervisor has been stopped.
      self.assertTrue(sv.should_stop())
      self.assertEqual(1, last_step)
  def testManagedSessionIgnoreOutOfRangeError(self):
    """OutOfRangeError is treated as normal end-of-input, not re-raised."""
    logdir = self._test_dir("managed_out_of_range")
    with ops.Graph().as_default():
      my_op = constant_op.constant(1.0)
      sv = supervisor.Supervisor(logdir=logdir)
      last_step = None
      with sv.managed_session("") as sess:
        for step in xrange(10):
          last_step = step
          if step == 3:
            raise errors_impl.OutOfRangeError(my_op.op.node_def, my_op.op,
                                              "all done")
          else:
            self.evaluate(my_op)
      # Supervisor has been stopped. OutOfRangeError was not thrown.
      self.assertTrue(sv.should_stop())
      self.assertEqual(3, last_step)
  def testManagedSessionDoNotKeepSummaryWriter(self):
    """With close_summary_writer=True each session writes its own event file."""
    logdir = self._test_dir("managed_not_keep_summary_writer")
    with ops.Graph().as_default():
      summary.scalar("c1", constant_op.constant(1))
      summary.scalar("c2", constant_op.constant(2))
      summary.scalar("c3", constant_op.constant(3))
      summ = summary.merge_all()
      sv = supervisor.Supervisor(logdir=logdir, summary_op=None)
      with sv.managed_session(
          "", close_summary_writer=True, start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      # Sleep 1.2s to make sure that the next event file has a different name
      # than the current one.
      time.sleep(1.2)
      with sv.managed_session(
          "", close_summary_writer=True, start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      event_paths = sorted(glob.glob(os.path.join(logdir, "event*")))
      self.assertEquals(2, len(event_paths))
      # The two event files should have the same contents.
      for path in event_paths:
        # The summary iterator should report the summary once as we closed the
        # summary writer across the 2 sessions.
        rr = summary_iterator.summary_iterator(path)
        # The first event should list the file_version.
        ev = next(rr)
        self.assertEquals("brain.Event:2", ev.file_version)
        # The next one has the graph and metagraph.
        ev = next(rr)
        self.assertTrue(ev.graph_def)
        ev = next(rr)
        self.assertTrue(ev.meta_graph_def)
        # The next one should have the values from the summary.
        # But only once.
        ev = next(rr)
        self.assertProtoEquals("""
          value { tag: 'c1' simple_value: 1.0 }
          value { tag: 'c2' simple_value: 2.0 }
          value { tag: 'c3' simple_value: 3.0 }
          """, ev.summary)
        # The next one should be a stop message if we closed cleanly.
        ev = next(rr)
        self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
        # We should be done.
        with self.assertRaises(StopIteration):
          next(rr)
  def testManagedSessionKeepSummaryWriter(self):
    """With close_summary_writer=False both sessions share one event file."""
    logdir = self._test_dir("managed_keep_summary_writer")
    with ops.Graph().as_default():
      summary.scalar("c1", constant_op.constant(1))
      summary.scalar("c2", constant_op.constant(2))
      summary.scalar("c3", constant_op.constant(3))
      summ = summary.merge_all()
      sv = supervisor.Supervisor(logdir=logdir)
      with sv.managed_session(
          "", close_summary_writer=False,
          start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      with sv.managed_session(
          "", close_summary_writer=False,
          start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      # Now close the summary writer to flush the events.
      sv.summary_writer.close()
      # The summary iterator should report the summary twice as we reused
      # the same summary writer across the 2 sessions.
      rr = _summary_iterator(logdir)
      # The first event should list the file_version.
      ev = next(rr)
      self.assertEquals("brain.Event:2", ev.file_version)
      # The next one has the graph.
      ev = next(rr)
      self.assertTrue(ev.graph_def)
      ev = next(rr)
      self.assertTrue(ev.meta_graph_def)
      # The next one should have the values from the summary.
      ev = next(rr)
      self.assertProtoEquals("""
        value { tag: 'c1' simple_value: 1.0 }
        value { tag: 'c2' simple_value: 2.0 }
        value { tag: 'c3' simple_value: 3.0 }
        """, ev.summary)
      # The next one should also have the values from the summary.
      ev = next(rr)
      self.assertProtoEquals("""
        value { tag: 'c1' simple_value: 1.0 }
        value { tag: 'c2' simple_value: 2.0 }
        value { tag: 'c3' simple_value: 3.0 }
        """, ev.summary)
      # We should be done.
      self.assertRaises(StopIteration, lambda: next(rr))
def _csv_data(self, logdir):
# Create a small data file with 3 CSV records.
data_path = os.path.join(logdir, "data.csv")
with open(data_path, "w") as f:
f.write("1,2,3\n")
f.write("4,5,6\n")
f.write("7,8,9\n")
return data_path
  def testManagedEndOfInputOneQueue(self):
    # Tests that the supervisor finishes without an error when using
    # a fixed number of epochs, reading from a single queue.
    logdir = self._test_dir("managed_end_of_input_one_queue")
    os.makedirs(logdir)
    data_path = self._csv_data(logdir)
    with ops.Graph().as_default():
      # Create an input pipeline that reads the file 3 times.
      filename_queue = input_lib.string_input_producer(
          [data_path], num_epochs=3)
      reader = io_ops.TextLineReader()
      _, csv = reader.read(filename_queue)
      rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
      sv = supervisor.Supervisor(logdir=logdir)
      with sv.managed_session("") as sess:
        # Loop until the queue's OutOfRange flips should_stop().
        while not sv.should_stop():
          sess.run(rec)
  def testManagedEndOfInputTwoQueues(self):
    # Tests that the supervisor finishes without an error when using
    # a fixed number of epochs, reading from two queues, the second
    # one producing a batch from the first one.
    logdir = self._test_dir("managed_end_of_input_two_queues")
    os.makedirs(logdir)
    data_path = self._csv_data(logdir)
    with ops.Graph().as_default():
      # Create an input pipeline that reads the file 3 times.
      filename_queue = input_lib.string_input_producer(
          [data_path], num_epochs=3)
      reader = io_ops.TextLineReader()
      _, csv = reader.read(filename_queue)
      rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
      # Second queue: shuffle_batch feeds off the reader's records.
      shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
      sv = supervisor.Supervisor(logdir=logdir)
      with sv.managed_session("") as sess:
        while not sv.should_stop():
          sess.run(shuff_rec)
  def testManagedMainErrorTwoQueues(self):
    # Tests that the supervisor correctly raises a main loop
    # error even when using multiple queues for input.
    logdir = self._test_dir("managed_main_error_two_queues")
    os.makedirs(logdir)
    data_path = self._csv_data(logdir)
    with self.assertRaisesRegexp(RuntimeError, "fail at step 3"):
      with ops.Graph().as_default():
        # Create an input pipeline that reads the file 3 times.
        filename_queue = input_lib.string_input_producer(
            [data_path], num_epochs=3)
        reader = io_ops.TextLineReader()
        _, csv = reader.read(filename_queue)
        rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
        shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
        sv = supervisor.Supervisor(logdir=logdir)
        with sv.managed_session("") as sess:
          for step in range(9):
            if sv.should_stop():
              break
            elif step == 3:
              # The user error must win over queue end-of-input handling.
              raise RuntimeError("fail at step 3")
            else:
              sess.run(shuff_rec)
  def testSessionConfig(self):
    """A ConfigProto passed to prepare_or_wait_for_session is honored (2 CPUs)."""
    logdir = self._test_dir("session_config")
    with ops.Graph().as_default():
      with ops.device("/cpu:1"):
        my_op = constant_op.constant([1.0])
      sv = supervisor.Supervisor(logdir=logdir)
      sess = sv.prepare_or_wait_for_session(
          "", config=config_pb2.ConfigProto(device_count={"CPU": 2}))
      for _ in xrange(10):
        self.evaluate(my_op)
      sess.close()
      sv.stop()
  def testChiefCanWriteEvents(self):
    """A chief Supervisor writes file_version, graph, metagraph, summary, STOP."""
    logdir = self._test_dir("can_write")
    with ops.Graph().as_default():
      summary.scalar("c1", constant_op.constant(1))
      summary.scalar("c2", constant_op.constant(2))
      summary.scalar("c3", constant_op.constant(3))
      summ = summary.merge_all()
      sv = supervisor.Supervisor(is_chief=True, logdir=logdir, summary_op=None)
      meta_graph_def = meta_graph.create_meta_graph_def()
      sess = sv.prepare_or_wait_for_session("")
      sv.summary_computed(sess, sess.run(summ))
      sess.close()
      # Wait to make sure everything is written to file before stopping.
      time.sleep(1)
      sv.stop()
    rr = _summary_iterator(logdir)
    # The first event should list the file_version.
    ev = next(rr)
    self.assertEquals("brain.Event:2", ev.file_version)
    # The next one has the graph.
    ev = next(rr)
    ev_graph = graph_pb2.GraphDef()
    ev_graph.ParseFromString(ev.graph_def)
    self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
    # Stored MetaGraphDef
    ev = next(rr)
    ev_meta_graph = meta_graph_pb2.MetaGraphDef()
    ev_meta_graph.ParseFromString(ev.meta_graph_def)
    self.assertProtoEquals(meta_graph_def, ev_meta_graph)
    self.assertProtoEquals(
        sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
    # The next one should have the values from the summary.
    ev = next(rr)
    self.assertProtoEquals("""
      value { tag: 'c1' simple_value: 1.0 }
      value { tag: 'c2' simple_value: 2.0 }
      value { tag: 'c3' simple_value: 3.0 }
      """, ev.summary)
    # The next one should be a stop message if we closed cleanly.
    ev = next(rr)
    self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
    # We should be done.
    self.assertRaises(StopIteration, lambda: next(rr))
  def testNonChiefCannotWriteEvents(self):
    """Non-chief supervisors must refuse summary writing and standard services."""
    def _summary_computed():
      with ops.Graph().as_default():
        sv = supervisor.Supervisor(is_chief=False)
        sess = sv.prepare_or_wait_for_session("")
        summary.scalar("c1", constant_op.constant(1))
        summary.scalar("c2", constant_op.constant(2))
        summ = summary.merge_all()
        sv.summary_computed(sess, sess.run(summ))
    def _start_standard_services():
      with ops.Graph().as_default():
        sv = supervisor.Supervisor(is_chief=False)
        sess = sv.prepare_or_wait_for_session("")
        sv.start_standard_services(sess)
    self.assertRaises(RuntimeError, _summary_computed)
    self.assertRaises(RuntimeError, _start_standard_services)
  def testNoLogdirButWantSummary(self):
    """summary_computed without a logdir (hence no writer) must raise."""
    with ops.Graph().as_default():
      summary.scalar("c1", constant_op.constant(1))
      summary.scalar("c2", constant_op.constant(2))
      summary.scalar("c3", constant_op.constant(3))
      summ = summary.merge_all()
      sv = supervisor.Supervisor(logdir="", summary_op=None)
      sess = sv.prepare_or_wait_for_session("")
      with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
        sv.summary_computed(sess, sess.run(summ))
  @test_util.run_v1_only("b/120545219")
  def testLogdirButExplicitlyNoSummaryWriter(self):
    """summary_writer=None disables summaries but checkpointing still works."""
    logdir = self._test_dir("explicit_no_summary_writer")
    with ops.Graph().as_default():
      variables.VariableV1([1.0], name="foo")
      summary.scalar("c1", constant_op.constant(1))
      summary.scalar("c2", constant_op.constant(2))
      summary.scalar("c3", constant_op.constant(3))
      summ = summary.merge_all()
      sv = supervisor.Supervisor(logdir=logdir, summary_writer=None)
      sess = sv.prepare_or_wait_for_session("")
      # Check that a checkpoint is still be generated.
      self._wait_for_glob(sv.save_path, 3.0)
      # Check that we cannot write a summary
      with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
        sv.summary_computed(sess, sess.run(summ))
  def testNoLogdirButExplicitSummaryWriter(self):
    """An explicitly supplied FileWriter works even with logdir=''."""
    logdir = self._test_dir("explicit_summary_writer")
    with ops.Graph().as_default():
      summary.scalar("c1", constant_op.constant(1))
      summary.scalar("c2", constant_op.constant(2))
      summary.scalar("c3", constant_op.constant(3))
      summ = summary.merge_all()
      sw = writer.FileWriter(logdir)
      sv = supervisor.Supervisor(logdir="", summary_op=None, summary_writer=sw)
      meta_graph_def = meta_graph.create_meta_graph_def()
      sess = sv.prepare_or_wait_for_session("")
      sv.summary_computed(sess, sess.run(summ))
      sess.close()
      # Wait to make sure everything is written to file before stopping.
      time.sleep(1)
      sv.stop()
    # Check the summary was written to 'logdir'
    rr = _summary_iterator(logdir)
    # The first event should list the file_version.
    ev = next(rr)
    self.assertEquals("brain.Event:2", ev.file_version)
    # The next one has the graph.
    ev = next(rr)
    ev_graph = graph_pb2.GraphDef()
    ev_graph.ParseFromString(ev.graph_def)
    self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
    # Stored MetaGraphDef
    ev = next(rr)
    ev_meta_graph = meta_graph_pb2.MetaGraphDef()
    ev_meta_graph.ParseFromString(ev.meta_graph_def)
    self.assertProtoEquals(meta_graph_def, ev_meta_graph)
    self.assertProtoEquals(
        sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
    # The next one should have the values from the summary.
    ev = next(rr)
    self.assertProtoEquals("""
      value { tag: 'c1' simple_value: 1.0 }
      value { tag: 'c2' simple_value: 2.0 }
      value { tag: 'c3' simple_value: 3.0 }
      """, ev.summary)
    # The next one should be a stop message if we closed cleanly.
    ev = next(rr)
    self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
    # We should be done.
    self.assertRaises(StopIteration, lambda: next(rr))
  def testNoLogdirSucceeds(self):
    """A Supervisor with no logdir can still create and close a session."""
    with ops.Graph().as_default():
      variables.VariableV1([1.0, 2.0, 3.0])
      sv = supervisor.Supervisor(logdir="", summary_op=None)
      sess = sv.prepare_or_wait_for_session("")
      sess.close()
      sv.stop()
  def testUseSessionManager(self):
    """A caller-supplied SessionManager is accepted by the Supervisor."""
    with ops.Graph().as_default():
      variables.VariableV1([1.0, 2.0, 3.0])
      sm = session_manager_lib.SessionManager()
      # Pass in session_manager. The additional init_op is ignored.
      sv = supervisor.Supervisor(logdir="", session_manager=sm)
      sv.prepare_or_wait_for_session("")
  @test_util.run_v1_only("b/120545219")
  def testInitOp(self):
    """The default init_op initializes variables to their declared values."""
    logdir = self._test_dir("default_init_op")
    with ops.Graph().as_default():
      v = variables.VariableV1([1.0, 2.0, 3.0])
      sv = supervisor.Supervisor(logdir=logdir)
      sess = sv.prepare_or_wait_for_session("")
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
      sv.stop()
  @test_util.run_v1_only("b/120545219")
  def testInitFn(self):
    """An init_fn callback can replace init_op for initialization."""
    logdir = self._test_dir("default_init_op")
    with ops.Graph().as_default():
      v = variables.VariableV1([1.0, 2.0, 3.0])
      def _init_fn(sess):
        sess.run(v.initializer)
      # init_op=None forces initialization to go through _init_fn.
      sv = supervisor.Supervisor(logdir=logdir, init_op=None, init_fn=_init_fn)
      sess = sv.prepare_or_wait_for_session("")
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
      sv.stop()
  @test_util.run_v1_only("b/120545219")
  def testInitOpWithFeedDict(self):
    """init_feed_dict feeds placeholder values into the init_op run."""
    logdir = self._test_dir("feed_dict_init_op")
    with ops.Graph().as_default():
      p = array_ops.placeholder(dtypes.float32, shape=(3,))
      v = variables.VariableV1(p, name="v")
      sv = supervisor.Supervisor(
          logdir=logdir,
          init_op=variables.global_variables_initializer(),
          init_feed_dict={p: [1.0, 2.0, 3.0]})
      sess = sv.prepare_or_wait_for_session("")
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
      sv.stop()
  @test_util.run_v1_only("b/120545219")
  def testReadyForLocalInitOp(self):
    """Non-chief local init waits for global vars via ready_for_local_init_op."""
    server = server_lib.Server.create_local_server()
    logdir = self._test_dir("default_ready_for_local_init_op")
    # Unique suffix keeps variable names from colliding across test runs
    # that share the in-process server.
    uid = uuid.uuid4().hex
    def get_session(is_chief):
      g = ops.Graph()
      with g.as_default():
        with ops.device("/job:localhost"):
          v = variables.VariableV1(
              1, name="default_ready_for_local_init_op_v_" + str(uid))
          vadd = v.assign_add(1)
          # Local variable w is initialized FROM global v, so v must be
          # initialized first; ready_for_local_init_op enforces that.
          w = variables.VariableV1(
              v,
              trainable=False,
              collections=[ops.GraphKeys.LOCAL_VARIABLES],
              name="default_ready_for_local_init_op_w_" + str(uid))
          ready_for_local_init_op = variables.report_uninitialized_variables(
              variables.global_variables())
      sv = supervisor.Supervisor(
          logdir=logdir,
          is_chief=is_chief,
          graph=g,
          recovery_wait_secs=1,
          init_op=v.initializer,
          ready_for_local_init_op=ready_for_local_init_op)
      sess = sv.prepare_or_wait_for_session(server.target)
      return sv, sess, v, vadd, w
    sv0, sess0, v0, _, w0 = get_session(True)
    sv1, sess1, _, vadd1, w1 = get_session(False)
    self.assertEqual(1, sess0.run(w0))
    self.assertEqual(2, sess1.run(vadd1))
    self.assertEqual(1, sess1.run(w1))
    self.assertEqual(2, sess0.run(v0))
    sv0.stop()
    sv1.stop()
@test_util.run_v1_only("b/120545219")
def testReadyForLocalInitOpRestoreFromCheckpoint(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("ready_for_local_init_op_restore")
uid = uuid.uuid4().hex
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1(
10.0, name="ready_for_local_init_op_restore_v_" + str(uid))
summary.scalar("ready_for_local_init_op_restore_v_" + str(uid), v)
sv = supervisor.Supervisor(logdir=logdir)
sv.prepare_or_wait_for_session(server.target)
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
def get_session(is_chief):
g = ops.Graph()
with g.as_default():
with ops.device("/job:localhost"):
v = variables.VariableV1(
1.0, name="ready_for_local_init_op_restore_v_" + str(uid))
vadd = v.assign_add(1)
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="ready_for_local_init_op_restore_w_" + str(uid))
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
sv = supervisor.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(10, sess0.run(w0))
self.assertEqual(11, sess1.run(vadd1))
self.assertEqual(10, sess1.run(w1))
self.assertEqual(11, sess0.run(v0))
sv0.stop()
sv1.stop()
def testLocalInitOp(self):
logdir = self._test_dir("default_local_init_op")
with ops.Graph().as_default():
# A local variable.
v = variables.VariableV1(
[1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# An entity which is initialized through a TABLE_INITIALIZER.
w = variables.VariableV1([4, 5, 6], trainable=False, collections=[])
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, w.initializer)
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
self.assertAllClose([4, 5, 6], sess.run(w))
sv.stop()
def testLocalInitOpForNonChief(self):
logdir = self._test_dir("default_local_init_op_non_chief")
with ops.Graph().as_default():
with ops.device("/job:localhost"):
# A local variable.
v = variables.VariableV1(
[1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None, is_chief=False)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitOpFails(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails")
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
variables.VariableV1([4.0, 5.0, 6.0], name="w")
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, init_op=v.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
def testInitOpFailsForTransientVariable(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails_for_local_variable")
with ops.Graph().as_default():
v = variables.VariableV1(
[1.0, 2.0, 3.0],
name="v",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
variables.VariableV1(
[1.0, 2.0, 3.0],
name="w",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, local_init_op=v.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
@test_util.run_v1_only("b/120545219")
def testSetupFail(self):
logdir = self._test_dir("setup_fail")
with ops.Graph().as_default():
variables.VariableV1([1.0, 2.0, 3.0], name="v")
with self.assertRaisesRegexp(ValueError, "must have their device set"):
supervisor.Supervisor(logdir=logdir, is_chief=False)
with ops.Graph().as_default(), ops.device("/job:ps"):
variables.VariableV1([1.0, 2.0, 3.0], name="v")
supervisor.Supervisor(logdir=logdir, is_chief=False)
@test_util.run_v1_only("b/120545219")
def testDefaultGlobalStep(self):
logdir = self._test_dir("default_global_step")
with ops.Graph().as_default():
variables.VariableV1(287, name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertEquals(287, sess.run(sv.global_step))
sv.stop()
@test_util.run_v1_only("b/120545219")
def testRestoreFromMetaGraph(self):
logdir = self._test_dir("restore_from_meta_graph")
with ops.Graph().as_default():
variables.VariableV1(1, name="v0")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
filename = sv.saver.save(sess, sv.save_path)
sv.stop()
# Create a new Graph and Supervisor and recover.
with ops.Graph().as_default():
new_saver = saver_lib.import_meta_graph(".".join([filename, "meta"]))
self.assertIsNotNone(new_saver)
sv2 = supervisor.Supervisor(logdir=logdir, saver=new_saver)
sess = sv2.prepare_or_wait_for_session("")
self.assertEquals(1, sess.run("v0:0"))
sv2.saver.save(sess, sv2.save_path)
sv2.stop()
# This test is based on the fact that the standard services start
# right away and get to run once before sv.stop() returns.
# We still sleep a bit to make the test robust.
@test_util.run_v1_only("b/120545219")
def testStandardServicesWithoutGlobalStep(self):
logdir = self._test_dir("standard_services_without_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1([1.0], name="foo")
summary.scalar("v", v[0])
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
self.assertProtoEquals("value { tag: 'v' simple_value: 1.0 }", ev.summary)
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([10.10], name="foo")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(1.0, self.evaluate(v)[0])
# Same as testStandardServicesNoGlobalStep but with a global step.
# We should get a summary about the step time.
@test_util.run_v1_only("b/120545219")
def testStandardServicesWithGlobalStep(self):
logdir = self._test_dir("standard_services_with_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1([123], name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
# This is where the checkpoint will appear, with step number 123.
save_path = "%s-123" % sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
# It is actually undeterministic whether SessionLog.START gets written
# before the summary or the checkpoint, but this works when run 10000 times.
self.assertEquals(123, ev.step)
self.assertEquals(event_pb2.SessionLog.START, ev.session_log.status)
first = next(rr)
second = next(rr)
# It is undeterministic whether the value gets written before the checkpoint
# since they are on separate threads, so we check for both conditions.
if first.HasField("summary"):
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", first.summary)
self.assertEquals(123, second.step)
self.assertEquals(event_pb2.SessionLog.CHECKPOINT,
second.session_log.status)
else:
self.assertEquals(123, first.step)
self.assertEquals(event_pb2.SessionLog.CHECKPOINT,
first.session_log.status)
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", second.summary)
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([-12], name="global_step")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(123, self.evaluate(v)[0])
def testNoQueueRunners(self):
with ops.Graph().as_default(), self.cached_session() as sess:
sv = supervisor.Supervisor(logdir=self._test_dir("no_queue_runners"))
self.assertEqual(0, len(sv.start_queue_runners(sess)))
sv.stop()
def testPrepareSessionAfterStopForChief(self):
logdir = self._test_dir("prepare_after_stop_chief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=True)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
def testPrepareSessionAfterStopForNonChief(self):
logdir = self._test_dir("prepare_after_stop_nonchief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=False)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
if __name__ == "__main__":
test.main()
| 38.857605 | 80 | 0.676078 |
0a19d4e48bc8935029949a5fe80e43618c906a07 | 1,200 | py | Python | scraper/storage_spiders/muagiagocvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | null | null | null | scraper/storage_spiders/muagiagocvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scraper/storage_spiders/muagiagocvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 3 | 2018-08-05T14:54:25.000Z | 2021-06-07T01:49:59.000Z | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@id='content']/div[@id='content_top']/div[@class='overview']/h1[@class='name']",
'price' : "//div[@id='content_top']/div[@class='overview']/p/b[@class='red price']",
'category' : "//div[@class='wrap_content']/div[@id='content']/div[@class='location']/a",
'description' : "//div[@class='tabs']/div[@class='content_tab']/div[@id='ThongSoKyThuat']",
'images' : "//div[@id='content']/div[@id='content_top']/div[@class='img_product_detail']/ul/li/a/@href | //div[@class='img_product_detail']/div[@class='img_large']/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'muagiagoc.vn'
allowed_domains = ['muagiagoc.vn']
start_urls = ['http://muagiagoc.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/\d+-[a-zA-Z0-9-]+\.html']), 'parse_item'),
Rule(LinkExtractor(allow=['/c+\d+/'], deny=['filter=','min=','max=']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 44.444444 | 177 | 0.634167 |
070632fc219854c929ad790a9f41ae16cffacc54 | 3,713 | py | Python | cride/circles/views/memberships.py | diegoalejo14/advanced_django_platzi | 3a281749d05776b8d856d1ec3f9c47537a5490a0 | [
"MIT"
] | null | null | null | cride/circles/views/memberships.py | diegoalejo14/advanced_django_platzi | 3a281749d05776b8d856d1ec3f9c47537a5490a0 | [
"MIT"
] | null | null | null | cride/circles/views/memberships.py | diegoalejo14/advanced_django_platzi | 3a281749d05776b8d856d1ec3f9c47537a5490a0 | [
"MIT"
] | null | null | null | """Circle membership views"""
# Django REST Framework
from cride.circles.models import invitations
from cride.circles.models.invitations import Invitation
from rest_framework import viewsets, mixins, status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.generics import get_object_or_404
# Models
from cride.circles.models import Circle, Membership, Invitation
from cride.circles.serializers.memberships import MembershipModelSerializer, AddMemberSerializer
# Permissions
from rest_framework.permissions import IsAuthenticated
from cride.circles.permissions.memberships import IsActiveCircleMember, IsSelfMember
class MembershipViewSet(
mixins.ListModelMixin,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""Circle Membership view set."""
serializer_class = MembershipModelSerializer
def dispatch(self, request, *args, **kwargs):
"""Verify that the circle exists."""
slug_name = kwargs['slug_name']
self.circle = get_object_or_404(
Circle,
slug_name=slug_name
)
return super(MembershipViewSet, self).dispatch(request, *args, **kwargs)
def get_permissions(self):
"""Assign permissions based on action"""
permissions = [IsAuthenticated]
if self.action != 'create':
permissions.append(IsActiveCircleMember)
if self.action == 'invitations':
permissions = [IsAuthenticated, IsSelfMember]
return [p() for p in permissions]
def get_queryset(self):
"""Return circle members."""
return Membership.objects.filter(
circle=self.circle,
is_active=True
)
def get_object(self):
"""Return the circle member by using the user's username."""
return get_object_or_404(
Membership,
user__username=self.kwargs['pk'],
circle=self.circle,
is_active=True
)
def perform_destroy(self, instance):
"""Disable membership."""
instance.is_active = False
instance.save()
@action(detail=True, methods=['get'])
def invitations(self, request, *args, **kwargs):
""" Get invitations to the member"""
member = self.get_object()
invited_members = Membership.objects.filter(
circle=self.circle,
invited_by=request.user,
is_active=True
)
unused_invitations = Invitation.objects.filter(
circle=self.circle,
issued_by=request.user,
used=False
).values_list('code')
diff = member.remaining_invitation-len(unused_invitations)
invitations = [x[0] for x in unused_invitations]
for i in range(0, diff):
invitations.append(
Invitation.objects.create(
issued_by=request.user,
circle=self.circle
).code
)
data = {
'used_invitations': MembershipModelSerializer(invited_members, many=True).data,
'invitations': invitations
}
return Response(data)
def create(self, request, *args, **kwargs):
"""Handle member creation from invitation code."""
serializer = AddMemberSerializer(data=request.data, context={
'circle': self.circle,
'request': request
})
serializer.is_valid(raise_exception=True)
member = serializer.save()
data = self.get_serializer(member).data
return Response(data, status=status.HTTP_201_CREATED)
| 34.700935 | 96 | 0.643954 |
3fd0b891f708207189e36a8174bb46b4b973e828 | 8,066 | py | Python | athena/data/datasets/speech_set_kaldiio.py | Huang17/athena | 9077f57f5b7aa64a28487d8b30f1781783d45a42 | [
"Apache-2.0"
] | 1 | 2020-08-26T08:56:49.000Z | 2020-08-26T08:56:49.000Z | athena/data/datasets/speech_set_kaldiio.py | shuaijiang/athena-2 | 5d4d6d13075b8ee9fd824ce6258cb8f55dd157eb | [
"Apache-2.0"
] | null | null | null | athena/data/datasets/speech_set_kaldiio.py | shuaijiang/athena-2 | 5d4d6d13075b8ee9fd824ce6258cb8f55dd157eb | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li; Shuaijiang Zhao; Ne Luo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=no-member, invalid-name
""" audio dataset """
import os
from absl import logging
import tensorflow as tf
import kaldiio
from athena.transform import AudioFeaturizer
from ...utils.hparam import register_and_parse_hparams
from ..feature_normalizer import FeatureNormalizer
from .base import BaseDatasetBuilder
class SpeechDatasetKaldiIOBuilder(BaseDatasetBuilder):
""" SpeechDatasetKaldiIOBuilder
Args:
for __init__(self, config=None)
Config:
feature_config: the config file for feature extractor, default={'type':'Fbank'}
data_csv: the path for original LDC HKUST,
default='/tmp-data/dataset/opensource/hkust/train.csv'
force_process: force process, if force_process=True, we will re-process the dataset,
if False, we will process only if the out_path is empty. default=False
Interfaces::
__len__(self): return the number of data samples
@property:
sample_shape:
{"input": tf.TensorShape([None, self.audio_featurizer.dim,
self.audio_featurizer.num_channels]),
"input_length": tf.TensorShape([]),
"output_length": tf.TensorShape([]),
"output": tf.TensorShape([None, self.audio_featurizer.dim *
self.audio_featurizer.num_channels]),}
"""
default_config = {
"audio_config": {"type": "Fbank"},
"cmvn_file": None,
"input_length_range": [20, 50000],
"data_csv": None,
"data_scps_dir": None
}
def __init__(self, config=None):
super().__init__()
# hparams
self.hparams = register_and_parse_hparams(
self.default_config, config, cls=self.__class__)
logging.info("hparams: {}".format(self.hparams))
self.audio_featurizer = AudioFeaturizer(self.hparams.audio_config)
self.feature_normalizer = FeatureNormalizer(self.hparams.cmvn_file)
if self.hparams.data_scps_dir is not None:
self.load_scps(self.hparams.data_scps_dir)
def reload_config(self, config):
""" reload the config """
if config is not None:
self.hparams.override_from_dict(config)
def preprocess_data(self, file_dir, apply_sort_filter=True):
""" Generate a list of tuples (feat_key, speaker). """
logging.info("Loading kaldi-format feats.scp and utt2spk (optional) from {}".format(file_dir))
self.kaldi_io_feats = kaldiio.load_scp(os.path.join(file_dir, "feats.scp"))
# initialize all speakers with 'global' unless 'utterance_key speaker' is specified in "utt2spk"
self.speakers = dict.fromkeys(self.kaldi_io_feats.keys(), 'global')
if os.path.exists(os.path.join(file_dir, "utt2spk")):
with open(os.path.join(file_dir, "utt2spk"), "r") as f:
lines = f.readlines()
for line in lines:
key, spk = line.strip().split(" ", 1)
self.speakers[key] = spk
self.entries = []
for key in self.kaldi_io_feats.keys():
self.entries.append(tuple([key, self.speakers[key]]))
if apply_sort_filter:
logging.info("Sorting and filtering data, this is very slow, please be patient ...")
self.entries.sort(key=lambda item: self.kaldi_io_feats[item[0]].shape[0])
self.filter_sample_by_input_length()
return self
def load_scps(self, file_dir):
""" load kaldi-format feats.scp and utt2spk (optional) """
return self.preprocess_data(file_dir)
def __getitem__(self, index):
key, speaker = self.entries[index]
feat = self.kaldi_io_feats[key]
feat = feat.reshape(feat.shape[0], feat.shape[1], 1)
feat = tf.convert_to_tensor(feat)
feat = self.feature_normalizer(feat, speaker)
input_data = feat
output_data = tf.reshape(
feat, [-1, self.audio_featurizer.dim * self.audio_featurizer.num_channels]
)
return {
"input": input_data,
"input_length": input_data.shape[0],
"output": output_data,
"output_length": output_data.shape[0],
}
def __len__(self):
""" return the number of data samples """
return len(self.entries)
@property
def num_class(self):
""" return the max_index of the vocabulary """
target_dim = self.audio_featurizer.dim * self.audio_featurizer.num_channels
return target_dim
@property
def speaker_list(self):
""" return the speaker list """
return self.speakers
@property
def audio_featurizer_func(self):
""" return the audio_featurizer function """
return self.audio_featurizer
@property
def sample_type(self):
return {
"input": tf.float32,
"input_length": tf.int32,
"output": tf.float32,
"output_length": tf.int32,
}
@property
def sample_shape(self):
return {
"input": tf.TensorShape(
[None, self.audio_featurizer.dim, self.audio_featurizer.num_channels]
),
"input_length": tf.TensorShape([]),
"output": tf.TensorShape([None, None]),
"output_length": tf.TensorShape([]),
}
@property
def sample_signature(self):
return (
{
"input": tf.TensorSpec(
shape=(None, None, None, None), dtype=tf.float32
),
"input_length": tf.TensorSpec(shape=([None]), dtype=tf.int32),
"output": tf.TensorSpec(shape=(None, None, None), dtype=tf.float32),
"output_length": tf.TensorSpec(shape=([None]), dtype=tf.int32),
},
)
def filter_sample_by_input_length(self):
"""filter samples by input length
The length of filterd samples will be in [min_length, max_length)
Args:
self.hparams.input_length_range = [min_len, max_len]
min_len: the minimal length (ms for csv-format data, and frame amount for scp-format data)
max_len: the maximal length (ms for csv-format data, and frame amount for scp-format data)
returns:
entries: a filtered list of tuples
(wav_filename, wav_len, speaker)
"""
min_len = self.hparams.input_length_range[0]
max_len = self.hparams.input_length_range[1]
filter_entries = []
for items in self.entries:
if self.kaldi_io_feats[items[0]].shape[0] in range(min_len, max_len):
filter_entries.append(items)
self.entries = filter_entries
def compute_cmvn_if_necessary(self, is_necessary=True):
""" compute cmvn file
"""
if not is_necessary:
return self
if os.path.exists(self.hparams.cmvn_file):
return self
feature_dim = self.audio_featurizer.dim * self.audio_featurizer.num_channels
with tf.device("/cpu:0"):
self.feature_normalizer.compute_cmvn_kaldiio(
self.entries, self.speakers, self.kaldi_io_feats, feature_dim
)
self.feature_normalizer.save_cmvn(["speaker", "mean", "var"])
return self
| 37.516279 | 104 | 0.61753 |
39be4e9428461363aaa3d59c7aa45bca0f6a2c44 | 3,530 | py | Python | WebChat/settings.py | VincentHeredia/WebChat | c0f29e9d30080f528c74daa1567839169d5094dc | [
"MIT"
] | 1 | 2016-05-24T02:15:11.000Z | 2016-05-24T02:15:11.000Z | WebChat/settings.py | VincentHeredia/WebChat | c0f29e9d30080f528c74daa1567839169d5094dc | [
"MIT"
] | null | null | null | WebChat/settings.py | VincentHeredia/WebChat | c0f29e9d30080f528c74daa1567839169d5094dc | [
"MIT"
] | null | null | null | """
Django settings for WebChat project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gr&1f&d@xthy_7ob^@1g*34a^%n=6zgzarj7-ppt7qmkadl#ce'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'channels',
'chat.apps.ChatConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'WebChat.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'WebChat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgi_redis.RedisChannelLayer",
"CONFIG": {
"hosts": [("localhost", 6379)],
},
"ROUTING": "WebChat.routing.channel_routing",
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)# finds static files in base directory | 25.955882 | 92 | 0.696317 |
447899bd231e25715c693055bbf9efce2d01dd3b | 628 | py | Python | gluoncv/model_zoo/__init__.py | PistonY/gluon-cv | aff5c36c0a1985350d32b766df5644e5648f4d13 | [
"Apache-2.0"
] | 36 | 2019-12-25T04:59:49.000Z | 2022-03-17T07:24:49.000Z | gluoncv/model_zoo/__init__.py | PistonY/gluon-cv | aff5c36c0a1985350d32b766df5644e5648f4d13 | [
"Apache-2.0"
] | 1 | 2018-09-20T19:31:37.000Z | 2018-09-20T19:31:37.000Z | gluoncv/model_zoo/__init__.py | PistonY/gluon-cv | aff5c36c0a1985350d32b766df5644e5648f4d13 | [
"Apache-2.0"
] | 9 | 2019-12-25T05:00:33.000Z | 2021-10-01T14:23:51.000Z | """GluonCV Model Zoo"""
# pylint: disable=wildcard-import
from .model_zoo import get_model, get_model_list
from .model_store import pretrained_model_list
from .faster_rcnn import *
from .mask_rcnn import *
from .ssd import *
from .yolo import *
from .cifarresnet import *
from .cifarwideresnet import *
from .fcn import *
from .pspnet import *
from .deeplabv3 import *
from . import segbase
from .resnetv1b import *
from .se_resnet import *
from .nasnet import *
from .alexnet import *
from .densenet import *
from .inception import *
from .resnet import *
from .squeezenet import *
from .vgg import *
from .mobilenet import *
| 24.153846 | 48 | 0.764331 |
6f75aef9e97a07965d3f1ab93a8e2edbe8b14bf9 | 111,607 | py | Python | vendor/github.com/avinetworks/sdk/python/avi/migrationtools/f5_converter/conversion_util.py | avinetworks/servicemesh | 4b4864e1327f09fa0bc047770af7e477a32f9d94 | [
"Apache-2.0"
] | 4 | 2017-06-16T18:51:52.000Z | 2020-02-11T15:22:42.000Z | vendor/github.com/avinetworks/sdk/python/avi/migrationtools/f5_converter/conversion_util.py | sudswasavi/servicemesh | 4f07c228e9b62c9b621cb0bfc464c75e85351f7f | [
"Apache-2.0"
] | 17 | 2019-01-11T05:57:35.000Z | 2019-08-29T05:33:38.000Z | vendor/github.com/avinetworks/sdk/python/avi/migrationtools/f5_converter/conversion_util.py | sudswasavi/servicemesh | 4f07c228e9b62c9b621cb0bfc464c75e85351f7f | [
"Apache-2.0"
] | 8 | 2017-06-26T18:15:58.000Z | 2021-04-12T08:09:03.000Z | import copy
import logging
import os
import pandas
import ast
import re
import random
import avi.migrationtools.f5_converter.converter_constants as conv_const
from xlsxwriter import Workbook
from openpyxl import load_workbook
from pkg_resources import parse_version
from avi.migrationtools.avi_migration_utils import MigrationUtil, tenants
LOG = logging.getLogger(__name__)
csv_writer_dict_list = []
# Added variable for checking progress and get overall object.
ppcount = 0
ptotal_count = 0
global fully_migrated
fully_migrated = 0
used_pool_groups = {}
used_pool = {}
class F5Util(MigrationUtil):
def get_conv_status(self, skipped, indirect_list, ignore_dict, f5_object,
user_ignore=None, na_list=None):
"""
Update skipped list for conversion status
:param skipped: All skipped attributes after conversion
:param indirect_list: List of attrs to be mapped as indirect mapping
:param ignore_dict: Dict of default values for column skipped for defaults
:param f5_object: Currant F5 object
:param user_ignore: List of attributes user wants not to be shown in skipped
:param na_list: List of attributes marked as not applicable
:return: Conversion status dict
"""
conv_status = dict()
user_ignore = [] if not user_ignore else user_ignore
na_list = [] if not na_list else na_list
conv_status['user_ignore'] = [val for val in skipped if
val in user_ignore]
skipped = [attr for attr in skipped if attr not in user_ignore]
conv_status['indirect'] = [val for val in skipped if
val in indirect_list]
skipped = [attr for attr in skipped if attr not in indirect_list]
conv_status['na_list'] = [val for val in skipped if val in na_list]
skipped = [attr for attr in skipped if attr not in na_list]
default_skip = []
for key in ignore_dict.keys():
f5_val = f5_object.get(key)
default_val = ignore_dict.get(key)
if key in skipped and f5_val == default_val:
default_skip.append(key)
if default_skip:
skipped = [attr for attr in skipped if attr not in default_skip]
conv_status['skipped'] = skipped
conv_status['default_skip'] = default_skip
if skipped:
status = conv_const.STATUS_PARTIAL
else:
status = conv_const.STATUS_SUCCESSFUL
conv_status['status'] = status
return conv_status
def get_avi_pool_down_action(self, action):
"""
Maps Pool down action from F5 config to Avi Config
:param action: F5 action string
:return: Avi action String
"""
action_close_con = {
"type": "FAIL_ACTION_CLOSE_CONN"
}
if action == "reset":
return action_close_con
if action == "reselect":
return action_close_con
else:
return action_close_con
def get_cc_algo_val(self, cc_algo):
"""
congestion-control algorithm conversion
:param cc_algo: F5 algorithm value
:return: Avi algorithm value
"""
avi_algo_val = "CC_ALGO_NEW_RENO"
if cc_algo == "high-speed":
avi_algo_val = "CC_ALGO_HTCP"
elif cc_algo == "cubic":
avi_algo_val = "CC_ALGO_CUBIC"
return avi_algo_val
def add_conv_status(self, f5_type, f5_sub_type, f5_id, conv_status,
avi_object=None, need_review=None):
"""
Adds as status row in conversion status csv
:param f5_type: Object type
:param f5_sub_type: Object sub type
:param f5_id: Name oconv_f object
:param conv_status: dict of conversion status
:param avi_object: Converted objectconverted avi object
"""
global csv_writer_dict_list
# Added space if f5_sub_type None for pivot table
row = {
'F5 type': f5_type,
'F5 SubType': f5_sub_type if f5_sub_type else ' ',
'F5 ID': f5_id,
'Status': conv_status.get('status', ''),
'Skipped settings': str(conv_status.get('skipped', '')),
'Skipped for defaults': str(conv_status.get('default_skip', '')),
'Indirect mapping': str(conv_status.get('indirect', '')),
'Not Applicable': str(conv_status.get('na_list', '')),
'User Ignored': str(conv_status.get('user_ignore', '')),
'Avi Object': str(avi_object),
'Needs Review': need_review
}
csv_writer_dict_list.append(row)
def add_status_row(self, f5_type, f5_sub_type, f5_id, status, avi_obj=None):
"""
Adds as status row in conversion status csv
:param f5_type: Object type
:param f5_sub_type: Object sub type
:param f5_id: Name of object
:param status: conversion status
:param avi_obj: Converted avi object
"""
global csv_writer_dict_list
# Added space if f5_sub_type None for pivot table
row = {
'F5 type': f5_type,
'F5 SubType': f5_sub_type if f5_sub_type else ' ',
'F5 ID': f5_id,
'Status': status
}
if avi_obj:
row.update({
'Avi Object': str(avi_obj)
})
csv_writer_dict_list.append(row)
def add_complete_conv_status(self, output_dir, avi_config, report_name,
vs_level_status):
global csv_writer_dict_list
global ptotal_count
for status in conv_const.STATUS_LIST:
status_list = [row for row in csv_writer_dict_list if
row['Status'] == status]
print '%s: %s' % (status, len(status_list))
print "Writing Excel Sheet For Converted Configuration..."
ptotal_count = ptotal_count + len(csv_writer_dict_list)
if vs_level_status:
self.vs_per_skipped_setting_for_references(avi_config)
self.correct_vs_ref(avi_config)
else:
# Update the complexity level of VS as Basic or Advanced
self.vs_complexity_level()
self.write_status_report_and_pivot_table_in_xlsx(
output_dir, report_name, vs_level_status)
def get_port_by_protocol(self, protocol):
"""
Instead of default ports for protocols F5 config has protocol in
destination value for Avi object need to conver it to port number
:param protocol: protocol name
:return: integer value for protocol
"""
if protocol == 'http':
port = conv_const.HTTP_PORT
elif protocol == "https":
port = conv_const.HTTPS_PORT
elif protocol == "ftp":
port = conv_const.FTP_PORT
elif protocol == "smtp":
port = conv_const.SMTP_PORT
elif protocol == "snmp":
port = conv_const.SNMP_PORT
elif protocol == "telnet":
port = conv_const.TELNET_PORT
elif protocol == "snmp-trap":
port = conv_const.SNMP_TRAP_PORT
elif protocol == "ssh":
port = conv_const.SSH_PORT
elif protocol == "xfer":
port = conv_const.XFER_PORT
elif protocol == "pcsync-https":
port = conv_const.PCSYNC_HTTPS_PORT
elif protocol == "macromedia-fcs":
port = conv_const.MACROMEDIA_FCS_PORT
elif protocol == 'imap':
port = conv_const.IMAP_PORT
elif protocol == 'pop3':
port = conv_const.POP3_PORT
elif protocol == "any":
port = None
else:
return None
return port
    def update_skip_duplicates(self, obj, obj_list, obj_type, converted_objs,
                               name, default_profile_name, merge_object_mapping,
                               ent_type, prefix, syslist):
        """
        Merge duplicate profiles: either record *obj* as a duplicate of an
        existing object (updating the merge mapping) or append it as new.
        :param obj: Source object to find duplicates for
        :param obj_list: List of object to search duplicates in
        :param obj_type: Type of object to add in converted_objs status
        :param converted_objs: Converted avi object or merged object name
        :param name: Name of the object
        :param default_profile_name : Name of root parent default profile
        :param merge_object_mapping: merged object mappings
        :param ent_type: Entity type
        :param prefix: object name prefix
        :param syslist: System object list
        :return: None; mutates obj_list, converted_objs and the mapping
        """
        dup_of = None
        if isinstance(merge_object_mapping, dict):
            # Pre-seed the identity mapping; overwritten below on a merge.
            merge_object_mapping[obj_type].update({name: name})
        # root default profiles are skipped for merging
        if not name == default_profile_name or obj_type == 'ssl_profile':
            dup_of, old_name = \
                self.check_for_duplicates(obj, obj_list, obj_type,
                                          merge_object_mapping, ent_type,
                                          prefix, syslist)
        if dup_of:
            # Record the merge instead of adding a second identical object.
            converted_objs.append({obj_type: "Duplicate of %s" % dup_of})
            LOG.info(
                "Duplicate profiles: %s merged in %s" % (obj['name'], dup_of))
            if isinstance(merge_object_mapping, dict):
                # Re-point both the previously-merged name and this one.
                if old_name in merge_object_mapping[obj_type].keys():
                    merge_object_mapping[obj_type].update({old_name: dup_of})
                merge_object_mapping[obj_type].update({name: dup_of})
        else:
            obj_list.append(obj)
            converted_objs.append({obj_type: obj})
def get_content_string_group(self, name, content_types, tenant):
"""
Creates Avi String group object
:param name: name of string group
:param content_types: list of content type
:param tenant: tenant name to add tenant reference
:return:
"""
sg_obj = {"name": name + "-content_type", "type": "SG_TYPE_STRING"}
kv = []
for content_type in content_types:
if content_type is None:
LOG.warning('%s content_types %s has none', name, content_types)
continue
uri = {"key": content_type}
kv.append(uri)
sg_obj["kv"] = kv
# Changed tenant ref to new reference.
sg_obj['tenant_ref'] = self.get_object_ref(tenant, 'tenant')
return sg_obj
    def get_vs_ssl_profiles(self, profiles, avi_config, prefix,
                            merge_object_mapping, sys_dict, f5_config):
        """
        Resolve the SSL profile / certificate / PKI references a VS needs,
        split into client-side (VS) and server-side (pool) lists.
        :param profiles: profiles in f5 config assigned to VS
        :param avi_config: converted avi config
        :param prefix: prefix for objects
        :param merge_object_mapping: Merged object mappings
        :param sys_dict: System object dict
        :param f5_config: full F5 config (currently unused here)
        :return: (vs_ssl_profile_names, pool_ssl_profile_names) lists of
                 dicts with 'profile', 'cert', 'pki' and 'mode' keys
        """
        # f5_profiles = f5_config.get("profile", {})
        vs_ssl_profile_names = []
        pool_ssl_profile_names = []
        if not profiles:
            return vs_ssl_profile_names, pool_ssl_profile_names
        # A bare string means a single profile with no nested attributes.
        if isinstance(profiles, str):
            profiles = profiles.replace(" {}", "")
            profiles = {profiles: None}
        for key in profiles.keys():
            # Called tenant ref to get object name.
            tenant, name = self.get_tenant_ref(key)
            if prefix:
                name = prefix + '-' + name
            ssl_profile_list = avi_config.get("SSLProfile", [])
            sys_ssl = sys_dict['SSLProfile']
            # Prefer the merged (system) profile; fall back to converted
            # objects matched by name or recorded duplicate names.
            ssl_profiles = [ob for ob in sys_ssl if ob['name'] ==
                            merge_object_mapping['ssl_profile'].get(name)
                            ] or [obj for obj in ssl_profile_list
                                  if (obj['name'] == name or name in
                                      obj.get("dup_of", []))]
            if ssl_profiles:
                cert_name = ssl_profiles[0].get('cert_name', None)
                if not cert_name:
                    cert_name = name
                ssl_key_cert_list = avi_config.get("SSLKeyAndCertificate", [])
                sys_key_cert = sys_dict['SSLKeyAndCertificate']
                # '-dummy' suffixed certs are placeholder objects created
                # when the real key/cert could not be converted.
                key_cert = [ob for ob in sys_key_cert if ob['name'] ==
                            merge_object_mapping['ssl_cert_key'].get(cert_name)
                            ] or [obj for obj in ssl_key_cert_list if
                                  (obj['name'] == cert_name or obj['name'] ==
                                   cert_name + '-dummy' or cert_name in
                                   obj.get("dup_of", []))]
                # key_cert = key_cert[0]['name'] if key_cert else None
                if key_cert:
                    key_cert = self.get_object_ref(
                        key_cert[0]['name'], 'sslkeyandcertificate',
                        tenant=self.get_name(key_cert[0]['tenant_ref']))
                profile = profiles[key]
                # 'context' decides whether the profile is client- or
                # server-side; older configs mark it with a bare key instead.
                context = profile.get("context") if profile else None
                if (not context) and isinstance(profile, dict):
                    if 'serverside' in profile:
                        context = 'serverside'
                    elif 'clientside' in profile:
                        context = 'clientside'
                pki_list = avi_config.get("PKIProfile", [])
                syspki = sys_dict['PKIProfile']
                pki_profiles = [ob for ob in syspki if ob['name'] ==
                                merge_object_mapping['pki_profile'].get(
                                    name)] or \
                               [obj for obj in pki_list if
                                (obj['name'] == name or
                                 name in obj.get("dup_of", []))]
                pki_profile = pki_profiles[0]['name'] if pki_profiles else None
                mode = 'SSL_CLIENT_CERTIFICATE_NONE'
                if pki_profile:
                    # 'mode' is converter bookkeeping on the PKI profile;
                    # pop it so it does not leak into the final object.
                    mode = pki_profiles[0].pop('mode',
                                               'SSL_CLIENT_CERTIFICATE_NONE')
                    pki_profile = self.get_object_ref(
                        pki_profiles[0]["name"], 'pkiprofile',
                        tenant=self.get_name(pki_profiles[0]['tenant_ref']))
                if context == "clientside":
                    ssl_prof_ref = self.get_object_ref(
                        ssl_profiles[0]["name"], 'sslprofile',
                        tenant=self.get_name(ssl_profiles[0]['tenant_ref']))
                    vs_ssl_profile_names.append({"profile": ssl_prof_ref,
                                                 "cert": key_cert,
                                                 "pki": pki_profile,
                                                 'mode': mode})
                elif context == "serverside":
                    ssl_prof_ref = self.get_object_ref(
                        ssl_profiles[0]["name"], 'sslprofile',
                        tenant=self.get_name(ssl_profiles[0]['tenant_ref']))
                    pool_ssl_profile_names.append(
                        {"profile": ssl_prof_ref, "cert": key_cert,
                         "pki": pki_profile, 'mode': mode})
        return vs_ssl_profile_names, pool_ssl_profile_names
def get_vs_app_profiles(self, profiles, avi_config, tenant_ref, prefix,
oc_prof, enable_ssl, merge_object_mapping,
sys_dict):
"""
Searches for profile refs in converted profile config if not found
creates default profiles
:param profiles: profiles in f5 config assigned to VS
:param avi_config: converted avi config
:param tenant_ref: Tenant referance
:param prefix: prefix for objects
:param oc_prof: one connect profile
:param enable_ssl: VS ssl enabled flag
:param merge_object_mapping: Merged object mappings
:param sys_dict: System object dict
:return: returns list of profile refs assigned to VS in avi config
"""
app_profile_refs = []
app_prof_conf = dict()
app_profile_list = avi_config.get("ApplicationProfile", [])
unsupported_profiles = avi_config.get('UnsupportedProfiles', [])
sys_app = sys_dict['ApplicationProfile']
if not profiles:
profiles = {}
if isinstance(profiles, str):
profiles = profiles.replace(" {}", "")
profiles = {profiles: None}
for name in profiles.keys():
# Called tenant ref to get object name.
name = self.get_tenant_ref(name)[1]
# Added prefix for objects
if prefix:
name = '%s-%s' % (prefix, name)
app_profiles = [ob for ob in sys_app if ob['name'] ==
merge_object_mapping['app_profile'].get(name)] or [
obj for obj in app_profile_list if
(obj['name'] == name
or name in obj.get("dup_of", []))]
if app_profiles:
app_prof_name = app_profiles[0]['name']
app_profile_refs.append(self.get_object_ref(
app_prof_name, 'applicationprofile',
tenant=self.get_name(app_profiles[0]['tenant_ref'])))
if app_profiles[0].get('HTTPPolicySet', None):
app_prof_conf['policy_name'] = app_profiles[0]['HTTPPolicySet']
if app_profiles[0].get('fallback_host', None):
app_prof_conf['f_host'] = app_profiles[0]['fallback_host']
# prerequisite user need to create default auth profile
if app_profiles[0].get('realm', None):
app_prof_conf['realm'] = {
"type": "HTTP_BASIC_AUTH",
"auth_profile_ref": self.get_object_ref(
'System-Default-Auth-Profile', 'authprofile',
tenant=self.get_name(
app_profiles[0]['tenant_ref'])),
"realm": app_profiles[0]['realm']
}
if not app_profile_refs:
not_supported = [key for key in profiles.keys() if
key in unsupported_profiles]
if not_supported:
LOG.warning(
'Profiles not supported by Avi : %s' % not_supported)
return app_prof_conf
if enable_ssl:
app_profile_refs.append(
self.get_object_ref('System-SSL-Application',
'applicationprofile', tenant='admin'))
app_prof_conf['app_prof'] = app_profile_refs
return app_prof_conf
else:
app_profile_refs.append(
self.get_object_ref('System-L4-Application',
'applicationprofile', tenant='admin'))
app_prof_conf['app_prof'] = app_profile_refs
return app_prof_conf
# Added prefix for objects
if prefix:
value = '%s-%s' % (prefix, value)
default_app_profile = [ob for ob in sys_app if ob['name'] ==
merge_object_mapping['app_profile'].get(
value)] or [
obj for obj in app_profile_list if
(obj['name'] == value
or value in obj.get("dup_of", []))]
tenant = self.get_name(default_app_profile[0]['tenant_ref']) if \
default_app_profile else '/api/tenant/?name=admin'
app_profile_refs.append(
self.get_object_ref(default_app_profile[0]['name'],
'applicationprofile', tenant=tenant))
app_prof_conf['app_prof'] = app_profile_refs
return app_prof_conf
def get_vs_ntwk_profiles(self, profiles, avi_config, prefix,
merge_object_mapping, sys_dict):
"""
Searches for profile refs in converted profile config if not found
creates default profiles
:param profiles: profiles in f5 config assigned to VS
:param avi_config: converted avi config
:param prefix: prefix for objects
:param merge_object_mapping: merged object mappings
:param sys_dict: System object dict
:return: returns list of profile refs assigned to VS in avi config
"""
network_profile_names = []
if not profiles:
return []
if isinstance(profiles, str):
profiles = profiles.replace(" {}", "")
profiles = {profiles: None}
for name in profiles.keys():
# Called tenant method to get object name
tenant, name = self.get_tenant_ref(name)
# Added prefix for objects
if prefix:
name = prefix + '-' + name
ntwk_prof_lst = avi_config.get("NetworkProfile")
sysnw = sys_dict['NetworkProfile']
network_profiles = [ob for ob in sysnw if
ob['name'] == merge_object_mapping[
'network_profile'].get(name)] or \
[obj for obj in ntwk_prof_lst if (
obj['name'] == name or name in
obj.get("dup_of", []))]
if network_profiles:
network_profile_ref = self.get_object_ref(
network_profiles[0]['name'], 'networkprofile',
tenant=self.get_name(network_profiles[0]['tenant_ref']))
network_profile_names.append(network_profile_ref)
return network_profile_names
    def update_service(self, port, vs, enable_ssl):
        """
        Resolve a port clash between a new VS and an existing converted VS
        sharing the same VIP by shrinking/splitting the existing port range.
        :param port: port for currant VS
        :param vs: VS from converted config list
        :param enable_ssl: value to put in service object
        :return: True when a service was adjusted, False when nothing
                 overlapped, or the string 'duplicate_ip_port' when the
                 exact ip:port already exists
        """
        service_updated = False
        vs_new_service = []
        for service in vs['services']:
            port_end = service.get('port_range_end', None)
            # Exact single-port collision: cannot be converted twice.
            if not port_end and int(service['port']) == int(port):
                return 'duplicate_ip_port'
            if port_end and (service['port'] <= int(port) <= port_end):
                if port not in [conv_const.PORT_START, conv_const.PORT_END]:
                    if service['port'] == int(port) == port_end:
                        return 'duplicate_ip_port'
                    elif service['port'] == int(port):
                        # Clash at range start: move the start up by one.
                        service['port'] = int(port) + 1
                    elif service['port_range_end'] == int(port):
                        # Clash at range end: move the end down by one.
                        service['port_range_end'] = int(port) - 1
                    else:
                        # Clash inside the range: split it into two ranges
                        # around the contested port.
                        new_port = int(port) + 1
                        new_end = service['port_range_end']
                        service['port_range_end'] = int(port) - 1
                        new_service = {'port': new_port,
                                       'port_range_end': new_end,
                                       'enable_ssl': enable_ssl}
                        vs_new_service.append(new_service)
                elif port == conv_const.PORT_START:
                    service['port'] = 2
                elif port == conv_const.PORT_END:
                    service['port_range_end'] = (conv_const.PORT_START - 1)
                service_updated = True
                break
        # Append any range halves produced by a split.
        vs['services'].extend(vs_new_service)
        return service_updated
def get_service_obj(self, destination, avi_config, enable_ssl,
controller_version, tenant_name, cloud_name, prefix,
vs_name, input_vrf=None):
"""
Checks port overlapping scenario for port value 0 in F5 config
:param destination: IP and Port destination of VS
:param avi_config: Dict of avi config
:param enable_ssl: value to put in service objects
:param controller_version: Version of controller
:param tenant_name: Name of tenant
:param cloud_name: Name of cloud
:param prefix: name prefix
:param vs_name: Name of VS
:param input_vrf: Vrf context name
:return: services_obj, ip_addr of vs and ref of vsvip
"""
parts = destination.split(':')
ip_addr = parts[0]
ip_addr = ip_addr.strip()
vrf = None
# Removed unwanted string from ip address
if '%' in ip_addr:
ip_addr, vrf = ip_addr.split('%')
# Added support to skip virtualservice with ip address any
if ip_addr == 'any':
LOG.debug("Skipped:VS with IP address: %s" % str(destination))
return None, None, None, None
# Added check for IP V4
matches = re.findall('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip_addr)
if not matches or ip_addr == '0.0.0.0':
LOG.warning(
'Avi does not support IPv6 Generated random ipv4 for vs:'
' %s' % ip_addr)
ip_addr = ".".join(map(str, (
random.randint(0, 255) for _ in range(4))))
port = parts[1] if len(parts) == 2 else conv_const.DEFAULT_PORT
# Get the list of vs which shared the same vip
if parse_version(controller_version) >= parse_version('17.1'):
vs_dup_ips = \
[vs for vs in avi_config['VirtualService'] if
vs['vip'][0]['ip_address']['addr'] ==
ip_addr]
else:
vs_dup_ips = \
[vs for vs in avi_config['VirtualService'] if
vs['ip_address']['addr'] == ip_addr]
if port == 'any':
port = '0'
if isinstance(port, str) and (not port.isdigit()):
port = self.get_port_by_protocol(port)
# Port is None then skip vs
if not port:
LOG.debug("Skipped:Port not supported %s" % str(parts[1]))
return None, None, None, None
if int(port) > 0:
for vs in vs_dup_ips:
service_updated = self.update_service(port, vs, enable_ssl)
if service_updated == 'duplicate_ip_port':
LOG.debug('Skipped: Duplicate IP-Port for vs %s', vs_name)
return None, None, None, None
if service_updated:
break
services_obj = [{'port': port, 'enable_ssl': enable_ssl}]
else:
if {service.get('port_range_end') for vs in vs_dup_ips for
service in vs['services']}:
LOG.debug('Skipped: Duplicate IP-Port for vs %s', vs_name)
return None, None, None, None
used_ports = list({service['port'] for vs in vs_dup_ips for
service in vs['services']})
if used_ports:
services_obj = []
if conv_const.PORT_END not in used_ports:
used_ports.append(conv_const.PORT_END + 1)
used_ports = sorted(used_ports, key=int)
start = conv_const.PORT_START
for i in range(len(used_ports)):
if start == used_ports[i]:
start += 1
continue
end = int(used_ports[i]) - 1
if end < start:
start += 1
continue
services_obj.append({'port': start,
'port_range_end': end,
'enable_ssl': enable_ssl})
start = int(used_ports[i]) + 1
else:
services_obj = [
{'port': 1, 'port_range_end': conv_const.PORT_END,
'enable_ssl': enable_ssl}]
# Getting vrf ref
if vrf:
self.add_vrf(avi_config, vrf, cloud_name)
vrf_config = avi_config['VrfContext']
vrf_ref = self.get_vrf_context_ref(destination, vrf_config,
'virtual service', vs_name,
cloud_name)
if input_vrf:
vrf_ref = self.get_object_ref(input_vrf, 'vrfcontext',
cloud_name=cloud_name)
updated_vsvip_ref = None
if parse_version(controller_version) >= parse_version('17.1'):
vs_vip_name = self.create_update_vsvip(
ip_addr, avi_config['VsVip'],
self.get_object_ref(tenant_name, 'tenant'),
self.get_object_ref(cloud_name, 'cloud', tenant=tenant_name),
prefix,
vrf_ref)
if vs_vip_name == '':
updated_vsvip_ref = ''
else:
updated_vsvip_ref = self.get_object_ref(vs_vip_name, 'vsvip',
tenant_name, cloud_name)
return services_obj, ip_addr, updated_vsvip_ref, vrf_ref
def clone_pool(self, pool_name, clone_for, avi_pool_list, is_vs,
tenant=None):
"""
If pool is shared with other VS pool is cloned for other VS as Avi dose
not support shared pools with new pool name as <pool_name>-<vs_name>
:param pool_name: Name of the pool to be cloned
:param clone_for: Name of the VS for pool to be cloned
:param avi_pool_list: new pool to be added to this list
:param is_vs: True if this cloning is for VS
:param tenant: if pool is shared across partition then coned for tenant
:return: new pool object
"""
LOG.debug("Cloning pool %s for %s " % (pool_name, clone_for))
new_pool = None
for pool in avi_pool_list:
if pool["name"] == pool_name:
new_pool = copy.deepcopy(pool)
break
if new_pool:
if pool_name in used_pool:
used_pool[pool_name] += 1
else:
used_pool[pool_name] = 1
LOG.debug('Cloning Pool for %s', clone_for)
new_pool["name"] = '{}-{}'.format(pool_name, used_pool[pool_name])
if tenant:
new_pool["tenant_ref"] = self.get_object_ref(tenant, 'tenant')
if is_vs:
# removing config added from VS config to pool
new_pool["application_persistence_profile_ref"] = None
new_pool["ssl_profile_ref"] = None
new_pool["ssl_key_and_certificate_ref"] = None
new_pool["pki_profile_ref"] = None
avi_pool_list.append(new_pool)
pool_ref = new_pool["name"]
LOG.debug("Cloned pool successfully %s for %s " % (
new_pool["name"], clone_for))
return pool_ref
def remove_https_mon_from_pool(self, avi_config, pool_ref, tenant, sysdict):
pool = [p for p in avi_config['Pool'] if p['name'] == pool_ref]
if pool:
hm_refs = pool[0].get('health_monitor_refs', [])
for hm_ref in hm_refs:
hm = [h for h in (sysdict['HealthMonitor'] + avi_config[
'HealthMonitor']) if
self.get_object_ref(
h['name'], 'healthmonitor', tenant=tenant) == hm_ref]
if hm and hm[0]['type'] == 'HEALTH_MONITOR_HTTPS':
pool[0]['health_monitor_refs'].remove(hm_ref)
LOG.warning(
'Skipping %s this reference from %s pool '
'because of health monitor type is HTTPS and VS '
'has no ssl profile.'
% (hm_ref, pool_ref))
def remove_http_mon_from_pool(self, avi_config, pool_ref, tenant, sysdict):
pool = [p for p in avi_config['Pool'] if p['name'] == pool_ref]
if pool:
hm_refs = pool[0].get('health_monitor_refs', [])
for hm_ref in hm_refs:
hm = [h for h in (sysdict['HealthMonitor'] + avi_config[
'HealthMonitor']) if
self.get_object_ref(
h['name'], 'healthmonitor', tenant=tenant) == hm_ref]
if hm and hm[0]['type'] == 'HEALTH_MONITOR_HTTP':
pool[0]['health_monitor_refs'].remove(hm_ref)
LOG.warning(
'Skipping %s this reference from %s pool because of'
' health monitor type is HTTPS and VS has no ssl '
'profile.' % (hm_ref, pool_ref))
def remove_https_mon_from_pool_group(self, avi_config, poolgroup_ref,
tenant, sysdict):
poolgroup = [p for p in avi_config['PoolGroup'] if self.get_object_ref(
p['name'], 'poolgroup', tenant=tenant) == poolgroup_ref]
if poolgroup:
pool_members = [p['pool_ref'] for p in poolgroup[0]['members']]
for pool_ref in pool_members:
pool_ref = self.get_name(pool_ref)
self.remove_https_mon_from_pool(avi_config, pool_ref, tenant,
sysdict)
def remove_http_mon_from_pool_group(self, avi_config, poolgroup_ref, tenant,
sysdict):
poolgroup = [p for p in avi_config['PoolGroup'] if self.get_object_ref(
p['name'], 'poolgroup', tenant=tenant) == poolgroup_ref]
if poolgroup:
pool_members = [p['pool_ref'] for p in poolgroup[0]['members']]
for pool_ref in pool_members:
pool_name = self.get_name(pool_ref)
self.remove_http_mon_from_pool(
avi_config, pool_name, tenant, sysdict)
def add_ssl_to_pool(self, avi_pool_list, pool_ref, pool_ssl_profiles,
tenant='admin'):
"""
F5 serverside SSL need to be added to pool if VS contains serverside SSL
profile this method add that profile to pool
:param avi_pool_list: List of pools to search pool object
:param pool_ref: name of the pool
:param pool_ssl_profiles: ssl profiles to be added to pool
:param tenant: tenant name
"""
for pool in avi_pool_list:
if pool_ref == pool["name"]:
if pool_ssl_profiles["profile"]:
pool["ssl_profile_ref"] = pool_ssl_profiles["profile"]
if pool_ssl_profiles["pki"]:
pool["pki_profile_ref"] = pool_ssl_profiles["pki"]
if pool_ssl_profiles["cert"]:
pool["ssl_key_and_certificate_ref"] = pool_ssl_profiles[
"cert"]
def add_ssl_to_pool_group(self, avi_config, pool_group_ref, ssl_pool,
tenant_ref):
"""
:param avi_config:
:param pool_group_ref:
:param ssl_pool:
:param tenant_ref:
:return:
"""
pool_group = [obj for obj in avi_config['PoolGroup']
if obj['name'] == pool_group_ref]
if pool_group:
pool_group = pool_group[0]
for member in pool_group['members']:
pool_name = self.get_name(member['pool_ref'])
self.add_ssl_to_pool(
avi_config['Pool'], pool_name, ssl_pool, tenant_ref)
    def update_pool_for_persist(self, avi_pool_list, pool_ref, persist_profile,
                                hash_profiles, persist_config, tenant,
                                merge_object_mapping, syspersist,
                                app_prof_type):
        """
        Attach the persistence profile assigned in the F5 VS config to the
        converted pool (or switch the pool to hash load balancing).
        :param avi_pool_list: List of all converted pool objects to avi config
        :param pool_ref: pool name to be updated
        :param persist_profile: persistence profile to be added to pool
        :param hash_profiles: list of profile name for which pool's lb algorithm
        updated to hash
        :param persist_config: list of all converted persistence profiles
        :param tenant: tenant of a pool
        :param app_prof_type: type of application profile for the VS
        :param merge_object_mapping: merged object mapping
        :param syspersist: system persistence profile
        :return: (pool updated successfully, persistence type or None)
        """
        pool_updated = True
        persist_type = None
        pool_obj = [pool for pool in avi_pool_list if pool["name"] == pool_ref]
        if not pool_obj:
            LOG.error("Pool %s not found to add profile %s" %
                      (pool_ref, persist_profile))
            return False, None
        pool_obj = pool_obj[0]
        # Prefer the merged system profile; fall back to converted profiles
        # matched by name or recorded duplicate names.
        persist_profile_obj = \
            [ob for ob in syspersist if ob['name'] ==
             merge_object_mapping['app_per_profile'].get(persist_profile)] or \
            [obj for obj in persist_config if (
                obj["name"] == persist_profile or persist_profile
                in obj.get("dup_of", []))]
        persist_ref_key = "application_persistence_profile_ref"
        if persist_profile_obj:
            # L4 application profiles only support client-IP persistence;
            # anything else is coerced to the system client-IP profile.
            if app_prof_type == 'APPLICATION_PROFILE_TYPE_L4' and (
                    persist_profile_obj[0]['persistence_type'] !=
                    'PERSISTENCE_TYPE_CLIENT_IP_ADDRESS'):
                pool_obj[persist_ref_key] = self.get_object_ref(
                    'System-Persistence-Client-IP',
                    'applicationpersistenceprofile')
                persist_type = 'PERSISTENCE_TYPE_CLIENT_IP_ADDRESS'
                LOG.debug("Defaulted to Client IP persistence profile for '%s' "
                          "Pool in VS of L4 app type " % pool_ref)
            else:
                obj_tenant = persist_profile_obj[0]['tenant_ref']
                pool_obj[persist_ref_key] = \
                    self.get_object_ref(
                        persist_profile_obj[0]['name'],
                        'applicationpersistenceprofile',
                        tenant=self.get_name(obj_tenant))
                persist_type = persist_profile_obj[0]['persistence_type']
        elif persist_profile == "hash" or persist_profile in hash_profiles:
            # NOTE(review): assumes the pool always carries "lb_algorithm"
            # at this point — `del` would raise KeyError otherwise; confirm
            # against the pool-conversion path.
            del pool_obj["lb_algorithm"]
            hash_algorithm = "LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS"
            pool_obj["lb_algorithm_hash"] = hash_algorithm
        else:
            pool_updated = False
        return pool_updated, persist_type
def update_pool_group_for_persist(
self, avi_config, pool_ref, persist_profile, hash_profiles,
persist_config, tenant, merge_object_mapping, syspersist,
app_prof_type):
pool_group_updated = True
persist_type = None
pool_group = [obj for obj in avi_config['PoolGroup']
if obj['name'] == pool_ref]
if pool_group:
pool_group = pool_group[0]
for member in pool_group['members']:
pool_name = self.get_name(member['pool_ref'])
pool_updated, persist_type = self.update_pool_for_persist(
avi_config['Pool'], pool_name, persist_profile,
hash_profiles, persist_config, tenant, merge_object_mapping,
syspersist, app_prof_type)
if not pool_updated:
pool_group_updated = False
return pool_group_updated, persist_type
def update_pool_for_fallback(self, host, avi_pool_list, pool_ref):
"""
Update pool for fallback host config
:param host: Redirect url
:param avi_pool_list: List of all converted pools
:param pool_ref: Name of the pool for which config is to be added
"""
pool_obj = [pool for pool in avi_pool_list if pool["name"] == pool_ref]
if pool_obj:
pool_obj = pool_obj[0]
fail_action = {
"redirect":
{
"status_code": "HTTP_REDIRECT_STATUS_CODE_302",
"host": host,
"protocol": "HTTPS"
},
"type": "FAIL_ACTION_HTTP_REDIRECT"
}
pool_obj["fail_action"] = fail_action
def get_snat_list_for_vs(self, snat_pool):
"""
Converts the f5 snat pool config object to Avi snat list
:param snat_pool: f5 snat pool config
:return: Avi snat list
"""
snat_list = []
members = snat_pool.get("members")
ips = []
if isinstance(members, dict):
ips = members.keys() + members.values()
elif isinstance(members, str):
ips = [members]
ips = [ip for ip in ips if ip]
for ip in ips:
# Removed unwanted string from ip address
if '/' in ip or '%' in ip:
ip = ip.split('/')[-1]
ip = ip.split('%')[-2]
snat_obj = {
"type": "V4",
"addr": ip
}
snat_list.append(snat_obj)
return snat_list
def cleanup_config(self, avi_config):
self.remove_dup_key(avi_config["SSLKeyAndCertificate"])
self.remove_dup_key(avi_config["ApplicationProfile"])
self.remove_dup_key(avi_config["NetworkProfile"])
self.remove_dup_key(avi_config["SSLProfile"])
self.remove_dup_key(avi_config["PKIProfile"])
self.remove_dup_key(avi_config["ApplicationPersistenceProfile"])
self.remove_dup_key(avi_config["HealthMonitor"])
self.remove_dup_key(avi_config["IpAddrGroup"])
avi_config.pop('hash_algorithm', [])
avi_config.pop('OneConnect', [])
avi_config.pop('UnsupportedProfiles', [])
for profile in avi_config['ApplicationProfile']:
profile.pop('HTTPPolicySet', None)
profile.pop('realm', [])
profile.pop('fallback_host', [])
for profile in avi_config.get('PKIProfile', []):
profile.pop('mode', None)
for profile in avi_config.get('SSLProfile', []):
profile.pop('cert_name', None)
if 'Tenant' in avi_config:
for tenant in avi_config['Tenant']:
if tenant['name'] == 'admin':
avi_config['Tenant'].remove(tenant)
def create_hdr_erase_rule(self, name, hdr_name, rule_index):
return self.create_header_rule(
name, hdr_name, "HTTP_REMOVE_HDR", None, rule_index)
def create_hdr_insert_rule(self, name, hdr_name, val, rule_index):
return self.create_header_rule(
name, hdr_name, "HTTP_ADD_HDR", val.strip(), rule_index)
def create_header_rule(self, name, hdr_name, action, val,
rule_index):
rule = {
"name": name,
"index": rule_index,
"hdr_action": [
{
"action": action,
"hdr": {
"name": hdr_name.strip(),
"value": {
"val": val
}
}
}
]
}
return rule
def create_network_security_rule(self, name, ip, mask, tenant):
if '%' in ip:
ip = ip.split('%')[0]
rule = {
"name": name,
"tenant_ref": self.get_object_ref(tenant, 'tenant'),
"rules": [
{
"index": 1,
"enable": True,
"name": "Rule 1",
"age": 0,
"action": "NETWORK_SECURITY_POLICY_ACTION_TYPE_DENY",
"match": {
"client_ip": {
"prefixes": [
{
"ip_addr": {
"type": "V4",
"addr": ip
},
"mask": mask
}
],
"match_criteria": "IS_NOT_IN"
}
},
"log": False
}
]
}
return rule
def add_vrf(self, avi_config, vrf, cloud_ref):
vrf_name = 'vrf-%s' % vrf
vrf_list = avi_config['VrfContext']
vrf_obj = [obj for obj in vrf_list if obj['name'] == vrf_name]
if not vrf_obj:
vrf_obj = {
"name": vrf_name,
"system_default": False,
"cloud_ref": self.get_object_ref(cloud_ref, 'cloud'),
"tenant_ref": self.get_object_ref('admin', 'tenant')
}
if vrf_name == 'global':
vrf_obj['system_default'] = True
vrf_list.append(vrf_obj)
def get_tenant_ref(self, name):
tenant = 'admin'
if name and name.startswith('/'):
parts = name.split('/', 2)
tenant = parts[1]
if not parts[2]:
LOG.warning('Invalid tenant ref : %s' % name)
name = parts[2]
elif name and '/' in name:
parts = name.split('/')
# Changed the index to get the tenant and name in case of
# prefixed name
tenant = parts[-2]
name = parts[-1]
if tenant.lower() == 'common':
tenant = 'admin'
if '/' in name:
name = name.split('/')[1]
if ' ' in tenant:
tenant = tenant.split(' ')[-1]
return tenant, name
def get_app_profile_type(self, profile_name, avi_config):
profiles = avi_config.get('ApplicationProfile', [])
# Called tenant method to get object name
profile_name = self.get_tenant_ref(profile_name)[1]
profile = [obj for obj in profiles if obj['name'] == profile_name]
if profile:
return profile[0]['type']
else:
return 'APPLICATION_PROFILE_TYPE_HTTP'
    def update_pool_for_service_port(self, pool_list, pool_name, hm_list,
                                     sys_hm_list):
        """
        Mark a pool to use the VS service port and drop health-monitor
        references that have no explicit monitor port (they cannot work
        with use_service_port); the removals are recorded in the status csv.
        :param pool_list: converted pools to search
        :param pool_name: name of the pool to update
        :param hm_list: converted health monitors
        :param sys_hm_list: system/baseline health monitors
        """
        rem_hm = []
        pool = [obj for obj in pool_list if obj['name'] == pool_name]
        if pool:
            pool[0]['use_service_port'] = True
            # Checking monitor ports if use_service_port is true
            if pool[0].get('health_monitor_refs'):
                for hm in pool[0]['health_monitor_refs']:
                    hm_name = self.get_name(hm)
                    hm_ob = [ob for ob in (hm_list + sys_hm_list) if
                             ob['name'] == hm_name]
                    if hm_ob and (not hm_ob[0].get('monitor_port')):
                        rem_hm.append(hm)
                        LOG.debug("Removing monitor reference of %s from pool"
                                  " %s as 'use_service_port' is true but "
                                  "monitor has no port", hm_name,
                                  pool_name)
                if rem_hm:
                    pool[0]['health_monitor_refs'] = [
                        h_monitor for h_monitor in pool[0]
                        ['health_monitor_refs'] if h_monitor not in rem_hm]
                    rem_hm = [self.get_name(hmonitor) for hmonitor in rem_hm]
                    # Update the matching csv row so the report shows the
                    # monitors that were dropped.
                    csv_row = [cl for cl in csv_writer_dict_list if cl[
                        'F5 type'] == 'pool' and self.get_tenant_ref(
                        cl['F5 ID'])[1] == pool_name]
                    if csv_row:
                        if csv_row[0]['Skipped settings'] in ('[]', ''):
                            csv_row[0]['Skipped settings'] = str([{
                                'monitor': rem_hm}])
                        else:
                            # NOTE(review): eval() re-parses the stringified
                            # skipped-settings cell; the data is generated by
                            # this converter, but confirm it can never carry
                            # untrusted content.
                            init_val = eval(csv_row[0]['Skipped settings'])
                            if not isinstance(init_val, list):
                                init_val = [init_val]
                            # Extend an existing 'monitor' entry if present,
                            # otherwise append a new one.
                            mon_val = [
                                val['monitor'].extend(rem_hm) for val in
                                init_val if isinstance(val, dict) and
                                'monitor' in val]
                            if bool(mon_val):
                                csv_row[0]['Skipped settings'] = str(init_val)
                            else:
                                init_val.append({'monitor': rem_hm})
                                csv_row[0]['Skipped settings'] = str(init_val)
                        csv_row[0]['Status'] = conv_const.STATUS_PARTIAL
                        csv_row[0]['Avi Object'] = str({'pools': pool})
def rreplace(self, s, old, new, occurrence):
li = s.rsplit(old, occurrence)
return new.join(li)
def get_project_path(self):
return os.path.abspath(os.path.dirname(__file__))
    def clone_pool_if_shared(self, ref, avi_config, vs_name, tenant, p_tenant,
                             persist_type, controller_version, app_prof_ref,
                             sysdict, cloud_name='Default-Cloud', prefix=None):
        """
        Clone a pool or pool group when it is shared between multiple VS or
        partitions in F5; Avi only allows sharing under the conditions
        checked by is_pool_clone_criteria.
        :param ref: reference (name) of pool or pool group
        :param avi_config: Avi configuration cloned pool or pool groups to be
        added
        :param vs_name: Name of the vs to be added
        :param tenant: tenant name of vs
        :param p_tenant: tenant name of pool
        :param persist_type: persistence profile type
        :param controller_version: target Avi controller version
        :param app_prof_ref: Application profile reference of the VS
        :param sysdict: system (default) objects dict
        :param cloud_name: cloud the refs are created in
        :param prefix: optional object name prefix
        :return: tuple of (possibly cloned object name, True when the ref is
        a pool group)
        """
        is_pool_group = False
        pool_group_obj = None
        # Added prefix for objects
        if prefix:
            ref = prefix + '-' + ref
        # Search the pool or pool group with name in avi config for the same
        # tenant as VS
        pool_obj = [pool for pool in avi_config['Pool'] if pool['name'] == ref
                    and pool['tenant_ref'] == self.get_object_ref(tenant,
                                                                  'tenant')]
        # Resolve the persistence type attached to the pool; it feeds the
        # clone criteria below.
        pool_per_ref = pool_obj[0].get(
            'application_persistence_profile_ref') if pool_obj else None
        pool_per_name = self.get_name(pool_per_ref) if pool_per_ref else None
        pool_per_types = [obj['persistence_type'] for obj in (avi_config[
            'ApplicationPersistenceProfile'] + sysdict[
            'ApplicationPersistenceProfile']) if obj['name'] ==
            pool_per_name] if pool_per_name else []
        pool_per_type = pool_per_types[0] if pool_per_types else None
        # No pool with that name in the VS tenant -> try pool groups.
        if not pool_obj:
            pool_group_obj = [pool for pool in avi_config['PoolGroup']
                              if pool['name'] == ref and
                              pool['tenant_ref'] == self.get_object_ref(
                                  tenant, 'tenant')]
            if pool_group_obj:
                is_pool_group = True
        # Find VSs already referencing this pool / pool group, searching in
        # the pool's own tenant when it differs from the VS tenant.
        if p_tenant:
            shared_vs = [obj for obj in avi_config['VirtualService']
                         if obj.get("pool_ref", "") == self.get_object_ref(
                    ref, 'pool', tenant=p_tenant, cloud_name=cloud_name)]
            if not shared_vs:
                shared_vs = [obj for obj in avi_config['VirtualService']
                             if obj.get("pool_group_ref", "") ==
                             self.get_object_ref(
                                 ref, 'poolgroup', tenant=p_tenant,
                                 cloud_name=cloud_name)]
        else:
            shared_vs = [obj for obj in avi_config['VirtualService']
                         if obj.get("pool_ref", "") == self.get_object_ref(
                    ref, 'pool', tenant=tenant, cloud_name=cloud_name)]
            if not shared_vs:
                shared_vs = [obj for obj in avi_config['VirtualService']
                             if obj.get("pool_group_ref", "") ==
                             self.get_object_ref(
                                 ref, 'poolgroup', tenant=tenant,
                                 cloud_name=cloud_name)]
        # Pool lives in a different tenant than the VS -> always clone into
        # the VS tenant.
        if not tenant == p_tenant:
            if is_pool_group:
                ref = self.clone_pool_group(ref, vs_name, avi_config, True,
                                            tenant, cloud_name=cloud_name)
            else:
                ref = self.clone_pool(ref, vs_name, avi_config['Pool'],
                                      True, tenant)
        if shared_vs:
            if is_pool_group:
                ref = self.clone_pool_group(ref, vs_name, avi_config, True,
                                            tenant, cloud_name=cloud_name)
            else:
                # Compare this VS's application profile with the sharing
                # VS's profile to decide whether the pool may stay shared.
                shared_appref = shared_vs[0].get('application_profile_ref')
                shared_apptype = None
                if shared_appref:
                    shared_appname = self.get_name(shared_appref)
                    shared_appobjs = [ob for ob in (avi_config[
                        'ApplicationProfile'] + sysdict[
                        'ApplicationProfile']) if ob['name'] ==
                        shared_appname]
                    shared_appobj = shared_appobjs[0] if shared_appobjs else {}
                    shared_apptype = shared_appobj['type'] if shared_appobj \
                        else None
                app_prof_name = self.get_name(app_prof_ref)
                app_prof_objs = [appob for appob in (avi_config[
                    'ApplicationProfile'] + sysdict[
                    'ApplicationProfile']) if appob['name'] ==
                    app_prof_name]
                app_prof_obj = app_prof_objs[0] if app_prof_objs else {}
                app_prof_type = app_prof_obj['type'] if app_prof_obj else None
                if self.is_pool_clone_criteria(
                        controller_version, app_prof_type, shared_apptype,
                        persist_type, pool_per_type, shared_appobj,
                        app_prof_obj):
                    LOG.debug('Cloned the pool %s for VS %s', ref, vs_name)
                    ref = self.clone_pool(ref, vs_name, avi_config['Pool'],
                                          True, tenant)
                else:
                    LOG.debug("Shared pool %s for VS %s", ref, vs_name)
        return ref, is_pool_group
def is_pool_clone_criteria(self, controller_version, app_prof_type,
shared_apptype, persist_type, pool_per_type,
shared_appobj, app_prof_obj):
if parse_version(controller_version) < parse_version(
'17.1.6') or app_prof_type != 'APPLICATION_PROFILE_TYPE_HTTP' \
or shared_apptype != app_prof_type or (
persist_type and persist_type !=
'PERSISTENCE_TYPE_HTTP_COOKIE') or (
pool_per_type and pool_per_type !=
'PERSISTENCE_TYPE_HTTP_COOKIE') or (
shared_appobj.get('http_profile', {}).get(
'connection_multiplexing_enabled') != app_prof_obj.get(
'http_profile', {}).get('connection_multiplexing_enabled') or (
shared_appobj.get('http_profile', {}).get(
'cache_config') != app_prof_obj.get(
'http_profile', {}).get('cache_config'))):
return True
else:
return False
    def clone_pool_group(self, pool_group_name, clone_for, avi_config, is_vs,
                         tenant='admin', cloud_name='Default-Cloud'):
        """
        If a pool group is shared with another VS it is cloned (together
        with all of its member pools) because Avi does not support shared
        pools; the clone is named <pool_group_name>-<clone count>.
        :param pool_group_name: Name of the pool group to be cloned
        :param clone_for: Name of the object/entity for pool group to be cloned
        :param avi_config: new pool group to be added to avi config
        :param is_vs: True if clone is called for VS
        :param tenant: if f5 pool is shared across partition then cloned for
        tenant
        :param cloud_name: cloud used for the member pool refs
        :return: new pool group name, or None when no such group exists
        """
        pg_ref = None
        new_pool_group = None
        for pool_group in avi_config['PoolGroup']:
            if pool_group["name"] == pool_group_name:
                new_pool_group = copy.deepcopy(pool_group)
                break
        if new_pool_group:
            # used_pool_groups (module global) counts clones per original
            # name so every clone gets a unique numeric suffix.
            if pool_group_name in used_pool_groups:
                used_pool_groups[pool_group_name] += 1
            else:
                used_pool_groups[pool_group_name] = 1
            LOG.debug('Cloning pool group for %s', clone_for)
            new_pool_group["name"] = '{}-{}'.format(
                pool_group_name, used_pool_groups[pool_group_name])
            pg_ref = new_pool_group["name"]
            new_pool_group["tenant_ref"] = self.get_object_ref(tenant, 'tenant')
            avi_config['PoolGroup'].append(new_pool_group)
            # Clone every member pool as well and repoint the member refs at
            # the clones.
            for member in new_pool_group['members']:
                pool_name = self.get_name(member['pool_ref'])
                pool_name = self.clone_pool(pool_name, clone_for,
                                            avi_config['Pool'], is_vs, tenant)
                member['pool_ref'] = self.get_object_ref(
                    pool_name, 'pool', tenant=tenant, cloud_name=cloud_name)
        return pg_ref
def add_tenants(self, avi_config_dict):
if tenants:
avi_config_dict['Tenant'] = []
for tenant in tenants:
avi_config_dict['Tenant'].append({
'name': tenant,
'local': True
})
def get_cell_format(self, workbook, cell_format_info):
format_col = cell_format_info['col']
format = workbook.add_format(cell_format_info['fromat'])
return format_col, format
    def write_status_report_and_pivot_table_in_xlsx(
            self, output_dir, report_name, vs_level_status):
        """
        Write the conversion status sheet plus a pivot-table summary sheet
        to '<report_name>-ConversionStatus.xlsx'. Rows come from the
        module-level csv_writer_dict_list.
        :param output_dir: Path of output directory
        :param report_name: filename to write report
        :param vs_level_status: Flag to include VS wise detailed status or not
        :return: None
        """
        global ppcount
        global ptotal_count
        # List of fieldnames for headers; the VS-level variant adds the
        # per-VS reference and overall-skipped columns.
        if vs_level_status:
            fieldnames = ['F5 type', 'F5 SubType', 'F5 ID', 'Status',
                          'Skipped settings', 'Indirect mapping',
                          'Not Applicable', 'User Ignored',
                          'Skipped for defaults', 'Complexity Level',
                          'VS Reference', 'Overall skipped settings',
                          'Avi Object', 'Needs Review']
        else:
            fieldnames = ['F5 type', 'F5 SubType', 'F5 ID', 'Status',
                          'Skipped settings', 'Indirect mapping',
                          'Not Applicable',
                          'User Ignored', 'Skipped for defaults',
                          'Complexity Level', 'Avi Object', 'Needs Review']
        # xlsx workbook
        report_path = output_dir + os.path.sep + "%s-ConversionStatus.xlsx" % \
                                                 report_name
        status_wb = Workbook(report_path)
        # xlsx worksheet
        status_ws = status_wb.add_worksheet("Status Sheet")
        # Lock the first row of xls report.
        status_ws.freeze_panes(1, 0)
        first_row = 0
        for header in fieldnames:
            col = fieldnames.index(header)
            status_ws.write(first_row, col, header)
        row = 1
        for row_data in csv_writer_dict_list:
            ppcount += 1
            for _key, _value in row_data.items():
                col = fieldnames.index(_key)
                status_ws.write(row, col, _value)
            # Added call for progress function.
            msg = "excel sheet conversion started..."
            self.print_progress_bar(ppcount, ptotal_count, msg,
                                    prefix='Progress', suffix='')
            row += 1
        status_wb.close()
        # create dataframe for row list
        df = pandas.DataFrame(csv_writer_dict_list, columns=fieldnames)
        # create pivot table using pandas
        pivot_table = \
            pandas.pivot_table(df, index=["Status", "F5 type", "F5 SubType"],
                               values=[], aggfunc=[len], fill_value=0)
        # create dataframe for pivot table using pandas
        pivot_df = pandas.DataFrame(pivot_table)
        master_book = \
            load_workbook(report_path)
        master_writer = pandas.ExcelWriter(report_path, engine='openpyxl')
        # NOTE(review): assigning ExcelWriter.book only works with older
        # pandas/openpyxl combinations -- verify against pinned versions.
        master_writer.book = master_book
        # Add pivot table in Pivot sheet
        pivot_df.to_excel(master_writer, 'Pivot Sheet')
        master_writer.save()
def format_string_to_json(self, avi_string):
"""
This function defines that it convert string into json format to
convert into dict
:param avi_string: string to be converted
:return: Return converted string
"""
avi_string = avi_string.split('__/__')[0]
return ast.literal_eval(avi_string)
def get_csv_object_list(self, csv_writer_dict_list, command_list):
"""
This method is used for getting csv object
:param csv_writer_dict_list: CSV row of object from xlsx report
:param command_list: List of netscaler commands
:return: List of CSV rows
"""
csv_object = [row for row in csv_writer_dict_list if
row['Status'] in [conv_const.STATUS_PARTIAL,
conv_const.STATUS_SUCCESSFUL] and
'->' not in row['Avi Object'] and
row['F5 type'] in command_list]
return csv_object
def get_and_update_csv_row(self, csv_object, vs_ref):
"""
This function defines that update csv row.
:param csv_object: csv object
:param vs_ref: Name of VS
:return: Skipped attribute list
"""
if 'VS Reference' in csv_object and \
vs_ref not in csv_object['VS Reference']:
csv_object['VS Reference'] += ',' + vs_ref
else:
csv_object['VS Reference'] = vs_ref
repls = ('[', ''), (']', '')
skipped_setting_csv = reduce(
lambda a, kv: a.replace(*kv), repls, csv_object['Skipped settings'])
if skipped_setting_csv:
return [skipped_setting_csv]
def get_csv_skipped_list(self, csv_objects, name_of_object, vs_ref,
field_key=None):
"""
This method is used for getting skipped list from vs.
:param csv_objects: CSV row of object from xlsx report
:param name_of_object: Name of object
:param vs_ref: Name of VS
:param field_key: Key fromm avi json which is specific for object type
:return: Return skipped attribute list
"""
for csv_object in csv_objects:
avi_objects = self.format_string_to_json(csv_object['Avi Object'])
if isinstance(avi_objects, dict):
avi_objects = [avi_objects]
if not avi_objects:
avi_objects = []
for avi_object_json in avi_objects:
object_found = False
if field_key:
if field_key in avi_object_json and 'Duplicate' not in \
avi_object_json[field_key] and (
avi_object_json[field_key]['name'] ==
name_of_object):
object_found = True
else:
if avi_object_json.get('name') and \
avi_object_json['name'] == name_of_object:
object_found = True
if object_found:
return self.get_and_update_csv_row(csv_object, vs_ref)
def get_ssl_profile_skipped(self, profile_csv_list, ssl_profile_ref,
vs_ref):
"""
This functions defines that get the skipped list of CSV row
:param profile_csv_list: List of profile(F5 type) csv rows
:param ssl_profile_ref: Reference of ssl profile
:param vs_ref: Name of VS
:return: ssl profile name and skipped sttribute list
"""
ssl_profile_name = self.get_name(ssl_profile_ref)
skipped_list = self.get_csv_skipped_list(
profile_csv_list, ssl_profile_name, vs_ref, field_key='ssl_profile')
return ssl_profile_name, skipped_list
def get_application_profile_skipped(self, profile_csv_list, app_profile_ref,
vs_ref):
"""
This functions defines that get the skipped list of CSV row
:param profile_csv_list: List of profile(F5 type) csv rows
:param app_profile_ref: Reference of application profile
:param vs_ref: Name of VS
:return: application profile name and skipped sttribute list
"""
app_profile_name = self.get_name(app_profile_ref)
skipped_list = self.get_csv_skipped_list(
profile_csv_list, app_profile_name, vs_ref, field_key='app_profile')
return app_profile_name, skipped_list
def get_network_profile_skipped(self, profile_csv_list, network_profile_ref,
vs_ref):
"""
This functions defines that get the skipped list of CSV row
:param profile_csv_list: List of profile(F5 type) csv rows
:param network_profile_ref: Reference of Network profile
:param vs_ref: Name of VS
:return: network profile name and skipped sttribute list
"""
network_profile_name = self.get_name(network_profile_ref)
skipped_list = self.get_csv_skipped_list(
profile_csv_list, network_profile_name, vs_ref,
field_key='network_profile')
return network_profile_name, skipped_list
def get_policy_set_skipped(self, profile_csv_list, policy_set_ref, vs_ref):
"""
This functions defines that get the skipped list of CSV row
:param profile_csv_list: List of profile(F5 type) csv rows
:param policy_set_ref: Reference of policy set
:param vs_ref: Name of VS
:return: policy set name and skipped sttribute list
"""
policy_set_name = self.get_name(policy_set_ref)
skipped_list = self.get_csv_skipped_list(
profile_csv_list, policy_set_name, vs_ref, field_key='policy_set')
return policy_set_name, skipped_list
def get_app_persistence_profile_skipped(self, csv_writer_dict_list,
pool_object, vs_ref):
"""
This functions defines that get the skipped list of CSV row
:param csv_writer_dict_list: List of csv rows
:param pool_object: object of pool
:param vs_ref: Name of VS
:return: profile name and skipped attribute list
"""
app_persistence_profile_name = self.get_name(
pool_object['application_persistence_profile_ref'])
csv_object = self.get_csv_object_list(csv_writer_dict_list,
['persistence'])
skipped_list = self.get_csv_skipped_list(
csv_object, app_persistence_profile_name, vs_ref,
field_key='app_per_profile')
return app_persistence_profile_name, skipped_list
def get_pool_skipped(self, csv_objects, pool_name, vs_ref):
"""
This functions defines that get the skipped list of CSV row
:param csv_objects: CSV row of object from xlsx report
:param pool_name: Name of pool
:param vs_ref: Name of VS
:return: Skipped list of csv row
"""
for csv_object in csv_objects:
avi_object = self.format_string_to_json(csv_object['Avi Object'])
if 'pools' in avi_object:
pool_object = [pool for pool in avi_object['pools']
if pool['name'] == pool_name]
if pool_object:
return self.get_and_update_csv_row(csv_object, vs_ref)
def get_pool_skipped_list(self, avi_config, pool_group_name, csv_pool_rows,
csv_writer_dict_list, vs_ref, profile_csv_list):
"""
This method is used for getting pool skipped list.
:param avi_config: AVI dict
:param pool_group_name: Name of Pool group
:param csv_pool_rows: List of pool(F5 type) csv rows
:param csv_writer_dict_list: List of F5 csv rows
:param vs_ref: Name of VS
:param profile_csv_list: List of profile(F5 type) csv rows
:return:
"""
pool_group_objects = [pool_group_object for pool_group_object in
avi_config['PoolGroup'] if
pool_group_object['name']
== pool_group_name]
pool_members = pool_group_objects[0]['members']
skipped_setting = {
'pools': []
}
for pool_member in pool_members:
pool_name = self.get_name(pool_member['pool_ref'])
self.get_skipped_pool(
avi_config, pool_name, csv_pool_rows, csv_writer_dict_list,
vs_ref, profile_csv_list, skipped_setting)
if skipped_setting['pools']:
return skipped_setting
def vs_complexity_level(self):
"""
This method calculate the complexity of vs.
:return:
"""
# Get the VS object list which is having status successful and partial.
vs_csv_objects = [row for row in csv_writer_dict_list
if row['Status'] in [conv_const.STATUS_PARTIAL,
conv_const.STATUS_SUCCESSFUL]
and row['F5 type'] == 'virtual']
for vs_csv_object in vs_csv_objects:
virtual_service = self.format_string_to_json(
vs_csv_object['Avi Object'])
# Update the complexity level of VS as Basic or Advanced
self.update_vs_complexity_level(vs_csv_object, virtual_service)
    def vs_per_skipped_setting_for_references(self, avi_config):
        """
        Aggregate, per converted VS, the skipped settings of every object
        the VS references (ssl cert/key, ssl/application/network profiles,
        pools, pool groups, http policies) into the VS row's 'Overall
        skipped settings' column; objects referenced by no VS get their
        'VS Reference' column marked as not in use.
        :param avi_config: converted Avi configuration used to resolve refs
        :return: None
        """
        # Get the count of vs fully migrated
        global fully_migrated
        global ptotal_count
        global ppcount
        fully_migrated = 0
        # Get the VS object list which is having status successful and partial.
        vs_csv_objects = [row for row in csv_writer_dict_list
                          if row['Status'] in [conv_const.STATUS_PARTIAL,
                                               conv_const.STATUS_SUCCESSFUL]
                          and row['F5 type'] == 'virtual']
        # Get the list of csv rows which has profile as F5 Type
        profile_csv_list = self.get_csv_object_list(
            csv_writer_dict_list, ['profile'])
        ptotal_count = ptotal_count + len(vs_csv_objects)
        for vs_csv_object in vs_csv_objects:
            ppcount += 1
            skipped_setting = {}
            virtual_service = self.format_string_to_json(
                vs_csv_object['Avi Object'])
            # Update the complexity level of VS as Basic or Advanced
            self.update_vs_complexity_level(vs_csv_object, virtual_service)
            vs_ref = virtual_service['name']
            repls = ('[', ''), (']', '')
            # Get list of skipped setting attributes
            skipped_setting_csv = reduce(lambda a, kv: a.replace(*kv), repls,
                                         vs_csv_object['Skipped settings'])
            if skipped_setting_csv:
                skipped_setting['virtual_service'] = [skipped_setting_csv]
            # Get the skipped list for ssl key and cert
            if 'ssl_key_and_certificate_refs' in virtual_service:
                for ssl_key_and_certificate_ref in \
                        virtual_service['ssl_key_and_certificate_refs']:
                    ssl_key_cert = self.get_name(ssl_key_and_certificate_ref)
                    ssl_kc_skip = self.get_csv_skipped_list(
                        profile_csv_list, ssl_key_cert, vs_ref,
                        field_key='ssl_cert_key')
                    if ssl_kc_skip:
                        skipped_setting['ssl cert key'] = {}
                        skipped_setting['ssl cert key']['name'] = ssl_key_cert
                        skipped_setting['ssl cert key'][
                            'skipped_list'] = ssl_kc_skip
            # Get the skipped list for ssl profile name.
            # Changed ssl profile name to ssl profile ref.
            if 'ssl_profile_ref' in virtual_service:
                name, skipped = self.get_ssl_profile_skipped(
                    profile_csv_list, virtual_service['ssl_profile_ref'],
                    vs_ref)
                if skipped:
                    skipped_setting['ssl profile'] = {}
                    skipped_setting['ssl profile']['name'] = name
                    skipped_setting['ssl profile']['skipped_list'] = skipped
            # Get the skipped list for pool group.
            if 'pool_group_ref' in virtual_service:
                pool_group_name = self.get_name(
                    virtual_service['pool_group_ref'])
                csv_pool_rows = self.get_csv_object_list(csv_writer_dict_list,
                                                         ['pool'])
                pool_group_skipped_settings = self.get_pool_skipped_list(
                    avi_config, pool_group_name, csv_pool_rows,
                    csv_writer_dict_list, vs_ref, profile_csv_list)
                if pool_group_skipped_settings:
                    skipped_setting['Pool Group'] = pool_group_skipped_settings
            # Get the skipped list for pool.
            if 'pool_ref' in virtual_service:
                pool_skipped_settings = {'pools': []}
                pool_name = self.get_name(virtual_service['pool_ref'])
                csv_pool_rows = self.get_csv_object_list(csv_writer_dict_list,
                                                         ['pool'])
                self.get_skipped_pool(
                    avi_config, pool_name, csv_pool_rows, csv_writer_dict_list,
                    vs_ref, profile_csv_list, pool_skipped_settings)
                if pool_skipped_settings['pools']:
                    skipped_setting['Pool'] = pool_skipped_settings
            # Get the skipepd list for http policy.
            if 'http_policies' in virtual_service:
                policy_csv_list = self.get_csv_object_list(
                    csv_writer_dict_list, ['policy', 'profile'])
                for http_ref in virtual_service['http_policies']:
                    policy_set_name, skipped_list = self.get_policy_set_skipped(
                        policy_csv_list, http_ref['http_policy_set_ref'],
                        vs_ref)
                    if skipped_list:
                        skipped_setting['Httppolicy'] = {}
                        skipped_setting['Httppolicy']['name'] = policy_set_name
                        skipped_setting['Httppolicy'][
                            'skipped_list'] = skipped_list
                    # Get the http policy name
                    pool_csv_rows = \
                        self.get_csv_object_list(csv_writer_dict_list, ['pool'])
                    # Switching actions inside the policy can reference
                    # pools; pull their skipped settings in as well.
                    for each_http_policy in avi_config['HTTPPolicySet']:
                        if each_http_policy['name'] == policy_set_name:
                            for http_req in each_http_policy[
                                    'http_request_policy']['rules']:
                                if http_req.get('switching_action', {}):
                                    self.get_skip_pools_policy(
                                        policy_set_name, http_req,
                                        avi_config, pool_csv_rows, vs_ref,
                                        profile_csv_list, skipped_setting)
            # # Get the skipped list for application_profile_ref.
            if 'application_profile_ref' in virtual_service and 'admin:System' \
                    not in virtual_service['application_profile_ref']:
                name, skipped = self.get_application_profile_skipped(
                    profile_csv_list,
                    virtual_service['application_profile_ref'],
                    vs_ref)
                if skipped:
                    skipped_setting['Application profile'] = {}
                    skipped_setting['Application profile'][
                        'name'] = name
                    skipped_setting['Application profile'][
                        'skipped_list'] = skipped
            # # Get the skipped list for network profile ref.
            if 'network_profile_ref' in virtual_service and 'admin:System' \
                    not in virtual_service['network_profile_ref']:
                name, skipped = self.get_network_profile_skipped(
                    profile_csv_list, virtual_service['network_profile_ref'],
                    vs_ref)
                if skipped:
                    skipped_setting['Network profile'] = {}
                    skipped_setting['Network profile'][
                        'name'] = name
                    skipped_setting['Network profile'][
                        'skipped_list'] = skipped
            # Update overall skipped setting of VS csv row
            if skipped_setting:
                vs_csv_object.update(
                    {'Overall skipped settings': str(skipped_setting)})
            else:
                vs_csv_object.update(
                    {'Overall skipped settings': "FULLY MIGRATION"})
                fully_migrated += 1
            # Added call for progress function.
            msg = "excel sheet conversion started..."
            self.print_progress_bar(ppcount, ptotal_count, msg,
                                    prefix='Progress', suffix='')
        csv_objects = [row for row in csv_writer_dict_list
                       if row['Status'] in [
                           conv_const.STATUS_PARTIAL,
                           conv_const.STATUS_SUCCESSFUL]
                       and row['F5 type'] != 'virtual']
        # Update the vs reference not in used if objects are not attached to
        # VS directly or indirectly
        for row in csv_objects:
            if 'VS Reference' not in row or row['VS Reference'] == '':
                row['VS Reference'] = conv_const.STATUS_NOT_IN_USE
def create_update_vsvip(self, vip, vsvip_config, tenant_ref, cloud_ref,
prefix, vrf_ref):
"""
This functions defines that create or update VSVIP object.
:param vip: vip of VS
:param vsvip_config: List of vs object
:param tenant_ref: tenant reference
:param cloud_ref: cloud reference
:param prefix: Name prefix
:param vrf_ref: VRF reference
:return: None
"""
name = vip + '-vsvip'
# Added prefix for objects
if prefix:
name = '%s-%s' % (prefix, name)
# Get the exsting vsvip object list if present
vsvip = [vip_obj for vip_obj in vsvip_config if vip_obj['name'] == name
and vip_obj.get('vrf_context_ref') == vrf_ref]
if vsvip:
diff_ten = [vips for vips in vsvip if vips['tenant_ref'] !=
tenant_ref]
if diff_ten:
LOG.debug('VsVip %s is repeated with vrf %s but different '
'tenant %s', name, self.get_name(vrf_ref) if vrf_ref
else 'None', self.get_name(tenant_ref))
name = ''
# If VSVIP object not present then create new VSVIP object.
else:
vsvip_object = {
"name": name,
"tenant_ref": tenant_ref,
"cloud_ref": cloud_ref,
"vip": [
{
"vip_id": "0",
"ip_address": {
"type": "V4",
"addr": vip
}
}
],
}
if vrf_ref:
vsvip_object["vrf_context_ref"] = vrf_ref
vsvip_config.append(vsvip_object)
return name
def update_static_route(self, route):
"""
This function defines that convert convert static routes
:param route: Object of net static route
:return: Return static route object
"""
msg = None
next_hop_ip = route.get('gw', route.get('gateway'))
if next_hop_ip and '%' in next_hop_ip:
next_hop_ip = next_hop_ip.split('%')[0]
ip_addr = route.get('network', None)
vrf = None
# Get the mask from subnet mask
if ip_addr and '%' in ip_addr:
ip_addr, vrf = ip_addr.split('%')
vrf = 'vrf-' + (
'/' in vrf and vrf.split('/')[0] or vrf) if vrf else None
if ip_addr and '/' in ip_addr:
ip_addr = ip_addr.split('/')[0]
# set subnet mask to 0.0.0.0 if its equal to default
if not ip_addr or ip_addr == 'default':
ip_addr = '0.0.0.0'
mask = sum([bin(int(x)).count('1') for x in ip_addr.split('.')])
if next_hop_ip and ip_addr:
static_route = {
"route_id": 1,
"prefix": {
"ip_addr": {
"type": "V4",
"addr": ip_addr
},
"mask": mask
},
"next_hop": {
"type": "V4",
"addr": next_hop_ip
}
}
return static_route, vrf, msg
else:
msg = "Next hop ip is not present" if not next_hop_ip else (
"Ip Address is not present")
LOG.debug(msg)
return None, None, msg
def get_vrf_context_ref(self, f5_entity_mem, vrf_config, entity_string,
entity_name, cloud):
"""
Searches for vrf context refs in converted pool config
:param f5_entity_mem: f5 entity or object like pool
:param vrf_config: converted vrf config
:param entity_string: entity string
:param entity_name: name of f5 entity
:param cloud: name of the cloud
:return: returns list of vrf refs assigned to entity in avi config
"""
vrf_ref = None
f5_entity_mem = ':' in f5_entity_mem and f5_entity_mem.split(':')[0] \
or f5_entity_mem if f5_entity_mem else None
vrf = 'vrf-' + f5_entity_mem.split('%')[1] \
if f5_entity_mem and '%' in f5_entity_mem else None
vrf_obj = [obj for obj in vrf_config if vrf and obj["name"] == vrf]
if vrf_obj:
vrf_ref = self.get_object_ref(
vrf_obj[0]['name'], 'vrfcontext', cloud_name=cloud)
else:
LOG.warning("VRF not found for %s %s" % (entity_string,
entity_name))
return vrf_ref
    def net_to_static_route(self, f5_config, avi_config):
        """
        This method converts the net route to static routes and updates the
        VrfContext objects
        :param f5_config: parsed f5 config
        :param avi_config: converted config in avi
        :return: None
        """
        net_config = f5_config.get('route', {})
        avi_vrf = avi_config["VrfContext"]
        # Convert net static route to vrf static route
        # NOTE: iteritems() -- this module targets python 2.
        for key, route in net_config.iteritems():
            LOG.debug("Starting conversion from net route to static for '%s'"
                      % key)
            static_route, vrf, msg = self.update_static_route(route)
            if static_route:
                # Attach the route to its vrf context (or to 'global' when
                # the route has no route domain), renumbering route_id past
                # the current maximum.
                for obj in avi_vrf:
                    if obj['name'] == vrf or (not vrf and obj['name'] ==
                                              'global'):
                        if obj.get('static_routes'):
                            rid = max(
                                [i['route_id'] for i in obj['static_routes']])
                            static_route['route_id'] = rid + 1
                            obj['static_routes'].append(static_route)
                        else:
                            obj['static_routes'] = [static_route]
                LOG.debug("Conversion completed for route '%s'" % key)
                self.add_conv_status(
                    'route', None, key,
                    {'status': conv_const.STATUS_SUCCESSFUL},
                    [{'route': static_route}]
                )
            else:
                # update_static_route explained the failure in msg.
                LOG.debug("Conversion unsuccessful for route '%s'" % key)
                self.add_conv_status('route', None, key,
                                     {'status': conv_const.STATUS_SKIPPED}, msg)
def update_monitor_ssl_ref(self, avi_dict, merge_obj_dict, sysdict):
"""
This method updates the first ssl profile reference from merge
perspective in monitors, which get attached at the time of creation
:param avi_dict: avi configuration dict
:param merge_obj_dict: dict having merge objects
:param sysdict: system object dicts
:return:
"""
for obj in avi_dict['HealthMonitor']:
obj_ref = obj.get('https_monitor', {}).get('ssl_attributes',
{}).get(
'ssl_profile_ref')
if obj_ref:
name = self.get_name(obj_ref)
if name in merge_obj_dict['ssl_profile']:
updated_name = merge_obj_dict['ssl_profile'][name]
prof = [ob for ob in (sysdict['SSLProfile'] + avi_dict[
'SSLProfile']) if ob['name'] == updated_name]
tenant = self.get_name(prof[0]['tenant_ref'])
type_cons = conv_const.OBJECT_TYPE_SSL_PROFILE
obj['https_monitor']['ssl_attributes']['ssl_profile_ref'] =\
self.get_object_ref(updated_name, type_cons, tenant)
    def update_app_profile(self, aviconfig, sys_dict):
        """
        This method updates the application profile to http when there are
        multiple services to a L4 app VS in which one of them is ssl enabled
        :param aviconfig: avi config dict
        :param sys_dict: system config dict
        :return: None
        """
        for vs_obj in aviconfig['VirtualService']:
            # Only multi-service VSs with an application profile qualify.
            if vs_obj.get('services') and len(vs_obj['services']) > 1 and \
                    vs_obj.get('application_profile_ref'):
                app_profile = self.get_name(vs_obj['application_profile_ref'])
                app_profile_obj = [app for app in sys_dict[
                    'ApplicationProfile'] + aviconfig['ApplicationProfile']
                    if app['name'] == app_profile]
                if app_profile_obj and app_profile_obj[0]['type'] == \
                        'APPLICATION_PROFILE_TYPE_L4':
                    for service in vs_obj['services']:
                        if service['enable_ssl']:
                            vs_obj['application_profile_ref'] = \
                                self.get_object_ref(
                                    'System-HTTP',
                                    conv_const.OBJECT_TYPE_APPLICATION_PROFILE)
                            LOG.debug('Changed the application profile '
                                      'reference from L4 to System-HTTP')
                            # An HTTP app profile needs a TCP-proxy network
                            # profile as well; fix it up too.
                            if vs_obj.get('network_profile_ref'):
                                self.update_nw_profile(
                                    vs_obj, sys_dict, aviconfig)
                            # One ssl-enabled service is enough to switch.
                            break
def update_nw_profile(self, vs_obj, sys_dict, aviconfig):
nw_profile = self.get_name(vs_obj['network_profile_ref'])
nw_profile_obj = [nw for nw in sys_dict['NetworkProfile'] +
aviconfig['NetworkProfile'] if nw['name'] ==
nw_profile]
if nw_profile_obj and nw_profile_obj[0]['profile']['type'] != \
'PROTOCOL_TYPE_TCP_PROXY':
LOG.debug(
'Changed the network profile reference from %s to '
'TCP-Proxy for VS %s' % (nw_profile_obj[0]['profile']['type'],
vs_obj['name']))
vs_obj['network_profile_ref'] = self.get_object_ref(
'System-TCP-Proxy', conv_const.OBJECT_TYPE_NETWORK_PROFILE)
def set_pool_group_vrf(self, pool_ref, vrf_ref, avi_config):
"""
This method will set vrf_ref for all pools in poolgroup
:param pool_ref: pool group name
:param vrf_ref: vrf ref of VS
:param avi_config: avi config json
:return:
"""
pg_obj = [poolgrp for poolgrp in avi_config['PoolGroup'] if
poolgrp['name'] == pool_ref]
if pg_obj:
for member in pg_obj[0]['members']:
poolname = self.get_name(member.get('pool_ref'))
self.set_pool_vrf(poolname, vrf_ref, avi_config)
def set_pool_vrf(self, pool_ref, vrf_ref, avi_config):
"""
This method will set vrf_ref for pool
:param pool_ref: pool name
:param vrf_ref: vrf ref of VS
:param avi_config: avi config json
:return:
"""
pool_obj = [pool for pool in avi_config['Pool'] if pool['name'] ==
pool_ref]
if pool_obj and not pool_obj[0].get('vrf_ref'):
pool_obj[0]['vrf_ref'] = vrf_ref
LOG.debug("Added vrf ref to the pool %s", pool_ref)
    def clone_http_policy_set(self, policy, vs_name, avi_config, tenant_name,
                              cloud_name):
        """
        This function clones policy which is shared with more than one vs
        :param policy: policy object to clone (not just its name)
        :param vs_name: vs name appended to the clone's name
        :param avi_config: avi config dict
        :param tenant_name: tenant
        :param cloud_name: cloud
        :return: cloned policy object
        """
        policy_name = policy['name']
        clone_policy = copy.deepcopy(policy)
        LOG.debug("cloning policy %s" % clone_policy)
        if 'http_request_policy' in clone_policy:
            for rule in clone_policy['http_request_policy']['rules']:
                if 'switching_action' in rule:
                    # Pool groups referenced by switching actions must be
                    # cloned too, since Avi does not share pool groups.
                    if rule['switching_action'].get('pool_group_ref'):
                        pool_group_ref = self.get_name(
                            rule['switching_action']['pool_group_ref'])
                        pool_group_ref = self.clone_pool_group(
                            pool_group_ref, policy_name, avi_config, False,
                            tenant_name, cloud_name)
                        if pool_group_ref:
                            updated_pool_group_ref = self.get_object_ref(
                                pool_group_ref,
                                conv_const.OBJECT_TYPE_POOL_GROUP,
                                tenant_name, cloud_name)
                            rule['switching_action']['pool_group_ref'] = \
                                updated_pool_group_ref
                    elif rule['switching_action'].get('pool_ref'):
                        # Plain pool refs are only re-normalized to a full
                        # object ref, not cloned.
                        pool_ref = self.get_name(
                            rule['switching_action']['pool_ref'])
                        if pool_ref:
                            updated_pool_ref = self.get_object_ref(
                                pool_ref, conv_const.OBJECT_TYPE_POOL,
                                tenant_name, cloud_name)
                            rule['switching_action']['pool_ref'] = \
                                updated_pool_ref
        clone_policy['name'] += '-%s-clone' % vs_name
        return clone_policy
    def get_skipped_pool(self, avi_config, pool_name, pool_csv_rows,
                         csv_writer_dict_list, vs_ref, profile_csv_list,
                         skipped_setting):
        """
        This method get the skipped list for pool by going over the
        references attached to it
        :param avi_config: Converted Avi configuration
        :param pool_name: name of the pool
        :param pool_csv_rows: report rows for pool objects
        :param csv_writer_dict_list: Result report dict
        :param vs_ref: VS reference
        :param profile_csv_list: report rows for profile objects
        :param skipped_setting: User defined skipped settings; the result
            is appended to skipped_setting['pools'] in place
        :return: skipped setting for pool
        """
        pool_skipped_setting = {}
        skipped_list = self.get_pool_skipped(pool_csv_rows, pool_name, vs_ref)
        pool_object = [pool for pool in avi_config["Pool"]
                       if pool['name'] == pool_name]
        if skipped_list:
            pool_skipped_setting['pool_name'] = pool_name
            pool_skipped_setting['pool_skipped_list'] = skipped_list
        if pool_object:
            # Each referenced object type (monitors, certs, SSL profile,
            # persistence profile) contributes its own skipped section.
            if 'health_monitor_refs' in pool_object[0]:
                health_monitor_skipped_setting = []
                for health_monitor_ref in pool_object[0]['health_monitor_refs']:
                    health_monitor_ref = self.get_name(health_monitor_ref)
                    monitor_csv_object = self.get_csv_object_list(
                        csv_writer_dict_list, ['monitor'])
                    skipped_list = self.get_csv_skipped_list(
                        monitor_csv_object, health_monitor_ref, vs_ref,
                        field_key='health_monitor')
                    if skipped_list:
                        health_monitor_skipped_setting.append(
                            {'health_monitor_name': health_monitor_ref,
                             'monitor_skipped_list': skipped_list})
                if health_monitor_skipped_setting:
                    pool_skipped_setting['pool_name'] = pool_name
                    pool_skipped_setting['health_monitor'] = \
                        health_monitor_skipped_setting
            if 'ssl_key_and_certificate_ref' in pool_object[0] and \
                    pool_object[0]['ssl_key_and_certificate_ref']:
                ssl_key_cert = self.get_name(
                    pool_object[0]['ssl_key_and_certificate_ref'])
                sslkc_skip = self.get_csv_skipped_list(
                    profile_csv_list, ssl_key_cert, vs_ref,
                    field_key='ssl_cert_key')
                if sslkc_skip:
                    pool_skipped_setting['pool_name'] = pool_name
                    pool_skipped_setting['ssl_key_and_certificate'] = sslkc_skip
            if 'ssl_profile_ref' in pool_object[0] and \
                    pool_object[0]['ssl_profile_ref']:
                name, skipped = self.get_ssl_profile_skipped(
                    profile_csv_list, pool_object[0]['ssl_profile_ref'], vs_ref)
                if skipped:
                    pool_skipped_setting['pool_name'] = pool_name
                    pool_skipped_setting['ssl profile'] = {}
                    pool_skipped_setting['ssl profile']['name'] = name
                    pool_skipped_setting['ssl profile'][
                        'skipped_list'] = skipped
            if 'application_persistence_profile_ref' in pool_object[0] and \
                    pool_object[0]['application_persistence_profile_ref']:
                name, skipped = self.get_app_persistence_profile_skipped(
                    csv_writer_dict_list, pool_object[0], vs_ref)
                if skipped:
                    pool_skipped_setting['pool_name'] = pool_name
                    pool_skipped_setting['Application Persistence profile'] = {}
                    pool_skipped_setting['Application Persistence profile'][
                        'name'] = name
                    pool_skipped_setting['Application Persistence profile'][
                        'skipped_list'] = skipped
        # Only record the pool at all if something was actually skipped.
        if pool_skipped_setting:
            skipped_setting['pools'].append(pool_skipped_setting)
    def get_skip_pools_policy(self, policy_set_name, http_req, avi_config,
                              pool_csv_rows, vs_ref, profile_csv_list,
                              skipped_setting):
        """
        Record skipped settings for the pool / pool group referenced by an
        HTTP request policy rule under skipped_setting['Httppolicy'].
        :param policy_set_name: name of the HTTP policy set
        :param http_req: rule dict containing a 'switching_action'
        :param avi_config: converted Avi configuration
        :param pool_csv_rows: report rows for pool objects
        :param vs_ref: VS reference
        :param profile_csv_list: report rows for profile objects
        :param skipped_setting: dict updated in place
        :return:
        """
        # NOTE(review): csv_writer_dict_list used below is the module-level
        # global (declared in correct_vs_ref), not a parameter here --
        # presumably populated before this runs; verify at the call site.
        if http_req['switching_action'].get('pool_group_ref'):
            pool_group_name = self.get_name(http_req['switching_action']
                                            ['pool_group_ref'])
            pool_group_skipped_settings = self.get_pool_skipped_list(
                avi_config, pool_group_name, pool_csv_rows,
                csv_writer_dict_list, vs_ref, profile_csv_list)
            if pool_group_skipped_settings:
                # NOTE(review): unlike the pool branch below, 'name' is only
                # set when the 'Httppolicy' key was absent -- confirm this
                # asymmetry is intentional.
                if 'Httppolicy' not in skipped_setting:
                    skipped_setting['Httppolicy'] = {}
                    skipped_setting['Httppolicy']['name'] = policy_set_name
                skipped_setting['Httppolicy']['Pool Group'] =\
                    pool_group_skipped_settings
        elif http_req['switching_action'].get('pool_ref'):
            pool_name = self.get_name(http_req['switching_action']['pool_ref'])
            pool_skipped_settings = {'pools': []}
            self.get_skipped_pool(avi_config, pool_name, pool_csv_rows,
                                  csv_writer_dict_list, vs_ref,
                                  profile_csv_list, pool_skipped_settings)
            if pool_skipped_settings['pools']:
                if 'Httppolicy' not in skipped_setting:
                    skipped_setting['Httppolicy'] = {}
                skipped_setting['Httppolicy']['name'] = policy_set_name
                skipped_setting['Httppolicy']['Pool'] = pool_skipped_settings
def remove_pool_group_vrf(self, pool_ref, avi_config):
"""
This method will remove vrf_ref for all pools in poolgroup
:param pool_ref: pool group name
:param avi_config: avi config json
:return:
"""
pg_obj = [poolgrp for poolgrp in avi_config['PoolGroup'] if
poolgrp['name'] == pool_ref]
if pg_obj:
for member in pg_obj[0]['members']:
poolname = self.get_name(member.get('pool_ref'))
self.remove_pool_vrf(poolname, avi_config)
def remove_pool_vrf(self, pool_ref, avi_config):
"""
This method will remove vrf_ref for pool
:param pool_ref: pool name
:param avi_config: avi config json
:return:
"""
pool_obj = [pool for pool in avi_config['Pool'] if pool['name'] ==
pool_ref]
if pool_obj and pool_obj[0].get('vrf_ref'):
pool_obj[0].pop('vrf_ref')
LOG.debug("Removed vrf ref from the pool %s", pool_ref)
    def update_network_profile(self, aviconfig, sys_dict):
        """
        This method updates the network profile to TCP PROXY when VS has HTTP
        application profile
        :param aviconfig: avi config dict
        :param sys_dict: system config dict
        :return:
        """
        for vs_obj in aviconfig['VirtualService']:
            if vs_obj.get('application_profile_ref'):
                app_profile = self.get_name(vs_obj['application_profile_ref'])
                # Look the profile up in both the system defaults and the
                # converted configuration.
                app_profile_obj = [app for app in sys_dict['ApplicationProfile']
                                   + aviconfig['ApplicationProfile']
                                   if app['name'] == app_profile]
                if app_profile_obj and (
                        app_profile_obj[0]['type'] ==
                        'APPLICATION_PROFILE_TYPE_HTTP' or app_profile_obj[
                        0]['name'] == 'System-HTTP'):
                    if vs_obj.get('network_profile_ref'):
                        nw_profile = self.get_name(vs_obj[
                                                       'network_profile_ref'])
                        nw_profile_obj = [nw for nw in sys_dict[
                            'NetworkProfile'] + aviconfig[
                            'NetworkProfile'] if
                            nw['name'] == nw_profile]
                        # Only rewrite when the current network profile is
                        # not already a TCP proxy.
                        if nw_profile_obj and (
                                nw_profile_obj[0]['profile']['type']
                                != 'PROTOCOL_TYPE_TCP_PROXY'):
                            LOG.debug(
                                'Changed the network profile reference from %s '
                                'to TCP-Proxy as VS %s has HTTP profile',
                                nw_profile_obj[0]['profile']['type'],
                                vs_obj['name'])
                            vs_obj['network_profile_ref'] = \
                                self.get_object_ref(
                                    'System-TCP-Proxy',
                                    conv_const.OBJECT_TYPE_NETWORK_PROFILE)
def correct_vs_ref(self, avi_config):
"""
This method corrects the reference of VS to different objects
:param avi_config: avi configuration dict
:return:
"""
global csv_writer_dict_list
avi_graph = self.make_graph(avi_config)
csv_dict_sub = [row for row in csv_writer_dict_list if row[
'F5 type'] != 'virtual' and row['Status'] in
(conv_const.STATUS_PARTIAL,
conv_const.STATUS_SUCCESSFUL)]
for dict_row in csv_dict_sub:
obj = dict_row['Avi Object']
vs = []
if obj.startswith('{'):
obj = eval(obj)
for key in obj:
for objs in obj[key]:
self.add_vs_ref(objs, avi_graph, vs)
elif obj.startswith('['):
obj = eval(obj)
for objs in obj:
for key in objs:
objval = objs[key]
self.add_vs_ref(objval, avi_graph, vs)
if vs:
dict_row['VS Reference'] = str(list(set(vs)))
else:
dict_row['VS Reference'] = conv_const.STATUS_NOT_IN_USE
def add_vs_ref(self, obj, avi_graph, vs):
"""
Helper method for adding vs ref
:param obj: object
:param avi_graph: avi graph
:param vs: VS list
:return:
"""
tmplist = []
if isinstance(obj, str) and obj.startswith('Duplicate of'):
obj_name = None
LOG.debug("Object has merged: %s" % obj)
else:
obj_name = obj.get('name', obj.get('hostname'))
if obj_name:
if avi_graph.has_node(obj_name):
LOG.debug("Checked predecessor for %s", obj_name)
predecessor = list(avi_graph.predecessors(obj_name))
if predecessor:
self.get_predecessor(predecessor, avi_graph, vs, tmplist)
else:
LOG.debug("Object %s may be merged or orphaned", obj_name)
    def get_predecessor(self, predecessor, avi_graph, vs, tmplist):
        """
        This method gets the predecessor of the object
        :param predecessor: predecessor list
        :param avi_graph: avi graph
        :param vs: VS list, extended in place with found VS nodes
        :param tmplist: temporary list of objects for which predecessors
        are already evaluated
        :return:
        """
        if len(predecessor) > 1:
            # Fan out: walk each predecessor individually, skipping nodes
            # already evaluated (tmplist guards against revisiting).
            for node in predecessor:
                if node in tmplist:
                    continue
                nodelist = [node]
                self.get_predecessor(nodelist, avi_graph, vs, tmplist)
        elif len(predecessor):
            node_obj = [nod for nod in list(avi_graph.nodes().data()) if
                        nod[0] == predecessor[0]]
            # A node whose type is (or contains) 'VS' terminates the walk;
            # otherwise keep climbing toward the graph roots.
            if node_obj and (node_obj[0][1]['type'] == 'VS' or 'VS' in node_obj[
                    0][1]['type']):
                LOG.debug("Predecessor %s found", predecessor[0])
                vs.extend(predecessor)
            else:
                tmplist.extend(predecessor)
                LOG.debug("Checked predecessor for %s", predecessor[0])
                nodelist = list(avi_graph.predecessors(predecessor[0]))
                self.get_predecessor(nodelist, avi_graph, vs, tmplist)
        else:
            LOG.debug("No more predecessor")
    def convert_irules(self, vs_ds_rules, rule_config, avi_config, prefix,
                       vs_name, tenant):
        """
        Convert the iRules attached to a VS into Avi objects using the
        user-supplied rule mapping (rule_config). Depending on the mapped
        type each rule becomes a VSDataScriptSet, HTTPPolicySet or
        NetworkSecurityPolicy entry in avi_config.
        :param vs_ds_rules: list of iRule names attached to the VS
        :param rule_config: list of mapping dicts with keys 'rule_name',
            'type' and optionally 'avi_config'
        :param avi_config: avi config dict, updated in place
        :param prefix: optional prefix for created object names
        :param vs_name: name of the VS the rules belong to
        :param tenant: tenant name used for object refs
        :return: tuple (datascript names, HTTP policy names, network policy
            name or None, list of converted rule names)
        """
        vs_ds = list()
        req_policies = list()
        nw_policy = None
        mapped_rules = []
        converted_rules = []
        LOG.debug("Converting for irules %s for vs %s" % (vs_ds_rules, vs_name))
        for rule_mapping in rule_config:
            mapped_rules.append(rule_mapping['rule_name'])
        # NOTE(review): index from enumerate is unused.
        for index, rule in enumerate(vs_ds_rules):
            rule_mapping = None
            if rule in mapped_rules:
                rule_mapping = [obj for obj in rule_config if
                                obj['rule_name'] == rule][0]
            if rule_mapping and rule_mapping['type'] == 'VSDataScriptSet':
                # Use the user-provided datascript body if present, else a
                # placeholder ("dummy") script.
                if 'avi_config' in rule_mapping:
                    ds_config = copy.deepcopy(rule_mapping['avi_config'])
                else:
                    ds_config = copy.deepcopy(conv_const.DUMMY_DS)
                    ds_config['name'] = '%s-dummy' % rule
                ds_config['tenant_ref'] = self.get_object_ref(tenant, 'tenant')
                if prefix:
                    ds_config['name'] = '%s-%s' % (prefix, ds_config['name'])
                # Datascripts are shared between VSes: append only once.
                existing_ds = [obj for obj in avi_config['VSDataScriptSet']
                               if obj['name'] == ds_config['name']]
                if not existing_ds:
                    avi_config['VSDataScriptSet'].append(ds_config)
                vs_ds.append(ds_config['name'])
                converted_rules.append(rule)
                LOG.debug(
                    "iRule %s successfully mapped to %s VSDataScriptSet" %
                    (rule, ds_config['name']))
            elif rule_mapping and rule_mapping['type'] == 'HTTPPolicySet':
                # Policies are per-VS (name embeds vs_name) and appended
                # unconditionally, unlike datascripts above.
                if 'avi_config' in rule_mapping:
                    policy = copy.deepcopy(rule_mapping['avi_config'])
                    policy['name'] = '%s-%s' % (policy['name'], vs_name)
                else:
                    policy = copy.deepcopy(conv_const.DUMMY_REQ_POLICY)
                    policy['name'] = '%s-%s-dummy' % (rule, vs_name)
                policy['tenant_ref'] = self.get_object_ref(tenant, 'tenant')
                if prefix:
                    policy['name'] = '%s-%s' % (prefix, policy['name'])
                avi_config['HTTPPolicySet'].append(policy)
                req_policies.append(policy['name'])
                converted_rules.append(rule)
                LOG.debug(
                    "iRule %s successfully mapped to %s HTTPPolicySet" %
                    (rule, policy['name']))
            elif rule_mapping and rule_mapping['type'] == \
                    'NetworkSecurityPolicy':
                if 'avi_config' in rule_mapping:
                    policy = copy.deepcopy(rule_mapping['avi_config'])
                    policy['name'] = '%s-%s' % (policy['name'], vs_name)
                else:
                    policy = copy.deepcopy(conv_const.DUMMY_NW_POLICY)
                    policy['name'] = '%s-%s-dummy' % (rule, vs_name)
                policy['tenant_ref'] = self.get_object_ref(tenant, 'tenant')
                if prefix:
                    policy['name'] = '%s-%s' % (prefix, policy['name'])
                avi_config['NetworkSecurityPolicy'].append(policy)
                # NOTE(review): nw_policy is overwritten on each match, so
                # only the last network-security rule is returned.
                nw_policy = policy['name']
                converted_rules.append(rule)
                LOG.debug(
                    "iRule %s successfully mapped to %s NetworkSecurityPolicy" %
                    (rule, policy['name']))
            elif (rule_mapping and rule_mapping['type'] ==
                    'HTTPToHTTPSRedirect') or rule == '_sys_https_redirect':
                # Added prefix for objects
                if prefix:
                    policy_name = '%s-%s-%s' % (prefix, rule, vs_name)
                else:
                    policy_name = '%s-%s' % (rule, vs_name)
                policy = copy.deepcopy(conv_const.HTTP_TO_HTTPS_REDIRECT_POL)
                policy["name"] = policy_name
                policy['tenant_ref'] = self.get_object_ref(tenant, 'tenant')
                req_policies.append(policy_name)
                avi_config['HTTPPolicySet'].append(policy)
                converted_rules.append(rule)
                LOG.debug(
                    "iRule %s successfully mapped to %s HTTPPolicySet" %
                    (rule, policy['name']))
        return vs_ds, req_policies, nw_policy, converted_rules
def update_with_default_profile(self, profile_type, profile,
profile_config, profile_name):
"""
Profiles can have inheritance used by attribute defaults-from in F5
configuration this method recursively gets all the attributes from the
default objects and forms complete object
:param profile_type: type of profile
:param profile: currant profile object
:param profile_config: F5 profile config dict
:param profile_name: Name of profile
:return: Complete profile with updated attributes from defaults
"""
parent_name = profile.get('defaults-from', None)
if parent_name and profile_name != parent_name:
parent_profile = profile_config.get(profile_type + " " +
parent_name, None)
if parent_profile:
parent_profile = self.update_with_default_profile(
profile_type, parent_profile, profile_config, parent_name)
parent_profile = copy.deepcopy(parent_profile)
parent_profile.update(profile)
profile = parent_profile
return profile | 46.69749 | 84 | 0.540244 |
ed1df660fe1b2ad3346f4c19f5135f22d93d871f | 1,668 | py | Python | tests/integration/states/match.py | kstaken/salt | 3321d57cf4d085bc6cbe269dbd01afd04fc6ff2a | [
"Apache-2.0"
] | 2 | 2019-03-30T02:12:56.000Z | 2021-03-08T18:59:46.000Z | tests/integration/states/match.py | ageron/salt | 72a0a89011e55ce7c875e948b5f0e97e70328153 | [
"Apache-2.0"
] | null | null | null | tests/integration/states/match.py | ageron/salt | 72a0a89011e55ce7c875e948b5f0e97e70328153 | [
"Apache-2.0"
] | 1 | 2020-03-07T07:04:55.000Z | 2020-03-07T07:04:55.000Z | # -*- coding: utf-8 -*-
'''
tests.integration.states.match
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2012 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import os
# Import salt libs
import salt.utils
import integration
from saltunittest import skipIf
STATE_DIR = os.path.join(integration.FILES, 'file', 'base')
class StateMatchTest(integration.ModuleCase):
    '''
    Validate the file state
    '''
    # Regression test for issue 2167: the 'exsel' matcher used to raise
    # AttributeError because Matcher had no 'functions' attribute.
    def test_issue_2167_exsel_no_AttributeError(self):
        ret = self.run_function('state.top', ['issue-2167-exsel-match.sls'])
        self.assertNotIn(
            "AttributeError: 'Matcher' object has no attribute 'functions'",
            ret
        )
    # Same regression, exercised through the 'ipcidr' matcher. Requires
    # root because network.subnets is queried on the host.
    @skipIf(os.geteuid() != 0, 'you must be root to run this test')
    def test_issue_2167_ipcidr_no_AttributeError(self):
        subnets = self.run_function('network.subnets')
        self.assertTrue(len(subnets) > 0)
        top_filename = 'issue-2167-ipcidr-match.sls'
        top_file = os.path.join(STATE_DIR, top_filename)
        try:
            # Write a temporary top file that matches on the first local
            # subnet, run it, then clean up in finally.
            salt.utils.fopen(top_file, 'w').write(
                'base:\n'
                ' {0}:\n'
                ' - match: ipcidr\n'
                ' - test\n'.format(subnets[0])
            )
            ret = self.run_function('state.top', [top_filename])
            self.assertNotIn(
                'AttributeError: \'Matcher\' object has no attribute '
                '\'functions\'',
                ret
            )
        finally:
            os.remove(top_file)
| 29.785714 | 76 | 0.576739 |
c9ff02a9208987676d3f803f52eab06594a6c8f2 | 1,888 | py | Python | junction/proposals/permissions.py | josejibin/junction | faf9607e24b109d53be7e6b28787f52100fffad3 | [
"MIT"
] | 1 | 2019-04-15T10:35:46.000Z | 2019-04-15T10:35:46.000Z | junction/proposals/permissions.py | josejibin/junction | faf9607e24b109d53be7e6b28787f52100fffad3 | [
"MIT"
] | null | null | null | junction/proposals/permissions.py | josejibin/junction | faf9607e24b109d53be7e6b28787f52100fffad3 | [
"MIT"
] | 1 | 2017-06-01T06:03:14.000Z | 2017-06-01T06:03:14.000Z | # -*- coding: utf-8 -*-
# Third Party Stuff
from django.core.exceptions import PermissionDenied
# Junction Stuff
from junction.conferences.models import ConferenceProposalReviewer
from junction.base.constants import ConferenceStatus
from .models import ProposalSectionReviewer
def is_proposal_voting_allowed(proposal):
    """Voting stays open until the conference schedule is published."""
    conference_status = proposal.conference.status
    return conference_status != ConferenceStatus.SCHEDULE_PUBLISHED
def is_proposal_author(user, proposal):
    """Return True when ``user`` is authenticated and wrote ``proposal``."""
    if not user.is_authenticated():
        return False
    return proposal.author == user
def is_proposal_reviewer(user, conference):
    """True when ``user`` is an active reviewer registered for ``conference``."""
    authenticated = user.is_authenticated()
    is_reviewer = ConferenceProposalReviewer.objects.filter(
        reviewer=user.id, conference=conference, active=True).exists()
    return authenticated and is_reviewer
def is_proposal_section_reviewer(user, conference, proposal):
    """True when ``user`` actively reviews the proposal's section."""
    return user.is_authenticated() and ProposalSectionReviewer.objects.filter(
        conference_reviewer__reviewer=user,
        conference_reviewer__conference=conference,
        proposal_section=proposal.proposal_section,
        active=True).exists()
def is_proposal_author_or_proposal_reviewer(user, conference, proposal):
    """True when ``user`` wrote the proposal or reviews the conference."""
    reviewer = is_proposal_reviewer(user, conference)
    author = is_proposal_author(user, proposal)
    return reviewer or author
def is_proposal_author_or_proposal_section_reviewer(user,
                                                    conference, proposal):
    """True when ``user`` wrote the proposal or reviews its section."""
    return is_proposal_author(user, proposal) or \
        is_proposal_section_reviewer(user, conference, proposal)
# NOTE(review): 'permisson' typo is part of the public name; renaming
# would break callers, so it is kept as-is.
def is_proposal_author_or_permisson_denied(user, proposal):
    """Return True for the author; raise PermissionDenied for anyone else."""
    if is_proposal_author(user, proposal):
        return True
    raise PermissionDenied
def is_conference_moderator(user, conference):
    """True for superusers and for users listed as conference moderators."""
    if user.is_superuser:
        return True
    users = [mod.moderator for mod in conference.moderators.all()]
    return user in users
a13aea79de122303268056287fc3c2530a5fdd19 | 4,304 | py | Python | tests/test_KoalaBot.py | JayDwee/KoalaBot | b2bc4052115531db81d6f777c08e271d6b9d408c | [
"MIT"
] | null | null | null | tests/test_KoalaBot.py | JayDwee/KoalaBot | b2bc4052115531db81d6f777c08e271d6b9d408c | [
"MIT"
] | null | null | null | tests/test_KoalaBot.py | JayDwee/KoalaBot | b2bc4052115531db81d6f777c08e271d6b9d408c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Testing KoalaBot Base Code
Commented using reStructuredText (reST)
"""
# Futures
# Built-in/Generic Imports
import argparse
# Libs
import discord
import discord.ext.test as dpytest
import mock
import pytest
from discord.ext import commands
# Own modules
import KoalaBot
from tests.utils_testing.TestUtils import FakeAuthor
from tests.utils_testing.LastCtxCog import LastCtxCog
from utils.KoalaDBManager import KoalaDBManager
# Constants
# Variables
utils_cog = None
DBManager = KoalaDBManager(KoalaBot.DATABASE_PATH, KoalaBot.DB_KEY, KoalaBot.config_dir)
DBManager.create_base_tables()
# --- fixtures ---------------------------------------------------------
# Builds a throwaway context by sending a 'store_ctx' command through the
# dpytest-configured bot and returning the captured ctx.
@pytest.fixture(autouse=True)
async def test_ctx(bot):
    global utils_cog
    utils_cog = LastCtxCog(bot)
    bot.add_cog(utils_cog)
    dpytest.configure(bot)
    await dpytest.message(KoalaBot.COMMAND_PREFIX + "store_ctx")
    return utils_cog.get_last_ctx()
@pytest.fixture(scope='session', autouse=True)
def setup_db():
    DBManager.clear_all_tables(DBManager.fetch_all_tables())
    yield DBManager
@pytest.fixture(scope='function', autouse=True)
async def setup_clean_messages():
    await dpytest.empty_queue()
    yield dpytest
# --- argument parsing -------------------------------------------------
def test_parse_args_config():
    assert "/config/" == vars(KoalaBot.parse_args(["--config", "/config/"])).get("config")
def test_parse_args_invalid():
    # argparse exits on unknown flags; patch exit so the test can assert
    # it was invoked instead of killing the process.
    with mock.patch.object(argparse.ArgumentParser, 'exit') as mock1:
        KoalaBot.parse_args(["--test", "/test/"])
        mock1.assert_called_once()
# --- owner/admin checks -----------------------------------------------
# is_dpytest is temporarily disabled in the negative tests so the real
# permission check runs instead of the test-mode bypass.
def test_test_user_is_owner(test_ctx):
    assert KoalaBot.is_owner(test_ctx)
def test_invalid_test_user_is_owner(test_ctx):
    test_ctx.author = FakeAuthor(id=int(KoalaBot.BOT_OWNER)+1)
    KoalaBot.is_dpytest = False
    assert not KoalaBot.is_owner(test_ctx)
    KoalaBot.is_dpytest = True
def test_owner_is_owner(test_ctx):
    test_ctx.author = FakeAuthor(id=int(KoalaBot.BOT_OWNER))
    assert KoalaBot.is_owner(test_ctx)
def test_test_user_is_admin(test_ctx):
    assert KoalaBot.is_admin(test_ctx)
def test_invalid_test_user_is_admin(test_ctx):
    test_ctx.author = FakeAuthor(id=int(KoalaBot.BOT_OWNER)+2)
    KoalaBot.is_dpytest = False
    assert not KoalaBot.is_admin(test_ctx)
    KoalaBot.is_dpytest = True
def test_admin_test_user_is_admin(test_ctx):
    test_ctx.author = FakeAuthor(name="TestUser#0001", all_permissions=True)
    assert KoalaBot.is_admin(test_ctx)
def test_admin_is_admin(test_ctx):
    test_ctx.author = FakeAuthor(name="TestUser#0002", all_permissions=True)
    assert KoalaBot.is_admin(test_ctx)
def test_not_admin_is_admin(test_ctx):
    test_ctx.author = FakeAuthor(all_permissions=False)
    KoalaBot.is_dpytest = False
    assert not KoalaBot.is_admin(test_ctx)
    KoalaBot.is_dpytest = True
# --- cog loading and DM helpers ---------------------------------------
def test_load_all_cogs():
    test_koala = KoalaBot
    test_koala.COGS_DIR = "tests/fake_load_all_cogs"
    with mock.patch.object(discord.ext.commands.bot.Bot, 'load_extension') as mock1:
        test_koala.load_all_cogs()
        mock1.assert_called_with("tests.fake_load_all_cogs.Greetings")
@pytest.mark.asyncio
async def test_dm_single_group_message():
    test_message = 'default message'
    test_member = dpytest.get_config().members[0]
    x = await KoalaBot.dm_group_message([test_member], test_message)
    assert dpytest.verify().message().content(test_message)
    assert x == 1
@pytest.mark.asyncio
async def test_dm_plural_group_message():
    test_message = 'default message'
    test_member = dpytest.get_config().members[0]
    test_member_2 = await dpytest.member_join()
    await dpytest.empty_queue()
    x = await KoalaBot.dm_group_message([test_member, test_member_2], test_message)
    assert dpytest.verify().message().content(test_message)
    assert dpytest.verify().message().content(test_message)
    assert x == 2
@pytest.mark.asyncio
async def test_dm_empty_group_message():
    test_message = 'this should not be sent'
    x = await KoalaBot.dm_group_message([], test_message)
    assert dpytest.verify().message().nothing()
    assert x == 0
# Session-wide switch into dpytest mode; restored when the session ends.
@pytest.fixture(scope='session', autouse=True)
def setup_is_dpytest():
    KoalaBot.is_dpytest = True
    yield
    KoalaBot.is_dpytest = False
| 28.315789 | 91 | 0.730251 |
4815b2128da76ed5f0226a3caddc30ba26a5f4df | 1,680 | py | Python | adafruit-circuitpython-bundle-py-20201107/examples/ili9341_shield_simpletest.py | rantler/AdaFruit | 9b0aa56ede9ac358b835162cad4c6531c09ba5b0 | [
"CC0-1.0"
] | null | null | null | adafruit-circuitpython-bundle-py-20201107/examples/ili9341_shield_simpletest.py | rantler/AdaFruit | 9b0aa56ede9ac358b835162cad4c6531c09ba5b0 | [
"CC0-1.0"
] | null | null | null | adafruit-circuitpython-bundle-py-20201107/examples/ili9341_shield_simpletest.py | rantler/AdaFruit | 9b0aa56ede9ac358b835162cad4c6531c09ba5b0 | [
"CC0-1.0"
] | null | null | null | """
This test will initialize the display using displayio and draw a solid green
background, a smaller purple rectangle, and some yellow text.
Pinouts are for the 2.8" TFT Shield
"""
import board
import terminalio
import displayio
from adafruit_display_text import label
import adafruit_ili9341
# Release any resources currently in use for the displays
displayio.release_displays()
# Use Hardware SPI
spi = board.SPI()
# Use Software SPI if you have a shield with pins 11-13 jumpered
# import busio
# spi = busio.SPI(board.D11, board.D13)
# Chip-select / data-command pins for the 2.8" TFT shield wiring.
tft_cs = board.D10
tft_dc = board.D9
display_bus = displayio.FourWire(spi, command=tft_dc, chip_select=tft_cs)
display = adafruit_ili9341.ILI9341(display_bus, width=320, height=240)
# Make the display context
splash = displayio.Group(max_size=10)
display.show(splash)
# Draw a green background
color_bitmap = displayio.Bitmap(320, 240, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0x00FF00  # Bright Green
bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
splash.append(bg_sprite)
# Draw a smaller inner rectangle
inner_bitmap = displayio.Bitmap(280, 200, 1)
inner_palette = displayio.Palette(1)
inner_palette[0] = 0xAA0088  # Purple
inner_sprite = displayio.TileGrid(inner_bitmap, pixel_shader=inner_palette, x=20, y=20)
splash.append(inner_sprite)
# Draw a label
text_group = displayio.Group(max_size=10, scale=3, x=57, y=120)
text = "Hello World!"
text_area = label.Label(terminalio.FONT, text=text, color=0xFFFF00)
text_group.append(text_area)  # Subgroup for text scaling
splash.append(text_group)
# Idle forever so the drawn frame stays on screen.
while True:
    pass
| 28.965517 | 88 | 0.753571 |
6c1a6ad313be651a645d5fe8a8ee0ab3a9e90173 | 14,476 | py | Python | Lib/site-packages/pdsspect/roi_plot.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | Lib/site-packages/pdsspect/roi_plot.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/pdsspect/roi_plot.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | """Parent classes for any widget that plots data"""
from qtpy import QT_VERSION
from qtpy import QtWidgets, QtCore
from matplotlib.figure import Figure
from .pdsspect_image_set import PDSSpectImageSetViewBase
qt_ver = int(QT_VERSION[0])
if qt_ver == 4:
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
elif qt_ver == 5:
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
class ROIPlotModel(object):
    """Model backing the ROI plot and its companion widget.

    Parameters
    ----------
    image_set : :class:`~.pdsspect_image_set.PDSSpectImageSet`
        pdsspect model

    Attributes
    ----------
    selected_colors : :obj:`list`
        Colors to display in the histogram
    latex_units : :obj:`list` of 3 :obj:`str`
        The latex strings of
        :attr:`.pdsspect_image_set.PDSSpectImageSet.accepted_units`
    """

    latex_units = ['nm', '\mu m', '\AA']

    def __init__(self, image_set):
        self._views = []
        self._image_set = image_set
        self.selected_colors = []
        self._view_index = 0

    def register(self, view):
        """Register view with the model"""
        if view in self._views:
            return
        self._views.append(view)

    def unregister(self, view):
        """Unregister view with the model"""
        if view not in self._views:
            return
        self._views.remove(view)

    def set_data(self):
        """Tell every registered view to refresh its displayed data"""
        for registered_view in self._views:
            registered_view.set_data()

    @property
    def image_sets(self):
        """:obj:`list` : All the image sets, including the current one"""
        return [self._image_set, *self._image_set.subsets]

    @property
    def image_set(self):
        """:class:`~.pdsspect_image_set.PDSSpectImageSet` : Image set that
        corresponds with the current view
        """
        return self.image_sets[self._view_index]

    @property
    def has_multiple_views(self):
        """:obj:`bool` : True if there are multiple views, False otherwise"""
        return len(self.image_sets) > 1

    @property
    def view_index(self):
        """:obj:`int` : The index of the view to display the ROI data

        If there are not multiple views, view_index is automatically ``-1``.
        """
        return self._view_index if self.has_multiple_views else -1

    @view_index.setter
    def view_index(self, new_index):
        self._view_index = new_index
        if new_index == self.image_set.current_image_index:
            self.image_index = -1
        self.set_data()

    def add_selected_color(self, color):
        """Select a color and inform views to display new color

        Parameters
        ----------
        color : :obj:`str`
            The color to add
        """
        self.selected_colors.append(color)
        self.set_data()

    def remove_selected_color(self, color):
        """Remove a selected color and inform views to not display the color

        Parameters
        ----------
        color : :obj:`str`
            The color to remove
        """
        self.selected_colors.remove(color)
        self.set_data()

    @property
    def unit(self):
        """:obj:`str` : Latex version of
        :attr:`.pdsspect_image_set.PDSSpectImageSet.unit`"""
        position = self.image_set.accepted_units.index(self.image_set.unit)
        return self.latex_units[position]
class ROIPlotController(object):
    """Controller for the ROI plot and its companion widget.

    Parameters
    ----------
    model : :class:`ROIPlotModel`
        The model
    view : :class:`QtWidgets.QWidget <PySide.QtGui.QWidget>`
        The view

    Attributes
    ----------
    model : :class:`ROIPlotModel`
        The model
    view : :class:`QtWidgets.QWidget <PySide.QtGui.QWidget>`
        The view
    """

    def __init__(self, model, view):
        self.model = model
        self.view = view

    def color_state_changed(self, color):
        """Toggle a color's selection when its checkbox changes state

        Parameters
        ----------
        color : :obj:`str`
            The name of the checkbox whose state changed
        """
        already_selected = color in self.model.selected_colors
        if already_selected:
            self.remove_color(color)
        else:
            self.select_color(color)

    def select_color(self, color):
        """Select a given color

        Parameters
        ----------
        color : :obj:`str`
            The color to select
        """
        self.model.add_selected_color(color)

    def remove_color(self, color):
        """Remove a given color

        Parameters
        ----------
        color : :obj:`str`
            The color to remove
        """
        self.model.remove_selected_color(color)

    def set_view_index(self, index):
        """Set the index of the view

        Parameters
        ----------
        index : :obj:`int`
            Index of the view
        """
        self.model.view_index = index
class ColorCheckBox(QtWidgets.QCheckBox):
    """Custom checkbox that emits its color (:obj:`str`) when toggled
    Parameters
    ----------
    color : :obj:`str`
        The color to name the checkbox
    Attributes
    ----------
    color : :obj:`str`
        The color to name the checkbox
    stateChanged : :obj:`QtCore.Signal`
        Signal that emits a string when check box changes its state
        Read more about `Signals here
        <http://pyqt.sourceforge.net/Docs/PyQt5/signals_slots.html>`_
    """
    # Shadows QCheckBox's built-in stateChanged(int) so listeners receive
    # the color string instead of Qt's integer state.
    stateChanged = QtCore.Signal(str)
    def __init__(self, color):
        super(ColorCheckBox, self).__init__(color)
        self.color = color
    def nextCheckState(self):
        """Adjust checkbox's toggle & emit color when checkbox is clicked"""
        # Toggle manually (overriding Qt's default transition), then
        # notify listeners with this box's color.
        self.setChecked(not self.isChecked())
        self.stateChanged.emit(self.color)
class ViewCheckBox(QtWidgets.QCheckBox):
    """Custom checkbox that emits its index (:obj:`int`) when toggled
    Parameters
    ----------
    index : :obj:`int`
        The index of the view
    Attributes
    ----------
    index : :obj:`int`
        The index of the view
    stateChanged : :obj:`QtCore.Signal`
        Signal that emits the box itself when check box changes its state
        Read more about `Signals here
        <http://pyqt.sourceforge.net/Docs/PyQt5/signals_slots.html>`_
    """
    # Shadows QCheckBox's built-in stateChanged(int) so listeners receive
    # the checkbox object itself.
    stateChanged = QtCore.Signal(object)
    def __init__(self, index):
        # Views are labeled 1-based for display ("view 1") while index
        # stays 0-based for lookups.
        view_number = index + 1
        name = 'view ' + str(view_number)
        super(ViewCheckBox, self).__init__(name)
        self.name = name
        self.view_number = view_number
        self.index = index
    def set_check_state(self):
        # Toggle and notify listeners with this checkbox.
        self.setChecked(not self.isChecked())
        self.stateChanged.emit(self)
    def nextCheckState(self):
        """Adjust checkbox's toggle & emit checkbox when checkbox is clicked"""
        # Only reacts while unchecked: clicking a checked box does not
        # uncheck it, so the group behaves like radio buttons (the owning
        # widget unchecks the others on stateChanged).
        if not self.isChecked():
            self.set_check_state()
class ROIPlotWidget(QtWidgets.QWidget, PDSSpectImageSetViewBase):
"""Widget to hold the histogram and checkboxs
Checkboxes are created in :meth:`create_color_checkbox` which is why they
do not appear in the :meth:`__init__` method.
Parameters
----------
model : :class:`ROIPlotModel`
The model
Attributes
----------
model : :class:`ROIPlotModel`
The model
controller : :class:`ROIPlotController`
The controller
checkbox_layout : :class:`QtWidgets.QVBoxLayout <PySide.QtGui.QVBoxLayout>`
Place the checkboxes vertically
main_layout : :class:`QtWidgets.QGridLayout <PySide.QtGui.QGridLayout>`
Place in grid layout so histogram stretches while boxes are stationary
roi_plot : :class:`ROIPlot`
The plot of ROI data
save_btn : :class:`QtWidgets.QPushButton <PySide.QtGui.QPushButton>`
Save the plot as an image
red_checkbox : :class:`ColorCheckBox`
Red checkbox that displays red ROI data when checked
brown_checkbox : :class:`ColorCheckBox`
Brown checkbox that displays brown ROI data when checked
lightblue_checkbox : :class:`ColorCheckBox`
Lightblue checkbox that displays lightblue ROI data when checked
lightcyan_checkbox : :class:`ColorCheckBox`
Lightcyan checkbox that displays lightcyan ROI data when checked
darkgreen_checkbox : :class:`ColorCheckBox`
Darkgreen checkbox that displays darkgreen ROI data when checked
yellow_checkbox : :class:`ColorCheckBox`
Yellow checkbox that displays yellow ROI data when checked
pink_checkbox : :class:`ColorCheckBox`
Pink checkbox that displays pink ROI data when checked
teal_checkbox : :class:`ColorCheckBox`
Teal checkbox that displays teal ROI data when checked
goldenrod_checkbox : :class:`ColorCheckBox`
Goldenrod checkbox that displays goldenrod ROI data when checked
sienna_checkbox : :class:`ColorCheckBox`
Sienna checkbox that displays sienna ROI data when checked
darkblue_checkbox : :class:`ColorCheckBox`
Darkblue checkbox that displays darkblue ROI data when checked
crimson_checkbox : :class:`ColorCheckBox`
Crimson checkbox that displays crimson ROI data when checked
maroon_checkbox : :class:`ColorCheckBox`
Maroon checkbox that displays maroon ROI data when checked
purple_checkbox : :class:`ColorCheckBox`
Purple checkbox that displays purple ROI data when checked
"""
def __init__(self, model):
super(ROIPlotWidget, self).__init__()
self.model = model
self.model.register(self)
self.controller = ROIPlotController(model, self)
self.roi_plot = None
self._create_roi_plot()
self.checkbox_layout = QtWidgets.QVBoxLayout()
for color in self.model.image_set.colors[:-1]:
self.create_color_checkbox(color)
self.save_btn = QtWidgets.QPushButton('Save Plot')
self.save_btn.clicked.connect(self.save_plot)
self.view_boxes_layout = QtWidgets.QHBoxLayout()
self.main_layout = QtWidgets.QGridLayout()
self._set_layout()
if self.model.has_multiple_views:
self.add_view()
else:
self._register_set_at_index(0)
def _create_roi_plot(self):
    """Create the plot canvas; base implementation is a placeholder.

    Subclasses presumably replace ``self.roi_plot`` with a real plot
    object -- TODO confirm against subclasses.
    """
    self.roi_plot = None
def _register_set_at_index(self, index):
    """Register the plot and this widget as observers of one image set.

    Parameters
    ----------
    index : :obj:`int`
        Position of the image set in ``self.model.image_sets``
    """
    self.model.image_sets[index].register(self.roi_plot)
    self.model.image_sets[index].register(self)
def _set_layout(self):
    """Arrange the widget's layouts; base implementation does nothing.

    Subclasses presumably assemble ``main_layout`` here -- TODO confirm.
    """
    pass
def create_color_checkbox(self, color):
    """Create a checkbox with the given color

    The checkbox is stored as a dynamically named attribute
    ``<color>_checkbox`` (via ``setattr``) and appended to
    ``checkbox_layout``.

    Parameters
    ----------
    color : :obj:`str`
        The color to name the checkbox
    """
    name = color + '_checkbox'
    color_checkbox = ColorCheckBox(color)
    # Toggling the box routes through check_color -> controller.
    color_checkbox.stateChanged.connect(self.check_color)
    setattr(self, name, color_checkbox)
    self.checkbox_layout.addWidget(color_checkbox)
def check_color(self, checkbox_color):
    """Called when the state of a checkbox is changed

    Delegates to the controller, which updates which color's ROI data
    is displayed.

    Parameters
    ----------
    checkbox_color : :obj:`str`
        The color label of the check box
    """
    self.controller.color_state_changed(checkbox_color)
def add_view(self, index=None):
    """Add a view box to the widget

    When called with no index and no existing view boxes, fans out
    recursively to create one box per image set. Afterwards the box
    matching the model's current view index is checked.

    Parameters
    ----------
    index : :obj:`int` [Default None]
        The index to add the view to
    """
    # First call with an empty layout: create one box per image set
    # via recursive calls, then stop.
    if self.view_boxes_layout.count() == 0 and index is None:
        for index, image_set in enumerate(self.model.image_sets):
            self.add_view(index)
        return
    # No explicit index: a new image set was appended, use its index.
    if index is None:
        index = len(self.model.image_sets) - 1
    view_box = ViewCheckBox(index)
    view_box.stateChanged.connect(self.check_view_checkbox)
    self.view_boxes_layout.addWidget(view_box)
    self._register_set_at_index(index)
    # Reflect the model's currently selected view in the checkboxes.
    box = self.view_boxes_layout.itemAt(self.model._view_index).widget()
    box.setChecked(True)
    self.check_view_checkbox(box)
def check_view_checkbox(self, view_checkbox):
    """Synchronize the group of view checkboxes when one changes state.

    Every other view checkbox is unchecked so that at most one view is
    selected; if the changed box ended up checked, the controller is
    told to switch to that view's index.

    Parameters
    ----------
    view_checkbox : :class:`ViewCheckBox`
        The view check box whose state changed
    """
    selected = view_checkbox.index
    layout = self.view_boxes_layout
    for position in range(layout.count()):
        if position == selected:
            continue
        layout.itemAt(position).widget().setChecked(False)
    if view_checkbox.isChecked():
        self.controller.set_view_index(selected)
def set_data(self):
    """Observer hook for image-set data changes; no-op in this class.

    Presumably overridden by subclasses that own a real plot -- TODO
    confirm.
    """
    pass
def save_plot(self):
    """Prompt for a file name and save the plot as an image.

    Opens a native save-file dialog; nothing happens when the user
    cancels (the dialog then returns an empty path).
    """
    path, _selected_filter = QtWidgets.QFileDialog.getSaveFileName(parent=self)
    if path == '':
        return
    self.roi_plot._figure.savefig(path, facecolor='black', edgecolor='black')
class ROIPlot(FigureCanvasQTAgg, PDSSpectImageSetViewBase):
    """Plot of the data in each ROI color

    Parameters
    ----------
    model : :class:`ROIPlotModel`
        The model

    Attributes
    ----------
    model : :class:`ROIPlotModel`
        The model
    image_set : :class:`~.pdsspect_image_set.PDSSpectImageSet`
        pdsspect model (taken from ``model.image_set``)
    """

    def __init__(self, model):
        self.model = model
        self.image_set = model.image_set
        # Black figure so the plot matches the dark viewer theme.
        fig = Figure(figsize=(6, 4), dpi=100, facecolor='black')
        fig.subplots_adjust(
            left=0.15,
            right=0.95,
            top=0.95,
            bottom=0.15,
            wspace=0.0,
            hspace=0.0,
        )
        super(ROIPlot, self).__init__(fig)
        self._figure = fig
        # Keep the canvas' height tied to its width and never let it
        # shrink below its initial size.
        policy = self.sizePolicy()
        policy.setHeightForWidth(True)
        self.setSizePolicy(policy)
        self.setMinimumSize(self.size())
        self._ax = fig.add_subplot(111)
        # White spines/ticks for contrast against the black background.
        self._ax.set_facecolor('black')
        self._ax.spines['bottom'].set_color('w')
        self._ax.spines['left'].set_color('w')
        self._ax.tick_params(axis='x', colors='w', labelsize=8)
        self._ax.tick_params(axis='y', colors='w', labelsize=8)
        # NOTE(review): set_data is not defined in this class; presumably
        # provided by a subclass or by PDSSpectImageSetViewBase -- confirm.
        self.set_data()

    def set_roi_data(self):
        """Set data when ROI is created/destroyed or checkbox is toggled"""
        self.set_data()
| 30.348008 | 79 | 0.62234 |
45080ce10975d30d158892f39ac31e7630d2397d | 3,358 | py | Python | wtm_envs/mujoco/keybot_env.py | knowledgetechnologyuhh/goal_conditioned_RL_baselines | 915fc875fd8cc75accd0804d99373916756f726e | [
"MIT"
] | 15 | 2020-07-01T16:16:09.000Z | 2021-12-20T21:56:33.000Z | wtm_envs/mujoco/keybot_env.py | knowledgetechnologyuhh/goal_conditioned_RL_baselines | 915fc875fd8cc75accd0804d99373916756f726e | [
"MIT"
] | 14 | 2020-09-25T22:41:20.000Z | 2022-03-12T00:38:44.000Z | wtm_envs/mujoco/keybot_env.py | knowledgetechnologyuhh/goal_conditioned_RL_baselines | 915fc875fd8cc75accd0804d99373916756f726e | [
"MIT"
] | 2 | 2020-07-01T16:19:08.000Z | 2020-11-28T10:45:59.000Z | import numpy as np
from wtm_envs.mujoco import blocks_env, utils
class KeybotEnv(blocks_env.BlocksEnv):
"""Superclass for all Keybot environments.
"""
def __init__(
self, model_path, n_substeps, gripper_extra_height, block_gripper,
target_in_the_air, target_offset, obj_range, target_range,
distance_threshold, gripper_relative_target, initial_qpos, reward_type,
gripper_goal, n_objects, table_height, obj_height, min_tower_height=None, max_tower_height=None,
):
self.gripper_relative_target = gripper_relative_target
super(KeybotEnv, self).__init__(model_path, n_substeps, gripper_extra_height, block_gripper,
target_in_the_air, target_offset, obj_range, target_range,
distance_threshold, initial_qpos, reward_type,
gripper_goal, n_objects, table_height, obj_height, min_tower_height, max_tower_height)
def _set_action(self, action):
assert action.shape == (4,)
action = action.copy() # ensure that we don't change the action outside of this scope
pos_ctrl, gripper_ctrl = action[:3], action[3]
if self.gripper_relative_target:
pos_ctrl *= 0.005 # limit maximum change in position. was 0.05\
ref_frame = None
else: # Absolute target relative to the robot frame
pos_ctrl *= 0.08 # limit maximum change in position. was 0.05
pos_ctrl[0] += 0.20 # add constant to x-axis to avoid generating actions behind the robot
ref_frame = self.sim.data.get_body_xpos('base_link')
# pos_ctrl[0] += 0.20 # add constant to x-axis to avoid generating actions behind the robot
rot_ctrl = [1., 0., 0., 0.] # fixed rotation of the end effector, expressed as a quaternion
gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])
assert gripper_ctrl.shape == (2,)
if self.block_gripper:
gripper_ctrl = np.zeros_like(gripper_ctrl)
action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])
# Apply action to simulation.
utils.ctrl_set_action(self.sim, action)
utils.mocap_set_action(self.sim, action, absolute_ref=ref_frame)
def _env_setup(self, initial_qpos):
for name, value in initial_qpos.items():
self.sim.data.set_joint_qpos(name, value)
utils.reset_mocap_welds(self.sim)
self.sim.forward()
# Move end effector into position.
gripper_target = np.array([-0.498, 0.005, -0.431 + self.gripper_extra_height]) + self.sim.data.get_site_xpos(
'robot0:grip')
gripper_rotation = np.array([1., 0., 0., 0.])
self.sim.data.set_mocap_pos('robot0:mocap', gripper_target)
self.sim.data.set_mocap_quat('robot0:mocap', gripper_rotation)
for _ in range(10):
self.sim.step()
# Offset the random goal if gripper random is used
if self.gripper_relative_target:
self.random_gripper_goal_pos_offset = (0.0, 0.0, 0.0)
else:
self.random_gripper_goal_pos_offset = (0.25, 0.0, 0.14)
# Extract information for sampling goals.
self.initial_gripper_xpos = self.sim.data.get_site_xpos('robot0:grip').copy()
if self.n_objects > 0:
self.height_offset = self.sim.data.get_site_xpos('object0')[2]
| 46.638889 | 117 | 0.667659 |
7eeb6afeb1fa9ce73dc093909372ffb8d490a831 | 681 | py | Python | postprocess.py | heyyyjude/microbiome | d5e042913ea5c88ac6c064363cc555647f24701b | [
"Beerware"
] | 7 | 2016-12-22T07:06:53.000Z | 2019-02-10T07:16:36.000Z | postprocess.py | heyyyjude/microbiome | d5e042913ea5c88ac6c064363cc555647f24701b | [
"Beerware"
] | null | null | null | postprocess.py | heyyyjude/microbiome | d5e042913ea5c88ac6c064363cc555647f24701b | [
"Beerware"
] | null | null | null | import click
from qiime.automation.diversity_analysis.diversity_analysis import \
DiversityAnalysis
__author__ = "jkkim"
# CLI entry point. Each click option maps to the matching parameter of
# main() by its letter (-t -> t, -e -> e, -s -> s, -r -> r). The
# docstring doubles as the command's --help description.
@click.command()
@click.option('-s', help="sample type is either bac or its.")
@click.option('-t', type=int, help="threads.")
@click.option("-e", type=int,
              help="sampling depth. have a look at stats_reads_per_sample.txt and pick a number")
@click.option('-r', help="ref database is either gg or silva.")
def main(t, e, s, r):
    '''do diversity analysis'''
    run = DiversityAnalysis(taxon=s, sampling_depth=e, ref_db=r, threads=t, )
    run.run_core_diversity()
    print("core-diversity done")


if __name__ == '__main__':
    main()
| 28.375 | 97 | 0.688693 |
7f95661496f1d886520ec9b5fa15a6a682b4dd8c | 8,291 | py | Python | pysnmp/RUCKUS-SYSTEM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/RUCKUS-SYSTEM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/RUCKUS-SYSTEM-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module RUCKUS-SYSTEM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RUCKUS-SYSTEM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:50:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
ruckusCommonSystemModule, = mibBuilder.importSymbols("RUCKUS-ROOT-MIB", "ruckusCommonSystemModule")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
NotificationType, Integer32, Gauge32, MibIdentifier, ObjectIdentity, TimeTicks, Unsigned32, Counter64, Counter32, IpAddress, Bits, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, iso = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Integer32", "Gauge32", "MibIdentifier", "ObjectIdentity", "TimeTicks", "Unsigned32", "Counter64", "Counter32", "IpAddress", "Bits", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso")
TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention")
# NOTE(review): this module appears to be machine-generated by pysmi from
# the RUCKUS-SYSTEM-MIB ASN.1 source (see header); manual edits are
# normally regenerated -- confirm before hand-modifying.
# Module identity: enterprises.ruckusWireless(25053).1.1.11.1
ruckusSystemMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1))
if mibBuilder.loadTexts: ruckusSystemMIB.setLastUpdated('201010150800Z')
if mibBuilder.loadTexts: ruckusSystemMIB.setOrganization('Ruckus Wireless, Inc.')
# Subtree layout: objects -> {info, services, commands}; events is a sibling.
ruckusSystemObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1))
ruckusSystemInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 1))
ruckusSystemServices = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2))
ruckusSystemCommands = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 3))
ruckusSystemEvents = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 2))
# Read-only utilization scalars.
ruckusSystemCPUUtil = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusSystemCPUUtil.setStatus('current')
ruckusSystemMemoryUtil = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusSystemMemoryUtil.setStatus('current')
# Per-service subtrees under ruckusSystemServices.
ruckusSystemHTTP = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 1))
ruckusSystemHTTPS = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 2))
ruckusSystemTelnet = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 3))
ruckusSystemSSH = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 4))
ruckusSystemBonjour = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 5))
ruckusSystemSyslog = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 6))
ruckusSystemNTP = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 7))
ruckusSystemFlexMaster = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 8))
# Service enable/disable flags and parameters.
ruckusSystemHTTPStatus = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 1, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemHTTPStatus.setStatus('current')
ruckusSystemHTTPSStatus = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 2, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemHTTPSStatus.setStatus('current')
ruckusSystemTelnetStatus = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 3, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemTelnetStatus.setStatus('current')
ruckusSystemSSHStatus = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 4, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemSSHStatus.setStatus('current')
ruckusSystemBonjourStatus = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 5, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemBonjourStatus.setStatus('current')
ruckusSystemSyslogStatus = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 6, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemSyslogStatus.setStatus('current')
ruckusSystemSyslogServerIP = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 6, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemSyslogServerIP.setStatus('current')
ruckusSystemSyslogServerPort = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 6, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemSyslogServerPort.setStatus('current')
ruckusSystemNTPStatus = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 7, 1), TruthValue().clone('true')).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusSystemNTPStatus.setStatus('current')
ruckusSystemNTPGMTTime = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 7, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusSystemNTPGMTTime.setStatus('current')
ruckusSystemNTPActiveServer = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 7, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemNTPActiveServer.setStatus('current')
ruckusSystemNTPUpdate = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 7, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemNTPUpdate.setStatus('current')
ruckusSystemFlexMasterURL = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 2, 8, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemFlexMasterURL.setStatus('current')
# Write-only style "command" scalars under ruckusSystemCommands.
ruckusSystemReboot = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 3, 1), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemReboot.setStatus('current')
ruckusSystemSetFactory = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 3, 2), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemSetFactory.setStatus('current')
ruckusSystemDHCPRenew = MibScalar((1, 3, 6, 1, 4, 1, 25053, 1, 1, 11, 1, 1, 3, 3), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusSystemDHCPRenew.setStatus('current')
mibBuilder.exportSymbols("RUCKUS-SYSTEM-MIB", ruckusSystemReboot=ruckusSystemReboot, ruckusSystemServices=ruckusSystemServices, ruckusSystemNTPActiveServer=ruckusSystemNTPActiveServer, ruckusSystemDHCPRenew=ruckusSystemDHCPRenew, ruckusSystemHTTP=ruckusSystemHTTP, ruckusSystemMemoryUtil=ruckusSystemMemoryUtil, ruckusSystemNTPStatus=ruckusSystemNTPStatus, ruckusSystemEvents=ruckusSystemEvents, ruckusSystemObjects=ruckusSystemObjects, ruckusSystemCPUUtil=ruckusSystemCPUUtil, ruckusSystemBonjourStatus=ruckusSystemBonjourStatus, ruckusSystemSyslogStatus=ruckusSystemSyslogStatus, ruckusSystemSSHStatus=ruckusSystemSSHStatus, ruckusSystemNTPGMTTime=ruckusSystemNTPGMTTime, ruckusSystemHTTPS=ruckusSystemHTTPS, ruckusSystemSyslogServerIP=ruckusSystemSyslogServerIP, ruckusSystemBonjour=ruckusSystemBonjour, ruckusSystemHTTPStatus=ruckusSystemHTTPStatus, ruckusSystemMIB=ruckusSystemMIB, ruckusSystemInfo=ruckusSystemInfo, ruckusSystemFlexMasterURL=ruckusSystemFlexMasterURL, ruckusSystemCommands=ruckusSystemCommands, ruckusSystemSyslog=ruckusSystemSyslog, ruckusSystemSetFactory=ruckusSystemSetFactory, ruckusSystemHTTPSStatus=ruckusSystemHTTPSStatus, ruckusSystemSSH=ruckusSystemSSH, ruckusSystemTelnet=ruckusSystemTelnet, ruckusSystemNTP=ruckusSystemNTP, ruckusSystemTelnetStatus=ruckusSystemTelnetStatus, PYSNMP_MODULE_ID=ruckusSystemMIB, ruckusSystemNTPUpdate=ruckusSystemNTPUpdate, ruckusSystemFlexMaster=ruckusSystemFlexMaster, ruckusSystemSyslogServerPort=ruckusSystemSyslogServerPort)
| 121.926471 | 1,493 | 0.760222 |
e518dcdc8cf0652c37a421e7f01a2bc682bd0192 | 850 | py | Python | scripts/populate_chunk_file_size.py | ORC-RIS/beiwe-backend | af2c43f79350bf0fc1ce8efafab1ac9c40008c40 | [
"BSD-3-Clause"
] | 1 | 2022-03-12T16:25:50.000Z | 2022-03-12T16:25:50.000Z | scripts/populate_chunk_file_size.py | ORC-RIS/beiwe-backend | af2c43f79350bf0fc1ce8efafab1ac9c40008c40 | [
"BSD-3-Clause"
] | null | null | null | scripts/populate_chunk_file_size.py | ORC-RIS/beiwe-backend | af2c43f79350bf0fc1ce8efafab1ac9c40008c40 | [
"BSD-3-Clause"
] | null | null | null | from database.data_access_models import ChunkRegistry
from libs.s3 import conn, S3_BUCKET
from django.utils import timezone
from django.db.models import Q
# Backfill ChunkRegistry.file_size for rows where it was never populated
# (NULL) or stored as 0, by asking S3 for each object's size.
basic_query = Q(file_size__isnull=True) | Q(file_size=0)
filters = {}
# stick study object ids here to process particular studies
study_object_ids = []
if study_object_ids:
    filters["study__object_id__in"] = study_object_ids
# this could be a huge query, use the iterator
query = ChunkRegistry.objects.filter(basic_query, **filters).values_list("pk", "chunk_path")
print("start:", timezone.now())
for i, (pk, path) in enumerate(query.iterator()):
    # Progress marker every 1000 rows.
    if i % 1000 == 0:
        print(i)
    # HEAD returns the object's metadata without downloading its body.
    size = conn.head_object(Bucket=S3_BUCKET, Key=path)["ContentLength"]
    ChunkRegistry.objects.filter(pk=pk).update(file_size=size)
print("end:", timezone.now())
| 31.481481 | 92 | 0.738824 |
0c9c51468ae5c9f06766b8776578c8e340ebc167 | 148 | py | Python | Models/ResidenceType.py | pedromneto97/crossbar-heroku | 7bedb40aeb4c7db483113a8579637b1007bba595 | [
"MIT"
] | null | null | null | Models/ResidenceType.py | pedromneto97/crossbar-heroku | 7bedb40aeb4c7db483113a8579637b1007bba595 | [
"MIT"
] | null | null | null | Models/ResidenceType.py | pedromneto97/crossbar-heroku | 7bedb40aeb4c7db483113a8579637b1007bba595 | [
"MIT"
] | null | null | null | from mongoengine import *
class ResidenceType(Document):
type = StringField(required=True, unique=True)
icon = StringField(required=True)
| 21.142857 | 50 | 0.75 |
f77cc0ac3a7be04889cebd874b86a9c913795af6 | 10,695 | py | Python | heat/tests/test_swift.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | heat/tests/test_swift.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | heat/tests/test_swift.py | redhat-openstack/heat | 6b9be0a868b857e942c1cc90594d0f3a0d0725d0 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import swiftclient.client as sc
from heat.common import exception
from heat.common import template_format
from heat.engine.resources import swift
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests import utils
# CFN template exercising OS::Swift::Container: a website container with
# read ACL + web metadata, a container carrying account metadata, a plain
# container, and an AWS::S3::Bucket referencing that container.
swift_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test OS::Swift::Container resources",
"Resources" : {
"SwiftContainerWebsite" : {
"Type" : "OS::Swift::Container",
"DeletionPolicy" : "Delete",
"Properties" : {
"X-Container-Read" : ".r:*",
"X-Container-Meta" : {
"Web-Index" : "index.html",
"Web-Error" : "error.html"
}
}
},
"SwiftAccountMetadata" : {
"Type" : "OS::Swift::Container",
"DeletionPolicy" : "Delete",
"Properties" : {
"X-Account-Meta" : {
"Temp-Url-Key" : "secret"
}
}
},
"S3Bucket" : {
"Type" : "AWS::S3::Bucket",
"Properties" : {
"SwiftContainer" : {"Ref" : "SwiftContainer"}
}
},
"SwiftContainer" : {
"Type" : "OS::Swift::Container",
"Properties" : {
}
}
}
}
'''
class swiftTest(HeatTestCase):
    """Tests for the OS::Swift::Container resource.

    Uses mox record/replay: each test records the expected
    swiftclient.Connection calls, calls ReplayAll(), drives the
    resource, then VerifyAll() checks every expectation was met.
    """

    def setUp(self):
        super(swiftTest, self).setUp()
        # Stub out every swiftclient call the resource may make.
        self.m.CreateMock(sc.Connection)
        self.m.StubOutWithMock(sc.Connection, 'post_account')
        self.m.StubOutWithMock(sc.Connection, 'put_container')
        self.m.StubOutWithMock(sc.Connection, 'delete_container')
        self.m.StubOutWithMock(sc.Connection, 'head_container')
        self.m.StubOutWithMock(sc.Connection, 'get_auth')
        self.stub_keystoneclient()

    def create_resource(self, t, stack, resource_name):
        """Create and CREATE-complete the named container resource."""
        resource_defns = stack.t.resource_definitions(stack)
        rsrc = swift.SwiftContainer(
            'test_resource',
            resource_defns[resource_name],
            stack)
        scheduler.TaskRunner(rsrc.create)()
        self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
        return rsrc

    def test_create_container_name(self):
        """An explicit 'name' property becomes the physical name."""
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        t['Resources']['SwiftContainer']['Properties']['name'] = 'the_name'
        stack = utils.parse_stack(t)
        resource_defns = stack.t.resource_definitions(stack)
        rsrc = swift.SwiftContainer(
            'test_resource',
            resource_defns['SwiftContainer'],
            stack)
        self.assertEqual('the_name', rsrc.physical_resource_name())

    def test_build_meta_headers(self):
        """Meta dicts are prefixed with X-<scope>-Meta-; empty/None -> {}."""
        self.m.UnsetStubs()
        self.assertEqual({}, swift.SwiftContainer._build_meta_headers(
            'container', {}))
        self.assertEqual({}, swift.SwiftContainer._build_meta_headers(
            'container', None))
        meta = {
            'X-Container-Meta-Web-Index': 'index.html',
            'X-Container-Meta-Web-Error': 'error.html'
        }
        self.assertEqual(meta, swift.SwiftContainer._build_meta_headers(
            'container', {
                "Web-Index": "index.html",
                "Web-Error": "error.html"
            }))

    def test_attributes(self):
        """Resource attributes are derived from head_container headers."""
        headers = {
            "content-length": "0",
            "x-container-object-count": "82",
            "accept-ranges": "bytes",
            "x-trans-id": "tx08ea48ef2fa24e6da3d2f5c188fd938b",
            "date": "Wed, 23 Jan 2013 22:48:05 GMT",
            "x-timestamp": "1358980499.84298",
            "x-container-read": ".r:*",
            "x-container-bytes-used": "17680980",
            "content-type": "text/plain; charset=utf-8"}
        container_name = utils.PhysName('test_stack', 'test_resource')
        sc.Connection.put_container(
            container_name, {}).AndReturn(None)
        sc.Connection.head_container(
            mox.IgnoreArg()).MultipleTimes().AndReturn(headers)
        sc.Connection.delete_container(container_name).AndReturn(None)
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'SwiftContainer')
        ref_id = rsrc.FnGetRefId()
        self.assertEqual(container_name, ref_id)
        self.assertEqual('example.com', rsrc.FnGetAtt('DomainName'))
        url = 'http://example.com:1234/v1/%s' % ref_id
        self.assertEqual(url, rsrc.FnGetAtt('WebsiteURL'))
        self.assertEqual('82', rsrc.FnGetAtt('ObjectCount'))
        self.assertEqual('17680980', rsrc.FnGetAtt('BytesUsed'))
        self.assertEqual(headers, rsrc.FnGetAtt('HeadContainer'))
        self.assertRaises(exception.InvalidTemplateAttribute,
                          rsrc.FnGetAtt, 'Foo')
        scheduler.TaskRunner(rsrc.delete)()
        self.m.VerifyAll()

    def test_public_read(self):
        """X-Container-Read property is passed through to put_container."""
        container_name = utils.PhysName('test_stack', 'test_resource')
        sc.Connection.put_container(
            container_name,
            {'X-Container-Read': '.r:*'}).AndReturn(None)
        sc.Connection.delete_container(container_name).AndReturn(None)
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        properties = t['Resources']['SwiftContainer']['Properties']
        properties['X-Container-Read'] = '.r:*'
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'SwiftContainer')
        scheduler.TaskRunner(rsrc.delete)()
        self.m.VerifyAll()

    def test_public_read_write(self):
        """Read and write ACLs are both forwarded as container headers."""
        container_name = utils.PhysName('test_stack', 'test_resource')
        sc.Connection.put_container(
            container_name,
            {'X-Container-Write': '.r:*',
             'X-Container-Read': '.r:*'}).AndReturn(None)
        sc.Connection.delete_container(container_name).AndReturn(None)
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        properties = t['Resources']['SwiftContainer']['Properties']
        properties['X-Container-Read'] = '.r:*'
        properties['X-Container-Write'] = '.r:*'
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'SwiftContainer')
        scheduler.TaskRunner(rsrc.delete)()
        self.m.VerifyAll()

    def test_container_headers(self):
        """X-Container-Meta entries become X-Container-Meta-* headers."""
        container_name = utils.PhysName('test_stack', 'test_resource')
        sc.Connection.put_container(
            container_name,
            {'X-Container-Meta-Web-Error': 'error.html',
             'X-Container-Meta-Web-Index': 'index.html',
             'X-Container-Read': '.r:*'}).AndReturn(None)
        sc.Connection.delete_container(container_name).AndReturn(None)
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'SwiftContainerWebsite')
        scheduler.TaskRunner(rsrc.delete)()
        self.m.VerifyAll()

    def test_account_headers(self):
        """X-Account-Meta entries are applied via post_account."""
        container_name = utils.PhysName('test_stack', 'test_resource')
        sc.Connection.put_container(container_name, {})
        sc.Connection.post_account(
            {'X-Account-Meta-Temp-Url-Key': 'secret'}).AndReturn(None)
        sc.Connection.delete_container(container_name).AndReturn(None)
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'SwiftAccountMetadata')
        scheduler.TaskRunner(rsrc.delete)()
        self.m.VerifyAll()

    def test_delete_exception(self):
        """A swiftclient error during delete surfaces as ResourceFailure."""
        container_name = utils.PhysName('test_stack', 'test_resource')
        sc.Connection.put_container(
            container_name,
            {}).AndReturn(None)
        sc.Connection.delete_container(container_name).AndRaise(
            sc.ClientException('Test delete failure'))
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'SwiftContainer')
        self.assertRaises(exception.ResourceFailure,
                          scheduler.TaskRunner(rsrc.delete))
        self.m.VerifyAll()

    def test_delete_not_found(self):
        """A 404 on delete is tolerated (container already gone)."""
        container_name = utils.PhysName('test_stack', 'test_resource')
        sc.Connection.put_container(
            container_name,
            {}).AndReturn(None)
        sc.Connection.delete_container(container_name).AndRaise(
            sc.ClientException('Its gone',
                               http_status=404))
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'SwiftContainer')
        scheduler.TaskRunner(rsrc.delete)()
        self.m.VerifyAll()

    def test_delete_retain(self):
        """DeletionPolicy Retain deletes nothing (no delete_container
        expectation is recorded)."""
        # first run, with retain policy
        sc.Connection.put_container(
            utils.PhysName('test_stack', 'test_resource'),
            {}).AndReturn(None)
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        container = t['Resources']['SwiftContainer']
        container['DeletionPolicy'] = 'Retain'
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'SwiftContainer')
        scheduler.TaskRunner(rsrc.delete)()
        self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
        self.m.VerifyAll()

    def test_default_headers_not_none_empty_string(self):
        '''Test that we are not passing None when we have a default
        empty string or sc will pass them as string None. see
        bug lp:1259571.
        '''
        container_name = utils.PhysName('test_stack', 'test_resource')
        sc.Connection.put_container(
            container_name, {}).AndReturn(None)
        sc.Connection.delete_container(container_name).AndReturn(None)
        self.m.ReplayAll()
        t = template_format.parse(swift_template)
        stack = utils.parse_stack(t)
        rsrc = self.create_resource(t, stack, 'SwiftContainer')
        scheduler.TaskRunner(rsrc.delete)()
        self.m.VerifyAll()
        self.assertEqual({}, rsrc.metadata_get())
1f86430987d9891697473ef7847702ffee95dacc | 5,087 | py | Python | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IMAGE_MIB.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 177 | 2016-03-15T17:03:51.000Z | 2022-03-18T16:48:44.000Z | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IMAGE_MIB.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 18 | 2016-03-30T10:45:22.000Z | 2020-07-14T16:28:13.000Z | cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IMAGE_MIB.py | CiscoDevNet/ydk-py | 073731fea50694d0bc6cd8ebf10fec308dcc0aa9 | [
"ECL-2.0",
"Apache-2.0"
] | 85 | 2016-03-16T20:38:57.000Z | 2022-02-22T04:26:02.000Z | """ CISCO_IMAGE_MIB
Router image MIB which identify the capabilities
and characteristics of the image
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class CISCOIMAGEMIB(Entity):
"""
.. attribute:: ciscoimagetable
A table provides content information describing the executing IOS image
**type**\: :py:class:`CiscoImageTable <ydk.models.cisco_ios_xe.CISCO_IMAGE_MIB.CISCOIMAGEMIB.CiscoImageTable>`
**config**\: False
"""
_prefix = 'CISCO-IMAGE-MIB'
_revision = '1995-08-15'
def __init__(self):
super(CISCOIMAGEMIB, self).__init__()
self._top_entity = None
self.yang_name = "CISCO-IMAGE-MIB"
self.yang_parent_name = "CISCO-IMAGE-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("ciscoImageTable", ("ciscoimagetable", CISCOIMAGEMIB.CiscoImageTable))])
self._leafs = OrderedDict()
self.ciscoimagetable = CISCOIMAGEMIB.CiscoImageTable()
self.ciscoimagetable.parent = self
self._children_name_map["ciscoimagetable"] = "ciscoImageTable"
self._segment_path = lambda: "CISCO-IMAGE-MIB:CISCO-IMAGE-MIB"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIMAGEMIB, [], name, value)
    class CiscoImageTable(Entity):
        """
        A table provides content information describing the
        executing IOS image.
        .. attribute:: ciscoimageentry
        A image characteristic string entry
        **type**\: list of :py:class:`CiscoImageEntry <ydk.models.cisco_ios_xe.CISCO_IMAGE_MIB.CISCOIMAGEMIB.CiscoImageTable.CiscoImageEntry>`
        **config**\: False
        """

        # YANG module identity for this generated binding.
        _prefix = 'CISCO-IMAGE-MIB'
        _revision = '1995-08-15'

        def __init__(self):
            super(CISCOIMAGEMIB.CiscoImageTable, self).__init__()

            self.yang_name = "ciscoImageTable"
            self.yang_parent_name = "CISCO-IMAGE-MIB"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            # Maps the YANG child name to (python attribute name, class).
            self._child_classes = OrderedDict([("ciscoImageEntry", ("ciscoimageentry", CISCOIMAGEMIB.CiscoImageTable.CiscoImageEntry))])
            self._leafs = OrderedDict()

            # Table rows are held in a YList so entries keep YANG ordering.
            self.ciscoimageentry = YList(self)
            self._segment_path = lambda: "ciscoImageTable"
            self._absolute_path = lambda: "CISCO-IMAGE-MIB:CISCO-IMAGE-MIB/%s" % self._segment_path()
            # Freeze the entity: further attribute writes are validated.
            self._is_frozen = True

        def __setattr__(self, name, value):
            # Validate assignments via ydk once the entity is frozen.
            self._perform_setattr(CISCOIMAGEMIB.CiscoImageTable, [], name, value)


        class CiscoImageEntry(Entity):
            """
            A image characteristic string entry.
            .. attribute:: ciscoimageindex  (key)
            A sequence number for each string stored in the IOS image
            **type**\: int
            **range:** 0..2147483647
            **config**\: False
            .. attribute:: ciscoimagestring
            The string of this entry
            **type**\: str
            **config**\: False
            """

            # YANG module identity for this generated binding.
            _prefix = 'CISCO-IMAGE-MIB'
            _revision = '1995-08-15'

            def __init__(self):
                super(CISCOIMAGEMIB.CiscoImageTable.CiscoImageEntry, self).__init__()

                self.yang_name = "ciscoImageEntry"
                self.yang_parent_name = "ciscoImageTable"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                # ciscoImageIndex is the list key for this YANG list entry.
                self.ylist_key_names = ['ciscoimageindex']
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('ciscoimageindex', (YLeaf(YType.int32, 'ciscoImageIndex'), ['int'])),
                    ('ciscoimagestring', (YLeaf(YType.str, 'ciscoImageString'), ['str'])),
                ])
                self.ciscoimageindex = None
                self.ciscoimagestring = None
                # Keyed segment path: the key value is embedded in the path.
                self._segment_path = lambda: "ciscoImageEntry" + "[ciscoImageIndex='" + str(self.ciscoimageindex) + "']"
                self._absolute_path = lambda: "CISCO-IMAGE-MIB:CISCO-IMAGE-MIB/ciscoImageTable/%s" % self._segment_path()
                self._is_frozen = True

            def __setattr__(self, name, value):
                # Validate assignments (including the two leafs) via ydk.
                self._perform_setattr(CISCOIMAGEMIB.CiscoImageTable.CiscoImageEntry, ['ciscoimageindex', 'ciscoimagestring'], name, value)
    def clone_ptr(self):
        # Create a fresh top-level entity and remember it; ydk calls this
        # when it needs an owned instance of the top-level class (e.g. for
        # decoding a payload into a new object tree).
        self._top_entity = CISCOIMAGEMIB()
        return self._top_entity
| 33.248366 | 147 | 0.603696 |
8668ddb0fb827e6237167c00a6f418dd9ae88a9e | 43,487 | py | Python | lldb/packages/Python/lldbsuite/test/dotest.py | tiwaria1/llvm | 616a396db0610ae0c1992361af005a869ef81897 | [
"Apache-2.0"
] | 1 | 2020-09-10T01:00:18.000Z | 2020-09-10T01:00:18.000Z | lldb/packages/Python/lldbsuite/test/dotest.py | coolstar/llvm-project | e21ccdd5b5667de50de65ee8903a89a21020e89a | [
"Apache-2.0"
] | null | null | null | lldb/packages/Python/lldbsuite/test/dotest.py | coolstar/llvm-project | e21ccdd5b5667de50de65ee8903a89a21020e89a | [
"Apache-2.0"
] | null | null | null | """
A simple testing framework for lldb using python's unit testing framework.
Tests for lldb are written as python scripts which take advantage of the script
bridging provided by LLDB.framework to interact with lldb core.
A specific naming pattern is followed by the .py script to be recognized as
a module which implements a test scenario, namely, Test*.py.
To specify the directories where "Test*.py" python test scripts are located,
you need to pass in a list of directory names. By default, the current
working directory is searched if nothing is specified on the command line.
Type:
./dotest.py -h
for available options.
"""
from __future__ import absolute_import
from __future__ import print_function
# System modules
import atexit
import datetime
import errno
import logging
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
# Third-party modules
import six
import unittest2
# LLDB Modules
import lldbsuite
from . import configuration
from . import dotest_args
from . import lldbtest_config
from . import test_categories
from lldbsuite.test_event import formatter
from . import test_result
from lldbsuite.test_event.event_builder import EventBuilder
from ..support import seven
def is_exe(fpath):
    """Return True if *fpath* names an existing regular file that the
    current user may execute; False otherwise (including for None)."""
    # PEP 8: compare to None with an identity check, not '== None'.
    if fpath is None:
        return False
    return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
    """Returns the full path to a program; None otherwise."""

    def _runnable(candidate):
        # Inlined equivalent of is_exe(): an existing regular file that we
        # have permission to execute.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # An explicit path was given: accept it only if it is executable;
        # do not fall back to a $PATH search.
        return program if _runnable(program) else None

    # Bare command name: scan each entry of $PATH in order.
    for path_entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(path_entry, program)
        if _runnable(candidate):
            return candidate
    return None
def usage(parser):
    """Print argparse's usage/help for the test driver, append a worked
    examples section when verbose output was requested, and exit with
    status 0."""
    parser.print_help()
    if configuration.verbose > 0:
        # With -v, follow the generated help with extended examples.  The
        # text below is emitted verbatim to the user.
        print("""
Examples:
This is an example of using the -f option to pinpoint to a specific test class
and test method to be run:
$ ./dotest.py -f ClassTypesTestCase.test_with_dsym_and_run_command
----------------------------------------------------------------------
Collected 1 test
test_with_dsym_and_run_command (TestClassTypes.ClassTypesTestCase)
Test 'frame variable this' when stopped on a class constructor. ... ok
----------------------------------------------------------------------
Ran 1 test in 1.396s
OK
And this is an example of using the -p option to run a single file (the filename
matches the pattern 'ObjC' and it happens to be 'TestObjCMethods.py'):
$ ./dotest.py -v -p ObjC
----------------------------------------------------------------------
Collected 4 tests
test_break_with_dsym (TestObjCMethods.FoundationTestCase)
Test setting objc breakpoints using '_regexp-break' and 'breakpoint set'. ... ok
test_break_with_dwarf (TestObjCMethods.FoundationTestCase)
Test setting objc breakpoints using '_regexp-break' and 'breakpoint set'. ... ok
test_data_type_and_expr_with_dsym (TestObjCMethods.FoundationTestCase)
Lookup objective-c data types and evaluate expressions. ... ok
test_data_type_and_expr_with_dwarf (TestObjCMethods.FoundationTestCase)
Lookup objective-c data types and evaluate expressions. ... ok
----------------------------------------------------------------------
Ran 4 tests in 16.661s
OK
Running of this script also sets up the LLDB_TEST environment variable so that
individual test cases can locate their supporting files correctly. The script
tries to set up Python's search paths for modules by looking at the build tree
relative to this script. See also the '-i' option in the following example.
Finally, this is an example of using the lldb.py module distributed/installed by
Xcode4 to run against the tests under the 'forward' directory, and with the '-w'
option to add some delay between two tests. It uses ARCH=x86_64 to specify that
as the architecture and CC=clang to specify the compiler used for the test run:
$ PYTHONPATH=/Xcode4/Library/PrivateFrameworks/LLDB.framework/Versions/A/Resources/Python ARCH=x86_64 CC=clang ./dotest.py -v -w -i forward
Session logs for test failures/errors will go into directory '2010-11-11-13_56_16'
----------------------------------------------------------------------
Collected 2 tests
test_with_dsym_and_run_command (TestForwardDeclaration.ForwardDeclarationTestCase)
Display *bar_ptr when stopped on a function with forward declaration of struct bar. ... ok
test_with_dwarf_and_run_command (TestForwardDeclaration.ForwardDeclarationTestCase)
Display *bar_ptr when stopped on a function with forward declaration of struct bar. ... ok
----------------------------------------------------------------------
Ran 2 tests in 5.659s
OK
The 'Session ...' verbiage is recently introduced (see also the '-s' option) to
notify the directory containing the session logs for test failures or errors.
In case there is any test failure/error, a similar message is appended at the
end of the stderr output for your convenience.
ENABLING LOGS FROM TESTS
Option 1:
Writing logs into different files per test case::
$ ./dotest.py --channel "lldb all"
$ ./dotest.py --channel "lldb all" --channel "gdb-remote packets"
These log files are written to:
<session-dir>/<test-id>-host.log (logs from lldb host process)
<session-dir>/<test-id>-server.log (logs from debugserver/lldb-server)
<session-dir>/<test-id>-<test-result>.log (console logs)
By default, logs from successful runs are deleted.  Use the --log-success flag
to create reference logs for debugging.
$ ./dotest.py --log-success
""")
    sys.exit(0)
def parseExclusion(exclusion_file):
    """Parse an exclusion file, of the following format, where
    'skip files', 'skip methods', 'xfail files', and 'xfail methods'
    are the possible list heading values:

    skip files
    <file name>
    <file name>

    xfail methods
    <method name>

    Entries under a 'skip*' heading are appended to
    configuration.skip_tests; entries under an 'xfail*' heading are
    appended to configuration.xfail_tests.
    """
    excl_type = None
    with open(exclusion_file) as f:
        for line in f:
            line = line.strip()
            if not excl_type:
                # First non-consumed line (at file start or after a blank
                # line) names the section the following entries belong to.
                excl_type = line
                continue

            if not line:
                # A blank line terminates the current section.
                excl_type = None
            elif excl_type.startswith('skip'):
                # BUG FIX: the documented headings are two-word forms such
                # as 'skip files', but the old exact comparison against
                # 'skip' could never match them, so documented exclusion
                # files were silently ignored.  startswith() accepts both
                # the bare and the documented heading spellings.
                if not configuration.skip_tests:
                    configuration.skip_tests = []
                configuration.skip_tests.append(line)
            elif excl_type.startswith('xfail'):
                if not configuration.xfail_tests:
                    configuration.xfail_tests = []
                configuration.xfail_tests.append(line)
def parseOptionsAndInitTestdirs():
    """Initialize the list of directories containing our unittest scripts.

    '-h/--help' as the first option prints out usage info and exits the
    program.  Parses every dotest command-line argument and records the
    result in the module-level `configuration` / `lldbtest_config` state
    and, for several options, in os.environ for child processes.
    """
    do_help = False

    platform_system = platform.system()
    platform_machine = platform.machine()

    try:
        parser = dotest_args.create_parser()
        args = parser.parse_args()
    except:
        raise

    # --- Environment variable manipulation -------------------------------
    if args.unset_env_varnames:
        for env_var in args.unset_env_varnames:
            if env_var in os.environ:
                # From Python Doc: When unsetenv() is supported, deletion of items in os.environ
                # is automatically translated into a corresponding call to
                # unsetenv().
                del os.environ[env_var]
                # os.unsetenv(env_var)

    if args.set_env_vars:
        for env_var in args.set_env_vars:
            parts = env_var.split('=', 1)
            if len(parts) == 1:
                # Bare NAME means "set NAME to the empty string".
                os.environ[parts[0]] = ""
            else:
                os.environ[parts[0]] = parts[1]

    if args.set_inferior_env_vars:
        lldbtest_config.inferior_env = ' '.join(args.set_inferior_env_vars)

    if args.h:
        do_help = True

    # --- Compiler / dsymutil / FileCheck discovery -----------------------
    if args.compiler:
        configuration.compiler = os.path.realpath(args.compiler)
        if not is_exe(configuration.compiler):
            configuration.compiler = which(args.compiler)
        if not is_exe(configuration.compiler):
            logging.error(
                '%s is not a valid compiler executable; aborting...',
                args.compiler)
            sys.exit(-1)
    else:
        # Use a compiler appropriate appropriate for the Apple SDK if one was
        # specified
        if platform_system == 'Darwin' and args.apple_sdk:
            configuration.compiler = seven.get_command_output(
                'xcrun -sdk "%s" -find clang 2> /dev/null' %
                (args.apple_sdk))
        else:
            # 'clang' on ubuntu 14.04 is 3.4 so we try clang-3.5 first
            candidateCompilers = ['clang-3.5', 'clang', 'gcc']
            for candidate in candidateCompilers:
                if which(candidate):
                    configuration.compiler = candidate
                    break

    if args.dsymutil:
        configuration.dsymutil = args.dsymutil
    elif platform_system == 'Darwin':
        configuration.dsymutil = seven.get_command_output(
            'xcrun -find -toolchain default dsymutil')

    if args.filecheck:
        # The lldb-dotest script produced by the CMake build passes in a path
        # to a working FileCheck binary. So does one specific Xcode project
        # target. However, when invoking dotest.py directly, a valid --filecheck
        # option needs to be given.
        configuration.filecheck = os.path.abspath(args.filecheck)

    if not configuration.get_filecheck_path():
        logging.warning('No valid FileCheck executable; some tests may fail...')
        logging.warning('(Double-check the --filecheck argument to dotest.py)')

    # --- Logging / debugserver options -----------------------------------
    if args.channels:
        lldbtest_config.channels = args.channels

    if args.log_success:
        lldbtest_config.log_success = args.log_success

    if args.out_of_tree_debugserver:
        lldbtest_config.out_of_tree_debugserver = args.out_of_tree_debugserver

    # Set SDKROOT if we are using an Apple SDK
    if platform_system == 'Darwin' and args.apple_sdk:
        configuration.sdkroot = seven.get_command_output(
            'xcrun --sdk "%s" --show-sdk-path 2> /dev/null' %
            (args.apple_sdk))

    # --- Architecture -----------------------------------------------------
    if args.arch:
        configuration.arch = args.arch
        if configuration.arch.startswith(
                'arm') and platform_system == 'Darwin' and not args.apple_sdk:
            # Default arm targets on Darwin to the iOS SDK, preferring the
            # internal SDK when available.
            configuration.sdkroot = seven.get_command_output(
                'xcrun --sdk iphoneos.internal --show-sdk-path 2> /dev/null')
            if not os.path.exists(configuration.sdkroot):
                configuration.sdkroot = seven.get_command_output(
                    'xcrun --sdk iphoneos --show-sdk-path 2> /dev/null')
    else:
        configuration.arch = platform_machine

    # --- Category selection ------------------------------------------------
    if args.categories_list:
        configuration.categories_list = set(
            test_categories.validate(
                args.categories_list, False))
        configuration.use_categories = True
    else:
        configuration.categories_list = []

    if args.skip_categories:
        configuration.skip_categories += test_categories.validate(
            args.skip_categories, False)

    if args.xfail_categories:
        configuration.xfail_categories += test_categories.validate(
            args.xfail_categories, False)

    if args.E:
        os.environ['CFLAGS_EXTRAS'] = args.E

    if args.dwarf_version:
        configuration.dwarf_version = args.dwarf_version
        # We cannot modify CFLAGS_EXTRAS because they're used in test cases
        # that explicitly require no debug info.
        os.environ['CFLAGS'] = '-gdwarf-{}'.format(configuration.dwarf_version)

    if args.settings:
        for setting in args.settings:
            if not len(setting) == 1 or not setting[0].count('='):
                logging.error('"%s" is not a setting in the form "key=value"',
                              setting[0])
                sys.exit(-1)
            setting_list = setting[0].split('=', 1)
            configuration.settings.append((setting_list[0], setting_list[1]))

    if args.d:
        # Stop ourselves so a debugger can attach before the run proceeds.
        sys.stdout.write(
            "Suspending the process %d to wait for debugger to attach...\n" %
            os.getpid())
        sys.stdout.flush()
        os.kill(os.getpid(), signal.SIGSTOP)

    if args.f:
        if any([x.startswith('-') for x in args.f]):
            usage(parser)
        configuration.filters.extend(args.f)

    if args.framework:
        configuration.lldb_framework_path = args.framework

    # --- lldb executable ----------------------------------------------------
    if args.executable:
        # lldb executable is passed explicitly
        lldbtest_config.lldbExec = os.path.realpath(args.executable)
        if not is_exe(lldbtest_config.lldbExec):
            lldbtest_config.lldbExec = which(args.executable)
        if not is_exe(lldbtest_config.lldbExec):
            logging.error(
                '%s is not a valid executable to test; aborting...',
                args.executable)
            sys.exit(-1)

    if args.server:
        os.environ['LLDB_DEBUGSERVER_PATH'] = args.server

    if args.excluded:
        for excl_file in args.excluded:
            parseExclusion(excl_file)

    if args.p:
        if args.p.startswith('-'):
            usage(parser)
        configuration.regexp = args.p

    # --- Session directory / misc flags -------------------------------------
    if args.s:
        configuration.sdir_name = args.s
    else:
        timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
        configuration.sdir_name = os.path.join(os.getcwd(), timestamp_started)

    configuration.session_file_format = args.session_file_format

    if args.t:
        os.environ['LLDB_COMMAND_TRACE'] = 'YES'

    if args.v:
        configuration.verbose = 2

    # argparse makes sure we have a number
    if args.sharp:
        configuration.count = args.sharp

    if sys.platform.startswith('win32'):
        os.environ['LLDB_DISABLE_CRASH_DIALOG'] = str(
            args.disable_crash_dialog)
        os.environ['LLDB_LAUNCH_INFERIORS_WITHOUT_CONSOLE'] = str(True)

    if do_help:
        # Deferred until here so -h can be combined with other options.
        usage(parser)

    # --- Results formatter ---------------------------------------------------
    if args.results_file:
        configuration.results_filename = args.results_file

    if args.results_formatter:
        configuration.results_formatter_name = args.results_formatter
    if args.results_formatter_options:
        configuration.results_formatter_options = args.results_formatter_options

    # Default to using the BasicResultsFormatter if no formatter is specified.
    if configuration.results_formatter_name is None:
        configuration.results_formatter_name = (
            "lldbsuite.test_event.formatter.results_formatter.ResultsFormatter")

    # Reproducer arguments
    if args.capture_path and args.replay_path:
        logging.error('Cannot specify both a capture and a replay path.')
        sys.exit(-1)

    if args.capture_path:
        configuration.capture_path = args.capture_path

    if args.replay_path:
        configuration.replay_path = args.replay_path

    # rerun-related arguments
    configuration.rerun_all_issues = args.rerun_all_issues

    # --- Remote platform / build directories --------------------------------
    if args.lldb_platform_name:
        configuration.lldb_platform_name = args.lldb_platform_name
    if args.lldb_platform_url:
        configuration.lldb_platform_url = args.lldb_platform_url
    if args.lldb_platform_working_dir:
        configuration.lldb_platform_working_dir = args.lldb_platform_working_dir
    if args.test_build_dir:
        configuration.test_build_dir = args.test_build_dir
    if args.lldb_module_cache_dir:
        configuration.lldb_module_cache_dir = args.lldb_module_cache_dir
    else:
        configuration.lldb_module_cache_dir = os.path.join(
            configuration.test_build_dir, 'module-cache-lldb')
    if args.clang_module_cache_dir:
        configuration.clang_module_cache_dir = args.clang_module_cache_dir
    else:
        configuration.clang_module_cache_dir = os.path.join(
            configuration.test_build_dir, 'module-cache-clang')

    if args.lldb_libs_dir:
        configuration.lldb_libs_dir = args.lldb_libs_dir

    if args.enabled_plugins:
        configuration.enabled_plugins = args.enabled_plugins

    # Gather all the dirs passed on the command line.
    if len(args.args) > 0:
        configuration.testdirs = [os.path.realpath(os.path.abspath(x)) for x in args.args]

    lldbtest_config.codesign_identity = args.codesign_identity
def setupTestResults():
    """Create and register the results formatter described by the current
    configuration, if one can be built."""
    # Describe the formatter we want from the configuration settings.
    config = formatter.FormatterConfig()
    config.filename = configuration.results_filename
    config.formatter_name = configuration.results_formatter_name
    config.formatter_options = configuration.results_formatter_options

    spec = formatter.create_results_formatter(config)
    if spec is None or spec.formatter is None:
        # No formatter could be constructed; nothing to wire up.
        return

    configuration.results_formatter_object = spec.formatter

    # Tell the formatter that a (single-worker) run is starting.
    event = EventBuilder.bare_event("initialize")
    event["worker_count"] = 1
    spec.formatter.handle_event(event)

    # Guarantee the formatter is torn down when the driver exits.
    if spec.cleanup_func is not None:
        atexit.register(spec.cleanup_func)
def setupSysPath():
    """
    Add LLDB.framework/Resources/Python to the search paths for modules.
    As a side effect, we also discover the 'lldb' executable and export it here.

    Exports LLDB_TEST, LLDB_TEST_SRC, LLDB_SRC, LLDB_LIB_DIR,
    LLDB_IMPLIB_DIR and (when found) LLDBVSCODE_EXEC / LLDB_FRAMEWORK into
    os.environ, and inserts the lldb python module directory into sys.path.
    """
    # Get the directory containing the current script.
    if "DOTEST_PROFILE" in os.environ and "DOTEST_SCRIPT_DIR" in os.environ:
        # Set when the profiler re-executes us; __file__ is unreliable then.
        scriptPath = os.environ["DOTEST_SCRIPT_DIR"]
    else:
        scriptPath = os.path.dirname(os.path.realpath(__file__))
    if not scriptPath.endswith('test'):
        print("This script expects to reside in lldb's test directory.")
        sys.exit(-1)

    os.environ["LLDB_TEST"] = scriptPath
    os.environ["LLDB_TEST_SRC"] = lldbsuite.lldb_test_root

    # Set up the root build directory.
    if not configuration.test_build_dir:
        raise Exception("test_build_dir is not set")
    configuration.test_build_dir = os.path.abspath(configuration.test_build_dir)

    # Set up the LLDB_SRC environment variable, so that the tests can locate
    # the LLDB source code.
    os.environ["LLDB_SRC"] = lldbsuite.lldb_root

    pluginPath = os.path.join(scriptPath, 'plugins')
    toolsLLDBVSCode = os.path.join(scriptPath, 'tools', 'lldb-vscode')
    toolsLLDBServerPath = os.path.join(scriptPath, 'tools', 'lldb-server')

    # Insert script dir, plugin dir and lldb-server dir to the sys.path.
    sys.path.insert(0, pluginPath)
    # Adding test/tools/lldb-vscode to the path makes it easy to
    # "import lldb_vscode_testcase" from the VSCode tests
    sys.path.insert(0, toolsLLDBVSCode)
    # Adding test/tools/lldb-server to the path makes it easy
    sys.path.insert(0, toolsLLDBServerPath)
    # to "import lldbgdbserverutils" from the lldb-server tests

    # This is the root of the lldb git/svn checkout
    # When this changes over to a package instead of a standalone script, this
    # will be `lldbsuite.lldb_root`
    lldbRootDirectory = lldbsuite.lldb_root

    # Some of the tests can invoke the 'lldb' command directly.
    # We'll try to locate the appropriate executable right here.

    # The lldb executable can be set from the command line
    # if it's not set, we try to find it now
    # first, we try the environment
    if not lldbtest_config.lldbExec:
        # First, you can define an environment variable LLDB_EXEC specifying the
        # full pathname of the lldb executable.
        if "LLDB_EXEC" in os.environ:
            lldbtest_config.lldbExec = os.environ["LLDB_EXEC"]

    if not lldbtest_config.lldbExec:
        # Last, check the path
        lldbtest_config.lldbExec = which('lldb')

    if lldbtest_config.lldbExec and not is_exe(lldbtest_config.lldbExec):
        print(
            "'{}' is not a path to a valid executable".format(
                lldbtest_config.lldbExec))
        lldbtest_config.lldbExec = None

    if not lldbtest_config.lldbExec:
        print("The 'lldb' executable cannot be located.  Some of the tests may not be run as a result.")
        sys.exit(-1)

    # confusingly, this is the "bin" directory
    lldbLibDir = os.path.dirname(lldbtest_config.lldbExec)
    os.environ["LLDB_LIB_DIR"] = lldbLibDir
    lldbImpLibDir = configuration.lldb_libs_dir
    os.environ["LLDB_IMPLIB_DIR"] = lldbImpLibDir
    print("LLDB library dir:", os.environ["LLDB_LIB_DIR"])
    print("LLDB import library dir:", os.environ["LLDB_IMPLIB_DIR"])
    os.system('%s -v' % lldbtest_config.lldbExec)

    lldbDir = os.path.dirname(lldbtest_config.lldbExec)

    lldbVSCodeExec = os.path.join(lldbDir, "lldb-vscode")
    if is_exe(lldbVSCodeExec):
        os.environ["LLDBVSCODE_EXEC"] = lldbVSCodeExec
    else:
        if not configuration.shouldSkipBecauseOfCategories(["lldb-vscode"]):
            print(
                "The 'lldb-vscode' executable cannot be located.  The lldb-vscode tests can not be run as a result.")
            configuration.skip_categories.append("lldb-vscode")

    lldbPythonDir = None  # The directory that contains 'lldb/__init__.py'
    if not configuration.lldb_framework_path and os.path.exists(os.path.join(lldbLibDir, "LLDB.framework")):
        configuration.lldb_framework_path = os.path.join(lldbLibDir, "LLDB.framework")
    if configuration.lldb_framework_path:
        lldbtest_config.lldb_framework_path = configuration.lldb_framework_path
        candidatePath = os.path.join(
            configuration.lldb_framework_path, 'Resources', 'Python')
        if os.path.isfile(os.path.join(candidatePath, 'lldb/__init__.py')):
            lldbPythonDir = candidatePath
        if not lldbPythonDir:
            print(
                'Resources/Python/lldb/__init__.py was not found in ' +
                configuration.lldb_framework_path)
            sys.exit(-1)
    else:
        # If our lldb supports the -P option, use it to find the python path:
        init_in_python_dir = os.path.join('lldb', '__init__.py')

        lldb_dash_p_result = subprocess.check_output(
            [lldbtest_config.lldbExec, "-P"], stderr=subprocess.STDOUT, universal_newlines=True)

        if lldb_dash_p_result and not lldb_dash_p_result.startswith(
                ("<", "lldb: invalid option:")) and not lldb_dash_p_result.startswith("Traceback"):
            lines = lldb_dash_p_result.splitlines()

            # Workaround for readline vs libedit issue on FreeBSD.  If stdout
            # is not a terminal Python executes
            #   rl_variable_bind ("enable-meta-key", "off");
            # This produces a warning with FreeBSD's libedit because the
            # enable-meta-key variable is unknown.  Not an issue on Apple
            # because cpython commit f0ab6f9f0603 added a #ifndef __APPLE__
            # around the call.  See http://bugs.python.org/issue19884 for more
            # information.  For now we just discard the warning output.
            if len(lines) >= 1 and lines[0].startswith(
                    "bind: Invalid command"):
                lines.pop(0)

            # Taking the last line because lldb outputs
            # 'Cannot read termcap database;\nusing dumb terminal settings.\n'
            # before the path
            if len(lines) >= 1 and os.path.isfile(
                    os.path.join(lines[-1], init_in_python_dir)):
                lldbPythonDir = lines[-1]
                if "freebsd" in sys.platform or "linux" in sys.platform:
                    os.environ['LLDB_LIB_DIR'] = os.path.join(
                        lldbPythonDir, '..', '..')

    if not lldbPythonDir:
        print(
            "Unable to load lldb extension module.  Possible reasons for this include:")
        print(" 1) LLDB was built with LLDB_ENABLE_PYTHON=0")
        print(
            " 2) PYTHONPATH and PYTHONHOME are not set correctly.  PYTHONHOME should refer to")
        print(
            "    the version of Python that LLDB built and linked against, and PYTHONPATH")
        print(
            "    should contain the Lib directory for the same python distro, as well as the")
        print("    location of LLDB\'s site-packages folder.")
        print(
            " 3) A different version of Python than that which was built against is exported in")
        print("    the system\'s PATH environment variable, causing conflicts.")
        print(
            " 4) The executable '%s' could not be found.  Please check " %
            lldbtest_config.lldbExec)
        print("    that it exists and is executable.")

    if lldbPythonDir:
        lldbPythonDir = os.path.normpath(lldbPythonDir)
        # Some of the code that uses this path assumes it hasn't resolved the Versions... link.
        # If the path we've constructed looks like that, then we'll strip out
        # the Versions/A part.
        (before, frameWithVersion, after) = lldbPythonDir.rpartition(
            "LLDB.framework/Versions/A")
        if frameWithVersion != "":
            lldbPythonDir = before + "LLDB.framework" + after

        lldbPythonDir = os.path.abspath(lldbPythonDir)

        # If tests need to find LLDB_FRAMEWORK, now they can do it
        os.environ["LLDB_FRAMEWORK"] = os.path.dirname(
            os.path.dirname(lldbPythonDir))

        # This is to locate the lldb.py module.  Insert it right after
        # sys.path[0].
        sys.path[1:1] = [lldbPythonDir]
def visit_file(dir, name):
    """Load the tests from a single Test*.py file into the global suite,
    honoring the -p regexp, skip list, and -f filterspecs from
    `configuration`."""
    # Try to match the regexp pattern, if specified.
    if configuration.regexp:
        if not re.search(configuration.regexp, name):
            # We didn't match the regex, we're done.
            return

    if configuration.skip_tests:
        for file_regexp in configuration.skip_tests:
            if re.search(file_regexp, name):
                return

    # We found a match for our test.  Add it to the suite.

    # Update the sys.path first.
    if not sys.path.count(dir):
        sys.path.insert(0, dir)
    base = os.path.splitext(name)[0]

    # Thoroughly check the filterspec against the base module and admit
    # the (base, filterspec) combination only when it makes sense.

    def check(obj, parts):
        # Walk the dotted filterspec as attribute accesses from obj.
        for part in parts:
            try:
                parent, obj = obj, getattr(obj, part)
            except AttributeError:
                # The filterspec has failed.
                return False
        return True

    module = __import__(base)

    def iter_filters():
        # Yield each filterspec, rewritten so unittest's loader can resolve
        # it relative to `module`:
        #   - as given, when it resolves directly;
        #   - with the leading module name stripped, when it was prefixed
        #     with this file's base name;
        #   - prefixed with the class name, when only Class-relative parts
        #     were given (e.g. a bare test method name).
        for filterspec in configuration.filters:
            parts = filterspec.split('.')
            if check(module, parts):
                yield filterspec
            elif parts[0] == base and len(parts) > 1 and check(module, parts[1:]):
                yield '.'.join(parts[1:])
            else:
                for key, value in module.__dict__.items():
                    if check(value, parts):
                        yield key + '.' + filterspec

    filtered = False
    for filterspec in iter_filters():
        filtered = True
        print("adding filter spec %s to module %s" % (filterspec, repr(module)))
        tests = unittest2.defaultTestLoader.loadTestsFromName(filterspec, module)
        configuration.suite.addTests(tests)

    # Forgo this module if the (base, filterspec) combo is invalid
    if configuration.filters and not filtered:
        return

    if not filtered:
        # Add the entire file's worth of tests since we're not filtered.
        # Also the fail-over case when the filterspec branch
        # (base, filterspec) combo doesn't make sense.
        configuration.suite.addTests(
            unittest2.defaultTestLoader.loadTestsFromName(base))
def visit(prefix, dir, names):
    """Visitor function for os.path.walk(path, visit, arg)."""
    # Never descend into SCM bookkeeping directories.
    if {'.svn', '.git'} & set(dir.split(os.sep)):
        return

    # Gather all the Python test file names that follow the Test*.py pattern.
    python_test_files = [
        candidate for candidate in names
        if candidate.endswith('.py') and candidate.startswith(prefix)
    ]

    # Visit all the python test files.
    for name in python_test_files:
        try:
            # Ensure we error out if we have multiple tests with the same
            # base name.
            # Future improvement: find all the places where we work with base
            # names and convert to full paths.  We have directory structure
            # to disambiguate these, so we shouldn't need this constraint.
            if name in configuration.all_tests:
                raise Exception("Found multiple tests with the name %s" % name)
            configuration.all_tests.add(name)

            # Run the relevant tests in the python file.
            visit_file(dir, name)
        except Exception as ex:
            # Report the failure to the results formatter (if configured)
            # before propagating the exception.
            test_filename = os.path.abspath(os.path.join(dir, name))
            if configuration.results_formatter_object is not None:
                # Grab the backtrace for the exception.
                import traceback
                backtrace = traceback.format_exc()

                # Generate the test event.
                configuration.results_formatter_object.handle_event(
                    EventBuilder.event_for_job_test_add_error(
                        test_filename, ex, backtrace))
            raise
# ======================================== #
# #
# Execution of the test driver starts here #
# #
# ======================================== #
def checkDsymForUUIDIsNotOn():
    """Abort the run when the com.apple.DebugSymbols defaults domain has
    dSYM path mapping configured, since that interferes with the tests."""
    cmd = ["defaults", "read", "com.apple.DebugSymbols"]
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    output_str = proc.stdout.read().decode("utf-8")
    if "DBGFileMappedPaths = " not in output_str:
        return
    # Show the offending defaults output, then bail out.
    print("%s =>" % ' '.join(cmd))
    print(output_str)
    print(
        "Disable automatic lookup and caching of dSYMs before running the test suite!")
    print("Exiting...")
    sys.exit(0)
def exitTestSuite(exitCode=None):
    """Shut down lldb and optionally terminate the process.

    Note that a falsy exitCode (None or 0) only terminates the debugger and
    returns to the caller; only a non-zero code exits the process here.
    """
    # lldb.py does SBDebugger.Initialize().
    # Call SBDebugger.Terminate() on exit.
    import lldb
    lldb.SBDebugger.Terminate()
    if exitCode:
        sys.exit(exitCode)
def getVersionForSDK(sdk):
    """Return the version suffix of the named Apple SDK, derived from the
    SDK directory name that xcrun reports (e.g. 'iPhoneSimulator13.4.sdk'
    yields '13.4' for sdk='iphonesimulator')."""
    sdk = sdk.lower()
    # Ask Xcode where this SDK lives on disk.
    sdk_path = seven.get_command_output('xcrun -sdk %s --show-sdk-path' % sdk)
    # Directory stem without the '.sdk' extension, lower-cased to match.
    stem = os.path.splitext(os.path.basename(sdk_path))[0].lower()
    # Stripping the SDK name leaves just the version number.
    return stem.replace(sdk, '')
def setDefaultTripleForPlatform():
    """For the ios-simulator platform, compute a default target triple,
    export it through $TRIPLE, and return it as a one-entry dict; for every
    other platform return an empty dict."""
    if configuration.lldb_platform_name != 'ios-simulator':
        return {}
    triple = 'x86_64-apple-ios%s' % (
        getVersionForSDK('iphonesimulator'))
    os.environ['TRIPLE'] = triple
    return {'TRIPLE': triple}
def checkCompiler():
    """Sanity-check that the configured compiler exists.

    If it is not on PATH, fall back to 'xcrun -find' on Darwin; raise when
    no usable compiler can be resolved.  On success the resolved path is
    written back to configuration.compiler.
    """
    # Add some intervention here to sanity check that the compiler requested is sane.
    # If found not to be an executable program, we abort.
    c = configuration.compiler
    if which(c):
        return

    if not sys.platform.startswith("darwin"):
        raise Exception(c + " is not a valid compiler")

    pipe = subprocess.Popen(
        ['xcrun', '-find', c], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    cmd_output = pipe.stdout.read()
    # BUG FIX: under Python 3 pipe.stdout.read() returns bytes, so the
    # substring test ('"not found" in cmd_output') and the split('\n') below
    # both raised TypeError.  Decode to text first (Python 2 str passes
    # through unchanged).
    if not isinstance(cmd_output, str):
        cmd_output = cmd_output.decode("utf-8")
    if not cmd_output or "not found" in cmd_output:
        raise Exception(c + " is not a valid compiler")

    configuration.compiler = cmd_output.split('\n')[0]
    print("'xcrun -find %s' returning %s" % (c, configuration.compiler))
def canRunLibcxxTests():
    """Return (supported, reason) describing whether the libc++ test
    category can run on this host."""
    from lldbsuite.test import lldbplatformutil

    platform = lldbplatformutil.getPlatform()

    if lldbplatformutil.target_is_android() or lldbplatformutil.platformIsDarwin():
        return True, "libc++ always present"

    if platform == "linux":
        if os.path.isdir("/usr/include/c++/v1"):
            return True, "Headers found, let's hope they work"
        # Headers not in the default location: probe by actually compiling a
        # trivial libc++ program (fed via stdin) with the configured compiler.
        with tempfile.NamedTemporaryFile() as f:
            cmd = [configuration.compiler, "-xc++", "-stdlib=libc++", "-o", f.name, "-"]
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
            _, stderr = p.communicate("#include <algorithm>\nint main() {}")
            if not p.returncode:
                return True, "Compiling with -stdlib=libc++ works"
            return False, "Compiling with -stdlib=libc++ fails with the error: %s" % stderr

    return False, "Don't know how to build with libc++ on %s" % platform
def checkLibcxxSupport():
    """Skip the libc++ test category unless this host supports it or the
    user explicitly requested it."""
    supported, reason = canRunLibcxxTests()
    if supported or "libc++" in configuration.categories_list:
        # Either usable here, or explicitly requested — let it run.
        return
    print("Libc++ tests will not be run because: " + reason)
    configuration.skip_categories.append("libc++")
def canRunLibstdcxxTests():
    """Return (supported, reason) for the libstdcxx test category."""
    from lldbsuite.test import lldbplatformutil

    platform = lldbplatformutil.getPlatform()
    # Android targets get their own label (and are excluded from the
    # linux fast-path below).
    if lldbplatformutil.target_is_android():
        platform = "android"
    if platform != "linux":
        return False, "Don't know how to build with libstdcxx on %s" % platform
    return True, "libstdcxx always present"
def checkLibstdcxxSupport():
    """Skip the libstdcxx test category unless this host supports it or
    the user explicitly requested it."""
    supported, reason = canRunLibstdcxxTests()
    if supported or "libstdcxx" in configuration.categories_list:
        # Either usable here, or explicitly requested — let it run.
        return
    print("libstdcxx tests will not be run because: " + reason)
    configuration.skip_categories.append("libstdcxx")
def canRunWatchpointTests():
    """Return (supported, reason) for the watchpoint test category."""
    from lldbsuite.test import lldbplatformutil

    if lldbplatformutil.getPlatform() != "netbsd":
        return True, "watchpoint support available"

    # On NetBSD, userland writes to the debug registers are gated by a
    # sysctl knob unless we are root.
    if os.geteuid() == 0:
        return True, "root can always write dbregs"
    try:
        sysctl_value = subprocess.check_output(
            ["/sbin/sysctl", "-n",
             "security.models.extensions.user_set_dbregs"]).decode().strip()
        if sysctl_value == "1":
            return True, "security.models.extensions.user_set_dbregs enabled"
    except subprocess.CalledProcessError:
        # sysctl itself failed — treat the knob as disabled.
        pass
    return False, "security.models.extensions.user_set_dbregs disabled"
def checkWatchpointSupport():
    """Skip the watchpoint test category unless this host supports it or
    the user explicitly requested it."""
    supported, reason = canRunWatchpointTests()
    if supported or "watchpoint" in configuration.categories_list:
        # Either usable here, or explicitly requested — let it run.
        return
    print("watchpoint tests will not be run because: " + reason)
    configuration.skip_categories.append("watchpoint")
def checkDebugInfoSupport():
    """Skip every debug-info category that the current platform/compiler
    pair cannot produce, unless the user explicitly requested it."""
    import lldb

    # OS component of the selected platform's triple (arch-vendor-os).
    platform = lldb.selected_platform.GetTriple().split('-')[2]
    compiler = configuration.compiler

    skipped = []
    for category in test_categories.debug_info_categories:
        requested = category in configuration.categories_list
        if requested or test_categories.is_supported_on_platform(
                category, platform, compiler):
            continue
        configuration.skip_categories.append(category)
        skipped.append(category)
    if skipped:
        print("Skipping following debug info categories:", skipped)
def run_suite():
    """Main entry point of the dotest driver.

    Parses options, configures the (possibly remote) lldb platform,
    collects the test cases and runs them with unittest2, then exits the
    process via exitTestSuite().  The steps below are strictly ordered:
    option parsing must precede sys.path setup, which must precede the
    `import lldb` and any SB API use.
    """
    # On MacOS X, check to make sure that domain for com.apple.DebugSymbols defaults
    # does not exist before proceeding to running the test suite.
    if sys.platform.startswith("darwin"):
        checkDsymForUUIDIsNotOn()

    # Start the actions by first parsing the options while setting up the test
    # directories, followed by setting up the search paths for lldb utilities;
    # then, we walk the directory trees and collect the tests into our test suite.
    #
    parseOptionsAndInitTestdirs()

    # Setup test results (test results formatter and output handling).
    setupTestResults()
    setupSysPath()

    import lldbconfig
    # Reproducer capture/replay needs to defer SBDebugger initialization.
    if configuration.capture_path or configuration.replay_path:
        lldbconfig.INITIALIZE = False
    import lldb

    if configuration.capture_path:
        lldb.SBReproducer.Capture(configuration.capture_path)
        lldb.SBReproducer.SetAutoGenerate(True)
    elif configuration.replay_path:
        lldb.SBReproducer.PassiveReplay(configuration.replay_path)

    if not lldbconfig.INITIALIZE:
        lldb.SBDebugger.Initialize()

    # Use host platform by default.
    lldb.selected_platform = lldb.SBPlatform.GetHostPlatform()

    # Now we can also import lldbutil
    from lldbsuite.test import lldbutil

    if configuration.lldb_platform_name:
        print("Setting up remote platform '%s'" %
              (configuration.lldb_platform_name))
        lldb.remote_platform = lldb.SBPlatform(
            configuration.lldb_platform_name)
        if not lldb.remote_platform.IsValid():
            print(
                "error: unable to create the LLDB platform named '%s'." %
                (configuration.lldb_platform_name))
            exitTestSuite(1)
        if configuration.lldb_platform_url:
            # We must connect to a remote platform if a LLDB platform URL was
            # specified
            print(
                "Connecting to remote platform '%s' at '%s'..." %
                (configuration.lldb_platform_name, configuration.lldb_platform_url))
            platform_connect_options = lldb.SBPlatformConnectOptions(
                configuration.lldb_platform_url)
            err = lldb.remote_platform.ConnectRemote(platform_connect_options)
            if err.Success():
                print("Connected.")
            else:
                print("error: failed to connect to remote platform using URL '%s': %s" % (
                    configuration.lldb_platform_url, err))
                exitTestSuite(1)
        else:
            configuration.lldb_platform_url = None

    platform_changes = setDefaultTripleForPlatform()
    first = True
    for key in platform_changes:
        if first:
            print("Environment variables setup for platform support:")
            first = False
        print("%s = %s" % (key, platform_changes[key]))

    if configuration.lldb_platform_working_dir:
        print("Setting remote platform working directory to '%s'..." %
              (configuration.lldb_platform_working_dir))
        error = lldb.remote_platform.MakeDirectory(
            configuration.lldb_platform_working_dir, 448)  # 448 = 0o700
        if error.Fail():
            raise Exception("making remote directory '%s': %s" % (
                configuration.lldb_platform_working_dir, error))

        if not lldb.remote_platform.SetWorkingDirectory(
                configuration.lldb_platform_working_dir):
            raise Exception("failed to set working directory '%s'" % configuration.lldb_platform_working_dir)
        lldb.selected_platform = lldb.remote_platform
    else:
        lldb.remote_platform = None
        configuration.lldb_platform_working_dir = None
        configuration.lldb_platform_url = None

    # Set up the working directory.
    # Note that it's not dotest's job to clean this directory.
    lldbutil.mkdir_p(configuration.test_build_dir)

    # Third element of the triple is the OS of the (possibly remote) target.
    target_platform = lldb.selected_platform.GetTriple().split('-')[2]

    checkLibcxxSupport()
    checkLibstdcxxSupport()
    checkWatchpointSupport()
    checkDebugInfoSupport()

    # Don't do debugserver tests on anything except OS X.
    configuration.dont_do_debugserver_test = (
        "linux" in target_platform or
        "freebsd" in target_platform or
        "netbsd" in target_platform or
        "windows" in target_platform)

    # Don't do lldb-server (llgs) tests on anything except Linux and Windows.
    configuration.dont_do_llgs_test = not (
        "linux" in target_platform or
        "netbsd" in target_platform or
        "windows" in target_platform)

    # Collect the tests: visit() adds every Test*.py found under each dir.
    for testdir in configuration.testdirs:
        for (dirpath, dirnames, filenames) in os.walk(testdir):
            visit('Test', dirpath, filenames)

    #
    # Now that we have loaded all the test cases, run the whole test suite.
    #

    # Install the control-c handler.
    unittest2.signals.installHandler()

    lldbutil.mkdir_p(configuration.sdir_name)
    os.environ["LLDB_SESSION_DIRNAME"] = configuration.sdir_name

    sys.stderr.write(
        "\nSession logs for test failures/errors/unexpected successes"
        " will go into directory '%s'\n" %
        configuration.sdir_name)

    #
    # Invoke the default TextTestRunner to run the test suite
    #
    checkCompiler()

    if configuration.verbose:
        print("compiler=%s" % configuration.compiler)

    # Iterating over all possible architecture and compiler combinations.
    configString = "arch=%s compiler=%s" % (configuration.arch,
                                            configuration.compiler)

    # Output the configuration.
    if configuration.verbose:
        sys.stderr.write("\nConfiguration: " + configString + "\n")

    # First, write out the number of collected test cases.
    if configuration.verbose:
        sys.stderr.write(configuration.separator + "\n")
        sys.stderr.write(
            "Collected %d test%s\n\n" %
            (configuration.suite.countTestCases(),
             configuration.suite.countTestCases() != 1 and "s" or ""))

    # Invoke the test runner.
    if configuration.count == 1:
        result = unittest2.TextTestRunner(
            stream=sys.stderr,
            verbosity=configuration.verbose,
            resultclass=test_result.LLDBTestResult).run(
                configuration.suite)
    else:
        # We are invoking the same test suite more than once. In this case,
        # mark __ignore_singleton__ flag as True so the signleton pattern is
        # not enforced.
        test_result.LLDBTestResult.__ignore_singleton__ = True
        for i in range(configuration.count):
            result = unittest2.TextTestRunner(
                stream=sys.stderr,
                verbosity=configuration.verbose,
                resultclass=test_result.LLDBTestResult).run(
                    configuration.suite)

    configuration.failed = not result.wasSuccessful()

    if configuration.sdir_has_content and configuration.verbose:
        sys.stderr.write(
            "Session logs for test failures/errors/unexpected successes"
            " can be found in directory '%s'\n" %
            configuration.sdir_name)

    if configuration.use_categories and len(
            configuration.failures_per_category) > 0:
        sys.stderr.write("Failures per category:\n")
        for category in configuration.failures_per_category:
            sys.stderr.write(
                "%s - %d\n" %
                (category, configuration.failures_per_category[category]))

    # Exiting.
    exitTestSuite(configuration.failed)
if __name__ == "__main__":
    # This file is a library module driven by lldb's test driver; refuse
    # direct execution so users get a clear error instead of odd failures.
    print(
        __file__ +
        " is for use as a module only. It should not be run as a standalone script.")
    sys.exit(-1)
| 37.979913 | 139 | 0.65369 |
4784394a2a3ad200865a41359c49b51443ebc08f | 12,735 | py | Python | photutils/segmentation/tests/test_core.py | Onoddil/photutils | 433f3e54d3f53282ae04eadde9e1ddf657944590 | [
"BSD-3-Clause"
] | null | null | null | photutils/segmentation/tests/test_core.py | Onoddil/photutils | 433f3e54d3f53282ae04eadde9e1ddf657944590 | [
"BSD-3-Clause"
] | null | null | null | photutils/segmentation/tests/test_core.py | Onoddil/photutils | 433f3e54d3f53282ae04eadde9e1ddf657944590 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the core module.
"""
import numpy as np
from numpy.testing import assert_allclose
import pytest
from ..core import Segment, SegmentationImage
# Record availability of optional dependencies so individual tests can be
# skipped (via pytest.mark.skipif) when matplotlib or scipy is missing.
try:
    import matplotlib  # noqa
    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False

try:
    import scipy  # noqa
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False
@pytest.mark.skipif('not HAS_SCIPY')
class TestSegmentationImage:
    """Unit tests for SegmentationImage and its Segment objects.

    ``setup_class`` builds a single shared 6x6 segmentation map whose
    labels are 1, 3, 4, 5 and 7 — labels 2 and 6 are intentionally
    missing so the non-consecutive-label code paths are exercised.
    """

    def setup_class(self):
        self.data = [[1, 1, 0, 0, 4, 4],
                     [0, 0, 0, 0, 0, 4],
                     [0, 0, 3, 3, 0, 0],
                     [7, 0, 0, 0, 0, 5],
                     [7, 7, 0, 5, 5, 5],
                     [7, 7, 0, 0, 5, 5]]
        self.segm = SegmentationImage(self.data)

    def test_array(self):
        assert_allclose(self.segm.data, self.segm.__array__())

    def test_copy(self):
        segm = SegmentationImage(self.data)
        segm2 = segm.copy()
        # The copy must be deep: mutating one must not affect the other.
        assert segm.data is not segm2.data
        assert segm.labels is not segm2.labels
        segm.data[0, 0] = 100.
        assert segm.data[0, 0] != segm2.data[0, 0]

    def test_invalid_data(self):
        # contains all zeros
        data = np.zeros((3, 3))
        with pytest.raises(ValueError):
            SegmentationImage(data)

        # contains a NaN
        data = np.zeros((5, 5))
        data[2, 2] = np.nan
        with pytest.raises(ValueError):
            SegmentationImage(data)

        # contains an inf
        data = np.zeros((5, 5))
        data[2, 2] = np.inf
        data[0, 0] = -np.inf
        with pytest.raises(ValueError):
            SegmentationImage(data)

        # contains a negative value
        data = np.arange(-1, 8).reshape(3, 3)
        with pytest.raises(ValueError):
            SegmentationImage(data)

    @pytest.mark.parametrize('label', [0, -1, 2])
    def test_invalid_label(self, label):
        # test with scalar labels
        with pytest.raises(ValueError):
            self.segm.check_label(label)
            self.segm.check_labels(label)

    def test_invalid_label_array(self):
        # test with array of labels
        with pytest.raises(ValueError):
            self.segm.check_labels([0, -1, 2])

    def test_data_ma(self):
        # 18 labeled and 18 background (masked) pixels in the 6x6 map.
        assert isinstance(self.segm.data_ma, np.ma.MaskedArray)
        assert np.ma.count(self.segm.data_ma) == 18
        assert np.ma.count_masked(self.segm.data_ma) == 18

    def test_segments(self):
        assert isinstance(self.segm.segments[0], Segment)
        assert_allclose(self.segm.segments[0].data,
                        self.segm.segments[0].__array__())

        assert (self.segm.segments[0].data_ma.shape
                == self.segm.segments[0].data.shape)
        assert (self.segm.segments[0].data_ma.filled(0.).sum()
                == self.segm.segments[0].data.sum())

        # Per-segment attributes must agree with the parent image's arrays.
        label = 4
        idx = self.segm.get_index(label)
        assert self.segm.segments[idx].label == label
        assert self.segm.segments[idx].area == self.segm.areas[idx]
        assert self.segm.segments[idx].slices == self.segm.slices[idx]
        assert self.segm.segments[idx].bbox == self.segm.bbox[idx]

    def test_repr_str(self):
        assert repr(self.segm) == str(self.segm)

        props = ['shape', 'nlabels']
        for prop in props:
            assert f'{prop}:' in repr(self.segm)

    def test_segment_repr_str(self):
        props = ['label', 'slices', 'area']
        for prop in props:
            assert f'{prop}:' in repr(self.segm.segments[0])

    def test_segment_data(self):
        assert_allclose(self.segm.segments[3].data.shape, (3, 3))
        assert_allclose(np.unique(self.segm.segments[3].data), [0, 5])

    def test_segment_make_cutout(self):
        cutout = self.segm.segments[3].make_cutout(self.data,
                                                   masked_array=False)
        assert not np.ma.is_masked(cutout)
        assert_allclose(cutout.shape, (3, 3))

        cutout = self.segm.segments[3].make_cutout(self.data,
                                                   masked_array=True)
        assert np.ma.is_masked(cutout)
        assert_allclose(cutout.shape, (3, 3))

    def test_segment_make_cutout_input(self):
        # A 1D input cannot match the 2D segmentation image.
        with pytest.raises(ValueError):
            self.segm.segments[0].make_cutout(np.arange(10))

    def test_labels(self):
        assert_allclose(self.segm.labels, [1, 3, 4, 5, 7])

    def test_nlabels(self):
        assert self.segm.nlabels == 5

    def test_max_label(self):
        assert self.segm.max_label == 7

    def test_areas(self):
        # Pixel counts for labels 1, 3, 4, 5 and 7 respectively.
        expected = np.array([2, 2, 3, 6, 5])
        assert_allclose(self.segm.areas, expected)

        assert (self.segm.get_area(1)
                == self.segm.areas[self.segm.get_index(1)])
        assert_allclose(self.segm.get_areas(self.segm.labels),
                        self.segm.areas)

    def test_background_area(self):
        assert self.segm.background_area == 18

    def test_is_consecutive(self):
        assert not self.segm.is_consecutive

        data = [[2, 2, 0], [0, 3, 3], [0, 0, 4]]
        segm = SegmentationImage(data)
        assert not segm.is_consecutive  # does not start with label=1

        segm.relabel_consecutive(start_label=1)
        assert segm.is_consecutive

    def test_missing_labels(self):
        assert_allclose(self.segm.missing_labels, [2, 6])

    def test_check_labels(self):
        with pytest.raises(ValueError):
            self.segm.check_label(2)
            self.segm.check_labels([2])

        with pytest.raises(ValueError):
            self.segm.check_labels([2, 6])

    @pytest.mark.skipif('not HAS_MATPLOTLIB')
    def test_make_cmap(self):
        cmap = self.segm.make_cmap()
        # One colormap entry per label value, plus one for the background.
        assert len(cmap.colors) == (self.segm.max_label + 1)
        assert_allclose(cmap.colors[0], [0, 0, 0])

        assert_allclose(self.segm._cmap.colors,
                        self.segm.make_cmap(background_color='#000000',
                                            seed=0).colors)

    def test_reassign_labels(self):
        segm = SegmentationImage(self.data)
        segm.reassign_labels(labels=[1, 7], new_label=2)
        ref_data = np.array([[2, 2, 0, 0, 4, 4],
                             [0, 0, 0, 0, 0, 4],
                             [0, 0, 3, 3, 0, 0],
                             [2, 0, 0, 0, 0, 5],
                             [2, 2, 0, 5, 5, 5],
                             [2, 2, 0, 0, 5, 5]])
        assert_allclose(segm.data, ref_data)
        assert segm.nlabels == len(segm.slices) - segm.slices.count(None)

    @pytest.mark.parametrize('start_label', [1, 5])
    def test_relabel_consecutive(self, start_label):
        segm = SegmentationImage(self.data)
        ref_data = np.array([[1, 1, 0, 0, 3, 3],
                             [0, 0, 0, 0, 0, 3],
                             [0, 0, 2, 2, 0, 0],
                             [5, 0, 0, 0, 0, 4],
                             [5, 5, 0, 4, 4, 4],
                             [5, 5, 0, 0, 4, 4]])
        ref_data[ref_data != 0] += (start_label - 1)
        segm.relabel_consecutive(start_label=start_label)
        assert_allclose(segm.data, ref_data)

        # relabel_consecutive should do nothing if already consecutive
        segm.relabel_consecutive(start_label=start_label)
        assert_allclose(segm.data, ref_data)
        assert segm.nlabels == len(segm.slices) - segm.slices.count(None)

    @pytest.mark.parametrize('start_label', [0, -1])
    def test_relabel_consecutive_start_invalid(self, start_label):
        with pytest.raises(ValueError):
            segm = SegmentationImage(self.data)
            segm.relabel_consecutive(start_label=start_label)

    def test_keep_labels(self):
        ref_data = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 3, 3, 0, 0],
                             [0, 0, 0, 0, 0, 5],
                             [0, 0, 0, 5, 5, 5],
                             [0, 0, 0, 0, 5, 5]])
        segm = SegmentationImage(self.data)
        segm.keep_labels([5, 3])
        assert_allclose(segm.data, ref_data)

    def test_keep_labels_relabel(self):
        ref_data = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 1, 1, 0, 0],
                             [0, 0, 0, 0, 0, 2],
                             [0, 0, 0, 2, 2, 2],
                             [0, 0, 0, 0, 2, 2]])
        segm = SegmentationImage(self.data)
        segm.keep_labels([5, 3], relabel=True)
        assert_allclose(segm.data, ref_data)

    def test_remove_labels(self):
        ref_data = np.array([[1, 1, 0, 0, 4, 4],
                             [0, 0, 0, 0, 0, 4],
                             [0, 0, 0, 0, 0, 0],
                             [7, 0, 0, 0, 0, 0],
                             [7, 7, 0, 0, 0, 0],
                             [7, 7, 0, 0, 0, 0]])
        segm = SegmentationImage(self.data)
        segm.remove_labels(labels=[5, 3])
        assert_allclose(segm.data, ref_data)

    def test_remove_labels_relabel(self):
        ref_data = np.array([[1, 1, 0, 0, 2, 2],
                             [0, 0, 0, 0, 0, 2],
                             [0, 0, 0, 0, 0, 0],
                             [3, 0, 0, 0, 0, 0],
                             [3, 3, 0, 0, 0, 0],
                             [3, 3, 0, 0, 0, 0]])
        segm = SegmentationImage(self.data)
        segm.remove_labels(labels=[5, 3], relabel=True)
        assert_allclose(segm.data, ref_data)

    def test_remove_border_labels(self):
        # Only label 3 does not touch the image border.
        ref_data = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 3, 3, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0]])
        segm = SegmentationImage(self.data)
        segm.remove_border_labels(border_width=1)
        assert_allclose(segm.data, ref_data)

    def test_remove_border_labels_border_width(self):
        # border_width must be smaller than half the smallest image axis.
        with pytest.raises(ValueError):
            segm = SegmentationImage(self.data)
            segm.remove_border_labels(border_width=3)

    def test_remove_border_labels_no_remaining_segments(self):
        alt_data = np.copy(self.data)
        alt_data[alt_data == 3] = 0
        segm = SegmentationImage(alt_data)
        segm.remove_border_labels(border_width=1, relabel=True)
        assert segm.nlabels == 0

    def test_remove_masked_labels(self):
        ref_data = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 3, 3, 0, 0],
                             [7, 0, 0, 0, 0, 5],
                             [7, 7, 0, 5, 5, 5],
                             [7, 7, 0, 0, 5, 5]])
        segm = SegmentationImage(self.data)
        mask = np.zeros(segm.data.shape, dtype=bool)
        mask[0, :] = True
        segm.remove_masked_labels(mask)
        assert_allclose(segm.data, ref_data)

    def test_remove_masked_labels_without_partial_overlap(self):
        # With partial_overlap=False, label 4 survives because only part
        # of it lies under the mask.
        ref_data = np.array([[0, 0, 0, 0, 4, 4],
                             [0, 0, 0, 0, 0, 4],
                             [0, 0, 3, 3, 0, 0],
                             [7, 0, 0, 0, 0, 5],
                             [7, 7, 0, 5, 5, 5],
                             [7, 7, 0, 0, 5, 5]])
        segm = SegmentationImage(self.data)
        mask = np.zeros(segm.data.shape, dtype=bool)
        mask[0, :] = True
        segm.remove_masked_labels(mask, partial_overlap=False)
        assert_allclose(segm.data, ref_data)

    def test_remove_masked_segments_mask_shape(self):
        segm = SegmentationImage(np.ones((5, 5)))
        mask = np.zeros((3, 3), dtype=bool)
        with pytest.raises(ValueError):
            segm.remove_masked_labels(mask)

    def test_outline_segments(self):
        segm_array = np.zeros((5, 5)).astype(int)
        segm_array[1:4, 1:4] = 2
        segm = SegmentationImage(segm_array)
        # Outlining a 3x3 square zeroes only its interior pixel.
        segm_array_ref = np.copy(segm_array)
        segm_array_ref[2, 2] = 0
        assert_allclose(segm.outline_segments(), segm_array_ref)

    def test_outline_segments_masked_background(self):
        segm_array = np.zeros((5, 5)).astype(int)
        segm_array[1:4, 1:4] = 2
        segm = SegmentationImage(segm_array)
        segm_array_ref = np.copy(segm_array)
        segm_array_ref[2, 2] = 0
        segm_outlines = segm.outline_segments(mask_background=True)
        assert isinstance(segm_outlines, np.ma.MaskedArray)
        assert np.ma.count(segm_outlines) == 8
        assert np.ma.count_masked(segm_outlines) == 17
| 37.346041 | 73 | 0.529643 |
a7c0d21cad36acc10da2753e5d9383bc15620a24 | 771 | py | Python | PlanheatMappingModule/PlanHeatDMM/model/refurbishment_level.py | Planheat/Planheat-Tool | 9764fcb86d3898b232c4cc333dab75ebe41cd421 | [
"MIT"
] | 2 | 2020-04-07T03:43:33.000Z | 2021-03-23T13:17:42.000Z | PlanheatMappingModule/PlanHeatDMM/model/refurbishment_level.py | Planheat/Planheat-Tool | 9764fcb86d3898b232c4cc333dab75ebe41cd421 | [
"MIT"
] | 1 | 2020-07-20T09:56:13.000Z | 2020-07-22T10:26:06.000Z | PlanheatMappingModule/PlanHeatDMM/model/refurbishment_level.py | Planheat/Planheat-Tool | 9764fcb86d3898b232c4cc333dab75ebe41cd421 | [
"MIT"
] | 1 | 2020-07-20T09:40:15.000Z | 2020-07-20T09:40:15.000Z | # -*- coding: utf-8 -*-
"""
Model Map refurbishment_level table
:author: Sergio Aparicio Vegas
:version: 0.2
:date: 29 Nov 2017
"""
__docformat__ = "restructuredtext"
class RefurbishmentLevel():
    """ Refurbishment level options """

    def __init__(self):
        # Private storage behind the `id` and `level` properties.
        self.__id = 0
        self.__level = ""

    def __str__(self):
        # Join keeps the original behavior: a non-str level raises TypeError.
        parts = ("id:", str(self.__id), " level:", self.__level)
        return "".join(parts)

    @property
    def level(self):
        """Textual refurbishment level label."""
        return self.__level

    @level.setter
    def level(self, val):
        self.__level = val

    @property
    def id(self):
        """Numeric identifier of this refurbishment level."""
        return self.__id

    @id.setter
    def id(self, val):
        self.__id = val
| 19.275 | 68 | 0.50454 |
fde1ce18fe94e07bb562b03a53d68a804461587d | 937 | py | Python | aioredis_models/redis_key.py | musabhusaini/aioredis-models | 8f868f4bf65e1068f8e8412fcc322ccfb65c1ea3 | [
"MIT"
] | null | null | null | aioredis_models/redis_key.py | musabhusaini/aioredis-models | 8f868f4bf65e1068f8e8412fcc322ccfb65c1ea3 | [
"MIT"
] | null | null | null | aioredis_models/redis_key.py | musabhusaini/aioredis-models | 8f868f4bf65e1068f8e8412fcc322ccfb65c1ea3 | [
"MIT"
] | null | null | null | """
This module contains the following classes:
- RedisKey: represents a generic Redis key.
"""
from aioredis import Redis
class RedisKey:
    """A generic Redis key of any type; base class for the typed data
    structures in this package."""

    def __init__(self, redis: Redis, key: str):
        """Create a `RedisKey`.

        Args:
            redis (Redis): The Redis instance to use to connect to Redis.
            key (str): The Redis key to use.
        """
        self._redis = redis
        self._key = key

    async def delete(self):
        """Delete the key from Redis and return the server's reply."""
        return await self._redis.delete(self._key)

    async def exists(self) -> bool:
        """Return True when the key currently exists in Redis.

        Returns:
            bool: A flag indicating whether the key exists or not.
        """
        count = await self._redis.exists(self._key)
        return count > 0
| 22.309524 | 82 | 0.577375 |
949588edb14b73a2d0cb7afc9d141ed5fc0fe58f | 270 | py | Python | dataent/data_migration/doctype/data_migration_mapping_detail/data_migration_mapping_detail.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | null | null | null | dataent/data_migration/doctype/data_migration_mapping_detail/data_migration_mapping_detail.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | 6 | 2020-03-24T17:15:56.000Z | 2022-02-10T18:41:31.000Z | dataent/data_migration/doctype/data_migration_mapping_detail/data_migration_mapping_detail.py | dataent/dataent | c41bd5942ffe5513f4d921c4c0595c84bbc422b4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Dataent Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from dataent.model.document import Document
class DataMigrationMappingDetail(Document):
    # Controller for the "Data Migration Mapping Detail" DocType.
    # All behaviour is inherited from Document; no custom logic is needed.
    pass
| 27 | 59 | 0.796296 |
0d49c77dfb9c8be4a51337883b193d99a9bf0bf9 | 990 | py | Python | {{cookiecutter.project_module}}/{{cookiecutter.project_module}}/animation.py | jgerrish/cookiecutter-pygame | a0e2522b629e2aa3803e89d85907880e8f4b4aa4 | [
"MIT"
] | null | null | null | {{cookiecutter.project_module}}/{{cookiecutter.project_module}}/animation.py | jgerrish/cookiecutter-pygame | a0e2522b629e2aa3803e89d85907880e8f4b4aa4 | [
"MIT"
] | null | null | null | {{cookiecutter.project_module}}/{{cookiecutter.project_module}}/animation.py | jgerrish/cookiecutter-pygame | a0e2522b629e2aa3803e89d85907880e8f4b4aa4 | [
"MIT"
] | null | null | null | import pygame
# define colors
BLACK = (0, 0, 0)    # RGB triple used to clear the frame each tick
GREEN = (0, 255, 0)  # RGB triple used to fill the rectangle sprite
class Rectangle(pygame.sprite.Sprite):
    """
    Rectangle is a simple sprite: a 50x50 green square centred on the
    (width, height) play area.
    """

    def __init__(self, screen, width, height):
        pygame.sprite.Sprite.__init__(self)
        # Build the sprite surface, then derive its rect for positioning.
        surface = pygame.Surface((50, 50))
        surface.fill(GREEN)
        self.image = surface
        self.rect = self.image.get_rect()
        self.rect.center = (width / 2, height / 2)
        self.screen = screen
class Animator():
    """
    The Animator class manages the animation of sprites: it owns the
    sprite group and redraws the whole frame on each animate() call.
    """

    def __init__(self, screen, width, height):
        self.screen = screen
        self.all_sprites = pygame.sprite.Group()
        self.rectangle = Rectangle(screen, width, height)
        self.all_sprites.add(self.rectangle)

    def animate(self):
        """
        Draw the next frame: clear to black, draw all sprites, then
        flip the display buffer.
        """
        self.screen.fill(BLACK)
        self.all_sprites.draw(self.screen)
        pygame.display.flip()
| 24.146341 | 57 | 0.6 |
d6a446dfa28e11cd51b5469162e1246fa4c5a299 | 4,907 | py | Python | data/partnet_process/blender_render/generate_partnet_point_cloud.py | StuSe/Multimodal-Shape-Completion | f053d0babdd223a7511911b8682afb80fbf42823 | [
"MIT"
] | 75 | 2020-07-04T22:26:29.000Z | 2022-03-28T06:07:50.000Z | data/partnet_process/blender_render/generate_partnet_point_cloud.py | StuSe/Multimodal-Shape-Completion | f053d0babdd223a7511911b8682afb80fbf42823 | [
"MIT"
] | 7 | 2020-08-13T16:06:00.000Z | 2021-07-30T02:07:44.000Z | data/partnet_process/blender_render/generate_partnet_point_cloud.py | StuSe/Multimodal-Shape-Completion | f053d0babdd223a7511911b8682afb80fbf42823 | [
"MIT"
] | 11 | 2020-07-05T02:35:22.000Z | 2022-01-18T10:52:21.000Z | # A simple script that uses blender to render views of a single object by rotation the camera around it.
# Also produces depth map at the same time.
#
# Example:
# /home/song/wurundi/blender-2.79-linux-glibc219-x86_64/blender --background --python generate_partnet_point_cloud.py -- /mnt/data/partnet_data/shape_mesh/37700.obj --output_folder tmp
# find /mnt/data/partnet_data/shape_mesh -name '*.obj' -print0 | xargs -0 -n1 -P10 -I {} /home/song/wurundi/blender-2.79-linux-glibc219-x86_64/blender --background --python generate_partnet_point_cloud.py -- --output_folder ./tmp {}
import argparse, sys, os
import numpy as np
import bpy
from math import radians
import OpenEXR as exr
import Imath
import array
from PIL import Image
sys.path.append('.')
import util
import blender_camera_util
import blender_util
from scipy import spatial
from tqdm import tqdm
import trimesh
import random
# Command-line interface. Blender forwards everything after `--` to the
# script, which is why argv is sliced below instead of using sys.argv directly.
parser = argparse.ArgumentParser(description='Renders given obj file by rotation a camera around it.')
parser.add_argument('--reso', type=int, default=640,
                    help='resolution')
parser.add_argument('--nb_view', type=int, default=36,
                    help='number of views per model to render passes')
parser.add_argument('--orth_scale', type=int, default=2,
                    help='view scale of orthogonal camera')
parser.add_argument('obj', type=str,
                    help='Path to the obj file to be rendered.')
parser.add_argument('--output_folder', type=str, default='./tmp',
                    help='The path the output will be dumped to.')
parser.add_argument('--normalization_mode', type=str, default=None,
                    help='if scale the mesh to be within a unit sphere. [None | diag2sphere | unit_sphere | unit_cube]')
# usually fix below args
parser.add_argument('--remove_doubles', type=bool, default=True,
                    help='Remove double vertices to improve mesh quality.')
parser.add_argument('--remove_iso_verts', type=bool, default=True,
                    help='Remove isolated vertices.')
parser.add_argument('--edge_split', type=bool, default=True,
                    help='Adds edge split filter.')
parser.add_argument('--depth_scale', type=float, default=0.5,
                    help='Scaling that is applied to depth. Depends on size of mesh. Try out various values until you get a good result. Ignored if format is OPEN_EXR.')
parser.add_argument('--color_depth', type=str, default='16',
                    help='Number of bit per channel used for output. Either 8 or 16.')
parser.add_argument('--format', type=str, default='OPEN_EXR',
                    help='Format of files generated. Either PNG or OPEN_EXR')

# Only parse arguments that appear after Blender's `--` separator.
argv = sys.argv[sys.argv.index("--") + 1:]
args = parser.parse_args(argv)
def get_parnet_name(path):
    """Return the PartNet model name for an .obj path.

    The model name is the file's basename without its extension
    (e.g. '/mnt/data/shape_mesh/37700.obj' -> '37700').  os.path is used
    instead of manual '/'-splitting so Windows separators and extension
    handling are robust.

    Args:
        path (str): Path to the model file.

    Returns:
        str: The model name (basename without extension).
    """
    base = os.path.basename(path)
    name = os.path.splitext(base)[0]
    return name
# cls_id, modelname = util.get_shapenet_clsID_modelname_from_filename(args.obj)
modelname = get_parnet_name(args.obj)

# generate random camera rotations
# Build axis-aligned camera rotations: every view_step degrees around
# each of the X, Y and Z axes.
rot_angles_list = []
view_step = 60
for rot_angle in range(0, 359, view_step):
    rot_angles_list.append([rot_angle, 0, 0])
    rot_angles_list.append([0, rot_angle, 0])
    rot_angles_list.append([0, 0, rot_angle])
# for i in range(args.nb_view):
#     rot_x_angle = random.randint(0, 360)
#     rot_y_angle = 0  # do not rot around y, no in-plane rotation
#     rot_z_angle = random.randint(0, 360)
#     rot_angles_list.append([rot_x_angle, rot_y_angle, rot_z_angle])

# Start from an empty Blender scene and wire up the render passes
# (depth / normal / albedo / material-index outputs).
blender_util.clear_scene_objects()
depth_file_output, normal_file_output, albedo_file_output, matidx_file_output = blender_util.rendering_pass_setup(args)

# shapenet v2 coordinate system: Y - up, -Z - face
# after imported to blender, the up of the object will be the Z axis in blender world...
bpy.ops.import_scene.obj(filepath=args.obj, use_smooth_groups=False, use_split_objects=False, use_split_groups=False)
blender_util.process_scene_objects(args)  # including normalization

# assign each material a unique id
# disable transparency for all materials
for i, mat in enumerate(bpy.data.materials):
    if mat.name in ['Material']: continue
    mat.pass_index = i
    mat.use_transparency = False

# setup camera resolution etc
blender_util.setup_render(args)
scene = bpy.context.scene

# render shapenet shape to get color point cloud
all_points_normals_colors_mindices = blender_util.scan_point_cloud(depth_file_output, normal_file_output, albedo_file_output, matidx_file_output, args, rot_angles_list)
# Downsample to a fixed-size cloud, then write XYZ + normal + color to PLY.
all_points_normals_colors_mindices = util.sample_from_point_cloud(all_points_normals_colors_mindices, 10000)
util.write_ply(all_points_normals_colors_mindices[:, :3], os.path.join(args.output_folder, modelname+'.ply'), colors=all_points_normals_colors_mindices[:, 6:9], normals=all_points_normals_colors_mindices[:, 3:6])
print('Shapenet point cloud scanning done!')

# clear the objects imported previously
blender_util.clear_scene_objects()
| 44.207207 | 232 | 0.737314 |
275381b972d9970c6d0bbedfcfebe55361f902c8 | 935 | py | Python | database_engine/database_engine.py | orxg/helper | 6cbad158213028e64407c8ef0fd4e66a9aff9917 | [
"Apache-2.0"
] | null | null | null | database_engine/database_engine.py | orxg/helper | 6cbad158213028e64407c8ef0fd4e66a9aff9917 | [
"Apache-2.0"
] | null | null | null | database_engine/database_engine.py | orxg/helper | 6cbad158213028e64407c8ef0fd4e66a9aff9917 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 03 15:18:17 2017
@author: ldh
提供数据库接口,数据库配置在etc.yaml中按照
相应的格式进行配置即可。创建引擎参数type_
为对应的服务器配置名。
"""
# database_engine.py
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import yaml
from consts import DB_STATE
# Load the per-server database configuration once at import time.
# safe_load is used because plain yaml.load without an explicit Loader is
# deprecated (PyYAML >= 5.1) and can construct arbitrary Python objects.
with open('etc.yaml', 'r') as f:
    etc = yaml.safe_load(f)
class DatabaseEngine():
    """Build a SQLAlchemy engine/session from the etc.yaml configuration.

    ``type_`` selects which server entry of the configuration to use;
    the entry must provide 'user', 'passwd', 'server' and 'db' keys
    (they are substituted into the DB_STATE connection-string template).
    """

    def __init__(self, type_):
        self.type_ = type_
        # Configuration entry for the chosen server.
        self.etc = etc[self.type_]
        self.engine = None

    def get_engine(self):
        """Create (and cache on self.engine) the SQLAlchemy engine."""
        self.engine = create_engine(DB_STATE.format(
            user=self.etc['user'],
            pws=self.etc['passwd'],
            server=self.etc['server'],
            db=self.etc['db']))
        return self.engine

    def get_session(self):
        """Return a new ORM session bound to this configuration's engine.

        The engine is created lazily if get_engine() has not been called
        yet.  (The previous `assert self.engine` was stripped under
        `python -O` and forced callers to respect a call order.)
        """
        if self.engine is None:
            self.get_engine()
        self.session = sessionmaker(bind=self.engine)()
        return self.session
| 21.25 | 57 | 0.594652 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.