code stringlengths 281 23.7M |
|---|
class Effect6734(BaseEffect):
    """Command-burst effect: forwards up to four charge-defined warfare buffs to the fit."""

    type = ('active', 'gang')

    def handler(fit, module, context, projectionRange, **kwargs):
        """Register each warfare buff (slots 1-4) defined by the loaded charge.

        For every slot, reads warfareBuff{n}ID from the charge and the matching
        warfareBuff{n}Value from the module, and adds the pair as a command
        bonus on the fit.
        """
        for slot in range(1, 5):
            # Fetch the buff ID once (the original queried the attribute twice
            # and then re-checked the same truthiness a second time).
            buff_id = module.getModifiedChargeAttr('warfareBuff{}ID'.format(slot))
            if buff_id:
                value = module.getModifiedItemAttr('warfareBuff{}Value'.format(slot))
                fit.addCommandBonus(buff_id, value, module, kwargs['effect'])
def test_annotate_output_dir(testdir):
    """--cov-report=annotate:<dir> must write annotated sources into <dir>."""
    script = testdir.makepyfile(SCRIPT)
    result = testdir.runpytest(
        '-v',
        f'--cov={script.dirpath()}',
        '--cov-report=annotate:' + DEST_DIR,
        script,
    )
    result.stdout.fnmatch_lines([
        '*- coverage: platform *, python * -*',
        'Coverage annotated source written to dir ' + DEST_DIR,
        '*10 passed*',
    ])
    dest_dir = testdir.tmpdir.join(DEST_DIR)
    assert dest_dir.check(dir=True)
    # coverage names annotated copies "<source>,cover".
    assert dest_dir.join(script.basename + ',cover').check()
    assert result.ret == 0
class QlOsUefi(QlOs):
    """Qiling OS layer emulating a UEFI environment (firmware modules, SMM)."""

    type = QL_OS.UEFI

    def __init__(self, ql: Qiling):
        super().__init__(ql)
        self.entry_point = 0
        # Annotation-only declarations: these are assigned later by the UEFI
        # loader / run(), not here.
        self.running_module: str
        self.smm: SmmEnv
        self.heap: QlMemoryHeap
        # User hooks fired around module execution; truthy returns are tallied
        # by the notify_* methods below.
        self.on_module_enter: MutableSequence[Callable[([str], bool)]] = []
        self.on_module_exit: MutableSequence[Callable[([int], bool)]] = []
        # Pick the calling convention by arch bitness: cdecl (32) or MS x64 (64).
        cc: QlCC = {32: intel.cdecl, 64: intel.ms64}[ql.arch.bits](ql.arch)
        self.fcall = QlFunctionCall(ql, cc)

    def save(self):
        """Extend the base snapshot with the UEFI entry point."""
        saved_state = super(QlOsUefi, self).save()
        saved_state['entry_point'] = self.entry_point
        return saved_state

    def restore(self, saved_state):
        """Restore a snapshot produced by save()."""
        super(QlOsUefi, self).restore(saved_state)
        self.entry_point = saved_state['entry_point']

    def process_fcall_params(self, targs: Iterable[TypedArg]) -> Sequence[Tuple[(str, str)]]:
        """Render typed call arguments as (name, display string) pairs.

        Pointers, strings, wide strings and GUIDs get UEFI-friendly rendering;
        any other type falls back to the generic base-class formatting.
        """
        def fallback(v):
            # Delegate unknown types to the base-class renderer and keep only
            # the rendered string.
            return super(QlOsUefi, self).process_fcall_params([(None, '', v)])[0][1]
        ahandlers: Mapping[(Any, Callable[([Any], str)])] = {POINTER: (lambda v: (f'{v:#010x}' if v else 'NULL')), STRING: (lambda v: QlOsUtils.stringify(v)), WSTRING: (lambda v: f'L{QlOsUtils.stringify(v)}'), GUID: (lambda v: (guids_db.get(v.upper(), v) if v else 'NULL'))}
        return tuple(((aname, ahandlers.get(atype, fallback)(avalue)) for (atype, aname, avalue) in targs))

    def notify_after_module_execution(self, nmodules: int) -> bool:
        """Invoke module-exit callbacks; True if any callback returned truthy."""
        return bool(sum((callback(nmodules) for callback in self.on_module_exit)))

    def notify_before_module_execution(self, module: str) -> bool:
        """Invoke module-enter callbacks; True if any callback returned truthy."""
        return bool(sum((callback(module) for callback in self.on_module_enter)))

    def emit_context(self):
        """Log the CPU register state in aligned columns (leading zeros dimmed)."""
        # Each row groups up to three (size, name) register entries: a GPR, an
        # extended GPR and a segment register.
        rgroups = (((8, 'rax'), (8, 'r8'), (4, 'cs')), ((8, 'rbx'), (8, 'r9'), (4, 'ds')), ((8, 'rcx'), (8, 'r10'), (4, 'es')), ((8, 'rdx'), (8, 'r11'), (4, 'fs')), ((8, 'rsi'), (8, 'r12'), (4, 'gs')), ((8, 'rdi'), (8, 'r13'), (4, 'ss')), ((8, 'rsp'), (8, 'r14')), ((8, 'rbp'), (8, 'r15')), ((8, 'rip'),))
        # Matches the run of leading "00" byte pairs so they can be dimmed.
        p = re.compile('^((?:00)+)')
        def __emit_reg(size: int, reg: str):
            val = f'{self.ql.arch.regs.read(reg):0{(size * 2)}x}'
            # Wrap the leading zeros in dark-gray ANSI escapes.
            padded = p.sub('\x1b[90m\\1\x1b[39m', val, 1)
            return f'{reg:3s} = {padded}'
        self.ql.log.error(f'CPU Context:')
        for regs in rgroups:
            self.ql.log.error(f"{' | '.join((__emit_reg(size, reg) for (size, reg) in regs))}")
        self.ql.log.error(f'')

    def emit_hexdump(self, address: int, data: bytearray, num_cols: int=16):
        """Log a hex dump of data with rows aligned to num_cols bytes.

        NOTE(review): the row-start rounding uses a bitmask, which assumes
        num_cols is a power of two -- confirm before passing other values.
        """
        self.ql.log.error('Hexdump:')
        # Pad both ends with None placeholders so the first and last rows line
        # up with the column grid.
        pre_padding = ([None] * (address % num_cols))
        post_padding = ([None] * (num_cols - len(pre_padding)))
        chars = ((pre_padding + list(data)) + post_padding)
        address = (address & (~ (num_cols - 1)))
        for i in range(0, len(chars), num_cols):
            hexdump = ' '.join(((f'  ' if (ch is None) else f'{ch:02x}') for ch in chars[i:(i + num_cols)]))
            self.ql.log.error(f'{(address + i):08x} : {hexdump}')
        self.ql.log.error(f'')

    def emit_disasm(self, address: int, data: bytearray, num_insns: int=8):
        """Log a disassembly of at most num_insns instructions from data."""
        md = self.ql.arch.disassembler
        self.ql.log.error('Disassembly:')
        for insn in tuple(md.disasm(data, address))[:num_insns]:
            self.ql.log.error(f'{insn.address:08x} : {insn.bytes.hex():28s} {insn.mnemonic:10s} {insn.op_str:s}')
        self.ql.log.error(f'')

    def emit_stack(self, nitems: int=4):
        """Log nitems stack entries on either side of the stack pointer."""
        self.ql.log.error('Stack:')
        for i in range((- nitems), (nitems + 1)):
            offset = (i * self.ql.arch.pointersize)
            try:
                item = self.ql.arch.stack_read(offset)
            except UcError:
                # Unmapped stack slot; show a placeholder instead of failing.
                data = '(unavailable)'
            else:
                data = f'{item:0{(self.ql.arch.pointersize * 2)}x}'
            # The ' <=' marker flags the current stack pointer line.
            self.ql.log.error(f"{(self.ql.arch.regs.arch_sp + offset):08x} : {data}{(' <=' if (i == 0) else '')}")
        self.ql.log.error('')

    def emu_error(self):
        """Log a full crash report: context, hexdump, disassembly, stack, maps."""
        pc = self.ql.arch.regs.arch_pc
        try:
            data = self.ql.mem.read(pc, size=64)
        except UcError:
            # PC itself is unmapped; skip the memory-based sections.
            pc_info = ' (unreachable)'
        else:
            self.emit_context()
            self.emit_hexdump(pc, data)
            self.emit_disasm(pc, data)
            containing_image = self.ql.loader.find_containing_image(pc)
            pc_info = (f' ({containing_image.path} + {(pc - containing_image.base):#x})' if containing_image else '')
        finally:
            self.ql.log.error(f'PC = {pc:#010x}{pc_info}')
            self.ql.log.error(f'')
        self.emit_stack()
        self.ql.log.error(f'Memory map:')
        for info_line in self.ql.mem.get_formatted_mapinfo():
            self.ql.log.error(info_line)

    def set_api(self, target: str, handler: Callable, intercept: QL_INTERCEPT=QL_INTERCEPT.CALL):
        """Install an API hook; UEFI handlers are namespaced with 'hook_'."""
        super().set_api(f'hook_{target}', handler, intercept)

    def run(self):
        """Set up the SMM environment and start emulation at the entry point.

        User-supplied entry/exit points (if any) override the loader's.
        """
        self.smm = SmmEnv(self.ql)
        self.notify_before_module_execution(self.running_module)
        if (self.ql.entry_point is not None):
            self.ql.loader.entry_point = self.ql.entry_point
        if (self.ql.exit_point is not None):
            self.exit_point = self.ql.exit_point
        try:
            self.ql.emu_start(self.ql.loader.entry_point, self.exit_point, self.ql.timeout, self.ql.count)
        except KeyboardInterrupt:
            self.ql.log.critical(f'Execution interrupted by user')
        except UcError:
            # Dump diagnostics, then propagate so the caller sees the failure.
            self.emu_error()
            raise

    def stop(self) -> None:
        """Stop the underlying emulation engine."""
        self.ql.emu_stop()
class ValueTrigger(_TriggerType):
    """A value-based OpenSCENARIO trigger: a single Condition wrapped in a
    StartTrigger or StopTrigger element.

    Parameters
    ----------
    name : str -- condition name
    delay : float -- trigger delay
    conditionedge : ConditionEdge -- edge on which the condition fires
    valuecondition : _ValueTriggerType -- the wrapped value condition
    triggeringpoint : str -- 'start' or 'stop' (default 'start')
    """

    def __init__(self, name, delay, conditionedge, valuecondition, triggeringpoint='start'):
        """Initialize the trigger.

        Raises ValueError for an invalid triggeringpoint and TypeError when
        valuecondition is not a _ValueTriggerType.
        """
        self.name = name
        if (triggeringpoint not in ['start', 'stop']):
            raise ValueError('not a valid triggering point, valid start or stop')
        if (triggeringpoint == 'start'):
            self._triggerpoint = 'StartTrigger'
        else:
            self._triggerpoint = 'StopTrigger'
        self.delay = convert_float(delay)
        self.conditionedge = convert_enum(conditionedge, ConditionEdge)
        if (not isinstance(valuecondition, _ValueTriggerType)):
            raise TypeError('entitycondition is not a valid EntityCondition')
        self.valuecondition = valuecondition
        # Set by a parent Trigger/ConditionGroup; controls get_element() output.
        self._used_by_parent = False

    def __eq__(self, other):
        """Equal to another ValueTrigger with the same attributes, or to a
        Trigger/ConditionGroup that wraps exactly this single condition."""
        if isinstance(other, ValueTrigger):
            if ((self.get_attributes() == other.get_attributes()) and (self.valuecondition == other.valuecondition) and (self._triggerpoint == other._triggerpoint)):
                return True
        elif isinstance(other, Trigger):
            if ((len(other.conditiongroups) == 1) and (len(other.conditiongroups[0].conditions) == 1)):
                if ((self._triggerpoint == other._triggerpoint) and (other.conditiongroups[0].conditions[0] == self)):
                    return True
        elif isinstance(other, ConditionGroup):
            if (len(other.conditions) == 1):
                if ((self._triggerpoint == other._triggerpoint) and (other.conditions[0] == self)):
                    return True
        return False

    @staticmethod
    def parse(element):
        """Parse a <Condition> element into a ValueTrigger.

        Restored as a staticmethod: the function takes no self and is used as
        a factory, so the decorator was evidently lost in this copy.
        """
        if (element.tag != 'Condition'):
            raise NotAValidElement('ValueTrigger only parses a Condition, not ', element)
        name = element.attrib['name']
        delay = convert_float(element.attrib['delay'])
        conditionedge = convert_enum(element.attrib['conditionEdge'], ConditionEdge)
        condition = _ValueConditionFactory.parse_value_condition(element.find('ByValueCondition'))
        return ValueTrigger(name, delay, conditionedge, condition)

    def _set_used_by_parent(self):
        """Mark this condition as owned by a parent trigger structure."""
        self._used_by_parent = True

    def get_attributes(self):
        """Return the XML attributes of the Condition element."""
        return {'name': self.name, 'delay': str(self.delay), 'conditionEdge': self.conditionedge.get_name()}

    def get_element(self):
        """Build the ElementTree element.

        Returns the bare <Condition> when owned by a parent, otherwise a full
        Start/StopTrigger wrapping a single ConditionGroup.
        """
        condition = ET.Element('Condition', attrib=self.get_attributes())
        byvalue = ET.SubElement(condition, 'ByValueCondition')
        byvalue.append(self.valuecondition.get_element())
        if self._used_by_parent:
            return condition
        else:
            element = ET.Element(self._triggerpoint)
            condgroup = ET.SubElement(element, 'ConditionGroup')
            condgroup.append(condition)
            return element
def init():
    """Register the qute URL scheme with QtWebEngine (no-op when unavailable)."""
    if QWebEngineUrlScheme is None:
        return
    # The scheme must not have been registered already.
    assert not QWebEngineUrlScheme.schemeByName(_QUTE).name()
    flags = (QWebEngineUrlScheme.Flag.LocalScheme
             | QWebEngineUrlScheme.Flag.LocalAccessAllowed)
    scheme = QWebEngineUrlScheme(_QUTE)
    scheme.setFlags(flags)
    QWebEngineUrlScheme.registerScheme(scheme)
class Conv2dBlock_my(nn.Module):
    """Conv block: pad -> conv -> (optional, style-conditioned) norm -> activation.

    forward() takes a two-element sequence [content, style]; the style tensor
    is only consumed by the normalization layer.
    """

    def __init__(self, input_dim, output_dim, kernel_size, stride, padding=0, norm='none', activation='relu', pad_type='zero'):
        super(Conv2dBlock_my, self).__init__()
        self.use_bias = True

        # Padding layer, selected by name.
        pad_layers = {
            'reflect': nn.ReflectionPad2d,
            'replicate': nn.ReplicationPad2d,
            'zero': nn.ZeroPad2d,
        }
        assert pad_type in pad_layers, 'Unsupported padding type: {}'.format(pad_type)
        self.pad = pad_layers[pad_type](padding)

        # Normalization layer; lambdas defer construction so only the chosen
        # branch's class is ever referenced. 'sn' is handled by the conv below.
        norm_dim = output_dim
        norm_factories = {
            'bn': lambda: nn.BatchNorm2d(norm_dim),
            'in': lambda: nn.InstanceNorm2d(norm_dim),
            'ln': lambda: LayerNorm(norm_dim),
            'adain': lambda: AdaptiveInstanceNorm2d(norm_dim),
            'spade': lambda: SPADE(),
            'none': lambda: None,
            'sn': lambda: None,
        }
        assert norm in norm_factories, 'Unsupported normalization: {}'.format(norm)
        self.norm = norm_factories[norm]()

        # Activation layer.
        act_factories = {
            'relu': lambda: nn.ReLU(inplace=True),
            'lrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
            'prelu': lambda: nn.PReLU(),
            'selu': lambda: nn.SELU(inplace=True),
            'tanh': lambda: nn.Tanh(),
            'none': lambda: None,
        }
        assert activation in act_factories, 'Unsupported activation: {}'.format(activation)
        self.activation = act_factories[activation]()

        # Convolution, spectrally normalized when norm == 'sn'.
        conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
        self.conv = SpectralNorm(conv) if (norm == 'sn') else conv

    def forward(self, x):
        """x is [content, style]; returns the transformed content tensor."""
        content, style = x[0], x[1]
        out = self.conv(self.pad(content))
        if self.norm:
            out = self.norm([out, style])
        if self.activation:
            out = self.activation(out)
        return out
class PetitionCreationStep1(forms.Form):
    """First step of petition creation: asks for a title unique to its owner
    (either an organization or a user)."""

    title = forms.CharField(max_length=200)

    def __init__(self, *args, **kwargs):
        """Pop the owner identifier ('orgslugname' or 'user_name') from kwargs."""
        if 'orgslugname' in kwargs:
            self.orgslugname = kwargs.pop('orgslugname')
            self.owned_by_org = True
        elif 'user_name' in kwargs:
            self.owned_by_org = False
            self.username = kwargs.pop('user_name')
        else:
            raise ValueError(_('You should either provide an org name or a user name'))
        super().__init__(*args, **kwargs)

    def clean_title(self):
        """Reject a title that the owner already used for another petition."""
        title = self.cleaned_data.get('title')
        if self.owned_by_org:
            owner = Organization.objects.get(slugname=self.orgslugname)
            lookup = {'title': title, 'org': owner}
        else:
            owner = PytitionUser.objects.get(user__username=self.username)
            lookup = {'title': title, 'user': owner}
        duplicates = Petition.objects.filter(**lookup)
        if duplicates.count() > 0:
            self.add_error('title', ValidationError(_('There is already a petition with this title'), code='invalid'))
        return title
class LazyProxy():
    """Transparent proxy that defers calling ``func(*args, **kwargs)`` until the
    result is first needed, optionally caching it.

    Every special method forwards to ``self.value``; ``value`` is a property,
    which this copy had lost -- without the decorator, e.g. ``bool(proxy)``
    would always be True (bound methods are truthy) and ``proxy[k]`` would
    index the method object.
    """
    __slots__ = ['_func', '_args', '_kwargs', '_value', '_is_cache_enabled', '_attribute_error']
    if TYPE_CHECKING:
        _func: Callable[(..., Any)]
        _args: tuple[(Any, ...)]
        _kwargs: dict[(str, Any)]
        _is_cache_enabled: bool
        _value: Any
        _attribute_error: (AttributeError | None)

    def __init__(self, func: Callable[(..., Any)], *args: Any, enable_cache: bool=True, **kwargs: Any) -> None:
        # object.__setattr__ bypasses our own __setattr__, which forwards to
        # the wrapped value.
        object.__setattr__(self, '_func', func)
        object.__setattr__(self, '_args', args)
        object.__setattr__(self, '_kwargs', kwargs)
        object.__setattr__(self, '_is_cache_enabled', enable_cache)
        object.__setattr__(self, '_value', None)
        object.__setattr__(self, '_attribute_error', None)

    @property
    def value(self) -> Any:
        """Evaluate (and, when caching is enabled, memoize) the wrapped callable.

        NOTE: the cache keys on ``_value is None``, so a callable that
        legitimately returns None is re-evaluated on every access.
        """
        if (self._value is None):
            try:
                value = self._func(*self._args, **self._kwargs)
            except AttributeError as error:
                # Remember the error so __getattr__ can re-raise it instead of
                # masking it as a missing proxy attribute.
                object.__setattr__(self, '_attribute_error', error)
                raise
            if (not self._is_cache_enabled):
                return value
            object.__setattr__(self, '_value', value)
        return self._value

    def __contains__(self, key: object) -> bool:
        return (key in self.value)

    def __bool__(self) -> bool:
        return bool(self.value)

    def __dir__(self) -> list[str]:
        return dir(self.value)

    def __iter__(self) -> Iterator[Any]:
        return iter(self.value)

    def __len__(self) -> int:
        return len(self.value)

    def __str__(self) -> str:
        return str(self.value)

    def __add__(self, other: object) -> Any:
        return (self.value + other)

    def __radd__(self, other: object) -> Any:
        return (other + self.value)

    def __mod__(self, other: object) -> Any:
        return (self.value % other)

    def __rmod__(self, other: object) -> Any:
        return (other % self.value)

    def __mul__(self, other: object) -> Any:
        return (self.value * other)

    def __rmul__(self, other: object) -> Any:
        return (other * self.value)

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        return self.value(*args, **kwargs)

    def __lt__(self, other: object) -> bool:
        return (self.value < other)

    def __le__(self, other: object) -> bool:
        return (self.value <= other)

    def __eq__(self, other: object) -> bool:
        return (self.value == other)

    def __ne__(self, other: object) -> bool:
        return (self.value != other)

    def __gt__(self, other: object) -> bool:
        return (self.value > other)

    def __ge__(self, other: object) -> bool:
        return (self.value >= other)

    def __delattr__(self, name: str) -> None:
        delattr(self.value, name)

    def __getattr__(self, name: str) -> Any:
        if (self._attribute_error is not None):
            raise self._attribute_error
        return getattr(self.value, name)

    def __setattr__(self, name: str, value: Any) -> None:
        setattr(self.value, name, value)

    def __delitem__(self, key: Any) -> None:
        del self.value[key]

    def __getitem__(self, key: Any) -> Any:
        return self.value[key]

    def __setitem__(self, key: Any, value: Any) -> None:
        self.value[key] = value

    def __copy__(self) -> LazyProxy:
        """Shallow copy: same factory and arguments, fresh (uncached) proxy."""
        return LazyProxy(self._func, *self._args, enable_cache=self._is_cache_enabled, **self._kwargs)

    def __deepcopy__(self, memo: Any) -> LazyProxy:
        """Deep copy of factory and arguments, also as a fresh proxy."""
        from copy import deepcopy
        return LazyProxy(deepcopy(self._func, memo), *deepcopy(self._args, memo), enable_cache=deepcopy(self._is_cache_enabled, memo), **deepcopy(self._kwargs, memo))
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
    """Wrap an fp32 optimizer so training runs with fp16 parameters plus a
    dynamic loss scaler."""

    def __init__(self, args, params, fp32_optimizer, fp32_params):
        super().__init__(args)
        self.fp16_params = params
        self.fp32_optimizer = fp32_optimizer
        self.fp32_params = fp32_params
        if (getattr(args, 'fp16_scale_window', None) is None):
            # The default window assumes a single update-freq value; a schedule
            # makes the effective batch size vary, so demand an explicit window.
            if (len(args.update_freq) > 1):
                raise ValueError('--fp16-scale-window must be given explicitly when using a custom --update-freq schedule')
            scale_window = int((((2 ** 14) / args.distributed_world_size) / args.update_freq[0]))
        else:
            scale_window = args.fp16_scale_window
        self.scaler = DynamicLossScaler(init_scale=args.fp16_init_scale, scale_window=scale_window, tolerance=args.fp16_scale_tolerance, threshold=args.threshold_loss_scale)
        self.min_loss_scale = self.args.min_loss_scale

    @classmethod
    def build_optimizer(cls, args, params):
        """Factory: build fp32 shadow params and the wrapped fp32 optimizer.

        Restored as a classmethod -- the method takes ``cls`` and is called on
        the class, so the decorator was evidently lost in this copy.
        """
        flatten = (not getattr(args, 'fp16_no_flatten_grads', False))
        fp32_params = cls.build_fp32_params(params, flatten=flatten)
        if flatten:
            fp32_optimizer = optim.build_optimizer(args, [fp32_params])
        else:
            fp32_optimizer = optim.build_optimizer(args, fp32_params)
        return cls(args, params, fp32_optimizer, fp32_params)

    @property
    def optimizer(self):
        """Expose the wrapped torch optimizer (property, per FairseqOptimizer)."""
        return self.fp32_optimizer.optimizer

    @property
    def optimizer_config(self):
        """Expose the wrapped optimizer's config (accessed as an attribute)."""
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)
def average_state_dicts(state_dicts: List[Dict[(str, torch.Tensor)]]):
    """Element-wise mean of several state dicts that share the same keys."""
    count = len(state_dicts)
    averaged = {}
    for key in state_dicts[0].keys():
        total = sum(sd[key] for sd in state_dicts)
        mean = total / count
        assert isinstance(mean, torch.Tensor)
        averaged[key] = mean
    return averaged
def lih_hamiltonian():
    """Build the LiH (bond length 1.45, STO-3G) active-space Hamiltonian.

    Returns a (fermion_operator, eigenspectrum) pair for the molecule with
    orbital 0 frozen and orbitals 1-2 active.
    """
    geometry = [('Li', (0.0, 0.0, 0.0)), ('H', (0.0, 0.0, 1.45))]
    occupied = range(1)        # core orbital kept frozen
    active = range(1, 3)       # active-space orbitals
    molecule = MolecularData(geometry, 'sto-3g', 1, description='1.45')
    molecule.load()
    molecular_hamiltonian = molecule.get_molecular_hamiltonian(
        occupied_indices=occupied, active_indices=active)
    hamiltonian = get_fermion_operator(molecular_hamiltonian)
    return (hamiltonian, eigenspectrum(hamiltonian))
class NonNegativeParametrizer(nn.Module):
    """Reparametrize a tensor so the decoded value never drops below minimum.

    forward() maps the stored parameter back via clamp-then-square; init()
    is the inverse mapping used to initialize the stored parameter.
    """

    def __init__(self, minimum: float=0.0, eps: float=Consts.Eps):
        super().__init__()
        minimum = float(minimum)
        eps = float(eps)
        eps_sq = eps ** 2
        # Stored as a buffer so it follows the module across devices.
        self.register_buffer('eps', torch.Tensor([eps_sq]))
        self.lowerBound = LowerBound((minimum + eps_sq) ** 0.5)

    def init(self, x):
        """Inverse transform: sqrt(max(x + eps, eps))."""
        shifted = torch.max(x + self.eps, self.eps)
        return torch.sqrt(shifted)

    def forward(self, x):
        """Forward transform: clamp to the lower bound, square, subtract eps."""
        bounded = self.lowerBound(x)
        return bounded ** 2 - self.eps
class BatchManager(object):
    """Buckets sequence data into length-sorted, zero-padded batches."""

    def __init__(self, data, batch_size):
        self.batch_data = self.sort_and_pad(data, batch_size)
        self.len_data = len(self.batch_data)

    def sort_and_pad(self, data, batch_size):
        """Sort samples by token-sequence length, slice into batches, pad each."""
        num_batch = int(math.ceil((len(data) / batch_size)))
        sorted_data = sorted(data, key=(lambda x: len(x[0])))
        batch_data = list()
        for i in range(num_batch):
            batch_data.append(self.pad_data(sorted_data[int((i * batch_size)):int(((i + 1) * batch_size))]))
        return batch_data

    @staticmethod
    def pad_data(data):
        """Right-pad every field of each (string, char, seg, target) sample with
        zeros up to the batch's longest token sequence.

        Restored as a staticmethod: it takes no self, yet was invoked as
        ``self.pad_data(...)`` -- without the decorator that call raised
        TypeError. The leftover debug ``print(max_length)`` was removed.

        NOTE(review): the padding length is derived from the token sequence;
        this assumes chars/segs/targets have the same length -- confirm upstream.
        """
        strings = []
        chars = []
        segs = []
        targets = []
        max_length = max(len(sentence[0]) for sentence in data)
        for line in data:
            (string, char, seg, target) = line
            padding = ([0] * (max_length - len(string)))
            strings.append((string + padding))
            chars.append((char + padding))
            segs.append((seg + padding))
            targets.append((target + padding))
        return [strings, chars, segs, targets]

    def iter_batch(self, shuffle=False):
        """Yield every batch, optionally shuffling the batch order in place."""
        if shuffle:
            random.shuffle(self.batch_data)
        for idx in range(self.len_data):
            yield self.batch_data[idx]
class RecallSessionMetricComputation(RecMetricComputation):
    """Session-level recall: TP / (TP + FN), where positives are determined by
    ranking predictions (and optionally labels) within each session and taking
    the top ``top_threshold`` entries."""

    def __init__(self, *args: Any, session_metric_def: SessionMetricDef, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Lifetime + windowed accumulators, summed across ranks on reduction.
        self._add_state(NUM_TRUE_POS, torch.zeros(self._n_tasks, dtype=torch.double), add_window_state=True, dist_reduce_fx='sum', persistent=True)
        self._add_state(NUM_FALSE_NEGATIVE, torch.zeros(self._n_tasks, dtype=torch.double), add_window_state=True, dist_reduce_fx='sum', persistent=True)
        # Session-metric configuration: top-k cutoff, whether labels are also
        # ranked per session, and the kwarg key carrying the session tensor.
        self.top_threshold: Optional[int] = session_metric_def.top_threshold
        self.run_ranking_of_labels: bool = session_metric_def.run_ranking_of_labels
        self.session_var_name: Optional[str] = session_metric_def.session_var_name

    def update(self, *, predictions: Optional[torch.Tensor], labels: torch.Tensor, weights: Optional[torch.Tensor], **kwargs: Dict[(str, Any)]) -> None:
        """Accumulate TP/FN counts (lifetime and windowed) from one batch.

        Raises RecMetricException when the session tensor or any required
        input is missing.
        """
        if (('required_inputs' not in kwargs) or (self.session_var_name not in kwargs['required_inputs'])):
            raise RecMetricException('Need the {} input to update the session metric'.format(self.session_var_name))
        session = kwargs['required_inputs'][self.session_var_name]
        if ((predictions is None) or (weights is None) or (session is None)):
            raise RecMetricException("Inputs 'predictions', 'weights' and 'session' should not be None for RecallSessionMetricComputation update")
        _validate_model_outputs(labels, predictions, weights, session)
        # Compute in double precision to match the accumulator dtype.
        predictions = predictions.double()
        labels = labels.double()
        weights = weights.double()
        num_samples = predictions.shape[(- 1)]
        for (state_name, state_value) in self.get_recall_states(labels=labels, predictions=predictions, weights=weights, session=session).items():
            state = getattr(self, state_name)
            # In-place add keeps the registered state tensor.
            state += state_value
            self._aggregate_window_state(state_name, state_value, num_samples)

    def _compute(self) -> List[MetricComputationReport]:
        """Report lifetime and windowed session-level recall."""
        return [MetricComputationReport(name=MetricName.RECALL_SESSION_LEVEL, metric_prefix=MetricPrefix.LIFETIME, value=_calc_recall(num_true_pos=cast(torch.Tensor, getattr(self, NUM_TRUE_POS)), num_false_neg=cast(torch.Tensor, getattr(self, NUM_FALSE_NEGATIVE)))), MetricComputationReport(name=MetricName.RECALL_SESSION_LEVEL, metric_prefix=MetricPrefix.WINDOW, value=_calc_recall(num_true_pos=self.get_window_state(NUM_TRUE_POS), num_false_neg=self.get_window_state(NUM_FALSE_NEGATIVE)))]

    def get_recall_states(self, labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor, session: torch.Tensor) -> Dict[(str, torch.Tensor)]:
        """Derive per-batch TP/FN tensors from session-ranked predictions."""
        # A sample is a predicted positive when its within-session rank beats
        # the top-k cutoff.
        predictions_ranked = ranking_within_session(predictions, session)
        predictions_labels = (predictions_ranked < self.top_threshold).to(torch.int32)
        if self.run_ranking_of_labels:
            # Optionally binarize labels the same way.
            labels_ranked = ranking_within_session(labels, session)
            labels = (labels_ranked < self.top_threshold).to(torch.int32)
        num_true_pos = _calc_num_true_pos(labels, predictions_labels, weights)
        num_false_neg = _calc_num_false_neg(labels, predictions_labels, weights)
        return {NUM_TRUE_POS: num_true_pos, NUM_FALSE_NEGATIVE: num_false_neg}
def main(args):
    """Run a command with GCOV_PREFIX pointed at a scratch directory, then pack
    the collected coverage into an archive renamed onto args[0].

    args[0] is the destination coverage path; args[1:] is the command to run.
    """
    coverage_path = os.path.abspath(args[0])
    # Unique scratch dir so concurrent invocations do not collide.
    scratch_dir = '{}.{}'.format(coverage_path, random.getrandbits(64))
    mkdir_p(scratch_dir)
    child_env = os.environ.copy()
    child_env['GCOV_PREFIX'] = scratch_dir
    subprocess.check_call(args[1:], env=child_env)
    archive_path = scratch_dir + '.archive'
    with tarfile.open(archive_path, 'w:') as tar:
        tar.add(scratch_dir, arcname='.')
    # rename() makes the final coverage file appear atomically.
    os.rename(archive_path, coverage_path)
    shutil.rmtree(scratch_dir)
def build_cli_parser(description='Red Canary example script'):
    """Build the common CLI parser for the example scripts.

    --query and --queryfile are mutually exclusive ways to supply Cb queries.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--profile', type=str, action='store', help='The credentials.response profile to use.')
    parser.add_argument('--prefix', type=str, action='store', help='Output filename prefix.')
    parser.add_argument('--append', type=str, action='store', help='Append to output file.')
    parser.add_argument('--days', type=int, action='store', help='Number of days to search.')
    # Fixed copy-pasted help text (previously claimed "days").
    parser.add_argument('--minutes', type=int, action='store', help='Number of minutes to search.')
    cbr = parser.add_mutually_exclusive_group(required=False)
    cbr.add_argument('--queryfile', type=str, action='store', help='File containing queries, one per line.')
    cbr.add_argument('--query', type=str, action='store', help='A single Cb query to execute.')
    return parser
class DictionaryConfig(AppConfig):
    """Django app configuration for the dictionary app."""

    name = 'dictionary'
    verbose_name = _('Dictionary')

    def ready(self):
        """Hook run once the app registry is loaded."""
        # Imported for its side effect: registering the app's signal handlers.
        import dictionary.signals
# Site identity and outgoing-mail settings.
DOMAIN = 'xyzsozluk.com'
# NOTE(review): this literal arrived truncated ("PROTOCOL = '"), which is a
# syntax error; 'http' is the project's usual default -- confirm, and switch
# to 'https' when serving over TLS.
PROTOCOL = 'http'
FROM_EMAIL = ''  # Sender address for outgoing mail; intentionally blank here.
# --- Pagination defaults ---
TOPICS_PER_PAGE_DEFAULT = 50
ENTRIES_PER_PAGE_DEFAULT = 10
ENTRIES_PER_PAGE_PROFILE = 15
# --- Special system accounts and novice queue ---
GENERIC_SUPERUSER_USERNAME = 'djangosozluk'
GENERIC_PRIVATEUSER_USERNAME = 'anonymous'
DISABLE_NOVICE_QUEUE = False
INDEX_TYPE = 'random_records'
# Metadata for the virtual (non-database) categories: each value is
# (display name, description[, (tab dict, default tab)]).
NON_DB_CATEGORIES_META = {'today': (_('today'), _('most recent entries')), 'popular': (_('popular'), _('whats happening?')), 'uncategorized': (_('uncategorized'), _('topics with no channels')), 'acquaintances': (_('acquaintances'), _('what are users i follow up to?'), ({'entries': _('entries'), 'favorites': _('favorites')}, 'entries')), 'wishes': (_('wishes'), _('the topics that some authors want populated'), ({'all': _('all'), 'owned': _('owned')}, 'all')), 'today-in-history': (_('today in history'), _('what has been said around this time in the past years?')), 'drafts': (_('drafts'), _("the entries that i've yet to publish")), 'followups': (_('followups'), _('what other authors wrote down after me?')), 'novices': (_('novices'), _('the entries of novice users')), 'top': (_('top'), _('most liked entries'), ({'yesterday': _('yesterday'), 'week': _('last week')}, 'yesterday')), 'search': (_('search'), _('advanced search')), 'userstats': (_('user statistics'), _('user statistics'), ({'latest': _('%(username)s - entries'), 'popular': _('%(username)s - most favorited'), 'favorites': _('%(username)s - favorites'), 'recentlyvoted': _('%(username)s - recently voted'), 'liked': _('%(username)s - most liked'), 'weeklygoods': _('%(username)s - attracting entries of this week'), 'beloved': _('%(username)s - beloved entries'), 'channels': _('%(username)s - #%(channel)s topics')}, 'latest')), 'ama': (_('ama'), _('question-and-answer themed interactive interviews'))}
NON_DB_CATEGORIES = NON_DB_CATEGORIES_META.keys()
# Category groupings that drive visibility and rendering behavior.
TABBED_CATEGORIES = ('acquaintances', 'wishes', 'userstats', 'top')
USER_EXCLUSIVE_CATEGORIES = ('today', 'drafts', 'acquaintances', 'wishes', 'followups')
LOGIN_REQUIRED_CATEGORIES = (USER_EXCLUSIVE_CATEGORIES + ('novices',))
EXCLUDABLE_CATEGORIES = ('spor', 'siyaset', 'anket', 'yetiskin')
DEFAULT_EXCLUSIONS = ['yetiskin']
DEFAULT_CATEGORY = 'popular'
PARAMETRIC_CATEGORIES = ('userstats',)
# --- Category caching (seconds, unless noted) ---
DEFAULT_CACHE_TIMEOUT = 90
EXCLUSIVE_TIMEOUTS = {'top': 86400, 'today-in-history': 86400, 'today': 300, 'popular': 30}
REFRESH_TIMEOUT = 0.1337
UNCACHED_CATEGORIES = ('drafts', 'wishes_owned', 'followups')
DISABLE_CATEGORY_CACHING = False
# --- User "generations" ---
YEAR_RANGE = (2023, 2022, 2021, 2020, 2019, 2018)
DISABLE_GENERATIONS = False
FIRST_GENERATION_DATE = '13.08.2019'
GENERATION_GAP_DAYS = 180
# --- Voting and karma ---
DISABLE_ANONYMOUS_VOTING = False
VOTE_RATES = {'favorite': Decimal('.2'), 'vote': Decimal('.2'), 'anonymous': Decimal('.05')}
KARMA_RATES = {'upvote': Decimal('0.18'), 'downvote': Decimal('0.27'), 'cost': Decimal('0.09')}
DAILY_VOTE_LIMIT = 240
DAILY_VOTE_LIMIT_PER_USER = 24
TOTAL_VOTE_LIMIT_PER_USER = 160
# Karma score ranges mapped to display titles.
KARMA_EXPRESSIONS = {range(25, 50): _('chaotic neutral'), range(50, 100): _('chronic backup'), range(100, 125): _('padawan'), range(125, 150): _('lunatic'), range(150, 200): _('fragile anarchist'), range(200, 250): _('anarchist'), range(250, 300): _('turbulent kicker'), range(300, 350): _('anatolian boy'), range(350, 370): _('battal gazi'), range(370, 400): _('thorny'), range(400, 430): _('hippy'), range(430, 450): _('lad'), range(450, 470): _('staid'), range(470, 500): _('rowdy'), range(500, 530): _('richard the blazeheart '), range(530, 550): _('compliant yet sympathetic'), range(550, 575): _('right minded'), range(575, 600): _('presentable'), range(600, 620): _('sugar'), range(620, 630): _('honeypot'), range(630, 650): _('yummier honey'), range(650, 665): _('luscious'), range(665, 680): _('addicted'), range(680, 700): _('switheet'), range(700, 725): _('damascus apricot'), range(725, 750): _('household'), range(750, 775): _('exuberant'), range(775, 800): _('energizer bunny'), range(800, 850): _('courteous'), range(850, 900): _('inhuman'), range(900, 1000): _('rating beast')}
KARMA_BOUNDARY_UPPER = 1000
KARMA_BOUNDARY_LOWER = (- 200)
UNDERWHELMING_KARMA_EXPRESSION = _('imbecile')
OVERWHELMING_KARMA_EXPRESSION = _('the champion')
# --- Topic suggestions ---
SUGGESTIONS_PER_TOPIC = 3
SUGGESTIONS_PER_DAY = 45
SUGGESTIONS_QUALIFY_RATE = 3
SUGGESTIONS_ENTRY_REQUIREMENT = 100
# --- Image uploads (sizes in bytes) ---
MAX_UPLOAD_SIZE = (1048576 * 2.5)
DAILY_IMAGE_UPLOAD_LIMIT = 25
COMPRESS_IMAGES = False
COMPRESS_THRESHOLD = 2621440
COMPRESS_QUALITY = 70
XSENDFILE_HEADER_NAME = 'X-Accel-Redirect'
# --- Messaging and posting rate limits ---
MESSAGE_PURGE_THRESHOLD = 300
AUTHOR_ENTRY_INTERVAL = 0
NOVICE_ENTRY_INTERVAL = 0
class TestPauliBasis(unittest.TestCase):
X = numpy.array([[0, 1], [1, 0]])
Y = numpy.array([[0, (- 1j)], [1j, 0]])
Z = numpy.array([[1, 0], [0, (- 1)]])
def assertMatricesAlmostEqual(self, lhs, rhs, places=None):
self.assertEqual(lhs.shape, rhs.shape, 'Marix shapes differ: {} vs {}'.format(lhs, rhs))
(n, m) = lhs.shape
for x in range(n):
for y in range(m):
self.assertAlmostEqual(lhs[(x, y)], rhs[(x, y)], places=places, msg='Matrices {} and {} differ on ({}, {})'.format(lhs, rhs, x, y))
def test_measurement_matrices(self):
X0 = paulibasis.pauli_measurement_matrix('X', 0)
X1 = paulibasis.pauli_measurement_matrix('X', 1)
result_X = (X0 - X1)
self.assertMatricesAlmostEqual(self.X, result_X)
Y0 = paulibasis.pauli_measurement_matrix('Y', 0)
Y1 = paulibasis.pauli_measurement_matrix('Y', 1)
result_Y = (Y0 - Y1)
self.assertMatricesAlmostEqual(self.Y, result_Y)
Z0 = paulibasis.pauli_measurement_matrix('Z', 0)
Z1 = paulibasis.pauli_measurement_matrix('Z', 1)
result_Z = (Z0 - Z1)
self.assertMatricesAlmostEqual(self.Z, result_Z)
def test_preparation_matrices(self):
X0 = paulibasis.pauli_preparation_matrix('Xp')
X1 = paulibasis.pauli_preparation_matrix('Xm')
result_X = (X0 - X1)
self.assertMatricesAlmostEqual(self.X, result_X)
Y0 = paulibasis.pauli_preparation_matrix('Yp')
Y1 = paulibasis.pauli_preparation_matrix('Ym')
result_Y = (Y0 - Y1)
self.assertMatricesAlmostEqual(self.Y, result_Y)
Z0 = paulibasis.pauli_preparation_matrix('Zp')
Z1 = paulibasis.pauli_preparation_matrix('Zm')
result_Z = (Z0 - Z1)
self.assertMatricesAlmostEqual(self.Z, result_Z) |
class TransitionExperience(object):
    """A single RL transition (state, action, reward); extra keyword arguments
    become attributes on the instance."""

    def __init__(self, prob_state, all_state, action, reward, **kwargs):
        self.prob_state = prob_state
        self.all_state = all_state
        self.action = action
        self.reward = reward
        # Python 3 idiom: dict.items() replaces the py2-compat six.iteritems().
        for (k, v) in kwargs.items():
            setattr(self, k, v)
class MegaCrypto():
    """AES and base64 helpers for the MEGA API (urlsafe base64, big-endian
    32-bit "a32" word tuples).

    All helpers are stateless; the @staticmethod decorators were missing in
    this copy, which broke instance-style calls.
    """

    @staticmethod
    def base64_decode(data):
        """Decode MEGA's urlsafe base64 (padding is typically stripped)."""
        data = to_bytes(data, 'ascii')
        data += (b'=' * ((- len(data)) % 4))
        return base64.b64decode(data, b'-_')

    @staticmethod
    def base64_encode(data):
        """Encode bytes as urlsafe base64 using MEGA's '-_' alphabet."""
        return base64.b64encode(data, b'-_')

    @staticmethod
    def a32_to_bytes(a):
        """Pack a sequence of 32-bit words into big-endian bytes."""
        return struct.pack('>{}I'.format(len(a)), *a)

    @staticmethod
    def bytes_to_a32(s):
        """Unpack bytes (zero-padded to a 4-byte multiple) into 32-bit words."""
        s += (b'\x00' * ((- len(s)) % 4))
        return struct.unpack('>{}I'.format((len(s) // 4)), s)

    @staticmethod
    def a32_to_base64(a):
        return MegaCrypto.base64_encode(MegaCrypto.a32_to_bytes(a))

    @staticmethod
    def base64_to_a32(s):
        return MegaCrypto.bytes_to_a32(MegaCrypto.base64_decode(s))

    @staticmethod
    def cbc_decrypt(data, key):
        """AES-CBC decrypt with a zero IV; key is an a32 word tuple."""
        cipher = Cipher(algorithms.AES(MegaCrypto.a32_to_bytes(key)), modes.CBC((b'\x00' * 16)), backend=default_backend())
        decryptor = cipher.decryptor()
        return (decryptor.update(data) + decryptor.finalize())

    @staticmethod
    def cbc_encrypt(data, key):
        """AES-CBC encrypt with a zero IV; key is an a32 word tuple."""
        cipher = Cipher(algorithms.AES(MegaCrypto.a32_to_bytes(key)), modes.CBC((b'\x00' * 16)), backend=default_backend())
        encryptor = cipher.encryptor()
        return (encryptor.update(data) + encryptor.finalize())

    @staticmethod
    def ecb_decrypt(data, key):
        """AES-ECB decrypt; key is an a32 word tuple."""
        cipher = Cipher(algorithms.AES(MegaCrypto.a32_to_bytes(key)), modes.ECB(), backend=default_backend())
        decryptor = cipher.decryptor()
        return (decryptor.update(data) + decryptor.finalize())

    @staticmethod
    def ecb_encrypt(data, key):
        """AES-ECB encrypt; key is an a32 word tuple."""
        cipher = Cipher(algorithms.AES(MegaCrypto.a32_to_bytes(key)), modes.ECB(), backend=default_backend())
        encryptor = cipher.encryptor()
        return (encryptor.update(data) + encryptor.finalize())

    @staticmethod
    def get_cipher_key(key):
        """Split a MEGA node key into (AES key, IV words, meta-MAC words)."""
        k = ((key[0] ^ key[4]), (key[1] ^ key[5]), (key[2] ^ key[6]), (key[3] ^ key[7]))
        iv = (key[4:6] + (0, 0))
        meta_mac = key[6:8]
        return (k, iv, meta_mac)

    @staticmethod
    def decrypt_attr(data, key):
        """Decrypt a node's attribute blob; returns the parsed dict or False
        when the 'MEGA' magic marker is missing."""
        data = MegaCrypto.base64_decode(data)
        (k, iv, meta_mac) = MegaCrypto.get_cipher_key(key)
        attr = MegaCrypto.cbc_decrypt(data, k)
        return (json.loads(re.search(b'{.+}', attr).group(0)) if (attr[:6] == b'MEGA{"') else False)

    @staticmethod
    def decrypt_key(data, key):
        """Decrypt a base64 node key with an a32 master key."""
        data = MegaCrypto.base64_decode(data)
        return MegaCrypto.bytes_to_a32(MegaCrypto.ecb_decrypt(data, key))

    @staticmethod
    def encrypt_key(data, key):
        """Encrypt an a32 node key with an a32 master key."""
        data = MegaCrypto.a32_to_bytes(data)
        return MegaCrypto.bytes_to_a32(MegaCrypto.ecb_encrypt(data, key))

    @staticmethod
    def get_chunks(size):
        """Yield MEGA's (offset, length) chunk schedule: 128 KiB growing by
        128 KiB per chunk, capped at 1 MiB."""
        chunk_start = 0
        chunk_size = 131072
        while ((chunk_start + chunk_size) < size):
            (yield (chunk_start, chunk_size))
            chunk_start += chunk_size
            if (chunk_size < 1048576):
                chunk_size += 131072
        if (chunk_start < size):
            (yield (chunk_start, (size - chunk_start)))

    class Checksum():
        """Incremental MEGA chunk MAC: a CBC-MAC per chunk, condensed through a
        second running AES-CBC pass."""

        def __init__(self, key):
            (k, iv, meta_mac) = MegaCrypto.get_cipher_key(key)
            self.hash = (b'\x00' * 16)
            self.key = MegaCrypto.a32_to_bytes(k)
            self.iv = MegaCrypto.a32_to_bytes((iv[0:2] * 2))
            cipher = Cipher(algorithms.AES(self.key), modes.CBC(self.hash), backend=default_backend())
            self.AES = cipher.encryptor()

        def update(self, chunk):
            """Absorb one chunk: CBC-MAC it with the per-chunk IV, then feed
            the final block into the running condenser."""
            cipher = Cipher(algorithms.AES(self.key), modes.CBC(self.iv), backend=default_backend())
            encryptor = cipher.encryptor()
            for j in range(0, len(chunk), 16):
                block = chunk[j:(j + 16)].ljust(16, b'\x00')
                hash = encryptor.update(block)
            # Finalize once, after the whole chunk: finalizing inside the loop
            # (as the previous copy did) invalidates the encryptor and raised
            # AlreadyFinalized for chunks longer than 16 bytes.
            encryptor.finalize()
            self.hash = self.AES.update(hash)

        def digest(self):
            """Condense the 16-byte state into the 8-byte (two-word) MAC."""
            d = MegaCrypto.bytes_to_a32(self.hash)
            return ((d[0] ^ d[1]), (d[2] ^ d[3]))

        def hexdigest(self):
            """Hex string of the 8-byte digest."""
            # bytearray iteration yields ints on both py2/py3; '{:02x}' zero-pads.
            # (The previous ord()-based '{:2x}' version raised TypeError on
            # Python 3 and space-padded instead of zero-padding.)
            return ''.join(('{:02x}'.format(b) for b in bytearray(MegaCrypto.a32_to_bytes(self.digest()))))

    @staticmethod
    def new(key):
        """Factory mirroring hashlib's .new() convention."""
        return MegaCrypto.Checksum(key)
class VanillaDQN(BaseAgent):
    """Vanilla Deep Q-Network agent.

    Owns paired Train/Test environments, an epsilon-greedy exploration
    schedule, a replay buffer, and a single Q-network (kept in 1-element
    lists — presumably so subclasses can hold several nets; confirm).
    ``update_target_net`` is a no-op here; target-network variants override it.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        self.cfg = cfg
        self.env_name = cfg['env']['name']
        self.agent_name = cfg['agent']['name']
        # Separate env instances so evaluation never disturbs training state.
        self.env = {'Train': make_env(cfg['env']['name'], max_episode_steps=int(cfg['env']['max_episode_steps'])), 'Test': make_env(cfg['env']['name'], max_episode_steps=int(cfg['env']['max_episode_steps']))}
        # A few custom envs need an extra init() call with env-specific config.
        if ((cfg['env']['name'] in ['NChain-v1', 'LockBernoulli-v0', 'LockGaussian-v0']) and ('cfg' in cfg['env'].keys())):
            self.env['Train'].init(**cfg['env']['cfg'])
            self.env['Test'].init(**cfg['env']['cfg'])
        self.config_idx = cfg['config_idx']
        self.device = cfg['device']
        self.discount = cfg['discount']
        self.train_steps = int(cfg['train_steps'])
        self.test_per_episodes = int(cfg['test_per_episodes'])
        self.display_interval = cfg['display_interval']
        self.gradient_clip = cfg['gradient_clip']
        self.action_size = self.get_action_size()
        self.state_size = self.get_state_size()
        self.rolling_score_window = cfg['rolling_score_window']
        self.show_tb = cfg['show_tb']
        self.log_path = {'Train': self.cfg['train_log_path'], 'Test': self.cfg['test_log_path']}
        if self.show_tb:
            self.logger.init_writer()
        # Choose observation/reward normalizers by input modality.
        if (cfg['env']['input_type'] == 'pixel'):
            if ('MinAtar' in self.env_name):
                self.state_normalizer = RescaleNormalizer()
                self.reward_normalizer = RescaleNormalizer()
            else:
                self.state_normalizer = ImageNormalizer()
                self.reward_normalizer = SignNormalizer()
        elif (cfg['env']['input_type'] == 'feature'):
            self.state_normalizer = RescaleNormalizer()
            self.reward_normalizer = RescaleNormalizer()
        else:
            raise ValueError(f"{cfg['env']['input_type']} is not supported.")
        # Q-network and optimizer kept in lists; index 0 is the one updated here.
        self.Q_net = [None]
        self.Q_net[0] = self.createNN(cfg['env']['input_type']).to(self.device)
        self.update_Q_net_index = 0
        self.optimizer = [None]
        self.optimizer[0] = getattr(torch.optim, cfg['optimizer']['name'])(self.Q_net[0].parameters(), **cfg['optimizer']['kwargs'])
        epsilon = {'steps': float(cfg['epsilon_steps']), 'start': cfg['epsilon_start'], 'end': cfg['epsilon_end'], 'decay': cfg['epsilon_decay']}
        self.exploration = getattr(components.exploration, cfg['exploration_type'])(cfg['exploration_steps'], epsilon)
        self.loss = getattr(torch.nn, cfg['loss'])(reduction='mean')
        self.replay = getattr(components.replay, cfg['memory_type'])(cfg['memory_size'], keys=['state', 'action', 'next_state', 'reward', 'mask'])
        # Per-mode episode bookkeeping, reset by reset_game().
        for key in ['state', 'next_state', 'action', 'reward', 'done', 'episode_return', 'episode_step_count']:
            setattr(self, key, {'Train': None, 'Test': None})

    def createNN(self, input_type):
        """Build the Q-network: a feature extractor followed by an MLP head."""
        if (input_type == 'pixel'):
            layer_dims = (([self.cfg['feature_dim']] + self.cfg['hidden_layers']) + [self.action_size])
            if ('MinAtar' in self.env_name):
                feature_net = Conv2d_MinAtar(in_channels=self.env['Train'].game.state_shape()[2], feature_dim=layer_dims[0])
            else:
                feature_net = Conv2d_Atari(in_channels=4, feature_dim=layer_dims[0])
        elif (input_type == 'feature'):
            layer_dims = (([self.state_size] + self.cfg['hidden_layers']) + [self.action_size])
            feature_net = nn.Identity()
        assert (self.action_type == 'DISCRETE'), f'{self.agent_name} only supports discrete action spaces.'
        value_net = MLPCritic(layer_dims=layer_dims, hidden_act=self.cfg['hidden_act'], output_act=self.cfg['output_act'], last_w_scale=1.0)
        NN = DQNNet(feature_net, value_net)
        return NN

    def reset_game(self, mode):
        """Reset the given env and clear the per-episode bookkeeping for *mode*."""
        self.original_state = self.env[mode].reset()
        self.state[mode] = self.state_normalizer(self.original_state)
        self.next_state[mode] = None
        self.action[mode] = None
        self.reward[mode] = None
        self.done[mode] = False
        self.episode_return[mode] = 0
        self.episode_step_count[mode] = 0

    def run_steps(self, render=False):
        """Main loop: interleave Train episodes with periodic Test episodes
        until ``train_steps`` environment steps have been taken."""
        self.step_count = 0
        self.episode_count = 0
        self.result = {'Train': [], 'Test': []}
        self.episode_return_list = {'Train': [], 'Test': []}
        mode = 'Train'
        self.start_time = time.time()
        self.reset_game('Train')
        self.reset_game('Test')
        while (self.step_count < self.train_steps):
            # Run a Test episode every `test_per_episodes` Train episodes.
            if ((mode == 'Train') and (self.test_per_episodes > 0) and ((self.episode_count % self.test_per_episodes) == 0)):
                mode = 'Test'
            else:
                mode = 'Train'
            self.set_net_mode(mode)
            self.run_episode(mode, render)

    def run_episode(self, mode, render):
        """Play one episode in *mode*; learn and count steps only in Train."""
        while (not self.done[mode]):
            self.action[mode] = self.get_action(mode)
            if render:
                self.env[mode].render()
            (next_state, self.reward[mode], self.done[mode], _) = self.env[mode].step(self.action[mode])
            self.next_state[mode] = self.state_normalizer(next_state)
            self.reward[mode] = self.reward_normalizer(self.reward[mode])
            self.episode_return[mode] += self.reward[mode]
            self.episode_step_count[mode] += 1
            if (mode == 'Train'):
                self.save_experience()
                if self.time_to_learn():
                    self.learn()
                    self.update_target_net()
                self.step_count += 1
            self.state[mode] = self.next_state[mode]
            self.original_state = next_state
        self.save_episode_result(mode)
        self.reset_game(mode)
        if (mode == 'Train'):
            self.episode_count += 1

    def save_episode_result(self, mode):
        """Record the finished episode's return, log it, and periodically
        flush the accumulated results to a feather file."""
        self.episode_return_list[mode].append(self.episode_return[mode])
        # Rolling average over the last `rolling_score_window[mode]` episodes.
        rolling_score = np.mean(self.episode_return_list[mode][((- 1) * self.rolling_score_window[mode]):])
        result_dict = {'Env': self.env_name, 'Agent': self.agent_name, 'Episode': self.episode_count, 'Step': self.step_count, 'Return': self.episode_return[mode], 'Average Return': rolling_score}
        self.result[mode].append(result_dict)
        if self.show_tb:
            self.logger.add_scalar(f'{mode}_Return', self.episode_return[mode], self.step_count)
            self.logger.add_scalar(f'{mode}_Average_Return', rolling_score, self.step_count)
        if ((mode == 'Test') or ((self.episode_count % self.display_interval) == 0) or (self.step_count >= self.train_steps)):
            result = pd.DataFrame(self.result[mode])
            result['Env'] = result['Env'].astype('category')
            result['Agent'] = result['Agent'].astype('category')
            result.to_feather(self.log_path[mode])
            speed = (self.step_count / (time.time() - self.start_time))
            eta = ((((self.train_steps - self.step_count) / speed) / 60) if (speed > 0) else (- 1))
            self.logger.info(f'<{self.config_idx}> [{mode}] Episode {self.episode_count}, Step {self.step_count}: Average Return({self.rolling_score_window[mode]})={rolling_score:.2f}, Return={self.episode_return[mode]:.2f}, Speed={speed:.2f} (steps/s), ETA={eta:.2f} (mins)')

    def get_action(self, mode='Train'):
        """Pick an action: greedy in Test, epsilon-greedy in Train.

        NOTE(review): a mode other than 'Train'/'Test' leaves `action`
        unbound and would raise — confirm callers only pass those two.
        """
        state = to_tensor(self.state[mode], device=self.device)
        state = state.unsqueeze(0)
        q_values = self.get_action_selection_q_values(state)
        if (mode == 'Test'):
            action = np.argmax(q_values)
        elif (mode == 'Train'):
            action = self.exploration.select_action(q_values, self.step_count)
        return action

    def time_to_learn(self):
        """Learn only after the warm-up exploration phase and then every
        `network_update_steps` environment steps."""
        if ((self.step_count > self.cfg['exploration_steps']) and ((self.step_count % self.cfg['network_update_steps']) == 0)):
            return True
        else:
            return False

    def update_target_net(self):
        # No target network in vanilla DQN; subclasses override this hook.
        pass

    def learn(self):
        """One gradient step on a sampled replay batch."""
        mode = 'Train'
        batch = self.replay.sample(['state', 'action', 'reward', 'next_state', 'mask'], self.cfg['batch_size'])
        (q, q_target) = (self.compute_q(batch), self.compute_q_target(batch))
        loss = self.loss(q, q_target)
        self.optimizer[self.update_Q_net_index].zero_grad()
        loss.backward()
        if (self.gradient_clip > 0):
            nn.utils.clip_grad_norm_(self.Q_net[self.update_Q_net_index].parameters(), self.gradient_clip)
        self.optimizer[self.update_Q_net_index].step()
        if self.show_tb:
            self.logger.add_scalar(f'Loss', loss.item(), self.step_count)

    def compute_q_target(self, batch):
        """Bootstrapped target: r + gamma * max_a Q(s', a), masked at terminals."""
        with torch.no_grad():
            q_next = self.Q_net[0](batch.next_state).max(1)[0]
            q_target = (batch.reward + ((self.discount * q_next) * batch.mask))
        return q_target

    def compute_q(self, batch):
        """Q-values of the actions actually taken in the batch."""
        action = batch.action.long().unsqueeze(1)
        q = self.Q_net[self.update_Q_net_index](batch.state).gather(1, action).squeeze()
        return q

    def save_experience(self):
        """Push the latest (s, a, s', r, mask) transition into replay.

        Skipped on the very first step of an episode (reward still None).
        """
        mode = 'Train'
        prediction = {}
        if (self.reward[mode] is not None):
            prediction['state'] = to_tensor(self.state[mode], self.device)
            prediction['action'] = to_tensor(self.action[mode], self.device)
            prediction['next_state'] = to_tensor(self.next_state[mode], self.device)
            # mask = 1 - done: zeroes out bootstrapping at terminal states.
            prediction['mask'] = to_tensor((1 - self.done[mode]), self.device)
            prediction['reward'] = to_tensor(self.reward[mode], self.device)
            self.replay.add(prediction)

    def get_action_size(self):
        """Return the action dimension and set ``self.action_type``
        (plus action bounds for continuous spaces)."""
        mode = 'Train'
        if isinstance(self.env[mode].action_space, Discrete):
            self.action_type = 'DISCRETE'
            return self.env[mode].action_space.n
        elif isinstance(self.env[mode].action_space, Box):
            self.action_type = 'CONTINUOUS'
            self.action_min = min(self.env[mode].action_space.low)
            self.action_max = max(self.env[mode].action_space.high)
            self.action_lim = max(abs(self.action_min), self.action_max)
            return self.env[mode].action_space.shape[0]
        else:
            raise ValueError('Unknown action type.')

    def get_state_size(self):
        """Return the flattened observation size, unwrapping one wrapper level
        (``env.env``) when present."""
        mode = 'Train'
        if isinstance(self.env[mode].observation_space, Discrete):
            if hasattr(self.env[mode], 'env'):
                return self.env[mode].env.observation_space.n
            else:
                return self.env[mode].observation_space.n
        elif hasattr(self.env[mode], 'env'):
            return int(np.prod(self.env[mode].env.observation_space.shape))
        else:
            return int(np.prod(self.env[mode].observation_space.shape))

    def set_net_mode(self, mode):
        """Switch all Q-networks between eval (Test) and train (Train) mode."""
        if (mode == 'Test'):
            for i in range(len(self.Q_net)):
                self.Q_net[i].eval()
        elif (mode == 'Train'):
            for i in range(len(self.Q_net)):
                self.Q_net[i].train()

    def get_action_selection_q_values(self, state):
        """Q-values used for action selection, as a flat numpy array."""
        q_values = self.Q_net[0](state)
        q_values = to_numpy(q_values).flatten()
        return q_values

    def save_model(self, model_path):
        """Save all Q-network state dicts (indexed by position) to one file."""
        state_dicts = {}
        for i in range(len(self.Q_net)):
            state_dicts[i] = self.Q_net[i].state_dict()
        torch.save(state_dicts, model_path)

    def load_model(self, model_path):
        """Load Q-network state dicts saved by ``save_model`` and move them
        to the configured device."""
        state_dicts = torch.load(model_path)
        for i in range(len(self.Q_net)):
            self.Q_net[i].load_state_dict(state_dicts[i])
            self.Q_net[i] = self.Q_net[i].to(self.device)
def unannotate_value(origin: Value, extension: Type[ExtensionT]) -> Tuple[(Value, Sequence[ExtensionT])]:
    """Extract metadata entries of type *extension* from an annotated value.

    Returns the value re-annotated with the remaining metadata plus the
    extracted entries; returns ``(origin, [])`` unchanged when *origin* is
    not annotated or when the matches are not all proper ``Extension``s.
    """
    if not isinstance(origin, AnnotatedValue):
        return (origin, [])
    extracted = []
    kept = []
    for meta in origin.metadata:
        (extracted if isinstance(meta, extension) else kept).append(meta)
    if extracted and all_of_type(extracted, Extension):
        return (annotate_value(origin.value, kept), extracted)
    return (origin, [])
class NeighboringStreetOrientationDeviation():
    """Mean absolute deviation of a street segment's orientation from the
    orientations of the segments it intersects.

    Result is stored in ``self.series``, indexed like the matched rows of
    the input GeoDataFrame.
    """

    def __init__(self, gdf):
        self.gdf = gdf
        # Orientation (folded into a canonical range) of every geometry.
        self.orientation = gdf.geometry.apply(self._orient)
        # All intersecting pairs via the spatial index (bulk query).
        (inp, res) = gdf.sindex.query_bulk(gdf.geometry, predicate='intersects')
        # Drop self-intersections (a geometry always intersects itself).
        itself = (inp == res)
        inp = inp[(~ itself)]
        res = res[(~ itself)]
        left = self.orientation.take(inp).reset_index(drop=True)
        right = self.orientation.take(res).reset_index(drop=True)
        deviations = (left - right).abs()
        # Mean deviation per left-hand geometry.
        results = deviations.groupby(inp).mean()
        match = gdf.iloc[list(results.index)]
        # NOTE(review): assigning a column on an .iloc slice may trigger
        # pandas' SettingWithCopyWarning — consider .copy() first; verify.
        match['result'] = results.to_list()
        self.series = match.result

    def _orient(self, geom):
        """Azimuth of the segment's endpoints, reflected step-by-step into a
        canonical range so that perpendicular/parallel streets compare equal."""
        start = geom.coords[0]
        end = geom.coords[(- 1)]
        az = _azimuth(start, end)
        # Successive reflections about 135, 90 and 45 degrees fold the
        # azimuth down; each branch mirrors az across the listed pivots.
        if (90 > az >= 45):
            diff = (az - 45)
            az = (az - (2 * diff))
        elif (135 > az >= 90):
            diff = (az - 90)
            az = (az - (2 * diff))
            diff = (az - 45)
            az = (az - (2 * diff))
        elif (181 > az >= 135):
            diff = (az - 135)
            az = (az - (2 * diff))
            diff = (az - 90)
            az = (az - (2 * diff))
            diff = (az - 45)
            az = (az - (2 * diff))
        return az
class AMSGrad(OptimizationAlgorithm):
    """AMSGrad optimizer step (Adam variant with a non-decreasing
    second-moment estimate ``S_hat``).

    State lives in ``self.restart_variables``; hyper-parameters in
    ``self.parameters`` (set up by the base class).
    """

    def __init__(self, **kwargs):
        default_parameters = {'learning_rate': 0.001, 'beta1': 0.9, 'beta2': 0.999, 'eps': 1e-07}
        restart_variables = {'V': 0.0, 'S': 0.0, 'S_hat': 0.0}
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this class is ever subclassed.
        super(AMSGrad, self).__init__(alg_default_parameters=default_parameters, alg_restart_variables=restart_variables, **kwargs)

    def _step(self, grad):
        """Return the parameter update for one gradient observation."""
        grad = np.squeeze(grad)
        p = self.parameters
        rv = self.restart_variables
        # Exponential moving averages of the first and second moments.
        rv['V'] = ((p['beta1'] * rv['V']) + ((1 - p['beta1']) * grad))
        rv['S'] = ((p['beta2'] * rv['S']) + ((1 - p['beta2']) * (grad ** 2)))
        # AMSGrad: keep the maximum second-moment estimate seen so far.
        rv['S_hat'] = np.maximum(rv['S'], rv['S_hat'])
        # BUG FIX: the original read a bare `S_hat`, an undefined name
        # (NameError at runtime); it must use the stored restart variable.
        step = (((- p['learning_rate']) * rv['V']) / (np.sqrt(rv['S_hat']) + p['eps']))
        return step
class GPT2OnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-2, with optional past-key-values
    (incremental decoding) support.

    NOTE(review): `inputs`, `num_layers`, `num_attention_heads` and
    `default_onnx_opset` read like @property accessors (and
    `generate_dummy_inputs` uses `self.num_layers` / `self.num_attention_heads`
    as plain values, not calls) — confirm the @property decorators were not
    lost in transcription.
    """

    def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: List[PatchingSpec]=None, use_past: bool=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        # ONNX export needs a pad token; default to 0 when unset.
        if (not getattr(self._config, 'pad_token_id', None)):
            self._config.pad_token_id = 0

    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Dynamic-axis spec for the exported model's inputs."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            # Attention mask spans both cached and new tokens.
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    def num_layers(self) -> int:
        # GPT-2 configs name this n_layer.
        return self._config.n_layer

    def num_attention_heads(self) -> int:
        # GPT-2 configs name this n_head.
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build dummy inputs for export, adding zero-filled past_key_values
        (and a correspondingly widened attention mask) when use_past is set."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size, seq_length, is_pair, framework)
        # Re-order to match the ONNX graph's expected input order.
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                (batch, seqlen) = common_inputs['input_ids'].shape
                # Past length is arbitrary for the dummy run; seqlen + 2 here.
                past_key_values_length = (seqlen + 2)
                past_shape = (batch, self.num_attention_heads, past_key_values_length, (self._config.hidden_size // self.num_attention_heads))
                ordered_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the mask to cover the zero-filled past positions.
            ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length)], dim=1)
        return ordered_inputs

    def default_onnx_opset(self) -> int:
        return 13
def test_specific_location(hatch, helpers, temp_dir_data, path_append, dist_name, mocker):
    """`hatch python update -d <dir>` updates a distribution installed in an
    arbitrary directory without touching PATH."""
    install_dir = ((temp_dir_data / 'foo') / 'bar') / 'baz'
    helpers.write_distribution(install_dir, dist_name)
    dist_dir = (install_dir / dist_name)
    metadata = helpers.downgrade_distribution_metadata(dist_dir)
    python_path = (dist_dir / metadata['python_path'])
    # Patch install exactly once (the original patched it twice, with the
    # first patch's handle immediately overwritten — dead assignment).
    install = mocker.patch('hatch.python.core.PythonManager.install', return_value=mocker.MagicMock(path=dist_dir, python_path=python_path))
    result = hatch('python', 'update', '-d', str(install_dir), dist_name)
    assert (result.exit_code == 0), result.output
    assert (result.output == helpers.dedent(f'''
        Updating {dist_name}
        Updated {dist_name} {dist_dir}
        '''))
    install.assert_called_once_with(dist_name)
    path_append.assert_not_called()
class MultipleDatasets(Dataset):
    """Wrap several datasets as one.

    With ``make_same_len=True`` every sub-dataset is (over)sampled to the
    length of the largest one, so each contributes equally per epoch;
    otherwise items are concatenated in order.
    """

    def __init__(self, dbs, make_same_len=True):
        print(('=' * 20), 'MultipleDatasets', ('=' * 20))
        self.dbs = dbs
        self.db_num = len(self.dbs)
        self.max_db_data_num = max([len(db) for db in dbs])
        # Cumulative lengths, used for index lookup when not equalizing.
        self.db_len_cumsum = np.cumsum([len(db) for db in dbs])
        self.make_same_len = make_same_len
        # Seed per distributed rank so random oversampling differs across
        # workers (NOTE: this reseeds numpy's *global* RNG).
        if dist.is_initialized():
            np.random.seed(dist.get_rank())
        else:
            np.random.seed(0)

    def __len__(self):
        if self.make_same_len:
            # Every dataset is stretched to the longest one's length.
            return (self.max_db_data_num * self.db_num)
        else:
            return sum([len(db) for db in self.dbs])

    def __getitem__(self, index):
        if self.make_same_len:
            # First max_db_data_num indices map to db 0, next block to db 1, ...
            db_idx = (index // self.max_db_data_num)
            data_idx = (index % self.max_db_data_num)
            # Beyond the last full repetition of the smaller dataset, sample
            # a random item; otherwise wrap around deterministically.
            if (data_idx >= (len(self.dbs[db_idx]) * (self.max_db_data_num // len(self.dbs[db_idx])))):
                data_idx = np.random.randint(0, len(self.dbs[db_idx]))
            else:
                data_idx = (data_idx % len(self.dbs[db_idx]))
        else:
            # Locate the dataset whose cumulative range contains `index`.
            for i in range(self.db_num):
                if (index < self.db_len_cumsum[i]):
                    db_idx = i
                    break
            if (db_idx == 0):
                data_idx = index
            else:
                data_idx = (index - self.db_len_cumsum[(db_idx - 1)])
        return self.dbs[db_idx][data_idx]
class Xception(nn.Module):
    """Modified Aligned Xception backbone (as used in DeepLabV3+).

    ``output_stride`` selects the stride/dilation layout of the entry,
    middle and exit flows (16 or 8). Returns the final feature map plus the
    low-level features after block1.
    """

    def __init__(self, output_stride=16, in_channels=3, pretrained=True):
        super(Xception, self).__init__()
        # (block3 stride, middle-flow dilation, exit-flow dilations).
        if (output_stride == 16):
            (b3_s, mf_d, ef_d) = (2, 1, (1, 2))
        if (output_stride == 8):
            (b3_s, mf_d, ef_d) = (1, 2, (2, 4))
        # Entry flow.
        self.conv1 = nn.Conv2d(in_channels, 32, 3, 2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, 3, 1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        self.block1 = Block(64, 128, stride=2, dilation=1, use_1st_relu=False)
        self.block2 = Block(128, 256, stride=2, dilation=1)
        self.block3 = Block(256, 728, stride=b3_s, dilation=1)
        # Middle flow: 16 identical residual blocks (block4..block19).
        # setattr replaces the original exec() — same registration in
        # nn.Module, without dynamic code execution.
        for i in range(16):
            setattr(self, f'block{i + 4}', Block(728, 728, stride=1, dilation=mf_d))
        # Exit flow.
        self.block20 = Block(728, 1024, stride=1, dilation=ef_d[0], exit_flow=True)
        self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=ef_d[1])
        self.bn3 = nn.BatchNorm2d(1536)
        self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=ef_d[1])
        self.bn4 = nn.BatchNorm2d(1536)
        self.conv5 = SeparableConv2d(1536, 2048, 3, stride=1, dilation=ef_d[1])
        self.bn5 = nn.BatchNorm2d(2048)
        initialize_weights(self)
        if pretrained:
            self._load_pretrained_model()

    def _load_pretrained_model(self):
        """Load Cadene's ImageNet Xception weights, remapping them onto this
        modified architecture (replicated middle-flow blocks, renamed exit
        convs, pointwise weights expanded to 4-D)."""
        # NOTE(review): the URL literal was truncated in the original source;
        # restored to the standard pretrainedmodels weight URL — confirm.
        url = 'http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth'
        pretrained_weights = model_zoo.load_url(url)
        state_dict = self.state_dict()
        model_dict = {}
        for (k, v) in pretrained_weights.items():
            if (k in state_dict):
                if ('pointwise' in k):
                    # Stored as (out, in); this model expects (out, in, 1, 1).
                    v = v.unsqueeze((- 1)).unsqueeze((- 1))
                if k.startswith('block11'):
                    # Original has 8 middle blocks; replicate block11's
                    # weights into the extra blocks 12..19.
                    model_dict[k] = v
                    for i in range(8):
                        model_dict[k.replace('block11', f'block{i + 12}')] = v
                elif k.startswith('block12'):
                    model_dict[k.replace('block12', 'block20')] = v
                elif k.startswith('bn3'):
                    model_dict[k] = v
                    model_dict[k.replace('bn3', 'bn4')] = v
                elif k.startswith('conv4'):
                    model_dict[k.replace('conv4', 'conv5')] = v
                elif k.startswith('bn4'):
                    model_dict[k.replace('bn4', 'bn5')] = v
                else:
                    model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)

    def forward(self, x):
        # Entry flow.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.block1(x)
        # Low-level features (pre-ReLU) are tapped for the decoder.
        low_level_features = x
        x = F.relu(x)
        x = self.block2(x)
        x = self.block3(x)
        # Middle flow: block4..block19 applied in order.
        for i in range(4, 20):
            x = getattr(self, f'block{i}')(x)
        # Exit flow.
        x = self.block20(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)
        return (x, low_level_features)
def test_trajectory_position():
    """TrajectoryPosition: equality, XML round-trip and per-version validation."""
    traj = OSC.Trajectory('my traj', False)
    traj.add_shape(OSC.Clothoid(0.001, 0.001, 100, OSC.WorldPosition()))
    pos = OSC.TrajectoryPosition(traj, 0)
    prettyprint(pos)
    same = OSC.TrajectoryPosition(traj, 0)
    different = OSC.TrajectoryPosition(traj, 0, 3)
    assert (same == pos)
    assert (different != pos)
    # Serialization round-trip preserves equality.
    reparsed = OSC.TrajectoryPosition.parse(pos.get_element())
    assert (pos == reparsed)
    # Not valid in OSC 1.0, valid from 1.1 onwards.
    expected = {0: ValidationResponse.OSC_VERSION, 1: ValidationResponse.OK, 2: ValidationResponse.OK}
    for (version, outcome) in expected.items():
        assert (version_validation('Position', pos, version) == outcome)
class _ViewProviderCfdAnalysis():
def __init__(self, vobj):
vobj.Proxy = self
def getIcon(self):
return ':/icons/fem-cfd-analysis.svg'
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
self.bubbles = None
def updateData(self, obj, prop):
return
def onChanged(self, vobj, prop):
return
def doubleClicked(self, vobj):
if (FreeCADGui.activeWorkbench().name() != 'CfdWorkbench'):
FreeCADGui.activateWorkbench('CfdWorkbench')
if (not (FemGui.getActiveAnalysis() == self.Object)):
FemGui.setActiveAnalysis(self.Object)
return True
def __getstate__(self):
return None
def __setstate__(self, state):
return None |
class FitTest(unittest.TestCase):
    """Tests for torchtnt's ``fit`` entry point: evaluation cadence, early
    stop, max-steps capping, callback dispatch, active phase and timing."""

    def test_fit_evaluate_every_n_epochs(self) -> None:
        """Evaluation runs once per epoch; step/epoch counters line up."""
        input_dim = 2
        train_dataset_len = 8
        eval_dataset_len = 4
        batch_size = 2
        max_epochs = 3
        evaluate_every_n_epochs = 1
        expected_train_steps_per_epoch = (train_dataset_len / batch_size)
        expected_eval_steps_per_epoch = (eval_dataset_len / batch_size)
        expected_num_evaluate_calls = (max_epochs / evaluate_every_n_epochs)
        my_unit = DummyFitUnit(input_dim=input_dim)
        train_dataloader = generate_random_dataloader(train_dataset_len, input_dim, batch_size)
        eval_dataloader = generate_random_dataloader(eval_dataset_len, input_dim, batch_size)
        fit(my_unit, train_dataloader=train_dataloader, eval_dataloader=eval_dataloader, max_epochs=max_epochs, evaluate_every_n_epochs=evaluate_every_n_epochs)
        self.assertEqual(my_unit.train_progress.num_epochs_completed, max_epochs)
        # in-epoch counter resets to 0 once an epoch completes
        self.assertEqual(my_unit.train_progress.num_steps_completed_in_epoch, 0)
        self.assertEqual(my_unit.train_progress.num_steps_completed, (max_epochs * expected_train_steps_per_epoch))
        self.assertEqual(my_unit.eval_progress.num_epochs_completed, expected_num_evaluate_calls)
        self.assertEqual(my_unit.eval_progress.num_steps_completed_in_epoch, 0)
        self.assertEqual(my_unit.eval_progress.num_steps_completed, (max_epochs * expected_eval_steps_per_epoch))

    def test_fit_evaluate_every_n_steps(self) -> None:
        """Evaluation triggered by train-step count rather than by epoch."""
        input_dim = 2
        train_dataset_len = 16
        eval_dataset_len = 4
        batch_size = 2
        max_epochs = 3
        evaluate_every_n_steps = 5
        expected_train_steps_per_epoch = (train_dataset_len / batch_size)
        expected_total_train_steps = (expected_train_steps_per_epoch * max_epochs)
        expected_eval_steps_per_epoch = (eval_dataset_len / batch_size)
        expected_num_evaluate_calls = math.floor((expected_total_train_steps / evaluate_every_n_steps))
        my_unit = DummyFitUnit(input_dim=input_dim)
        train_dataloader = generate_random_dataloader(train_dataset_len, input_dim, batch_size)
        eval_dataloader = generate_random_dataloader(eval_dataset_len, input_dim, batch_size)
        # epoch-based evaluation disabled so only the step cadence applies
        fit(my_unit, train_dataloader=train_dataloader, eval_dataloader=eval_dataloader, max_epochs=max_epochs, evaluate_every_n_epochs=None, evaluate_every_n_steps=evaluate_every_n_steps)
        self.assertEqual(my_unit.train_progress.num_epochs_completed, max_epochs)
        self.assertEqual(my_unit.train_progress.num_steps_completed_in_epoch, 0)
        self.assertEqual(my_unit.train_progress.num_steps_completed, (max_epochs * expected_train_steps_per_epoch))
        self.assertEqual(my_unit.eval_progress.num_epochs_completed, expected_num_evaluate_calls)
        self.assertEqual(my_unit.eval_progress.num_steps_completed_in_epoch, 0)
        self.assertEqual(my_unit.eval_progress.num_steps_completed, (expected_num_evaluate_calls * expected_eval_steps_per_epoch))

    def test_fit_stop(self) -> None:
        """state.stop() inside train_step halts training after the requested
        number of steps and skips the remaining work."""
        Batch = Tuple[(torch.Tensor, torch.Tensor)]

        class FitStop(TrainUnit[Batch], EvalUnit[Batch]):
            """Unit that calls state.stop() after `steps_before_stopping` steps."""

            def __init__(self, input_dim: int, steps_before_stopping: int) -> None:
                super().__init__()
                self.module = nn.Linear(input_dim, 2)
                self.loss_fn = nn.CrossEntropyLoss()
                self.optimizer = torch.optim.SGD(self.module.parameters(), lr=0.01)
                self.steps_processed = 0
                self.steps_before_stopping = steps_before_stopping

            def train_step(self, state: State, data: Batch) -> Tuple[(torch.Tensor, torch.Tensor)]:
                (inputs, targets) = data
                outputs = self.module(inputs)
                loss = self.loss_fn(outputs, targets)
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
                assert state.train_state
                # reads the enclosing test's `my_unit` via closure (late binding)
                if ((my_unit.train_progress.num_steps_completed_in_epoch + 1) == self.steps_before_stopping):
                    state.stop()
                self.steps_processed += 1
                return (loss, outputs)

            def eval_step(self, state: State, data: Batch) -> Tuple[(torch.Tensor, torch.Tensor)]:
                (inputs, targets) = data
                outputs = self.module(inputs)
                loss = self.loss_fn(outputs, targets)
                self.steps_processed += 1
                return (loss, outputs)
        input_dim = 2
        dataset_len = 10
        batch_size = 2
        max_epochs = 3
        max_steps_per_epoch = 4
        steps_before_stopping = 2
        my_unit = FitStop(input_dim=input_dim, steps_before_stopping=steps_before_stopping)
        train_dl = generate_random_dataloader(dataset_len, input_dim, batch_size)
        eval_dl = generate_random_dataloader(dataset_len, input_dim, batch_size)
        fit(my_unit, train_dataloader=train_dl, eval_dataloader=eval_dl, max_epochs=max_epochs, max_train_steps_per_epoch=max_steps_per_epoch)
        self.assertEqual(my_unit.train_progress.num_epochs_completed, 1)
        self.assertEqual(my_unit.train_progress.num_steps_completed_in_epoch, 0)
        self.assertEqual(my_unit.steps_processed, my_unit.train_progress.num_steps_completed)
        self.assertEqual(my_unit.steps_processed, steps_before_stopping)
        self.assertEqual(my_unit.eval_progress.num_epochs_completed, 1)
        # stop() fired before any eval step ran
        self.assertEqual(my_unit.eval_progress.num_steps_completed, 0)
        self.assertEqual(my_unit.eval_progress.num_steps_completed_in_epoch, 0)

    def test_fit_max_steps(self) -> None:
        """`max_steps` caps total train steps; eval still runs a full pass."""
        max_steps = 3
        input_dim = 2
        dataset_len = 8
        batch_size = 2
        expected_eval_steps_per_epoch = (dataset_len / batch_size)
        my_unit = DummyFitUnit(2)
        train_dl = generate_random_dataloader(dataset_len, input_dim, batch_size)
        eval_dl = generate_random_dataloader(dataset_len, input_dim, batch_size)
        fit(my_unit, train_dataloader=train_dl, eval_dataloader=eval_dl, max_steps=max_steps)
        self.assertEqual(my_unit.train_progress.num_steps_completed, max_steps)
        self.assertEqual(my_unit.eval_progress.num_steps_completed, expected_eval_steps_per_epoch)

    def test_fit_with_callback(self) -> None:
        """Every callback hook fires the expected number of times."""
        input_dim = 2
        train_dataset_len = 10
        eval_dataset_len = 6
        batch_size = 2
        max_epochs = 4
        expected_num_total_train_steps = ((train_dataset_len / batch_size) * max_epochs)
        expected_num_total_eval_steps = ((eval_dataset_len / batch_size) * max_epochs)
        my_unit = DummyFitUnit(2)
        train_dataloader = generate_random_dataloader(train_dataset_len, input_dim, batch_size)
        eval_dataloader = generate_random_dataloader(eval_dataset_len, input_dim, batch_size)
        callback_mock = MagicMock(spec=Callback)
        fit(my_unit, train_dataloader=train_dataloader, eval_dataloader=eval_dataloader, max_epochs=max_epochs, callbacks=[callback_mock])
        self.assertEqual(callback_mock.on_train_start.call_count, 1)
        self.assertEqual(callback_mock.on_train_epoch_start.call_count, max_epochs)
        self.assertEqual(callback_mock.on_train_step_start.call_count, expected_num_total_train_steps)
        self.assertEqual(callback_mock.on_train_step_end.call_count, expected_num_total_train_steps)
        self.assertEqual(callback_mock.on_train_epoch_end.call_count, max_epochs)
        self.assertEqual(callback_mock.on_train_end.call_count, 1)
        # eval start/end fire once per evaluation run (one per epoch here)
        self.assertEqual(callback_mock.on_eval_start.call_count, max_epochs)
        self.assertEqual(callback_mock.on_eval_epoch_start.call_count, max_epochs)
        self.assertEqual(callback_mock.on_eval_step_start.call_count, expected_num_total_eval_steps)
        self.assertEqual(callback_mock.on_eval_step_end.call_count, expected_num_total_eval_steps)
        self.assertEqual(callback_mock.on_eval_epoch_end.call_count, max_epochs)
        self.assertEqual(callback_mock.on_eval_end.call_count, max_epochs)

    def test_fit_active_phase(self) -> None:
        """state.active_phase reports TRAIN during train hooks even when
        evaluation is interleaved."""
        tc = unittest.TestCase()

        class PhaseTestCallback(Callback):
            # assertions run inside the fit loop via callback hooks

            def on_train_step_end(self, state: State, unit: TTrainUnit) -> None:
                tc.assertEqual(state.active_phase, ActivePhase.TRAIN)

            def on_train_end(self, state: State, unit: TTrainUnit) -> None:
                tc.assertEqual(state.active_phase, ActivePhase.TRAIN)
        input_dim = 2
        dataset_len = 8
        batch_size = 2
        evaluate_every_n_steps = 2
        evaluate_every_n_epochs = 1
        max_epochs = 2
        my_unit = DummyFitUnit(input_dim)
        train_dl = generate_random_dataloader(dataset_len, input_dim, batch_size)
        eval_dl = generate_random_dataloader(dataset_len, input_dim, batch_size)
        fit(my_unit, train_dataloader=train_dl, eval_dataloader=eval_dl, evaluate_every_n_steps=evaluate_every_n_steps, evaluate_every_n_epochs=evaluate_every_n_epochs, max_epochs=max_epochs, callbacks=[PhaseTestCallback()])

    def test_fit_timing(self) -> None:
        """The supplied Timer records data-fetch durations for both phases."""
        input_dim = 2
        dataset_len = 10
        batch_size = 2
        max_steps_per_epoch = 1
        max_epochs = 1
        evaluate_every_n_epochs = 1
        dataloader = generate_random_dataloader(dataset_len, input_dim, batch_size)
        timer = Timer()
        fit(DummyFitUnit(input_dim=input_dim), train_dataloader=dataloader, eval_dataloader=dataloader, max_train_steps_per_epoch=max_steps_per_epoch, max_epochs=max_epochs, evaluate_every_n_epochs=evaluate_every_n_epochs, timer=timer)
        self.assertIn('train.next(data_iter)', timer.recorded_durations.keys())
        self.assertIn('evaluate.next(data_iter)', timer.recorded_durations.keys())
def parse_args():
    """Parse command-line options for the D2 model converter.

    Both options default to the empty string, so callers must validate
    that real paths were supplied.
    """
    parser = argparse.ArgumentParser('D2 model converter')
    options = (
        ('--source_model', 'Path or url to the model to convert'),
        ('--output_model', 'Path where to save the converted model'),
    )
    for flag, description in options:
        parser.add_argument(flag, default='', type=str, help=description)
    return parser.parse_args()
class TestUtils(TestCase):
def test_print_table(self):
(df2, df3) = (df.copy(), df.copy())
df2['F'] = 0
print_table(df2)
df3['A'] = 0
print_table(df3, tablefmt='html', floatfmt='.3f')
def test__postprocess_dataframe(self):
df2 = df.copy()
df2.Values = [1.54321, 5.87654, 8.23456, 3.45678]
df2 = df2.assign(Values2=[1.54321, 5.87654, 8.23456, 3.45678])
df2.index = [('row' + str(x)) for x in df.index]
old_opts = pingouin.options.copy()
pingouin.options.clear()
pingouin.options['round'] = 4
pingouin.options['round.cell.[row0]x[Values]'] = None
pingouin.options['round.column.Values'] = 3
pingouin.options['round.row.row1'] = 2
pingouin.options['round.cell.[row3]x[Values2]'] = 0
df_expected = df2.copy()
df_expected.Values = [1.54321, 5.877, 8.235, 3.457]
df_expected.Values2 = [1.5432, 5.88, 8.2346, 3.0]
df2 = _postprocess_dataframe(df2)
pd.testing.assert_frame_equal(df2, df_expected)
pingouin.options.update(old_opts)
def test_get_round_setting_for(self):
old_opts = pingouin.options.copy()
pingouin.options.clear()
pingouin.options['round'] = 4
pingouin.options['round.cell.[row0]x[Values]'] = None
pingouin.options['round.column.Values'] = 3
pingouin.options['round.row.row1'] = 2
pingouin.options['round.cell.[row3]x[Values2]'] = 0
assert (_get_round_setting_for('row0', 'Values') is None)
assert (_get_round_setting_for('row1', 'Values') == 3)
assert (_get_round_setting_for('row1', 'Values2') == 2)
assert (_get_round_setting_for('row3', 'Values2') == 0)
assert (_get_round_setting_for('row2', 'Values2') == 4)
pingouin.options.update(old_opts)
def test_flatten_list(self):
x = ['X1', ['M1', 'M2'], 'Y1', ['Y2']]
fl = _flatten_list(x)
np.testing.assert_array_equal(fl, ['X1', 'M1', 'M2', 'Y1', 'Y2'])
x = ['Xaa', 'Xbb', 'Xcc']
np.testing.assert_array_equal(_flatten_list(x), x)
xt = ['Xaa', ('Xbb', 'Xcc')]
fl = _flatten_list(xt)
assert (fl == xt)
np.testing.assert_array_equal(_flatten_list(xt, include_tuple=True), x)
assert (_flatten_list(1) == 1)
assert (_flatten_list([1, 2]) == [1, 2])
def test_perm_pval(self):
np.random.seed(123)
bootstat = np.random.normal(size=1000)
x = (- 2)
up = _perm_pval(bootstat, x, alternative='greater')
low = _perm_pval(bootstat, x, alternative='less')
two = _perm_pval(bootstat, x, alternative='two-sided')
assert (up > low)
assert ((up + low) == 1)
assert (low < two < up)
x = 2.5
up = _perm_pval(bootstat, x, alternative='greater')
low = _perm_pval(bootstat, x, alternative='less')
two = _perm_pval(bootstat, x, alternative='two-sided')
assert (low > up)
assert ((up + low) == 1)
assert (up < two < low)
def test_remove_na(self):
x = [6.4, 3.2, 4.5, np.nan]
y = [3.5, 7.2, 8.4, 3.2]
z = [2.3, np.nan, 5.2, 4.6]
remove_na(x, y, paired=True)
remove_na(x, y, paired=False)
remove_na(y, x, paired=False)
(x_out, _) = remove_na(x, z, paired=True)
assert np.allclose(x_out, [6.4, 4.5])
remove_na(x, None)
remove_na(x, 4)
x = np.array([[4, 2], [4, np.nan], [7, 6]])
y = np.array([[6, np.nan], [3, 2], [2, 2]])
(x_nan, y_nan) = remove_na(x, y, paired=False)
assert np.allclose(x_nan, [[4.0, 2.0], [7.0, 6.0]])
assert np.allclose(y_nan, [[3.0, 2.0], [2.0, 2.0]])
(x_nan, y_nan) = remove_na(x, y, paired=True)
assert np.allclose(x_nan, [[7.0, 6.0]])
assert np.allclose(y_nan, [[2.0, 2.0]])
(x_nan, y_nan) = remove_na(x, y, paired=False, axis='columns')
assert np.allclose(x_nan, [[4.0], [4.0], [7.0]])
assert np.allclose(y_nan, [[6.0], [3.0], [2.0]])
remove_na(x, None, paired=False)
with pytest.raises(AssertionError):
remove_na(x, y=[])
def test_check_eftype(self):
    """_check_eftype accepts a known effect-size name and a fake one.

    NOTE(review): the return value for 'fake' is never asserted — presumably
    _check_eftype returns a bool rather than raising; confirm and assert it.
    """
    eftype = 'cohen'
    _check_eftype(eftype)
    eftype = 'fake'
    _check_eftype(eftype)
def test_check_dataframe(self):
    """_check_dataframe validates dv/between/within/subject/effects
    combinations and raises ValueError on inconsistent arguments."""
    # valid combinations
    _check_dataframe(dv='Values', between='Group', effects='between', data=df)
    _check_dataframe(dv='Values', within='Time', subject='Subject', effects='within', data=df)
    _check_dataframe(dv='Values', within='Time', subject='Subject', between='Group', effects='interaction', data=df)
    # dv identical to a factor column
    with pytest.raises(ValueError):
        _check_dataframe(dv='Group', between='Group', effects='between', data=df)
    # missing data argument
    with pytest.raises(ValueError):
        _check_dataframe(dv='Values', between='Group', effects='between')
    # missing dv
    with pytest.raises(ValueError):
        _check_dataframe(between='Group', effects='between', data=df)
    # unknown effects keyword
    with pytest.raises(ValueError):
        _check_dataframe(dv='Values', between='Group', effects='wrong', data=df)
    # effects declared without the matching factor argument
    with pytest.raises(ValueError):
        _check_dataframe(effects='within', dv='Values', data=df)
    with pytest.raises(ValueError):
        _check_dataframe(effects='between', dv='Values', data=df)
    # interaction requires both between and within
    with pytest.raises(ValueError):
        _check_dataframe(between='Group', effects='interaction', dv='Values', data=df)
    # within effects require a subject column
    with pytest.raises(ValueError):
        _check_dataframe(dv='Values', between='Group', within='Time', effects='within', data=df)
def _is_statsmodels_installed(self):
    """The module-level _is_statsmodels_installed() returns a bool.

    NOTE(review): method name lacks the 'test_' prefix, so pytest will not
    collect it — confirm whether this was intentional.
    """
    assert isinstance(_is_statsmodels_installed(), bool)
def _is_sklearn_installed(self):
    """The module-level _is_sklearn_installed() returns a bool.

    NOTE(review): method name lacks the 'test_' prefix, so pytest will not
    collect it — confirm whether this was intentional.
    """
    assert isinstance(_is_sklearn_installed(), bool)
def _is_mpmath_installed(self):
    """The module-level _is_mpmath_installed() returns a bool.

    NOTE(review): method name lacks the 'test_' prefix, so pytest will not
    collect it — confirm whether this was intentional.
    """
    assert isinstance(_is_mpmath_installed(), bool)
def test_git_archive_export_ignore(wd: WorkDir, monkeypatch: pytest.MonkeyPatch) -> None:
    """find_files honors git export-ignore attributes from .git/info/attributes."""
    wd.write('test1.txt', 'test')
    wd.write('test2.txt', 'test')
    # test1 is explicitly NOT export-ignored; test2 IS export-ignored
    wd.write('.git/info/attributes', '/test1.txt -export-ignore\n/test2.txt export-ignore')
    wd('git add test1.txt test2.txt')
    wd.commit()
    monkeypatch.chdir(wd.cwd)
    # only the non-ignored file should be listed
    assert (setuptools_scm._file_finders.find_files('.') == [opj('.', 'test1.txt')])
def test_weird_key_names_dict_params():
    """substitute_params handles mapping keys that themselves contain '%s'
    and other odd characters."""
    # bytes value -> plain single-quoted literal
    res = substitute_params('SELECT * FROM cust WHERE salesrep = %(n %s ##ame)s', {'n %s ##ame': b'John Doe'})
    eq_(res, b"SELECT * FROM cust WHERE salesrep = 'John Doe'")
    # str value -> national character (N'...') literal
    res = substitute_params('SELECT * FROM cust WHERE salesrep = %(n %s ##ame)s', {'n %s ##ame': 'John Doe'})
    eq_(res, b"SELECT * FROM cust WHERE salesrep = N'John Doe'")
def test_available_languages(dict_tmp_path, monkeypatch):
    """available_languages merges remote language info with locally installed
    dictionary files (note pl-PL keeps its older local file name)."""
    # create one outdated local dict (pl-PL v2-0) and one current one (en-US)
    for f in ['pl-PL-2-0.bdic', english().remote_filename]:
        (dict_tmp_path / f).touch()
    monkeypatch.setattr(dictcli, 'language_list_from_api', (lambda : [(lang.code, lang.remote_filename) for lang in langs()]))
    languages = sorted(dictcli.available_languages(), key=(lambda lang: lang.code))
    assert (languages == [dictcli.Language(code='af-ZA', name='Afrikaans (South Africa)', remote_filename='af-ZA-3-0.bdic', local_filename=None), dictcli.Language(code='en-US', name='English (United States)', remote_filename='en-US-7-1.bdic', local_filename=None), dictcli.Language(code='pl-PL', name='Polish (Poland)', remote_filename='pl-PL-3-0.bdic', local_filename='pl-PL-2-0.bdic')])
# NOTE(review): bare call below looks like a decorator stripped of its '@'
# (likely '@with_fixtures(...)'); confirm against upstream.
_fixtures(WebFixture, DisclosedInputFixture)
def test_validation_of_undisclosed_yet_required_input(web_fixture, disclosed_input_fixture):
    """A required input hidden by the trigger field must not block form
    submission, and its value stays unset on the model object."""
    fixture = disclosed_input_fixture
    wsgi_app = web_fixture.new_wsgi_app(enable_js=True, child_factory=fixture.MyForm.factory())
    web_fixture.reahl_server.set_app(wsgi_app)
    browser = web_fixture.driver_browser
    browser.open('/')
    # initially the Email input is disclosed
    assert browser.is_element_present(XPath.input_labelled('Email'))
    # toggling the trigger hides it
    browser.click(XPath.input_labelled('Trigger field'))
    assert (not browser.is_element_present(XPath.input_labelled('Email')))
    # form submits fine with the hidden-but-required input left empty
    browser.click(XPath.button_labelled('click me'))
    assert (not fixture.submitted_model_object.email)
class ValidateResult(object):
    """Outcome of a single authentication validation attempt.

    Bundles the kind of credential that was validated, whether the credential
    was missing, an optional error message, and a ValidatedAuthContext holding
    whichever entity (user/token/robot/...) was authenticated.
    """

    def __init__(self, kind, missing=False, user=None, token=None, oauthtoken=None, robot=None, appspecifictoken=None, signed_data=None, error_message=None, sso_token=None):
        self.kind = kind
        self.missing = missing
        self.error_message = error_message
        # All entity arguments are forwarded into the validated context.
        self.context = ValidatedAuthContext(user=user, token=token, oauthtoken=oauthtoken, robot=robot, appspecifictoken=appspecifictoken, signed_data=signed_data, sso_token=sso_token)

    def tuple(self):
        """Return a comparable tuple representation of this result."""
        return (self.kind, self.missing, self.error_message, self.context.tuple())

    def __eq__(self, other):
        # Two results are equal when their tuple representations match.
        return self.tuple() == other.tuple()

    def apply_to_context(self):
        """Push the validated context onto the current request context."""
        self.context.apply_to_request_context()

    def with_kind(self, kind):
        """Return a copy of this result with a different kind but the same context."""
        clone = ValidateResult(kind, missing=self.missing, error_message=self.error_message)
        clone.context = self.context
        return clone

    def __repr__(self):
        return f'ValidateResult: {self.kind} (missing: {self.missing}, error: {self.error_message})'

    def authed_user(self):
        """The authenticated user entity, delegated to the context."""
        return self.context.authed_user

    def has_nonrobot_user(self):
        """Whether a non-robot user was authenticated, delegated to the context."""
        return self.context.has_nonrobot_user

    def auth_valid(self):
        """True when something other than the anonymous entity was authenticated."""
        return self.context.entity_kind != ContextEntityKind.anonymous
def ql_syscall_sysinfo(ql: Qiling, info: int):
    """Emulate sysinfo(2): write a synthetic ``struct sysinfo`` to the guest
    buffer at *info* and return 0 (success).

    The values are placeholders, not real host statistics.

    BUG FIX: three tuple entries were empty ``(, ql.pack)`` — a syntax error;
    they are filled with placeholder values below (TODO confirm intended
    values against upstream).
    """
    # (value, packer) pairs in struct sysinfo field order; pack / pack16 /
    # pack32 emit native-word / 16-bit / 32-bit integers respectively.
    fields = (
        (4660, ql.pack),        # uptime (seconds since boot)
        (8192, ql.pack),        # loads[0]: 1-minute load average
        (8192, ql.pack),        # loads[1]: 5-minute load average
        (8192, ql.pack),        # loads[2]: 15-minute load average
        (0x10000000, ql.pack),  # totalram  -- placeholder; TODO confirm
        (0x10000000, ql.pack),  # freeram   -- placeholder; TODO confirm
        (0, ql.pack),           # sharedram -- placeholder; TODO confirm
        (0, ql.pack),           # bufferram
        (0, ql.pack),           # totalswap
        (0, ql.pack),           # freeswap
        (1, ql.pack16),         # procs: number of current processes
        (0, ql.pack),           # totalhigh
        (0, ql.pack),           # freehigh
        (0, ql.pack32),         # mem_unit: memory unit size in bytes
    )
    data = b''.join(pmethod(val) for (val, pmethod) in fields)
    # pad the packed struct out to the size expected by the guest
    ql.mem.write(info, data.ljust(64, b'\x00'))
    return 0
def localize_to_utc(time, location):
    """Convert *time* to UTC.

    Naive datetime objects are first localized to ``location.tz``; pandas
    objects are converted with tz_convert, falling back to tz_localize when
    they are naive.
    """
    if isinstance(time, dt.datetime):
        # stdlib datetime path: localize naive values, then shift to UTC
        if time.tzinfo is None:
            time = pytz.timezone(location.tz).localize(time)
        return time.astimezone(pytz.utc)
    # pandas path: tz_convert raises TypeError for tz-naive objects
    try:
        return time.tz_convert('UTC')
    except TypeError:
        return time.tz_localize(location.tz).tz_convert('UTC')
class SuperResTransforms(TransformsConfig):
    """Transform configuration for the super-resolution task: sources are
    bilinearly down-sampled copies of 256x256 images; targets are the plain
    256x256 images."""

    def __init__(self, opts):
        super(SuperResTransforms, self).__init__(opts)

    def get_transforms(self):
        """Return the dict of train/test/inference transform pipelines."""
        if self.opts.resize_factors is None:
            self.opts.resize_factors = '1,2,4,8,16,32'
        factors = [int(f) for f in self.opts.resize_factors.split(',')]
        print('Performing down-sampling with factors: {}'.format(factors))

        def plain():
            # resize -> tensor -> normalize to [-1, 1]
            return transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])

        def downsampled():
            # same as plain() but with a bilinear down/up-sampling degradation
            return transforms.Compose([
                transforms.Resize((256, 256)),
                augmentations.BilinearResize(factors=factors),
                transforms.Resize((256, 256)),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])

        return {
            'transform_gt_train': plain(),
            'transform_source': downsampled(),
            'transform_test': plain(),
            'transform_inference': downsampled(),
        }
def query_info():
    """Ask the running Portal for its status and print process/server info,
    then stop the reactor. Prints a message if Evennia is not running."""

    def on_status(status):
        # unpack the parsed status; only the process and server info are used
        (_, _, _, _, pinfo, sinfo) = _parse_status(status)
        _print_info(pinfo, sinfo)
        _reactor_stop()

    def on_portal_running(response):
        query_status(on_status)

    def on_portal_not_running(fail):
        print('Evennia is not running.')

    send_instruction(PSTATUS, None, on_portal_running, on_portal_not_running)
class UNetDecoder(nn.Module):
    """U-Net decoder with additional cross-scale ('cross_conv') fusion of
    encoder skips, optional deep supervision, and per-stage segmentation heads.

    NOTE(review): the non-final branch of forward() looks partially broken —
    see inline notes; confirm against the intended design before relying on it.
    """

    def __init__(self, encoder: Union[(PlainConvEncoder, ResidualEncoder)], num_classes: int, n_conv_per_stage: Union[(int, Tuple[(int, ...)], List[int])], deep_supervision, nonlin_first: bool=False):
        super().__init__()
        self.deep_supervision = deep_supervision
        self.encoder = encoder
        self.num_classes = num_classes
        n_stages_encoder = len(encoder.output_channels)
        # broadcast a scalar conv count to every decoder stage
        if isinstance(n_conv_per_stage, int):
            n_conv_per_stage = ([n_conv_per_stage] * (n_stages_encoder - 1))
        assert (len(n_conv_per_stage) == (n_stages_encoder - 1)), ('n_conv_per_stage must have as many entries as we have resolution stages - 1 (n_stages in encoder - 1), here: %d' % n_stages_encoder)
        transpconv_op = get_matching_convtransp(conv_op=encoder.conv_op)
        stages = []
        transpconvs = []
        seg_layers = []
        (cross_conv_kernel_size, cross_conv_padding) = (3, 1)
        # channels of all concatenated skips except the bottleneck
        total_channels = sum(encoder.output_channels[:(- 1)])
        # one 3x3 conv+BN per decoder stage, mapping the concatenated skips to
        # that stage's skip channel count
        self.cross_conv = [nn.Sequential(nn.Conv2d(total_channels, encoder.output_channels[(- (s + 2))], kernel_size=cross_conv_kernel_size, stride=1, padding=cross_conv_padding, bias=False), nn.BatchNorm2d(encoder.output_channels[(- (s + 2))])) for s in range(len(encoder.output_channels[:(- 1)]))]
        self.cross_conv = nn.Sequential(*self.cross_conv)
        # standard U-Net decoder: transpconv up, concat skip, conv stack, seg head
        for s in range(1, n_stages_encoder):
            input_features_below = encoder.output_channels[(- s)]
            input_features_skip = encoder.output_channels[(- (s + 1))]
            stride_for_transpconv = encoder.strides[(- s)]
            transpconvs.append(transpconv_op(input_features_below, input_features_skip, stride_for_transpconv, stride_for_transpconv, bias=encoder.conv_bias))
            stages.append(StackedConvBlocks(n_conv_per_stage[(s - 1)], encoder.conv_op, (2 * input_features_skip), input_features_skip, encoder.kernel_sizes[(- (s + 1))], 1, encoder.conv_bias, encoder.norm_op, encoder.norm_op_kwargs, encoder.dropout_op, encoder.dropout_op_kwargs, encoder.nonlin, encoder.nonlin_kwargs, nonlin_first))
            seg_layers.append(encoder.conv_op(input_features_skip, num_classes, 1, 1, 0, bias=True))
        self.stages = nn.ModuleList(stages)
        self.transpconvs = nn.ModuleList(transpconvs)
        self.seg_layers = nn.ModuleList(seg_layers)
        print(f'using my unet'.center(50, '='))

    def forward(self, skips):
        """Decode from the bottleneck skip upward; returns one segmentation map
        (or a high-to-low-resolution list when deep supervision is enabled)."""
        lres_input = skips[(- 1)]
        seg_outputs = []
        for s in range(len(self.stages)):
            x = self.transpconvs[s](lres_input)
            dispatch = []
            if (s == (len(self.stages) - 1)):
                # final stage: plain U-Net skip concatenation
                x = torch.cat((x, skips[(- (s + 2))]), 1)
            else:
                # resample all non-first skips to x's spatial size
                for y in skips[1:]:
                    if (y.shape[(- 1)] < x.shape[(- 1)]):
                        dispatch.append(F.interpolate(y, x.shape[(- 2):]))
                    elif (y.shape[(- 1)] > x.shape[(- 1)]):
                        dispatch.append(F.adaptive_max_pool2d(y, x.shape[(- 2):]))
                    else:
                        dispatch.append(y)
                dispatch = torch.cat(dispatch, dim=1)
                # NOTE(review): torch.cat without dim= concatenates on dim 0
                # (batch), unlike every other cat here which uses dim 1 —
                # looks like a bug; confirm intent.
                x = torch.cat([x, self.cross_conv[(- (s + 1))](dispatch)])
                # NOTE(review): 'dispatch' is a Tensor at this point, so the
                # .append calls below would raise AttributeError unless
                # skips[:-3] is empty and s >= 2 — this path looks like dead
                # or experimental code; confirm against the intended design.
                for y in skips[:(- 3)]:
                    if (y.shape[(- 1)] < x.shape[(- 1)]):
                        y1 = torch.nn.functional.interpolate(y, x.shape[(- 2):], mode='bilinear')
                    elif (y.shape[(- 1)] > x.shape[(- 1)]):
                        y1 = my_roi_pool_2(y, x.shape[(- 2):], pool_op='roi_align')
                    else:
                        y1 = y
                    dispatch.append(y1)
                if (s < 2):
                    dispatch.append(skips[(- (s + 2))])
                    dispatch = self.cross_conv[s](torch.concat(dispatch, dim=1))
                    x = torch.cat((x, dispatch), 1)
                else:
                    dispatch = self.cross_conv[s](torch.concat(dispatch, dim=1))
                    x = torch.cat((x, dispatch), 1)
            x = self.stages[s](x)
            if self.deep_supervision:
                seg_outputs.append(self.seg_layers[s](x))
            elif (s == (len(self.stages) - 1)):
                # without deep supervision only the last head is applied
                seg_outputs.append(self.seg_layers[(- 1)](x))
            lres_input = x
        # reverse so outputs go from highest to lowest resolution
        seg_outputs = seg_outputs[::(- 1)]
        if (not self.deep_supervision):
            r = seg_outputs[0]
        else:
            r = seg_outputs
        return r

    def compute_conv_feature_map_size(self, input_size):
        """Estimate the total feature-map element count produced by the decoder
        for a given input size (used for memory budgeting)."""
        skip_sizes = []
        # reproduce the encoder's downsampling to get each skip's spatial size
        for s in range((len(self.encoder.strides) - 1)):
            skip_sizes.append([(i // j) for (i, j) in zip(input_size, self.encoder.strides[s])])
            input_size = skip_sizes[(- 1)]
        assert (len(skip_sizes) == len(self.stages))
        output = np.int64(0)
        for s in range(len(self.stages)):
            # conv blocks of this stage
            output += self.stages[s].compute_conv_feature_map_size(skip_sizes[(- (s + 1))])
            # transpconv output
            output += np.prod([self.encoder.output_channels[(- (s + 2))], *skip_sizes[(- (s + 1))]], dtype=np.int64)
            # segmentation head output (when it is actually computed)
            if (self.deep_supervision or (s == (len(self.stages) - 1))):
                output += np.prod([self.num_classes, *skip_sizes[(- (s + 1))]], dtype=np.int64)
        return output
class ESIM(nn.Module):
    """ESIM (Enhanced Sequential Inference Model) for sentence-pair
    classification: embed -> BiLSTM encode -> soft cross-attention ->
    enhancement (concat/diff/product) -> BiLSTM composition -> pooled MLP.

    NOTE(review): `_classification_sts` is built but never used — the isSTS
    branch in forward() also calls `_classification`; confirm intent.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_size, embeddings=None, padding_idx=0, dropout=0.5, num_classes=2, device='cpu', isSTS=False):
        super(ESIM, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.hidden_size = hidden_size
        self.num_classes = num_classes
        self.dropout = dropout
        self.device = device
        # when True, forward() returns raw logits only (no softmax)
        self.isSTS = isSTS
        self._word_embedding = nn.Embedding(self.vocab_size, self.embedding_dim, padding_idx=padding_idx, _weight=embeddings)
        if self.dropout:
            self._rnn_dropout = RNNDropout(p=self.dropout)
        self._encoding = Seq2SeqEncoder(nn.LSTM, self.embedding_dim, self.hidden_size, bidirectional=True)
        self._attention = SoftmaxAttention()
        # 4 enhanced features x 2 directions -> back to hidden_size
        self._projection = nn.Sequential(nn.Linear(((4 * 2) * self.hidden_size), self.hidden_size), nn.ReLU())
        self._composition = Seq2SeqEncoder(nn.LSTM, self.hidden_size, self.hidden_size, bidirectional=True)
        # pooled vector is [avg_a, max_a, avg_b, max_b], each bidirectional
        self._classification = nn.Sequential(nn.Dropout(p=self.dropout), nn.Linear(((2 * 4) * self.hidden_size), self.hidden_size), nn.Tanh(), nn.Dropout(p=self.dropout), nn.Linear(self.hidden_size, self.num_classes))
        self._classification_sts = nn.Sequential(nn.Dropout(p=self.dropout), nn.Linear(((2 * 4) * self.hidden_size), self.hidden_size), nn.Tanh(), nn.Dropout(p=self.dropout), nn.Linear(self.hidden_size, self.num_classes))
        self.apply(_init_esim_weights)

    def forward(self, premises, premises_lengths, hypotheses, hypotheses_lengths):
        """Run the ESIM pipeline on a batch of premise/hypothesis pairs.

        Returns logits only when isSTS, otherwise (logits, probabilities).
        """
        premises_mask = get_mask(premises, premises_lengths).to(self.device)
        hypotheses_mask = get_mask(hypotheses, hypotheses_lengths).to(self.device)
        embedded_premises = self._word_embedding(premises)
        embedded_hypotheses = self._word_embedding(hypotheses)
        if self.dropout:
            embedded_premises = self._rnn_dropout(embedded_premises)
            embedded_hypotheses = self._rnn_dropout(embedded_hypotheses)
        encoded_premises = self._encoding(embedded_premises, premises_lengths)
        encoded_hypotheses = self._encoding(embedded_hypotheses, hypotheses_lengths)
        # soft-align each side against the other
        (attended_premises, attended_hypotheses) = self._attention(encoded_premises, premises_mask, encoded_hypotheses, hypotheses_mask)
        # ESIM enhancement: [a, a~, a - a~, a * a~]
        enhanced_premises = torch.cat([encoded_premises, attended_premises, (encoded_premises - attended_premises), (encoded_premises * attended_premises)], dim=(- 1))
        enhanced_hypotheses = torch.cat([encoded_hypotheses, attended_hypotheses, (encoded_hypotheses - attended_hypotheses), (encoded_hypotheses * attended_hypotheses)], dim=(- 1))
        projected_premises = self._projection(enhanced_premises)
        projected_hypotheses = self._projection(enhanced_hypotheses)
        if self.dropout:
            projected_premises = self._rnn_dropout(projected_premises)
            projected_hypotheses = self._rnn_dropout(projected_hypotheses)
        v_ai = self._composition(projected_premises, premises_lengths)
        v_bj = self._composition(projected_hypotheses, hypotheses_lengths)
        # masked average pooling over time
        v_a_avg = (torch.sum((v_ai * premises_mask.unsqueeze(1).transpose(2, 1)), dim=1) / torch.sum(premises_mask, dim=1, keepdim=True))
        v_b_avg = (torch.sum((v_bj * hypotheses_mask.unsqueeze(1).transpose(2, 1)), dim=1) / torch.sum(hypotheses_mask, dim=1, keepdim=True))
        # masked max pooling (padding replaced before max)
        (v_a_max, _) = replace_masked(v_ai, premises_mask, (- .0)).max(dim=1)
        (v_b_max, _) = replace_masked(v_bj, hypotheses_mask, (- .0)).max(dim=1)
        v = torch.cat([v_a_avg, v_a_max, v_b_avg, v_b_max], dim=1)
        if (self.isSTS == True):
            # STS mode: return raw logits only
            logits = self._classification(v)
            return logits
        logits = self._classification(v)
        probabilities = nn.functional.softmax(logits, dim=(- 1))
        return (logits, probabilities)
# NOTE(review): bare "_env" below looks like a decorator stripped of its "@"
# (likely "@_env"); left as-is pending confirmation against upstream.
_env
def fillnodata(image, mask=None, max_search_distance=100.0, smoothing_iterations=0):
    """Fill masked pixels of *image* by interpolating from surrounding pixels.

    Parameters
    ----------
    image : ndarray or MaskedArray
        The raster band to fill.
    mask : ndarray, optional
        Selects the pixels to treat as valid; presumably nonzero == valid,
        per the underlying _fillnodata convention — TODO confirm. When None
        and *image* is a MaskedArray, it is derived by inverting image.mask.
    max_search_distance : float
        Maximum pixel distance searched for interpolation values.
    smoothing_iterations : int
        Number of post-interpolation smoothing passes.

    Raises
    ------
    ValueError
        If no usable mask or image ndarray is available.
    """
    if ((mask is None) and isinstance(image, MaskedArray)):
        # MaskedArray convention is True == invalid, hence the inversion
        mask = (~ image.mask)
    if (not dtypes.is_ndarray(mask)):
        # BUG FIX: message previously read "An mask array is required"
        raise ValueError('A mask array is required')
    if isinstance(image, MaskedArray):
        image = image.data
    if (not dtypes.is_ndarray(image)):
        raise ValueError('An image array is required')
    # normalize parameter types before handing off to the C-level routine
    max_search_distance = float(max_search_distance)
    smoothing_iterations = int(smoothing_iterations)
    return _fillnodata(image, mask, max_search_distance, smoothing_iterations)
class SpinOutputter(Thread):
    """Daemon thread that draws a spinning-cursor progress line on stdout,
    showing the most recent message set via set_next()."""

    def __init__(self, initial_message):
        super(SpinOutputter, self).__init__()
        # previous_line is remembered so _clear_line can blank it fully
        self.previous_line = ''
        self.next_line = initial_message
        self.running = True
        # daemon thread: never blocks interpreter shutdown
        self.daemon = True

    # NOTE(review): plain function used as a static helper — accessed via the
    # class in run(), which works in Python 3; an @staticmethod decorator
    # would make the intent explicit.
    def spinning_cursor():
        """Infinite generator cycling through the | / - \\ cursor glyphs."""
        while 1:
            for cursor in '|/-\\':
                (yield cursor)

    def set_next(self, text):
        """Set the status text (bytes) shown next; only the first line is
        kept, control characters stripped, truncated to 80 columns."""
        first_line = text.split(b'\n')[0].strip()
        first_line = remove_control_characters(first_line)
        self.next_line = first_line[:80]

    def _clear_line(self):
        """Blank out the previously drawn line and return the cursor home."""
        sys.stdout.write('\r')
        # +2 covers the spinner glyph and the space after it
        sys.stdout.write((' ' * (len(self.previous_line) + 2)))
        sys.stdout.flush()
        sys.stdout.write('\r')
        sys.stdout.flush()
        self.previous_line = ''

    def stop(self):
        """Erase the spinner line and stop the run loop."""
        self._clear_line()
        self.running = False

    def run(self):
        # redraw loop: clear, draw spinner glyph + dimmed status, sleep
        spinner = SpinOutputter.spinning_cursor()
        while self.running:
            self._clear_line()
            sys.stdout.write('\r')
            sys.stdout.flush()
            sys.stdout.write(next(spinner))
            sys.stdout.write(' ')
            sys.stdout.write(colored(self.next_line, attrs=['dark']))
            sys.stdout.flush()
            self.previous_line = self.next_line
            time.sleep(0.25)
# NOTE(review): line below looks like a decorator stripped of its prefix
# (possibly '@pytest.mark.unit' or similar); confirm against upstream.
.unit()
class TestFDCapture():
    """Tests for capture.FDCapture: capturing raw file descriptors
    (stdout/stderr/stdin and arbitrary fds)."""

    def test_simple(self, tmpfile):
        """Capture only records writes made between start() and snap()."""
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        data = b'hello'
        os.write(fd, data)
        # snap before start() is an API misuse and must assert
        pytest.raises(AssertionError, cap.snap)
        cap.done()
        cap = capture.FDCapture(fd)
        cap.start()
        os.write(fd, data)
        s = cap.snap()
        cap.done()
        assert (s == 'hello')

    def test_simple_many(self, tmpfile):
        """Repeated capture cycles on the same fd keep working."""
        for _ in range(10):
            self.test_simple(tmpfile)

    def test_simple_many_check_open_files(self, tmp_path):
        """Repeated captures must not leak open file descriptors."""
        with lsof_check(), tmp_path.joinpath('task_module.py').open('wb+') as tmpfile:
            self.test_simple_many(tmpfile)

    def test_simple_fail_second_start(self, tmpfile):
        """start() after done() is rejected."""
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        cap.done()
        pytest.raises(AssertionError, cap.start)

    def test_stderr(self):
        """fd 2 capture sees what is printed to sys.stderr."""
        cap = capture.FDCapture(2)
        cap.start()
        print('hello', file=sys.stderr)
        s = cap.snap()
        cap.done()
        assert (s == 'hello\n')

    def test_stdin(self):
        """fd 0 capture makes reads from stdin return EOF (empty)."""
        cap = capture.FDCapture(0)
        cap.start()
        x = os.read(0, 100).strip()
        cap.done()
        assert (x == b'')

    def test_writeorg(self, tmpfile):
        """writeorg() bypasses the capture and writes to the original file."""
        (data1, data2) = (b'foo', b'bar')
        cap = capture.FDCapture(tmpfile.fileno())
        cap.start()
        tmpfile.write(data1)
        tmpfile.flush()
        cap.writeorg(data2.decode('ascii'))
        scap = cap.snap()
        cap.done()
        # captured content holds only the normal write...
        assert (scap == data1.decode('ascii'))
        with open(tmpfile.name, 'rb') as stmp_file:
            stmp = stmp_file.read()
            # ...while the real file received only the bypassed write
            assert (stmp == data2)

    def test_simple_resume_suspend(self):
        """suspend() stops capturing, resume() continues; done() is final."""
        with saved_fd(1):
            cap = capture.FDCapture(1)
            cap.start()
            data = b'hello'
            os.write(1, data)
            sys.stdout.write('whatever')
            s = cap.snap()
            # both the raw fd write and the sys.stdout write are captured
            assert (s == 'hellowhatever')
            cap.suspend()
            # while suspended, nothing is recorded
            os.write(1, b'world')
            sys.stdout.write('qlwkej')
            assert (not cap.snap())
            cap.resume()
            os.write(1, b'but now')
            sys.stdout.write(' yes\n')
            s = cap.snap()
            assert (s == 'but now yes\n')
            cap.suspend()
            cap.done()
            # suspend after done() is an API misuse
            pytest.raises(AssertionError, cap.suspend)
            assert (repr(cap) == "<FDCapture 1 oldfd={} _state='done' tmpfile={!r}>".format(cap.targetfd_save, cap.tmpfile))
            assert (repr(cap.syscapture) == "<SysCapture stdout _old=<UNSET> _state='done' tmpfile={!r}>".format(cap.syscapture.tmpfile))

    def test_capfd_sys_stdout_mode(self, capfd):
        """Under capfd, sys.stdout stays a text-mode stream."""
        assert ('b' not in sys.stdout.mode)
class Model(object):
    """TensorFlow (v1 graph mode) character-level BiLSTM binary classifier:
    embedding -> BiLSTM (final states) -> projection -> softmax loss."""

    def __init__(self, config):
        self.config = config
        self.lr = config['lr']
        self.char_dim = config['char_dim']
        self.lstm_dim = config['lstm_dim']
        # binary classification (num_tags == 2)
        self.num_tags = 2
        self.num_chars = config['num_char']
        self.global_step = tf.Variable(0, trainable=False)
        self.best_dev_f1 = tf.Variable(0.0, trainable=False)
        self.best_test_f1 = tf.Variable(0.0, trainable=False)
        self.initializer = initializers.xavier_initializer()
        # [batch, time] character ids; id 0 is treated as padding below
        self.char_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='ChatInputs')
        # [batch] class labels
        self.targets = tf.placeholder(dtype=tf.int64, shape=[None], name='Targets')
        self.dropout = tf.placeholder(dtype=tf.float32, name='Dropout')
        # sequence lengths = count of non-zero (non-padding) ids per row
        used = tf.sign(tf.abs(self.char_inputs))
        length = tf.reduce_sum(used, reduction_indices=1)
        self.lengths = tf.cast(length, tf.int32)
        self.batch_size = tf.shape(self.char_inputs)[0]
        self.num_steps = tf.shape(self.char_inputs)[(- 1)]
        lstm_inputs = self.embedding_layer(self.char_inputs, config)
        lstm_output = self.biLSTM_layer(lstm_inputs, self.lstm_dim, self.lengths)
        self.logits = self.project_layer(lstm_output)
        self.loss = self.loss_layer(self.logits)
        with tf.variable_scope('optimizer'):
            optimizer = self.config['optimizer']
            if (optimizer == 'sgd'):
                self.opt = tf.train.GradientDescentOptimizer(self.lr)
            elif (optimizer == 'adam'):
                self.opt = tf.train.AdamOptimizer(self.lr)
            elif (optimizer == 'adgrad'):
                self.opt = tf.train.AdagradOptimizer(self.lr)
            else:
                raise KeyError
            # clip every gradient element to [-clip, clip]
            grads_vars = self.opt.compute_gradients(self.loss)
            capped_grads_vars = [[tf.clip_by_value(g, (- self.config['clip']), self.config['clip']), v] for (g, v) in grads_vars]
            self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)
        correct_prediction = tf.equal(tf.argmax(self.logits, (- 1)), self.targets)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)

    def embedding_layer(self, char_inputs, config, name=None):
        """Look up character embeddings; returns [batch, time, char_dim]."""
        embedding = []
        # embeddings are pinned to CPU
        with tf.variable_scope(('char_embedding' if (not name) else name)), tf.device('/cpu:0'):
            self.char_lookup = tf.get_variable(name='char_embedding', shape=[self.num_chars, self.char_dim], initializer=self.initializer)
            embedding.append(tf.nn.embedding_lookup(self.char_lookup, char_inputs))
            embed = tf.concat(embedding, axis=(- 1))
        return embed

    def biLSTM_layer(self, lstm_inputs, lstm_dim, lengths, name=None):
        """Run a bidirectional LSTM and return the concatenated final hidden
        states of both directions: [batch, 2 * lstm_dim]."""
        with tf.variable_scope(('char_BiLSTM' if (not name) else name)):
            lstm_cell = {}
            for direction in ['forward', 'backward']:
                with tf.variable_scope(direction):
                    lstm_cell[direction] = rnn.CoupledInputForgetGateLSTMCell(lstm_dim, use_peepholes=True, initializer=self.initializer, state_is_tuple=True)
            (outputs, (encoder_fw_final_state, encoder_bw_final_state)) = tf.nn.bidirectional_dynamic_rnn(lstm_cell['forward'], lstm_cell['backward'], lstm_inputs, dtype=tf.float32, sequence_length=lengths)
            final_state = tf.concat((encoder_fw_final_state.h, encoder_bw_final_state.h), (- 1))
        return final_state

    def contact_layer(self, entity_inputs, config, name=None):
        """Pass-through concat of entity inputs (single-element concat).

        NOTE(review): currently a no-op wrapper; possibly a stub for extra
        features — confirm before removing.
        """
        embedding = []
        embedding.append(entity_inputs)
        embed = tf.concat(embedding, axis=(- 1))
        return embed

    def project_layer(self, lstm_outputs, name=None):
        """Two dense layers (tanh hidden, linear output) producing per-example
        logits of shape [batch, num_tags]."""
        hidden_dim = (self.lstm_dim * 2)
        lstm_outputs = tf.reshape(lstm_outputs, [self.batch_size, hidden_dim])
        with tf.variable_scope(('project' if (not name) else name)):
            with tf.variable_scope('hidden'):
                W = tf.get_variable('W', shape=[hidden_dim, self.lstm_dim], dtype=tf.float32, initializer=self.initializer)
                b = tf.get_variable('b', shape=[self.lstm_dim], dtype=tf.float32, initializer=tf.zeros_initializer())
                hidden = tf.tanh(tf.nn.xw_plus_b(lstm_outputs, W, b))
            with tf.variable_scope('logits'):
                W = tf.get_variable('W', shape=[self.lstm_dim, self.num_tags], dtype=tf.float32, initializer=self.initializer)
                b = tf.get_variable('b', shape=[self.num_tags], dtype=tf.float32, initializer=tf.zeros_initializer())
                pred = tf.nn.xw_plus_b(hidden, W, b)
        return tf.reshape(pred, [(- 1), self.num_tags])

    def loss_layer(self, project_logits, name=None):
        """Mean sparse softmax cross-entropy against self.targets."""
        with tf.variable_scope(('loss' if (not name) else name)):
            loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=project_logits, labels=self.targets))
        return loss

    def create_feed_dict(self, is_train, batch):
        """Build a feed dict from a (strings, chars, targets) batch; dropout
        keep-prob is config['dropout_keep'] in training, 1.0 otherwise."""
        (strings, chars, targets) = batch
        feed_dict = {self.char_inputs: np.asarray(chars), self.dropout: 1.0}
        feed_dict[self.targets] = np.asarray(targets)
        if is_train:
            feed_dict[self.dropout] = self.config['dropout_keep']
        return feed_dict

    def run_step(self, sess, is_train, batch):
        """One session step: returns (global_step, loss) when training,
        (logits, accuracy) otherwise."""
        feed_dict = self.create_feed_dict(is_train, batch)
        if is_train:
            (global_step, loss, _) = sess.run([self.global_step, self.loss, self.train_op], feed_dict)
            return (global_step, loss)
        else:
            (logits, acc) = sess.run([self.logits, self.accuracy], feed_dict)
            return (logits, acc)

    def decode(self, logits):
        """Argmax-decode logits into predicted class indices.

        NOTE(review): runs .eval() per example — needs a default session and
        is slow; np.argmax would do the same without graph ops.
        """
        paths = []
        for score in logits:
            path = tf.cast(tf.argmax(score, axis=(- 1)), tf.int32).eval()
            paths.append(path)
        return paths

    def evaluate(self, sess, data_manager):
        """Return per-batch accuracies and their mean over data_manager."""
        results = []
        for batch in data_manager.iter_batch():
            (scores, acc) = self.run_step(sess, False, batch)
            results.append(acc)
        acc_ = np.mean(results)
        return (results, acc_)

    def evaluate_line(self, sess, inputs):
        """Predict the class for a single preprocessed input line."""
        (scores, acc) = self.run_step(sess, False, inputs)
        x = self.decode(scores)
        pred = [int(x[0])]
        return pred

    def evaluete_(self, sess, data_manager, id_to_tag):
        """Compute precision/recall/F1 over data_manager.

        NOTE(review): method name looks like a typo for 'evaluate_'; also
        builds new graph ops inside the loop (memory growth) and divides by
        zero when tp+fn or tp+fp is 0 — confirm before production use.
        """
        results = []
        (tp, tn, fp, fn) = (0, 0, 0, 0)
        for batch in data_manager.iter_batch():
            actuals = tf.cast(batch[(- 1)], tf.int64)
            (scores, acc) = self.run_step(sess, False, batch)
            predictions = tf.argmax(scores, 1)
            ones_like_actuals = tf.ones_like(actuals)
            zeros_like_actuals = tf.zeros_like(actuals)
            ones_like_predictions = tf.ones_like(predictions)
            zeros_like_predictions = tf.zeros_like(predictions)
            # confusion-matrix counts for the positive class (label 1)
            tp_op = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, ones_like_actuals), tf.equal(predictions, ones_like_predictions)), 'float'))
            tn_op = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, zeros_like_actuals), tf.equal(predictions, zeros_like_predictions)), 'float'))
            fp_op = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, zeros_like_actuals), tf.equal(predictions, ones_like_predictions)), 'float'))
            fn_op = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(actuals, ones_like_actuals), tf.equal(predictions, zeros_like_predictions)), 'float'))
            (tp_, tn_, fp_, fn_) = sess.run([tp_op, tn_op, fp_op, fn_op])
            tp += tp_
            tn += tn_
            fp += fp_
            fn += fn_
        recall = (float(tp) / (float(tp) + float(fn)))
        precision = (float(tp) / (float(tp) + float(fp)))
        f1_score = ((2 * (precision * recall)) / (precision + recall))
        return (precision, recall, f1_score)
def parse_question_answers(response):
vqa_data = re.findall('\\{.*?\\}', response)
for json_string in vqa_data:
json_string = json_string.replace('\t', ' ').replace('\n', ' ')
json_string = json_string.replace(',}', '}')
json_string = json_string.replace('`', '"').replace('\', "', '", "')
try:
json_dict = json.loads(json_string)
if ((not json_dict['question']) or (not json_dict['answer'])):
continue
except Exception:
continue
(yield json_dict) |
def make_py_pkg_info(context: Context, pkg_dir: Path) -> PackageInfo:
    """Build a PackageInfo for the Python package at *pkg_dir*.

    Runs ``hatch project metadata`` inside the package directory and reads the
    name and version from its JSON output.
    """
    with context.cd(pkg_dir):
        raw_metadata = ensure_result(context, 'hatch project metadata').stdout
    metadata = json.loads(raw_metadata)
    return PackageInfo(name=metadata['name'], path=pkg_dir, language='py', version=metadata['version'])
class LaSOTVideo(Video):
    """A LaSOT benchmark video: standard Video plus per-frame absent flags.

    Parameters mirror Video, with *absent* holding the per-frame
    target-absent labels (stored as an int8 array).
    """

    def __init__(self, name, root, video_dir, init_rect, img_names, gt_rect, attr, absent, load_img=False):
        super(LaSOTVideo, self).__init__(name, root, video_dir, init_rect, img_names, gt_rect, attr, load_img)
        # per-frame absent flags; presumably 1 == target absent — TODO confirm
        self.absent = np.array(absent, np.int8)

    def load_tracker(self, path, tracker_names=None, store=True):
        """Load tracker result files (one CSV of boxes per line) from
        ``path/<tracker>/<video>.txt``.

        When *store* is True the trajectories are cached in self.pred_trajs;
        otherwise the first loaded trajectory is returned.
        """
        if (not tracker_names):
            # default to every subdirectory matching *path*
            tracker_names = [x.split('/')[(- 1)] for x in glob(path) if os.path.isdir(x)]
        if isinstance(tracker_names, str):
            tracker_names = [tracker_names]
        for name in tracker_names:
            traj_file = os.path.join(path, name, (self.name + '.txt'))
            if (not os.path.exists(traj_file)):
                # BUG FIX: previously fell through with pred_traj unbound,
                # raising UnboundLocalError (or silently reusing the previous
                # tracker's trajectory); skip missing result files instead.
                print('File not exists: ', traj_file)
                continue
            with open(traj_file, 'r') as f:
                pred_traj = [list(map(float, x.strip().split(','))) for x in f.readlines()]
            if (self.name == 'monkey-17'):
                # known dataset quirk: results overrun ground truth; trim
                pred_traj = pred_traj[:len(self.gt_traj)]
            if store:
                self.pred_trajs[name] = pred_traj
            else:
                return pred_traj
        self.tracker_names = list(self.pred_trajs.keys())
# NOTE(review): line below looks like a decorator stripped of its prefix
# (likely '@pytest.mark.parametrize(...)'); confirm against upstream.
.parametrize('case', [CaseConnectInToWireComp, CaseConnectBitsConstToOutComp, CaseConnectConstToOutComp, CaseConnectBitSelToOutComp, CaseConnectSliceToOutComp, CaseBitSelOverBitSelComp, CaseBitSelOverPartSelComp, CasePartSelOverBitSelComp, CasePartSelOverPartSelComp])
def test_verilog_structural_L1(case):
    """Run the structural-translation L1 test harness over each case's DUT."""
    run_test(case, case.DUT())
class HyperParameters():
    """Command-line hyperparameter container with per-stage defaults.

    Stage 0 trains on static images (single object); stages 1-3 train on
    BL30K / DAVIS+YouTubeVOS with multi-object setups. Unset values fall back
    to stage-specific defaults. Access values dict-style via __getitem__.
    """

    def parse(self, unknown_arg_ok=False):
        """Parse CLI args into self.args and apply stage-dependent defaults.

        unknown_arg_ok: when True, unrecognized CLI arguments are ignored
        (parse_known_args) instead of raising.
        """
        parser = ArgumentParser()
        parser.add_argument('--benchmark', action='store_true')
        parser.add_argument('--no_amp', action='store_true')
        # dataset roots
        parser.add_argument('--static_root', help='Static training data root', default='data/static')
        parser.add_argument('--bl_root', help='Blender training data root', default='data/BL30K')
        parser.add_argument('--yv_root', help='YouTubeVOS data root', default='data/YouTube')
        parser.add_argument('--davis_root', help='DAVIS data root', default='data/DAVIS')
        parser.add_argument('--stage', help='Training stage (0-static images, 1-Blender dataset, 2-DAVIS+YouTubeVOS (300K), 3-DAVIS+YouTubeVOS (150K))', type=int, default=0)
        parser.add_argument('--num_workers', help='Number of datalaoder workers per process', type=int, default=8)
        # None here means "use the stage default" applied below
        parser.add_argument('-b', '--batch_size', help='Default is dependent on the training stage, see below', default=None, type=int)
        parser.add_argument('-i', '--iterations', help='Default is dependent on the training stage, see below', default=None, type=int)
        parser.add_argument('--steps', help='Default is dependent on the training stage, see below', nargs='*', default=None, type=int)
        parser.add_argument('--lr', help='Initial learning rate', type=float)
        parser.add_argument('--gamma', help='LR := LR*gamma at every decay step', default=0.1, type=float)
        parser.add_argument('--start_warm', help='hard example start ', type=int)
        parser.add_argument('--end_warm', help='hard example end ', type=int)
        parser.add_argument('--load_network', help='Path to pretrained network weight only')
        parser.add_argument('--load_model', help='Path to the model file, including network, optimizer and such')
        parser.add_argument('--id', help='Experiment UNIQUE id, use NULL to disable logging to tensorboard', default='NULL')
        parser.add_argument('--debug', help='Debug mode which logs information more often', action='store_true')
        parser.add_argument('--local_rank', default=0, type=int, help='Local rank of this process')
        if unknown_arg_ok:
            (args, _) = parser.parse_known_args()
            self.args = vars(args)
        else:
            self.args = vars(parser.parse_args())
        # automatic mixed precision is on unless --no_amp was given
        self.args['amp'] = (not self.args['no_amp'])
        # stage-dependent defaults (only fill values the user left unset)
        if (self.args['stage'] == 0):
            # stage 0: static-image pretraining, single object
            self.args['lr'] = none_or_default(self.args['lr'], 4e-05)
            self.args['batch_size'] = none_or_default(self.args['batch_size'], 16)
            self.args['iterations'] = none_or_default(self.args['iterations'], 40000)
            self.args['steps'] = none_or_default(self.args['steps'], [20000])
            self.args['start_warm'] = none_or_default(self.args['start_warm'], 3000)
            self.args['end_warm'] = none_or_default(self.args['end_warm'], 10000)
            self.args['single_object'] = True
        elif (self.args['stage'] == 1):
            # stage 1: BL30K pretraining
            self.args['lr'] = none_or_default(self.args['lr'], 6e-05)
            self.args['batch_size'] = none_or_default(self.args['batch_size'], 8)
            self.args['iterations'] = none_or_default(self.args['iterations'], 65000)
            self.args['steps'] = none_or_default(self.args['steps'], [50000])
            self.args['start_warm'] = none_or_default(self.args['start_warm'], 5000)
            self.args['end_warm'] = none_or_default(self.args['end_warm'], 20000)
            self.args['single_object'] = False
        elif (self.args['stage'] == 2):
            # stage 2: DAVIS+YouTubeVOS main training (300K schedule)
            self.args['lr'] = none_or_default(self.args['lr'], 6e-05)
            self.args['batch_size'] = none_or_default(self.args['batch_size'], 8)
            self.args['iterations'] = none_or_default(self.args['iterations'], 40000)
            self.args['steps'] = none_or_default(self.args['steps'], [30000])
            self.args['start_warm'] = none_or_default(self.args['start_warm'], 3000)
            self.args['end_warm'] = none_or_default(self.args['end_warm'], 10000)
            self.args['single_object'] = False
        elif (self.args['stage'] == 3):
            # stage 3: DAVIS+YouTubeVOS short schedule (150K)
            self.args['lr'] = none_or_default(self.args['lr'], 4e-05)
            self.args['batch_size'] = none_or_default(self.args['batch_size'], 8)
            self.args['iterations'] = none_or_default(self.args['iterations'], 30000)
            self.args['steps'] = none_or_default(self.args['steps'], [25000])
            self.args['start_warm'] = none_or_default(self.args['start_warm'], 3000)
            self.args['end_warm'] = none_or_default(self.args['end_warm'], 15000)
            self.args['single_object'] = False
        else:
            raise NotImplementedError

    def __getitem__(self, key):
        return self.args[key]

    def __setitem__(self, key, value):
        self.args[key] = value

    def __str__(self):
        return str(self.args)
def cascade_randomization(arch, num_layers_from_last=None):
    """Return a pretrained *arch* model whose trailing Conv2d feature layers
    are replaced with freshly (randomly) initialized ones of identical
    geometry — the cascading-randomization sanity check for saliency methods.

    Parameters
    ----------
    arch : str
        Name of a model constructor in ``models`` (must expose ``.features``).
    num_layers_from_last : int or None
        How many trailing Conv2d layers to re-initialize. None re-initializes
        every Conv2d layer (previously None crashed with TypeError on
        ``-1 * None``); 0 also selects all layers, matching the original
        slice semantics.
    """
    model = models.__dict__[arch](pretrained=True)
    # ordered keys of all Conv2d modules in the feature extractor
    conv2d_keys = [key for (key, module) in model.features._modules.items()
                   if isinstance(module, nn.Conv2d)]
    # BUG FIX: guard against None; note conv2d_keys[-0:] is the whole list,
    # preserving the original behavior for num_layers_from_last == 0
    if num_layers_from_last is None:
        selected_keys = conv2d_keys
    else:
        selected_keys = conv2d_keys[(- num_layers_from_last):]
    for key in selected_keys:
        layer = model.features._modules[key]
        # rebuild with identical geometry; the new layer gets default random init
        model.features._modules[key] = nn.Conv2d(layer.in_channels, layer.out_channels,
                                                 layer.kernel_size, stride=layer.stride,
                                                 padding=layer.padding)
    return model
# NOTE(review): bare call below looks like a decorator stripped of its '@'
# (likely '@with_fixtures(...)'); confirm against upstream.
_fixtures(WebFixture, DataTableFixture)
def test_sorting(web_fixture, data_table_fixture):
    """Clicking a DataTable column's sort link sorts by that column
    (ascending, then descending), clears other columns' sort state, and the
    chosen sort persists across pagination."""
    web_fixture.reahl_server.set_app(data_table_fixture.wsgi_app)
    web_fixture.quit_browser()
    browser = web_fixture.driver_browser
    browser.open('/')
    # initially no column is sorted
    assert (not data_table_fixture.is_column_sorted(1, 'ascending'))
    assert (not data_table_fixture.is_column_sorted(1, 'descending'))
    assert (not data_table_fixture.is_column_sorted(2, 'ascending'))
    assert (not data_table_fixture.is_column_sorted(2, 'descending'))
    # first click on column 1 sorts it ascending
    browser.click(data_table_fixture.xpath_for_sort_link_for_column(1))
    assert data_table_fixture.is_column_sorted(1, 'ascending')
    assert (data_table_fixture.get_table_row(1) == ['1', 'T'])
    assert (data_table_fixture.get_table_row(2) == ['2', 'H'])
    assert (data_table_fixture.get_table_row(3) == ['3', 'E'])
    assert data_table_fixture.is_column_sorted(2, None)
    # sorting column 2 clears column 1's sort indicator
    browser.click(data_table_fixture.xpath_for_sort_link_for_column(2))
    assert data_table_fixture.is_column_sorted(2, 'ascending')
    assert data_table_fixture.is_column_sorted(1, None)
    assert (data_table_fixture.get_table_row(1) == ['22', 'A'])
    assert (data_table_fixture.get_table_row(2) == ['9', 'B'])
    assert (data_table_fixture.get_table_row(3) == ['7', 'C'])
    # second click on the same column flips to descending
    browser.click(data_table_fixture.xpath_for_sort_link_for_column(2))
    assert data_table_fixture.is_column_sorted(2, 'descending')
    assert (data_table_fixture.get_table_row(1) == ['23', 'Z'])
    assert (data_table_fixture.get_table_row(2) == ['24', 'Y'])
    assert (data_table_fixture.get_table_row(3) == ['15', 'X'])
    # the chosen sort persists when navigating to page 4
    browser.click(XPath.link().with_text('4'))
    assert (data_table_fixture.get_table_row(1) == ['4', 'Q'])
    assert (data_table_fixture.get_table_row(2) == ['18', 'P'])
    assert (data_table_fixture.get_table_row(3) == ['11', 'O'])
    # re-sorting by column 1 re-renders the current page accordingly
    browser.click(data_table_fixture.xpath_for_sort_link_for_column(1))
    assert (data_table_fixture.get_table_row(1) == ['10', 'R'])
    assert (data_table_fixture.get_table_row(2) == ['11', 'O'])
    assert (data_table_fixture.get_table_row(3) == ['12', 'W'])
def play_many(pathserv, timeout=120):
    """Play the whole session playlist through ``core_play_many``.

    Raises ExceptionAttemptToBreakRawDataProtection when the session
    protects raw data (the check is bypassed in dev-debug mode).
    """
    conf = fs.get_session_configuration(pathserv)
    # Raw-data protection only applies outside dev-debug mode.
    if (not conf['dev_debug']) and conf['protect_raw_data']:
        raise mpexceptions.ExceptionAttemptToBreakRawDataProtection()
    core_play_many(pathserv, pathserv.session_playlist_generator(), timeout=timeout)
def create_quant_info(encoding, tensor_quantizer, opMode, useSymmetricEncoding=False, enabled=True, bitwidth=8):
    """Assemble a ``QcQuantizeInfo`` for a single tensor quantizer.

    Mutates ``encoding`` by setting its bitwidth, wraps the quantizer
    pointer for the C++ side, and fixes the info to per-tensor int
    quantization.
    """
    # The bitwidth lives on the encoding object itself.
    encoding.bw = bitwidth
    quant_info = libquant_info.QcQuantizeInfo()
    quant_info.encoding = [encoding]
    # The C++ layer receives the quantizer as a raw pointer value.
    quant_info.tensorQuantizerRef = [libpymo.PtrToInt64(tensor_quantizer)]
    quant_info.opMode = opMode
    quant_info.useSymmetricEncoding = useSymmetricEncoding
    quant_info.enabled = enabled
    # Fixed mode: integer data type, per-tensor (not per-channel).
    quant_info.isIntDataType = True
    quant_info.usePerChannelMode = False
    return quant_info
class AutomaticFailoverWrapper(object):
    """Database proxy that retries a failed statement on a fallback database.

    Wraps a primary peewee-style database; on ``OperationalError`` the
    statement is re-executed against ``fallback_db`` (when configured).
    All other attributes are delegated to the primary database.
    """

    def __init__(self, primary_db, fallback_db=None):
        self._primary_db = primary_db
        self._fallback_db = fallback_db

    def __getattr__(self, attribute):
        # __getattr__ only fires when normal lookup fails, so the methods
        # defined on this class (execute, execute_sql) are never delegated.
        if (attribute != 'execute_sql') and hasattr(self._primary_db, attribute):
            return getattr(self._primary_db, attribute)
        # Fix: the original `return getattr(self, attribute)` re-entered
        # __getattr__ forever (RecursionError) for missing attributes;
        # raise the conventional AttributeError instead.
        raise AttributeError(attribute)

    def execute(self, query, commit=SENTINEL, **context_options):
        """Render *query* to SQL via the primary db's context and execute it."""
        ctx = self.get_sql_context(**context_options)
        (sql, params) = ctx.sql(query).query()
        return self.execute_sql(sql, params, commit=commit)

    def execute_sql(self, sql, params=None, commit=SENTINEL):
        """Execute *sql* on the primary db, failing over on OperationalError.

        Raises:
            OperationalError: when the primary fails and either no fallback
                is configured or the fallback fails as well.
        """
        try:
            return self._primary_db.execute_sql(sql, params, commit)
        except OperationalError:
            # Fix: previously, with no fallback configured the exception was
            # silently swallowed and the method returned None.
            if self._fallback_db is None:
                raise
            return self._fallback_db.execute_sql(sql, params, commit)
@dataclasses.dataclass(frozen=True, slots=True)
class NodeResourceInfo:
    """Resource-database entry that stands in for a graph node.

    Identity (eq/hash) is carried by ``resource_index`` and
    ``node_identifier``; the name fields are display-only.
    """
    # NOTE(review): the source had lost its decorators — a bare
    # `(frozen=True, slots=True)` expression and `cls`-taking methods with
    # no @classmethod; both restored here.
    resource_index: int
    node_identifier: NodeIdentifier
    long_name: str = dataclasses.field(hash=False, repr=False)
    short_name: str = dataclasses.field(hash=False, repr=False)
    resource_type: ResourceType = dataclasses.field(init=False, hash=False, repr=False, default=ResourceType.NODE_IDENTIFIER)

    def __str__(self) -> str:
        return self.long_name

    @classmethod
    def from_node(cls, node: Node, context: NodeContext) -> NodeResourceInfo:
        """Build the resource entry for *node*, offset past the used indices."""
        return cls((context.database.first_unused_resource_index() + node.node_index), node.identifier, node.name, node.name)

    @classmethod
    def from_identifier(cls, identifier: NodeIdentifier, context: NodeContext) -> NodeResourceInfo:
        """Resolve *identifier* to a Node, then build its resource entry."""
        return cls.from_node(context.node_provider.node_by_identifier(identifier), context)

    def to_node(self, context: NodeContext) -> Node:
        """Invert ``from_node``: map this resource index back to its Node."""
        node_index = (self.resource_index - context.database.first_unused_resource_index())
        return typing.cast(Node, context.node_provider.all_nodes[node_index])

    def extra(self) -> dict:
        # No extra metadata for node resources.
        return {}
def get_ingress_cmd(interface_list: typing.List[str], network_parameters: typing.Dict[(str, str)], duration: int=300):
    """Build a shell command string that shapes *ingress* traffic with tc/netem.

    For each interface, ingress traffic is redirected to a matching ifb<i>
    device and a netem qdisc is attached there. The returned command applies
    the rules, sleeps for *duration* seconds, then tears everything down.

    Args:
        interface_list: Interface names; each must match ^[a-z0-9\\-\\\\_]+$.
        network_parameters: Impairments keyed by 'latency'/'loss'/'bandwidth';
            values are passed to netem verbatim (e.g. '50ms', '10%', '1mbit').
        duration: Seconds to keep the impairment active before cleanup.

    Returns:
        A single semicolon-joined shell command string.

    Raises:
        Exception: if an interface or derived ifb name fails validation.

    NOTE(review): the netem parameters from the trailing loop are appended
    only after the interface loop, i.e. onto the last accumulated
    'root netem' fragment, and the per-interface netem fragments are not
    ';'-terminated inside the loop — with more than one interface the
    generated command looks malformed. Confirm intended multi-interface
    behavior before relying on it.

    NOTE(review): network_parameters *values* are interpolated into the
    shell string without validation (only interface names are checked) —
    treat callers as trusted or sanitize values.
    """
    tc_set = tc_unset = tc_ls = ''
    # Map the public parameter names onto netem's option keywords.
    param_map = {'latency': 'delay', 'loss': 'loss', 'bandwidth': 'rate'}
    interface_pattern = re.compile('^[a-z0-9\\-\\\\_]+$')
    ifb_pattern = re.compile('^ifb[0-9]+$')
    for (i, interface) in enumerate(interface_list):
        # Validate the interface name before interpolating it into a shell command.
        if (not interface_pattern.match(interface)):
            logging.error('Interface name can only consist of alphanumeric characters')
            raise Exception("Interface '{0}' does not match the required regex pattern : ^[a-z0-9\\-\\\\_]+$".format(interface))
        # Each interface gets a dedicated ifb device, ifb0, ifb1, ...
        ifb_name = 'ifb{0}'.format(i)
        if (not ifb_pattern.match(ifb_name)):
            logging.error('Invalid IFB name')
            raise Exception("Interface '{0}' is an invalid IFB name. IFB name should follow the regex pattern ^ifb[0-9]+$".format(ifb_name))
        # Attach an ingress qdisc and mirror all IP traffic to the ifb device,
        # where egress shaping (netem) can be applied to ingress flows.
        tc_set += 'tc qdisc add dev {0} handle ffff: ingress;'.format(interface)
        tc_set += 'tc filter add dev {0} parent ffff: protocol ip u32 match u32 0 0 action mirred egress redirect dev {1};'.format(interface, ifb_name)
        tc_set = '{0} tc qdisc add dev {1} root netem'.format(tc_set, ifb_name)
        # Teardown mirrors the setup: drop the netem root and the ingress qdisc.
        tc_unset = '{0} tc qdisc del dev {1} root ;'.format(tc_unset, ifb_name)
        tc_unset += 'tc qdisc del dev {0} handle ffff: ingress;'.format(interface)
        # Listing command used to log qdisc state before and after the sleep.
        tc_ls = '{0} tc qdisc ls dev {1} ;'.format(tc_ls, ifb_name)
    # Append the requested netem options (delay/loss/rate) to the set command.
    for parameter in network_parameters.keys():
        tc_set += ' {0} {1} '.format(param_map[parameter], network_parameters[parameter])
    tc_set += ';'
    # apply -> show -> hold for `duration` -> tear down -> wait -> show again.
    exec_cmd = '{0} {1} sleep {2};{3} sleep 20;{4}'.format(tc_set, tc_ls, duration, tc_unset, tc_ls)
    return exec_cmd
class Effect1009(BaseEffect):
    """Medium Pulse Laser Specialization: passive damage bonus per skill level."""

    type = 'passive'

    # Restored @staticmethod (stripped in the source): the signature takes no
    # `self`, matching the other effect handlers in this file.
    @staticmethod
    def handler(fit, skill, context, projectionRange, **kwargs):
        # Boost damageMultiplier on every module requiring the specialization
        # skill, scaled by the trained skill level.
        fit.modules.filteredItemBoost(
            (lambda mod: mod.item.requiresSkill('Medium Pulse Laser Specialization')),
            'damageMultiplier',
            (skill.getModifiedItemAttr('damageMultiplierBonus') * skill.level),
            **kwargs)
class Ui_Settings(object):
def setupUi(self, Settings):
Settings.setObjectName('Settings')
Settings.resize(1082, 659)
Settings.setMinimumSize(QtCore.QSize(0, 0))
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(Settings)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName('horizontalLayout_2')
self.leftMenu = QtWidgets.QFrame(parent=Settings)
self.leftMenu.setMinimumSize(QtCore.QSize(250, 0))
self.leftMenu.setMaximumSize(QtCore.QSize(250, ))
self.leftMenu.setStyleSheet('')
self.leftMenu.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.leftMenu.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.leftMenu.setObjectName('leftMenu')
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.leftMenu)
self.verticalLayout_3.setContentsMargins(25, 3, 0, 0)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName('verticalLayout_3')
self.menu = QtWidgets.QFrame(parent=self.leftMenu)
self.menu.setMinimumSize(QtCore.QSize(0, 0))
self.menu.setMaximumSize(QtCore.QSize(, ))
self.menu.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.menu.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.menu.setObjectName('menu')
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.menu)
self.verticalLayout_2.setContentsMargins(0, 9, 0, 0)
self.verticalLayout_2.setObjectName('verticalLayout_2')
self.label_16 = QtWidgets.QLabel(parent=self.menu)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.label_16.setFont(font)
self.label_16.setObjectName('label_16')
self.verticalLayout_2.addWidget(self.label_16)
self.btn_home = QtWidgets.QPushButton(parent=self.menu)
self.btn_home.setMinimumSize(QtCore.QSize(0, 45))
self.btn_home.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_home.setObjectName('btn_home')
self.verticalLayout_2.addWidget(self.btn_home)
self.btn_users = QtWidgets.QPushButton(parent=self.menu)
self.btn_users.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_users.setObjectName('btn_users')
self.verticalLayout_2.addWidget(self.btn_users)
self.btn_system = QtWidgets.QPushButton(parent=self.menu)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(60)
sizePolicy.setVerticalStretch(45)
sizePolicy.setHeightForWidth(self.btn_system.sizePolicy().hasHeightForWidth())
self.btn_system.setSizePolicy(sizePolicy)
self.btn_system.setMinimumSize(QtCore.QSize(0, 45))
self.btn_system.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_system.setFocusPolicy(QtCore.Qt.FocusPolicy.TabFocus)
self.btn_system.setObjectName('btn_system')
self.verticalLayout_2.addWidget(self.btn_system)
self.btn_appearance = QtWidgets.QPushButton(parent=self.menu)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(60)
sizePolicy.setVerticalStretch(45)
sizePolicy.setHeightForWidth(self.btn_appearance.sizePolicy().hasHeightForWidth())
self.btn_appearance.setSizePolicy(sizePolicy)
self.btn_appearance.setMinimumSize(QtCore.QSize(0, 45))
self.btn_appearance.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_appearance.setFocusPolicy(QtCore.Qt.FocusPolicy.TabFocus)
self.btn_appearance.setObjectName('btn_appearance')
self.verticalLayout_2.addWidget(self.btn_appearance)
self.btn_notifications = QtWidgets.QPushButton(parent=self.menu)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(60)
sizePolicy.setVerticalStretch(45)
sizePolicy.setHeightForWidth(self.btn_notifications.sizePolicy().hasHeightForWidth())
self.btn_notifications.setSizePolicy(sizePolicy)
self.btn_notifications.setMinimumSize(QtCore.QSize(0, 45))
self.btn_notifications.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_notifications.setFocusPolicy(QtCore.Qt.FocusPolicy.TabFocus)
self.btn_notifications.setObjectName('btn_notifications')
self.verticalLayout_2.addWidget(self.btn_notifications)
self.btn_donations = QtWidgets.QPushButton(parent=self.menu)
self.btn_donations.setMinimumSize(QtCore.QSize(0, 45))
self.btn_donations.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_donations.setObjectName('btn_donations')
self.verticalLayout_2.addWidget(self.btn_donations)
self.btn_about = QtWidgets.QPushButton(parent=self.menu)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(60)
sizePolicy.setVerticalStretch(45)
sizePolicy.setHeightForWidth(self.btn_about.sizePolicy().hasHeightForWidth())
self.btn_about.setSizePolicy(sizePolicy)
self.btn_about.setMinimumSize(QtCore.QSize(0, 45))
self.btn_about.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_about.setFocusPolicy(QtCore.Qt.FocusPolicy.TabFocus)
self.btn_about.setObjectName('btn_about')
self.verticalLayout_2.addWidget(self.btn_about)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.btn_quit = QtWidgets.QPushButton(parent=self.menu)
self.btn_quit.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_quit.setObjectName('btn_quit')
self.verticalLayout_2.addWidget(self.btn_quit)
spacerItem1 = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)
self.verticalLayout_2.addItem(spacerItem1)
self.verticalLayout_3.addWidget(self.menu)
self.horizontalLayout_2.addWidget(self.leftMenu)
self.settings_stacked = QtWidgets.QStackedWidget(parent=Settings)
self.settings_stacked.setObjectName('settings_stacked')
self.pageSystem = QtWidgets.QWidget()
self.pageSystem.setObjectName('pageSystem')
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.pageSystem)
self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_7.setSpacing(0)
self.verticalLayout_7.setObjectName('verticalLayout_7')
self.scrollArea = QtWidgets.QScrollArea(parent=self.pageSystem)
self.scrollArea.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName('scrollArea')
self.system_scrollArea = QtWidgets.QWidget()
self.system_scrollArea.setGeometry(QtCore.QRect(0, 0, 832, 659))
self.system_scrollArea.setObjectName('system_scrollArea')
self.horizontalLayout_16 = QtWidgets.QHBoxLayout(self.system_scrollArea)
self.horizontalLayout_16.setObjectName('horizontalLayout_16')
self.verticalLayout_6 = QtWidgets.QVBoxLayout()
self.verticalLayout_6.setSpacing(0)
self.verticalLayout_6.setObjectName('verticalLayout_6')
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName('horizontalLayout')
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.contents_system = QtWidgets.QVBoxLayout()
self.contents_system.setContentsMargins((- 1), 0, (- 1), 0)
self.contents_system.setSpacing(10)
self.contents_system.setObjectName('contents_system')
self.label = QtWidgets.QLabel(parent=self.system_scrollArea)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.label.setFont(font)
self.label.setObjectName('label')
self.contents_system.addWidget(self.label)
self.frameSettings = QtWidgets.QFrame(parent=self.system_scrollArea)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frameSettings.sizePolicy().hasHeightForWidth())
self.frameSettings.setSizePolicy(sizePolicy)
self.frameSettings.setMinimumSize(QtCore.QSize(620, 0))
self.frameSettings.setMaximumSize(QtCore.QSize(620, ))
self.frameSettings.setStyleSheet('')
self.frameSettings.setFrameShape(QtWidgets.QFrame.Shape.StyledPanel)
self.frameSettings.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frameSettings.setObjectName('frameSettings')
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frameSettings)
self.verticalLayout_4.setContentsMargins(15, 15, 15, 15)
self.verticalLayout_4.setSpacing(15)
self.verticalLayout_4.setObjectName('verticalLayout_4')
self.start_system = QtWidgets.QCheckBox(parent=self.frameSettings)
self.start_system.setObjectName('start_system')
self.verticalLayout_4.addWidget(self.start_system)
self.keepBackground = QtWidgets.QCheckBox(parent=self.frameSettings)
self.keepBackground.setChecked(True)
self.keepBackground.setObjectName('keepBackground')
self.verticalLayout_4.addWidget(self.keepBackground)
self.lineWayland = QtWidgets.QFrame(parent=self.frameSettings)
self.lineWayland.setFrameShape(QtWidgets.QFrame.Shape.HLine)
self.lineWayland.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
self.lineWayland.setObjectName('lineWayland')
self.verticalLayout_4.addWidget(self.lineWayland)
self.wayland = QtWidgets.QCheckBox(parent=self.frameSettings)
self.wayland.setChecked(True)
self.wayland.setObjectName('wayland')
self.verticalLayout_4.addWidget(self.wayland)
self.contents_system.addWidget(self.frameSettings)
self.checkSpellChecker = QtWidgets.QCheckBox(parent=self.system_scrollArea)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.checkSpellChecker.setFont(font)
self.checkSpellChecker.setChecked(True)
self.checkSpellChecker.setObjectName('checkSpellChecker')
self.contents_system.addWidget(self.checkSpellChecker)
self.frameSpellChecker = QtWidgets.QFrame(parent=self.system_scrollArea)
self.frameSpellChecker.setMinimumSize(QtCore.QSize(620, 0))
self.frameSpellChecker.setMaximumSize(QtCore.QSize(620, ))
self.frameSpellChecker.setFrameShape(QtWidgets.QFrame.Shape.StyledPanel)
self.frameSpellChecker.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frameSpellChecker.setObjectName('frameSpellChecker')
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.frameSpellChecker)
self.horizontalLayout_5.setContentsMargins(15, 15, 15, 15)
self.horizontalLayout_5.setSpacing(15)
self.horizontalLayout_5.setObjectName('horizontalLayout_5')
self.comboSpellChecker = QtWidgets.QComboBox(parent=self.frameSpellChecker)
self.comboSpellChecker.setObjectName('comboSpellChecker')
self.horizontalLayout_5.addWidget(self.comboSpellChecker)
self.btnApply = QtWidgets.QPushButton(parent=self.frameSpellChecker)
self.btnApply.setMaximumSize(QtCore.QSize(150, ))
self.btnApply.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btnApply.setObjectName('btnApply')
self.horizontalLayout_5.addWidget(self.btnApply)
self.contents_system.addWidget(self.frameSpellChecker)
self.label_MenuBar = QtWidgets.QLabel(parent=self.system_scrollArea)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.label_MenuBar.setFont(font)
self.label_MenuBar.setObjectName('label_MenuBar')
self.contents_system.addWidget(self.label_MenuBar)
self.frameMenuBar = QtWidgets.QFrame(parent=self.system_scrollArea)
self.frameMenuBar.setMinimumSize(QtCore.QSize(0, 0))
self.frameMenuBar.setMaximumSize(QtCore.QSize(620, ))
self.frameMenuBar.setFrameShape(QtWidgets.QFrame.Shape.StyledPanel)
self.frameMenuBar.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frameMenuBar.setObjectName('frameMenuBar')
self.verticalLayout_19 = QtWidgets.QVBoxLayout(self.frameMenuBar)
self.verticalLayout_19.setContentsMargins(15, 15, 15, 15)
self.verticalLayout_19.setSpacing(15)
self.verticalLayout_19.setObjectName('verticalLayout_19')
self.menubar = QtWidgets.QCheckBox(parent=self.frameMenuBar)
self.menubar.setObjectName('menubar')
self.verticalLayout_19.addWidget(self.menubar)
self.contents_system.addWidget(self.frameMenuBar)
self.check_zap_window = QtWidgets.QCheckBox(parent=self.system_scrollArea)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.check_zap_window.setFont(font)
self.check_zap_window.setChecked(True)
self.check_zap_window.setObjectName('check_zap_window')
self.contents_system.addWidget(self.check_zap_window)
self.frameZapWindow = QtWidgets.QFrame(parent=self.system_scrollArea)
self.frameZapWindow.setMinimumSize(QtCore.QSize(0, 0))
self.frameZapWindow.setMaximumSize(QtCore.QSize(620, ))
self.frameZapWindow.setFrameShape(QtWidgets.QFrame.Shape.StyledPanel)
self.frameZapWindow.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frameZapWindow.setObjectName('frameZapWindow')
self.verticalLayout_29 = QtWidgets.QVBoxLayout(self.frameZapWindow)
self.verticalLayout_29.setContentsMargins(15, 15, 15, 15)
self.verticalLayout_29.setSpacing(15)
self.verticalLayout_29.setObjectName('verticalLayout_29')
self.label_15 = QtWidgets.QLabel(parent=self.frameZapWindow)
self.label_15.setObjectName('label_15')
self.verticalLayout_29.addWidget(self.label_15)
self.cb_maximize = QtWidgets.QCheckBox(parent=self.frameZapWindow)
self.cb_maximize.setObjectName('cb_maximize')
self.verticalLayout_29.addWidget(self.cb_maximize)
self.cb_minimize = QtWidgets.QCheckBox(parent=self.frameZapWindow)
self.cb_minimize.setObjectName('cb_minimize')
self.verticalLayout_29.addWidget(self.cb_minimize)
self.cb_positLeft = QtWidgets.QCheckBox(parent=self.frameZapWindow)
self.cb_positLeft.setObjectName('cb_positLeft')
self.verticalLayout_29.addWidget(self.cb_positLeft)
self.contents_system.addWidget(self.frameZapWindow)
self.horizontalLayout.addLayout(self.contents_system)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
self.verticalLayout_6.addLayout(self.horizontalLayout)
spacerItem4 = QtWidgets.QSpacerItem(20, 301, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout_6.addItem(spacerItem4)
self.horizontalLayout_16.addLayout(self.verticalLayout_6)
self.scrollArea.setWidget(self.system_scrollArea)
self.verticalLayout_7.addWidget(self.scrollArea)
self.settings_stacked.addWidget(self.pageSystem)
self.pageAppearance = QtWidgets.QWidget()
self.pageAppearance.setObjectName('pageAppearance')
self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.pageAppearance)
self.verticalLayout_14.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_14.setSpacing(0)
self.verticalLayout_14.setObjectName('verticalLayout_14')
self.scrollArea_4 = QtWidgets.QScrollArea(parent=self.pageAppearance)
self.scrollArea_4.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.scrollArea_4.setWidgetResizable(True)
self.scrollArea_4.setObjectName('scrollArea_4')
self.appearance_scrollArea = QtWidgets.QWidget()
self.appearance_scrollArea.setGeometry(QtCore.QRect(0, 0, 832, 659))
self.appearance_scrollArea.setObjectName('appearance_scrollArea')
self.verticalLayout_33 = QtWidgets.QVBoxLayout(self.appearance_scrollArea)
self.verticalLayout_33.setObjectName('verticalLayout_33')
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setSpacing(0)
self.horizontalLayout_7.setObjectName('horizontalLayout_7')
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_7.addItem(spacerItem5)
self.container_appearance = QtWidgets.QVBoxLayout()
self.container_appearance.setContentsMargins((- 1), 0, (- 1), (- 1))
self.container_appearance.setSpacing(10)
self.container_appearance.setObjectName('container_appearance')
self.label_2 = QtWidgets.QLabel(parent=self.appearance_scrollArea)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.label_2.setFont(font)
self.label_2.setObjectName('label_2')
self.container_appearance.addWidget(self.label_2)
self.frameAppearance = QtWidgets.QFrame(parent=self.appearance_scrollArea)
self.frameAppearance.setMinimumSize(QtCore.QSize(620, 0))
self.frameAppearance.setMaximumSize(QtCore.QSize(620, ))
self.frameAppearance.setFrameShape(QtWidgets.QFrame.Shape.StyledPanel)
self.frameAppearance.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frameAppearance.setObjectName('frameAppearance')
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.frameAppearance)
self.verticalLayout_10.setContentsMargins(15, 15, 15, 15)
self.verticalLayout_10.setSpacing(15)
self.verticalLayout_10.setObjectName('verticalLayout_10')
self.label_5 = QtWidgets.QLabel(parent=self.frameAppearance)
font = QtGui.QFont()
font.setPointSize(12)
self.label_5.setFont(font)
self.label_5.setObjectName('label_5')
self.verticalLayout_10.addWidget(self.label_5)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setContentsMargins(0, (- 1), 0, (- 1))
self.horizontalLayout_10.setObjectName('horizontalLayout_10')
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_10.addItem(spacerItem6)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName('verticalLayout')
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName('horizontalLayout_3')
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_3.addItem(spacerItem7)
self.rb_system = QtWidgets.QRadioButton(parent=self.frameAppearance)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rb_system.sizePolicy().hasHeightForWidth())
self.rb_system.setSizePolicy(sizePolicy)
self.rb_system.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.rb_system.setText('')
self.rb_system.setChecked(True)
self.rb_system.setObjectName('rb_system')
self.horizontalLayout_3.addWidget(self.rb_system)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_3.addItem(spacerItem8)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.label_11 = QtWidgets.QLabel(parent=self.frameAppearance)
self.label_11.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label_11.setObjectName('label_11')
self.verticalLayout.addWidget(self.label_11)
self.horizontalLayout_10.addLayout(self.verticalLayout)
spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_10.addItem(spacerItem9)
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setObjectName('verticalLayout_8')
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName('horizontalLayout_8')
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_8.addItem(spacerItem10)
self.rb_light = QtWidgets.QRadioButton(parent=self.frameAppearance)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rb_light.sizePolicy().hasHeightForWidth())
self.rb_light.setSizePolicy(sizePolicy)
self.rb_light.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.rb_light.setText('')
self.rb_light.setObjectName('rb_light')
self.horizontalLayout_8.addWidget(self.rb_light)
spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_8.addItem(spacerItem11)
self.verticalLayout_8.addLayout(self.horizontalLayout_8)
self.label_12 = QtWidgets.QLabel(parent=self.frameAppearance)
self.label_12.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label_12.setObjectName('label_12')
self.verticalLayout_8.addWidget(self.label_12)
self.horizontalLayout_10.addLayout(self.verticalLayout_8)
spacerItem12 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_10.addItem(spacerItem12)
self.verticalLayout_9 = QtWidgets.QVBoxLayout()
self.verticalLayout_9.setObjectName('verticalLayout_9')
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName('horizontalLayout_9')
spacerItem13 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_9.addItem(spacerItem13)
self.rb_dark = QtWidgets.QRadioButton(parent=self.frameAppearance)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rb_dark.sizePolicy().hasHeightForWidth())
self.rb_dark.setSizePolicy(sizePolicy)
self.rb_dark.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.rb_dark.setText('')
self.rb_dark.setObjectName('rb_dark')
self.horizontalLayout_9.addWidget(self.rb_dark)
spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_9.addItem(spacerItem14)
self.verticalLayout_9.addLayout(self.horizontalLayout_9)
self.label_13 = QtWidgets.QLabel(parent=self.frameAppearance)
self.label_13.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label_13.setObjectName('label_13')
self.verticalLayout_9.addWidget(self.label_13)
self.horizontalLayout_10.addLayout(self.verticalLayout_9)
spacerItem15 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_10.addItem(spacerItem15)
self.verticalLayout_10.addLayout(self.horizontalLayout_10)
self.container_appearance.addWidget(self.frameAppearance)
self.disableTrayIcon = QtWidgets.QCheckBox(parent=self.appearance_scrollArea)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
self.disableTrayIcon.setFont(font)
self.disableTrayIcon.setObjectName('disableTrayIcon')
self.container_appearance.addWidget(self.disableTrayIcon)
self.frameTray = QtWidgets.QFrame(parent=self.appearance_scrollArea)
self.frameTray.setMinimumSize(QtCore.QSize(620, 0))
self.frameTray.setMaximumSize(QtCore.QSize(620, ))
self.frameTray.setFrameShape(QtWidgets.QFrame.Shape.StyledPanel)
self.frameTray.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frameTray.setObjectName('frameTray')
self.verticalLayout_21 = QtWidgets.QVBoxLayout(self.frameTray)
self.verticalLayout_21.setContentsMargins(15, 15, 15, 15)
self.verticalLayout_21.setSpacing(15)
self.verticalLayout_21.setObjectName('verticalLayout_21')
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName('gridLayout')
self.label_tray_def = QtWidgets.QLabel(parent=self.frameTray)
self.label_tray_def.setMinimumSize(QtCore.QSize(32, 32))
self.label_tray_def.setMaximumSize(QtCore.QSize(32, 32))
self.label_tray_def.setText('')
self.label_tray_def.setScaledContents(True)
self.label_tray_def.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label_tray_def.setObjectName('label_tray_def')
self.gridLayout.addWidget(self.label_tray_def, 0, 0, 1, 1)
self.label_tray_dark = QtWidgets.QLabel(parent=self.frameTray)
self.label_tray_dark.setMinimumSize(QtCore.QSize(32, 32))
self.label_tray_dark.setMaximumSize(QtCore.QSize(32, 32))
self.label_tray_dark.setText('')
self.label_tray_dark.setScaledContents(True)
self.label_tray_dark.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label_tray_dark.setObjectName('label_tray_dark')
self.gridLayout.addWidget(self.label_tray_dark, 2, 0, 1, 1)
self.label_tray_light = QtWidgets.QLabel(parent=self.frameTray)
self.label_tray_light.setMinimumSize(QtCore.QSize(32, 32))
self.label_tray_light.setMaximumSize(QtCore.QSize(32, 32))
self.label_tray_light.setText('')
self.label_tray_light.setScaledContents(True)
self.label_tray_light.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label_tray_light.setObjectName('label_tray_light')
self.gridLayout.addWidget(self.label_tray_light, 1, 0, 1, 1)
self.rb_tray_default = QtWidgets.QRadioButton(parent=self.frameTray)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rb_tray_default.sizePolicy().hasHeightForWidth())
self.rb_tray_default.setSizePolicy(sizePolicy)
self.rb_tray_default.setMaximumSize(QtCore.QSize(, ))
self.rb_tray_default.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
self.rb_tray_default.setChecked(True)
self.rb_tray_default.setObjectName('rb_tray_default')
self.gridLayout.addWidget(self.rb_tray_default, 0, 2, 1, 1)
self.rb_tray_light = QtWidgets.QRadioButton(parent=self.frameTray)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rb_tray_light.sizePolicy().hasHeightForWidth())
self.rb_tray_light.setSizePolicy(sizePolicy)
self.rb_tray_light.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
self.rb_tray_light.setObjectName('rb_tray_light')
self.gridLayout.addWidget(self.rb_tray_light, 1, 2, 1, 1)
self.rb_tray_dark = QtWidgets.QRadioButton(parent=self.frameTray)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rb_tray_dark.sizePolicy().hasHeightForWidth())
self.rb_tray_dark.setSizePolicy(sizePolicy)
self.rb_tray_dark.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
self.rb_tray_dark.setObjectName('rb_tray_dark')
self.gridLayout.addWidget(self.rb_tray_dark, 2, 2, 1, 1)
self.verticalLayout_21.addLayout(self.gridLayout)
self.container_appearance.addWidget(self.frameTray)
self.horizontalLayout_7.addLayout(self.container_appearance)
spacerItem16 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_7.addItem(spacerItem16)
self.verticalLayout_33.addLayout(self.horizontalLayout_7)
spacerItem17 = QtWidgets.QSpacerItem(20, 142, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout_33.addItem(spacerItem17)
self.scrollArea_4.setWidget(self.appearance_scrollArea)
self.verticalLayout_14.addWidget(self.scrollArea_4)
self.settings_stacked.addWidget(self.pageAppearance)
self.pageNotifications = QtWidgets.QWidget()
self.pageNotifications.setObjectName('pageNotifications')
self.verticalLayout_18 = QtWidgets.QVBoxLayout(self.pageNotifications)
self.verticalLayout_18.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_18.setSpacing(0)
self.verticalLayout_18.setObjectName('verticalLayout_18')
self.scrollArea_2 = QtWidgets.QScrollArea(parent=self.pageNotifications)
self.scrollArea_2.setMinimumSize(QtCore.QSize(0, 0))
self.scrollArea_2.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.scrollArea_2.setWidgetResizable(True)
self.scrollArea_2.setObjectName('scrollArea_2')
self.notification_scrollArea = QtWidgets.QWidget()
self.notification_scrollArea.setGeometry(QtCore.QRect(0, 0, 832, 659))
self.notification_scrollArea.setObjectName('notification_scrollArea')
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.notification_scrollArea)
self.verticalLayout_5.setObjectName('verticalLayout_5')
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setContentsMargins((- 1), 0, (- 1), (- 1))
self.horizontalLayout_14.setSpacing(0)
self.horizontalLayout_14.setObjectName('horizontalLayout_14')
spacerItem18 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_14.addItem(spacerItem18)
self.container_notifications = QtWidgets.QVBoxLayout()
self.container_notifications.setContentsMargins((- 1), 0, (- 1), (- 1))
self.container_notifications.setSpacing(10)
self.container_notifications.setObjectName('container_notifications')
self.titileNotifications = QtWidgets.QLabel(parent=self.notification_scrollArea)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.titileNotifications.setFont(font)
self.titileNotifications.setObjectName('titileNotifications')
self.container_notifications.addWidget(self.titileNotifications)
self.verticalLayout_13 = QtWidgets.QVBoxLayout()
self.verticalLayout_13.setSpacing(0)
self.verticalLayout_13.setObjectName('verticalLayout_13')
self.notify_desktop = QtWidgets.QCheckBox(parent=self.notification_scrollArea)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(False)
self.notify_desktop.setFont(font)
self.notify_desktop.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
self.notify_desktop.setLayoutDirection(QtCore.Qt.LayoutDirection.LeftToRight)
self.notify_desktop.setObjectName('notify_desktop')
self.verticalLayout_13.addWidget(self.notify_desktop)
self.container_notifications.addLayout(self.verticalLayout_13)
self.frameNotifications = QtWidgets.QFrame(parent=self.notification_scrollArea)
self.frameNotifications.setEnabled(True)
self.frameNotifications.setMinimumSize(QtCore.QSize(620, 0))
self.frameNotifications.setMaximumSize(QtCore.QSize(620, ))
self.frameNotifications.setFrameShape(QtWidgets.QFrame.Shape.StyledPanel)
self.frameNotifications.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frameNotifications.setObjectName('frameNotifications')
self.verticalLayout_16 = QtWidgets.QVBoxLayout(self.frameNotifications)
self.verticalLayout_16.setContentsMargins(15, 15, 15, 15)
self.verticalLayout_16.setSpacing(15)
self.verticalLayout_16.setObjectName('verticalLayout_16')
self.show_photo = QtWidgets.QCheckBox(parent=self.frameNotifications)
self.show_photo.setEnabled(True)
self.show_photo.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
self.show_photo.setChecked(True)
self.show_photo.setObjectName('show_photo')
self.verticalLayout_16.addWidget(self.show_photo)
self.show_name = QtWidgets.QCheckBox(parent=self.frameNotifications)
self.show_name.setEnabled(True)
self.show_name.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
self.show_name.setChecked(True)
self.show_name.setObjectName('show_name')
self.verticalLayout_16.addWidget(self.show_name)
self.show_msg = QtWidgets.QCheckBox(parent=self.frameNotifications)
self.show_msg.setEnabled(True)
self.show_msg.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))
self.show_msg.setChecked(True)
self.show_msg.setObjectName('show_msg')
self.verticalLayout_16.addWidget(self.show_msg)
self.show_sound = QtWidgets.QCheckBox(parent=self.frameNotifications)
self.show_sound.setChecked(True)
self.show_sound.setObjectName('show_sound')
self.verticalLayout_16.addWidget(self.show_sound)
self.container_notifications.addWidget(self.frameNotifications)
self.horizontalLayout_14.addLayout(self.container_notifications)
spacerItem19 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_14.addItem(spacerItem19)
self.verticalLayout_5.addLayout(self.horizontalLayout_14)
spacerItem20 = QtWidgets.QSpacerItem(20, 292, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout_5.addItem(spacerItem20)
self.scrollArea_2.setWidget(self.notification_scrollArea)
self.verticalLayout_18.addWidget(self.scrollArea_2)
self.settings_stacked.addWidget(self.pageNotifications)
self.pageDonations = QtWidgets.QWidget()
self.pageDonations.setObjectName('pageDonations')
self.verticalLayout_32 = QtWidgets.QVBoxLayout(self.pageDonations)
self.verticalLayout_32.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_32.setSpacing(0)
self.verticalLayout_32.setObjectName('verticalLayout_32')
self.scrollArea_3 = QtWidgets.QScrollArea(parent=self.pageDonations)
self.scrollArea_3.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.scrollArea_3.setWidgetResizable(True)
self.scrollArea_3.setObjectName('scrollArea_3')
self.donations_scrollArea = QtWidgets.QWidget()
self.donations_scrollArea.setGeometry(QtCore.QRect(0, 0, 832, 659))
self.donations_scrollArea.setObjectName('donations_scrollArea')
self.verticalLayout_30 = QtWidgets.QVBoxLayout(self.donations_scrollArea)
self.verticalLayout_30.setObjectName('verticalLayout_30')
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setSpacing(0)
self.horizontalLayout_20.setObjectName('horizontalLayout_20')
spacerItem21 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_20.addItem(spacerItem21)
self.container_donations = QtWidgets.QVBoxLayout()
self.container_donations.setContentsMargins((- 1), 0, (- 1), (- 1))
self.container_donations.setSpacing(10)
self.container_donations.setObjectName('container_donations')
self.title_donations = QtWidgets.QLabel(parent=self.donations_scrollArea)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.title_donations.setFont(font)
self.title_donations.setObjectName('title_donations')
self.container_donations.addWidget(self.title_donations)
self.frame_donations = QtWidgets.QFrame(parent=self.donations_scrollArea)
self.frame_donations.setMinimumSize(QtCore.QSize(620, 200))
self.frame_donations.setMaximumSize(QtCore.QSize(620, ))
self.frame_donations.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.frame_donations.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frame_donations.setObjectName('frame_donations')
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.frame_donations)
self.verticalLayout_11.setContentsMargins(15, 15, 15, 15)
self.verticalLayout_11.setSpacing(15)
self.verticalLayout_11.setObjectName('verticalLayout_11')
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setSpacing(20)
self.gridLayout_2.setObjectName('gridLayout_2')
self.btn_kofi = QtWidgets.QPushButton(parent=self.frame_donations)
self.btn_kofi.setMinimumSize(QtCore.QSize(0, 100))
self.btn_kofi.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_kofi.setText('')
self.btn_kofi.setObjectName('btn_kofi')
self.gridLayout_2.addWidget(self.btn_kofi, 0, 1, 1, 1)
self.btn_paypal = QtWidgets.QPushButton(parent=self.frame_donations)
self.btn_paypal.setMinimumSize(QtCore.QSize(0, 100))
self.btn_paypal.setMaximumSize(QtCore.QSize(, ))
self.btn_paypal.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_paypal.setText('')
self.btn_paypal.setFlat(False)
self.btn_paypal.setObjectName('btn_paypal')
self.gridLayout_2.addWidget(self.btn_paypal, 0, 0, 1, 1)
self.btn_pix = QtWidgets.QPushButton(parent=self.frame_donations)
self.btn_pix.setMinimumSize(QtCore.QSize(0, 100))
self.btn_pix.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_pix.setText('')
self.btn_pix.setFlat(False)
self.btn_pix.setObjectName('btn_pix')
self.gridLayout_2.addWidget(self.btn_pix, 1, 0, 1, 1)
self.btn_gitSponor = QtWidgets.QPushButton(parent=self.frame_donations)
self.btn_gitSponor.setMinimumSize(QtCore.QSize(0, 100))
self.btn_gitSponor.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_gitSponor.setText('')
self.btn_gitSponor.setObjectName('btn_gitSponor')
self.gridLayout_2.addWidget(self.btn_gitSponor, 1, 1, 1, 1)
self.verticalLayout_11.addLayout(self.gridLayout_2)
self.container_donations.addWidget(self.frame_donations)
self.horizontalLayout_20.addLayout(self.container_donations)
spacerItem22 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_20.addItem(spacerItem22)
self.verticalLayout_30.addLayout(self.horizontalLayout_20)
spacerItem23 = QtWidgets.QSpacerItem(20, 181, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout_30.addItem(spacerItem23)
self.scrollArea_3.setWidget(self.donations_scrollArea)
self.verticalLayout_32.addWidget(self.scrollArea_3)
self.settings_stacked.addWidget(self.pageDonations)
self.pageAbout = QtWidgets.QWidget()
self.pageAbout.setObjectName('pageAbout')
self.verticalLayout_25 = QtWidgets.QVBoxLayout(self.pageAbout)
self.verticalLayout_25.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_25.setSpacing(0)
self.verticalLayout_25.setObjectName('verticalLayout_25')
self.scrollArea_5 = QtWidgets.QScrollArea(parent=self.pageAbout)
self.scrollArea_5.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.scrollArea_5.setWidgetResizable(True)
self.scrollArea_5.setObjectName('scrollArea_5')
self.about_scrollArea = QtWidgets.QWidget()
self.about_scrollArea.setGeometry(QtCore.QRect(0, 0, 832, 659))
self.about_scrollArea.setObjectName('about_scrollArea')
self.verticalLayout_17 = QtWidgets.QVBoxLayout(self.about_scrollArea)
self.verticalLayout_17.setObjectName('verticalLayout_17')
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setSpacing(0)
self.horizontalLayout_15.setObjectName('horizontalLayout_15')
spacerItem24 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_15.addItem(spacerItem24)
self.verticalLayout_24 = QtWidgets.QVBoxLayout()
self.verticalLayout_24.setContentsMargins((- 1), 0, (- 1), (- 1))
self.verticalLayout_24.setSpacing(10)
self.verticalLayout_24.setObjectName('verticalLayout_24')
self.label_3 = QtWidgets.QLabel(parent=self.about_scrollArea)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.label_3.setFont(font)
self.label_3.setObjectName('label_3')
self.verticalLayout_24.addWidget(self.label_3)
self.aboutFrame = QtWidgets.QFrame(parent=self.about_scrollArea)
self.aboutFrame.setMinimumSize(QtCore.QSize(620, 0))
self.aboutFrame.setMaximumSize(QtCore.QSize(620, ))
self.aboutFrame.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.aboutFrame.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.aboutFrame.setObjectName('aboutFrame')
self.verticalLayout_23 = QtWidgets.QVBoxLayout(self.aboutFrame)
self.verticalLayout_23.setContentsMargins(15, 15, 15, 15)
self.verticalLayout_23.setSpacing(15)
self.verticalLayout_23.setObjectName('verticalLayout_23')
self.verticalLayout_20 = QtWidgets.QVBoxLayout()
self.verticalLayout_20.setObjectName('verticalLayout_20')
self.verticalLayout_15 = QtWidgets.QVBoxLayout()
self.verticalLayout_15.setSpacing(6)
self.verticalLayout_15.setObjectName('verticalLayout_15')
self.icon_app = QtWidgets.QLabel(parent=self.aboutFrame)
self.icon_app.setMinimumSize(QtCore.QSize(100, 100))
self.icon_app.setMaximumSize(QtCore.QSize(100, 100))
self.icon_app.setText('')
self.icon_app.setScaledContents(True)
self.icon_app.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.icon_app.setObjectName('icon_app')
self.verticalLayout_15.addWidget(self.icon_app, 0, QtCore.Qt.AlignmentFlag.AlignHCenter)
self.name_app = QtWidgets.QLabel(parent=self.aboutFrame)
font = QtGui.QFont()
font.setPointSize(19)
font.setBold(True)
self.name_app.setFont(font)
self.name_app.setObjectName('name_app')
self.verticalLayout_15.addWidget(self.name_app, 0, QtCore.Qt.AlignmentFlag.AlignHCenter)
self.version_app = QtWidgets.QLabel(parent=self.aboutFrame)
font = QtGui.QFont()
font.setPointSize(11)
self.version_app.setFont(font)
self.version_app.setObjectName('version_app')
self.verticalLayout_15.addWidget(self.version_app, 0, QtCore.Qt.AlignmentFlag.AlignHCenter)
self.verticalLayout_20.addLayout(self.verticalLayout_15)
self.verticalLayout_23.addLayout(self.verticalLayout_20)
self.desc_app = QtWidgets.QLabel(parent=self.aboutFrame)
font = QtGui.QFont()
font.setPointSize(10)
self.desc_app.setFont(font)
self.desc_app.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.desc_app.setObjectName('desc_app')
self.verticalLayout_23.addWidget(self.desc_app)
self.verticalLayout_22 = QtWidgets.QVBoxLayout()
self.verticalLayout_22.setSpacing(0)
self.verticalLayout_22.setObjectName('verticalLayout_22')
self.label_4 = QtWidgets.QLabel(parent=self.aboutFrame)
self.label_4.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)
self.label_4.setObjectName('label_4')
self.verticalLayout_22.addWidget(self.label_4)
self.verticalLayout_23.addLayout(self.verticalLayout_22)
self.frame_about_btns = QtWidgets.QFrame(parent=self.aboutFrame)
self.frame_about_btns.setMinimumSize(QtCore.QSize(590, 0))
self.frame_about_btns.setMaximumSize(QtCore.QSize(590, ))
self.frame_about_btns.setStyleSheet('#frame_about_btns .QPushButton {\n color: rgb(255, 255, 255); \n border-radius: 15px;\n height: 30px;\n font: 12pt;\n}\n\n#frame_about_btns .QPushButton:hover {\n background-color: rgb(249, 240, 107);\n border-color: rgb(154, 153, 150);\n color: #31363b;\n}\n\n#btn_changelog{\n background-color: rgb(26, 95, 180);\n border: 1px solid rgb(26, 95, 180);\n}\n#btn_learn{\n background-color: rgb(38, 162, 105);\n border: 1px solid rgb(38, 162, 105);\n}\n#btn_report{\n background-color: rgb(224, 27, 36);\n border: 1px solid rgb(224, 27, 36);\n}')
self.frame_about_btns.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.frame_about_btns.setFrameShadow(QtWidgets.QFrame.Shadow.Raised)
self.frame_about_btns.setObjectName('frame_about_btns')
self.gridLayout_3 = QtWidgets.QGridLayout(self.frame_about_btns)
self.gridLayout_3.setSpacing(15)
self.gridLayout_3.setObjectName('gridLayout_3')
self.btn_learn = QtWidgets.QPushButton(parent=self.frame_about_btns)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_learn.sizePolicy().hasHeightForWidth())
self.btn_learn.setSizePolicy(sizePolicy)
self.btn_learn.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_learn.setObjectName('btn_learn')
self.gridLayout_3.addWidget(self.btn_learn, 0, 0, 1, 1)
self.btn_changelog = QtWidgets.QPushButton(parent=self.frame_about_btns)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_changelog.sizePolicy().hasHeightForWidth())
self.btn_changelog.setSizePolicy(sizePolicy)
self.btn_changelog.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_changelog.setObjectName('btn_changelog')
self.gridLayout_3.addWidget(self.btn_changelog, 0, 1, 1, 1)
self.btn_report = QtWidgets.QPushButton(parent=self.frame_about_btns)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btn_report.sizePolicy().hasHeightForWidth())
self.btn_report.setSizePolicy(sizePolicy)
self.btn_report.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btn_report.setObjectName('btn_report')
self.gridLayout_3.addWidget(self.btn_report, 1, 0, 1, 2)
self.verticalLayout_23.addWidget(self.frame_about_btns)
self.verticalLayout_24.addWidget(self.aboutFrame)
self.horizontalLayout_15.addLayout(self.verticalLayout_24)
spacerItem25 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_15.addItem(spacerItem25)
self.verticalLayout_17.addLayout(self.horizontalLayout_15)
spacerItem26 = QtWidgets.QSpacerItem(20, 238, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout_17.addItem(spacerItem26)
self.scrollArea_5.setWidget(self.about_scrollArea)
self.verticalLayout_25.addWidget(self.scrollArea_5)
self.settings_stacked.addWidget(self.pageAbout)
self.pageUsers = QtWidgets.QWidget()
self.pageUsers.setObjectName('pageUsers')
self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.pageUsers)
self.verticalLayout_12.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_12.setSpacing(0)
self.verticalLayout_12.setObjectName('verticalLayout_12')
self.scrollArea_6 = QtWidgets.QScrollArea(parent=self.pageUsers)
self.scrollArea_6.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.scrollArea_6.setWidgetResizable(True)
self.scrollArea_6.setObjectName('scrollArea_6')
self.usersScrollArea = QtWidgets.QWidget()
self.usersScrollArea.setGeometry(QtCore.QRect(0, 0, 832, 659))
self.usersScrollArea.setObjectName('usersScrollArea')
self.verticalLayout_26 = QtWidgets.QVBoxLayout(self.usersScrollArea)
self.verticalLayout_26.setObjectName('verticalLayout_26')
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setSpacing(0)
self.horizontalLayout_6.setObjectName('horizontalLayout_6')
spacerItem27 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_6.addItem(spacerItem27)
self.content_users = QtWidgets.QVBoxLayout()
self.content_users.setSpacing(10)
self.content_users.setObjectName('content_users')
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName('horizontalLayout_4')
self.label_7 = QtWidgets.QLabel(parent=self.usersScrollArea)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
self.label_7.setFont(font)
self.label_7.setObjectName('label_7')
self.horizontalLayout_4.addWidget(self.label_7)
spacerItem28 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_4.addItem(spacerItem28)
self.btnNewUser = QtWidgets.QPushButton(parent=self.usersScrollArea)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnNewUser.sizePolicy().hasHeightForWidth())
self.btnNewUser.setSizePolicy(sizePolicy)
self.btnNewUser.setMinimumSize(QtCore.QSize(0, 25))
self.btnNewUser.setMaximumSize(QtCore.QSize(, 25))
font = QtGui.QFont()
font.setBold(True)
self.btnNewUser.setFont(font)
self.btnNewUser.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.PointingHandCursor))
self.btnNewUser.setStyleSheet('')
self.btnNewUser.setObjectName('btnNewUser')
self.horizontalLayout_4.addWidget(self.btnNewUser)
self.content_users.addLayout(self.horizontalLayout_4)
self.label_limiteUser = QtWidgets.QLabel(parent=self.usersScrollArea)
self.label_limiteUser.setEnabled(True)
font = QtGui.QFont()
font.setBold(True)
font.setItalic(True)
self.label_limiteUser.setFont(font)
self.label_limiteUser.setStyleSheet('color: rgb(224, 27, 36);')
self.label_limiteUser.setObjectName('label_limiteUser')
self.content_users.addWidget(self.label_limiteUser)
self.usersList = QtWidgets.QVBoxLayout()
self.usersList.setContentsMargins((- 1), 0, (- 1), 0)
self.usersList.setSpacing(10)
self.usersList.setObjectName('usersList')
self.content_users.addLayout(self.usersList)
self.horizontalLayout_6.addLayout(self.content_users)
spacerItem29 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_6.addItem(spacerItem29)
self.verticalLayout_26.addLayout(self.horizontalLayout_6)
spacerItem30 = QtWidgets.QSpacerItem(20, 590, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout_26.addItem(spacerItem30)
self.scrollArea_6.setWidget(self.usersScrollArea)
self.verticalLayout_12.addWidget(self.scrollArea_6)
self.settings_stacked.addWidget(self.pageUsers)
self.horizontalLayout_2.addWidget(self.settings_stacked)
self.retranslateUi(Settings)
self.settings_stacked.setCurrentIndex(5)
QtCore.QMetaObject.connectSlotsByName(Settings)
def retranslateUi(self, Settings):
    """Install translated display strings on every widget of the Settings UI.

    Generated by pyuic; called from setupUi and again whenever the UI needs
    re-translation.  Every user-visible string is wrapped in ``_()`` so the
    translation tooling can extract the literals — keep them as direct
    string-literal calls.
    """
    Settings.setWindowTitle(_('Form'))
    # --- Sidebar navigation ---
    self.label_16.setText(_('Settings'))
    self.btn_home.setText(_('Home page'))
    self.btn_users.setText(_('User Management'))
    self.btn_system.setText(_('System'))
    self.btn_appearance.setText(_('Appearance'))
    self.btn_notifications.setText(_('Notifications'))
    self.btn_donations.setText(_('Donations'))
    self.btn_about.setText(_('About'))
    self.btn_quit.setText(_('Quit'))
    # --- System page ---
    self.label.setText(_('System'))
    self.start_system.setText(_('Start ZapZap with the system'))
    self.keepBackground.setText(_('Hide on close'))
    self.wayland.setText(_('Wayland window system'))
    self.checkSpellChecker.setText(_('SpellChecker'))
    self.btnApply.setText(_('Apply'))
    self.label_MenuBar.setText(_('Menu bar'))
    self.menubar.setText(_('Hide menu bar'))
    self.check_zap_window.setText(_('ZapZap window (Restart required)'))
    self.label_15.setText(_('Title bar buttons'))
    self.cb_maximize.setText(_('Maximize'))
    self.cb_minimize.setText(_('Minimize'))
    self.cb_positLeft.setText(_('Positioning on the left side'))
    # --- Appearance page ---
    self.label_2.setText(_('Customize appearance'))
    self.label_5.setText(_('General appearance'))
    self.label_11.setText(_('System style'))
    self.label_12.setText(_('Light style'))
    self.label_13.setText(_('Dark style'))
    self.disableTrayIcon.setText(_('Tray icon'))
    self.rb_tray_default.setText(_('Default'))
    self.rb_tray_light.setText(_('Symbolic light'))
    self.rb_tray_dark.setText(_('Symbolic dark'))
    # --- Notifications page ---
    # (widget name "titileNotifications" is a generated-code typo; it must
    # match the attribute created in setupUi, so it is kept as-is.)
    self.titileNotifications.setText(_('Notifications'))
    self.notify_desktop.setText(_('Notifications on the desktop'))
    self.show_photo.setText(_('Show the photo of the sender'))
    self.show_name.setText(_("Show the sender's name"))
    self.show_msg.setText(_('Show message preview'))
    self.show_sound.setText(_('Reproduce sounds when receiving messages'))
    # --- Donations page ---
    self.title_donations.setText(_('Donations'))
    # --- About page ---
    self.label_3.setText(_('About'))
    self.name_app.setText(_('ZapZap'))
    # NOTE(review): '{id}' looks like a placeholder filled in later via
    # str.format — confirm against the caller before changing it.
    self.version_app.setText(_('Version {id} (Official compilation)'))
    self.desc_app.setText(_('An unofficial WhatsApp desktop application'))
    self.label_4.setText(_('GNU General Public License v3.0'))
    self.btn_learn.setText(_('Learn more'))
    self.btn_changelog.setText(_('Changelog'))
    self.btn_report.setText(_('Report issue...'))
    # --- User management page ---
    self.label_7.setText(_('User Management'))
    self.btnNewUser.setText(_('New User'))
    self.label_limiteUser.setText(_('Users limit reached (9 in total)'))
class fsdp_config:
    """Class-level configuration defaults for FSDP training.

    Values are read as class attributes; override by subclassing or by
    assigning on an instance.
    """

    # Reproducibility.
    seed: int = 42

    # Precision: train with mixed precision; use_fp16 selects float16
    # (presumably over bf16 — confirm in the trainer that consumes this).
    mixed_precision: bool = True
    use_fp16: bool = False

    # Memory/throughput trade-offs.
    fsdp_activation_checkpointing: bool = True
    limit_all_gathers: bool = True

    # Shard parameters, gradients and optimizer state across all ranks.
    sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD

    # Checkpointing: gather a full (unsharded) state dict on save; the
    # optimizer state is not saved by default.
    checkpoint_type: StateDictType = StateDictType.FULL_STATE_DICT
    save_optimizer: bool = False
def test_validate_raises(kernel_types=kernel_types, contiguity_types=contiguity_types, triang_types=triang_types):
    """Geometry inputs that do not match the allowed types must raise ValueError.

    Covers lines/polygons against point-only builders (kernel, triangulation),
    and point inputs / raw coordinate arrays against contiguity builders.
    """
    points = columbus.set_geometry(columbus.centroid)
    bad_inputs = [
        (rivers, kernel_types),
        (columbus, kernel_types),
        (rivers, triang_types),
        (columbus, triang_types),
        (points, contiguity_types),
        (points.geometry, contiguity_types),
        (numpy.arange(20).reshape(-1, 2), contiguity_types),
    ]
    for geoms, allowed in bad_inputs:
        with pytest.raises(ValueError):
            _validate_geometry_input(geoms, valid_geometry_types=allowed)
class NominationEntry(ModelReprMixin, models.Model):
    """One staff member's contribution to a nomination.

    Several staff members can nominate the same user; each act is stored
    as a separate entry attached to a shared ``Nomination``.
    """

    # Parent nomination; deleting the nomination cascades to its entries.
    nomination = models.ForeignKey(Nomination, on_delete=models.CASCADE, help_text='The nomination this entry belongs to.', related_name='entries')
    # Staff member who made this nomination; deleting the user removes the entry.
    actor = models.ForeignKey(User, on_delete=models.CASCADE, help_text='The staff member that nominated this user.', related_name='nomination_set')
    # Free-text justification; optional.
    reason = models.TextField(help_text='Why the actor nominated this user.', default='', blank=True)
    # Set automatically on creation.
    inserted_at = models.DateTimeField(auto_now_add=True, help_text='The creation date of this nomination entry.')

    class Meta():
        # Correct plural for the admin ("nomination entrys" would be generated otherwise).
        verbose_name_plural = 'nomination entries'
        # Newest entries first.
        ordering = ('-inserted_at',)
def test_cached_per_instance():
    """Exercise the per-instance caching decorator on CachingClass.

    Checks that: results are memoized per (instance, args) pair, cache size
    grows per instance, and cache entries disappear when an instance is
    deleted (the decorator apparently keys its cache weakly on the instance;
    the post-``del`` assertions rely on CPython's immediate refcount-based
    collection).
    """
    # Grab the underlying caches so their sizes can be asserted directly.
    get_x_cache = CachingClass.get_x.__cached_per_instance_cache__
    with_kwargs_cache = CachingClass.with_kwargs.__cached_per_instance_cache__
    assert_eq(0, len(get_x_cache), extra=repr(get_x_cache))
    assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
    object1 = CachingClass(1)
    object2 = CachingClass(2)
    assert_eq(object1.x, 0)
    assert_eq(object2.x, 0)
    # First call computes and caches; the instance's x records the call count.
    assert_eq(object1.get_x(), 1)
    assert_eq(1, len(get_x_cache), extra=repr(get_x_cache))
    assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
    assert_eq(object1.x, 1)
    assert_eq(object2.x, 0)
    # Second call on the same instance is served from cache (x unchanged).
    assert_eq(object1.get_x(), 1)
    assert_eq(1, len(get_x_cache), extra=repr(get_x_cache))
    assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
    assert_eq(object1.x, 1)
    assert_eq(object2.x, 0)
    # A different instance gets its own cache slot.
    assert_eq(object2.get_x(), 2)
    assert_eq(2, len(get_x_cache), extra=repr(get_x_cache))
    assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
    assert_eq(object1.x, 1)
    assert_eq(object2.x, 2)
    # Keyword arguments participate in the cache key: x=1 matches the
    # defaults' result (7), while distinct kwargs recompute (16).
    assert_eq(7, object1.with_kwargs())
    assert_eq(7, object1.with_kwargs(x=1))
    assert_eq(7, object1.with_kwargs())
    assert_eq(16, object1.with_kwargs(x=3, y=3, z=3))
    assert_eq(2, len(get_x_cache), extra=repr(get_x_cache))
    assert_eq(1, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
    # Dropping an instance drops its cache entries (weak references;
    # CPython collects immediately on refcount hitting zero).
    del object1
    assert_eq(1, len(get_x_cache), extra=repr(get_x_cache))
    assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
    del object2
    assert_eq(0, len(get_x_cache), extra=repr(get_x_cache))
    assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
    # Variable **kwargs: same kwargs hit the cache (x unchanged on repeat),
    # different kwarg names or combinations recompute.
    object3 = CachingClass(0)
    assert_eq(0, object3.x)
    object3.with_variable_kwargs(k1=2)
    assert_eq(2, object3.x)
    object3.with_variable_kwargs(k1=2)
    assert_eq(2, object3.x)
    object3.with_variable_kwargs(k2=2)
    assert_eq(4, object3.x)
    object3.with_variable_kwargs(k1=2, k2=2)
    assert_eq(8, object3.x)
class proposed_method_involve_AD(BaseNet):
    """WGAN-GP-style brain-aging model conditioned on an AD (disease) vector.

    Builds three composite Keras models around one shared generator /
    discriminator pair:

    * ``self.gan``          -- generator update: adversarial (Wasserstein)
      loss on the aged image plus an L1 regularisation on the additive
      change map.
    * ``self.critic_model`` -- critic update: Wasserstein loss on real and
      fake samples plus a gradient penalty on interpolated samples.
    * ``self.GAN_zero_reg`` -- self-reconstruction regulariser: with a zero
      age difference the generator's change map should vanish (MAE loss).
    """

    def __init__(self, conf):
        super(proposed_method_involve_AD, self).__init__(conf)
        # All three graphs are constructed in build().
        self.discriminator = None
        self.generator = None
        self.gan = None

    def build(self):
        """Construct and compile the discriminator, generator and the three
        composite training models."""
        # Critic takes (image, age, AD vector).
        discr = critic_2D_with_AD(self.conf.discr_params)
        discr.build()
        log.info('Discriminator')
        self.discriminator = discr.model
        self.discriminator.summary(print_fn=log.info)
        # U-Net generator producing an additive change map M.
        gen = G_unet_16_2D_bn_with_AD(self.conf.gen_params)
        gen.build()
        log.info('Generator')
        self.generator = gen.model
        self.generator.summary(print_fn=log.info)
        # Freeze the critic while compiling the generator-update graph;
        # trainable is flipped back before compiling the critic graph below.
        self.discriminator.trainable = False
        X_yng = Input(shape=self.conf.input_shape)
        t_old = Input(shape=(self.conf.age_dim,))
        age_diff = Input(shape=(self.conf.age_dim,))
        age_gap = Input(shape=(1,))
        AD_vector = Input(shape=(self.conf.AD_dim,))
        # Aged image = young image + generated change map.
        M_old = self.generator([X_yng, age_diff, AD_vector])
        gen_X_old = Add()([X_yng, M_old])
        if self.conf.use_tanh:
            gen_X_old = Activation(activation='tanh')(gen_X_old)
        # Identity layer only to give the map output a name for the loss dict.
        Map_reg = Activation(activation='linear', name='map_l1_reg')(M_old)
        valid = self.discriminator([gen_X_old, t_old, AD_vector])
        # L1 penalty on the map, scaled by the age gap.
        partial_l1_regularization = partial(l1_regularization_loss, age_gap=age_gap)
        # NOTE(review): unlike partial_gp_loss below, no __name__ is set here;
        # some Keras versions read loss.__name__ — confirm this compiles on the
        # Keras version in use.
        self.gan = Model(inputs=[X_yng, t_old, age_diff, age_gap, AD_vector], outputs=[valid, Map_reg])
        # NOTE(review): Adam(lr=..., decay=...) uses legacy keyword names
        # (newer Keras expects learning_rate=); matches the rest of this file.
        self.gan.compile(loss={'discriminator': wasserstein_loss, 'map_l1_reg': partial_l1_regularization}, loss_weights=[1, self.conf.l1_reg_weight], optimizer=Adam(lr=self.conf.lr, decay=self.conf.decay))
        self.gan.summary(print_fn=log.info)
        # Critic-update graph: critic trainable again (generator outputs are
        # not part of this graph, so the generator is untouched here).
        self.discriminator.trainable = True
        real_old = Input(shape=self.conf.input_shape)
        real_age = Input(shape=(self.conf.age_dim,))
        real_AD = Input(shape=(self.conf.AD_dim,))
        fake_old = Input(shape=self.conf.input_shape)
        fake_age = Input(shape=(self.conf.age_dim,))
        fake_AD = Input(shape=(self.conf.AD_dim,))
        # Interpolated (averaged) samples for the WGAN-GP gradient penalty.
        average_samples = Input(shape=self.conf.input_shape)
        average_age = Input(shape=(self.conf.age_dim,))
        average_AD = Input(shape=(self.conf.AD_dim,))
        # Linear identity layers name the three critic outputs for the loss dict.
        discriminator_real = Activation(activation='linear', name='d_real')(self.discriminator([real_old, real_age, real_AD]))
        discriminator_fake = Activation(activation='linear', name='d_fake')(self.discriminator([fake_old, fake_age, fake_AD]))
        discriminator_average = Activation(activation='linear', name='gp')(self.discriminator([average_samples, average_age, average_AD]))
        partial_gp_loss = partial(gradient_penalty_loss, averaged_samples=average_samples, gradient_penalty_weight=self.conf.gp_weight)
        # Keras requires a __name__ on loss functions in some versions.
        partial_gp_loss.__name__ = 'gradient_penalty'
        self.critic_model = Model(inputs=[real_old, real_age, real_AD, fake_old, fake_age, fake_AD, average_samples, average_age, average_AD], outputs=[discriminator_real, discriminator_fake, discriminator_average])
        self.critic_model.compile(optimizer=Adam(lr=self.conf.lr, decay=self.conf.decay), loss={'d_real': wasserstein_loss, 'd_fake': wasserstein_loss, 'gp': partial_gp_loss})
        # Zero-age-gap self-reconstruction graph: generating at the subject's
        # own age should reproduce the input image.
        X_yng = Input(shape=self.conf.input_shape)
        t_yng = Input(shape=(self.conf.age_dim,))
        t_AD = Input(shape=(self.conf.AD_dim,))
        M_zero = self.generator([X_yng, t_yng, t_AD])
        gen_X_yng = Add(name='self_reg')([X_yng, M_zero])
        self.GAN_zero_reg = Model(inputs=[X_yng, t_yng, t_AD], outputs=gen_X_yng)
        self.GAN_zero_reg.compile(loss={'self_reg': 'MAE'}, loss_weights=[self.conf.self_rec_weight], optimizer=Adam(lr=self.conf.lr, decay=self.conf.decay))
        self.GAN_zero_reg.summary(print_fn=log.info)

    def load_models(self):
        """Restore GAN weights from ``<folder>/va_gan`` if present.

        Loading into the composite ``gan`` also restores the shared
        generator/discriminator weights.
        """
        if os.path.exists((self.conf.folder + '/va_gan')):
            log.info('Loading trained model from file')
            self.gan.load_weights((self.conf.folder + '/va_gan'))

    def save_models(self):
        """Persist the composite GAN weights to ``<folder>/va_gan``."""
        log.debug('Saving trained model')
        self.gan.save_weights((self.conf.folder + '/va_gan'))
# NOTE(review): the three `add_*_docstrings` decorators in this class had lost
# their "@add" prefix in this copy (they appeared as bare "_start_docstrings(...)"
# calls); restored per the standard Transformers decorator pattern.
@add_start_docstrings('\n    VAN Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', VAN_START_DOCSTRING)
class VanForImageClassification(VanPreTrainedModel):
    """VAN backbone with a linear classification head on the pooled features."""

    def __init__(self, config):
        super().__init__(config)
        self.van = VanModel(config)
        # With num_labels == 0 the head degenerates to identity (pure feature extractor).
        self.classifier = (nn.Linear(config.hidden_sizes[(- 1)], config.num_labels) if (config.num_labels > 0) else nn.Identity())
        # Initialize weights and apply final processing.
        self.post_init()

    @add_start_docstrings_to_model_forward(VAN_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(Tuple, ImageClassifierOutputWithNoAttention)]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the classification/regression loss. Indices should
            be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.van(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype,
            # then cache it on the config (standard HF head behavior).
            if self.config.problem_type is None:
                if self.config.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.config.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
class AttrVI_ATTR_RET_COUNT(RangeAttribute):
    """VI_ATTR_RET_COUNT: number of elements transferred by the asynchronous
    operation that generated the io_completion event (read-only)."""

    resources = [constants.EventType.io_completion]

    py_name = 'return_count'

    visa_name = 'VI_ATTR_RET_COUNT'

    visa_type = 'ViUInt32'

    default = NotAvailable

    read, write, local = True, False, True

    # BUG FIX: the upper bound was missing in this copy ("(0, , None)" is a
    # SyntaxError). The attribute is a ViUInt32, so the valid range is the
    # full unsigned 32-bit range.
    min_value, max_value, values = 0, 4294967295, None
def precision(k=10):
    """Build a Precision@k metric function.

    Args:
        k: cutoff rank; only the ``k`` highest-scored items are counted.

    Returns:
        A callable ``top_k(y_true, y_pred, rel_threshold=0.0)`` returning the
        fraction of the top-``k`` ranked predictions whose true label is
        strictly greater than ``rel_threshold``.
    """
    def top_k(y_true, y_pred, rel_threshold=0.0):
        if k <= 0:
            return 0.0
        # _to_list is a module-level helper (defined elsewhere in this file)
        # that normalizes scalars/arrays to plain Python lists.
        y_true = _to_list(np.squeeze(y_true).tolist())
        y_pred = _to_list(np.squeeze(y_pred).tolist())
        # BUG FIX: zip() returns an iterator in Python 3 and random.shuffle()
        # requires a mutable sequence, so the original `random.shuffle(c)`
        # raised TypeError. Materialize the pairs first. The shuffle only
        # randomizes ties before the (stable) sort by score.
        c = list(zip(y_true, y_pred))
        random.shuffle(c)
        c = sorted(c, key=lambda x: x[1], reverse=True)
        prec = 0.0
        for i, (g, p) in enumerate(c):
            if i >= k:
                break
            if g > rel_threshold:
                prec += 1
        prec /= k
        return prec
    return top_k
def main(args: argparse.Namespace):
    """Run prediction over every file in ``args.input_dir``.

    Detections are optionally rendered into ``args.output_dir``; a failure on
    one image is printed and skipped so the batch continues.
    """
    use_gpu = torch.cuda.is_available()
    render_images = bool(args.output_dir)
    model = load_model(args.model, use_gpu)
    for filename in os.listdir(args.input_dir):
        try:
            # Inference only: no autograd bookkeeping needed.
            with torch.no_grad():
                polygons, image = Toolbox.predict(filename, args.input_dir, model, render_images, args.output_dir, use_gpu)
        except Exception:
            traceback.print_exc()
def create_quantsim_model_and_compute_encodings(model, dummy_input, quantsim_config=None):
    """Build a QuantizationSimModel for ``model`` and compute its encodings.

    The quantsim configuration (``quantsim_config``, or the module-level
    ``symmetric_quantsim_config`` when falsy) is written to a fixed /tmp
    location that QuantizationSimModel reads back as its config file.
    """
    from pathlib import Path
    workdir = '/tmp/test_batch_norm_fold_to_scale'
    Path(workdir).mkdir(parents=True, exist_ok=True)
    config_path = workdir + '/quantsim_config.json'
    effective_config = quantsim_config or symmetric_quantsim_config
    with open(config_path, 'w') as config_file:
        json.dump(effective_config, config_file)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.training_range_learning_with_tf_init, config_file=config_path)

    def forward_pass_callback(model, _):
        # One forward pass on the dummy input is enough to collect statistics.
        model(dummy_input)

    sim.compute_encodings(forward_pass_callback, None)
    return sim
def verify_help_text(cmd2_app: cmd2.Cmd, help_output: Union[(str, List[str])], verbose_strings: Optional[List[str]]=None) -> None:
    """Assert that the captured help output mentions every visible command.

    :param cmd2_app: the cmd2 application under test
    :param help_output: help text, either one string or a list of fragments
        (joined with no separator)
    :param verbose_strings: extra substrings that must also be present
    """
    help_text = help_output if isinstance(help_output, str) else ''.join(help_output)
    for command in cmd2_app.get_visible_commands():
        assert command in help_text
    for verbose_string in (verbose_strings or []):
        assert verbose_string in help_text
def is_cython_or_generator(fn):
    """Return True for generator functions and Cython/builtin callables.

    Bound methods are unwrapped to their underlying function first. Cython
    and builtin callables cannot be classified by ``inspect``, so they are
    recognized by the name of their type.
    """
    fn = getattr(fn, '__func__', fn)
    if inspect.isgeneratorfunction(fn):
        return True
    return type(fn).__name__ in ('generator', 'method_descriptor', 'cython_function_or_method', 'builtin_function_or_method')
class SynchronizedLyrics(EventPlugin, PluginConfigMixin):
    """Show time-synchronized lyrics from an ``.lrc`` file stored next to the
    playing track (same basename, or a few common naming variants).

    A text view is injected into the main window; GLib timeouts are scheduled
    in rolling SYNC_PERIOD windows to swap the displayed line in time.
    """

    PLUGIN_ID = 'SynchronizedLyrics'
    PLUGIN_NAME = _('Synchronized Lyrics')
    PLUGIN_DESC = _('Shows synchronized lyrics from an .lrc file with same name as the track (or similar).')
    PLUGIN_ICON = Icons.FORMAT_JUSTIFY_FILL

    # Look-ahead window (ms): line timers are (re)scheduled this far ahead.
    SYNC_PERIOD = 10000

    DEFAULT_BGCOLOR = '#C2C'
    DEFAULT_TXTCOLOR = '#FFFFFFFFFFFF'
    DEFAULT_FONTSIZE = 25

    CFG_BGCOLOR_KEY = 'backgroundColor'
    CFG_TXTCOLOR_KEY = 'textColor'
    CFG_FONTSIZE_KEY = 'fontSize'

    # Matches "[m:ss.xx] lyric text" lines.
    LINE_REGEX = re.compile('\\s*\\[([0-9]+:[0-9.]*)]\\s*(.+)\\s*')

    def __init__(self) -> None:
        super().__init__()
        # (timestamp ms, text) pairs, sorted by timestamp.
        self._lines: list[tuple[(int, str)]] = []
        # (timestamp ms, GLib source id) for currently scheduled lines.
        self._timers: list[tuple[(int, int)]] = []
        self._start_clearing_from = 0
        self.textview = None
        self.scrolled_window = None

    # BUG FIX: restored the @classmethod decorators on PluginPreferences and
    # the three _get_* helpers below — they take `cls` and are invoked on the
    # class (the plugin framework calls PluginPreferences without an
    # instance); the decorators appear to have been stripped from this copy.
    @classmethod
    def PluginPreferences(cls, window):
        """Build the preferences pane (text/background colors, font size)."""
        vb = Gtk.VBox(spacing=6)
        vb.set_border_width(6)
        t = Gtk.Table(n_rows=5, n_columns=2, homogeneous=True)
        t.set_col_spacings(6)
        t.set_row_spacings(3)
        clr_section = Gtk.Label()
        clr_section.set_markup(util.bold(_('Colors')))
        t.attach(clr_section, 0, 2, 0, 1)
        l = Gtk.Label(label=_('Text:'))
        l.set_alignment(xalign=1.0, yalign=0.5)
        t.attach(l, 0, 1, 1, 2, xoptions=Gtk.AttachOptions.FILL)
        c = Gdk.RGBA()
        c.parse(cls._get_text_color())
        b = Gtk.ColorButton(rgba=c)
        t.attach(b, 1, 2, 1, 2)
        # NOTE(review): _set_text_color/_set_background_color/_set_font_size
        # are instance methods but are connected unbound via `cls.` — the
        # button would be passed as `self`. Looks inconsistent; confirm
        # against upstream before relying on these handlers.
        b.connect('color-set', cls._set_text_color)
        l = Gtk.Label(label=_('Background:'))
        l.set_alignment(xalign=1.0, yalign=0.5)
        t.attach(l, 0, 1, 2, 3, xoptions=Gtk.AttachOptions.FILL)
        c = Gdk.RGBA()
        c.parse(cls._get_background_color())
        b = Gtk.ColorButton(rgba=c)
        t.attach(b, 1, 2, 2, 3)
        b.connect('color-set', cls._set_background_color)
        font_section = Gtk.Label()
        font_section.set_markup(util.bold(_('Font')))
        t.attach(font_section, 0, 2, 3, 4)
        l = Gtk.Label(label=_('Size (px):'))
        l.set_alignment(xalign=1.0, yalign=0.5)
        t.attach(l, 0, 1, 4, 5, xoptions=Gtk.AttachOptions.FILL)
        a = Gtk.Adjustment.new(cls._get_font_size(), 10, 72, 2, 3, 0)
        s = Gtk.SpinButton(adjustment=a)
        s.set_numeric(True)
        s.set_text(str(cls._get_font_size()))
        t.attach(s, 1, 2, 4, 5)
        s.connect('value-changed', cls._set_font_size)
        vb.pack_start(t, False, False, 0)
        return vb

    @classmethod
    def _get_text_color(cls):
        """Configured text color collapsed from 16-bit to 8-bit channels (#rrggbb)."""
        v = cls.config_get(cls.CFG_TXTCOLOR_KEY, cls.DEFAULT_TXTCOLOR)
        return ((v[:3] + v[5:7]) + v[9:11])

    @classmethod
    def _get_background_color(cls):
        """Configured background color collapsed to #rrggbb."""
        v = cls.config_get(cls.CFG_BGCOLOR_KEY, cls.DEFAULT_BGCOLOR)
        return ((v[:3] + v[5:7]) + v[9:11])

    @classmethod
    def _get_font_size(cls):
        """Configured lyrics font size in pixels."""
        return int(cls.config_get(cls.CFG_FONTSIZE_KEY, cls.DEFAULT_FONTSIZE))

    def _set_text_color(self, button):
        self.config_set(self.CFG_TXTCOLOR_KEY, button.get_color().to_string())
        self._style_lyrics_window()

    def _set_background_color(self, button):
        self.config_set(self.CFG_BGCOLOR_KEY, button.get_color().to_string())
        self._style_lyrics_window()

    def _set_font_size(self, button):
        self.config_set(self.CFG_FONTSIZE_KEY, button.get_value_as_int())
        self._style_lyrics_window()

    def enabled(self):
        """Create the lyrics widget, inject it into the main window and start
        the periodic sync timer."""
        self.scrolled_window = Gtk.ScrolledWindow()
        self.scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.scrolled_window.get_vadjustment().set_value(0)
        self.textview = Gtk.TextView()
        self.textview.set_editable(False)
        self.textview.set_cursor_visible(False)
        self.textview.set_wrap_mode(Gtk.WrapMode.WORD)
        self.textview.set_justification(Gtk.Justification.CENTER)
        self.scrolled_window.add_with_viewport(self.textview)
        vb = Gtk.HBox()
        vb.pack_start(self.scrolled_window, True, True, 6)
        vb.show_all()
        app.window.get_child().pack_start(vb, False, True, 0)
        app.window.get_child().reorder_child(vb, 2)
        self._style_lyrics_window()
        self.scrolled_window.show()
        self._sync_timer = GLib.timeout_add(self.SYNC_PERIOD, self._sync)
        # Warms the lru_cache on _build_data for the current song; the
        # parsed lines themselves are picked up on the song-started event.
        self._build_data(app.player.song)
        self._timer_control()

    def disabled(self):
        """Cancel timers and remove the lyrics widget from the window."""
        self._clear_timers()
        GLib.source_remove(self._sync_timer)
        self.textview.destroy()
        self.textview = None
        self.scrolled_window.destroy()
        self.scrolled_window = None

    def _style_lyrics_window(self):
        """Apply the configured colors and font to the lyrics view via CSS."""
        if self.scrolled_window is None:
            return
        # NOTE(review): a float height is passed here; GTK expects an int —
        # confirm whether upstream wraps this in int().
        self.scrolled_window.set_size_request((- 1), (1.5 * self._get_font_size()))
        qltk.add_css(self.textview, f'''
            * {{
                background-color: {self._get_background_color()};
                color: {self._get_text_color()};
                font-size: {self._get_font_size()}px;
                padding: 0.25rem;
                border-radius: 6px;
            }}
        ''')

    def _cur_position(self):
        """Current playback position in milliseconds."""
        return app.player.get_position()

    # BUG FIX: this decorator was garbled to a bare "_cache()" call in this
    # copy, which raised NameError at class-definition time. Restored as
    # lru_cache (memoizes the parsed .lrc data per (self, song) — enabled()
    # relies on this by pre-warming the cache). Confirm the exact original
    # spelling against VCS.
    @lru_cache()
    def _build_data(self, song: (AudioFile | None)) -> list[tuple[(int, str)]]:
        """Find and parse the track's .lrc file.

        Returns sorted (timestamp ms, text) pairs, or [] when no lyrics file
        is found. Also clears the currently displayed line.
        """
        if self.textview:
            self.textview.get_buffer().set_text('')
        if song:
            track_name = splitext((song('~basename') or ''))[0]
            dir_ = song('~dirname')
            print_d(f'Looking for .lrc files in {dir_}')
            for filename in [f'{s}.lrc' for s in {track_name, track_name.lower(), track_name.upper(), song('~artist~title'), song('~artist~tracknumber~title'), song('~tracknumber~title')}]:
                print_d(f'Looking for {filename!r}')
                try:
                    with open(os.path.join(dir_, filename), encoding='utf-8') as f:
                        # BUG FIX: the f-string placeholder was garbled to a
                        # literal "(unknown)" in this copy; restored.
                        print_d(f'Found lyrics file: {filename}')
                        contents = f.read()
                except FileNotFoundError:
                    continue
                return self._parse_lrc(contents)
            print_d(f'No lyrics found for {track_name!r}')
        return []

    def _parse_lrc(self, contents: str) -> list[tuple[(int, str)]]:
        """Parse .lrc text into sorted (timestamp ms, line) pairs."""
        data = []
        for line in contents.splitlines():
            match = self.LINE_REGEX.match(line)
            if (not match):
                continue
            (timing, text) = match.groups()
            (minutes, seconds) = (float(p) for p in timing.split(':', 1))
            timestamp = int((1000 * ((minutes * 60) + seconds)))
            data.append((timestamp, text))
        return sorted(data)

    def _set_timers(self):
        """Schedule one GLib timeout per lyric line due within SYNC_PERIOD."""
        if (not self._timers):
            print_d('Setting timers')
            cur_time = self._cur_position()
            cur_idx = self._greater(self._lines, cur_time)
            if (cur_idx != (- 1)):
                while ((cur_idx < len(self._lines)) and (self._lines[cur_idx][0] < (cur_time + self.SYNC_PERIOD))):
                    timestamp = self._lines[cur_idx][0]
                    line = self._lines[cur_idx][1]
                    tid = GLib.timeout_add((timestamp - cur_time), self._show, line)
                    self._timers.append((timestamp, tid))
                    cur_idx += 1

    def _sync(self):
        """Periodic re-sync; returns True so the GLib timer keeps firing."""
        if (not app.player.paused):
            self._clear_timers()
            self._set_timers()
        return True

    def _timer_control(self):
        """Start or stop line timers to match the player's pause state."""
        if app.player.paused:
            self._clear_timers()
        else:
            self._set_timers()
        return False

    def _clear_timers(self):
        """Cancel all pending (not yet fired) line timers."""
        for (_ts, tid) in self._timers[self._start_clearing_from:]:
            GLib.source_remove(tid)
        self._timers = []
        self._start_clearing_from = 0

    def _show(self, line) -> bool:
        """Timer callback: display one lyric line; returns False (one-shot)."""
        if self.textview:
            self.textview.get_buffer().set_text(line)
        self._start_clearing_from += 1
        print_d(f' {line.strip()} ')
        return False

    def plugin_on_song_started(self, song: AudioFile) -> None:
        if song:
            print_d(f'Preparing for {song.key}')
        self._clear_timers()
        self._lines = self._build_data(song)
        # Defer timer setup slightly so player state has settled.
        GLib.timeout_add(5, self._timer_control)

    def plugin_on_song_ended(self, song, stopped):
        self._clear_timers()

    def plugin_on_paused(self):
        self._timer_control()

    def plugin_on_unpaused(self):
        self._timer_control()

    def plugin_on_seek(self, song, msec):
        if (not app.player.paused):
            self._clear_timers()
            self._set_timers()

    def _greater(self, array, probe):
        """Binary search: index of the first entry with timestamp > probe.

        Returns -1 for an empty array and len(array) when probe is at or past
        the last timestamp.
        """
        length = len(array)
        if (length == 0):
            return (- 1)
        elif (probe < array[0][0]):
            return 0
        elif (probe >= array[(length - 1)][0]):
            return length
        else:
            return self._search(array, probe, 0, (length - 1))

    def _search(self, array, probe, lower, upper):
        """Recursive helper for _greater over array[lower..upper]."""
        if (lower == upper):
            if (array[lower][0] <= probe):
                return (lower + 1)
            else:
                return lower
        else:
            middle = int(((lower + upper) / 2))
            if (array[middle][0] <= probe):
                return self._search(array, probe, (middle + 1), upper)
            else:
                return self._search(array, probe, lower, middle)
def find_length(data):
    """Estimate the dominant period (in samples) of a 1-D signal.

    Computes the autocorrelation function and picks the lag of its strongest
    local maximum. Returns 0 for multi-dimensional input and a default of 125
    when no plausible period (3..300 samples) is detected.
    """
    if (len(data.shape) > 1):
        # Period estimation is only defined for univariate series.
        return 0
    # Cap the analysis window to keep the ACF computation cheap.
    data = data[:min(20000, len(data))]
    base = 3  # skip the first few lags, which are trivially self-correlated
    auto_corr = acf(data, nlags=400, fft=True)[base:]
    local_max = argrelextrema(auto_corr, np.greater)[0]
    try:
        max_local_max = np.argmax([auto_corr[lcm] for lcm in local_max])
        if ((local_max[max_local_max] < 3) or (local_max[max_local_max] > 300)):
            return 125
        return (local_max[max_local_max] + base)
    except (ValueError, IndexError):
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. np.argmax raises ValueError when no local maxima
        # exist; fall back to the default period in that case.
        return 125
# NOTE(review): the decorator on this class was garbled to a bare
# "(Publisher)" line in this copy; restored as the Django admin registration
# decorator. Confirm against VCS whether the file spells it `register` or
# `admin.register`.
@register(Publisher)
class PublisherAdmin(RemoveDeleteMixin, SimpleHistoryAdmin):
    """Django admin for Publisher: revenue/payout columns, group list and a
    link to the publisher report."""

    list_display = ('name', 'slug', 'report', 'revenue_share_percentage', 'payout_method', 'unauthed_ad_decisions', 'allow_paid_campaigns', 'allow_affiliate_campaigns', 'allow_community_campaigns', 'allow_house_campaigns', 'record_views')
    list_filter = ('payout_method', 'unauthed_ad_decisions', 'allow_paid_campaigns', 'allow_affiliate_campaigns', 'allow_community_campaigns', 'allow_house_campaigns', 'record_views')
    list_per_page = 500
    prepopulated_fields = {'slug': ('name',)}
    raw_id_fields = ('djstripe_account',)
    readonly_fields = ('publisher_group_list', 'modified', 'created')
    search_fields = ('name', 'slug')

    def publisher_group_list(self, instance):
        """Comma-separated names of the publisher's groups (readonly field)."""
        if not instance.pk:
            return ''
        return ', '.join(pg.name for pg in instance.publisher_groups.all())

    def report(self, instance):
        """HTML link to the publisher's report page (name is escaped before
        being marked safe)."""
        if not instance.pk:
            return ''
        name = escape(instance.name)
        url = instance.get_absolute_url()
        return mark_safe(f'<a href="{url}">{name}</a> Report')
def push_to_hf_hub(model, repo_id: str, commit_message: str='Add model', token: Optional[str]=None, revision: Optional[str]=None, private: bool=False, create_pr: bool=False, model_config: Optional[dict]=None):
    """Serialize *model* in hub format and upload it to the Hugging Face Hub.

    Creates the repository if needed, writes the model (plus an auto-generated
    README when the repo has none) into a temporary directory, and uploads
    that directory as a single commit.

    Args:
        model: model to serialize (passed to ``save_for_hf``).
        repo_id: target repository, ``"owner/name"`` or just ``"name"``.
        commit_message: commit message for the upload.
        token: optional auth token; falls back to the cached credentials.
        revision: branch/revision to probe for a README and push to.
        private: create the repository as private if it does not yet exist.
        create_pr: open a pull request instead of committing to the branch.
        model_config: extra config serialized alongside the weights.

    Returns:
        The result of ``upload_folder`` (commit info / URL).
    """
    # Create (or reuse) the repo and normalize repo_id to canonical
    # "owner/name" as reported back by the hub.
    repo_url = create_repo(repo_id, token=token, private=private, exist_ok=True)
    (_, repo_owner, repo_name) = repo_type_and_id_from_hf_id(repo_url)
    repo_id = f'{repo_owner}/{repo_name}'
    # Probe for an existing README so a hand-written model card is never
    # overwritten by the generated stub below.
    try:
        get_hf_file_metadata(hf_hub_url(repo_id=repo_id, filename='README.md', revision=revision))
        has_readme = True
    except EntryNotFoundError:
        has_readme = False
    with TemporaryDirectory() as tmpdir:
        save_for_hf(model, tmpdir, model_config=model_config)
        if (not has_readme):
            model_name = repo_id.split('/')[(- 1)]
            readme_path = (Path(tmpdir) / 'README.md')
            # Minimal model card with YAML front matter (must start at
            # column 0 for the hub to parse the tags).
            readme_text = f'''---
tags:
- image-classification
- timm
library_tag: timm
---
# Model card for {model_name}'''
            readme_path.write_text(readme_text)
        # Upload while tmpdir still exists (inside the with-block).
        return upload_folder(repo_id=repo_id, folder_path=tmpdir, revision=revision, create_pr=create_pr, commit_message=commit_message)
def test_request_reset_password_fails_with_not_active_user(graphql_client, sent_emails):
    """An inactive user's reset request reports ok=False and sends no email."""
    # NOTE(review): email='' looks like a scrubbed fixture value — confirm
    # against VCS.
    inactive_user = UserFactory(email='', is_active=False)
    mutation = 'mutation($email: String!) {\n        requestResetPassword(email: $email) {\n            __typename\n\n            ... on OperationSuccess {\n                ok\n            }\n        }\n    }'
    body = graphql_client.query(mutation, variables={'email': inactive_user.email})
    payload = body['data']['requestResetPassword']
    assert payload['__typename'] == 'OperationSuccess'
    assert payload['ok'] is False
    assert len(sent_emails) == 0
class F29_Bootloader(F21_Bootloader):
    """F29 bootloader command: same as F21 except that ``--upgrade`` is
    marked deprecated as of Fedora 29."""

    removedKeywords = F21_Bootloader.removedKeywords
    removedAttrs = F21_Bootloader.removedAttrs

    def _getParser(self):
        parser = F21_Bootloader._getParser(self)
        # --upgrade is kept for parse compatibility but deprecated in F29.
        parser.add_argument('--upgrade', action='store_true', default=False,
                            deprecated=F29,
                            help='upgrade the boot loader installed on disk')
        return parser
# NOTE(review): the decorator on this function was garbled to a bare
# "_for('torch', '2.0', None)" line in this copy; restored as the
# version-dispatch decorator `implement_for` (selects this implementation
# for torch >= 2.0). Confirm the exact name against VCS.
@implement_for('torch', '2.0', None)
def set_tensor_dict(module_dict, module, name: str, tensor: torch.Tensor) -> None:
    """Install *tensor* into *module_dict* under *name*, routing it to the
    correct slot (parameter, buffer, or plain attribute).

    Mirrors ``nn.Module.__setattr__`` semantics: an ``nn.Parameter`` goes to
    ``_parameters`` (after running the global parameter registration hooks),
    a plain ``Tensor`` whose name previously lived in ``_buffers`` stays a
    buffer, and anything else becomes a regular attribute.
    """
    # Drop any stale parameter entry under this name.
    if (name in module_dict['_parameters']):
        del module_dict['_parameters'][name]
    # Remember whether the name used to be a buffer so a plain Tensor can
    # keep that role below.
    was_buffer = (name in module_dict['_buffers'])
    if was_buffer:
        del module_dict['_buffers'][name]
    if isinstance(tensor, nn.Parameter):
        module_dict.pop(name, None)
        # Give registration hooks a chance to replace the parameter before
        # it is stored (same contract as nn.Module.register_parameter).
        for hook in _global_parameter_registration_hooks.values():
            output = hook(module, name, tensor)
            if (output is not None):
                tensor = output
        module_dict['_parameters'][name] = tensor
    elif (was_buffer and isinstance(tensor, Tensor)):
        module_dict['_buffers'][name] = tensor
    else:
        module_dict[name] = tensor
def area_def2basemap(area_def, **kwargs):
    """Create a Basemap object matching *area_def* (deprecated; use cartopy).

    Any extra keyword arguments are forwarded to the Basemap constructor,
    augmented with the sphere radii and projection info of the area.
    """
    import warnings
    warnings.warn("Basemap is no longer maintained. Please switch to cartopy by using 'area_def.to_cartopy_crs()'. See the pyresample documentation for more details.", DeprecationWarning, stacklevel=2)
    from mpl_toolkits.basemap import Basemap
    bmap_kwargs = kwargs
    bmap_kwargs['rsphere'] = _sphere_radii(area_def)
    _add_area_info_to_basemap_args(area_def, bmap_kwargs)
    return Basemap(**bmap_kwargs)
def make_baseline_net(output_dir):
    """Write the NYU baseline test prototxts (test and train splits) into
    ``<output_dir>/model/``."""
    for mode, suffix in (('test-test', 'testset'), ('test-train', 'trainset')):
        path = '{}/model/test_nyu_baseline_{}.prototxt'.format(output_dir, suffix)
        with open(path, 'w') as f:
            f.write(hand_baseline(mode, output_dir))
def test_resolve_lang_codes_m2m100():
    """Every accepted spelling of a language resolves to its m2m100 code."""
    source_spellings = [dlt.lang.m2m100.FRENCH, 'fr', 'French']
    target_spellings = [dlt.lang.m2m100.ENGLISH, 'en', 'English']
    for src, tgt in zip(source_spellings, target_spellings):
        resolved_src = _resolve_lang_codes(src, 'source', 'm2m100')
        resolved_tgt = _resolve_lang_codes(tgt, 'target', 'm2m100')
        assert resolved_src == 'fr'
        assert resolved_tgt == 'en'
def test_messages(caplog: pytest.LogCaptureFixture) -> None:
    """caplog.messages holds the raw per-record messages, while caplog.text
    is the multi-line formatted output (headers and tracebacks included)."""
    caplog.set_level(logging.INFO)
    logger.info('boo %s', 'arg')
    logger.info('bar %s\nbaz %s', 'arg1', 'arg2')
    assert caplog.messages[0] == 'boo arg'
    assert caplog.messages[1] == 'bar arg1\nbaz arg2'
    # The formatted text adds a header per record, so it has more newlines
    # and lines than there are messages.
    assert caplog.text.count('\n') > len(caplog.messages)
    assert len(caplog.text.splitlines()) > len(caplog.messages)
    try:
        raise Exception('test')
    except Exception:
        logger.exception('oops')
    final_message = caplog.messages[(- 1)]
    assert 'oops' in caplog.text
    assert 'oops' in final_message
    # The traceback shows up only in the formatted text, not in .messages.
    assert 'Exception' in caplog.text
    assert 'Exception' not in final_message
class BasicTokenizer(object):
    """Whitespace/punctuation tokenizer with optional lower-casing.

    Produces ``(orig_index, sub_token)`` pairs so each sub-token can be
    traced back to the whitespace-delimited token it came from.
    """

    def __init__(self, do_lower_case=True, never_split=('[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]')):
        # Tokens in never_split are kept verbatim (no casing or splitting).
        self.do_lower_case = do_lower_case
        self.never_split = never_split

    def tokenize(self, text):
        """Clean, split and normalize *text*; returns (index, token) pairs."""
        text = self._tokenize_chinese_chars(self._clean_text(text))
        pieces = []
        for idx, token in enumerate(whitespace_tokenize(text)):
            if self.do_lower_case and token not in self.never_split:
                token = self._run_strip_accents(token.lower())
            pieces.extend((idx, sub) for sub in self._run_split_on_punc(token))
        return pieces

    def _run_strip_accents(self, text):
        """Drop combining marks (Unicode category Mn) after NFD normalization."""
        decomposed = unicodedata.normalize('NFD', text)
        return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')

    def _run_split_on_punc(self, text):
        """Split *text* into runs, isolating each punctuation character."""
        if text in self.never_split:
            return [text]
        pieces = []
        start_new_word = True
        for ch in text:
            if _is_punctuation(ch):
                # Punctuation always forms its own token and ends the run.
                pieces.append([ch])
                start_new_word = True
            else:
                if start_new_word:
                    pieces.append([])
                start_new_word = False
                pieces[-1].append(ch)
        return [''.join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Surround CJK characters with spaces so they tokenize individually."""
        padded = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                padded.extend((' ', ch, ' '))
            else:
                padded.append(ch)
        return ''.join(padded)

    def _is_chinese_char(self, cp):
        """True if codepoint *cp* lies in any CJK ideograph block."""
        cjk_blocks = (
            (0x4E00, 0x9FFF),    # CJK Unified Ideographs
            (0x3400, 0x4DBF),    # Extension A
            (0x20000, 0x2A6DF),  # Extension B
            (0x2A700, 0x2B73F),  # Extension C
            (0x2B740, 0x2B81F),  # Extension D
            (0x2B820, 0x2CEAF),  # Extension E
            (0xF900, 0xFAFF),    # CJK Compatibility Ideographs
            (0x2F800, 0x2FA1F),  # Compatibility Ideographs Supplement
        )
        return any(lo <= cp <= hi for lo, hi in cjk_blocks)

    def _clean_text(self, text):
        """Remove NUL/replacement/control chars; map whitespace to ' '."""
        cleaned = []
        for ch in text:
            code = ord(ch)
            if code == 0 or code == 65533 or _is_control(ch):
                continue
            cleaned.append(' ' if _is_whitespace(ch) else ch)
        return ''.join(cleaned)
class STAT0(IntEnum):
    """Bit flags of the I2C STAT0 status register (one bit per flag)."""

    SMBALT = 0x8000     # SMBus alert
    SMBTO = 0x4000      # SMBus timeout
    PECERR = 0x1000     # PEC error on reception
    OUERR = 0x0800      # over-run / under-run error
    AERR = 0x0400       # acknowledge error
    LOSTARB = 0x0200    # arbitration lost
    BERR = 0x0100       # bus error
    TBE = 0x0080        # transmit buffer empty
    RBNE = 0x0040       # receive buffer not empty
    STPDET = 0x0010     # stop condition detected
    ADD10SEND = 0x0008  # 10-bit address header sent
    BTC = 0x0004        # byte transfer completed
    ADDSEND = 0x0002    # address sent/matched
    SBSEND = 0x0001     # start condition sent
# NOTE(review): the three decorators below were garbled to bare ".patch(...)"
# lines in this copy; restored as `mock.patch` (the standard unittest.mock
# stdout-capturing pattern). Confirm against VCS.
class ContextManagerTests(unittest.TestCase):
    """Checks that ContextManagers enters/exits wrapped contexts in order."""

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_no_context(self, mock_stdout):
        """With no contexts, only the print output appears."""
        with ContextManagers([]):
            print('Transformers are awesome!')
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_one_context(self, mock_stdout):
        """A single context wraps the body with its enter/exit prints."""
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_two_context(self, mock_stdout):
        """Two contexts nest: first listed is entered first, exited last."""
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')
class Wheel(distribution.Distribution):
    """Metadata reader for ``.whl`` archives."""

    def __init__(self, filename: str, metadata_version: Optional[str]=None) -> None:
        self.filename = filename
        self.basefilename = os.path.basename(self.filename)
        self.metadata_version = metadata_version
        self.extractMetadata()

    # NOTE(review): restored as a property — the decorator appears stripped
    # from this copy (the method is a pure accessor and the upstream twine
    # implementation declares it @property). Confirm callers.
    @property
    def py_version(self) -> str:
        """Python tag parsed from the wheel filename ('any' if unparseable)."""
        wheel_info = wheel_file_re.match(self.basefilename)
        if wheel_info is None:
            return 'any'
        return wheel_info.group('pyver')

    # BUG FIX: restored @staticmethod — without it, the instance call in
    # read() (`self.find_candidate_metadata_files(names)`) would bind `self`
    # to the `names` parameter and raise TypeError.
    @staticmethod
    def find_candidate_metadata_files(names: List[str]) -> List[List[str]]:
        """Return candidate METADATA paths (as path components), shortest path first."""
        tuples = [x.split('/') for x in names if ('METADATA' in x)]
        return [x[1] for x in sorted(((len(x), x) for x in tuples))]

    def read(self) -> bytes:
        """Return the raw METADATA bytes from the wheel archive.

        Raises:
            exceptions.InvalidDistribution: if the file is missing, is not a
                ``.whl``, or contains no METADATA with a Metadata-Version
                header.
        """
        fqn = os.path.abspath(os.path.normpath(self.filename))
        if not os.path.exists(fqn):
            raise exceptions.InvalidDistribution('No such file: %s' % fqn)
        if fqn.endswith('.whl'):
            archive = zipfile.ZipFile(fqn)
            names = archive.namelist()

            def read_file(name: str) -> bytes:
                return archive.read(name)
        else:
            raise exceptions.InvalidDistribution('Not a known archive format for file: %s' % fqn)
        try:
            # Try the shallowest candidate first; accept the first one that
            # actually carries a Metadata-Version header.
            for path in self.find_candidate_metadata_files(names):
                candidate = '/'.join(path)
                data = read_file(candidate)
                if b'Metadata-Version' in data:
                    return data
        finally:
            archive.close()
        raise exceptions.InvalidDistribution('No METADATA in archive: %s' % fqn)

    def parse(self, data: bytes) -> None:
        """Parse METADATA bytes into distribution attributes."""
        super().parse(data)
        fp = io.StringIO(data.decode('utf-8', errors='replace'))
        # NOTE(review): `distribution.parse` is a module-level helper not
        # visible here — presumably an email-header-style parser returning a
        # Message; confirm.
        msg = distribution.parse(fp)
        self.description = msg.get_payload()
class TestQcQuantizeRecurrentOp(unittest.TestCase):
testcases = [TestCase(test_name='rnn_single_layer_no_bias', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=1, bias=False), input_shape=(5, 3, 4)), TestCase(test_name='rnn_single_layer', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=1), input_shape=(5, 3, 4)), TestCase(test_name='rnn_single_layer_valid_hx', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=1), input_shape=(5, 3, 4), valid_hx=True), TestCase(test_name='rnn_single_layer_batch_first', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=1, batch_first=True), input_shape=(3, 5, 4)), TestCase(test_name='rnn_single_layer_packed_sequence', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=1), input_shape=(5, 3, 4), sequence_lens=[1, 5, 3]), TestCase(test_name='rnn_multilayer', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=2), input_shape=(5, 3, 4)), TestCase(test_name='rnn_multilayer_hx_input', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=2), input_shape=(5, 3, 4), valid_hx=True), TestCase(test_name='rnn_multilayer_batch_first', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=3, batch_first=True), input_shape=(3, 5, 4)), TestCase(test_name='rnn_bidirectional', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=1, bidirectional=True), input_shape=(5, 3, 4)), TestCase(test_name='rnn_bidirectional_batch_first', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=1, bidirectional=True, batch_first=True), input_shape=(3, 5, 4)), TestCase(test_name='rnn_multilayer_bidrectional', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=2, bidirectional=True), input_shape=(3, 5, 4)), TestCase(test_name='rnn_multilayer_bidrectional_batch_first', model=torch.nn.RNN(input_size=4, hidden_size=5, num_layers=2, bidirectional=True, batch_first=False), input_shape=(3, 5, 4)), TestCase(test_name='lstm_single_layer', model=torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=1), input_shape=(5, 3, 4)), 
TestCase(test_name='lstm_single_layer_valid_hx', model=torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=1), input_shape=(5, 3, 4), valid_hx=True), TestCase(test_name='lstm_single_layer_batch_first', model=torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=1, batch_first=True), input_shape=(3, 5, 4)), TestCase(test_name='lstm_bidirectional', model=torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=1, bidirectional=True), input_shape=(5, 3, 4), sequence_lens=[1, 5, 3]), TestCase(test_name='lstm_multilayer', model=torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=2), input_shape=(5, 3, 4)), TestCase(test_name='lstm_multilayer_valid_hx', model=torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=2), input_shape=(5, 3, 4), valid_hx=True), TestCase(test_name='lstm_multilayer_batch_first', model=torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=3, batch_first=True), input_shape=(3, 5, 4)), TestCase(test_name='lstm_multilayer_bidirectional_large_dimension', model=torch.nn.LSTM(input_size=10, hidden_size=20, num_layers=3, bidirectional=True, batch_first=True), input_shape=(25, 500, 10), sequence_lens=[480, 31, 210, 9, 411, 498, 298, 345, 241, 403, 479, 347, 42, 95, 380, 454, 470, 57, 293, 457, 194, 45, 366, 458, 172]), TestCase(test_name='gru_single_layer', model=torch.nn.GRU(input_size=4, hidden_size=5, num_layers=1), input_shape=(5, 3, 4)), TestCase(test_name='gru_single_layer_batch_first', model=torch.nn.GRU(input_size=4, hidden_size=5, num_layers=1, batch_first=True), input_shape=(3, 5, 4)), TestCase(test_name='gru_bidirectional', model=torch.nn.GRU(input_size=4, hidden_size=5, num_layers=1, bidirectional=True), input_shape=(5, 3, 4)), TestCase(test_name='gru_multilayer', model=torch.nn.GRU(input_size=4, hidden_size=5, num_layers=2), input_shape=(5, 3, 4)), TestCase(test_name='gru_multilayer_batch_first', model=torch.nn.GRU(input_size=4, hidden_size=5, num_layers=3, batch_first=True), input_shape=(3, 5, 4))]
def verify_custom_op(self, tc: TestCase):
quant_op = QcQuantizeRecurrent(module_to_quantize=tc.model, weight_bw=32, activation_bw=32, is_symmetric=False, quant_scheme=QuantScheme.post_training_tf_enhanced, round_mode='nearest', data_type=QuantizationDataType.int)
for input_quantizer in quant_op.input_quantizers.values():
input_quantizer.enabled = False
for (name, param) in quant_op.named_parameters(recurse=False):
quant_op.param_quantizers[name].enabled = False
x = torch.rand(tc.input_shape).to(tc.device)
h = None
if tc.valid_hx:
(o_rnn, h_rnn) = tc.model(input=x, hx=None)
if isinstance(h_rnn, tuple):
h = (h_rnn[0], h_rnn[1])
else:
h = torch.stack([h_rnn])
h = h[0]
(o_rnn, h_rnn) = tc.model(input=x, hx=h)
(o_qc_rnn, h_qc_rnn) = quant_op(x, hx=h)
if (not isinstance(h_qc_rnn, tuple)):
h_qc_rnn = [h_qc_rnn]
h_rnn = [h_rnn]
for (h, h_qc) in zip(h_rnn, h_qc_rnn):
self.assertTrue(torch.allclose(h, h_qc, atol=1e-05), msg='h/c mismatched, Failed TestCase:{}'.format(tc.test_name))
self.assertTrue(torch.allclose(o_rnn, o_qc_rnn, atol=1e-05), msg='output mismatched, Failed TestCase:{}'.format(tc.test_name))
    def validate_backward_pass(self, tc: TestCase):
        """End-to-end training-step check for QcQuantizeRecurrent.

        Verifies that (a) wrapping does not alter the initial weights,
        (b) gradients land on the wrapper's shadow parameters instead of the
        wrapped module's, (c) an optimizer step changes only the shadow
        parameters, and (d) update_params() syncs them back into the wrapped
        module.
        """
        original_model = copy.deepcopy(tc.model)
        quant_op = QcQuantizeRecurrent(module_to_quantize=tc.model, weight_bw=8, activation_bw=8, is_symmetric=False, quant_scheme=QuantScheme.post_training_tf_enhanced, round_mode='nearest', data_type=QuantizationDataType.int)
        # Hand-built fixed encoding applied to every quantizer so the
        # quantization grid is deterministic for this test.
        encodings = libpymo.TfEncoding()
        encodings.bw = 8
        encodings.max = 3
        encodings.min = (- 2)
        encodings.delta = 1
        encodings.offset = 0.2
        for input_quantizer in quant_op.input_quantizers.values():
            input_quantizer.enabled = True
            input_quantizer.encoding = encodings
        for (name, param) in quant_op.named_parameters(recurse=False):
            quant_op.param_quantizers[name].enabled = True
            quant_op.param_quantizers[name].encoding = encodings
        for output_quantizer in quant_op.output_quantizers.values():
            output_quantizer.encoding = encodings
        # (a) wrapping must start from weights identical to the original.
        for (name, param) in original_model.named_parameters():
            self.assertTrue(torch.allclose(param.data, getattr(quant_op, name).data, atol=1e-05))
        inp = torch.rand(tc.input_shape, requires_grad=True).to(tc.device)
        (o_qc_rnn, _) = quant_op(inp, hx=None)
        # A forward pass alone must not modify any weights.
        for (name, param) in original_model.named_parameters():
            self.assertTrue(torch.allclose(param.data, getattr(quant_op, name).data, atol=1e-05))
        optimizer = torch.optim.SGD(quant_op.parameters(), lr=0.05, momentum=0.5)
        loss = o_qc_rnn.flatten().sum()
        loss.backward()
        # (b) gradients flow to the wrapper's copies of the weights, not to
        # the wrapped module's own parameters.
        for (name, param) in quant_op.module_to_quantize.named_parameters():
            self.assertTrue((param.grad is None))
            quant_param = getattr(quant_op, name)
            self.assertTrue((quant_param.grad is not None))
            self.assertTrue(torch.allclose(param.data, quant_param.data))
        optimizer.step()
        # (c) the step updates only the wrapper's copies; the wrapped module
        # still matches the original until update_params() runs.
        for (name, param) in original_model.named_parameters():
            quant_param = getattr(quant_op, name)
            self.assertFalse(torch.allclose(param.data, quant_param.data, atol=1e-05))
            module_to_quantize_param = getattr(quant_op.module_to_quantize, name)
            self.assertTrue(torch.allclose(param.data, module_to_quantize_param.data, atol=1e-05))
        quant_op.update_params()
        # (d) after syncing, the wrapped module reflects the trained weights.
        for (name, param) in quant_op.module_to_quantize.named_parameters():
            orig_param = getattr(original_model, name)
            self.assertFalse(torch.allclose(param.data, orig_param.data, atol=1e-05))
            quant_param = getattr(quant_op, name)
            self.assertTrue(torch.allclose(param.data, quant_param.data, atol=1e-05))
def compare_quantizer(self, quantizer, loaded_quantizer):
self.assertEqual(quantizer.round_mode, loaded_quantizer.round_mode)
self.assertEqual(quantizer.quant_scheme, loaded_quantizer.quant_scheme)
self.assertEqual(quantizer.bitwidth, loaded_quantizer.bitwidth)
self.assertEqual(quantizer.encoding.max, loaded_quantizer.encoding.max)
self.assertEqual(quantizer.encoding.min, loaded_quantizer.encoding.min)
self.assertEqual(quantizer.encoding.delta, loaded_quantizer.encoding.delta)
self.assertEqual(quantizer.encoding.offset, loaded_quantizer.encoding.offset)
    def validate_serialize_deserialize(self, tc: TestCase):
        """Round-trip a trained QcQuantizeRecurrent through torch.save/load
        and check that parameters, quantizer state and outputs all survive.
        """
        original_model = copy.deepcopy(tc.model)
        quant_op = QcQuantizeRecurrent(module_to_quantize=tc.model, weight_bw=8, activation_bw=8, is_symmetric=False, quant_scheme=QuantScheme.post_training_tf_enhanced, round_mode='nearest', data_type=QuantizationDataType.int)
        quant_op.eval()
        inp = torch.rand(tc.input_shape, requires_grad=True).to(tc.device)
        # Fixed encoding applied to all quantizers so the op is exercised in
        # a deterministic quantized configuration.
        encodings = libpymo.TfEncoding()
        encodings.bw = 8
        encodings.max = 3
        encodings.min = (- 2)
        encodings.delta = 1
        encodings.offset = 0.2
        for input_quantizer in quant_op.input_quantizers.values():
            input_quantizer.enabled = True
            input_quantizer.encoding = encodings
        for (name, param) in quant_op.named_parameters(recurse=False):
            quant_op.param_quantizers[name].enabled = True
            quant_op.param_quantizers[name].encoding = encodings
        for output_quantizer in quant_op.output_quantizers.values():
            output_quantizer.encoding = encodings
        # One forward/backward/step so the saved op carries trained state.
        (o_qc_rnn, _) = quant_op(inp, hx=None)
        optimizer = torch.optim.SGD(quant_op.parameters(), lr=0.05, momentum=0.5)
        loss = o_qc_rnn.flatten().sum()
        loss.backward()
        optimizer.step()
        quant_op.compute_encoding()
        quant_op.compute_weight_encodings()
        # Reference output before serialization.
        (o_pre, h_pre) = quant_op(inp, hx=None)
        with tempfile.NamedTemporaryFile() as f:
            # NOTE: torch.save/load pickles the whole module; only safe here
            # because the file is produced and consumed by this same test.
            torch.save(quant_op, f)
            f.seek(0)
            loaded_model = torch.load(f)
        loaded_model.eval()
        # Parameters of the wrapper and of the wrapped module must survive.
        for (name, param) in quant_op.named_parameters(recurse=False):
            loaded_param = getattr(loaded_model, name)
            self.assertTrue(torch.equal(param, loaded_param), msg='param mismatched recurrent op param mis-matched, TestCase:{}'.format(tc.test_name))
        for (name, param) in quant_op.module_to_quantize.named_parameters():
            loaded_param = getattr(loaded_model.module_to_quantize, name)
            self.assertTrue(torch.equal(param, loaded_param), msg='original module mismatched, TestCase:{}'.format(tc.test_name))
        # Quantizer configuration and encodings must survive, too.
        for (name, output_quantizer) in quant_op.output_quantizers.items():
            if output_quantizer.enabled:
                self.compare_quantizer(output_quantizer, loaded_model.output_quantizers[name])
        for (name, quantizer) in quant_op.param_quantizers.items():
            if quantizer.enabled:
                self.compare_quantizer(quantizer, loaded_model.param_quantizers[name])
        # Finally the loaded op must reproduce the pre-save outputs exactly.
        (o_post, h_post) = loaded_model(inp, hx=None)
        self.assertTrue(torch.equal(o_pre, o_post), msg='output mismatched, Failed TestCase:{}'.format(tc.test_name))
        if isinstance(h_pre, tuple):
            for (pre, post) in zip(h_pre, h_post):
                self.assertTrue(torch.equal(pre, post), msg='h or c mismatched, Failed TestCase:{}'.format(tc.test_name))
        else:
            self.assertTrue(torch.equal(h_pre, h_post), msg='h mis-matched, Failed TestCase:{}'.format(tc.test_name))
def verify_packed_sequence_inputs(self, tc: TestCase):
    """Check that QcQuantizeRecurrent matches the wrapped recurrent module
    when fed a PackedSequence input.

    :param tc: test-case descriptor providing the model, input shape, device,
        optional per-batch sequence lengths and the ``valid_hx`` flag.
    """
    # 32-bit widths plus the disabled quantizers below make the wrapper
    # numerically transparent, so its outputs should equal the raw module's.
    quant_op = QcQuantizeRecurrent(module_to_quantize=tc.model, weight_bw=32, activation_bw=32, is_symmetric=False, quant_scheme=QuantScheme.post_training_tf_enhanced, round_mode='nearest', data_type=QuantizationDataType.int)
    for input_quantizer in quant_op.input_quantizers.values():
        input_quantizer.enabled = False
    for (name, param) in quant_op.named_parameters(recurse=False):
        quant_op.param_quantizers[name].enabled = False
    x = torch.rand(tc.input_shape).to(tc.device)
    h = None
    seq_len_for_packing = []
    if (tc.sequence_lens is None):
        # Synthesize decreasing per-batch lengths (minimum 1) from the input
        # shape; which axis is batch vs. sequence depends on batch_first.
        if tc.model.batch_first:
            num_batches = tc.input_shape[0]
            seq_len = tc.input_shape[1]
        else:
            num_batches = tc.input_shape[1]
            seq_len = tc.input_shape[0]
        for i in range(num_batches):
            seq_len_for_packing.append(max(1, (seq_len - i)))
    else:
        seq_len_for_packing = tc.sequence_lens
    x = pack_padded_sequence(x, seq_len_for_packing, batch_first=tc.model.batch_first, enforce_sorted=False)
    if tc.valid_hx:
        # Run the model once only to obtain a correctly shaped hidden state
        # to pass as an explicit hx in the comparison runs below.
        (o_rnn, h_rnn) = tc.model(input=x, hx=None)
        if isinstance(h_rnn, tuple):
            h = (h_rnn[0], h_rnn[1])
        else:
            h = torch.stack([h_rnn])
            h = h[0]  # stack+index round-trip leaves h a plain tensor
    (o_rnn, h_rnn) = tc.model(input=x, hx=h)
    (o_qc_rnn, h_qc_rnn) = quant_op(x, hx=h)
    if (not isinstance(h_qc_rnn, tuple)):
        # Normalize the RNN/GRU single hidden state into a sequence so the
        # same comparison loop also covers the LSTM (h, c) tuple case.
        h_qc_rnn = [h_qc_rnn]
        h_rnn = [h_rnn]
    for (h, h_qc) in zip(h_rnn, h_qc_rnn):
        self.assertTrue(torch.allclose(h, h_qc, atol=1e-05), msg='h/c mismatched, Failed TestCase:{}'.format(tc.test_name))
    # Compare every PackedSequence field, not just the flat data tensor.
    self.assertTrue(torch.allclose(o_rnn.data, o_qc_rnn.data, atol=1e-05), msg='output data mismatched, Failed TestCase:{}'.format(tc.test_name))
    self.assertTrue(torch.equal(o_rnn.batch_sizes, o_qc_rnn.batch_sizes), msg='output batch_sizes mismatched, Failed TestCase:{}'.format(tc.test_name))
    if (o_rnn.unsorted_indices is not None):
        self.assertTrue(torch.equal(o_rnn.unsorted_indices, o_qc_rnn.unsorted_indices), msg='output unsorted_indices mismatched, Failed TestCase:{}'.format(tc.test_name))
    if (o_rnn.sorted_indices is not None):
        self.assertTrue(torch.equal(o_rnn.sorted_indices, o_qc_rnn.sorted_indices), msg='output sorted_indices mismatched, Failed TestCase:{}'.format(tc.test_name))
def test_qc_rnn_equivalence(self):
    """Run the custom-op equivalence check over every registered test case."""
    for testcase in TestQcQuantizeRecurrentOp.testcases:
        self.verify_custom_op(testcase)
def test_qc_recurrent_backward(self):
    """Validate that gradients flow through the quantized recurrent op."""
    torch.manual_seed(0)  # deterministic weights/inputs across runs
    for testcase in TestQcQuantizeRecurrentOp.testcases:
        self.validate_backward_pass(testcase)
def test_save_and_load_rnn(self):
    """Round-trip every test case through torch.save/torch.load checks."""
    torch.manual_seed(0)  # deterministic weights/inputs across runs
    for testcase in TestQcQuantizeRecurrentOp.testcases:
        self.validate_serialize_deserialize(testcase)
def test_qc_rnn_default_lstm_quantizer_configuration(self):
    """Verify the default quantizer wiring QcQuantizeRecurrent builds for a
    2-layer bidirectional LSTM: which input/output/param quantizers are
    enabled, their bitwidths, and how tensors share grouped quantizers.
    """
    model = torch.nn.LSTM(input_size=4, hidden_size=5, num_layers=2, bidirectional=True, bias=True)
    quant_op = QcQuantizeRecurrent(module_to_quantize=model, weight_bw=8, activation_bw=8, is_symmetric=False, quant_scheme=QuantScheme.post_training_tf_enhanced, round_mode='nearest', data_type=QuantizationDataType.int)
    # Per layer: input and initial-hidden (h) quantizers are active at the
    # requested activation bitwidth; cell-state (c) quantizers stay disabled.
    self.assertEqual(8, quant_op.input_quantizers['input_l0'].bitwidth)
    self.assertEqual(8, quant_op.input_quantizers['initial_h_l0'].bitwidth)
    self.assertFalse(quant_op.input_quantizers['initial_c_l0'].enabled)
    self.assertEqual(8, quant_op.input_quantizers['input_l1'].bitwidth)
    self.assertEqual(8, quant_op.input_quantizers['initial_h_l1'].bitwidth)
    self.assertFalse(quant_op.input_quantizers['initial_c_l1'].enabled)
    self.assertEqual(8, quant_op.output_quantizers['h_l0'].bitwidth)
    self.assertFalse(quant_op.output_quantizers['c_l0'].enabled)
    self.assertEqual(8, quant_op.output_quantizers['h_l1'].bitwidth)
    self.assertFalse(quant_op.output_quantizers['c_l1'].enabled)
    # Weight quantizers use the weight bitwidth; everything else among the
    # parameters (i.e. biases) is left unquantized by default.
    for (name, quantizer) in quant_op.param_quantizers.items():
        if ('weight' in name):
            self.assertEqual(8, quantizer.bitwidth, msg='weight quantizer={}'.format(name))
        else:
            self.assertFalse(quantizer.enabled)
    # Expected grouping: tensors listed under one group name share a single
    # quantizer (forward/reverse weights, initial vs. per-step hidden state).
    group_cfg = {'hidden_l0': ['initial_h_l0', 'h_l0'], 'bias_l0': ['bias_ih_l0', 'bias_hh_l0', 'bias_ih_l0_reverse', 'bias_hh_l0_reverse'], 'W_l0': ['weight_ih_l0', 'weight_ih_l0_reverse'], 'R_l0': ['weight_hh_l0', 'weight_hh_l0_reverse'], 'hidden_l1': ['initial_h_l1', 'h_l1'], 'bias_l1': ['bias_ih_l1', 'bias_hh_l1', 'bias_ih_l1_reverse', 'bias_hh_l1_reverse'], 'W_l1': ['weight_ih_l1', 'weight_ih_l1_reverse'], 'R_l1': ['weight_hh_l1', 'weight_hh_l1_reverse']}
    for (group_name, tensor_names) in group_cfg.items():
        group_quantizer = quant_op.grouped_quantizers[group_name]
        # Bias groups are disabled by default; all other groups are enabled.
        if ('bias' not in group_name):
            self.assertTrue(group_quantizer.enabled)
        else:
            self.assertFalse(group_quantizer.enabled)
        # Every member tensor must resolve to the shared group quantizer,
        # whichever collection (input/output/param) it lives in.
        for tensor_name in tensor_names:
            if (tensor_name in quant_op.input_quantizers):
                self.assertEqual(group_quantizer, quant_op.input_quantizers[tensor_name])
            elif (tensor_name in quant_op.output_quantizers):
                self.assertEqual(group_quantizer, quant_op.output_quantizers[tensor_name])
            else:
                self.assertIn(tensor_name, quant_op.param_quantizers)
                self.assertEqual(group_quantizer, quant_op.param_quantizers[tensor_name])
def test_packed_sequence_inputs_equivalence(self):
    """Run the PackedSequence equivalence check over all registered cases."""
    for testcase in TestQcQuantizeRecurrentOp.testcases:
        self.verify_packed_sequence_inputs(testcase)
class MultiHeadedAttention(torch.nn.Module):
    """Multi-head attention with distinct query and value sizes.

    Projects queries, keys and values into ``h`` heads of dimension ``d_k``,
    applies scaled dot-product attention, then recombines the heads and
    projects the result back.
    """

    def __init__(self, h, query_size, value_size, dropout=0.1):
        super().__init__()
        assert ((query_size % h) == 0)
        assert ((value_size % h) == 0)
        self.d_k = value_size // h  # per-head feature dimension
        self.h = h
        # Projections in order: [query, key, value, output].
        self.linears = torch.nn.ModuleList([
            torch.nn.Linear(query_size, value_size),
            torch.nn.Linear(value_size, value_size),
            torch.nn.Linear(value_size, value_size),
            torch.nn.Linear(value_size, value_size),
        ])
        self.attn = None  # most recent attention weights, kept for inspection
        self.dropout = torch.nn.Dropout(p=dropout)

    def forward(self, query, values, attn_mask=None):
        """Return (output, attention_weights) for the given query/values."""
        if attn_mask is not None:
            # Insert a head axis so one mask broadcasts over every head.
            attn_mask = attn_mask.unsqueeze(1)
        nbatches = query.size(0)
        # Project, then reshape each tensor to (batch, heads, seq, d_k).
        projected = []
        for layer, tensor in zip(self.linears, (query, values, values)):
            projected.append(layer(tensor).view(nbatches, -1, self.h, self.d_k).transpose(1, 2))
        query, keys, values = projected
        x, self.attn = transformer.attention(query, keys, values, mask=attn_mask, dropout=self.dropout)
        # Merge the heads back into (batch, seq, h * d_k), then drop a
        # singleton sequence dimension if present.
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        x = x.squeeze(1)
        return (self.linears[-1](x), self.attn)
def _recon_lcs(x, y):
    """Reconstruct one longest common subsequence of ``x`` and ``y``.

    Walks the LCS dynamic-programming table produced by ``_lcs`` backwards
    from ``(len(x), len(y))``, collecting matched elements.

    :param x: first sequence
    :param y: second sequence
    :returns: list of ``(element, index_in_x)`` pairs in left-to-right order,
        or ``[]`` if the sequences share no common subsequence.
    """
    i, j = len(x), len(y)
    table = _lcs(x, y)
    if table[(i, j)] == 0:
        return []
    lcs = []
    # Standard LCS backtrack: on a match consume both indices and record the
    # element; otherwise step toward whichever neighbor holds the longer LCS.
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            lcs.insert(0, (x[i - 1], i - 1))
            i -= 1
            j -= 1
        elif table[(i - 1, j)] > table[(i, j - 1)]:
            i -= 1
        else:
            j -= 1
    return lcs
def _read_spotting_detections_and_labels(results_dir: Path, video_data: List[VideoDatum]):
    """Load per-video spotting detections, plus labels where available.

    :param results_dir: root directory containing per-video result files.
    :param video_data: videos whose relative paths locate the result files.
    :returns: ``(detections, labels)`` as lists of numpy arrays; ``labels``
        may be shorter than ``detections`` when label files are missing.
    """
    detections = []
    labels = []
    for video_datum in video_data:
        base_path = results_dir / video_datum.relative_path
        # Labels are optional; detections are expected for every video.
        labels_path = _labels_path(base_path, Task.SPOTTING)
        if labels_path.exists():
            labels.append(np.load(str(labels_path)))
        detections.append(np.load(str(_spotting_path(base_path))))
    return detections, labels
def notify_closing(handle: Handle | int | _HasFileNo) -> None:
    """Notify the I/O manager that *handle* is about to be closed, so tasks
    blocked waiting on it can be woken appropriately.

    :raises RuntimeError: if called outside an active async run context.
    """
    # Marker read by the keyboard-interrupt protection machinery; it must be
    # set through locals() in exactly this form — do not refactor.
    locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
    try:
        return GLOBAL_RUN_CONTEXT.runner.io_manager.notify_closing(handle)
    except AttributeError:
        # Missing runner/io_manager means no run loop is active on this thread.
        raise RuntimeError('must be called from async context') from None
class TAKInfo(StreamInfo):
    """Stream information for a TAK (Tom's lossless Audio Kompressor) file.

    Populated by parsing the metadata blocks at the start of the file:
    channels, sample rate, duration and encoder version.
    """

    # Defaults used until (or unless) the corresponding block is parsed.
    channels = 0
    length = 0
    sample_rate = 0
    bitrate = 0
    encoder_info = ''

    # Translate low-level I/O and bit-reader failures into TAKHeaderError.
    _error(IOError, TAKHeaderError)
    _error(BitReaderError, TAKHeaderError)

    def __init__(self, fileobj):
        """Parse TAK metadata from *fileobj*.

        :param fileobj: binary file-like object positioned at file start.
        :raises TAKHeaderError: on a bad magic, or when no stream-info block
            appears before the END block.
        """
        stream_id = fileobj.read(4)
        if ((len(stream_id) != 4) or (not (stream_id == b'tBaK'))):
            raise TAKHeaderError('not a TAK file')
        bitreader = _LSBBitReader(fileobj)
        found_stream_info = False
        # Walk the metadata blocks: 7-bit type id, 1 flag bit, 24-bit size.
        while True:
            type = TAKMetadata(bitreader.bits(7))  # NOTE: shadows builtin `type`
            bitreader.skip(1)
            # Little-endian 24-bit block size, zero-padded to unpack as <I.
            size = struct.unpack('<I', (bitreader.bytes(3) + b'\x00'))[0]
            data_size = (size - CRC_SIZE)  # payload size without trailing CRC
            pos = fileobj.tell()
            if (type == TAKMetadata.END):
                break
            elif (type == TAKMetadata.STREAM_INFO):
                self._parse_stream_info(bitreader, size)
                found_stream_info = True
            elif (type == TAKMetadata.ENCODER_INFO):
                self._parse_encoder_info(bitreader, data_size)
            assert bitreader.is_aligned()
            # Seek past the whole block regardless of how much was consumed
            # (also skips blocks of types we don't handle).
            fileobj.seek((pos + size))
        if (not found_stream_info):
            raise TAKHeaderError('missing stream info')
        if (self.sample_rate > 0):
            self.length = (self.number_of_samples / float(self.sample_rate))

    def _parse_stream_info(self, bitreader, size):
        """Decode the stream-info block: sample count/rate, depth, channels."""
        if ((size < STREAM_INFO_SIZE_MIN) or (size > STREAM_INFO_SIZE_MAX)):
            raise TAKHeaderError('stream info has invalid length')
        bitreader.skip(ENCODER_INFO_CODEC_BITS)
        bitreader.skip(ENCODER_INFO_PROFILE_BITS)
        bitreader.skip(SIZE_INFO_FRAME_DURATION_BITS)
        self.number_of_samples = bitreader.bits(SIZE_INFO_SAMPLE_NUM_BITS)
        bitreader.skip(AUDIO_FORMAT_DATA_TYPE_BITS)
        # Stored values are offsets from the format-defined minimums.
        self.sample_rate = (bitreader.bits(AUDIO_FORMAT_SAMPLE_RATE_BITS) + SAMPLE_RATE_MIN)
        self.bits_per_sample = (bitreader.bits(AUDIO_FORMAT_SAMPLE_BITS_BITS) + SAMPLE_BITS_MIN)
        self.channels = (bitreader.bits(AUDIO_FORMAT_CHANNEL_NUM_BITS) + CHANNEL_NUM_MIN)
        bitreader.skip(AUDIO_FORMAT_HAS_EXTENSION_BITS)

    def _parse_encoder_info(self, bitreader, size):
        """Decode the encoder-info block into 'TAK major.minor.patch'."""
        # Version bytes are stored in reverse order: patch, minor, major.
        patch = bitreader.bits(8)
        minor = bitreader.bits(8)
        major = bitreader.bits(8)
        self.encoder_info = ('TAK %d.%d.%d' % (major, minor, patch))

    def pprint(self):
        """Return a one-line human-readable summary of the stream."""
        return (u'%s, %d Hz, %d bits, %.2f seconds, %d channel(s)' % ((self.encoder_info or 'TAK'), self.sample_rate, self.bits_per_sample, self.length, self.channels))
# NOTE(review): the six lines below appear to be decorators for docker_push()
# whose `@`-prefixed callables were stripped during extraction — presumably
# `@click.option(...)` for the five option tuples and a command registration
# (only `_commands.command(...)` survives of its name). As written they are
# not valid standalone statements (keyword arguments in a bare tuple).
# TODO: confirm against the upstream reana-dev source and restore the `@`
# decorator syntax; do not "fix" by deleting — the option metadata is needed.
('--user', '-u', default='reanahub', help='DockerHub user name [reanahub]')
('--tag', '-t', default='latest', help="Image tag to push. Default 'latest'. Use 'auto' to push git-tag-based value such as '0.7.0-alpha.3'")
('--component', '-c', multiple=True, default=['CLUSTER'], help='Which components? [name|CLUSTER]')
('--registry', '-r', default='docker.io', help='Registry to use in the image tag [default=docker.io]')
('--image-name', help='Does the local image have a custom name?')
_commands.command(name='docker-push')
def docker_push(user, tag, component, registry, image_name):
    """Push Docker image(s) for the selected component(s) to a registry.

    :param user: registry user/namespace the image lives under.
    :param tag: image tag; 'auto' derives a git-tag-based value per component.
    :param component: component selector(s) expanded by select_components().
    :param registry: registry host to push to.
    :param image_name: optional custom image name (single component only).
    """
    components = select_components(component)
    # A custom image name is ambiguous across multiple components.
    if image_name and len(components) > 1:
        click.secho('Cannot use custom image name with multiple components.', fg='red')
        sys.exit(1)
    for component in components:
        if not is_component_dockerised(component):
            display_message('Ignoring this component that does not contain a Dockerfile.', component)
            continue
        component_tag = get_docker_tag(component) if tag == 'auto' else tag
        cmd = f'docker push {registry}/{user}/{image_name or component}:{component_tag}'
        run_command(cmd, component)
class CIFAR_Net(nn.Module):
    """Small CNN for CIFAR-style images: two conv blocks, three FC layers.

    Batch-normalization layers are inserted when ``args.BatchNorm`` is true;
    the dropout probability comes from ``args.dp_rate``.
    """

    def __init__(self, args):
        super(CIFAR_Net, self).__init__()
        use_bn = args.BatchNorm
        drop = args.dp_rate
        pool_cfg = dict(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
        # NOTE: conv1 applies ReLU before BatchNorm while conv2 applies
        # BatchNorm before ReLU — kept as-is to match the original network.
        if use_bn:
            self.conv1 = torch.nn.Sequential(nn.Conv2d(3, 10, kernel_size=5), nn.ReLU(), nn.BatchNorm2d(10), nn.Dropout(p=drop), nn.MaxPool2d(**pool_cfg))
        else:
            self.conv1 = torch.nn.Sequential(nn.Conv2d(3, 10, kernel_size=5), nn.ReLU(), nn.Dropout(p=drop), nn.MaxPool2d(**pool_cfg))
        if use_bn:
            self.conv2 = torch.nn.Sequential(nn.Conv2d(10, 20, kernel_size=5), nn.BatchNorm2d(20), nn.ReLU(), nn.Dropout(p=drop), nn.MaxPool2d(**pool_cfg))
        else:
            self.conv2 = torch.nn.Sequential(nn.Conv2d(10, 20, kernel_size=5), nn.ReLU(), nn.Dropout(p=drop), nn.MaxPool2d(**pool_cfg))
        if use_bn:
            self.fc1 = torch.nn.Sequential(nn.Linear(500, 500), nn.BatchNorm1d(500), nn.Dropout(p=drop), nn.ReLU())
            self.fc2 = torch.nn.Sequential(nn.Linear(500, 500), nn.BatchNorm1d(500), nn.Dropout(p=drop), nn.ReLU())
        else:
            self.fc1 = torch.nn.Sequential(nn.Linear(500, 500), nn.Dropout(p=drop), nn.ReLU())
            self.fc2 = torch.nn.Sequential(nn.Linear(500, 500), nn.Dropout(p=drop), nn.ReLU())
        self.fc3 = torch.nn.Sequential(nn.Linear(500, 10))

    def forward(self, f, if_decov=False):
        """Return 10-way class logits for a batch ``f`` sized so the conv
        output flattens to 500 features (e.g. 32x32 CIFAR images).
        ``if_decov`` is accepted for interface compatibility but unused here.
        """
        out = self.conv1(f)
        out = self.conv2(out)
        out = out.view(-1, 500)
        out = self.fc1(out)
        feat = self.fc2(out)
        return self.fc3(feat)
def setup_args():
    """Build the top-level CLI parser and its required codec sub-parser group.

    :returns: ``(parser, subparsers)`` so callers can register one sub-command
        per codec before parsing.
    """
    parser = argparse.ArgumentParser(description='Collect codec metrics and performances.')
    subparsers = parser.add_subparsers(dest='codec', help='Select codec')
    subparsers.required = True  # a codec sub-command must be supplied
    parser.add_argument('image', type=str, help='image filepath')
    parser.add_argument('target', type=float, help='target value to match')
    parser.add_argument('-m', '--metric', type=str, choices=['bpp', 'psnr', 'ms-ssim'], default='bpp')
    parser.add_argument('--save', action='store_true', help='Save reconstructed image to disk')
    parser.add_argument('--prefix', type=str, default='.')
    return parser, subparsers
class ParameterAssignment(VersionBase):
    """Assignment of a value to a named parameter (ParameterAssignment XML
    element).

    :param parameterref: name/reference of the parameter being assigned
    :param value: value to assign; stringified when serialized to XML
    """

    def __init__(self, parameterref, value):
        self.parameterref = parameterref
        self.value = value

    def __eq__(self, other):
        """Compare by serialized attributes; False for any other type.

        Fix: the original fell through and returned None (instead of False)
        when *other* was not a ParameterAssignment.
        """
        if isinstance(other, ParameterAssignment):
            return self.get_attributes() == other.get_attributes()
        return False

    @staticmethod
    def parse(element):
        """Build a ParameterAssignment from an XML element.

        Fix: declared @staticmethod — the original took no ``self`` yet was
        an instance method, so calling it on an instance passed the instance
        as *element*. Class-level calls behave exactly as before.

        :param element: element with 'parameterRef' and 'value' attributes
        :raises KeyError: if either required attribute is missing
        """
        value = element.attrib['value']
        parameterref = element.attrib['parameterRef']
        return ParameterAssignment(parameterref, value)

    def get_attributes(self):
        """Return the XML attribute dict for this assignment."""
        retdict = {}
        retdict['parameterRef'] = self.parameterref
        retdict['value'] = str(self.value)
        return retdict

    def get_element(self):
        """Return this assignment serialized as an ET.Element."""
        return ET.Element('ParameterAssignment', attrib=self.get_attributes())
def sort_along_x(x, y):
    """Sort each paired row of ``x``/``y`` by descending x value.

    :param x: iterable of per-row sequences (the sort keys)
    :param y: iterable of per-row sequences paired element-wise with ``x``
    :returns: ``(sorted_x, sorted_y)`` as lists of lists; each row of x is in
        descending order and the matching y row is permuted identically.
    """
    sorted_x = []
    sorted_y = []
    for row_x, row_y in zip(x, y):
        keys = np.array(row_x)
        vals = np.array(row_y)
        # argsort is ascending; reversing the index order gives descending x.
        order = np.argsort(keys, axis=0)[::-1]
        sorted_x.append(np.take_along_axis(keys, order, axis=0).tolist())
        sorted_y.append(np.take_along_axis(vals, order, axis=0).tolist())
    return sorted_x, sorted_y
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.