code stringlengths 281 23.7M |
|---|
def predict_from_folder(model: str, input_folder: str, output_folder: str, folds: Union[Tuple[int], List[int]], save_npz: bool, num_threads_preprocessing: int, num_threads_nifti_save: int, lowres_segmentations: Union[str, None], part_id: int, num_parts: int, tta: bool, mixed_precision: bool=True, overwrite_existing: bool=True, mode: str='normal', overwrite_all_in_gpu: bool=None, step_size: float=0.5, checkpoint_name: str='model_final_checkpoint', segmentation_export_kwargs: dict=None):
    """Run inference on every case found in ``input_folder`` and write the
    predicted segmentations to ``output_folder``.

    :param model: folder containing the trained model (must hold a plans.pkl)
    :param folds: which folds' checkpoints to ensemble
    :param lowres_segmentations: folder with <case_id>.nii.gz low-res predictions
        (cascade models), or None
    :param part_id / num_parts: shard the case list for multi-process inference
    :param mode: 'normal', 'fast' or 'fastest' — selects the predict_cases* backend
    :raises ValueError: if ``mode`` is not one of the three recognised values
    """
    # Validate the model folder BEFORE creating/copying anything into the
    # output folder (the original copied plans.pkl first and would crash with
    # a confusing shutil error instead of the intended assertion message).
    assert isfile(join(model, 'plans.pkl')), 'Folder with saved model weights must contain a plans.pkl file'
    maybe_mkdir_p(output_folder)
    shutil.copy(join(model, 'plans.pkl'), output_folder)
    expected_num_modalities = load_pickle(join(model, 'plans.pkl'))['num_modalities']
    case_ids = check_input_folder_and_return_caseIDs(input_folder, expected_num_modalities)
    output_files = [join(output_folder, i + '.nii.gz') for i in case_ids]
    all_files = subfiles(input_folder, suffix='.nii.gz', join=False, sort=True)
    # A case's modality files are named <case_id>_XXXX.nii.gz, i.e. the case id
    # followed by exactly 12 characters ('_0000.nii.gz').
    # (i[:len(j)].startswith(j) in the original is just i.startswith(j).)
    list_of_lists = [[join(input_folder, i) for i in all_files
                      if i.startswith(j) and len(i) == len(j) + 12] for j in case_ids]
    if lowres_segmentations is not None:
        assert isdir(lowres_segmentations), 'if lowres_segmentations is not None then it must point to a directory'
        lowres_segmentations = [join(lowres_segmentations, i + '.nii.gz') for i in case_ids]
        assert all(isfile(i) for i in lowres_segmentations), 'not all lowres_segmentations files are present. (I was searching for case_id.nii.gz in that folder)'
        lowres_segmentations = lowres_segmentations[part_id::num_parts]
    else:
        lowres_segmentations = None
    if mode not in ('normal', 'fast', 'fastest'):
        raise ValueError('unrecognized mode. Must be normal, fast or fastest')
    # 'normal' keeps intermediate data on CPU by default; the fast modes
    # default to keeping everything on the GPU unless explicitly overridden.
    if overwrite_all_in_gpu is None:
        all_in_gpu = (mode != 'normal')
    else:
        all_in_gpu = overwrite_all_in_gpu
    if mode == 'normal':
        return predict_cases(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds, save_npz, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, tta, mixed_precision=mixed_precision, overwrite_existing=overwrite_existing, all_in_gpu=all_in_gpu, step_size=step_size, checkpoint_name=checkpoint_name, segmentation_export_kwargs=segmentation_export_kwargs)
    if mode == 'fast':
        assert save_npz is False
        return predict_cases_fast(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, tta, mixed_precision=mixed_precision, overwrite_existing=overwrite_existing, all_in_gpu=all_in_gpu, step_size=step_size, checkpoint_name=checkpoint_name, segmentation_export_kwargs=segmentation_export_kwargs)
    # mode == 'fastest' (validated above); note this backend takes no
    # segmentation_export_kwargs, matching the original call.
    assert save_npz is False
    return predict_cases_fastest(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, tta, mixed_precision=mixed_precision, overwrite_existing=overwrite_existing, all_in_gpu=all_in_gpu, step_size=step_size, checkpoint_name=checkpoint_name)
class TrackItemCollection(PymiereBaseCollection):
    """Collection of TrackItem objects exposed by Premiere's scripting API."""

    def __init__(self, pymiere_id):
        # 'numItems' is the ExtendScript property that holds this collection's length.
        super(TrackItemCollection, self).__init__(pymiere_id, 'numItems')

    def __getitem__(self, index):
        # The base collection returns raw keyword data for the item; wrap it.
        raw_item = super(TrackItemCollection, self).__getitem__(index)
        return TrackItem(**raw_item)

    def __iter__(self):
        # Materialize all items up front (matches the original eager behavior).
        all_items = [self[position] for position in range(len(self))]
        return iter(all_items)
@hookimpl(wrapper=True)  # NOTE(review): decorator reconstructed from mangled '(wrapper=True)' — confirm against _pytest source
def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> Generator[None, TestReport, TestReport]:
    """Hook wrapper that rewrites the test report for xfail semantics.

    Marks reports as 'skipped' with a ``wasxfail`` reason when an expected
    failure occurred, and as 'failed' on strict XPASS or when the raised
    exception does not match ``raises=``.
    """
    rep = yield
    xfailed = item.stash.get(xfailed_key, None)
    if item.config.option.runxfail:
        pass  # --runxfail: ignore xfail marks entirely
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        # Imperative pytest.xfail() call inside the test.
        assert call.excinfo.value.msg is not None
        rep.wasxfail = 'reason: ' + call.excinfo.value.msg
        rep.outcome = 'skipped'
    elif not rep.skipped and xfailed:
        if call.excinfo:
            raises = xfailed.raises
            if raises is not None and not isinstance(call.excinfo.value, raises):
                # Wrong exception type: a real failure, not the expected one.
                rep.outcome = 'failed'
            else:
                rep.outcome = 'skipped'
                rep.wasxfail = xfailed.reason
        elif call.when == 'call':
            # Test passed although it was expected to fail (XPASS).
            if xfailed.strict:
                rep.outcome = 'failed'
                rep.longrepr = '[XPASS(strict)] ' + xfailed.reason
            else:
                rep.outcome = 'passed'
                rep.wasxfail = xfailed.reason
    return rep
class AllGatherGrad(torch.autograd.Function):
    """Differentiable all-gather across a torch.distributed process group.

    ``forward`` returns a tensor of shape ``(world_size, *tensor.shape)``;
    ``backward`` all-reduces the incoming gradients and returns this rank's slice.
    """

    # torch.autograd.Function requires forward/backward to be staticmethods;
    # the decorators were missing and calling .apply() would fail otherwise.
    @staticmethod
    def forward(ctx: Any, tensor: torch.Tensor, group: Optional['torch.distributed.ProcessGroup']=None) -> torch.Tensor:
        ctx.group = group  # remembered for the backward all-reduce
        gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())]
        torch.distributed.all_gather(gathered_tensor, tensor, group=group)
        gathered_tensor = torch.stack(gathered_tensor, dim=0)
        return gathered_tensor

    @staticmethod
    def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
        grad_output = torch.cat(grad_output)
        # Sum gradients from all ranks, then keep only this rank's portion.
        torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group)
        return (grad_output[torch.distributed.get_rank()], None)
def _generate_sequential_enc_asset(file, model, image, precision=2):
    """Run ``image`` through each child of ``model`` sequentially and store a
    regression asset mapping layer names to TensorKeys of the intermediates."""
    model.eval()
    original_image = image.clone()  # keep the untouched input for the asset
    layer_keys = {}
    for name, child in model.named_children():
        image = child(image)
        layer_keys[name] = pystiche.TensorKey(image, precision=precision)
    store_asset({'image': original_image}, {'precision': precision}, {'enc_keys': layer_keys}, file)
class _EvalSession:
    """One titled evaluation/PTQ step of the auto-quant flow.

    Used as a context manager: shows a spinner, tees stdout into a diagnostics
    log, records success/error status, and (for PTQ sessions) exports the
    resulting quantsim model and encodings under ``results_dir``.
    """

    def __init__(self, title: str, quantsim_factory: Callable, eval_func: Callable[[ort.InferenceSession], float], results_dir: str, strict_validation: bool, ptq: bool):
        self.title = title
        self._quantsim_factory = quantsim_factory
        self._eval_func = eval_func
        self._results_dir = results_dir
        self._strict_validation = strict_validation
        self._ptq = ptq
        self._spinner = None
        self.result = {'status': None, 'error': None, 'target_satisfied': False, 'effective': True}
        os.makedirs(self._results_dir, exist_ok=True)
        self.diagnostics = Diagnostics()
        # e.g. "Batchnorm-Folding" -> "batchnorm_folding" (used as export file prefix)
        self.title_lowercase = self.title.lower().replace('-', ' ')
        self.title_lowercase = '_'.join(self.title_lowercase.split())
        stdout_write = sys.stdout.write
        self._log = io.StringIO()

        def write_wrapper(*args, **kwargs):
            # Tee: capture into the session log AND forward to the real stdout.
            self._log.write(*args, **kwargs)
            return stdout_write(*args, **kwargs)
        self._stdout_redirect = patch.object(sys.stdout, 'write', write_wrapper)
        self._ptq_result = None
        self._cached_result = None

    def is_ptq_session(self):
        """Return True if this session applies a PTQ technique."""
        return self._ptq

    def reset_status(self):
        """Reset the status dict to its initial (unstarted) state."""
        self.result = {'status': None, 'error': None, 'target_satisfied': False, 'effective': True}

    def wrap(self, fn):
        """Wrap ``fn`` so its first result is pickled to disk and replayed on
        subsequent calls instead of recomputing."""
        import functools
        import pickle
        from uuid import uuid4
        results_dir = self._results_dir

        class CachedResult:
            """Pickle an object to a uniquely-named hidden file; reload on demand."""

            def __init__(self, obj):
                self._filename = os.path.join(results_dir, f'.{uuid4()}')
                while os.path.exists(self._filename):
                    self._filename = os.path.join(results_dir, f'.{uuid4()}')
                with open(self._filename, 'wb') as f:
                    pickle.dump(obj, f)

            def load(self):
                with open(self._filename, 'rb') as f:
                    return pickle.load(f)

        # The original had a bare '(fn)' here — a stripped @functools.wraps(fn)
        # decorator; without it the wrapper loses fn's name/docstring.
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            if self._cached_result:
                return self._cached_result.load()
            ret = fn(*args, **kwargs)
            self._cached_result = CachedResult(ret)
            return ret
        return wrapper

    def eval(self, model: ONNXModel, **kwargs):
        """Build a quantsim for ``model`` and return its evaluation score."""
        sim = self._quantsim_factory(model, **kwargs)
        acc = self._eval_func(sim.session)
        return acc

    def __enter__(self):
        self._spinner = Spinner(self.title)
        self._spinner.__enter__()
        self._stdout_redirect.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._ptq_result is not None:
            _logger.info('Session finished: %s. (eval score: %f). Applied techniques: %s', self.title, self._ptq_result.accuracy, ' '.join(self._ptq_result.applied_techniques))
        self._spinner.__exit__(exc_type, exc_val, exc_tb)
        if exc_val:
            buffer = io.StringIO()
            traceback.print_exception(exc_type, exc_val, exc_tb, file=buffer)
            if self._strict_validation:
                print(buffer.getvalue())
            else:
                print(f'''
WARNING: The following exception was raised but ignored:

{buffer.getvalue()}
''')
        self._stdout_redirect.stop()
        self.diagnostics.add(self._log.getvalue())
        self.result['error'] = exc_val
        if not exc_val:
            self.result['status'] = 'success'
        elif self._strict_validation:
            self.result['status'] = 'error-failed'
        else:
            self.result['status'] = 'error-ignored'
        # Returning True swallows the exception (non-strict mode only).
        if exc_val and not self._strict_validation:
            return True
        return None

    def ptq_result(self) -> Optional[PtqResult]:
        """Return the PtqResult recorded by set_ptq_result, or None."""
        return self._ptq_result

    def set_ptq_result(self, applied_techniques: List[str], model: onnx.ModelProto=None, sim: QuantizationSimModel=None, acc: float=None, **kwargs) -> None:
        """Record the PTQ result, either from (model -> build sim + eval) or
        from an already-evaluated (sim, acc) pair — exactly one of the two."""
        if sim is None:
            assert acc is None
            assert model is not None
            sim = self._quantsim_factory(model, **kwargs)
            acc = self._eval_func(sim.session)
        else:
            assert acc is not None
            assert model is None
        self._set_ptq_result(sim, acc, applied_techniques)

    def _set_ptq_result(self, sim: QuantizationSimModel, acc: float, applied_techniques: List[str]) -> PtqResult:
        if self._ptq_result is not None:
            raise RuntimeError('sess.eval() can be called only once per each _EvalSession instance.')
        (model_path, encoding_path) = self._export(sim)
        self._ptq_result = PtqResult(model_path=model_path, encoding_path=encoding_path, accuracy=acc, applied_techniques=applied_techniques)
        return self._ptq_result

    def _export(self, sim: QuantizationSimModel) -> Tuple[str, str]:
        """Export the quantsim model/encodings and return their file paths."""
        sim.export(path=self._results_dir, filename_prefix=self.title_lowercase)
        model_path = os.path.join(self._results_dir, f'{self.title_lowercase}.onnx')
        encoding_path = os.path.join(self._results_dir, f'{self.title_lowercase}.encodings')
        _logger.info('The results of %s is saved in %s and %s.', self.title, model_path, encoding_path)
        return (model_path, encoding_path)
def parse_args():
    """Parse command-line arguments for FUNSD train/test set generation."""
    parser = argparse.ArgumentParser(description='Generate training and test set of FUNSD ')
    parser.add_argument('root_path', help='Root dir path of FUNSD')
    parser.add_argument('--nproc', default=1, type=int, help='Number of process')
    return parser.parse_args()
def ValidateFormats(argFormat, argName, errors):
    """Translate a bounding-box format CLI value into a BBFormat constant.

    ``None`` and 'xywh' both map to XYWH (the default); 'xyrb' maps to
    XYX2Y2. Anything else appends a message to ``errors`` and returns None.
    """
    if argFormat is None or argFormat == 'xywh':
        return BBFormat.XYWH
    if argFormat == 'xyrb':
        return BBFormat.XYX2Y2
    errors.append("argument %s: invalid value. It must be either 'xywh' or 'xyrb'" % argName)
class TestConnectedGraphUtils(unittest.TestCase):
    """Unit tests for connectedgraph_utils and ConnectedGraph node tracing."""

    def test_get_module_act_func_pair_with_modules(self):
        """Each conv/bn is paired with its following activation module (or None)."""
        model = test_models.TinyModel().eval()
        inp_tensor_list = [torch.randn(1, 3, 32, 32)]
        module_act_func_pair = connectedgraph_utils.get_module_act_func_pair(model, inp_tensor_list)
        self.assertEqual(len(module_act_func_pair), 12)
        self.assertTrue(isinstance(module_act_func_pair[model.bn1], torch.nn.ReLU))
        self.assertTrue(isinstance(module_act_func_pair[model.bn2], torch.nn.ReLU))
        self.assertTrue(isinstance(module_act_func_pair[model.conv3], torch.nn.ReLU))
        self.assertEqual(module_act_func_pair[model.conv1], None)
        self.assertEqual(module_act_func_pair[model.conv2], None)
        self.assertEqual(module_act_func_pair[model.fc], None)

    def test_get_module_act_func_pair_for_activations(self):
        """Pairing works for a variety of activation module types."""
        model = ModelWithMultipleActivations().eval()
        inp_tensor_list = [torch.randn(1, 3, 32, 32)]
        module_act_func_pair = connectedgraph_utils.get_module_act_func_pair(model, inp_tensor_list)
        self.assertTrue(isinstance(module_act_func_pair[model.bn1], torch.nn.Hardshrink))
        self.assertTrue(isinstance(module_act_func_pair[model.bn2], torch.nn.GELU))
        self.assertTrue(isinstance(module_act_func_pair[model.conv3], torch.nn.Tanhshrink))
        self.assertTrue(isinstance(module_act_func_pair[model.conv4], torch.nn.Mish))
        self.assertTrue(isinstance(module_act_func_pair[model.conv5], torch.nn.Softmax2d))
        self.assertEqual(module_act_func_pair[model.conv1], None)
        self.assertEqual(module_act_func_pair[model.hardshrink], None)

    def test_get_ops_with_missing_modules(self):
        """Functional (non-module) ops are reported as missing modules."""
        model = test_models.ModelWithFunctionalOps()
        rand_inp = torch.randn(1, 3, 32, 32)
        ops_with_missing_modules = connectedgraph_utils.get_ops_with_missing_modules(model, rand_inp)
        self.assertEqual(2, len(ops_with_missing_modules))

    def test_find_nodes_in_forward_pass_for_elementwise_ops(self):
        """Each elementwise wrapper module traces to exactly one aten node."""
        dummy_input = (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4))
        trace = torch.jit.trace(elementwise_ops.Add(), dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1
        dummy_input = (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4))
        trace = torch.jit.trace(elementwise_ops.Subtract(), dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1
        dummy_input = (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4))
        trace = torch.jit.trace(elementwise_ops.Multiply(), dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1
        dummy_input = (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4))
        trace = torch.jit.trace(elementwise_ops.Divide(), dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1
        dummy_input = (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4))
        trace = torch.jit.trace(elementwise_ops.MatMul(), dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1
        dummy_input = (torch.randn(1, 3, 4, 4), torch.randn(1, 3, 4, 4))
        trace = torch.jit.trace(elementwise_ops.Concat(), dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1

    # NOTE(review): this mark was mangled to a bare '.cuda' in the source;
    # reconstructed as the standard AIMET cuda-only test marker.
    @pytest.mark.cuda
    def test_find_nodes_in_forward_pass_for_custom_module(self):
        """Custom functional forward (detach + softplus + relu) yields 3 nodes."""
        class CustomModule(torch.nn.Module):
            # NOTE(review): stripped decorator restored — without @staticmethod
            # the bound call would receive the module instance as ``x``.
            @staticmethod
            def forward(x: torch.Tensor):
                y = x.detach()
                return y * torch.nn.functional.softplus(x).relu()
        dummy_input = torch.randn(1, 3, 4, 4)
        trace = torch.jit.trace(CustomModule().cuda(), dummy_input.cuda())
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 3

    def test_find_nodes_in_forward_pass_for_custom_conv2d_module(self):
        """Functional conv2d traces to a single aten node."""
        class CustomModule(torch.nn.Module):
            # NOTE(review): stripped @staticmethod restored (see above).
            @staticmethod
            def forward(inp: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor):
                return torch.nn.functional.conv2d(inp, weight, bias)
        dummy_input = torch.randn(1, 3, 4, 4)
        dummy_weight = torch.randn(32, 3, 1, 1)
        dummy_bias = torch.randn((32,))
        dummy_input = (dummy_input, dummy_weight, dummy_bias)
        trace = torch.jit.trace(CustomModule(), dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1

    def test_find_nodes_in_forward_pass_for_torch_nn_module(self):
        """Leaf torch.nn modules (Conv2d/ReLU/BatchNorm2d) each yield one node."""
        dummy_input = torch.randn(1, 3, 4, 4)
        conv = torch.nn.Conv2d(3, 3, 2).eval()
        print(conv.__class__)
        trace = torch.jit.trace(conv, dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1
        dummy_input = torch.randn(1, 3, 4, 4)
        relu = torch.nn.ReLU(inplace=True).eval()
        trace = torch.jit.trace(relu, dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1
        dummy_input = torch.randn(1, 3, 4, 4)
        bn = torch.nn.BatchNorm2d(3).eval()
        trace = torch.jit.trace(bn, dummy_input)
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(trace)
        assert len(nodes) == 1

    def test_find_nodes_in_forward_pass_for_unused_module(self):
        """A module never called in forward() contributes zero nodes."""
        class MultiOutputWithUnuseModel(torch.nn.Module):
            def __init__(self):
                super(MultiOutputWithUnuseModel, self).__init__()
                self.layer = test_models.TupleOutputModel()
                self.conv1 = torch.nn.Conv2d(2, 4, kernel_size=3, padding=1)
                self.conv2 = torch.nn.Conv2d(6, 4, kernel_size=3, padding=1)

            def forward(self, *inputs):
                (x, _, z) = self.layer(inputs[0])
                x1 = self.conv1(x)
                z1 = self.conv2(z)
                return torch.cat([x1, z1], 1)
        dummy_input = torch.rand(1, 3, 8, 8)
        model = MultiOutputWithUnuseModel().eval()
        print(model.conv1.__class__)
        trace = torch.jit.trace(model, dummy_input)
        # 'layer' here is the traced submodule that was fed conv inputs;
        # conv2 of the *traced top level* was used, conv2 of 'layer' was not.
        trace = getattr(trace, 'layer')
        conv2_trace = getattr(trace, 'conv2')
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(conv2_trace)
        assert len(nodes) == 0
        conv1_trace = getattr(trace, 'conv1')
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(conv1_trace)
        assert len(nodes) == 1

    def test_find_nodes_in_forward_pass_for_undefined_graph(self):
        """Submodules inlined by tracing have no standalone graph: 0 nodes, and
        accessing .graph raises."""
        dummy_input = torch.rand(1, 3, 8, 8)
        model = test_models.NestedSequentialModel().eval()
        print(model.inner_seq[1].__class__)
        trace = torch.jit.trace(model, dummy_input)
        inner_seq_trace = getattr(trace, 'inner_seq')
        bn_trace = getattr(inner_seq_trace, '1')
        nodes = ConnectedGraph._find_aten_nodes_in_forward_pass(bn_trace)
        assert len(nodes) == 0
        with pytest.raises(RuntimeError):
            _ = bn_trace.graph

    def test_constant_elementwise_inputs(self):
        """Constant tensor args to elementwise ops are flagged is_const."""
        class ConstantElementwiseInputModel(torch.nn.Module):
            def __init__(self):
                super(ConstantElementwiseInputModel, self).__init__()
                self.add = elementwise_ops.Add()
                self.mul = elementwise_ops.Multiply()

            def forward(self, inp):
                x = self.add(inp, torch.tensor(2.0))
                x = self.mul(torch.tensor(3.0), x)
                return x
        model = ConstantElementwiseInputModel()
        cg = ConnectedGraph(model, model_input=torch.randn(1, 6))
        assert len(get_all_ops_with_constant_inputs(cg)) == 2
        assert not cg.ordered_ops[0].inputs[0].is_const
        assert cg.ordered_ops[0].inputs[1].is_const
        assert cg.ordered_ops[1].inputs[0].is_const
        assert not cg.ordered_ops[1].inputs[1].is_const

    def test_constant_single_input(self):
        """Ops fed only by constants/buffers have constant inputs; downstream
        ops mixing in real data do not."""
        class ConstantSingleInputModel(torch.nn.Module):
            def __init__(self):
                super(ConstantSingleInputModel, self).__init__()
                self.relu = torch.nn.ReLU()
                self.relu2 = torch.nn.ReLU()
                self.add = elementwise_ops.Add()
                self.add2 = elementwise_ops.Add()
                self.register_buffer('constant_1', torch.tensor([3.0, 4.0]))

            def forward(self, inp):
                x = self.relu(torch.tensor([(- 1.0), 1.0]))
                y = self.relu2(self.constant_1)
                x = self.add(x, inp)
                x = self.add2(x, y)
                return x
        model = ConstantSingleInputModel()
        dummy_input = torch.randn(1, 2)
        cg = ConnectedGraph(model, model_input=dummy_input)
        assert cg.ordered_ops[0].inputs[0].is_const
        assert cg.ordered_ops[1].inputs[0].is_const
        assert not cg.ordered_ops[2].inputs[0].is_const
        assert not cg.ordered_ops[2].inputs[1].is_const
        assert not cg.ordered_ops[3].inputs[0].is_const
        assert not cg.ordered_ops[3].inputs[1].is_const

    def test_model_with_non_leaf_candidate_0(self):
        """A submodule listed in modules_to_treat_as_leaf collapses to one op
        whose inputs include the activation and all internal parameters."""
        class ConvLinearModel(torch.nn.Module):
            def __init__(self):
                super(ConvLinearModel, self).__init__()
                self.conv1 = nn.Conv2d(3, 12, kernel_size=(1, 1), stride=(1, 1), padding=0, bias=False)
                self.linear1 = nn.Linear(32, 32, bias=False)

            def forward(self, inp):
                y = self.conv1(inp)
                y = self.linear1(y)
                return y

        class TopLevelModel(torch.nn.Module):
            def __init__(self):
                super(TopLevelModel, self).__init__()
                self.layer1 = ConvLinearModel()
                self.linear1 = nn.Linear(32, 64, bias=False)

            def forward(self, inp):
                y = self.layer1(inp)
                y = self.linear1(y)
                return y
        model = TopLevelModel()
        model.eval()
        dummy_input = torch.randn(1, 3, 32, 32)
        aimet_torch.utils.modules_to_treat_as_leaf = [ConvLinearModel]
        cg_1 = ConnectedGraph(model, model_input=dummy_input)
        assert len(cg_1.ordered_ops) == 2
        assert len(cg_1.ordered_ops[0].inputs) == 3
        assert cg_1.ordered_ops[0].inputs[0].name == 'input_0_to_ConvLinearModel_0'
        assert cg_1.ordered_ops[0].inputs[0].is_model_input == True
        assert cg_1.ordered_ops[0].inputs[1].name == 'TopLevelModel.layer1.conv1.weight'
        assert cg_1.ordered_ops[0].inputs[1].is_model_input == False
        assert cg_1.ordered_ops[0].inputs[1].is_parm == True
        assert cg_1.ordered_ops[0].inputs[2].name == 'TopLevelModel.layer1.linear1.weight'
        assert cg_1.ordered_ops[0].inputs[2].is_model_input == False
        assert cg_1.ordered_ops[0].inputs[2].is_parm == True

    def test_model_with_non_leaf_candidate_1(self):
        """Leaf-collapsed submodule in the middle of the graph wires products
        correctly to its neighbours."""
        class ConvLinearModel(torch.nn.Module):
            def __init__(self):
                super(ConvLinearModel, self).__init__()
                self.conv1 = nn.Conv2d(3, 12, kernel_size=(1, 1), stride=(1, 1), padding=0, bias=False)
                self.linear1 = nn.Linear(3, 3, bias=False)

            def forward(self, inp):
                y = self.conv1(inp)
                y = self.linear1(y)
                return y

        class TopLevelModel(torch.nn.Module):
            def __init__(self):
                super(TopLevelModel, self).__init__()
                self.linear1 = nn.Linear(3, 3, bias=False)
                self.layer1 = ConvLinearModel()
                self.linear2 = nn.Linear(3, 6, bias=False)

            def forward(self, inp):
                y = self.linear1(inp)
                y = self.layer1(y)
                y = self.linear2(y)
                return y
        model = TopLevelModel()
        model.eval()
        dummy_input = torch.randn(1, 3, 3, 3)
        aimet_torch.utils.modules_to_treat_as_leaf = [ConvLinearModel]
        cg_1 = ConnectedGraph(model, model_input=dummy_input)
        assert len(cg_1.ordered_ops) == 3
        assert len(cg_1.ordered_ops[1].inputs) == 3
        assert cg_1.ordered_ops[0].inputs[0].name == 'input_0_to_Gemm_0'
        assert cg_1.ordered_ops[0].inputs[0].is_model_input == True
        assert cg_1.ordered_ops[0].inputs[1].name == 'TopLevelModel.linear1.weight'
        assert cg_1.ordered_ops[0].inputs[1].is_parm == True
        assert cg_1.ordered_ops[1].inputs[0].name == 'Gemm_0_to_ConvLinearModel_1'
        assert cg_1.ordered_ops[1].inputs[0].is_model_input == False
        assert cg_1.ordered_ops[1].inputs[1].is_parm == True
        assert cg_1.ordered_ops[1].inputs[2].is_parm == True
        assert cg_1.ordered_ops[2].inputs[0].name == 'ConvLinearModel_1_to_Gemm_2'
        assert cg_1.ordered_ops[2].inputs[0].is_model_input == False
        assert cg_1.ordered_ops[2].inputs[1].is_parm == True

    def test_conv_bn_mangle_nodes(self):
        """Conv (2 inputs) and BN (5 inputs) parameters are is_parm, not is_const."""
        class ModelWithConvBNMangleNodes(torch.nn.Module):
            def __init__(self):
                super(ModelWithConvBNMangleNodes, self).__init__()
                self.inner_seq = nn.Sequential(nn.Conv2d(3, 16, kernel_size=2, stride=2, padding=2, bias=False), nn.BatchNorm2d(16))
                self.seq_list = nn.Sequential(self.inner_seq, nn.ReLU(inplace=True))

            def forward(self, inp):
                return self.inner_seq(inp)
        dummy_input = torch.randn(1, 3, 8, 8)
        model = ModelWithConvBNMangleNodes()
        cg = ConnectedGraph(model, dummy_input)
        assert len(cg.ordered_ops[0].inputs) == 2
        assert len(cg.ordered_ops[1].inputs) == 5
        for inp in cg.ordered_ops[0].inputs[1:]:
            assert inp.is_parm
            assert not inp.is_const
        for inp in cg.ordered_ops[1].inputs[1:]:
            assert inp.is_parm
            assert not inp.is_const

    def test_remove_inputs_for_ops(self):
        """_remove_inputs_for_ops drops the first input of Conv ops and the
        last input of Add ops, removing their products too."""
        class MockConnectedGraph(ConnectedGraph):
            def __init__(self):
                # Deliberately skip super().__init__: only _ops/_products needed.
                self._ops = {}
                self._products = {}
        mcg = MockConnectedGraph()
        conv_1 = Op('conv_1', 'conv_1', None, False, 'Conv', None)
        p1 = Product('p1', None)
        p2 = Product('p2', None)
        conv_1.add_input(p1)
        conv_1.add_input(p2)
        mcg._ops[conv_1.name] = conv_1
        mcg._products[p1.name] = p1
        mcg._products[p2.name] = p2
        add_1 = Op('add_1', 'add_1', None, False, 'Add', None)
        p3 = Product('p3', None)
        p4 = Product('p4', None)
        p5 = Product('p5', None)
        add_1.add_input(p3)
        add_1.add_input(p4)
        add_1.add_input(p5)
        mcg._ops[add_1.name] = add_1
        mcg._products[p3.name] = p3
        mcg._products[p4.name] = p4
        mcg._products[p5.name] = p5
        assert len(conv_1.inputs) == 2
        assert conv_1.inputs == [p1, p2]
        assert p1.name in mcg._products.keys()
        assert len(add_1.inputs) == 3
        assert add_1.inputs == [p3, p4, p5]
        assert p5.name in mcg._products.keys()
        mcg._remove_inputs_for_ops()
        assert len(conv_1.inputs) == 1
        assert conv_1.inputs == [p2]
        assert p1.name not in mcg._products.keys()
        assert len(add_1.inputs) == 2
        assert add_1.inputs == [p3, p4]
        assert p5.name not in mcg._products.keys()
def _recat_pooled_embedding_grad_out(grad_output: Tensor, num_features_per_rank: List[int]) -> Tensor:
grad_outputs_by_rank = grad_output.split(num_features_per_rank, dim=1)
return torch.cat([grad_output_by_rank.contiguous().view((- 1)) for grad_output_by_rank in grad_outputs_by_rank], dim=0) |
class TestFileScope:
    """--dist=loadfile scheduling: all tests of one file land on one worker.

    NOTE(review): the embedded test-file sources below were mangled in this
    copy ('@pytest.mark' stripped from decorators, indentation collapsed);
    they are reconstructed here so pytester's generated files actually parse.
    """

    def test_by_module(self, pytester: pytest.Pytester) -> None:
        test_file = """
            import pytest
            class TestA:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass

            class TestB:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass
        """
        pytester.makepyfile(test_a=test_file, test_b=test_file)
        result = pytester.runpytest('-n2', '--dist=loadfile', '-v')
        test_a_workers_and_test_count = get_workers_and_test_count_by_prefix('test_a.py::TestA', result.outlines)
        test_b_workers_and_test_count = get_workers_and_test_count_by_prefix('test_b.py::TestB', result.outlines)
        # Each file's 10 tests must all run on a single worker.
        assert ((test_a_workers_and_test_count in ({'gw0': 10}, {'gw1': 0})) or (test_a_workers_and_test_count in ({'gw0': 0}, {'gw1': 10})))
        assert ((test_b_workers_and_test_count in ({'gw0': 10}, {'gw1': 0})) or (test_b_workers_and_test_count in ({'gw0': 0}, {'gw1': 10})))

    def test_by_class(self, pytester: pytest.Pytester) -> None:
        pytester.makepyfile(test_a="""
            import pytest
            class TestA:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass

            class TestB:
                @pytest.mark.parametrize('i', range(10))
                def test(self, i):
                    pass
        """)
        result = pytester.runpytest('-n2', '--dist=loadfile', '-v')
        test_a_workers_and_test_count = get_workers_and_test_count_by_prefix('test_a.py::TestA', result.outlines)
        test_b_workers_and_test_count = get_workers_and_test_count_by_prefix('test_a.py::TestB', result.outlines)
        # Both classes live in the same file, so both run on the same worker.
        assert ((test_a_workers_and_test_count in ({'gw0': 10}, {'gw1': 0})) or (test_a_workers_and_test_count in ({'gw0': 0}, {'gw1': 10})))
        assert ((test_b_workers_and_test_count in ({'gw0': 10}, {'gw1': 0})) or (test_b_workers_and_test_count in ({'gw0': 0}, {'gw1': 10})))

    def test_module_single_start(self, pytester: pytest.Pytester) -> None:
        """Single-test files are distributed across workers; a two-test file
        keeps both of its tests on one worker."""
        test_file1 = """
            import pytest
            def test():
                pass
        """
        test_file2 = """
            import pytest
            def test_1():
                pass
            def test_2():
                pass
        """
        pytester.makepyfile(test_a=test_file1, test_b=test_file1, test_c=test_file2)
        result = pytester.runpytest('-n2', '--dist=loadfile', '-v')
        a = get_workers_and_test_count_by_prefix('test_a.py::test', result.outlines)
        b = get_workers_and_test_count_by_prefix('test_b.py::test', result.outlines)
        c1 = get_workers_and_test_count_by_prefix('test_c.py::test_1', result.outlines)
        c2 = get_workers_and_test_count_by_prefix('test_c.py::test_2', result.outlines)
        assert a in ({'gw0': 1}, {'gw1': 1})
        assert b in ({'gw0': 1}, {'gw1': 1})
        assert a.items() != b.items()
        assert c1 == c2
class Trainer_t3():
    """Knowledge-distillation training driver for a student/teacher pair.

    Runs train_one_epoch + eval_performance per epoch, steps both LR
    schedulers, keeps the best-accuracy weights, and optionally checkpoints
    so training can be resumed.
    """

    def __init__(self, net, t_net, train_loader, test_loader, optimizer, optimizer_t, lr_scheduler, lr_scheduler_t, model_name, train_loger=None, pruned=False):
        # Student / teacher networks and their optimizers + schedulers.
        self.net = net
        self.t_net = t_net
        self.optimizer = optimizer
        self.optimizer_t = optimizer_t
        self.lr_scheduler = lr_scheduler
        self.lr_scheduler_t = lr_scheduler_t
        # Data, bookkeeping and flags.
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.model_name = model_name
        self.train_loger = train_loger
        self.pruned = pruned
        # Losses: CE for labels, two KL terms for distillation.
        self.criterion = nn.CrossEntropyLoss()
        self.KLloss_1step = KLLoss_t3()
        self.KLloss_2step = KLLoss_lowtem()

    def _ckpt_paths(self):
        """Return (net, opt, lrs, epoch/acc) checkpoint file paths."""
        prefix = os.path.join(self.train_loger.ckpt_cache, self.model_name)
        return (prefix + '_ckpt_net.pth', prefix + '_ckpt_opt.pth',
                prefix + '_ckpt_lrs.pth', prefix + '_ckpt_epoch_acc.pkl')

    def save_model(self):
        """Persist the student weights, unwrapping DataParallel if present."""
        print('\n ... Save Model ...')
        destination = os.path.join(self.train_loger.model_cache, self.model_name + '.pth')
        try:
            weights = self.net.module.state_dict()
        except AttributeError:
            weights = self.net.state_dict()
        torch.save(weights, destination)

    def load_check_point(self):
        """Restore net/optimizer/scheduler state; return (start_epoch, best_quan_acc)."""
        net_path, opt_path, lrs_path, state_path = self._ckpt_paths()
        self.net.load_state_dict(torch.load(net_path))
        self.optimizer.load_state_dict(torch.load(opt_path))
        self.lr_scheduler.load_state_dict(torch.load(lrs_path))
        with open(state_path, 'rb') as handle:
            saved = pickle.load(handle)
        return (saved['start_epoch'], saved['best_quan_acc'])

    def save_check_point(self, epoch, best_quan_acc):
        """Write net/optimizer/scheduler state plus resume metadata to disk."""
        net_path, opt_path, lrs_path, state_path = self._ckpt_paths()
        torch.save(self.net.state_dict(), net_path)
        torch.save(self.optimizer.state_dict(), opt_path)
        torch.save(self.lr_scheduler.state_dict(), lrs_path)
        with open(state_path, 'wb') as handle:
            pickle.dump({'start_epoch': epoch + 1, 'best_quan_acc': best_quan_acc}, handle)

    def __call__(self, total_epoch, save_check_point=True, resume=False):
        """Train for ``total_epoch`` epochs, optionally resuming from checkpoint."""
        start_epoch, best_quan_acc = 0, 0
        if resume:
            start_epoch, best_quan_acc = self.load_check_point()
            print('\n Resume, start_epoch=%d, best_quan_acc=%.3f' % (start_epoch, best_quan_acc))
        for epoch in range(start_epoch, total_epoch):
            print('\n %s | Current: %d | Total: %d' % (datetime.now(), epoch + 1, total_epoch))
            train_perf_epoch = train_one_epoch(
                s_net=self.net, t_net=self.t_net, epoch=epoch,
                train_loader=self.train_loader, optimizer=self.optimizer,
                optimizer_t=self.optimizer_t, criterion=self.criterion,
                criterion_kl_1step=self.KLloss_1step,
                criterion_kl_2step=self.KLloss_2step,
                log_fun=self.train_loger.print_log)
            if self.pruned:
                # Re-apply the pruning mask before evaluating.
                make_weights_zero(self.net)
            quan_perf_epoch = eval_performance(
                net=self.net, test_loader=self.test_loader,
                criterion=self.criterion, log_fun=self.train_loger.print_log)
            self.lr_scheduler.step()
            self.lr_scheduler_t.step()
            self.train_loger.print_perf(train_perf_epoch + quan_perf_epoch)
            if quan_perf_epoch[1] > best_quan_acc:
                best_quan_acc = quan_perf_epoch[1]
                self.save_model()
            if self.pruned:
                count_sparsity(self.net)
            if save_check_point:
                self.save_check_point(epoch, best_quan_acc)
# NOTE(review): the four decorator lines were mangled in this copy
# ('_bad_gc_old_pyvista', '.allow_bad_gc_pyside', '.parametrize', ...);
# reconstructed as pytest marks — confirm mark names against the test suite.
@pytest.mark.allow_bad_gc_old_pyvista
@pytest.mark.allow_bad_gc_pyside
@pytest.mark.parametrize('close_event', ['plotter_close', 'window_close', pytest.param('q_key_press', marks=pytest.mark.allow_bad_gc), 'menu_exit', 'del_finalizer'])
@pytest.mark.parametrize('empty_scene', [True, False])
def test_background_plotting_close(qtbot, close_event, empty_scene, plotting, ensure_closed):
    """Closing a BackgroundPlotter by any supported route tears down the
    window, interactor, menu and render timer, and marks the plotter closed."""
    plotter = _create_testing_scene(empty_scene)
    assert_hasattr(plotter, 'app_window', MainWindow)
    assert_hasattr(plotter, 'main_menu', QMenuBar)
    assert hasattr(plotter, 'iren')
    assert_hasattr(plotter, 'render_timer', QTimer)
    assert_hasattr(plotter, '_closed', bool)
    assert_hasattr(plotter, 'interactor', QVTKRenderWindowInteractor)
    window = plotter.app_window
    main_menu = plotter.main_menu
    assert not main_menu.isNativeMenuBar()
    interactor = plotter.interactor
    render_timer = plotter.render_timer
    qtbot.addWidget(window)
    # Wait for at least one render tick before showing anything.
    render_blocker = qtbot.wait_signals([render_timer.timeout], timeout=500)
    render_blocker.wait()
    with qtbot.wait_exposed(window, timeout=10000):
        window.show()
    with qtbot.wait_exposed(interactor, timeout=10000):
        interactor.show()
    assert window.isVisible()
    assert interactor.isVisible()
    assert main_menu.isVisible()
    assert render_timer.isActive()
    assert not plotter._closed
    # Trigger the close through the parametrized route and wait for the signal.
    with qtbot.wait_signals([window.signal_close], timeout=500):
        if close_event == 'plotter_close':
            plotter.close()
        elif close_event == 'window_close':
            window.close()
        elif close_event == 'q_key_press':
            qtbot.keyClick(interactor, 'q')
        elif close_event == 'menu_exit':
            plotter._menu_close_action.trigger()
        elif close_event == 'del_finalizer':
            plotter.__del__()
    assert not window.isVisible()
    assert not interactor.isVisible()
    assert not main_menu.isVisible()
    assert not render_timer.isActive()
    if Version(pyvista.__version__) < Version('0.27.0'):
        assert not hasattr(window.vtk_widget, 'iren')
    assert plotter._closed
def test_slots_unpickle_after_attr_removed():
    """Unpickling a slotted attrs instance still works after an attribute
    has been removed from the class definition (extra state is ignored)."""
    a = A(1, 2, 3)
    a_pickled = pickle.dumps(a)
    a_unpickled = pickle.loads(a_pickled)
    assert a_unpickled == a

    # NOTE(review): decorator was mangled to a bare '(slots=True)';
    # restored to the attrs slotted-class decorator.
    @attr.s(slots=True)
    class NEW_A:
        x = attr.ib()
        c = attr.ib()  # 'b' intentionally dropped from the new definition

    with mock.patch(f'{__name__}.A', NEW_A):
        new_a = pickle.loads(a_pickled)
        assert new_a.x == 1
        assert new_a.c == 3
        assert not hasattr(new_a, 'b')
class Sheet(tk.Frame):
def __init__(self, parent, name: str='!sheet', show_table: bool=True, show_top_left: bool=True, show_row_index: bool=True, show_header: bool=True, show_x_scrollbar: bool=True, show_y_scrollbar: bool=True, width: int=None, height: int=None, headers: List=None, header: List=None, default_header: str='letters', default_row_index: str='numbers', to_clipboard_delimiter='\t', to_clipboard_quotechar='"', to_clipboard_lineterminator='\n', from_clipboard_delimiters=['\t'], show_default_header_for_empty: bool=True, show_default_index_for_empty: bool=True, page_up_down_select_row: bool=True, expand_sheet_if_paste_too_big: bool=False, paste_insert_column_limit: int=None, paste_insert_row_limit: int=None, show_dropdown_borders: bool=False, arrow_key_down_right_scroll_page: bool=False, enable_edit_cell_auto_resize: bool=True, edit_cell_validation: bool=True, data_reference: List=None, data: List=None, startup_select: Tuple=None, startup_focus: bool=True, total_columns: int=None, total_rows: int=None, column_width: int=120, header_height: str='1', max_column_width: str='inf', max_row_height: str='inf', max_header_height: str='inf', max_index_width: str='inf', row_index: List=None, index: List=None, after_redraw_time_ms: int=20, row_index_width: int=None, auto_resize_default_row_index: bool=True, auto_resize_columns: Union[(int, None)]=None, auto_resize_rows: Union[(int, None)]=None, set_all_heights_and_widths: bool=False, set_cell_sizes_on_zoom: bool=False, row_height: str='1', zoom: int=100, font: Tuple=get_font(), header_font: Tuple=get_heading_font(), index_font: Tuple=get_index_font(), popup_menu_font: Tuple=get_font(), align: str='w', header_align: str='center', row_index_align: str='center', displayed_columns: List=[], all_columns_displayed: bool=True, displayed_rows: List=[], all_rows_displayed: bool=True, max_undos: int=30, outline_thickness: int=0, outline_color: str=theme_light_blue['outline_color'], column_drag_and_drop_perform: bool=True, row_drag_and_drop_perform: 
bool=True, empty_horizontal: int=150, empty_vertical: int=100, selected_rows_to_end_of_window: bool=False, horizontal_grid_to_end_of_window: bool=False, vertical_grid_to_end_of_window: bool=False, show_vertical_grid: bool=True, show_horizontal_grid: bool=True, display_selected_fg_over_highlights: bool=False, show_selected_cells_border: bool=True, theme='light blue', popup_menu_fg=theme_light_blue['popup_menu_fg'], popup_menu_bg=theme_light_blue['popup_menu_bg'], popup_menu_highlight_bg=theme_light_blue['popup_menu_highlight_bg'], popup_menu_highlight_fg=theme_light_blue['popup_menu_highlight_fg'], frame_bg=theme_light_blue['table_bg'], table_grid_fg=theme_light_blue['table_grid_fg'], table_bg=theme_light_blue['table_bg'], table_fg=theme_light_blue['table_fg'], table_selected_cells_border_fg=theme_light_blue['table_selected_cells_border_fg'], table_selected_cells_bg=theme_light_blue['table_selected_cells_bg'], table_selected_cells_fg=theme_light_blue['table_selected_cells_fg'], table_selected_rows_border_fg=theme_light_blue['table_selected_rows_border_fg'], table_selected_rows_bg=theme_light_blue['table_selected_rows_bg'], table_selected_rows_fg=theme_light_blue['table_selected_rows_fg'], table_selected_columns_border_fg=theme_light_blue['table_selected_columns_border_fg'], table_selected_columns_bg=theme_light_blue['table_selected_columns_bg'], table_selected_columns_fg=theme_light_blue['table_selected_columns_fg'], resizing_line_fg=theme_light_blue['resizing_line_fg'], drag_and_drop_bg=theme_light_blue['drag_and_drop_bg'], index_bg=theme_light_blue['index_bg'], index_border_fg=theme_light_blue['index_border_fg'], index_grid_fg=theme_light_blue['index_grid_fg'], index_fg=theme_light_blue['index_fg'], index_selected_cells_bg=theme_light_blue['index_selected_cells_bg'], index_selected_cells_fg=theme_light_blue['index_selected_cells_fg'], index_selected_rows_bg=theme_light_blue['index_selected_rows_bg'], 
index_selected_rows_fg=theme_light_blue['index_selected_rows_fg'], index_hidden_rows_expander_bg=theme_light_blue['index_hidden_rows_expander_bg'], header_bg=theme_light_blue['header_bg'], header_border_fg=theme_light_blue['header_border_fg'], header_grid_fg=theme_light_blue['header_grid_fg'], header_fg=theme_light_blue['header_fg'], header_selected_cells_bg=theme_light_blue['header_selected_cells_bg'], header_selected_cells_fg=theme_light_blue['header_selected_cells_fg'], header_selected_columns_bg=theme_light_blue['header_selected_columns_bg'], header_selected_columns_fg=theme_light_blue['header_selected_columns_fg'], header_hidden_columns_expander_bg=theme_light_blue['header_hidden_columns_expander_bg'], top_left_bg=theme_light_blue['top_left_bg'], top_left_fg=theme_light_blue['top_left_fg'], top_left_fg_highlight=theme_light_blue['top_left_fg_highlight']):
    """Build the spreadsheet widget: a tk.Frame containing the main table
    (MT), a row index (RI), column headers (CH), a top-left rectangle (TL)
    and two scrollbars, gridded according to the show_* flags.
    """
    tk.Frame.__init__(self, parent, background=frame_bg, highlightthickness=outline_thickness, highlightbackground=outline_color, highlightcolor=outline_color)
    self.C = parent
    self.name = name
    self.dropdown_class = Sheet_Dropdown
    self.last_event_data = DotDict()
    # one (initially empty) callback list per emitted event name
    self.bound_events = DotDict({k: [] for k in emitted_events})
    self.after_redraw_id = None
    self.after_redraw_time_ms = after_redraw_time_ms
    # explicit width/height fixes the frame size; the missing dimension
    # falls back to a default (300 high / 350 wide)
    if ((width is not None) or (height is not None)):
        self.grid_propagate(0)
    if (width is not None):
        self.config(width=width)
    if (height is not None):
        self.config(height=height)
    if ((width is not None) and (height is None)):
        self.config(height=300)
    if ((height is not None) and (width is None)):
        self.config(width=350)
    # cell (1, 1) of the frame's grid holds the main table and stretches
    self.grid_columnconfigure(1, weight=1)
    self.grid_rowconfigure(1, weight=1)
    self.RI = RowIndex(parentframe=self, row_index_align=self.convert_align(row_index_align), index_bg=index_bg, index_border_fg=index_border_fg, index_grid_fg=index_grid_fg, index_fg=index_fg, index_selected_cells_bg=index_selected_cells_bg, index_selected_cells_fg=index_selected_cells_fg, index_selected_rows_bg=index_selected_rows_bg, index_selected_rows_fg=index_selected_rows_fg, index_hidden_rows_expander_bg=index_hidden_rows_expander_bg, drag_and_drop_bg=drag_and_drop_bg, resizing_line_fg=resizing_line_fg, row_drag_and_drop_perform=row_drag_and_drop_perform, default_row_index=default_row_index, auto_resize_width=auto_resize_default_row_index, show_default_index_for_empty=show_default_index_for_empty)
    self.CH = ColumnHeaders(parentframe=self, default_header=default_header, header_align=self.convert_align(header_align), header_bg=header_bg, header_border_fg=header_border_fg, header_grid_fg=header_grid_fg, header_fg=header_fg, header_selected_cells_bg=header_selected_cells_bg, header_selected_cells_fg=header_selected_cells_fg, header_selected_columns_bg=header_selected_columns_bg, header_selected_columns_fg=header_selected_columns_fg, header_hidden_columns_expander_bg=header_hidden_columns_expander_bg, drag_and_drop_bg=drag_and_drop_bg, column_drag_and_drop_perform=column_drag_and_drop_perform, resizing_line_fg=resizing_line_fg, show_default_header_for_empty=show_default_header_for_empty)
    self.MT = MainTable(parentframe=self, max_column_width=max_column_width, max_header_height=max_header_height, max_row_height=max_row_height, max_index_width=max_index_width, row_index_width=row_index_width, header_height=header_height, column_width=column_width, row_height=row_height, show_index=show_row_index, show_header=show_header, enable_edit_cell_auto_resize=enable_edit_cell_auto_resize, edit_cell_validation=edit_cell_validation, page_up_down_select_row=page_up_down_select_row, expand_sheet_if_paste_too_big=expand_sheet_if_paste_too_big, paste_insert_column_limit=paste_insert_column_limit, paste_insert_row_limit=paste_insert_row_limit, show_dropdown_borders=show_dropdown_borders, arrow_key_down_right_scroll_page=arrow_key_down_right_scroll_page, display_selected_fg_over_highlights=display_selected_fg_over_highlights, show_vertical_grid=show_vertical_grid, show_horizontal_grid=show_horizontal_grid, to_clipboard_delimiter=to_clipboard_delimiter, to_clipboard_quotechar=to_clipboard_quotechar, to_clipboard_lineterminator=to_clipboard_lineterminator, from_clipboard_delimiters=from_clipboard_delimiters, column_headers_canvas=self.CH, row_index_canvas=self.RI, headers=headers, header=header, data_reference=(data if (data_reference is None) else data_reference), auto_resize_columns=auto_resize_columns, auto_resize_rows=auto_resize_rows, set_cell_sizes_on_zoom=set_cell_sizes_on_zoom, total_cols=total_columns, total_rows=total_rows, row_index=row_index, index=index, zoom=zoom, font=font, header_font=header_font, index_font=index_font, popup_menu_font=popup_menu_font, popup_menu_fg=popup_menu_fg, popup_menu_bg=popup_menu_bg, popup_menu_highlight_bg=popup_menu_highlight_bg, popup_menu_highlight_fg=popup_menu_highlight_fg, align=self.convert_align(align), table_bg=table_bg, table_grid_fg=table_grid_fg, table_fg=table_fg, show_selected_cells_border=show_selected_cells_border, table_selected_cells_border_fg=table_selected_cells_border_fg, table_selected_cells_bg=table_selected_cells_bg, table_selected_cells_fg=table_selected_cells_fg, table_selected_rows_border_fg=table_selected_rows_border_fg, table_selected_rows_bg=table_selected_rows_bg, table_selected_rows_fg=table_selected_rows_fg, table_selected_columns_border_fg=table_selected_columns_border_fg, table_selected_columns_bg=table_selected_columns_bg, table_selected_columns_fg=table_selected_columns_fg, displayed_columns=displayed_columns, all_columns_displayed=all_columns_displayed, displayed_rows=displayed_rows, all_rows_displayed=all_rows_displayed, selected_rows_to_end_of_window=selected_rows_to_end_of_window, horizontal_grid_to_end_of_window=horizontal_grid_to_end_of_window, vertical_grid_to_end_of_window=vertical_grid_to_end_of_window, empty_horizontal=empty_horizontal, empty_vertical=empty_vertical, max_undos=max_undos)
    self.TL = TopLeftRectangle(parentframe=self, main_canvas=self.MT, row_index_canvas=self.RI, header_canvas=self.CH, top_left_bg=top_left_bg, top_left_fg=top_left_fg, top_left_fg_highlight=top_left_fg_highlight)
    self.yscroll = ttk.Scrollbar(self, command=self.MT.set_yviews, orient='vertical')
    self.xscroll = ttk.Scrollbar(self, command=self.MT.set_xviews, orient='horizontal')
    # grid the sub-widgets requested by the show_* flags and wire the
    # scrollbars to the canvases they control
    if show_top_left:
        self.TL.grid(row=0, column=0)
    if show_table:
        self.MT.grid(row=1, column=1, sticky='nswe')
        self.MT['xscrollcommand'] = self.xscroll.set
        self.MT['yscrollcommand'] = self.yscroll.set
    if show_row_index:
        self.RI.grid(row=1, column=0, sticky='nswe')
        self.RI['yscrollcommand'] = self.yscroll.set
    if show_header:
        self.CH.grid(row=0, column=1, sticky='nswe')
        self.CH['xscrollcommand'] = self.xscroll.set
    if show_x_scrollbar:
        self.xscroll.grid(row=2, column=0, columnspan=2, sticky='nswe')
        self.xscroll_showing = True
        self.xscroll_disabled = False
    else:
        self.xscroll_showing = False
        self.xscroll_disabled = True
    if show_y_scrollbar:
        self.yscroll.grid(row=0, column=2, rowspan=3, sticky='nswe')
        self.yscroll_showing = True
        self.yscroll_disabled = False
    else:
        self.yscroll_showing = False
        self.yscroll_disabled = True
    self.update_idletasks()
    self.MT.update_idletasks()
    self.RI.update_idletasks()
    self.CH.update_idletasks()
    if (theme != 'light blue'):
        self.change_theme(theme)
        # forward any constructor color kwargs that differ from the
        # light-blue defaults so they override the chosen theme
        for (k, v) in locals().items():
            if ((k in theme_light_blue) and (v != theme_light_blue[k])):
                self.set_options(**{k: v})
    if set_all_heights_and_widths:
        self.set_all_cell_sizes_to_text()
    # optional initial selection: last element of startup_select picks the
    # kind ('cells', 'rows', 'cols'/'columns'); preceding elements are indices
    if (startup_select is not None):
        try:
            if (startup_select[(- 1)] == 'cells'):
                self.MT.create_selected(*startup_select)
                self.MT.set_currently_selected(startup_select[0], startup_select[1], type_='cell', inside=True)
                self.see(startup_select[0], startup_select[1])
            elif (startup_select[(- 1)] == 'rows'):
                self.MT.create_selected(startup_select[0], 0, startup_select[1], (len(self.MT.col_positions) - 1), 'rows')
                self.MT.set_currently_selected(startup_select[0], 0, type_='row', inside=True)
                self.see(startup_select[0], 0)
            elif (startup_select[(- 1)] in ('cols', 'columns')):
                self.MT.create_selected(0, startup_select[0], (len(self.MT.row_positions) - 1), startup_select[1], 'columns')
                self.MT.set_currently_selected(0, startup_select[0], type_='column', inside=True)
                self.see(0, startup_select[0])
        except Exception:
            # malformed startup_select is deliberately ignored (best effort)
            pass
    self.refresh()
    if startup_focus:
        self.MT.focus_set()
def set_refresh_timer(self, redraw=True):
    """Schedule a deferred redraw unless one is already pending."""
    pending = self.after_redraw_id is not None
    if redraw and not pending:
        self.after_redraw_id = self.after(self.after_redraw_time_ms, self.after_redraw)
def after_redraw(self, redraw_header=True, redraw_row_index=True):
    # Deferred redraw callback scheduled by set_refresh_timer(): repaint the
    # main table (optionally header/row index), then clear the pending-timer
    # id so the next set_refresh_timer() call can schedule again.
    self.MT.main_table_redraw_grid_and_text(redraw_header=redraw_header, redraw_row_index=redraw_row_index)
    self.after_redraw_id = None
def show(self, canvas='all'):
    """Grid (display) one part of the sheet, or everything.

    canvas: 'all', 'row_index', 'header', 'top_left', 'x_scrollbar' or
    'y_scrollbar'.  Matching is case-insensitive — consistency fix:
    hide() already lower-cased its argument while show() did not.
    """
    canvas = canvas.lower()
    if canvas == 'all':
        # start from a clean slate, then grid every widget and rewire scrollbars
        self.hide()
        self.TL.grid(row=0, column=0)
        self.RI.grid(row=1, column=0, sticky='nswe')
        self.CH.grid(row=0, column=1, sticky='nswe')
        self.MT.grid(row=1, column=1, sticky='nswe')
        self.yscroll.grid(row=0, column=2, rowspan=3, sticky='nswe')
        self.xscroll.grid(row=2, column=0, columnspan=2, sticky='nswe')
        self.MT['xscrollcommand'] = self.xscroll.set
        self.CH['xscrollcommand'] = self.xscroll.set
        self.MT['yscrollcommand'] = self.yscroll.set
        self.RI['yscrollcommand'] = self.yscroll.set
        self.xscroll_showing = True
        self.yscroll_showing = True
        self.xscroll_disabled = False
        self.yscroll_disabled = False
    elif canvas == 'row_index':
        self.RI.grid(row=1, column=0, sticky='nswe')
        self.MT['yscrollcommand'] = self.yscroll.set
        self.RI['yscrollcommand'] = self.yscroll.set
        self.MT.show_index = True
    elif canvas == 'header':
        self.CH.grid(row=0, column=1, sticky='nswe')
        self.MT['xscrollcommand'] = self.xscroll.set
        self.CH['xscrollcommand'] = self.xscroll.set
        self.MT.show_header = True
    elif canvas == 'top_left':
        self.TL.grid(row=0, column=0)
    elif canvas == 'x_scrollbar':
        self.xscroll.grid(row=2, column=0, columnspan=2, sticky='nswe')
        self.xscroll_showing = True
        self.xscroll_disabled = False
    elif canvas == 'y_scrollbar':
        self.yscroll.grid(row=0, column=2, rowspan=3, sticky='nswe')
        self.yscroll_showing = True
        self.yscroll_disabled = False
    self.MT.update_idletasks()
def hide(self, canvas='all'):
    """Remove (grid_forget) one part of the sheet, or everything."""
    which = canvas.lower()  # hoisted: the original lower-cased in every branch
    if which == 'all':
        self.TL.grid_forget()
        self.RI.grid_forget()
        self.RI['yscrollcommand'] = 0
        self.MT.show_index = False
        self.CH.grid_forget()
        self.CH['xscrollcommand'] = 0
        self.MT.show_header = False
        self.MT.grid_forget()
        self.yscroll.grid_forget()
        self.xscroll.grid_forget()
        self.xscroll_showing = False
        self.yscroll_showing = False
        self.xscroll_disabled = True
        self.yscroll_disabled = True
    elif which == 'row_index':
        self.RI.grid_forget()
        self.RI['yscrollcommand'] = 0
        self.MT.show_index = False
    elif which == 'header':
        self.CH.grid_forget()
        self.CH['xscrollcommand'] = 0
        self.MT.show_header = False
    elif which == 'top_left':
        self.TL.grid_forget()
    elif which == 'x_scrollbar':
        self.xscroll.grid_forget()
        self.xscroll_showing = False
        self.xscroll_disabled = True
    elif which == 'y_scrollbar':
        self.yscroll.grid_forget()
        self.yscroll_showing = False
        self.yscroll_disabled = True
def height_and_width(self, height=None, width=None):
    """Fix the frame to the given pixel size, or restore auto-sizing when
    both dimensions are None."""
    if height is None and width is None:
        self.grid_propagate(1)
    else:
        self.grid_propagate(0)
    if width is not None:
        self.config(width=width)
    if height is not None:
        self.config(height=height)
def focus_set(self, canvas='table'):
    """Give keyboard focus to one of the sheet's canvases
    ('table', 'header', 'index' or 'topleft'); unknown names are ignored."""
    targets = {
        'table': self.MT,
        'header': self.CH,
        'index': self.RI,
        'topleft': self.TL,
    }
    widget = targets.get(canvas)
    if widget is not None:
        widget.focus_set()
def displayed_column_to_data(self, c):
    """Translate a displayed column index into its underlying data index."""
    if self.MT.all_columns_displayed:
        return c
    return self.MT.displayed_columns[c]
def displayed_row_to_data(self, r):
    """Translate a displayed row index into its underlying data index."""
    if self.MT.all_rows_displayed:
        return r
    return self.MT.displayed_rows[r]
def popup_menu_add_command(self, label, func, table_menu=True, index_menu=True, header_menu=True, empty_space_menu=True):
    """Register *func* under *label* on the chosen right-click menus
    (existing labels are left untouched), then rebuild the menus."""
    menus = (
        (table_menu, self.MT.extra_table_rc_menu_funcs),
        (index_menu, self.MT.extra_index_rc_menu_funcs),
        (header_menu, self.MT.extra_header_rc_menu_funcs),
        (empty_space_menu, self.MT.extra_empty_space_rc_menu_funcs),
    )
    for enabled, funcs in menus:
        if enabled and label not in funcs:
            funcs[label] = func
    self.MT.create_rc_menus()
def popup_menu_del_command(self, label=None):
    """Remove *label* from every right-click menu, or clear all commands
    when *label* is None, then rebuild the menus."""
    if label is None:
        self.MT.extra_table_rc_menu_funcs = {}
        self.MT.extra_index_rc_menu_funcs = {}
        self.MT.extra_header_rc_menu_funcs = {}
        self.MT.extra_empty_space_rc_menu_funcs = {}
    else:
        for funcs in (self.MT.extra_table_rc_menu_funcs,
                      self.MT.extra_index_rc_menu_funcs,
                      self.MT.extra_header_rc_menu_funcs,
                      self.MT.extra_empty_space_rc_menu_funcs):
            funcs.pop(label, None)
    self.MT.create_rc_menus()
def extra_bindings(self, bindings, func=None):
    """Attach (or, with func=None, detach) callbacks to sheet events.

    *bindings* may be a single event-name string, an iterable of names
    (each bound to *func*), or an iterable of (name, callback) pairs.
    Group names ('all', 'bind_all', 'unbind_all', the *_select_events and
    *_modified_events aliases) fan one callback out to many hook
    attributes on the table (MT), row index (RI) and header (CH) canvases.
    """
    # NOTE(review): if *bindings* is neither a str nor iterable, `iterable`
    # is never assigned and the loop below raises NameError — confirm
    # callers always pass a str or an iterable.
    if isinstance(bindings, str):
        iterable = [(bindings, func)]
    elif (is_iterable(bindings) and isinstance(bindings[0], str)):
        iterable = [(b, func) for b in bindings]
    elif is_iterable(bindings):
        iterable = bindings
    for (b, f) in iterable:
        b = b.lower()
        # emitted events go through the bind/unbind registry
        if (b in emitted_events):
            if f:
                self.bind(b, f)
            else:
                self.unbind(b)
        # group alias: every begin hook plus the resize hooks
        if (b in ('all', 'bind_all', 'unbind_all')):
            self.MT.extra_begin_ctrl_c_func = f
            self.MT.extra_begin_ctrl_x_func = f
            self.MT.extra_begin_ctrl_v_func = f
            self.MT.extra_begin_ctrl_z_func = f
            self.MT.extra_begin_delete_key_func = f
            self.RI.ri_extra_begin_drag_drop_func = f
            self.CH.ch_extra_begin_drag_drop_func = f
            self.MT.extra_begin_del_rows_rc_func = f
            self.MT.extra_begin_del_cols_rc_func = f
            self.MT.extra_begin_insert_cols_rc_func = f
            self.MT.extra_begin_insert_rows_rc_func = f
            self.MT.extra_begin_edit_cell_func = f
            self.CH.extra_begin_edit_cell_func = f
            self.RI.extra_begin_edit_cell_func = f
            self.CH.column_width_resize_func = f
            self.RI.row_height_resize_func = f
        # group alias: every selection hook
        if (b in ('all', 'bind_all', 'unbind_all', 'all_select_events', 'select', 'selectevents', 'select_events')):
            self.MT.selection_binding_func = f
            self.MT.select_all_binding_func = f
            self.RI.selection_binding_func = f
            self.CH.selection_binding_func = f
            self.MT.drag_selection_binding_func = f
            self.RI.drag_selection_binding_func = f
            self.CH.drag_selection_binding_func = f
            self.MT.shift_selection_binding_func = f
            self.RI.shift_selection_binding_func = f
            self.CH.shift_selection_binding_func = f
            self.MT.ctrl_selection_binding_func = f
            self.RI.ctrl_selection_binding_func = f
            self.CH.ctrl_selection_binding_func = f
            self.MT.deselection_binding_func = f
        # group alias: every "end"/modification hook
        # NOTE(review): 'sheet_modifiedmodified_events' looks like two names
        # fused together ('sheet_modified' + 'modified_events') — confirm.
        if (b in ('all', 'bind_all', 'unbind_all', 'all_modified_events', 'sheetmodified', 'sheet_modifiedmodified_events', 'modified')):
            self.MT.extra_end_ctrl_c_func = f
            self.MT.extra_end_ctrl_x_func = f
            self.MT.extra_end_ctrl_v_func = f
            self.MT.extra_end_ctrl_z_func = f
            self.MT.extra_end_delete_key_func = f
            self.RI.ri_extra_end_drag_drop_func = f
            self.CH.ch_extra_end_drag_drop_func = f
            self.MT.extra_end_del_rows_rc_func = f
            self.MT.extra_end_del_cols_rc_func = f
            self.MT.extra_end_insert_cols_rc_func = f
            self.MT.extra_end_insert_rows_rc_func = f
            self.MT.extra_end_edit_cell_func = f
            self.CH.extra_end_edit_cell_func = f
            self.RI.extra_end_edit_cell_func = f
        # individual hooks (several accepted aliases each)
        if (b in ('begin_copy', 'begin_ctrl_c')):
            self.MT.extra_begin_ctrl_c_func = f
        if (b in ('ctrl_c', 'end_copy', 'end_ctrl_c', 'copy')):
            self.MT.extra_end_ctrl_c_func = f
        if (b in ('begin_cut', 'begin_ctrl_x')):
            self.MT.extra_begin_ctrl_x_func = f
        if (b in ('ctrl_x', 'end_cut', 'end_ctrl_x', 'cut')):
            self.MT.extra_end_ctrl_x_func = f
        if (b in ('begin_paste', 'begin_ctrl_v')):
            self.MT.extra_begin_ctrl_v_func = f
        if (b in ('ctrl_v', 'end_paste', 'end_ctrl_v', 'paste')):
            self.MT.extra_end_ctrl_v_func = f
        if (b in ('begin_undo', 'begin_ctrl_z')):
            self.MT.extra_begin_ctrl_z_func = f
        if (b in ('ctrl_z', 'end_undo', 'end_ctrl_z', 'undo')):
            self.MT.extra_end_ctrl_z_func = f
        if (b in ('begin_delete_key', 'begin_delete')):
            self.MT.extra_begin_delete_key_func = f
        if (b in ('delete_key', 'end_delete', 'end_delete_key', 'delete')):
            self.MT.extra_end_delete_key_func = f
        if (b in ('begin_edit_cell', 'begin_edit_table')):
            self.MT.extra_begin_edit_cell_func = f
        if (b in ('end_edit_cell', 'edit_cell', 'edit_table')):
            self.MT.extra_end_edit_cell_func = f
        if (b == 'begin_edit_header'):
            self.CH.extra_begin_edit_cell_func = f
        if (b in ('end_edit_header', 'edit_header')):
            self.CH.extra_end_edit_cell_func = f
        if (b == 'begin_edit_index'):
            self.RI.extra_begin_edit_cell_func = f
        if (b in ('end_edit_index', 'edit_index')):
            self.RI.extra_end_edit_cell_func = f
        if (b in ('begin_row_index_drag_drop', 'begin_move_rows')):
            self.RI.ri_extra_begin_drag_drop_func = f
        if (b in ('row_index_drag_drop', 'move_rows', 'end_move_rows', 'end_row_index_drag_drop')):
            self.RI.ri_extra_end_drag_drop_func = f
        if (b in ('begin_column_header_drag_drop', 'begin_move_columns')):
            self.CH.ch_extra_begin_drag_drop_func = f
        if (b in ('column_header_drag_drop', 'move_columns', 'end_move_columns', 'end_column_header_drag_drop')):
            self.CH.ch_extra_end_drag_drop_func = f
        if (b in ('begin_rc_delete_row', 'begin_delete_rows')):
            self.MT.extra_begin_del_rows_rc_func = f
        if (b in ('rc_delete_row', 'end_rc_delete_row', 'end_delete_rows', 'delete_rows')):
            self.MT.extra_end_del_rows_rc_func = f
        if (b in ('begin_rc_delete_column', 'begin_delete_columns')):
            self.MT.extra_begin_del_cols_rc_func = f
        if (b in ('rc_delete_column', 'end_rc_delete_column', 'end_delete_columns', 'delete_columns')):
            self.MT.extra_end_del_cols_rc_func = f
        if (b in ('begin_rc_insert_column', 'begin_insert_column', 'begin_insert_columns', 'begin_add_column', 'begin_rc_add_column', 'begin_add_columns')):
            self.MT.extra_begin_insert_cols_rc_func = f
        if (b in ('rc_insert_column', 'end_rc_insert_column', 'end_insert_column', 'end_insert_columns', 'rc_add_column', 'end_rc_add_column', 'end_add_column', 'end_add_columns')):
            self.MT.extra_end_insert_cols_rc_func = f
        if (b in ('begin_rc_insert_row', 'begin_insert_row', 'begin_insert_rows', 'begin_rc_add_row', 'begin_add_row', 'begin_add_rows')):
            self.MT.extra_begin_insert_rows_rc_func = f
        if (b in ('rc_insert_row', 'end_rc_insert_row', 'end_insert_row', 'end_insert_rows', 'rc_add_row', 'end_rc_add_row', 'end_add_row', 'end_add_rows')):
            self.MT.extra_end_insert_rows_rc_func = f
        if (b == 'column_width_resize'):
            self.CH.column_width_resize_func = f
        if (b == 'row_height_resize'):
            self.RI.row_height_resize_func = f
        if (b == 'cell_select'):
            self.MT.selection_binding_func = f
        if (b in ('select_all', 'ctrl_a')):
            self.MT.select_all_binding_func = f
        if (b == 'row_select'):
            self.RI.selection_binding_func = f
        if (b in ('col_select', 'column_select')):
            self.CH.selection_binding_func = f
        if (b == 'drag_select_cells'):
            self.MT.drag_selection_binding_func = f
        if (b == 'drag_select_rows'):
            self.RI.drag_selection_binding_func = f
        if (b == 'drag_select_columns'):
            self.CH.drag_selection_binding_func = f
        if (b == 'shift_cell_select'):
            self.MT.shift_selection_binding_func = f
        if (b == 'shift_row_select'):
            self.RI.shift_selection_binding_func = f
        if (b == 'shift_column_select'):
            self.CH.shift_selection_binding_func = f
        if (b == 'ctrl_cell_select'):
            self.MT.ctrl_selection_binding_func = f
        if (b == 'ctrl_row_select'):
            self.RI.ctrl_selection_binding_func = f
        if (b == 'ctrl_column_select'):
            self.CH.ctrl_selection_binding_func = f
        if (b == 'deselect'):
            self.MT.deselection_binding_func = f
def emit_event(self, event: str, data=None):
    """Invoke every callback bound to *event*, passing a DotDict payload
    carrying the sheet name and *data* (empty tuple when None)."""
    payload = DotDict()
    payload.sheetname = self.name
    payload.data = tuple() if data is None else data
    self.last_event_data = payload
    for callback in self.bound_events[event]:
        callback(payload)
def event(self):
    # Return the payload (DotDict) of the most recently emitted event.
    return self.last_event_data
def sync_scroll(self, widget: object) -> Sheet:
    """Link this sheet's scrolling with *widget*; the link is made two-way
    when *widget* is itself a Sheet.  Returns self for chaining."""
    if widget is not self:
        self.MT.synced_scrolls.add(widget)
        if isinstance(widget, Sheet):
            widget.MT.synced_scrolls.add(self)
    return self
def unsync_scroll(self, widget: (None | Sheet)=None) -> Sheet:
    """Unlink scrolling from *widget*, or from every synced widget when
    *widget* is None.  Returns self for chaining."""
    if widget is None:
        # detach ourselves from every peer, then drop all of our links
        for peer in self.MT.synced_scrolls:  # renamed: no longer shadows the parameter
            if isinstance(peer, Sheet):
                peer.MT.synced_scrolls.discard(self)
        self.MT.synced_scrolls = set()
    else:
        if isinstance(widget, Sheet) and self in widget.MT.synced_scrolls:
            widget.MT.synced_scrolls.discard(self)
        self.MT.synced_scrolls.discard(widget)
    return self
def bind(self, binding, func, add=None):
    """Bind *func*: emitted-event names are stored in the internal registry,
    the known mouse bindings set the hook attribute on all four canvases,
    and anything else is forwarded to the canvases' tk bind()."""
    # mouse binding -> hook attribute name on MT/CH/RI/TL
    # (rc_binding is a module-level sequence, not a literal)
    hooks = {
        '<ButtonPress-1>': 'extra_b1_press_func',
        '<ButtonMotion-1>': 'extra_b1_motion_func',
        '<ButtonRelease-1>': 'extra_b1_release_func',
        '<Double-Button-1>': 'extra_double_b1_func',
        '<Motion>': 'extra_motion_func',
        rc_binding: 'extra_rc_func',
    }
    if binding in emitted_events:
        if add:
            self.bound_events[binding].append(func)
        else:
            self.bound_events[binding] = [func]
    hook = hooks.get(binding)
    if hook is not None:
        for widget in (self.MT, self.CH, self.RI, self.TL):
            setattr(widget, hook, func)
    else:
        for widget in (self.MT, self.CH, self.RI, self.TL):
            widget.bind(binding, func, add=add)
def unbind(self, binding):
    """Detach whatever bind() attached for *binding*: clear the emitted-event
    registry entry, reset the hook attribute on all four canvases, or
    forward to the canvases' tk unbind()."""
    hooks = {
        '<ButtonPress-1>': 'extra_b1_press_func',
        '<ButtonMotion-1>': 'extra_b1_motion_func',
        '<ButtonRelease-1>': 'extra_b1_release_func',
        '<Double-Button-1>': 'extra_double_b1_func',
        '<Motion>': 'extra_motion_func',
        rc_binding: 'extra_rc_func',
    }
    if binding in emitted_events:
        self.bound_events[binding] = []
    hook = hooks.get(binding)
    if hook is not None:
        for widget in (self.MT, self.CH, self.RI, self.TL):
            setattr(widget, hook, None)
    else:
        for widget in (self.MT, self.CH, self.RI, self.TL):
            widget.unbind(binding)
def enable_bindings(self, *bindings):
    # Enable user-interaction bindings by name; forwards the names (as a
    # tuple) to the main table, which manages bindings for all canvases.
    self.MT.enable_bindings(bindings)
def disable_bindings(self, *bindings):
    # Disable user-interaction bindings by name; forwards the names (as a
    # tuple) to the main table, which manages bindings for all canvases.
    self.MT.disable_bindings(bindings)
def basic_bindings(self, enable=False):
    """Toggle the basic (non-edit) bindings on every canvas of the sheet."""
    for widget in (self.MT, self.CH, self.RI, self.TL):
        widget.basic_bindings(enable)
def edit_bindings(self, enable=False):
    """Enable or disable the main table's cell-editing bindings."""
    # the original if/elif branches both just forwarded the flag
    self.MT.edit_bindings(bool(enable))
def cell_edit_binding(self, enable=False, keys=[]):
    """Enable or disable in-cell editing, optionally with extra trigger keys.

    Bug fix: the *keys* argument was previously discarded — the call always
    passed ``keys=[]``; it is now forwarded to the main table.
    """
    self.MT.bind_cell_edit(enable, keys=keys)
def identify_region(self, event):
    """Return which part of the sheet *event* occurred on ('table', 'index',
    'header' or 'top left'), or None for any other widget."""
    regions = {
        self.MT: 'table',
        self.RI: 'index',
        self.CH: 'header',
        self.TL: 'top left',
    }
    return regions.get(event.widget)
def identify_row(self, event, exclude_index=False, allow_end=True):
    """Return the row under *event*, or None when the event is on the
    header/top-left (or on the index while *exclude_index* is True)."""
    widget = event.widget
    if widget == self.MT:
        return self.MT.identify_row(y=event.y, allow_end=allow_end)
    if widget == self.RI and not exclude_index:
        return self.MT.identify_row(y=event.y, allow_end=allow_end)
    return None
def identify_column(self, event, exclude_header=False, allow_end=True):
    """Return the column under *event*, or None when the event is on the
    index/top-left (or on the header while *exclude_header* is True)."""
    widget = event.widget
    if widget == self.MT:
        return self.MT.identify_col(x=event.x, allow_end=allow_end)
    if widget == self.CH and not exclude_header:
        return self.MT.identify_col(x=event.x, allow_end=allow_end)
    return None
def get_example_canvas_column_widths(self, total_cols=None):
    """Return hypothetical cumulative canvas x-positions assuming every
    column has the default width."""
    width = int(self.MT.default_column_width)
    count = total_cols if total_cols is not None else len(self.MT.col_positions) - 1
    return list(accumulate(chain([0], (width for _ in range(count)))))
def get_example_canvas_row_heights(self, total_rows=None):
    """Return hypothetical cumulative canvas y-positions assuming every
    row has the default height."""
    height = self.MT.default_row_height[1]
    count = total_rows if total_rows is not None else len(self.MT.row_positions) - 1
    return list(accumulate(chain([0], (height for _ in range(count)))))
def get_column_widths(self, canvas_positions=False):
    """Return per-column pixel widths, or the raw cumulative canvas
    positions when *canvas_positions* is True."""
    positions = self.MT.col_positions
    if canvas_positions:
        return [int(p) for p in positions]
    # width of column i is the difference of consecutive positions
    return [int(b - a) for a, b in zip(positions, positions[1:])]
def get_row_heights(self, canvas_positions=False):
    """Return per-row pixel heights, or the raw cumulative canvas
    positions when *canvas_positions* is True."""
    positions = self.MT.row_positions
    if canvas_positions:
        return [int(p) for p in positions]
    # height of row i is the difference of consecutive positions
    return [int(b - a) for a, b in zip(positions, positions[1:])]
def set_all_cell_sizes_to_text(self, redraw=True):
    """Size every row and column to fit its text, schedule a redraw, and
    return the resulting (row_positions, col_positions) lists."""
    self.MT.set_all_cell_sizes_to_text()
    self.set_refresh_timer(redraw)
    return self.MT.row_positions, self.MT.col_positions
def set_all_column_widths(self, width=None, only_set_if_too_small=False, redraw=True, recreate_selection_boxes=True):
    # Forward to the column-header canvas to set every column's width
    # (semantics of width=None are defined by ColumnHeaders), optionally
    # recreating selection boxes, then schedule a redraw.
    self.CH.set_width_of_all_cols(width=width, only_set_if_too_small=only_set_if_too_small, recreate=recreate_selection_boxes)
    self.set_refresh_timer(redraw)
def column_width(self, column=None, width=None, only_set_if_too_small=False, redraw=True):
    # Get or set column widths:
    #   column='all', width='default'    -> reset every column width
    #   column='displayed', width='text' -> fit currently visible columns to text
    #   width='text' + column index      -> fit that column to its text
    #   numeric width + column index     -> set it directly
    #   column index alone               -> return that column's width in pixels
    if (column == 'all'):
        if (width == 'default'):
            self.MT.reset_col_positions()
    elif (column == 'displayed'):
        if (width == 'text'):
            (sc, ec) = self.MT.get_visible_columns(self.MT.canvasx(0), self.MT.canvasx(self.winfo_width()))
            # NOTE(review): range(sc, ec - 1) stops short of the last visible
            # column; confirm whether `ec - 1` (vs `ec` or `ec + 1`) is intended.
            for c in range(sc, (ec - 1)):
                self.CH.set_col_width(c)
    elif ((width == 'text') and (column is not None)):
        self.CH.set_col_width(col=column, width=None, only_set_if_too_small=only_set_if_too_small)
    elif ((width is not None) and (column is not None)):
        self.CH.set_col_width(col=column, width=width, only_set_if_too_small=only_set_if_too_small)
    elif (column is not None):
        return int((self.MT.col_positions[(column + 1)] - self.MT.col_positions[column]))
    self.set_refresh_timer(redraw)
def set_column_widths(self, column_widths=None, canvas_positions=False, reset=False, verify=False):
    """Replace the table's column positions from a list of widths, or use
    the list directly as cumulative canvas positions.  reset=True restores
    defaults; verify=True additionally returns the verification result."""
    if reset:
        self.MT.reset_col_positions()
        return
    verified = self.verify_column_widths(column_widths, canvas_positions) if verify else None
    if is_iterable(column_widths):
        if canvas_positions and isinstance(column_widths, list):
            self.MT.col_positions = column_widths
        else:
            # dropped the identity generator: chain iterates the widths directly
            self.MT.col_positions = list(accumulate(chain([0], column_widths)))
    return verified
def set_all_row_heights(self, height=None, only_set_if_too_small=False, redraw=True, recreate_selection_boxes=True):
    # Forward to the row-index canvas to set every row's height (semantics
    # of height=None are defined by RowIndex), optionally recreating
    # selection boxes, then schedule a redraw.
    self.RI.set_height_of_all_rows(height=height, only_set_if_too_small=only_set_if_too_small, recreate=recreate_selection_boxes)
    self.set_refresh_timer(redraw)
def set_cell_size_to_text(self, row, column, only_set_if_too_small=False, redraw=True):
    # Resize one cell's row and column to fit its text, then schedule a redraw.
    self.MT.set_cell_size_to_text(r=row, c=column, only_set_if_too_small=only_set_if_too_small)
    self.set_refresh_timer(redraw)
def set_width_of_index_to_text(self, text=None, *args, **kwargs):
    # Resize the row-index width to fit *text*; extra positional/keyword
    # arguments are accepted for API compatibility but ignored.
    self.RI.set_width_of_index_to_text(text=text)
def set_height_of_header_to_text(self, text=None):
    # Resize the header height to fit *text* (semantics of text=None are
    # defined by ColumnHeaders).
    self.CH.set_height_of_header_to_text(text=text)
def row_height(self, row=None, height=None, only_set_if_too_small=False, redraw=True):
    """Get or set row heights (vertical counterpart of column_width()).

    - row='all', height='default': reset every row to the default height.
    - row='displayed', height='text': fit each currently visible row to its text.
    - height='text' + row index: fit that single row to its text.
    - height (pixels) + row index: set that row's height.
    - row index alone: return that row's current height in pixels.
    """
    if (row == 'all'):
        if (height == 'default'):
            self.MT.reset_row_positions()
    elif (row == 'displayed'):
        if (height == 'text'):
            # Bugfix: the visible-row range is bounded by the widget's
            # HEIGHT (canvasy is the y axis); previously winfo_width()
            # was used, giving a wrong range on non-square widgets.
            (sr, er) = self.MT.get_visible_rows(self.MT.canvasy(0), self.MT.canvasy(self.winfo_height()))
            for r in range(sr, (er - 1)):
                self.RI.set_row_height(r)
    elif ((height == 'text') and (row is not None)):
        self.RI.set_row_height(row=row, height=None, only_set_if_too_small=only_set_if_too_small)
    elif ((height is not None) and (row is not None)):
        self.RI.set_row_height(row=row, height=height, only_set_if_too_small=only_set_if_too_small)
    elif (row is not None):
        # Row height = difference between adjacent cumulative positions.
        return int((self.MT.row_positions[(row + 1)] - self.MT.row_positions[row]))
    self.set_refresh_timer(redraw)
def set_row_heights(self, row_heights=None, canvas_positions=False, reset=False, verify=False):
    # Replace all row heights at once.
    # row_heights: iterable of individual heights, or (with
    # canvas_positions=True and a list) cumulative y-positions.
    # reset=True restores defaults; verify=True clamps each height to the
    # minimum row height while applying (note: returns nothing, unlike
    # set_column_widths which returns the verification result).
    if reset:
        self.MT.reset_row_positions()
        return
    if is_iterable(row_heights):
        qmin = self.MT.min_row_height
        if (canvas_positions and isinstance(row_heights, list)):
            if verify:
                # Re-derive per-row heights from consecutive position
                # differences, clamp each to qmin, then re-accumulate.
                self.MT.row_positions = list(accumulate(chain([0], ((height if (qmin < height) else qmin) for height in [(x - z) for (z, x) in zip(islice(row_heights, 0, None), islice(row_heights, 1, None))]))))
            else:
                self.MT.row_positions = row_heights
        elif verify:
            # NOTE(review): this branch assigns clamped HEIGHTS (not
            # accumulated positions) to row_positions -- confirm intended.
            self.MT.row_positions = [(qmin if ((z < qmin) or (not isinstance(z, int)) or isinstance(z, bool)) else z) for z in row_heights]
        else:
            # Convert individual heights to cumulative canvas positions.
            self.MT.row_positions = list(accumulate(chain([0], (height for height in row_heights))))
def verify_row_heights(self, row_heights: List, canvas_positions=False):
    """Return True if *row_heights* is a valid list of row heights.

    With canvas_positions=True the list is read as cumulative y-positions:
    it must start at 0 and every consecutive gap must be an int (not bool)
    of at least the minimum row height. Otherwise every entry must itself
    be such an int.
    """
    if not isinstance(row_heights, list):
        return False
    minimum = self.MT.min_row_height
    if canvas_positions:
        if row_heights[0] != 0:
            return False
        for prev, curr in zip(row_heights, islice(row_heights, 1, None)):
            if ((curr - prev) < minimum) or (not isinstance(curr, int)) or isinstance(curr, bool):
                return False
        return True
    for entry in row_heights:
        if (entry < minimum) or (not isinstance(entry, int)) or isinstance(entry, bool):
            return False
    return True
def verify_column_widths(self, column_widths: List, canvas_positions=False):
    """Return True if *column_widths* is a valid list of column widths.

    With canvas_positions=True the list is read as cumulative x-positions:
    it must start at 0 and every consecutive gap must be an int (not bool)
    of at least the minimum column width. Otherwise every entry must
    itself be such an int.
    """
    if not isinstance(column_widths, list):
        return False
    minimum = self.MT.min_column_width
    if canvas_positions:
        if column_widths[0] != 0:
            return False
        for prev, curr in zip(column_widths, islice(column_widths, 1, None)):
            if ((curr - prev) < minimum) or (not isinstance(curr, int)) or isinstance(curr, bool):
                return False
        return True
    for entry in column_widths:
        if (entry < minimum) or (not isinstance(entry, int)) or isinstance(entry, bool):
            return False
    return True
def default_row_height(self, height=None):
    # Get or set the default row height. The stored value is a 2-tuple:
    # (type-or-lines component, pixel height). A str *height* is treated
    # as a number of text lines; an int is pixels.
    if (height is not None):
        self.MT.default_row_height = ((height if isinstance(height, str) else 'pixels'), (height if isinstance(height, int) else self.MT.get_lines_cell_height(int(height))))
    return self.MT.default_row_height[1]
def default_header_height(self, height=None):
    # Same contract as default_row_height(), measured with the header font.
    if (height is not None):
        self.MT.default_header_height = ((height if isinstance(height, str) else 'pixels'), (height if isinstance(height, int) else self.MT.get_lines_cell_height(int(height), font=self.MT.header_font)))
    return self.MT.default_header_height[1]
def default_column_width(self, width=None):
    # Get or set the default column width in pixels.
    if (width is not None):
        if (width < self.MT.min_column_width):
            # NOTE(review): a too-small request is replaced by min + 20,
            # not clamped to *width* -- confirm intended.
            self.MT.default_column_width = (self.MT.min_column_width + 20)
        else:
            self.MT.default_column_width = int(width)
    return self.MT.default_column_width
def cut(self, event=None):
    # Programmatic Ctrl+X: cut the current selection to the clipboard.
    self.MT.ctrl_x()
def copy(self, event=None):
    # Programmatic Ctrl+C: copy the current selection to the clipboard.
    self.MT.ctrl_c()
def paste(self, event=None):
    # Programmatic Ctrl+V: paste the clipboard at the current selection.
    self.MT.ctrl_v()
def delete(self, event=None):
    # Programmatic Delete: clear the contents of the current selection.
    self.MT.delete_key()
def undo(self, event=None):
    # Programmatic Ctrl+Z: undo the last undoable action.
    self.MT.ctrl_z()
def delete_row_position(self, idx: int, deselect_all=False):
    # Remove only the display position (height) of row *idx*; data untouched.
    self.MT.del_row_position(idx=idx, deselect_all=deselect_all)
def delete_row(self, idx=0, deselect_all=False, redraw=True):
    # Delete a single data row; thin wrapper over delete_rows().
    self.delete_rows(rows={idx}, deselect_all=deselect_all, redraw=False)
    self.set_refresh_timer(redraw)
def delete_rows(self, rows: Set=set(), deselect_all=False, redraw=True):
    """Delete the given data row indexes.

    Removes the rows from the data, drops their heights, and renumbers
    displayed rows and every row-keyed option dict to close the gaps.
    Note: the `rows=set()` default is a shared mutable default but is
    only ever read, never mutated.
    """
    if deselect_all:
        self.deselect('all', redraw=False)
    if isinstance(rows, set):
        to_del = rows
    else:
        to_del = set(rows)
    if (not to_del):
        return
    self.MT.data[:] = [row for (r, row) in enumerate(self.MT.data) if (r not in to_del)]
    to_bis = sorted(to_del)
    if self.MT.all_rows_displayed:
        # Rebuild heights from consecutive position differences, skipping deleted rows.
        self.set_row_heights(row_heights=(h for (r, h) in enumerate((int((b - a)) for (a, b) in zip(self.MT.row_positions, islice(self.MT.row_positions, 1, len(self.MT.row_positions))))) if (r not in to_del)))
    else:
        # Bugfix: heights parallel the *displayed* rows, so the indexes of
        # the heights to drop are the display positions of the deleted
        # rows. Previously indexes into the sorted deletion list were
        # used, which removed the wrong heights.
        heights_to_del = {i for (i, r) in enumerate(self.MT.displayed_rows) if (r in to_del)}
        if heights_to_del:
            self.set_row_heights(row_heights=(h for (r, h) in enumerate((int((b - a)) for (a, b) in zip(self.MT.row_positions, islice(self.MT.row_positions, 1, len(self.MT.row_positions))))) if (r not in heights_to_del)))
        # Bugfix: renumber surviving displayed rows for the deleted data
        # rows (mirrors delete_columns(), which already did this).
        self.MT.displayed_rows = [(r if (not bisect.bisect_left(to_bis, r)) else (r - bisect.bisect_left(to_bis, r))) for r in self.MT.displayed_rows if (r not in to_del)]
    # Shift all row-keyed options down by the number of deleted rows below them.
    self.MT.cell_options = {((r if (not bisect.bisect_left(to_bis, r)) else (r - bisect.bisect_left(to_bis, r))), c): v for ((r, c), v) in self.MT.cell_options.items() if (r not in to_del)}
    self.MT.row_options = {(r if (not bisect.bisect_left(to_bis, r)) else (r - bisect.bisect_left(to_bis, r))): v for (r, v) in self.MT.row_options.items() if (r not in to_del)}
    self.RI.cell_options = {(r if (not bisect.bisect_left(to_bis, r)) else (r - bisect.bisect_left(to_bis, r))): v for (r, v) in self.RI.cell_options.items() if (r not in to_del)}
    self.set_refresh_timer(redraw)
def insert_row_position(self, idx='end', height=None, deselect_all=False, redraw=False):
    # Insert a single display row position ('end' or index); data untouched.
    self.MT.insert_row_position(idx=idx, height=height, deselect_all=deselect_all)
    self.set_refresh_timer(redraw)
def insert_row_positions(self, idx='end', heights=None, deselect_all=False, redraw=False):
    # Insert multiple display row positions at once; data untouched.
    self.MT.insert_row_positions(idx=idx, heights=heights, deselect_all=deselect_all)
    self.set_refresh_timer(redraw)
def total_rows(self, number=None, mod_positions=True, mod_data=True):
    """Get or set the total number of data rows.

    With number=None returns the current count. Otherwise grows (adding
    default-height positions when mod_positions) or shrinks the sheet,
    and resizes the data when mod_data.

    Raises ValueError when *number* is not a non-negative int.
    """
    if (number is None):
        return int(self.MT.total_data_rows())
    if ((not isinstance(number, int)) or (number < 0)):
        # Bugfix: message said '> 0' while the check permits 0.
        raise ValueError('number argument must be an integer and >= 0')
    if (number > len(self.MT.data)):
        if mod_positions:
            # NOTE(review): assumes default_row_height[0] is numeric (a
            # line count) here -- confirm it cannot be the string 'pixels'.
            height = self.MT.get_lines_cell_height(int(self.MT.default_row_height[0]))
            for r in range((number - len(self.MT.data))):
                self.MT.insert_row_position('end', height)
    elif (number < len(self.MT.data)):
        if (not self.MT.all_rows_displayed):
            # Leave displayed-rows mode before truncating positions.
            self.MT.display_rows(enable=False, reset_row_positions=False, deselect_all=True)
        self.MT.row_positions[(number + 1):] = []
    if mod_data:
        self.MT.data_dimensions(total_rows=number)
def total_columns(self, number=None, mod_positions=True, mod_data=True):
    """Get or set the total number of data columns.

    With number=None returns the current count. Otherwise grows (adding
    default-width positions when mod_positions) or shrinks the sheet,
    and resizes the data when mod_data.

    Raises ValueError when *number* is not a non-negative int.
    """
    total_cols = self.MT.total_data_cols()
    if (number is None):
        return int(total_cols)
    if ((not isinstance(number, int)) or (number < 0)):
        # Bugfix: message said '> 0' while the check permits 0.
        raise ValueError('number argument must be an integer and >= 0')
    if (number > total_cols):
        if mod_positions:
            width = self.MT.default_column_width
            for c in range((number - total_cols)):
                self.MT.insert_col_position('end', width)
    elif (number < total_cols):
        if (not self.MT.all_columns_displayed):
            # Leave displayed-columns mode before truncating positions.
            self.MT.display_columns(enable=False, reset_col_positions=False, deselect_all=True)
        self.MT.col_positions[(number + 1):] = []
    if mod_data:
        self.MT.data_dimensions(total_columns=number)
def sheet_display_dimensions(self, total_rows=None, total_columns=None):
    # With no arguments return (displayed rows, displayed columns).
    # Otherwise rebuild row/column positions with default sizes.
    if ((total_rows is None) and (total_columns is None)):
        return ((len(self.MT.row_positions) - 1), (len(self.MT.col_positions) - 1))
    if (total_rows is not None):
        # NOTE(review): assumes default_row_height[0] is numeric here --
        # confirm it cannot be the string 'pixels' (see default_row_height()).
        height = self.MT.get_lines_cell_height(int(self.MT.default_row_height[0]))
        self.MT.row_positions = list(accumulate(chain([0], (height for row in range(total_rows)))))
    if (total_columns is not None):
        width = self.MT.default_column_width
        self.MT.col_positions = list(accumulate(chain([0], (width for column in range(total_columns)))))
def set_sheet_data_and_display_dimensions(self, total_rows=None, total_columns=None):
    # Resize both the displayed positions and the underlying data.
    self.sheet_display_dimensions(total_rows=total_rows, total_columns=total_columns)
    self.MT.data_dimensions(total_rows=total_rows, total_columns=total_columns)
def move_row_position(self, row: int, moveto: int):
    # Move only the display position (height) of *row*; data untouched.
    self.MT.move_row_position(row, moveto)
def move_row(self, row: int, moveto: int):
    # Move a single row (data + position); wrapper over move_rows().
    self.move_rows(moveto, row, 1)
def delete_column_position(self, idx: int, deselect_all=False):
    # Remove only the display position (width) of column *idx*; data untouched.
    self.MT.del_col_position(idx, deselect_all=deselect_all)
def delete_column(self, idx=0, deselect_all=False, redraw=True):
    # Delete a single data column; thin wrapper over delete_columns().
    self.delete_columns(columns={idx}, deselect_all=deselect_all, redraw=False)
    self.set_refresh_timer(redraw)
def delete_columns(self, columns: Set=set(), deselect_all=False, redraw=True):
    """Delete the given data column indexes.

    Removes the columns from every data row, drops their widths, and
    renumbers displayed columns and every column-keyed option dict to
    close the gaps. Note: the `columns=set()` default is a shared
    mutable default but is only ever read, never mutated.
    """
    if deselect_all:
        self.deselect('all', redraw=False)
    if isinstance(columns, set):
        to_del = columns
    else:
        to_del = set(columns)
    if (not to_del):
        return
    self.MT.data[:] = [[e for (c, e) in enumerate(r) if (c not in to_del)] for r in self.MT.data]
    to_bis = sorted(to_del)
    if self.MT.all_columns_displayed:
        # Rebuild widths from consecutive position differences, skipping deleted columns.
        self.set_column_widths(column_widths=(w for (c, w) in enumerate((int((b - a)) for (a, b) in zip(self.MT.col_positions, islice(self.MT.col_positions, 1, len(self.MT.col_positions))))) if (c not in to_del)))
    else:
        # Bugfix: widths parallel the *displayed* columns, so the indexes
        # of the widths to drop are the display positions of the deleted
        # columns. Previously indexes into the sorted deletion list were
        # used, which removed the wrong widths.
        widths_to_del = {i for (i, c) in enumerate(self.MT.displayed_columns) if (c in to_del)}
        if widths_to_del:
            self.set_column_widths(column_widths=(w for (c, w) in enumerate((int((b - a)) for (a, b) in zip(self.MT.col_positions, islice(self.MT.col_positions, 1, len(self.MT.col_positions))))) if (c not in widths_to_del)))
        self.MT.displayed_columns = [(c if (not bisect.bisect_left(to_bis, c)) else (c - bisect.bisect_left(to_bis, c))) for c in self.MT.displayed_columns if (c not in to_del)]
    # Shift all column-keyed options left by the number of deleted columns before them.
    self.MT.cell_options = {(r, (c if (not bisect.bisect_left(to_bis, c)) else (c - bisect.bisect_left(to_bis, c)))): v for ((r, c), v) in self.MT.cell_options.items() if (c not in to_del)}
    self.MT.col_options = {(c if (not bisect.bisect_left(to_bis, c)) else (c - bisect.bisect_left(to_bis, c))): v for (c, v) in self.MT.col_options.items() if (c not in to_del)}
    self.CH.cell_options = {(c if (not bisect.bisect_left(to_bis, c)) else (c - bisect.bisect_left(to_bis, c))): v for (c, v) in self.CH.cell_options.items() if (c not in to_del)}
    self.set_refresh_timer(redraw)
def insert_column_position(self, idx='end', width=None, deselect_all=False, redraw=False):
    # Insert a single display column position ('end' or index); data untouched.
    self.MT.insert_col_position(idx=idx, width=width, deselect_all=deselect_all)
    self.set_refresh_timer(redraw)
def insert_column_positions(self, idx='end', widths=None, deselect_all=False, redraw=False):
    # Insert multiple display column positions at once; data untouched.
    self.MT.insert_col_positions(idx=idx, widths=widths, deselect_all=deselect_all)
    self.set_refresh_timer(redraw)
def move_column_position(self, column: int, moveto: int):
    # Move only the display position (width) of *column*; data untouched.
    self.MT.move_col_position(column, moveto)
def move_column(self, column: int, moveto: int):
    # Move a single column (data + position); wrapper over move_columns().
    self.move_columns(moveto, column, 1)
def move_columns(self, moveto: int, to_move_min: int, number_of_columns: int, move_data: bool=True, index_type: str='displayed', create_selections: bool=True, redraw=False):
    # Move a contiguous run of columns starting at to_move_min to moveto.
    # Returns (new selection boxes, new displayed-column list) from MT.
    (new_selected, dispset) = self.MT.move_columns_adjust_options_dict(moveto, to_move_min, number_of_columns, move_data, create_selections, index_type=index_type.lower())
    self.set_refresh_timer(redraw)
    return (new_selected, dispset)
def move_rows(self, moveto: int, to_move_min: int, number_of_rows: int, move_data: bool=True, index_type: str='displayed', create_selections: bool=True, redraw=False):
    # Move a contiguous run of rows starting at to_move_min to moveto.
    # Returns (new selection boxes, new displayed-row list) from MT.
    (new_selected, dispset) = self.MT.move_rows_adjust_options_dict(moveto, to_move_min, number_of_rows, move_data, create_selections, index_type=index_type.lower())
    self.set_refresh_timer(redraw)
    return (new_selected, dispset)
def open_cell(self, ignore_existing_editor=True):
    # Open the text editor on the currently selected table cell.
    self.MT.open_cell(event=GeneratedMouseEvent(), ignore_existing_editor=ignore_existing_editor)
def open_header_cell(self, ignore_existing_editor=True):
    # Open the text editor on the currently selected header cell.
    self.CH.open_cell(event=GeneratedMouseEvent(), ignore_existing_editor=ignore_existing_editor)
def open_index_cell(self, ignore_existing_editor=True):
    # Open the text editor on the currently selected row-index cell.
    self.RI.open_cell(event=GeneratedMouseEvent(), ignore_existing_editor=ignore_existing_editor)
def set_text_editor_value(self, text='', r=None, c=None):
    # Replace the open text editor's contents; with r/c given, only when
    # the editor is at that exact cell.
    if ((self.MT.text_editor is not None) and (r is None) and (c is None)):
        self.MT.text_editor.set_text(text)
    elif ((self.MT.text_editor is not None) and (self.MT.text_editor_loc == (r, c))):
        self.MT.text_editor.set_text(text)
def bind_text_editor_set(self, func, row, column):
    # Call *func* when the text editor at (row, column) is destroyed.
    self.MT.bind_text_editor_destroy(func, row, column)
def destroy_text_editor(self, event=None):
    # Close the open text editor, if any.
    self.MT.destroy_text_editor(event=event)
def get_text_editor_widget(self, event=None):
    # Return the underlying Text widget of the open editor, or None.
    try:
        return self.MT.text_editor.textedit
    except Exception:
        # No editor open (text_editor is None) -> attribute access fails.
        return None
def bind_key_text_editor(self, key: str, function):
    # Bind *function* to *key* inside any future text editor.
    self.MT.text_editor_user_bound_keys[key] = function
def unbind_key_text_editor(self, key: str):
    # Remove a user key binding ('all' clears every binding); also unbinds
    # from the currently open editor, best-effort.
    if (key == 'all'):
        for key in self.MT.text_editor_user_bound_keys:
            try:
                self.MT.text_editor.textedit.unbind(key)
            except Exception:
                # No editor open; binding removal is best-effort.
                pass
        self.MT.text_editor_user_bound_keys = {}
    else:
        if (key in self.MT.text_editor_user_bound_keys):
            del self.MT.text_editor_user_bound_keys[key]
        try:
            self.MT.text_editor.textedit.unbind(key)
        except Exception:
            pass
def get_xview(self):
    # Return the horizontal scrollbar fractions (first, last).
    return self.MT.xview()
def get_yview(self):
    # Return the vertical scrollbar fractions (first, last).
    return self.MT.yview()
def set_xview(self, position, option='moveto'):
    # Scroll horizontally ('moveto' fraction or 'scroll' amount).
    self.MT.set_xviews(option, position)
def set_yview(self, position, option='moveto'):
    # Scroll vertically ('moveto' fraction or 'scroll' amount).
    self.MT.set_yviews(option, position)
def set_view(self, x_args, y_args):
    # Set both scroll positions at once with raw view arguments.
    self.MT.set_view(x_args, y_args)
def see(self, row=0, column=0, keep_yscroll=False, keep_xscroll=False, bottom_right_corner=False, check_cell_visibility=True, redraw=True):
    # Scroll so that (row, column) is visible.
    self.MT.see(row, column, keep_yscroll, keep_xscroll, bottom_right_corner, check_cell_visibility=check_cell_visibility, redraw=False)
    self.set_refresh_timer(redraw)
def select_row(self, row, redraw=True):
    # Select an entire row, replacing the current selection.
    self.RI.select_row((int(row) if (not isinstance(row, int)) else row), redraw=False)
    self.set_refresh_timer(redraw)
def select_column(self, column, redraw=True):
    # Select an entire column, replacing the current selection.
    self.CH.select_col((int(column) if (not isinstance(column, int)) else column), redraw=False)
    self.set_refresh_timer(redraw)
def select_cell(self, row, column, redraw=True):
    # Select a single cell, replacing the current selection.
    self.MT.select_cell((int(row) if (not isinstance(row, int)) else row), (int(column) if (not isinstance(column, int)) else column), redraw=False)
    self.set_refresh_timer(redraw)
def select_all(self, redraw=True, run_binding_func=True):
    # Select every cell in the sheet.
    self.MT.select_all(redraw=False, run_binding_func=run_binding_func)
    self.set_refresh_timer(redraw)
def add_cell_selection(self, row, column, redraw=True, run_binding_func=True, set_as_current=True):
    # Add a cell to the current selection without clearing it.
    self.MT.add_selection(r=row, c=column, redraw=False, run_binding_func=run_binding_func, set_as_current=set_as_current)
    self.set_refresh_timer(redraw)
def add_row_selection(self, row, redraw=True, run_binding_func=True, set_as_current=True):
    # Add a row to the current selection without clearing it.
    self.RI.add_selection(r=row, redraw=False, run_binding_func=run_binding_func, set_as_current=set_as_current)
    self.set_refresh_timer(redraw)
def add_column_selection(self, column, redraw=True, run_binding_func=True, set_as_current=True):
    # Add a column to the current selection without clearing it.
    self.CH.add_selection(c=column, redraw=False, run_binding_func=run_binding_func, set_as_current=set_as_current)
    self.set_refresh_timer(redraw)
def toggle_select_cell(self, row, column, add_selection=True, redraw=True, run_binding_func=True, set_as_current=True):
    # Toggle a cell in/out of the selection.
    self.MT.toggle_select_cell(row=row, column=column, add_selection=add_selection, redraw=False, run_binding_func=run_binding_func, set_as_current=set_as_current)
    self.set_refresh_timer(redraw)
def toggle_select_row(self, row, add_selection=True, redraw=True, run_binding_func=True, set_as_current=True):
    # Toggle a row in/out of the selection.
    self.RI.toggle_select_row(row=row, add_selection=add_selection, redraw=False, run_binding_func=run_binding_func, set_as_current=set_as_current)
    self.set_refresh_timer(redraw)
def toggle_select_column(self, column, add_selection=True, redraw=True, run_binding_func=True, set_as_current=True):
    # Toggle a column in/out of the selection.
    self.CH.toggle_select_col(column=column, add_selection=add_selection, redraw=False, run_binding_func=run_binding_func, set_as_current=set_as_current)
    self.set_refresh_timer(redraw)
def deselect(self, row=None, column=None, cell=None, redraw=True):
    """Deselect a row, column or cell selection (row='all' clears all).

    Schedules a redraw when *redraw* is true, like every other
    selection method in this class.
    """
    self.MT.deselect(r=row, c=column, cell=cell, redraw=False)
    # Bugfix: the redraw argument was previously accepted but ignored.
    self.set_refresh_timer(redraw)
def get_currently_selected(self):
    # Return the currently-selected cell descriptor from the main table.
    return self.MT.currently_selected()
def set_currently_selected(self, row, column, type_='cell', selection_binding=True):
    # Set the current selection marker and optionally fire the user's
    # selection-binding callback with a synthetic select event.
    self.MT.set_currently_selected(r=row, c=column, type_=type_)
    if (selection_binding and (self.MT.selection_binding_func is not None)):
        self.MT.selection_binding_func(SelectCellEvent('select_cell', row, column))
def get_selected_rows(self, get_cells=False, get_cells_as_rows=False, return_tuple=False):
    # Return selected row indexes (optionally as cells), tuple or set-like.
    if return_tuple:
        return tuple(self.MT.get_selected_rows(get_cells=get_cells, get_cells_as_rows=get_cells_as_rows))
    else:
        return self.MT.get_selected_rows(get_cells=get_cells, get_cells_as_rows=get_cells_as_rows)
def get_selected_columns(self, get_cells=False, get_cells_as_columns=False, return_tuple=False):
    # Return selected column indexes (optionally as cells), tuple or set-like.
    if return_tuple:
        return tuple(self.MT.get_selected_cols(get_cells=get_cells, get_cells_as_cols=get_cells_as_columns))
    else:
        return self.MT.get_selected_cols(get_cells=get_cells, get_cells_as_cols=get_cells_as_columns)
def get_selected_cells(self, get_rows=False, get_columns=False, sort_by_row=False, sort_by_column=False):
    """Return the selected (row, column) cells.

    Optionally includes row/column selections as cells and sorts the
    result by row, by column, or by row with a column tiebreak (when
    both flags are set). Unsorted results are returned as-is from MT.
    """
    cells = self.MT.get_selected_cells(get_rows=get_rows, get_cols=get_columns)
    if sort_by_row and sort_by_column:
        # Sort by column first, then stable-sort by row -> row-major order.
        by_column = sorted(cells, key=lambda cell: cell[1])
        return sorted(by_column, key=lambda cell: cell[0])
    if sort_by_row:
        return sorted(cells, key=lambda cell: cell[0])
    if sort_by_column:
        return sorted(cells, key=lambda cell: cell[1])
    return cells
def get_all_selection_boxes(self):
    # Return every selection box as coordinate tuples.
    return self.MT.get_all_selection_boxes()
def get_all_selection_boxes_with_types(self):
    # Return every selection box together with its type ('cells'/'rows'/'columns').
    return self.MT.get_all_selection_boxes_with_types()
def create_selection_box(self, r1, c1, r2, c2, type_='cells'):
    # Create a selection box spanning (r1, c1)-(r2, c2); 'cols' is
    # accepted as an alias for 'columns'.
    return self.MT.create_selected(r1=r1, c1=c1, r2=r2, c2=c2, type_=('columns' if (type_ == 'cols') else type_))
def recreate_all_selection_boxes(self):
    # Redraw all selection boxes (e.g. after sizes changed).
    self.MT.recreate_all_selection_boxes()
def cell_visible(self, r, c):
    # True if any part of cell (r, c) is in the viewport.
    return self.MT.cell_visible(r, c)
def cell_completely_visible(self, r, c, seperate_axes=False):
    # True if cell (r, c) is fully in the viewport.
    # (The 'seperate_axes' spelling is part of the public API.)
    return self.MT.cell_completely_visible(r, c, seperate_axes)
def cell_selected(self, r, c):
    # True if cell (r, c) is currently selected.
    return self.MT.cell_selected(r, c)
def row_selected(self, r):
    # True if row r is currently selected.
    return self.MT.row_selected(r)
def column_selected(self, c):
    # True if column c is currently selected.
    return self.MT.col_selected(c)
def anything_selected(self, exclude_columns=False, exclude_rows=False, exclude_cells=False):
    # True if any selection exists, with optional exclusions by type.
    if self.MT.anything_selected(exclude_columns=exclude_columns, exclude_rows=exclude_rows, exclude_cells=exclude_cells):
        return True
    return False
def all_selected(self):
    # True if the entire sheet is selected.
    return self.MT.all_selected()
def readonly_rows(self, rows=[], readonly=True, redraw=False):
    """Mark the given rows read-only, or clear the flag with readonly=False.

    *rows* may be a single int or an iterable of row indexes.
    """
    targets = [rows] if isinstance(rows, int) else rows
    if readonly:
        for r in targets:
            self.MT.row_options.setdefault(r, {})['readonly'] = True
    else:
        for r in targets:
            options = self.MT.row_options.get(r)
            if options is not None and 'readonly' in options:
                del options['readonly']
    self.set_refresh_timer(redraw)
def readonly_columns(self, columns=[], readonly=True, redraw=False):
    """Mark the given columns read-only, or clear the flag with readonly=False.

    *columns* may be a single int or an iterable of column indexes.
    """
    targets = [columns] if isinstance(columns, int) else columns
    if readonly:
        for c in targets:
            self.MT.col_options.setdefault(c, {})['readonly'] = True
    else:
        for c in targets:
            options = self.MT.col_options.get(c)
            if options is not None and 'readonly' in options:
                del options['readonly']
    self.set_refresh_timer(redraw)
def readonly_cells(self, row=0, column=0, cells=[], readonly=True, redraw=False):
    # Mark cells read-only or clear the flag. With *cells* given (a list
    # of (r, c) tuples) it is used; otherwise the single (row, column).
    if (not readonly):
        if cells:
            for (r, c) in cells:
                if (((r, c) in self.MT.cell_options) and ('readonly' in self.MT.cell_options[(r, c)])):
                    del self.MT.cell_options[(r, c)]['readonly']
        elif (((row, column) in self.MT.cell_options) and ('readonly' in self.MT.cell_options[(row, column)])):
            del self.MT.cell_options[(row, column)]['readonly']
    elif cells:
        for (r, c) in cells:
            if ((r, c) not in self.MT.cell_options):
                self.MT.cell_options[(r, c)] = {}
            self.MT.cell_options[(r, c)]['readonly'] = True
    else:
        if ((row, column) not in self.MT.cell_options):
            self.MT.cell_options[(row, column)] = {}
        self.MT.cell_options[(row, column)]['readonly'] = True
    self.set_refresh_timer(redraw)
def readonly_header(self, columns=[], readonly=True, redraw=False):
    # Mark header cells read-only (delegates to the column header).
    self.CH.readonly_header(columns=columns, readonly=readonly)
    self.set_refresh_timer(redraw)
def readonly_index(self, rows=[], readonly=True, redraw=False):
    # Mark row-index cells read-only (delegates to the row index).
    self.RI.readonly_index(rows=rows, readonly=readonly)
    self.set_refresh_timer(redraw)
def dehighlight_all(self, redraw=True):
    """Remove every 'highlight' option from the table, row index and
    header -- cell-, row- and column-scoped alike."""
    all_option_dicts = (
        self.MT.cell_options,
        self.MT.row_options,
        self.MT.col_options,
        self.RI.cell_options,
        self.CH.cell_options,
    )
    for option_dict in all_option_dicts:
        for options in option_dict.values():
            if 'highlight' in options:
                del options['highlight']
    self.set_refresh_timer(redraw)
def dehighlight_rows(self, rows=[], redraw=True):
    # Remove row highlights (table + row index). An empty list or 'all'
    # clears every row highlight.
    if isinstance(rows, int):
        rows_ = [rows]
    else:
        rows_ = rows
    if ((not rows_) or (rows_ == 'all')):
        for r in self.MT.row_options:
            if ('highlight' in self.MT.row_options[r]):
                del self.MT.row_options[r]['highlight']
        for r in self.RI.cell_options:
            if ('highlight' in self.RI.cell_options[r]):
                del self.RI.cell_options[r]['highlight']
    else:
        for r in rows_:
            # Best-effort: the row may have no options / no highlight.
            try:
                del self.MT.row_options[r]['highlight']
            except Exception:
                pass
            try:
                del self.RI.cell_options[r]['highlight']
            except Exception:
                pass
    self.set_refresh_timer(redraw)
def dehighlight_columns(self, columns=[], redraw=True):
    # Remove column highlights (table + header). An empty list or 'all'
    # clears every column highlight.
    if isinstance(columns, int):
        columns_ = [columns]
    else:
        columns_ = columns
    if ((not columns_) or (columns_ == 'all')):
        for c in self.MT.col_options:
            if ('highlight' in self.MT.col_options[c]):
                del self.MT.col_options[c]['highlight']
        for c in self.CH.cell_options:
            if ('highlight' in self.CH.cell_options[c]):
                del self.CH.cell_options[c]['highlight']
    else:
        for c in columns_:
            # Best-effort: the column may have no options / no highlight.
            try:
                del self.MT.col_options[c]['highlight']
            except Exception:
                pass
            try:
                del self.CH.cell_options[c]['highlight']
            except Exception:
                pass
    self.set_refresh_timer(redraw)
def highlight_rows(self, rows=[], bg=None, fg=None, highlight_index=True, redraw=True, end_of_screen=False, overwrite=True):
    # Highlight whole rows with (bg, fg, end_of_screen). With
    # overwrite=False, existing colors are kept where the new ones are None.
    if ((bg is None) and (fg is None)):
        return
    for r in ((rows,) if isinstance(rows, int) else rows):
        if (r not in self.MT.row_options):
            self.MT.row_options[r] = {}
        if (('highlight' in self.MT.row_options[r]) and (not overwrite)):
            # NOTE(review): the third element's conditional always yields
            # the existing value (both branches are equal when they
            # differ is impossible) -- effectively keeps [2] unchanged;
            # confirm that is the intent.
            self.MT.row_options[r]['highlight'] = ((self.MT.row_options[r]['highlight'][0] if (bg is None) else bg), (self.MT.row_options[r]['highlight'][1] if (fg is None) else fg), (self.MT.row_options[r]['highlight'][2] if (self.MT.row_options[r]['highlight'][2] != end_of_screen) else end_of_screen))
        else:
            self.MT.row_options[r]['highlight'] = (bg, fg, end_of_screen)
    if highlight_index:
        # Mirror the highlight onto the row-index cells.
        self.highlight_cells(cells=rows, canvas='index', bg=bg, fg=fg, redraw=False)
    self.set_refresh_timer(redraw)
def highlight_columns(self, columns=[], bg=None, fg=None, highlight_header=True, redraw=True, overwrite=True):
    # Highlight whole columns with (bg, fg). With overwrite=False,
    # existing colors are kept where the new ones are None.
    if ((bg is None) and (fg is None)):
        return
    for c in ((columns,) if isinstance(columns, int) else columns):
        if (c not in self.MT.col_options):
            self.MT.col_options[c] = {}
        if (('highlight' in self.MT.col_options[c]) and (not overwrite)):
            self.MT.col_options[c]['highlight'] = ((self.MT.col_options[c]['highlight'][0] if (bg is None) else bg), (self.MT.col_options[c]['highlight'][1] if (fg is None) else fg))
        else:
            self.MT.col_options[c]['highlight'] = (bg, fg)
    if highlight_header:
        # Mirror the highlight onto the header cells.
        self.highlight_cells(cells=columns, canvas='header', bg=bg, fg=fg, redraw=False)
    self.set_refresh_timer(redraw)
def highlight_cells(self, row=0, column=0, cells=[], canvas='table', bg=None, fg=None, redraw=True, overwrite=True):
    # Highlight individual cells on the table ('table'), the row index
    # ('row_index'/'index') or the header ('header'). *cells* is a list
    # of (r, c) tuples (table) or indexes (index/header); otherwise the
    # single row/column is used. row or column may be 'all' (table only).
    # With overwrite=False, existing colors are kept where new are None.
    if ((bg is None) and (fg is None)):
        return
    if (canvas == 'table'):
        if cells:
            for (r_, c_) in cells:
                if ((r_, c_) not in self.MT.cell_options):
                    self.MT.cell_options[(r_, c_)] = {}
                if (('highlight' in self.MT.cell_options[(r_, c_)]) and (not overwrite)):
                    self.MT.cell_options[(r_, c_)]['highlight'] = ((self.MT.cell_options[(r_, c_)]['highlight'][0] if (bg is None) else bg), (self.MT.cell_options[(r_, c_)]['highlight'][1] if (fg is None) else fg))
                else:
                    self.MT.cell_options[(r_, c_)]['highlight'] = (bg, fg)
        else:
            if (isinstance(row, str) and (row.lower() == 'all') and isinstance(column, int)):
                riter = range(self.MT.total_data_rows())
                citer = (column,)
            elif (isinstance(column, str) and (column.lower() == 'all') and isinstance(row, int)):
                riter = (row,)
                citer = range(self.MT.total_data_cols())
            elif (isinstance(row, int) and isinstance(column, int)):
                riter = (row,)
                citer = (column,)
            # NOTE(review): if BOTH row and column are 'all' (or other
            # unmatched types) riter/citer are unbound and the loop below
            # raises NameError -- confirm callers never do this.
            for r_ in riter:
                for c_ in citer:
                    if ((r_, c_) not in self.MT.cell_options):
                        self.MT.cell_options[(r_, c_)] = {}
                    if (('highlight' in self.MT.cell_options[(r_, c_)]) and (not overwrite)):
                        self.MT.cell_options[(r_, c_)]['highlight'] = ((self.MT.cell_options[(r_, c_)]['highlight'][0] if (bg is None) else bg), (self.MT.cell_options[(r_, c_)]['highlight'][1] if (fg is None) else fg))
                    else:
                        self.MT.cell_options[(r_, c_)]['highlight'] = (bg, fg)
    elif (canvas in ('row_index', 'index')):
        if ((bg is None) and (fg is None)):
            return
        # Accept a list of rows, a single int in *cells*, or fall back to *row*.
        iterable = (cells if (cells and (not isinstance(cells, int))) else ((cells,) if isinstance(cells, int) else (row,)))
        for r_ in iterable:
            if (r_ not in self.RI.cell_options):
                self.RI.cell_options[r_] = {}
            if (('highlight' in self.RI.cell_options[r_]) and (not overwrite)):
                self.RI.cell_options[r_]['highlight'] = ((self.RI.cell_options[r_]['highlight'][0] if (bg is None) else bg), (self.RI.cell_options[r_]['highlight'][1] if (fg is None) else fg))
            else:
                self.RI.cell_options[r_]['highlight'] = (bg, fg)
    elif (canvas == 'header'):
        if ((bg is None) and (fg is None)):
            return
        # Accept a list of columns, a single int in *cells*, or fall back to *column*.
        iterable = (cells if (cells and (not isinstance(cells, int))) else ((cells,) if isinstance(cells, int) else (column,)))
        for c_ in iterable:
            if (c_ not in self.CH.cell_options):
                self.CH.cell_options[c_] = {}
            if (('highlight' in self.CH.cell_options[c_]) and (not overwrite)):
                self.CH.cell_options[c_]['highlight'] = ((self.CH.cell_options[c_]['highlight'][0] if (bg is None) else bg), (self.CH.cell_options[c_]['highlight'][1] if (fg is None) else fg))
            else:
                self.CH.cell_options[c_]['highlight'] = (bg, fg)
    self.set_refresh_timer(redraw)
def dehighlight_cells(self, row=0, column=0, cells=[], canvas='table', all_=False, redraw=True):
    # Remove cell highlights from the chosen canvas. row='all' clears
    # every highlight on that canvas; otherwise *cells* (a list of keys),
    # a single (row, column)/row/column, or all_=True selects targets.
    if ((row == 'all') and (canvas == 'table')):
        for (k, v) in self.MT.cell_options.items():
            if ('highlight' in v):
                del self.MT.cell_options[k]['highlight']
    elif ((row == 'all') and (canvas == 'row_index')):
        for (k, v) in self.RI.cell_options.items():
            if ('highlight' in v):
                del self.RI.cell_options[k]['highlight']
    elif ((row == 'all') and (canvas == 'header')):
        for (k, v) in self.CH.cell_options.items():
            if ('highlight' in v):
                del self.CH.cell_options[k]['highlight']
    if (canvas == 'table'):
        if (cells and (not all_)):
            for t in cells:
                # Best-effort: the cell may have no options / no highlight.
                try:
                    del self.MT.cell_options[t]['highlight']
                except Exception:
                    pass
        elif (not all_):
            if (((row, column) in self.MT.cell_options) and ('highlight' in self.MT.cell_options[(row, column)])):
                del self.MT.cell_options[(row, column)]['highlight']
        elif all_:
            for k in self.MT.cell_options:
                if ('highlight' in self.MT.cell_options[k]):
                    del self.MT.cell_options[k]['highlight']
    elif (canvas == 'row_index'):
        if (cells and (not all_)):
            for r in cells:
                try:
                    del self.RI.cell_options[r]['highlight']
                except Exception:
                    pass
        elif (not all_):
            if ((row in self.RI.cell_options) and ('highlight' in self.RI.cell_options[row])):
                del self.RI.cell_options[row]['highlight']
        elif all_:
            for r in self.RI.cell_options:
                if ('highlight' in self.RI.cell_options[r]):
                    del self.RI.cell_options[r]['highlight']
    elif (canvas == 'header'):
        if (cells and (not all_)):
            for c in cells:
                try:
                    del self.CH.cell_options[c]['highlight']
                except Exception:
                    pass
        elif (not all_):
            if ((column in self.CH.cell_options) and ('highlight' in self.CH.cell_options[column])):
                del self.CH.cell_options[column]['highlight']
        elif all_:
            for c in self.CH.cell_options:
                if ('highlight' in self.CH.cell_options[c]):
                    del self.CH.cell_options[c]['highlight']
    self.set_refresh_timer(redraw)
def delete_out_of_bounds_options(self):
    # Drop any cell/row/column options whose index now lies outside the
    # current data dimensions (e.g. after shrinking the sheet).
    maxc = self.total_columns()
    maxr = self.total_rows()
    self.MT.cell_options = {k: v for (k, v) in self.MT.cell_options.items() if ((k[0] < maxr) and (k[1] < maxc))}
    self.RI.cell_options = {k: v for (k, v) in self.RI.cell_options.items() if (k < maxr)}
    self.CH.cell_options = {k: v for (k, v) in self.CH.cell_options.items() if (k < maxc)}
    self.MT.col_options = {k: v for (k, v) in self.MT.col_options.items() if (k < maxc)}
    self.MT.row_options = {k: v for (k, v) in self.MT.row_options.items() if (k < maxr)}
def reset_all_options(self):
    # Clear every option dict (highlights, alignments, readonly, ...).
    self.MT.cell_options = {}
    self.RI.cell_options = {}
    self.CH.cell_options = {}
    self.MT.col_options = {}
    self.MT.row_options = {}
def get_cell_options(self, canvas='table'):
    # Return the raw options dict for the chosen canvas.
    # (Returns None for an unrecognized canvas name.)
    if (canvas == 'table'):
        return self.MT.cell_options
    elif (canvas == 'row_index'):
        return self.RI.cell_options
    elif (canvas == 'header'):
        return self.CH.cell_options
def get_highlighted_cells(self, canvas='table'):
    # Return {key: highlight-tuple} for the chosen canvas.
    if (canvas == 'table'):
        return {k: v['highlight'] for (k, v) in self.MT.cell_options.items() if ('highlight' in v)}
    elif (canvas == 'row_index'):
        return {k: v['highlight'] for (k, v) in self.RI.cell_options.items() if ('highlight' in v)}
    elif (canvas == 'header'):
        return {k: v['highlight'] for (k, v) in self.CH.cell_options.items() if ('highlight' in v)}
def get_frame_y(self, y: int):
    # Convert a table-canvas y coordinate to a frame coordinate
    # (offset by the header height).
    return (y + self.CH.current_height)
def get_frame_x(self, x: int):
    # Convert a table-canvas x coordinate to a frame coordinate
    # (offset by the row-index width).
    return (x + self.RI.current_width)
def convert_align(self, align: str):
    """Normalize an alignment name to 'center', 'w' or 'e' (case-insensitive).

    Raises ValueError for any unrecognized value.
    """
    key = align.lower()
    aliases = (
        ('center', ('c', 'center', 'centre')),
        ('w', ('w', 'west', 'left')),
        ('e', ('e', 'east', 'right')),
    )
    for canonical, names in aliases:
        if key in names:
            return canonical
    raise ValueError('Align must be one of the following values: c, center, w, west, left, e, east, right')
def get_cell_alignments(self):
    # Return {(row, column): alignment} for cells with an explicit alignment.
    return {(r, c): v['align'] for ((r, c), v) in self.MT.cell_options.items() if ('align' in v)}
def get_column_alignments(self):
    # Return {column: alignment} for columns with an explicit alignment.
    return {c: v['align'] for (c, v) in self.MT.col_options.items() if ('align' in v)}
def get_row_alignments(self):
    # Return {row: alignment} for rows with an explicit alignment.
    return {r: v['align'] for (r, v) in self.MT.row_options.items() if ('align' in v)}
def align_rows(self, rows=[], align='global', align_index=False, redraw=True):
if ((align == 'global') or self.convert_align(align)):
if isinstance(rows, dict):
for (k, v) in rows.items():
self.MT.align_rows(rows=k, align=v, align_index=align_index)
else:
self.MT.align_rows(rows=rows, align=(align if (align == 'global') else self.convert_align(align)), align_index=align_index)
self.set_refresh_timer(redraw)
def align_columns(self, columns=[], align='global', align_header=False, redraw=True):
if ((align == 'global') or self.convert_align(align)):
if isinstance(columns, dict):
for (k, v) in columns.items():
self.MT.align_columns(columns=k, align=v, align_header=align_header)
else:
self.MT.align_columns(columns=columns, align=(align if (align == 'global') else self.convert_align(align)), align_header=align_header)
self.set_refresh_timer(redraw)
def align_cells(self, row=0, column=0, cells=[], align='global', redraw=True):
if ((align == 'global') or self.convert_align(align)):
if isinstance(cells, dict):
for ((r, c), v) in cells.items():
self.MT.align_cells(row=r, column=c, cells=[], align=v)
else:
self.MT.align_cells(row=row, column=column, cells=cells, align=(align if (align == 'global') else self.convert_align(align)))
self.set_refresh_timer(redraw)
def align_header(self, columns=[], align='global', redraw=True):
    """Set text alignment for header cells.

    ``columns`` may be a dict of {column: alignment} or an iterable of
    columns that all receive ``align``.
    """
    if align == 'global' or self.convert_align(align):
        if isinstance(columns, dict):
            for col, col_align in columns.items():
                self.CH.align_cells(columns=col, align=col_align)
        else:
            resolved = align if align == 'global' else self.convert_align(align)
            self.CH.align_cells(columns=columns, align=resolved)
    self.set_refresh_timer(redraw)
def align_index(self, rows=[], align='global', redraw=True):
    """Set text alignment for row index cells.

    ``rows`` may be a dict of {row: alignment} or an iterable of rows
    that all receive ``align``.  'global' keeps the sheet-wide
    alignment; other values pass through convert_align().
    """
    if align == 'global' or self.convert_align(align):
        if isinstance(rows, dict):
            for row, row_align in rows.items():
                # Bug fix: previously passed the whole ``rows`` dict here
                # instead of the current key, so per-row alignments were
                # never routed to the intended single row (compare the
                # dict branch of align_header()).
                self.RI.align_cells(rows=row, align=row_align)
        else:
            self.RI.align_cells(rows=rows, align=align if align == 'global' else self.convert_align(align))
    self.set_refresh_timer(redraw)
def align(self, align: str=None, redraw=True):
    """Get (align is None) or set the sheet-wide table alignment.

    Invalid values raise ValueError via convert_align().
    """
    if align is None:
        return self.MT.align
    converted = self.convert_align(align)
    if converted:
        self.MT.align = converted
    else:
        # convert_align() raises on invalid input, so this branch is a
        # defensive fallback only.
        raise ValueError('Align must be one of the following values: c, center, w, west, e, east')
    self.set_refresh_timer(redraw)
def header_align(self, align: str=None, redraw=True):
    """Get (align is None) or set the header alignment.

    Invalid values raise ValueError via convert_align().
    """
    if align is None:
        return self.CH.align
    converted = self.convert_align(align)
    if converted:
        self.CH.align = converted
    else:
        # convert_align() raises on invalid input; defensive fallback.
        raise ValueError('Align must be one of the following values: c, center, w, west, e, east')
    self.set_refresh_timer(redraw)
def row_index_align(self, align: str=None, redraw=True):
    """Get (align is None) or set the row index alignment.

    Invalid values raise ValueError via convert_align().
    """
    if align is None:
        return self.RI.align
    converted = self.convert_align(align)
    if converted:
        self.RI.align = converted
    else:
        # convert_align() raises on invalid input; defensive fallback.
        raise ValueError('Align must be one of the following values: c, center, w, west, e, east')
    self.set_refresh_timer(redraw)
def font(self, newfont=None, reset_row_positions=True):
    """Get or set the table font; delegates to the main table."""
    result = self.MT.set_table_font(newfont, reset_row_positions=reset_row_positions)
    return result
def header_font(self, newfont=None):
    """Get or set the header font; delegates to the main table."""
    result = self.MT.set_header_font(newfont)
    return result
def set_options(self, redraw=True, **kwargs):
    """Apply any number of sheet configuration options given as keyword
    arguments.

    Each recognised keyword is copied onto the responsible widget
    (``MT`` main table, ``CH`` column header, ``RI`` row index, ``TL``
    top-left corner) or applied via widget ``config()`` calls.
    Unrecognised keywords are silently ignored.  Right-click menus are
    rebuilt afterwards and a redraw is scheduled when ``redraw`` is
    truthy.
    """
    # --- behavior toggles ---
    if ('set_cell_sizes_on_zoom' in kwargs):
        self.MT.set_cell_sizes_on_zoom = kwargs['set_cell_sizes_on_zoom']
    if ('auto_resize_columns' in kwargs):
        self.MT.auto_resize_columns = kwargs['auto_resize_columns']
    if ('auto_resize_rows' in kwargs):
        self.MT.auto_resize_rows = kwargs['auto_resize_rows']
    # --- clipboard formatting ---
    if ('to_clipboard_delimiter' in kwargs):
        self.MT.to_clipboard_delimiter = kwargs['to_clipboard_delimiter']
    if ('to_clipboard_quotechar' in kwargs):
        self.MT.to_clipboard_quotechar = kwargs['to_clipboard_quotechar']
    if ('to_clipboard_lineterminator' in kwargs):
        self.MT.to_clipboard_lineterminator = kwargs['to_clipboard_lineterminator']
    if ('from_clipboard_delimiters' in kwargs):
        self.MT.from_clipboard_delimiters = kwargs['from_clipboard_delimiters']
    if ('show_dropdown_borders' in kwargs):
        self.MT.show_dropdown_borders = kwargs['show_dropdown_borders']
    if ('edit_cell_validation' in kwargs):
        self.MT.edit_cell_validation = kwargs['edit_cell_validation']
    if ('show_default_header_for_empty' in kwargs):
        self.CH.show_default_header_for_empty = kwargs['show_default_header_for_empty']
    if ('show_default_index_for_empty' in kwargs):
        self.RI.show_default_index_for_empty = kwargs['show_default_index_for_empty']
    if ('selected_rows_to_end_of_window' in kwargs):
        self.MT.selected_rows_to_end_of_window = kwargs['selected_rows_to_end_of_window']
    if ('horizontal_grid_to_end_of_window' in kwargs):
        self.MT.horizontal_grid_to_end_of_window = kwargs['horizontal_grid_to_end_of_window']
    if ('vertical_grid_to_end_of_window' in kwargs):
        self.MT.vertical_grid_to_end_of_window = kwargs['vertical_grid_to_end_of_window']
    if ('paste_insert_column_limit' in kwargs):
        self.MT.paste_insert_column_limit = kwargs['paste_insert_column_limit']
    if ('paste_insert_row_limit' in kwargs):
        self.MT.paste_insert_row_limit = kwargs['paste_insert_row_limit']
    if ('expand_sheet_if_paste_too_big' in kwargs):
        self.MT.expand_sheet_if_paste_too_big = kwargs['expand_sheet_if_paste_too_big']
    if ('arrow_key_down_right_scroll_page' in kwargs):
        self.MT.arrow_key_down_right_scroll_page = kwargs['arrow_key_down_right_scroll_page']
    if ('enable_edit_cell_auto_resize' in kwargs):
        self.MT.cell_auto_resize_enabled = kwargs['enable_edit_cell_auto_resize']
    if ('header_hidden_columns_expander_bg' in kwargs):
        self.CH.header_hidden_columns_expander_bg = kwargs['header_hidden_columns_expander_bg']
    if ('index_hidden_rows_expander_bg' in kwargs):
        self.RI.index_hidden_rows_expander_bg = kwargs['index_hidden_rows_expander_bg']
    if ('page_up_down_select_row' in kwargs):
        self.MT.page_up_down_select_row = kwargs['page_up_down_select_row']
    if ('display_selected_fg_over_highlights' in kwargs):
        self.MT.display_selected_fg_over_highlights = kwargs['display_selected_fg_over_highlights']
    if ('show_horizontal_grid' in kwargs):
        self.MT.show_horizontal_grid = kwargs['show_horizontal_grid']
    if ('show_vertical_grid' in kwargs):
        self.MT.show_vertical_grid = kwargs['show_vertical_grid']
    if ('empty_horizontal' in kwargs):
        self.MT.empty_horizontal = kwargs['empty_horizontal']
    if ('empty_vertical' in kwargs):
        self.MT.empty_vertical = kwargs['empty_vertical']
    # --- sizes: row/header heights are stored as ('pixels'|'lines', value) pairs ---
    if ('row_height' in kwargs):
        self.MT.default_row_height = ((kwargs['row_height'] if isinstance(kwargs['row_height'], str) else 'pixels'), (kwargs['row_height'] if isinstance(kwargs['row_height'], int) else self.MT.get_lines_cell_height(int(kwargs['row_height']))))
    if ('column_width' in kwargs):
        # Widths below the minimum are bumped to minimum + 20 pixels.
        self.MT.default_column_width = ((self.MT.min_column_width + 20) if (kwargs['column_width'] < self.MT.min_column_width) else int(kwargs['column_width']))
    if ('header_height' in kwargs):
        self.MT.default_header_height = ((kwargs['header_height'] if isinstance(kwargs['header_height'], str) else 'pixels'), (kwargs['header_height'] if isinstance(kwargs['header_height'], int) else self.MT.get_lines_cell_height(int(kwargs['header_height']), font=self.MT.header_font)))
    if ('row_drag_and_drop_perform' in kwargs):
        self.RI.row_drag_and_drop_perform = kwargs['row_drag_and_drop_perform']
    if ('column_drag_and_drop_perform' in kwargs):
        self.CH.column_drag_and_drop_perform = kwargs['column_drag_and_drop_perform']
    # --- popup menu appearance ---
    if ('popup_menu_font' in kwargs):
        self.MT.popup_menu_font = kwargs['popup_menu_font']
    if ('popup_menu_fg' in kwargs):
        self.MT.popup_menu_fg = kwargs['popup_menu_fg']
    if ('popup_menu_bg' in kwargs):
        self.MT.popup_menu_bg = kwargs['popup_menu_bg']
    if ('popup_menu_highlight_bg' in kwargs):
        self.MT.popup_menu_highlight_bg = kwargs['popup_menu_highlight_bg']
    if ('popup_menu_highlight_fg' in kwargs):
        self.MT.popup_menu_highlight_fg = kwargs['popup_menu_highlight_fg']
    if ('top_left_fg_highlight' in kwargs):
        self.TL.top_left_fg_highlight = kwargs['top_left_fg_highlight']
    if ('auto_resize_default_row_index' in kwargs):
        self.RI.auto_resize_width = kwargs['auto_resize_default_row_index']
    # --- selection colors ---
    if ('header_selected_columns_bg' in kwargs):
        self.CH.header_selected_columns_bg = kwargs['header_selected_columns_bg']
    if ('header_selected_columns_fg' in kwargs):
        self.CH.header_selected_columns_fg = kwargs['header_selected_columns_fg']
    if ('index_selected_rows_bg' in kwargs):
        self.RI.index_selected_rows_bg = kwargs['index_selected_rows_bg']
    if ('index_selected_rows_fg' in kwargs):
        self.RI.index_selected_rows_fg = kwargs['index_selected_rows_fg']
    if ('table_selected_rows_border_fg' in kwargs):
        self.MT.table_selected_rows_border_fg = kwargs['table_selected_rows_border_fg']
    if ('table_selected_rows_bg' in kwargs):
        self.MT.table_selected_rows_bg = kwargs['table_selected_rows_bg']
    if ('table_selected_rows_fg' in kwargs):
        self.MT.table_selected_rows_fg = kwargs['table_selected_rows_fg']
    if ('table_selected_columns_border_fg' in kwargs):
        self.MT.table_selected_columns_border_fg = kwargs['table_selected_columns_border_fg']
    if ('table_selected_columns_bg' in kwargs):
        self.MT.table_selected_columns_bg = kwargs['table_selected_columns_bg']
    if ('table_selected_columns_fg' in kwargs):
        self.MT.table_selected_columns_fg = kwargs['table_selected_columns_fg']
    if ('default_header' in kwargs):
        self.CH.default_header = kwargs['default_header'].lower()
    if ('default_row_index' in kwargs):
        self.RI.default_index = kwargs['default_row_index'].lower()
    if ('max_column_width' in kwargs):
        self.MT.max_column_width = float(kwargs['max_column_width'])
    if ('max_row_height' in kwargs):
        self.MT.max_row_height = float(kwargs['max_row_height'])
    if ('max_header_height' in kwargs):
        self.MT.max_header_height = float(kwargs['max_header_height'])
    if ('max_index_width' in kwargs):
        self.MT.max_index_width = float(kwargs['max_index_width'])
    # --- fonts and theme (applied through dedicated setters) ---
    if ('font' in kwargs):
        self.MT.set_table_font(kwargs['font'])
    if ('header_font' in kwargs):
        self.MT.set_header_font(kwargs['header_font'])
    if ('index_font' in kwargs):
        self.MT.set_index_font(kwargs['index_font'])
    if ('theme' in kwargs):
        self.change_theme(kwargs['theme'])
    if ('show_selected_cells_border' in kwargs):
        self.MT.show_selected_cells_border = kwargs['show_selected_cells_border']
    # --- header / index / top-left colors (some also update the widget bg) ---
    if ('header_bg' in kwargs):
        self.CH.config(background=kwargs['header_bg'])
        self.CH.header_bg = kwargs['header_bg']
    if ('header_border_fg' in kwargs):
        self.CH.header_border_fg = kwargs['header_border_fg']
    if ('header_grid_fg' in kwargs):
        self.CH.header_grid_fg = kwargs['header_grid_fg']
    if ('header_fg' in kwargs):
        self.CH.header_fg = kwargs['header_fg']
    if ('header_selected_cells_bg' in kwargs):
        self.CH.header_selected_cells_bg = kwargs['header_selected_cells_bg']
    if ('header_selected_cells_fg' in kwargs):
        self.CH.header_selected_cells_fg = kwargs['header_selected_cells_fg']
    if ('index_bg' in kwargs):
        self.RI.config(background=kwargs['index_bg'])
        self.RI.index_bg = kwargs['index_bg']
    if ('index_border_fg' in kwargs):
        self.RI.index_border_fg = kwargs['index_border_fg']
    if ('index_grid_fg' in kwargs):
        self.RI.index_grid_fg = kwargs['index_grid_fg']
    if ('index_fg' in kwargs):
        self.RI.index_fg = kwargs['index_fg']
    if ('index_selected_cells_bg' in kwargs):
        self.RI.index_selected_cells_bg = kwargs['index_selected_cells_bg']
    if ('index_selected_cells_fg' in kwargs):
        self.RI.index_selected_cells_fg = kwargs['index_selected_cells_fg']
    if ('top_left_bg' in kwargs):
        self.TL.config(background=kwargs['top_left_bg'])
    if ('top_left_fg' in kwargs):
        self.TL.top_left_fg = kwargs['top_left_fg']
        # Recolor the resize-width/resize-height canvas items as well.
        self.TL.itemconfig('rw', fill=kwargs['top_left_fg'])
        self.TL.itemconfig('rh', fill=kwargs['top_left_fg'])
    # --- frame and table colors ---
    if ('frame_bg' in kwargs):
        self.config(background=kwargs['frame_bg'])
    if ('table_bg' in kwargs):
        self.MT.config(background=kwargs['table_bg'])
        self.MT.table_bg = kwargs['table_bg']
    if ('table_grid_fg' in kwargs):
        self.MT.table_grid_fg = kwargs['table_grid_fg']
    if ('table_fg' in kwargs):
        self.MT.table_fg = kwargs['table_fg']
    if ('table_selected_cells_border_fg' in kwargs):
        self.MT.table_selected_cells_border_fg = kwargs['table_selected_cells_border_fg']
    if ('table_selected_cells_bg' in kwargs):
        self.MT.table_selected_cells_bg = kwargs['table_selected_cells_bg']
    if ('table_selected_cells_fg' in kwargs):
        self.MT.table_selected_cells_fg = kwargs['table_selected_cells_fg']
    if ('resizing_line_fg' in kwargs):
        self.CH.resizing_line_fg = kwargs['resizing_line_fg']
        self.RI.resizing_line_fg = kwargs['resizing_line_fg']
    if ('drag_and_drop_bg' in kwargs):
        self.CH.drag_and_drop_bg = kwargs['drag_and_drop_bg']
        self.RI.drag_and_drop_bg = kwargs['drag_and_drop_bg']
    if ('outline_thickness' in kwargs):
        self.config(highlightthickness=kwargs['outline_thickness'])
    if ('outline_color' in kwargs):
        self.config(highlightbackground=kwargs['outline_color'], highlightcolor=kwargs['outline_color'])
    # Rebuild right-click menus so new colors/fonts take effect there too.
    self.MT.create_rc_menus()
    self.set_refresh_timer(redraw)
def change_theme(self, theme='light blue', redraw=True):
    """Apply one of the built-in color themes by name.

    Accepted names (case-insensitive, space or underscore): 'light blue',
    'dark', 'light green', 'dark blue', 'dark green', 'black'.  Unknown
    names leave colors unchanged but still rebuild selection boxes.
    """
    themes = {
        'light blue': theme_light_blue,
        'light_blue': theme_light_blue,
        'dark': theme_dark,
        'light green': theme_light_green,
        'light_green': theme_light_green,
        'dark blue': theme_dark_blue,
        'dark_blue': theme_dark_blue,
        'dark green': theme_dark_green,
        'dark_green': theme_dark_green,
        'black': theme_black,
    }
    chosen = themes.get(theme.lower())
    if chosen is not None:
        self.set_options(**chosen, redraw=False)
        self.config(bg=chosen['table_bg'])
    self.MT.recreate_all_selection_boxes()
    self.set_refresh_timer(redraw)
def get_header_data(self, c, get_displayed=False):
    """Return the header value for data column ``c``; when
    ``get_displayed`` is True return the displayed text instead."""
    return self.CH.get_cell_data(datacn=c, get_displayed=get_displayed)
def get_index_data(self, r, get_displayed=False):
    """Return the row index value for data row ``r``; when
    ``get_displayed`` is True return the displayed text instead."""
    return self.RI.get_cell_data(datarn=r, get_displayed=get_displayed)
def get_sheet_data(self, get_displayed=False, get_header=False, get_index=False, get_header_displayed=True, get_index_displayed=True, only_rows=None, only_columns=None, **kwargs):
    """Return the sheet contents as a list of row lists.

    ``get_displayed`` returns displayed (formatted) values instead of
    underlying data.  ``get_header``/``get_index`` prepend a header row /
    index column; when both are requested the top-left corner cell is
    ''.  ``only_rows``/``only_columns`` restrict output to the given
    int or iterable of data indexes.  Unknown keyword arguments trigger
    a warning via show_kwargs_warning().
    """
    if kwargs:
        show_kwargs_warning(kwargs, 'get_sheet_data')
    # Normalize int -> single-element tuple; reject non-iterables.
    if (only_rows is not None):
        if isinstance(only_rows, int):
            only_rows = (only_rows,)
        elif (not is_iterable(only_rows)):
            raise ValueError(f"Argument 'only_rows' must be either int or iterable or None. Not {type(only_rows)}")
    if (only_columns is not None):
        if isinstance(only_columns, int):
            only_columns = (only_columns,)
        elif (not is_iterable(only_columns)):
            raise ValueError(f"Argument 'only_columns' must be either int or iterable or None. Not {type(only_columns)}")
    if get_header:
        # Track the widest row so the header row can be padded to match.
        maxlen = (len(self.MT._headers) if isinstance(self.MT._headers, (list, tuple)) else 0)
        data = []
        for rn in (only_rows if (only_rows is not None) else range(len(self.MT.data))):
            r = self.get_row_data(rn, get_displayed=get_displayed, only_columns=only_columns)
            if (len(r) > maxlen):
                maxlen = len(r)
            if get_index:
                data.append(([self.get_index_data(rn, get_displayed=get_index_displayed)] + r))
            else:
                data.append(r)
        iterable = (only_columns if (only_columns is not None) else range(maxlen))
        if get_index:
            # '' fills the top-left corner above the index column.
            return ([([''] + [self.get_header_data(cn, get_displayed=get_header_displayed) for cn in iterable])] + data)
        else:
            return ([[self.get_header_data(cn, get_displayed=get_header_displayed) for cn in iterable]] + data)
    elif (not get_header):
        iterable = (only_rows if (only_rows is not None) else range(len(self.MT.data)))
        return [self.get_row_data(rn, get_displayed=get_displayed, get_index=get_index, get_index_displayed=get_index_displayed, only_columns=only_columns) for rn in iterable]
def get_value_for_empty_cell(self, r, c, r_ops=True, c_ops=True):
    """Return the default value an empty cell at (r, c) should display,
    honoring row/column options when ``r_ops``/``c_ops`` are True."""
    return self.MT.get_value_for_empty_cell(r, c, r_ops, c_ops)
def get_cell_data(self, r, c, get_displayed=False, **kwargs):
    """Return the value of cell (r, c); displayed text when
    ``get_displayed`` is True.  Unknown kwargs trigger a warning."""
    if kwargs:
        show_kwargs_warning(kwargs, 'get_cell_data')
    return self.MT.get_cell_data(r, c, get_displayed)
def get_row_data(self, r, get_displayed=False, get_index=False, get_index_displayed=True, only_columns=None, **kwargs):
    """Return the values of data row ``r`` as a list.

    ``get_index`` prepends the row's index cell.  ``only_columns``
    restricts output to the given int or iterable of column indexes.
    Raises IndexError when ``r`` is beyond the total row count.  NOTE:
    when ``r`` is within the total but beyond ``len(self.MT.data)``,
    the underlying data is grown in place via fix_data_len() — this
    getter can mutate the sheet's data length.
    """
    if kwargs:
        show_kwargs_warning(kwargs, 'get_row_data')
    # Normalize int -> single-element tuple; reject non-iterables.
    if (only_columns is not None):
        if isinstance(only_columns, int):
            only_columns = (only_columns,)
        elif (not is_iterable(only_columns)):
            raise ValueError(f"Argument 'only_columns' must be either int or iterable or None. Not {type(only_columns)}")
    if (r >= self.MT.total_data_rows()):
        raise IndexError(f'Row #{r} is out of range.')
    if (r >= len(self.MT.data)):
        total_data_cols = self.MT.total_data_cols()
        self.MT.fix_data_len(r, (total_data_cols - 1))
    iterable = (only_columns if (only_columns is not None) else range(len(self.MT.data[r])))
    if get_index:
        return ([self.get_index_data(r, get_displayed=get_index_displayed)] + [self.MT.get_cell_data(r, c, get_displayed=get_displayed) for c in iterable])
    else:
        return [self.MT.get_cell_data(r, c, get_displayed=get_displayed) for c in iterable]
def get_column_data(self, c, get_displayed=False, get_header=False, get_header_displayed=True, only_rows=None, **kwargs):
    """Return the values of data column ``c`` as a list, optionally
    prefixed with the header cell (``get_header``).  ``only_rows``
    restricts output to the given int or iterable of row indexes."""
    if kwargs:
        show_kwargs_warning(kwargs, 'get_column_data')
    if only_rows is not None:
        if isinstance(only_rows, int):
            only_rows = (only_rows,)
        elif not is_iterable(only_rows):
            raise ValueError(f"Argument 'only_rows' must be either int or iterable or None. Not {type(only_rows)}")
    rows = range(len(self.MT.data)) if only_rows is None else only_rows
    result = [self.get_header_data(c, get_displayed=get_header_displayed)] if get_header else []
    result.extend(self.MT.get_cell_data(r, c, get_displayed=get_displayed) for r in rows)
    return result
def yield_sheet_rows(self, get_displayed=False, get_header=False, get_index=False, get_index_displayed=True, get_header_displayed=True, only_rows=None, only_columns=None, **kwargs):
    """Lazily yield sheet rows as lists.

    Like get_sheet_data() but as a generator: optionally yields a header
    row first (with '' in the corner when ``get_index`` is also set),
    then one list per data row.  ``only_rows``/``only_columns`` restrict
    output to the given int or iterable of data indexes.
    """
    if kwargs:
        show_kwargs_warning(kwargs, 'yield_sheet_rows')
    # Normalize int -> single-element tuple; reject non-iterables.
    if (only_rows is not None):
        if isinstance(only_rows, int):
            only_rows = (only_rows,)
        elif (not is_iterable(only_rows)):
            raise ValueError(f"Argument 'only_rows' must be either int or iterable or None. Not {type(only_rows)}")
    if (only_columns is not None):
        if isinstance(only_columns, int):
            only_columns = (only_columns,)
        elif (not is_iterable(only_columns)):
            raise ValueError(f"Argument 'only_columns' must be either int or iterable or None. Not {type(only_columns)}")
    if get_header:
        maxlen = self.MT.total_data_cols()
        iterable = (only_columns if (only_columns is not None) else range(maxlen))
        (yield (([''] if get_index else []) + [self.get_header_data(c, get_displayed=get_header_displayed) for c in iterable]))
    iterable = (only_rows if (only_rows is not None) else range(len(self.MT.data)))
    (yield from (self.get_row_data(r, get_displayed=get_displayed, get_index=get_index, get_index_displayed=get_index_displayed, only_columns=only_columns) for r in iterable))
def data(self):
    """Return the underlying table data (a direct reference, not a copy)."""
    table = self.MT
    return table.data
def formatted(self, r, c):
    """Return True if cell (r, c) has a 'format' option applied.

    Single membership test replaces the former if/return True/return
    False chain; ``dict.get`` with an empty-tuple default avoids the
    double lookup.
    """
    return 'format' in self.MT.cell_options.get((r, c), ())
def __iter__(self):
    """Iterate over the sheet's data rows."""
    return iter(self.MT.data)
def __reversed__(self):
    """Iterate over the sheet's data rows in reverse order."""
    return reversed(self.MT.data)
def __contains__(self, key):
    """Membership test: a list/tuple matches a whole row; any other
    value matches if it appears in any cell."""
    if isinstance(key, (list, tuple)):
        return key in self.MT.data
    return any(key in row for row in self.MT.data)
def data_reference(self, newdataref=None, reset_col_positions=True, reset_row_positions=True, redraw=False):
    """Get or set the main table's data reference; delegates to MT."""
    result = self.MT.data_reference(newdataref, reset_col_positions, reset_row_positions, redraw)
    return result
def set_sheet_data(self, data=None, reset_col_positions=True, reset_row_positions=True, redraw=True, verify=False, reset_highlights=False, keep_formatting=True):
    """Replace the sheet's data with ``data`` (a list of row lists).

    ``verify`` raises ValueError when data is not a list of lists.
    ``reset_highlights`` clears all highlights first.  Returns the
    result of MT.data_reference().
    """
    if data is None:
        # Fresh list per call: the previous ``data=[[]]`` default was one
        # shared mutable object, and data_reference() stores it as the
        # sheet's live data — so every sheet set with the default ended up
        # referencing (and mutating) the same list.
        data = [[]]
    if verify and (not isinstance(data, list) or not all(isinstance(row, list) for row in data)):
        raise ValueError('Data argument must be a list of lists, sublists being rows')
    if reset_highlights:
        self.dehighlight_all()
    return self.MT.data_reference(data, reset_col_positions, reset_row_positions, redraw, return_id=False, keep_formatting=keep_formatting)
def set_cell_data(self, r, c, value='', redraw=False, keep_formatting=True):
    """Write ``value`` into cell (r, c).

    When ``keep_formatting`` is False the cell's formatter is removed
    first (values preserved).  ``redraw`` schedules a refresh.
    """
    if not keep_formatting:
        self.MT.delete_cell_format(r, c, clear_values=False)
    self.MT.set_cell_data(r, c, value)
    if redraw:
        self.set_refresh_timer()
def set_row_data(self, r, values=tuple(), add_columns=True, redraw=False, keep_formatting=True):
    """Overwrite data row ``r`` with ``values``.

    Empty ``values`` blanks the row to its empty-cell defaults.  Values
    beyond the row's current length are appended; with ``add_columns``
    a column position is also inserted when all columns are displayed.
    Raises IndexError (was a bare Exception — IndexError is the
    conventional type and remains catchable by existing ``except
    Exception`` callers) when ``r`` is out of range.
    """
    if r >= len(self.MT.data):
        raise IndexError('Row number is out of range')
    if not keep_formatting:
        self.MT.delete_row_format(r, clear_values=False)
    maxidx = len(self.MT.data[r]) - 1
    if not values:
        self.MT.data[r][:] = self.MT.get_empty_row_seq(r, len(self.MT.data[r]))
    # Single loop replaces the two near-identical add_columns branches;
    # only the column-position insertion depends on add_columns.
    for c, v in enumerate(values):
        if c > maxidx:
            self.MT.data[r].append(v)
            if add_columns and self.MT.all_columns_displayed:
                self.MT.insert_col_position('end')
        else:
            self.set_cell_data(r=r, c=c, value=v, redraw=False, keep_formatting=keep_formatting)
    self.set_refresh_timer(redraw)
def set_column_data(self, c, values=tuple(), add_rows=True, redraw=False, keep_formatting=True):
    """Overwrite data column ``c`` with ``values`` (one per row).

    With ``add_rows`` the sheet grows to fit values beyond the current
    row count (adding row positions when all rows are displayed); rows
    shorter than ``c`` are padded via fix_row_len().  Without
    ``add_rows``, rows beyond the current data are assumed to exist
    already — no bounds check is performed in that branch.
    """
    if (not keep_formatting):
        self.MT.delete_column_format(c, clear_values=False)
    if add_rows:
        maxidx = (len(self.MT.data) - 1)
        total_cols = None
        height = self.MT.default_row_height[1]
        for (rn, v) in enumerate(values):
            if (rn > maxidx):
                # Grow the data by one empty row; total_cols fetched
                # lazily, only when growth actually happens.
                if (total_cols is None):
                    total_cols = self.MT.total_data_cols()
                self.MT.fix_data_len(rn, (total_cols - 1))
                if self.MT.all_rows_displayed:
                    self.MT.insert_row_position('end', height=height)
                maxidx += 1
            if (c >= len(self.MT.data[rn])):
                self.MT.fix_row_len(rn, c)
            self.set_cell_data(r=rn, c=c, value=v, redraw=False, keep_formatting=keep_formatting)
    else:
        for (rn, v) in enumerate(values):
            if (c >= len(self.MT.data[rn])):
                self.MT.fix_row_len(rn, c)
            self.set_cell_data(r=rn, c=c, value=v, redraw=False, keep_formatting=keep_formatting)
    self.set_refresh_timer(redraw)
def insert_column(self, values: Union[(List, Tuple, int, None)]=None, idx: Union[(str, int)]='end', width=None, deselect_all=False, add_rows=True, equalize_data_row_lengths=True, mod_column_positions=True, redraw=True):
    """Insert a single column at ``idx``; thin wrapper over insert_columns().

    ``values`` may be a list/tuple of cell values, an int count, or None
    for one empty column.
    """
    if isinstance(values, (list, tuple)):
        columns = (values,)
    elif values is None:
        columns = 1
    else:
        columns = values
    widths = (width,) if isinstance(width, int) else width
    self.insert_columns(columns, idx, widths, deselect_all, add_rows, equalize_data_row_lengths, mod_column_positions, redraw)
def insert_columns(self, columns: Union[(List, Tuple, int, None)]=1, idx: Union[(str, int)]='end', widths=None, deselect_all=False, add_rows=True, equalize_data_row_lengths=True, mod_column_positions=True, redraw=True):
    """Insert one or more columns at ``idx`` ('end' or an int data index).

    ``columns`` is either an int count of empty columns or an iterable
    of column value lists.  ``widths`` optionally sets the new column
    widths; ``add_rows`` grows the data to fit column values longer than
    the current row count.  Handles both all-columns-displayed and
    hidden-columns modes, and shifts cell/column options when inserting
    at an int index.
    """
    if equalize_data_row_lengths:
        old_total = self.MT.equalize_data_row_lengths()
    else:
        old_total = self.MT.total_data_cols()
    if isinstance(columns, int):
        if (columns < 1):
            raise ValueError(f'columns arg must be greater than 0, not {columns}')
        total_rows = self.MT.total_data_rows()
        start = (old_total if (idx == 'end') else idx)
        # Build empty column value lists; c_ops only applies at 'end'.
        data = [[self.MT.get_value_for_empty_cell(datarn, datacn, c_ops=(idx == 'end')) for datarn in range(total_rows)] for datacn in range(start, (start + columns))]
        numcols = columns
    else:
        data = columns
        numcols = len(columns)
    if self.MT.all_columns_displayed:
        if mod_column_positions:
            self.MT.insert_col_positions(idx=idx, widths=(columns if (isinstance(columns, int) and (widths is None)) else widths), deselect_all=deselect_all)
    elif (not self.MT.all_columns_displayed):
        # Hidden-columns mode: shift displayed column indexes past idx,
        # then splice the new columns into the displayed list.
        if (idx != 'end'):
            self.MT.displayed_columns = [(c if (c < idx) else (c + numcols)) for c in self.MT.displayed_columns]
        if mod_column_positions:
            inspos = bisect.bisect_left(self.MT.displayed_columns, idx)
            self.MT.displayed_columns[inspos:inspos] = list(range(idx, (idx + numcols)))
            self.MT.insert_col_positions(idx=inspos, widths=(columns if (isinstance(columns, int) and (widths is None)) else widths), deselect_all=deselect_all)
    maxidx = (len(self.MT.data) - 1)
    if add_rows:
        height = self.MT.default_row_height[1]
        if (idx == 'end'):
            for values in reversed(data):
                for (rn, v) in enumerate(values):
                    if (rn > maxidx):
                        # Column is longer than the data: append an empty row.
                        self.MT.data.append(self.MT.get_empty_row_seq(rn, old_total))
                        if self.MT.all_rows_displayed:
                            self.MT.insert_row_position('end', height=height)
                        maxidx += 1
                    self.MT.data[rn].append(v)
        else:
            for values in reversed(data):
                for (rn, v) in enumerate(values):
                    if (rn > maxidx):
                        self.MT.data.append(self.MT.get_empty_row_seq(rn, old_total))
                        if self.MT.all_rows_displayed:
                            self.MT.insert_row_position('end', height=height)
                        maxidx += 1
                    self.MT.data[rn].insert(idx, v)
    elif (idx == 'end'):
        for values in reversed(data):
            for (rn, v) in enumerate(values):
                if (rn > maxidx):
                    # Not growing: discard values beyond the data's rows.
                    break
                self.MT.data[rn].append(v)
    else:
        for values in reversed(data):
            for (rn, v) in enumerate(values):
                if (rn > maxidx):
                    break
                self.MT.data[rn].insert(idx, v)
    if isinstance(idx, int):
        # Shift per-cell/per-column options at or past the insertion point.
        num_add = len(data)
        self.MT.cell_options = {(rn, (cn if (cn < idx) else (cn + num_add))): t2 for ((rn, cn), t2) in self.MT.cell_options.items()}
        self.MT.col_options = {(cn if (cn < idx) else (cn + num_add)): t for (cn, t) in self.MT.col_options.items()}
        self.CH.cell_options = {(cn if (cn < idx) else (cn + num_add)): t for (cn, t) in self.CH.cell_options.items()}
    self.set_refresh_timer(redraw)
def insert_row(self, values: Union[(List, None)]=None, idx: Union[(str, int)]='end', height=None, deselect_all=False, add_columns=False, mod_row_positions=True, redraw=True):
    """Insert a single row at ``idx``; thin wrapper over insert_rows().

    ``values`` is a list of cell values or None for one empty row.
    """
    rows = 1 if values is None else [values]
    heights = None if height is None else [height]
    self.insert_rows(rows=rows, idx=idx, heights=heights, deselect_all=deselect_all, add_columns=add_columns, mod_row_positions=mod_row_positions, redraw=redraw)
def insert_rows(self, rows: Union[(List, int)]=1, idx: Union[(str, int)]='end', heights=None, deselect_all=False, add_columns=True, mod_row_positions=True, redraw=True):
    """Insert one or more rows at ``idx`` ('end' or an int data index).

    ``rows`` is either an int count of empty rows or an iterable of row
    value lists (non-list sequences are converted).  ``heights``
    optionally sets the new row heights; ``add_columns`` pads rows or
    widens the sheet so data and column positions agree.  Handles both
    all-rows-displayed and hidden-rows modes, and shifts cell/row
    options when inserting at an int index.
    """
    total_cols = None
    datarn = (len(self.MT.data) if (idx == 'end') else idx)
    if isinstance(rows, int):
        if (rows < 1):
            raise ValueError(f'rows arg must be greater than 0, not {rows}')
        total_cols = self.MT.total_data_cols()
        data = [self.MT.get_empty_row_seq((datarn + i), total_cols, r_ops=False) for i in range(rows)]
    elif (not isinstance(rows, list)):
        data = list(rows)
    else:
        data = rows
    try:
        # Ensure every row is a real list (tuples/iterables converted).
        data = [(r if isinstance(r, list) else list(r)) for r in data]
    except Exception as msg:
        raise ValueError(f'rows arg must be int or list of lists. {msg}')
    if add_columns:
        if (total_cols is None):
            total_cols = self.MT.total_data_cols()
        data_max_cols = len(max(data, key=len))
        if (data_max_cols > total_cols):
            # New rows are wider than the sheet: widen everything.
            self.MT.equalize_data_row_lengths(total_columns=data_max_cols)
        elif (total_cols > data_max_cols):
            # New rows are narrower: pad them with empty-cell values.
            data[:] = [(data[i] + self.MT.get_empty_row_seq((datarn + i), end=total_cols, start=data_max_cols, r_ops=False)) for i in range(len(data))]
        if self.MT.all_columns_displayed:
            if (not self.MT.col_positions):
                self.MT.col_positions = [0]
            if (data_max_cols > (len(self.MT.col_positions) - 1)):
                self.insert_column_positions('end', (data_max_cols - (len(self.MT.col_positions) - 1)))
    if (self.MT.all_rows_displayed and mod_row_positions):
        inspos = idx
    if (not self.MT.all_rows_displayed):
        # Hidden-rows mode: shift displayed row indexes past idx and
        # splice the new rows into the displayed list.
        numrows = len(data)
        if (idx != 'end'):
            self.MT.displayed_rows = [(r if (r < idx) else (r + numrows)) for r in self.MT.displayed_rows]
        if mod_row_positions:
            # NOTE(review): with idx == 'end' in hidden-rows mode this
            # bisects on a str key — presumably callers use int idx when
            # rows are hidden; verify against callers.
            inspos = bisect.bisect_left(self.MT.displayed_rows, idx)
            self.MT.displayed_rows[inspos:inspos] = list(range(idx, (idx + numrows)))
    if mod_row_positions:
        self.MT.insert_row_positions(idx=inspos, heights=(len(data) if (heights is None) else heights), deselect_all=deselect_all)
    if (isinstance(idx, str) and (idx.lower() == 'end')):
        self.MT.data.extend(data)
    else:
        self.MT.data[idx:idx] = data
        # Shift per-cell/per-row options at or past the insertion point.
        num_add = len(data)
        self.MT.cell_options = {((rn if (rn < idx) else (rn + num_add)), cn): t2 for ((rn, cn), t2) in self.MT.cell_options.items()}
        self.MT.row_options = {(rn if (rn < idx) else (rn + num_add)): t for (rn, t) in self.MT.row_options.items()}
        self.RI.cell_options = {(rn if (rn < idx) else (rn + num_add)): t for (rn, t) in self.RI.cell_options.items()}
    self.set_refresh_timer(redraw)
def sheet_data_dimensions(self, total_rows=None, total_columns=None):
    """Resize the sheet's data to the given row/column totals (None
    leaves a dimension unchanged); delegates to MT."""
    self.MT.data_dimensions(total_rows, total_columns)
def get_total_rows(self, include_index=False):
    """Return the total number of data rows; ``include_index`` is
    forwarded to the main table's counter."""
    return self.MT.total_data_rows(include_index=include_index)
def get_total_columns(self, include_header=False):
    """Return the total number of data columns; ``include_header`` is
    forwarded to the main table's counter."""
    return self.MT.total_data_cols(include_header=include_header)
def equalize_data_row_lengths(self):
    """Pad all data rows to the same length; delegates to MT and
    returns its result."""
    return self.MT.equalize_data_row_lengths()
def display_rows(self, rows=None, all_rows_displayed=None, reset_row_positions=True, refresh=False, redraw=False, deselect_all=True, **kwargs):
    """Set which data rows are displayed.

    The string 'all' (any case) shows every row; otherwise ``rows`` is
    an iterable of row indexes.  The legacy kwarg 'all_displayed'
    overrides ``all_rows_displayed``.  Returns MT.display_rows()'s
    result.
    """
    if 'all_displayed' in kwargs:
        all_rows_displayed = kwargs['all_displayed']
    want_all = isinstance(rows, str) and rows.lower() == 'all'
    res = self.MT.display_rows(
        rows=None if want_all else rows,
        all_rows_displayed=True if want_all else all_rows_displayed,
        reset_row_positions=reset_row_positions,
        deselect_all=deselect_all,
    )
    if refresh or redraw:
        self.set_refresh_timer(redraw if redraw else refresh)
    return res
def display_columns(self, columns=None, all_columns_displayed=None, reset_col_positions=True, refresh=False, redraw=False, deselect_all=True, **kwargs):
    """Set which data columns are displayed.

    The string 'all' (any case) shows every column; otherwise
    ``columns`` is an iterable of column indexes.  The legacy kwarg
    'all_displayed' overrides ``all_columns_displayed``.  Returns
    MT.display_columns()'s result.
    """
    if 'all_displayed' in kwargs:
        all_columns_displayed = kwargs['all_displayed']
    want_all = isinstance(columns, str) and columns.lower() == 'all'
    res = self.MT.display_columns(
        columns=None if want_all else columns,
        all_columns_displayed=True if want_all else all_columns_displayed,
        reset_col_positions=reset_col_positions,
        deselect_all=deselect_all,
    )
    if refresh or redraw:
        self.set_refresh_timer(redraw if redraw else refresh)
    return res
def all_rows_displayed(self, a=None):
    """Return whether all rows are currently displayed; when ``a`` is a
    bool, also set the flag (the previous value is returned)."""
    previous = bool(self.MT.all_rows_displayed)
    if isinstance(a, bool):
        self.MT.all_rows_displayed = a
    return previous
def all_columns_displayed(self, a=None):
    """Return whether all columns are currently displayed; when ``a`` is
    a bool, also set the flag (the previous value is returned)."""
    previous = bool(self.MT.all_columns_displayed)
    if isinstance(a, bool):
        self.MT.all_columns_displayed = a
    return previous
def hide_rows(self, rows=set(), redraw=True, deselect_all=True):
    """Hide the given displayed row positions (int, set, or iterable).

    No-op when ``rows`` is empty.  The remaining rows are re-displayed
    via display_rows() with all_rows_displayed=False.
    """
    if isinstance(rows, int):
        to_hide = {rows}
    else:
        to_hide = rows if isinstance(rows, set) else set(rows)
    if not to_hide:
        return
    if self.MT.all_rows_displayed:
        keep = [r for r in range(self.MT.total_data_rows()) if r not in to_hide]
    else:
        # rows refer to positions within the currently displayed rows.
        keep = [shown for pos, shown in enumerate(self.MT.displayed_rows) if pos not in to_hide]
    self.display_rows(rows=keep, all_rows_displayed=False, redraw=redraw, deselect_all=deselect_all)
def hide_columns(self, columns=set(), redraw=True, deselect_all=True):
    """Hide the given displayed column positions (int, set, or iterable).

    No-op when ``columns`` is empty.  The remaining columns are
    re-displayed via display_columns() with all_columns_displayed=False.
    """
    if isinstance(columns, int):
        to_hide = {columns}
    else:
        to_hide = columns if isinstance(columns, set) else set(columns)
    if not to_hide:
        return
    if self.MT.all_columns_displayed:
        keep = [c for c in range(self.MT.total_data_cols()) if c not in to_hide]
    else:
        # columns refer to positions within the currently displayed columns.
        keep = [shown for pos, shown in enumerate(self.MT.displayed_columns) if pos not in to_hide]
    self.display_columns(columns=keep, all_columns_displayed=False, redraw=redraw, deselect_all=deselect_all)
def show_ctrl_outline(self, canvas='table', start_cell=(0, 0), end_cell=(1, 1)):
    """Draw the ctrl-selection outline over the given cell range;
    delegates to MT."""
    self.MT.show_ctrl_outline(canvas=canvas, start_cell=start_cell, end_cell=end_cell)
def get_ctrl_x_c_boxes(self):
    """Return the cut/copy selection boxes; delegates to MT."""
    return self.MT.get_ctrl_x_c_boxes()
def get_selected_min_max(self):
    """Return the bounds of the current selection; delegates to MT."""
    return self.MT.get_selected_min_max()
def headers(self, newheaders=None, index=None, reset_col_positions=False, show_headers_if_not_sheet=True, redraw=False):
    """Get or set the headers; delegates to MT.headers().

    A refresh is scheduled here (per ``redraw``) rather than inside
    MT.headers(), which is always called with redraw=False.
    """
    self.set_refresh_timer(redraw)
    return self.MT.headers(newheaders, index, reset_col_positions=reset_col_positions, show_headers_if_not_sheet=show_headers_if_not_sheet, redraw=False)
def row_index(self, newindex=None, index=None, reset_row_positions=False, show_index_if_not_sheet=True, redraw=False):
    """Get or set the row index; delegates to MT.row_index().

    A refresh is scheduled here (per ``redraw``) rather than inside
    MT.row_index(), which is always called with redraw=False.
    """
    self.set_refresh_timer(redraw)
    return self.MT.row_index(newindex, index, reset_row_positions=reset_row_positions, show_index_if_not_sheet=show_index_if_not_sheet, redraw=False)
def reset_undos(self):
    """Discard all stored undo steps by replacing the undo deque."""
    self.MT.undo_storage = deque(maxlen=self.MT.max_undos)
def redraw(self, redraw_header=True, redraw_row_index=True):
    """Redraw the table grid and text immediately; delegates to MT."""
    self.MT.main_table_redraw_grid_and_text(redraw_header=redraw_header, redraw_row_index=redraw_row_index)
def refresh(self, redraw_header=True, redraw_row_index=True):
    """Alias of redraw(): redraw the table grid and text immediately."""
    self.MT.main_table_redraw_grid_and_text(redraw_header=redraw_header, redraw_row_index=redraw_row_index)
def create_checkbox(self, r=0, c=0, *args, **kwargs):
    """Create checkboxes in the table.

    ``r``/``c`` may each be an int or the string 'all' (any case):
    int/int targets one cell, 'all'/int a whole column of rows,
    int/'all' a whole row of columns, 'all'/'all' every cell.  Checkbox
    options are parsed from *args/**kwargs by get_checkbox_kwargs().
    Other r/c combinations are silently ignored.
    """
    _kwargs = get_checkbox_kwargs(*args, **kwargs)
    if (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, int)):
        for r_ in range(self.MT.total_data_rows()):
            self.MT.create_checkbox(datarn=r_, datacn=c, **_kwargs)
    elif (isinstance(c, str) and (c.lower() == 'all') and isinstance(r, int)):
        for c_ in range(self.MT.total_data_cols()):
            self.MT.create_checkbox(datarn=r, datacn=c_, **_kwargs)
    elif (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, str) and (c.lower() == 'all')):
        totalcols = self.MT.total_data_cols()
        for r_ in range(self.MT.total_data_rows()):
            for c_ in range(totalcols):
                self.MT.create_checkbox(datarn=r_, datacn=c_, **_kwargs)
    elif (isinstance(r, int) and isinstance(c, int)):
        self.MT.create_checkbox(datarn=r, datacn=c, **_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def checkbox_cell(self, r=0, c=0, *args, **kwargs):
    """Alias for create_checkbox() on a single cell (or 'all' targets)."""
    options = get_checkbox_kwargs(*args, **kwargs)
    self.create_checkbox(r=r, c=c, **options)
def create_header_checkbox(self, c=0, *args, **kwargs):
    """Create checkbox(es) in the header.

    c may be an int, an iterable of ints, 'all', or anything else to
    create a header-wide checkbox (the final else branch).
    """
    _kwargs = get_checkbox_kwargs(*args, **kwargs)
    if (isinstance(c, str) and (c.lower() == 'all')):
        for c_ in range(self.MT.total_data_cols()):
            self.CH.create_checkbox(datacn=c_, **_kwargs)
    elif isinstance(c, int):
        self.CH.create_checkbox(datacn=c, **_kwargs)
    elif is_iterable(c):
        for c_ in c:
            self.CH.create_checkbox(datacn=c_, **_kwargs)
    else:
        self.CH.checkbox_header(**_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def create_index_checkbox(self, r=0, *args, **kwargs):
    """Create checkbox(es) in the row index.

    r may be an int, an iterable of ints, 'all', or anything else to
    create an index-wide checkbox (the final else branch).
    """
    _kwargs = get_checkbox_kwargs(*args, **kwargs)
    if (isinstance(r, str) and (r.lower() == 'all')):
        for r_ in range(self.MT.total_data_rows()):
            self.RI.create_checkbox(datarn=r_, **_kwargs)
    elif isinstance(r, int):
        self.RI.create_checkbox(datarn=r, **_kwargs)
    elif is_iterable(r):
        for r_ in r:
            self.RI.create_checkbox(datarn=r_, **_kwargs)
    else:
        self.RI.checkbox_index(**_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def checkbox_row(self, r=0, *args, **kwargs):
    """Turn whole row(s) into checkbox rows; r is an int, iterable of ints or 'all'.

    NOTE(review): unlike create_index_checkbox there is no fallback
    branch, so an unrecognized r does nothing except schedule a redraw.
    """
    _kwargs = get_checkbox_kwargs(*args, **kwargs)
    if (isinstance(r, str) and (r.lower() == 'all')):
        for r_ in range(self.MT.total_data_rows()):
            self.MT.checkbox_row(datarn=r_, **_kwargs)
    elif isinstance(r, int):
        self.MT.checkbox_row(datarn=r, **_kwargs)
    elif is_iterable(r):
        for r_ in r:
            self.MT.checkbox_row(datarn=r_, **_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def checkbox_column(self, c=0, *args, **kwargs):
    """Turn whole column(s) into checkbox columns.

    c is an int, an iterable of ints, or 'all' for every data column.
    """
    opts = get_checkbox_kwargs(*args, **kwargs)
    if (isinstance(c, str) and (c.lower() == 'all')):
        # use a distinct loop name so the parameter is not shadowed
        for col in range(self.MT.total_data_cols()):
            self.MT.checkbox_column(datacn=col, **opts)
    elif isinstance(c, int):
        self.MT.checkbox_column(datacn=c, **opts)
    elif is_iterable(c):
        for col in c:
            self.MT.checkbox_column(datacn=col, **opts)
    self.set_refresh_timer(opts['redraw'])
def checkbox_sheet(self, *args, **kwargs):
    """Create a sheet-wide checkbox option.

    Consistency fix: dropdown_sheet honours the parsed 'redraw' kwarg by
    scheduling a refresh; this method now does the same instead of
    silently dropping it.
    """
    _kwargs = get_checkbox_kwargs(*args, **kwargs)
    self.MT.checkbox_sheet(**_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def delete_checkbox(self, r=0, c=0):
    """Delete cell checkbox options.

    r and/or c may be 'all' to target every matching cell option.

    Bug fix: the 'all' branches indexed cell_options with a key such as
    (r_, c) built from mismatched coordinates — a KeyError whenever that
    exact key did not exist — and deleted entries from the dict while
    iterating it. Now each branch filters the snapshot of existing keys
    and only deletes options that are actually present.
    """
    if (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, int)):
        for (r_, c_) in list(self.MT.cell_options):
            if ((c_ == c) and ('checkbox' in self.MT.cell_options[(r_, c_)])):
                self.MT.delete_cell_options_checkbox(r_, c_)
    elif (isinstance(c, str) and (c.lower() == 'all') and isinstance(r, int)):
        for (r_, c_) in list(self.MT.cell_options):
            if ((r_ == r) and ('checkbox' in self.MT.cell_options[(r_, c_)])):
                self.MT.delete_cell_options_checkbox(r_, c_)
    elif (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, str) and (c.lower() == 'all')):
        for (r_, c_) in list(self.MT.cell_options):
            if ('checkbox' in self.MT.cell_options[(r_, c_)]):
                self.MT.delete_cell_options_checkbox(r_, c_)
    elif (isinstance(r, int) and isinstance(c, int)):
        self.MT.delete_cell_options_checkbox(r, c)
def delete_cell_checkbox(self, r=0, c=0):
    """Alias of delete_checkbox()."""
    self.delete_checkbox(r, c)
def delete_row_checkbox(self, r=0):
    """Delete row checkbox options; r is an int, an iterable of ints or 'all'.

    Fixes: the 'all' branch iterated row_options while deletions could
    mutate it (RuntimeError risk) and called the delete helper for rows
    with no checkbox option. Iterate a snapshot and guard on 'checkbox',
    consistent with delete_row_dropdown.
    """
    if (isinstance(r, str) and (r.lower() == 'all')):
        for r_ in list(self.MT.row_options):
            if ('checkbox' in self.MT.row_options[r_]):
                self.MT.delete_row_options_checkbox(r_)
    elif isinstance(r, int):
        self.MT.delete_row_options_checkbox(r)
    elif is_iterable(r):
        for r_ in r:
            self.MT.delete_row_options_checkbox(r_)
def delete_column_checkbox(self, c=0):
    """Delete column checkbox options; c is an int, an iterable of ints or 'all'.

    Fixes: snapshot the options dict before deleting during iteration and
    only delete columns that actually carry a checkbox option, consistent
    with delete_column_dropdown.
    """
    if (isinstance(c, str) and (c.lower() == 'all')):
        for c_ in list(self.MT.col_options):
            if ('checkbox' in self.MT.col_options[c_]):
                self.MT.delete_column_options_checkbox(c_)
    elif isinstance(c, int):
        self.MT.delete_column_options_checkbox(c)
    elif is_iterable(c):
        for c_ in c:
            self.MT.delete_column_options_checkbox(c_)
def delete_sheet_checkbox(self):
    """Delete the sheet-wide checkbox option."""
    self.MT.delete_options_checkbox()
def delete_header_checkbox(self, c=0):
    """Delete header checkbox options.

    c may be an int, 'all', or anything else to delete the header-wide
    checkbox option.

    Bug fix: the int branch was a separate `if`, so calling with c='all'
    fell through to the final `else` and also wiped the header-wide
    checkbox option; the branches are now chained with elif. The 'all'
    branch also iterates a snapshot so deleting cannot mutate the dict
    mid-iteration.
    """
    if (isinstance(c, str) and (c.lower() == 'all')):
        for c_ in list(self.CH.cell_options):
            if ('checkbox' in self.CH.cell_options[c_]):
                self.CH.delete_cell_options_checkbox(c_)
    elif isinstance(c, int):
        self.CH.delete_cell_options_checkbox(c)
    else:
        self.CH.delete_options_checkbox()
def delete_index_checkbox(self, r=0):
    """Delete row-index checkbox options.

    r may be an int, 'all', or anything else to delete the index-wide
    checkbox option.

    Bug fix: same missing-elif defect as delete_header_checkbox — with
    r='all' the `else` also ran and deleted the index-wide checkbox.
    Iterates a snapshot of cell_options for safe deletion.
    """
    if (isinstance(r, str) and (r.lower() == 'all')):
        for r_ in list(self.RI.cell_options):
            if ('checkbox' in self.RI.cell_options[r_]):
                self.RI.delete_cell_options_checkbox(r_)
    elif isinstance(r, int):
        self.RI.delete_cell_options_checkbox(r)
    else:
        self.RI.delete_options_checkbox()
def click_checkbox(self, r, c, checked=None):
    """Programmatically click the checkbox at cell (r, c), if one exists.

    If the cell value is already a bool it is toggled (checked ignored);
    otherwise the cell is set to bool(checked), or False when checked is
    None. Does nothing when the cell has no checkbox option.
    """
    kwargs = self.MT.get_cell_kwargs(r, c, key='checkbox')
    if kwargs:
        if (not isinstance(self.MT.data[r][c], bool)):
            if (checked is None):
                self.MT.data[r][c] = False
            else:
                self.MT.data[r][c] = bool(checked)
        else:
            self.MT.data[r][c] = (not self.MT.data[r][c])
def click_header_checkbox(self, c, checked=None):
    """Programmatically click the header checkbox at column c, if one exists.

    Same semantics as click_checkbox, applied to the header value.
    """
    kwargs = self.CH.get_cell_kwargs(c, key='checkbox')
    if kwargs:
        if (not isinstance(self.MT._headers[c], bool)):
            if (checked is None):
                self.MT._headers[c] = False
            else:
                self.MT._headers[c] = bool(checked)
        else:
            self.MT._headers[c] = (not self.MT._headers[c])
def click_index_checkbox(self, r, checked=None):
    """Programmatically click the row-index checkbox at row r, if one exists.

    Same semantics as click_checkbox, applied to the row index value.
    """
    kwargs = self.RI.get_cell_kwargs(r, key='checkbox')
    if kwargs:
        if (not isinstance(self.MT._row_index[r], bool)):
            if (checked is None):
                self.MT._row_index[r] = False
            else:
                self.MT._row_index[r] = bool(checked)
        else:
            self.MT._row_index[r] = (not self.MT._row_index[r])
def get_checkboxes(self):
    """Return all table checkbox options.

    Keys are (row, col) tuples for cell options and ints for row/column
    options; the sheet-wide checkbox, if any, is stored under the string
    key 'checkbox'.
    """
    found = {}
    for option_dict in (self.MT.cell_options, self.MT.row_options, self.MT.col_options):
        for key, opts in option_dict.items():
            if 'checkbox' in opts:
                found[key] = opts['checkbox']
    if 'checkbox' in self.MT.options:
        found['checkbox'] = self.MT.options['checkbox']
    return found
def get_header_checkboxes(self):
    """Return all header checkbox options keyed by column, plus the header-wide one under 'checkbox' if present."""
    found = {col: opts['checkbox'] for col, opts in self.CH.cell_options.items() if 'checkbox' in opts}
    if 'checkbox' in self.CH.options:
        found['checkbox'] = self.CH.options['checkbox']
    return found
def get_index_checkboxes(self):
    """Return all row-index checkbox options keyed by row, plus the index-wide one under 'checkbox' if present."""
    found = {row: opts['checkbox'] for row, opts in self.RI.cell_options.items() if 'checkbox' in opts}
    if 'checkbox' in self.RI.options:
        found['checkbox'] = self.RI.options['checkbox']
    return found
def checkbox(self, r, c, checked=None, state=None, check_function='', text=None):
    """Get/set properties of the checkbox at cell (r, c).

    Passing checked as a bool sets the cell value. Non-default state,
    check_function or text values are written into the checkbox kwargs.
    Returns the kwargs dict plus the current cell value under 'checked'.
    NOTE(review): no `if kwargs:` guard here, unlike header_checkbox —
    with no checkbox present this mutates and returns an empty dict.
    """
    if isinstance(checked, bool):
        self.set_cell_data(r, c, checked)
    kwargs = self.MT.get_cell_kwargs(r, c, key='checkbox')
    if (check_function != ''):
        kwargs['check_function'] = check_function
    if (state and (state.lower() in ('normal', 'disabled'))):
        kwargs['state'] = state
    if (text is not None):
        kwargs['text'] = text
    return {**kwargs, 'checked': self.MT.data[r][c]}
def header_checkbox(self, c, checked=None, state=None, check_function='', text=None):
    """Get/set properties of the header checkbox at column c.

    Returns the checkbox kwargs plus the current header value under
    'checked', or None (implicitly) when the column has no checkbox.
    """
    if isinstance(checked, bool):
        self.headers(newheaders=checked, index=c)
    kwargs = self.CH.get_cell_kwargs(c, key='checkbox')
    if kwargs:
        if (check_function != ''):
            kwargs['check_function'] = check_function
        if (state and (state.lower() in ('normal', 'disabled'))):
            kwargs['state'] = state
        if (text is not None):
            kwargs['text'] = text
        return {**kwargs, 'checked': self.MT._headers[c]}
def index_checkbox(self, r, checked=None, state=None, check_function='', text=None):
    """Get/set properties of the row-index checkbox at row r.

    Returns the checkbox kwargs plus the current index value under
    'checked', or None (implicitly) when the row has no checkbox.
    """
    if isinstance(checked, bool):
        self.row_index(newindex=checked, index=r)
    kwargs = self.RI.get_cell_kwargs(r, key='checkbox')
    if kwargs:
        if (check_function != ''):
            kwargs['check_function'] = check_function
        if (state and (state.lower() in ('normal', 'disabled'))):
            kwargs['state'] = state
        if (text is not None):
            kwargs['text'] = text
        return {**kwargs, 'checked': self.MT._row_index[r]}
def create_dropdown(self, r=0, c=0, *args, **kwargs):
    """Create dropdown(s) in the main table.

    r and c are data row/column ints or 'all'; remaining args are parsed
    by get_dropdown_kwargs. Mirrors create_checkbox.
    """
    _kwargs = get_dropdown_kwargs(*args, **kwargs)
    if (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, int)):
        for r_ in range(self.MT.total_data_rows()):
            self.MT.create_dropdown(datarn=r_, datacn=c, **_kwargs)
    elif (isinstance(c, str) and (c.lower() == 'all') and isinstance(r, int)):
        for c_ in range(self.MT.total_data_cols()):
            self.MT.create_dropdown(datarn=r, datacn=c_, **_kwargs)
    elif (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, str) and (c.lower() == 'all')):
        # hoist the column count out of the nested loop
        totalcols = self.MT.total_data_cols()
        for r_ in range(self.MT.total_data_rows()):
            for c_ in range(totalcols):
                self.MT.create_dropdown(datarn=r_, datacn=c_, **_kwargs)
    elif (isinstance(r, int) and isinstance(c, int)):
        self.MT.create_dropdown(datarn=r, datacn=c, **_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def dropdown_cell(self, r=0, c=0, *args, **kwargs):
    """Alias of create_dropdown()."""
    self.create_dropdown(r=r, c=c, **get_dropdown_kwargs(*args, **kwargs))
def dropdown_row(self, r=0, *args, **kwargs):
    """Turn whole row(s) into dropdown rows; r is an int, iterable of ints or 'all'."""
    _kwargs = get_dropdown_kwargs(*args, **kwargs)
    if (isinstance(r, str) and (r.lower() == 'all')):
        for r_ in range(self.MT.total_data_rows()):
            self.MT.dropdown_row(datarn=r_, **_kwargs)
    elif isinstance(r, int):
        self.MT.dropdown_row(datarn=r, **_kwargs)
    elif is_iterable(r):
        for r_ in r:
            self.MT.dropdown_row(datarn=r_, **_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def dropdown_column(self, c=0, *args, **kwargs):
    """Turn whole column(s) into dropdown columns; c is an int, iterable of ints or 'all'."""
    _kwargs = get_dropdown_kwargs(*args, **kwargs)
    if (isinstance(c, str) and (c.lower() == 'all')):
        for c_ in range(self.MT.total_data_cols()):
            self.MT.dropdown_column(datacn=c_, **_kwargs)
    elif isinstance(c, int):
        self.MT.dropdown_column(datacn=c, **_kwargs)
    elif is_iterable(c):
        for c_ in c:
            self.MT.dropdown_column(datacn=c_, **_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def dropdown_sheet(self, *args, **kwargs):
    """Create a sheet-wide dropdown option and schedule a redraw."""
    _kwargs = get_dropdown_kwargs(*args, **kwargs)
    self.MT.dropdown_sheet(**_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def create_header_dropdown(self, c=0, *args, **kwargs):
    """Create dropdown(s) in the header.

    c may be an int, an iterable of ints, 'all', or None for a
    header-wide dropdown.
    """
    _kwargs = get_dropdown_kwargs(*args, **kwargs)
    if (isinstance(c, str) and (c.lower() == 'all')):
        for c_ in range(self.MT.total_data_cols()):
            self.CH.create_dropdown(datacn=c_, **_kwargs)
    elif isinstance(c, int):
        self.CH.create_dropdown(datacn=c, **_kwargs)
    elif is_iterable(c):
        for c_ in c:
            self.CH.create_dropdown(datacn=c_, **_kwargs)
    elif (c is None):
        self.CH.dropdown_header(**_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def create_index_dropdown(self, r=0, *args, **kwargs):
    """Create dropdown(s) in the row index.

    r may be an int, an iterable of ints, 'all', or None for an
    index-wide dropdown.
    """
    _kwargs = get_dropdown_kwargs(*args, **kwargs)
    if (isinstance(r, str) and (r.lower() == 'all')):
        for r_ in range(self.MT.total_data_rows()):
            self.RI.create_dropdown(datarn=r_, **_kwargs)
    elif isinstance(r, int):
        self.RI.create_dropdown(datarn=r, **_kwargs)
    elif is_iterable(r):
        for r_ in r:
            self.RI.create_dropdown(datarn=r_, **_kwargs)
    elif (r is None):
        self.RI.dropdown_index(**_kwargs)
    self.set_refresh_timer(_kwargs['redraw'])
def delete_dropdown(self, r=0, c=0):
    """Delete cell dropdown options.

    r and/or c may be 'all' to target every matching cell option.

    Bug fix (same defect as delete_checkbox): the 'all' branches indexed
    cell_options with mismatched keys like (r_, c) — raising KeyError for
    keys that do not exist — and deleted from the dict while iterating
    it. Each branch now filters a snapshot of the existing keys.
    """
    if (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, int)):
        for (r_, c_) in list(self.MT.cell_options):
            if ((c_ == c) and ('dropdown' in self.MT.cell_options[(r_, c_)])):
                self.MT.delete_cell_options_dropdown(r_, c_)
    elif (isinstance(c, str) and (c.lower() == 'all') and isinstance(r, int)):
        for (r_, c_) in list(self.MT.cell_options):
            if ((r_ == r) and ('dropdown' in self.MT.cell_options[(r_, c_)])):
                self.MT.delete_cell_options_dropdown(r_, c_)
    elif (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, str) and (c.lower() == 'all')):
        for (r_, c_) in list(self.MT.cell_options):
            if ('dropdown' in self.MT.cell_options[(r_, c_)]):
                self.MT.delete_cell_options_dropdown(r_, c_)
    elif (isinstance(r, int) and isinstance(c, int)):
        self.MT.delete_cell_options_dropdown(r, c)
def delete_cell_dropdown(self, r=0, c=0):
    """Alias of delete_dropdown()."""
    self.delete_dropdown(r=r, c=c)
def delete_row_dropdown(self, r='all'):
    """Delete row dropdown options; r is an int, an iterable of ints or 'all'.

    Fix: the 'all' branch deleted entries while iterating row_options;
    iterate over a snapshot of the keys instead.
    """
    if (isinstance(r, str) and (r.lower() == 'all')):
        for r_ in list(self.MT.row_options):
            if ('dropdown' in self.MT.row_options[r_]):
                self.MT.delete_row_options_dropdown(datarn=r_)
    elif isinstance(r, int):
        self.MT.delete_row_options_dropdown(datarn=r)
    elif is_iterable(r):
        for r_ in r:
            self.MT.delete_row_options_dropdown(datarn=r_)
def delete_column_dropdown(self, c='all'):
    """Delete column dropdown options; c is an int, an iterable of ints or 'all'.

    Fix: the 'all' branch deleted entries while iterating col_options;
    iterate over a snapshot of the keys instead.
    """
    if (isinstance(c, str) and (c.lower() == 'all')):
        for c_ in list(self.MT.col_options):
            if ('dropdown' in self.MT.col_options[c_]):
                self.MT.delete_column_options_dropdown(datacn=c_)
    elif isinstance(c, int):
        self.MT.delete_column_options_dropdown(datacn=c)
    elif is_iterable(c):
        for c_ in c:
            self.MT.delete_column_options_dropdown(datacn=c_)
def delete_sheet_dropdown(self):
    """Delete the sheet-wide dropdown option."""
    self.MT.delete_options_dropdown()
def delete_header_dropdown(self, c=None):
    """Delete header dropdown options.

    c may be an int, an iterable of ints, 'all', or None to delete the
    header-wide dropdown.

    Fixes: the None branch passed a spurious argument to
    delete_options_dropdown (compare delete_index_dropdown, which calls
    it with no arguments), and the 'all' branch deleted from
    cell_options while iterating it — use a snapshot.
    """
    if (isinstance(c, str) and (c.lower() == 'all')):
        for c_ in list(self.CH.cell_options):
            if ('dropdown' in self.CH.cell_options[c_]):
                self.CH.delete_cell_options_dropdown(c_)
    elif isinstance(c, int):
        self.CH.delete_cell_options_dropdown(c)
    elif is_iterable(c):
        for c_ in c:
            self.CH.delete_cell_options_dropdown(c_)
    elif (c is None):
        self.CH.delete_options_dropdown()
def delete_index_dropdown(self, r=0):
    """Delete row-index dropdown options.

    r may be an int, an iterable of ints, 'all', or None to delete the
    index-wide dropdown.

    Fix: the 'all' branch deleted entries while iterating cell_options;
    iterate over a snapshot of the keys instead.
    """
    if (isinstance(r, str) and (r.lower() == 'all')):
        for r_ in list(self.RI.cell_options):
            if ('dropdown' in self.RI.cell_options[r_]):
                self.RI.delete_cell_options_dropdown(r_)
    elif isinstance(r, int):
        self.RI.delete_cell_options_dropdown(r)
    elif is_iterable(r):
        for r_ in r:
            self.RI.delete_cell_options_dropdown(r_)
    elif (r is None):
        self.RI.delete_options_dropdown()
def get_dropdowns(self):
    """Return all table dropdown options.

    Keys are (row, col) tuples for cell options and ints for row/column
    options; the sheet-wide dropdown, if any, is stored under the string
    key 'dropdown'.
    """
    found = {}
    for option_dict in (self.MT.cell_options, self.MT.row_options, self.MT.col_options):
        for key, opts in option_dict.items():
            if 'dropdown' in opts:
                found[key] = opts['dropdown']
    if 'dropdown' in self.MT.options:
        found['dropdown'] = self.MT.options['dropdown']
    return found
def get_header_dropdowns(self):
    """Return all header dropdown options keyed by column, plus the header-wide one under 'dropdown' if present."""
    found = {col: opts['dropdown'] for col, opts in self.CH.cell_options.items() if 'dropdown' in opts}
    if 'dropdown' in self.CH.options:
        found['dropdown'] = self.CH.options['dropdown']
    return found
def get_index_dropdowns(self):
    """Return all row-index dropdown options keyed by row, plus the index-wide one under 'dropdown' if present."""
    found = {row: opts['dropdown'] for row, opts in self.RI.cell_options.items() if 'dropdown' in opts}
    if 'dropdown' in self.RI.options:
        found['dropdown'] = self.RI.options['dropdown']
    return found
def set_dropdown_values(self, r=0, c=0, set_existing_dropdown=False, values=[], set_value=None):
    """Replace the values of the dropdown at cell (r, c).

    With set_existing_dropdown=True the coordinates are taken from the
    currently open dropdown window instead of r/c (raises if none is
    open). set_value, if given, is also written into the cell and into
    any open text editor.

    Bug fix: the kwargs lookup used the raw (r, c) arguments even when
    set_existing_dropdown had resolved the open window's coordinates
    into (r_, c_); the resolved coordinates are now used throughout.
    NOTE(review): the mutable default values=[] is kept for interface
    compatibility; it is never mutated here.
    """
    if set_existing_dropdown:
        if (self.MT.existing_dropdown_window is not None):
            r_ = self.MT.existing_dropdown_window.r
            c_ = self.MT.existing_dropdown_window.c
        else:
            raise Exception('No dropdown box is currently open')
    else:
        r_ = r
        c_ = c
    kwargs = self.MT.get_cell_kwargs(r_, c_, key='dropdown')
    kwargs['values'] = values
    # 'no dropdown open' is the sentinel stored while no window exists
    if (kwargs['window'] != 'no dropdown open'):
        kwargs['window'].values(values)
    if (set_value is not None):
        self.set_cell_data(r_, c_, set_value)
        if ((kwargs['window'] != 'no dropdown open') and (self.MT.text_editor_loc is not None) and (self.MT.text_editor is not None)):
            self.MT.text_editor.set_text(set_value)
def set_header_dropdown_values(self, c=0, set_existing_dropdown=False, values=[], set_value=None):
    """Replace the values of the header dropdown at column c.

    With set_existing_dropdown=True the column is taken from the
    currently open header dropdown (raises if none is open). set_value,
    if given, is also written into the header.
    NOTE(review): values=[] is a shared mutable default — not mutated
    here, but worth confirming against callers.
    """
    if set_existing_dropdown:
        if (self.CH.existing_dropdown_window is not None):
            c_ = self.CH.existing_dropdown_window.c
        else:
            raise Exception('No dropdown box is currently open')
    else:
        c_ = c
    kwargs = self.CH.get_cell_kwargs(c_, key='dropdown')
    if kwargs:
        kwargs['values'] = values
        if (kwargs['window'] != 'no dropdown open'):
            kwargs['window'].values(values)
        if (set_value is not None):
            self.MT.headers(newheaders=set_value, index=c_)
def set_index_dropdown_values(self, r, set_existing_dropdown=False, values=[], set_value=None):
    """Replace the values of the row-index dropdown at row r.

    With set_existing_dropdown=True the row is taken from the currently
    open index dropdown (raises if none is open). set_value, if given,
    is also written into the row index.
    NOTE(review): values=[] is a shared mutable default — not mutated
    here, but worth confirming against callers.
    """
    if set_existing_dropdown:
        if (self.RI.existing_dropdown_window is not None):
            r_ = self.RI.existing_dropdown_window.r
        else:
            raise Exception('No dropdown box is currently open')
    else:
        r_ = r
    kwargs = self.RI.get_cell_kwargs(r_, key='dropdown')
    if kwargs:
        kwargs['values'] = values
        if (kwargs['window'] != 'no dropdown open'):
            kwargs['window'].values(values)
        if (set_value is not None):
            self.MT.row_index(newindex=set_value, index=r_)
def get_dropdown_values(self, r=0, c=0):
    """Return the values list of the dropdown at cell (r, c), or None if the cell has no dropdown."""
    kwargs = self.MT.get_cell_kwargs(r, c, key='dropdown')
    return kwargs['values'] if kwargs else None
def get_header_dropdown_values(self, c=0):
    """Return the values list of the header dropdown at column c, or None if the column has no dropdown."""
    kwargs = self.CH.get_cell_kwargs(c, key='dropdown')
    return kwargs['values'] if kwargs else None
def get_index_dropdown_values(self, r=0):
    """Return the values list of the row-index dropdown at row r, or None.

    Bug fix: the original evaluated kwargs['values'] without returning
    it, so the method always returned None (compare
    get_header_dropdown_values).
    """
    kwargs = self.RI.get_cell_kwargs(r, key='dropdown')
    if kwargs:
        return kwargs['values']
def dropdown_functions(self, r, c, selection_function='', modified_function=''):
    """Get/set the callbacks of the dropdown at cell (r, c).

    Non-default callbacks are written into the dropdown kwargs; returns
    the kwargs dict, or None (implicitly) if the cell has no dropdown.
    """
    kwargs = self.MT.get_cell_kwargs(r, c, key='dropdown')
    if kwargs:
        if (selection_function != ''):
            kwargs['select_function'] = selection_function
        if (modified_function != ''):
            kwargs['modified_function'] = modified_function
        return kwargs
def header_dropdown_functions(self, c, selection_function='', modified_function=''):
    """Get/set the callbacks of the header dropdown at column c.

    Consistency fix: the selection callback is stored under
    'select_function', the key used by dropdown_functions and
    index_dropdown_functions; the original wrote to 'selection_function',
    a key nothing else in this API uses.
    """
    kwargs = self.CH.get_cell_kwargs(c, key='dropdown')
    if (selection_function != ''):
        kwargs['select_function'] = selection_function
    if (modified_function != ''):
        kwargs['modified_function'] = modified_function
    return kwargs
def index_dropdown_functions(self, r, selection_function='', modified_function=''):
    """Get/set the callbacks of the row-index dropdown at row r.

    Non-default callbacks are written into the dropdown kwargs; returns
    the kwargs dict (empty when the row has no dropdown).
    """
    kwargs = self.RI.get_cell_kwargs(r, key='dropdown')
    if (selection_function != ''):
        kwargs['select_function'] = selection_function
    if (modified_function != ''):
        kwargs['modified_function'] = modified_function
    return kwargs
def get_dropdown_value(self, r=0, c=0):
    """Return the cell value at (r, c) if the cell has a dropdown, else None."""
    if self.MT.get_cell_kwargs(r, c, key='dropdown'):
        return self.get_cell_data(r, c)
def get_header_dropdown_value(self, c=0):
    """Return the header value at column c if it has a dropdown, else None."""
    if self.CH.get_cell_kwargs(c, key='dropdown'):
        return self.MT._headers[c]
def get_index_dropdown_value(self, r=0):
    """Return the row-index value at row r if it has a dropdown, else None."""
    if self.RI.get_cell_kwargs(r, key='dropdown'):
        return self.MT._row_index[r]
def open_dropdown(self, r, c):
    """Open the dropdown window at cell (r, c)."""
    self.MT.open_dropdown_window(r, c)
def close_dropdown(self, r, c):
    """Close the dropdown window at cell (r, c)."""
    self.MT.close_dropdown_window(r, c)
def open_header_dropdown(self, c):
    """Open the header dropdown window at column c."""
    self.CH.open_dropdown_window(c)
def close_header_dropdown(self, c):
    """Close the header dropdown window at column c."""
    self.CH.close_dropdown_window(c)
def open_index_dropdown(self, r):
    """Open the row-index dropdown window at row r."""
    self.RI.open_dropdown_window(r)
def close_index_dropdown(self, r):
    """Close the row-index dropdown window at row r."""
    self.RI.close_dropdown_window(r)
def reapply_formatting(self):
    """Re-run all cell/row/column formatters over the current data."""
    self.MT.reapply_formatting()
def delete_all_formatting(self, clear_values=False):
    """Remove every formatter; optionally clear the formatted values too."""
    self.MT.delete_all_formatting(clear_values=clear_values)
def format_cell(self, r, c, formatter_options={}, formatter_class=None, redraw=True, **kwargs):
    """Apply a formatter to cell(s); r and c are ints or 'all'.

    formatter_options/kwargs are merged over {'formatter': formatter_class}.
    NOTE(review): formatter_options={} is a shared mutable default — it
    is only read here, but worth confirming nothing mutates it upstream.
    """
    if (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, int)):
        for r_ in range(self.MT.total_data_rows()):
            self.MT.format_cell(datarn=r_, datacn=c, **{'formatter': formatter_class, **formatter_options, **kwargs})
    elif (isinstance(c, str) and (c.lower() == 'all') and isinstance(r, int)):
        for c_ in range(self.MT.total_data_cols()):
            self.MT.format_cell(datarn=r, datacn=c_, **{'formatter': formatter_class, **formatter_options, **kwargs})
    elif (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, str) and (c.lower() == 'all')):
        for r_ in range(self.MT.total_data_rows()):
            for c_ in range(self.MT.total_data_cols()):
                self.MT.format_cell(datarn=r_, datacn=c_, **{'formatter': formatter_class, **formatter_options, **kwargs})
    else:
        self.MT.format_cell(datarn=r, datacn=c, **{'formatter': formatter_class, **formatter_options, **kwargs})
    self.set_refresh_timer(redraw)
def delete_cell_format(self, r='all', c='all', clear_values=False):
    """Delete cell format options; r and/or c may be 'all'.

    Bug fix (same defect as delete_checkbox/delete_dropdown): the 'all'
    branches indexed cell_options with mismatched keys like (r_, c) —
    raising KeyError when absent — and deleted from the dict while
    iterating it. Each branch now filters a snapshot of existing keys.
    """
    if (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, int)):
        for (r_, c_) in list(self.MT.cell_options):
            if ((c_ == c) and ('format' in self.MT.cell_options[(r_, c_)])):
                self.MT.delete_cell_format(r_, c_, clear_values=clear_values)
    elif (isinstance(c, str) and (c.lower() == 'all') and isinstance(r, int)):
        for (r_, c_) in list(self.MT.cell_options):
            if ((r_ == r) and ('format' in self.MT.cell_options[(r_, c_)])):
                self.MT.delete_cell_format(r_, c_, clear_values=clear_values)
    elif (isinstance(r, str) and (r.lower() == 'all') and isinstance(c, str) and (c.lower() == 'all')):
        for (r_, c_) in list(self.MT.cell_options):
            if ('format' in self.MT.cell_options[(r_, c_)]):
                self.MT.delete_cell_format(r_, c_, clear_values=clear_values)
    else:
        self.MT.delete_cell_format(r, c, clear_values=clear_values)
def format_row(self, r, formatter_options={}, formatter_class=None, redraw=True, **kwargs):
    """Apply a formatter to row(s); r is an int, an iterable of ints or 'all'.

    NOTE(review): formatter_options={} is a shared mutable default; it is
    only read here.
    """
    if (isinstance(r, str) and (r.lower() == 'all')):
        for r_ in range(len(self.MT.data)):
            self.MT.format_row(r_, **{'formatter': formatter_class, **formatter_options, **kwargs})
    elif is_iterable(r):
        for r_ in r:
            self.MT.format_row(r_, **{'formatter': formatter_class, **formatter_options, **kwargs})
    else:
        self.MT.format_row(r, **{'formatter': formatter_class, **formatter_options, **kwargs})
    self.set_refresh_timer(redraw)
def delete_row_format(self, r='all', clear_values=False):
    """Delete row format options; r is an iterable of ints or a single value (incl. 'all', forwarded as-is)."""
    if is_iterable(r):
        for r_ in r:
            self.MT.delete_row_format(r_, clear_values=clear_values)
    else:
        self.MT.delete_row_format(r, clear_values=clear_values)
def format_column(self, c, formatter_options={}, formatter_class=None, redraw=True, **kwargs):
    """Apply a formatter to column(s); c is an int, an iterable of ints or 'all'.

    NOTE(review): formatter_options={} is a shared mutable default; it is
    only read here.
    """
    if (isinstance(c, str) and (c.lower() == 'all')):
        for c_ in range(self.MT.total_data_cols()):
            self.MT.format_column(c_, **{'formatter': formatter_class, **formatter_options, **kwargs})
    elif is_iterable(c):
        for c_ in c:
            self.MT.format_column(c_, **{'formatter': formatter_class, **formatter_options, **kwargs})
    else:
        self.MT.format_column(c, **{'formatter': formatter_class, **formatter_options, **kwargs})
    self.set_refresh_timer(redraw)
def delete_column_format(self, c='all', clear_values=False):
    """Delete column format options; c is an iterable of ints or a single value (incl. 'all', forwarded as-is)."""
    if is_iterable(c):
        for c_ in c:
            self.MT.delete_column_format(c_, clear_values=clear_values)
    else:
        self.MT.delete_column_format(c, clear_values=clear_values)
def format_sheet(self, formatter_options={}, formatter_class=None, redraw=True, **kwargs):
    """Apply a formatter to the whole sheet and schedule a redraw."""
    self.MT.format_sheet(**{'formatter': formatter_class, **formatter_options, **kwargs})
    self.set_refresh_timer(redraw)
def delete_sheet_format(self, clear_values=False):
    """Delete the sheet-wide format option; optionally clear formatted values."""
    self.MT.delete_sheet_format(clear_values=clear_values)
class EbnfLexer(RegexLexer):
    """Pygments lexer for ISO/IEC 14977 EBNF grammars.

    Bug fix: the `url` attribute was a truncated, unterminated string
    literal (`url = '`) — a syntax error; restored to the Wikipedia
    article URL used by the upstream Pygments lexer.
    """
    name = 'EBNF'
    aliases = ['ebnf']
    filenames = ['*.ebnf']
    mimetypes = ['text/x-ebnf']
    url = 'https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form'
    version_added = '2.0'
    tokens = {'root': [include('whitespace'), include('comment_start'), include('identifier'), ('=', Operator, 'production')], 'production': [include('whitespace'), include('comment_start'), include('identifier'), ('"[^"]*"', String.Double), ("'[^']*'", String.Single), ('(\\?[^?]*\\?)', Name.Entity), ('[\\[\\]{}(),|]', Punctuation), ('-', Operator), (';', Punctuation, '#pop'), ('\\.', Punctuation, '#pop')], 'whitespace': [('\\s+', Text)], 'comment_start': [('\\(\\*', Comment.Multiline, 'comment')], 'comment': [('[^*)]', Comment.Multiline), include('comment_start'), ('\\*\\)', Comment.Multiline, '#pop'), ('[*)]', Comment.Multiline)], 'identifier': [('([a-zA-Z][\\w \\-]*)', Keyword)]}
def AllDifferent(term, *others, excepting=None, matrix=False):
    """Build an AllDifferent constraint over the given variables/nodes.

    excepting: value(s) exempt from the pairwise-difference requirement
    (an int is wrapped in a list, a tuple/set is converted to a list).
    matrix: if True, `term` must be a well-formed 2-D structure and the
    constraint applies to every row and every column.
    Returns None when fewer than two effective terms are supplied.
    """
    excepting = (list(excepting) if isinstance(excepting, (tuple, set)) else ([excepting] if isinstance(excepting, int) else excepting))
    checkType(excepting, ([int], type(None)))
    if matrix:
        assert (len(others) == 0)
        matrix = [flatten(row) for row in term]
        # typo fix in the assertion message: "id" -> "is"
        assert all(((len(row) == len(matrix[0])) for row in matrix)), 'The matrix is badly formed'
        assert all((checkType(l, [Variable]) for l in matrix))
        if (not options.mini):
            return ECtr(ConstraintAllDifferentMatrix(matrix, excepting))
        else:
            # mini mode: decompose into per-row and per-column constraints
            return ([AllDifferent(row) for row in matrix] + [AllDifferent(col) for col in columns(matrix)])
    terms = flatten(term, others)
    if ((len(terms) == 0) or ((len(terms) == 1) and isinstance(terms[0], (int, Variable, Node)))):
        return None
    checkType(terms, [Variable, Node])
    auxiliary().replace_partial_constraints_and_constraints_with_condition_and_possibly_nodes(terms, nodes_too=options.mini)
    return ECtr(ConstraintAllDifferent(terms, excepting))
class Market1501(BaseImageDataset):
    """Market-1501 person re-identification dataset loader.

    Expects the standard layout under `root`/`dataset_dir`:
    bounding_box_train / query / bounding_box_test, with filenames like
    '0002_c1s1_000451_03.jpg' ('<pid>_c<camid>...'); pid -1 marks junk
    images, which are skipped.
    """
    dataset_dir = 'market1501/Market-1501-v19.09.15'

    def __init__(self, root='your_dataset_path', verbose=True, **kwargs):
        super(Market1501, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'query')
        self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
        self._check_before_run()
        # only the training set is relabelled to contiguous [0, num_pids)
        train = self._process_dir(self.train_dir, relabel=True)
        query = self._process_dir(self.query_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, relabel=False)
        if verbose:
            print('=> Market1501 loaded')
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)

    def _check_before_run(self):
        """Raise RuntimeError if any expected dataset directory is missing."""
        if (not osp.exists(self.dataset_dir)):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if (not osp.exists(self.train_dir)):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if (not osp.exists(self.query_dir)):
            raise RuntimeError("'{}' is not available".format(self.query_dir))
        if (not osp.exists(self.gallery_dir)):
            raise RuntimeError("'{}' is not available".format(self.gallery_dir))

    def _process_dir(self, dir_path, relabel=False):
        """Return a list of (img_path, pid, camid) parsed from *.jpg names.

        camid is shifted to start at 0; with relabel=True pids are mapped
        to contiguous labels.
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile('([-\\d]+)_c(\\d)')
        pid_container = set()
        for img_path in img_paths:
            (pid, _) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                continue  # junk images are ignored
            pid_container.add(pid)
        # Fix: sort the pids so label assignment is deterministic across
        # runs (set iteration order is arbitrary).
        pid2label = {pid: label for (label, pid) in enumerate(sorted(pid_container))}
        dataset = []
        for img_path in img_paths:
            (pid, camid) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                continue
            assert (0 <= pid <= 1501)
            assert (1 <= camid <= 6)
            camid -= 1  # camera ids in filenames start at 1
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, pid, camid))
        return dataset
# Bug fix: the two decorator lines were garbled (bare `.skipif(...)` /
# `.usefixtures(...)` — syntax errors); restored the `@pytest.mark.` prefix.
@pytest.mark.skipif((shutil.which('notify-send') is None), reason='notify-send not installed.')
@pytest.mark.usefixtures('dbus')
def test_notifications(manager_nospawn, minimal_conf_noscreen):
    """End-to-end test of the Notify widget: colours, timeouts and navigation."""
    def background(obj):
        # read the widget's current background colour out of the live manager
        (_, bground) = obj.eval('self.background')
        return bground
    notify.Notify.timeout_add = log_timeout
    widget = notify.Notify(foreground_urgent=URGENT, foreground_low=LOW, background=BACKGROUND_NORMAL, background_urgent=BACKGROUND_URGENT, background_low=BACKGROUND_LOW)
    config = minimal_conf_noscreen
    config.screens = [libqtile.config.Screen(top=Bar([widget], 10))]
    manager_nospawn.start(config)
    obj = manager_nospawn.c.widget['notify']
    # send three notifications of normal/urgent/low urgency and check
    # the displayed text, background and timeout after each
    notif_1 = [NS]
    notif_1.extend(NOTIFICATION_1)
    subprocess.run(notif_1)
    assert (obj.info()['text'] == MESSAGE_1)
    assert (background(obj) == BACKGROUND_NORMAL)
    (_, timeout) = obj.eval('self.delay')
    assert (timeout == '5.0')
    notif_2 = [NS]
    notif_2.extend(NOTIFICATION_2)
    subprocess.run(notif_2)
    assert (obj.info()['text'] == MESSAGE_2.format(colour=URGENT))
    assert (background(obj) == BACKGROUND_URGENT)
    (_, timeout) = obj.eval('self.delay')
    assert (timeout == '10.0')
    notif_3 = [NS]
    notif_3.extend(NOTIFICATION_3)
    subprocess.run(notif_3)
    assert (obj.info()['text'] == MESSAGE_3.format(colour=LOW))
    assert (background(obj) == BACKGROUND_LOW)
    # next() at the end of the list stays on the last notification
    obj.next()
    assert (obj.info()['text'] == MESSAGE_3.format(colour=LOW))
    assert (background(obj) == BACKGROUND_LOW)
    obj.prev()
    assert (obj.info()['text'] == MESSAGE_2.format(colour=URGENT))
    assert (background(obj) == BACKGROUND_URGENT)
    obj.prev()
    assert (obj.info()['text'] == MESSAGE_1)
    assert (background(obj) == BACKGROUND_NORMAL)
    # prev() at the start of the list stays on the first notification
    obj.prev()
    assert (obj.info()['text'] == MESSAGE_1)
    assert (background(obj) == BACKGROUND_NORMAL)
    obj.next()
    assert (obj.info()['text'] == MESSAGE_2.format(colour=URGENT))
    assert (background(obj) == BACKGROUND_URGENT)
    # toggle hides the display, then restores the latest notification
    obj.toggle()
    assert (obj.info()['text'] == '')
    assert (background(obj) == BACKGROUND_NORMAL)
    obj.toggle()
    assert (obj.info()['text'] == MESSAGE_3.format(colour=LOW))
    assert (background(obj) == BACKGROUND_LOW)
    # clear empties the display; display() brings the latest back
    obj.clear()
    assert (obj.info()['text'] == '')
    assert (background(obj) == BACKGROUND_NORMAL)
    obj.display()
    assert (obj.info()['text'] == MESSAGE_3.format(colour=LOW))
    assert (background(obj) == BACKGROUND_LOW)
class MultiHopContextsOnlyModel(MultipleContextModel):
    """Binary classifier over a pair of paragraphs with multi-hop
    context-to-context attention (the question encoding is unused in the
    prediction path; only the two contexts are compared).
    """

    def __init__(self, encoder: QuestionsAndParagraphsEncoder, word_embed: Optional[WordEmbedder], char_embed: Optional[CharWordEmbedder], embed_mapper: Optional[SequenceMapper], context_to_context_attention: Optional[AttentionWithPostMapper], sequence_encoder: SequenceEncoder, predictor: BinaryFixedPredictor, max_batch_size: Optional[int]=None, c2c_hops: int=1):
        super().__init__(encoder=encoder, word_embed=word_embed, char_embed=char_embed, max_batch_size=max_batch_size)
        self.embed_mapper = embed_mapper
        self.context_to_context_attention = context_to_context_attention
        self.sequence_encoder = sequence_encoder
        self.predictor = predictor
        # number of bidirectional attention rounds between the two contexts
        self.c2c_hops = c2c_hops
        self.context_fixed_merge = ConcatWithProduct()

    def _get_predictions_for(self, is_train, question_embed, question_mask, context_embed, context_mask, answer, question_lm=None, context_lm=None, sentence_segments=None, sentence_mask=None):
        # NOTE(review): question_lm/context_lm/sentence_segments/
        # sentence_mask are accepted but unused in this model.
        (question_rep, context_rep) = (question_embed, context_embed)
        # axis 1 is assumed to hold exactly two contexts — enforced by num=2
        (context1_rep, context2_rep) = tf.unstack(context_rep, axis=1, num=2)
        (context1_mask, context2_mask) = tf.unstack(context_mask, axis=1, num=2)
        if (self.embed_mapper is not None):
            with tf.variable_scope('map_embed'):
                context1_rep = self.embed_mapper.apply(is_train, context1_rep, context1_mask)
            # reuse=True shares the mapper weights between both contexts
            with tf.variable_scope('map_embed', reuse=True):
                context2_rep = self.embed_mapper.apply(is_train, context2_rep, context2_mask)
        if (self.context_to_context_attention is not None):
            for hop in range(self.c2c_hops):
                with tf.variable_scope(f'c2c_hop_{hop}'):
                    c1_to_c2 = self.context_to_context_attention.apply(is_train, x=context2_rep, keys=context1_rep, memories=context1_rep, x_mask=context2_mask, memory_mask=context1_mask)
                # the remaining three attention applications share this
                # hop's weights via reuse=True
                with tf.variable_scope(f'c2c_hop_{hop}', reuse=True):
                    c1_to_c2_to_c1 = self.context_to_context_attention.apply(is_train, x=context1_rep, keys=c1_to_c2, memories=c1_to_c2, x_mask=context1_mask, memory_mask=context2_mask)
                    c2_to_c1 = self.context_to_context_attention.apply(is_train, x=context1_rep, keys=context2_rep, memories=context2_rep, x_mask=context1_mask, memory_mask=context2_mask)
                    c2_to_c1_to_c2 = self.context_to_context_attention.apply(is_train, x=context2_rep, keys=c2_to_c1, memories=c2_to_c1, x_mask=context2_mask, memory_mask=context1_mask)
                # combine the one-step and round-trip attended views as
                # the inputs to the next hop
                context2_rep = tf.add(c2_to_c1_to_c2, c1_to_c2)
                context1_rep = tf.add(c1_to_c2_to_c1, c2_to_c1)
        with tf.variable_scope('seq_enc'):
            fixed_rep1 = self.sequence_encoder.apply(is_train, context1_rep, context1_mask)
        with tf.variable_scope('seq_enc', reuse=True):
            fixed_rep2 = self.sequence_encoder.apply(is_train, context2_rep, context2_mask)
        with tf.variable_scope('merge'):
            fixed_rep = self.context_fixed_merge.apply(is_train, fixed_rep1, fixed_rep2)
        with tf.variable_scope('predictor'):
            return self.predictor.apply(is_train, fixed_rep, answer)
class Speech2TextConfig(PretrainedConfig):
    """Configuration for a Speech2Text encoder-decoder model.

    Stores transformer hyper-parameters plus the convolutional
    feature-subsampling front-end settings; validates that the number of
    conv kernel sizes matches num_conv_layers before delegating the
    special-token ids to PretrainedConfig.
    """
    model_type = 'speech_to_text'
    keys_to_ignore_at_inference = ['past_key_values']
    # map the generic HF attribute names onto this config's fields
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # generic alias expected by shared HF utilities
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        # copy to a list so tuple defaults are not shared/mutated
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if (len(self.conv_kernel_sizes) != self.num_conv_layers):
            raise ValueError(f'Configuration for convolutional module is incorrect. It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, `config.num_conv_layers = {self.num_conv_layers}`.')
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
class InlineMixin():
    """Mixin providing builders for Telegram inline query results.

    Each public method returns a plain dict shaped like the corresponding
    ``InlineQueryResult*`` object of the Telegram Bot API. The private
    ``_get_call_args`` / ``_inject_*`` helpers factor out the fields shared
    by several result types.

    NOTE(review): the helpers were defined without ``self`` but invoked as
    ``self._helper(...)``, which raises TypeError on every call — the
    ``@staticmethod`` decorators below restore the evidently intended form.
    """

    @staticmethod
    def _get_call_args(result_type, title, attach, content):
        """Build the base result dict: id/type plus optional title,
        reply_markup (from an attachment) and input_message_content."""
        args = {'id': None, 'type': result_type}
        if title is not None:
            args['title'] = title
        if attach is not None:
            # Only objects exposing _serialize_attachment() are valid keyboards.
            if not hasattr(attach, '_serialize_attachment'):
                raise ValueError('%s is not an attachment' % attach)
            args['reply_markup'] = attach._serialize_attachment()
        if content is not None:
            args['input_message_content'] = content._serialize()
        return args

    @staticmethod
    def _inject_file_args(args, file_id, url):
        """Add exactly one of <type>_file_id / <type>_url to *args*.

        Raises TypeError when neither or both of file_id/url are given.
        """
        result_type = args['type']
        if (file_id is not None) and (url is None):
            args[result_type + '_file_id'] = file_id
        elif (file_id is None) and (url is not None):
            args[result_type + '_url'] = url
        elif (file_id is None) and (url is None):
            raise TypeError('file_id or URL is missing')
        else:
            raise TypeError('Only one among file_id and URL must be passed')
        return args

    @staticmethod
    def _inject_thumb_args(args, url, width=None, height=None):
        """Add optional thumbnail url/width/height fields to *args*."""
        if url is not None:
            args['thumb_url'] = url
        if width is not None:
            args['thumb_width'] = width
        if height is not None:
            args['thumb_height'] = height
        return args

    @staticmethod
    def _inject_caption_args(args, caption, syntax):
        """Add caption and parse_mode; when no syntax is given, guess it
        from the caption text."""
        if caption is not None:
            args['caption'] = caption
        if syntax is not None:
            args['parse_mode'] = syntax
        else:
            args['parse_mode'] = syntaxes.guess_syntax(caption, syntax)
        return args

    def article(self, title, content, description=None, url=None, hide_url=None, thumb_url=None, thumb_width=None, thumb_height=None, attach=None):
        """Build an InlineQueryResultArticle dict."""
        args = self._get_call_args('article', title, attach, content)
        args = self._inject_thumb_args(args, thumb_url, thumb_width, thumb_height)
        if description is not None:
            args['description'] = description
        if url is not None:
            args['url'] = url
        if hide_url is not None:
            args['hide_url'] = hide_url
        return args

    def photo(self, file_id=None, url=None, width=None, height=None, title=None, content=None, thumb_url=None, description=None, caption=None, syntax=None, attach=None):
        """Build an InlineQueryResultPhoto / ...CachedPhoto dict."""
        args = self._get_call_args('photo', title, attach, content)
        args = self._inject_file_args(args, file_id, url)
        args = self._inject_thumb_args(args, thumb_url, None, None)
        args = self._inject_caption_args(args, caption, syntax)
        if description is not None:
            args['description'] = description
        if width is not None:
            args['photo_width'] = width
        if height is not None:
            args['photo_height'] = height
        return args

    def audio(self, file_id=None, url=None, title=None, performer=None, duration=None, caption=None, content=None, syntax=None, attach=None):
        """Build an InlineQueryResultAudio / ...CachedAudio dict."""
        args = self._get_call_args('audio', title, attach, content)
        args = self._inject_file_args(args, file_id, url)
        # _inject_caption_args already sets 'caption'; the original re-set it
        # redundantly afterwards.
        args = self._inject_caption_args(args, caption, syntax)
        if performer is not None:
            args['performer'] = performer
        if duration is not None:
            args['audio_duration'] = duration
        return args

    def voice(self, file_id=None, url=None, title=None, content=None, duration=None, caption=None, syntax=None, attach=None):
        """Build an InlineQueryResultVoice / ...CachedVoice dict."""
        args = self._get_call_args('voice', title, attach, content)
        args = self._inject_file_args(args, file_id, url)
        args = self._inject_caption_args(args, caption, syntax)
        if duration is not None:
            args['voice_duration'] = duration
        return args

    def video(self, file_id=None, url=None, title=None, content=None, thumb_url=None, description=None, mime_type=None, width=None, height=None, duration=None, caption=None, syntax=None, attach=None):
        """Build an InlineQueryResultVideo / ...CachedVideo dict."""
        args = self._get_call_args('video', title, attach, content)
        args = self._inject_file_args(args, file_id, url)
        args = self._inject_thumb_args(args, thumb_url, None, None)
        args = self._inject_caption_args(args, caption, syntax)
        if description is not None:
            args['description'] = description
        if mime_type is not None:
            args['mime_type'] = mime_type
        if width is not None:
            args['video_width'] = width
        if height is not None:
            args['video_height'] = height
        if duration is not None:
            # Fixed: the Bot API field is 'video_duration'; the previous
            # 'duration' key was silently ignored by Telegram.
            args['video_duration'] = duration
        return args

    def file(self, file_id=None, url=None, title=None, content=None, thumb_url=None, thumb_width=None, thumb_height=None, description=None, mime_type=None, caption=None, syntax=None, attach=None):
        """Build an InlineQueryResultDocument / ...CachedDocument dict."""
        args = self._get_call_args('document', title, attach, content)
        args = self._inject_file_args(args, file_id, url)
        args = self._inject_thumb_args(args, thumb_url, thumb_width, thumb_height)
        args = self._inject_caption_args(args, caption, syntax)
        if description is not None:
            args['description'] = description
        if mime_type is not None:
            args['mime_type'] = mime_type
        return args

    def location(self, latitude, longitude, title, live_period=None, content=None, thumb_url=None, thumb_width=None, thumb_height=None, attach=None):
        """Build an InlineQueryResultLocation dict."""
        args = self._get_call_args('location', title, attach, content)
        args = self._inject_thumb_args(args, thumb_url, thumb_width, thumb_height)
        args['latitude'] = latitude
        args['longitude'] = longitude
        args['title'] = title
        if live_period is not None:
            args['live_period'] = live_period
        return args

    def venue(self, latitude, longitude, title, address, foursquare_id=None, foursquare_type=None, content=None, thumb_url=None, thumb_width=None, thumb_height=None, attach=None):
        """Build an InlineQueryResultVenue dict."""
        args = self._get_call_args('venue', title, attach, content)
        args = self._inject_thumb_args(args, thumb_url, thumb_width, thumb_height)
        args['latitude'] = latitude
        args['longitude'] = longitude
        args['title'] = title
        args['address'] = address
        if foursquare_id is not None:
            args['foursquare_id'] = foursquare_id
        if foursquare_type is not None:
            args['foursquare_type'] = foursquare_type
        return args

    def sticker(self, file_id, content=None, attach=None):
        """Build an InlineQueryResultCachedSticker dict (file_id only)."""
        args = self._get_call_args('sticker', None, attach, content)
        args['sticker_file_id'] = file_id
        return args

    def contact(self, phone, first_name, last_name=None, vcard=None, content=None, thumb_url=None, thumb_width=None, thumb_height=None, attach=None):
        """Build an InlineQueryResultContact dict."""
        args = self._get_call_args('contact', None, attach, content)
        args = self._inject_thumb_args(args, thumb_url, thumb_width, thumb_height)
        args['phone_number'] = phone
        args['first_name'] = first_name
        if last_name is not None:
            args['last_name'] = last_name
        if vcard is not None:
            args['vcard'] = vcard
        return args

    def gif(self, file_id=None, url=None, title=None, content=None, thumb_url=None, width=None, height=None, duration=None, caption=None, syntax=None, attach=None):
        """Build an InlineQueryResultGif / ...CachedGif dict."""
        args = self._get_call_args('gif', title, attach, content)
        args = self._inject_file_args(args, file_id, url)
        args = self._inject_thumb_args(args, thumb_url, None, None)
        args = self._inject_caption_args(args, caption, syntax)
        if width is not None:
            args['gif_width'] = width
        if height is not None:
            args['gif_height'] = height
        if duration is not None:
            args['gif_duration'] = duration
        return args

    def mpeg4_gif(self, file_id=None, url=None, title=None, content=None, thumb_url=None, width=None, height=None, duration=None, caption=None, syntax=None, attach=None):
        """Build an InlineQueryResultMpeg4Gif dict.

        File fields are inlined here (not via _inject_file_args) because the
        API prefix is 'mpeg4', not the result type 'mpeg4_gif'.
        """
        args = self._get_call_args('mpeg4_gif', title, attach, content)
        args = self._inject_thumb_args(args, thumb_url, None, None)
        args = self._inject_caption_args(args, caption, syntax)
        if (file_id is not None) and (url is None):
            args['mpeg4_file_id'] = file_id
        elif (file_id is None) and (url is not None):
            args['mpeg4_url'] = url
        elif (file_id is None) and (url is None):
            raise TypeError('file_id or URL is missing')
        else:
            raise TypeError('Only one among file_id and URL must be passed')
        if width is not None:
            args['mpeg4_width'] = width
        if height is not None:
            args['mpeg4_height'] = height
        if duration is not None:
            args['mpeg4_duration'] = duration
        return args
class IntegrationTests(fixtures.DistInfoPkg, unittest.TestCase):
    """End-to-end check that version() answers requirement specifiers for the
    fixture-installed distinfo-pkg distribution."""

    def test_package_spec_installed(self):
        def is_installed(spec):
            requirement = packaging.requirements.Requirement(spec)
            return version(requirement.name) in requirement.specifier

        assert is_installed('distinfo-pkg==1.0')
        assert is_installed('distinfo-pkg>=1.0,<2.0')
        assert not is_installed('distinfo-pkg<1.0')
def test_handshake_rejection_with_body() -> None:
    """A 400 rejection with a body must yield RejectConnection followed by the
    body chunk and an empty finishing chunk."""
    events = _make_handshake_rejection(400, b'Hello')
    expected = [
        RejectConnection(headers=[(b'content-length', b'5')], has_body=True, status_code=400),
        RejectData(body_finished=False, data=b'Hello'),
        RejectData(body_finished=True, data=b''),
    ]
    assert events == expected
class ResNetBase(nn.Module):
    """ResNet trunk: 7x7 conv stem, four residual stages, global average pool.

    ``block`` is a residual block class/factory exposing an ``expansion``
    attribute; ``layers`` gives the block count per stage. ``forward`` returns
    a (batch, 512 * block.expansion) feature tensor.
    """

    def __init__(self, block, layers):
        self.inplanes = 64
        super(ResNetBase, self).__init__()
        # Stem: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.out_channels = 512 * block.expansion
        # He-style init for convs; BN starts as identity (gamma=1, beta=0).
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                nn.init.normal_(module.weight, 0.0, math.sqrt(2.0 / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.ones_(module.weight)
                nn.init.zeros_(module.bias)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of ``blocks`` blocks, with a projection
        shortcut when the resolution or channel count changes."""
        out_planes = planes * block.expansion
        downsample = None
        if stride != 1 or self.inplanes != out_planes:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, out_planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = out_planes
        stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        """Stem -> stages -> spatial mean; returns (batch, out_channels)."""
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        # Global average pooling over all spatial positions.
        return x.flatten(2).mean(dim=2)
def _ensure_dir(path):
    """Create *path* if needed, printing the same status message either way."""
    if os.path.exists(path):
        print(path, 'already exists')
    else:
        os.mkdir(path)
        print(path, 'created')


def model_processing(model, src_dir, dest_dir, timeseq_len):
    """Run ``model.predict`` over every .npy clip under ``src_dir`` and mirror
    the results into ``dest_dir``.

    Expects ``src_dir`` to contain ``train/<class>/<clip>.npy`` and
    ``test/<class>/<clip>.npy``; the same layout is reproduced under
    ``dest_dir`` with each file replaced by the model's prediction.
    ``timeseq_len`` is passed to ``predict`` as the batch size.
    """
    train_dir = os.path.join(src_dir, 'train')
    test_dir = os.path.join(src_dir, 'test')
    # Previously three copies of the same exists/mkdir/print boilerplate.
    _ensure_dir(dest_dir)
    dest_train_dir = os.path.join(dest_dir, 'train')
    _ensure_dir(dest_train_dir)
    dest_test_dir = os.path.join(dest_dir, 'test')
    _ensure_dir(dest_test_dir)
    # Ordered so train is always processed before test.
    dir_mapping = OrderedDict([(train_dir, dest_train_dir), (test_dir, dest_test_dir)])
    for (src_split_dir, dest_split_dir) in dir_mapping.items():
        print('Processing data in {}'.format(src_split_dir))
        for (index, class_name) in enumerate(os.listdir(src_split_dir)):
            class_dir = os.path.join(src_split_dir, class_name)
            dest_class_dir = os.path.join(dest_split_dir, class_name)
            if not os.path.exists(dest_class_dir):
                os.mkdir(dest_class_dir)
                print(dest_class_dir, 'created')
            for filename in os.listdir(class_dir):
                clip_data = np.load(os.path.join(class_dir, filename))
                processed_data = model.predict(clip_data, batch_size=timeseq_len)
                np.save(os.path.join(dest_class_dir, filename), processed_data)
            print('No.{} class {} finished, data saved in {}'.format(index, class_name, dest_class_dir))
# NOTE(review): the decorator below was mangled to a bare `.parametrize(...)`
# line (a SyntaxError); reconstructed as @pytest.mark.parametrize.
@pytest.mark.parametrize('v, dtype', [(set_test_value(pt.iscalar(), np.array(10, dtype='int32')), psb.float64)])
def test_reciprocal(v, dtype):
    """Compile reciprocal(v) with the numba backend and compare to Python."""
    g = psb.reciprocal(v)
    g_fg = FunctionGraph(outputs=[g])
    # Feed only real inputs (skip shared variables and constants).
    compare_numba_and_py(g_fg, [i.tag.test_value for i in g_fg.inputs if not isinstance(i, (SharedVariable, Constant))])
class KnownValues(unittest.TestCase):
    """Regression values for KUKSpU (k-point unrestricted KS-DFT+U)."""

    def test_KUKSpU_high_cost(self):
        """Total energy on a 2x1x1 k-mesh matches the reference value."""
        kpts = cell.make_kpts([2, 1, 1], wrap_around=True)
        mf = pdft.KUKSpU(cell, kpts, U_idx=['1 C 2p'], U_val=[5.0], C_ao_lo='minao', minao_ref='gth-szv')
        mf.conv_tol = 1e-10
        e1 = mf.kernel()
        self.assertAlmostEqual(e1, (- 10.), 8)

    def test_KUKSpU_ksymm(self):
        """Energy with k-point symmetry equals the unsymmetrized reference."""
        cell1 = cell.copy()
        cell1.basis = 'gth-szv'
        cell1.mesh = [16] * 3
        cell1.build()
        U_idx = ['1 C 2p']
        U_val = [5.0]
        kmesh = [2, 2, 1]
        # Reference: full k-point set, no symmetry.
        kpts0 = cell1.make_kpts(kmesh, wrap_around=True)
        mf0 = pdft.KUKSpU(cell1, kpts0, U_idx=U_idx, U_val=U_val, C_ao_lo='minao')
        e0 = mf0.kernel()
        # Symmetry-reduced set must contain 3 irreducible k-points.
        kpts = cell1.make_kpts(kmesh, wrap_around=True, space_group_symmetry=True, time_reversal_symmetry=True)
        assert (kpts.nkpts_ibz == 3)
        mf = pdft.KUKSpU(cell1, kpts, U_idx=U_idx, U_val=U_val, C_ao_lo='minao')
        e1 = mf.kernel()
        self.assertAlmostEqual(e1, e0, 8)

    def test_get_veff(self):
        """E_U vanishes for the minao initial guess; veff fingerprint matches."""
        kpts = cell.make_kpts([2, 1, 1], wrap_around=True)
        mf = pdft.KUKSpU(cell, kpts, U_idx=['1 C 2p'], U_val=[5.0], C_ao_lo='minao', minao_ref='gth-szv')
        dm = mf.get_init_guess(cell, 'minao')
        vxc = mf.get_veff(cell, dm)
        self.assertAlmostEqual(vxc.E_U, 0., 11)
        self.assertAlmostEqual(lib.fp(vxc), 6., 8)
def sstore_eip2200(computation: BaseComputation) -> None:
    """Net-metered SSTORE per EIP-2200.

    EIP-2200 requires the whole operation to fail when 2300 gas or less
    remains; otherwise it delegates to the net-metering implementation.
    """
    gas_remaining = computation.get_gas_remaining()
    if gas_remaining <= 2300:
        raise OutOfGas('Net-metered SSTORE always fails below 2300 gas, per EIP-2200', gas_remaining)
    return net_sstore(GAS_SCHEDULE_EIP2200, computation)
class Metric(object):
    """Abstract base for evaluation metrics.

    Subclasses implement corpus-level (__call__), token-level and
    sentence-level scoring over (ground truths, predictions, mask).
    """

    def __init__(self, args: Namespace):
        self.args = args
        # Small epsilon to keep score denominators away from zero.
        self.denom = 1e-08

    def __call__(self, gts, preds, mask: list) -> dict:
        """Return a dict of scores; must be provided by subclasses."""
        raise NotImplementedError

    def _cal_sentence_level(self, gts, preds, mask: list):
        """Sentence-level scoring hook; must be provided by subclasses."""
        raise NotImplementedError

    def _cal_token_level(self, gts, preds, mask: list):
        """Token-level scoring hook; must be provided by subclasses."""
        raise NotImplementedError
# NOTE(review): the three bare names '_pytesseract', '_sentencepiece' and
# '_tokenizers' were mangled decorator lines (NameErrors as written);
# reconstructed as the @require_* skip decorators — confirm against upstream.
@require_pytesseract
@require_sentencepiece
@require_tokenizers
class LayoutXLMProcessorTest(unittest.TestCase):
    """Save/load round-trip tests for LayoutXLMProcessor."""

    tokenizer_class = LayoutXLMTokenizer
    rust_tokenizer_class = LayoutXLMTokenizerFast

    def setUp(self):
        # Persist a feature-extractor config into a temp dir so that
        # from_pretrained() can discover it.
        feature_extractor_map = {'do_resize': True, 'size': 224, 'apply_ocr': True}
        self.tmpdirname = tempfile.mkdtemp()
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.feature_extraction_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(feature_extractor_map) + '\n')

    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        """Slow (Python) tokenizer built from the sample SentencePiece model."""
        return self.tokenizer_class.from_pretrained(SAMPLE_SP, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
        """Fast (Rust) tokenizer built from the sample SentencePiece model."""
        return self.rust_tokenizer_class.from_pretrained(SAMPLE_SP, **kwargs)

    def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
        return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]

    def get_feature_extractor(self, **kwargs):
        return LayoutLMv2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """Round-trip with default components preserves vocab and FE config."""
        feature_extractor = self.get_feature_extractor()
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            processor = LayoutXLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(self.tmpdirname)
            processor = LayoutXLMProcessor.from_pretrained(self.tmpdirname)
            self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
            self.assertIsInstance(processor.tokenizer, (LayoutXLMTokenizer, LayoutXLMTokenizerFast))
            self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
            self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        """from_pretrained kwargs override both tokenizer and FE settings."""
        processor = LayoutXLMProcessor(feature_extractor=self.get_feature_extractor(), tokenizer=self.get_tokenizer())
        processor.save_pretrained(self.tmpdirname)
        # Slow tokenizer path (use_fast=False).
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
        processor = LayoutXLMProcessor.from_pretrained(self.tmpdirname, use_fast=False, bos_token='(BOS)', eos_token='(EOS)', do_resize=False, size=30)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, LayoutXLMTokenizer)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor)
        # Fast tokenizer path.
        tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
        processor = LayoutXLMProcessor.from_pretrained(self.tmpdirname, use_xlm=True, bos_token='(BOS)', eos_token='(EOS)', do_resize=False, size=30)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, LayoutXLMTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor)
class TableLocator(Locator, dict):
    """Dict-backed locator identifying a table by namespace locator + name.

    NOTE(review): the original text contained mangled decorator remnants
    ('_locator.setter', '_name.setter' — NameErrors as written) and the
    accessors/builders had lost their decorators; @staticmethod/@property
    reconstructed below — confirm against upstream.
    """

    @staticmethod
    def of(namespace_locator: Optional[NamespaceLocator], table_name: Optional[str]) -> TableLocator:
        """Build a TableLocator from an existing NamespaceLocator."""
        table_locator = TableLocator()
        table_locator.namespace_locator = namespace_locator
        table_locator.table_name = table_name
        return table_locator

    @staticmethod
    def at(namespace: Optional[str], table_name: Optional[str]) -> TableLocator:
        """Build a TableLocator from raw namespace and table names."""
        namespace_locator = NamespaceLocator.of(namespace)
        return TableLocator.of(namespace_locator, table_name)

    @property
    def namespace_locator(self) -> NamespaceLocator:
        # Lazily re-wrap a plain dict (e.g. after deserialization) as a
        # NamespaceLocator and cache it back into the underlying dict.
        val: Dict[(str, Any)] = self.get('namespaceLocator')
        if (val is not None) and (not isinstance(val, NamespaceLocator)):
            self.namespace_locator = val = NamespaceLocator(val)
        return val

    @namespace_locator.setter
    def namespace_locator(self, namespace_locator: Optional[NamespaceLocator]) -> None:
        self['namespaceLocator'] = namespace_locator

    @property
    def table_name(self) -> Optional[str]:
        return self.get('tableName')

    @table_name.setter
    def table_name(self, table_name: Optional[str]) -> None:
        self['tableName'] = table_name

    @property
    def namespace(self) -> Optional[str]:
        """Namespace name from the nested locator, or None when unset."""
        namespace_locator = self.namespace_locator
        if namespace_locator:
            return namespace_locator.namespace
        return None

    def canonical_string(self) -> str:
        """Stable identity string: namespace digest + table name."""
        nl_hexdigest = self.namespace_locator.hexdigest()
        table_name = self.table_name
        return f'{nl_hexdigest}|{table_name}'
class AsyncApis(Generic[AsyncClientT]):
    """Bundles all async API groups behind one shared AsyncApiClient."""

    def __init__(self, host: str = None, **kwargs: Any):
        self.client = AsyncApiClient(host, **kwargs)
        # Every API group talks through the same underlying client.
        self.cluster_api = AsyncClusterApi(self.client)
        self.collections_api = AsyncCollectionsApi(self.client)
        self.points_api = AsyncPointsApi(self.client)
        self.service_api = AsyncServiceApi(self.client)
        self.snapshots_api = AsyncSnapshotsApi(self.client)

    async def aclose(self) -> None:
        """Close the underlying HTTP client."""
        await self.client.aclose()
class TestEarlyInit():
    """Tests for configinit.early_init: config.py loading, autoconfig.yml
    handling, temp settings and error collection.

    NOTE(review): the bare '.parametrize(...)' lines were mangled decorators
    (SyntaxErrors as written); reconstructed as @pytest.mark.parametrize.
    """

    def test_config_py_path(self, args, init_patch, config_py_arg):
        config_py_arg.write('\n'.join(['config.load_autoconfig()', 'c.colors.hints.bg = "red"']))
        configinit.early_init(args)
        expected = 'colors.hints.bg = red'
        assert config.instance.dump_userconfig() == expected

    @pytest.mark.parametrize('config_py', [True, 'error', False])
    def test_config_py(self, init_patch, config_tmpdir, caplog, args, config_py):
        """early_init with only a config.py present (or absent/broken)."""
        config_py_file = config_tmpdir / 'config.py'
        if config_py:
            config_py_lines = ['c.colors.hints.bg = "red"', 'config.load_autoconfig(False)']
            if config_py == 'error':
                config_py_lines.append('c.foo = 42')
            config_py_file.write_text('\n'.join(config_py_lines), 'utf-8', ensure=True)
        with caplog.at_level(logging.ERROR):
            configinit.early_init(args)
        expected_errors = []
        if config_py == 'error':
            expected_errors.append("While setting 'foo': No option 'foo'")
        if configinit._init_errors is None:
            actual_errors = []
        else:
            actual_errors = [str(err) for err in configinit._init_errors.errors]
        assert actual_errors == expected_errors
        assert isinstance(config.instance, config.Config)
        assert isinstance(config.key_instance, config.KeyConfig)
        if config_py:
            expected = 'colors.hints.bg = red'
        else:
            expected = '<Default configuration>'
        assert config.instance.dump_userconfig() == expected

    @pytest.mark.parametrize('load_autoconfig', [True, False])
    @pytest.mark.parametrize('config_py', [True, 'error', False])
    @pytest.mark.parametrize('invalid_yaml', ['42', 'list', 'unknown', 'wrong-type', False])
    def test_autoconfig_yml(self, init_patch, config_tmpdir, caplog, args, load_autoconfig, config_py, invalid_yaml):
        """Interaction of autoconfig.yml (possibly invalid) with config.py."""
        autoconfig_file = config_tmpdir / 'autoconfig.yml'
        config_py_file = config_tmpdir / 'config.py'
        yaml_lines = {'42': '42', 'list': '[1, 2]', 'unknown': ['settings:', '  colors.foobar:', '    global: magenta', 'config_version: 2'], 'wrong-type': ['settings:', '  tabs.position:', '    global: true', 'config_version: 2'], False: ['settings:', '  colors.hints.fg:', '    global: magenta', 'config_version: 2']}
        text = '\n'.join(yaml_lines[invalid_yaml])
        autoconfig_file.write_text(text, 'utf-8', ensure=True)
        if config_py:
            config_py_lines = ['c.colors.hints.bg = "red"']
            config_py_lines.append('config.load_autoconfig({})'.format(load_autoconfig))
            if config_py == 'error':
                config_py_lines.append('c.foo = 42')
            config_py_file.write_text('\n'.join(config_py_lines), 'utf-8', ensure=True)
        with caplog.at_level(logging.ERROR):
            configinit.early_init(args)
        # autoconfig.yml errors only surface when it is actually loaded.
        expected_errors = []
        if load_autoconfig or (not config_py):
            suffix = ' (autoconfig.yml)' if config_py else ''
            if invalid_yaml in ['42', 'list']:
                error = 'While loading data{}: Toplevel object is not a dict'.format(suffix)
                expected_errors.append(error)
            elif invalid_yaml == 'wrong-type':
                error = "Error{}: Invalid value 'True' - expected a value of type str but got bool.".format(suffix)
                expected_errors.append(error)
            elif invalid_yaml == 'unknown':
                error = 'While loading options{}: Unknown option colors.foobar'.format(suffix)
                expected_errors.append(error)
        if config_py == 'error':
            expected_errors.append("While setting 'foo': No option 'foo'")
        if configinit._init_errors is None:
            actual_errors = []
        else:
            actual_errors = [str(err) for err in configinit._init_errors.errors]
        assert actual_errors == expected_errors
        dump = config.instance.dump_userconfig()
        if config_py and load_autoconfig and (not invalid_yaml):
            expected = ['colors.hints.bg = red', 'colors.hints.fg = magenta']
        elif config_py:
            expected = ['colors.hints.bg = red']
        elif invalid_yaml:
            expected = ['<Default configuration>']
        else:
            expected = ['colors.hints.fg = magenta']
        assert dump == '\n'.join(expected)

    def test_autoconfig_warning(self, init_patch, args, config_tmpdir, caplog):
        """A config.py without load_autoconfig() triggers exactly one error."""
        config_py_file = config_tmpdir / 'config.py'
        config_py_file.ensure()
        with caplog.at_level(logging.ERROR):
            configinit.early_init(args)
        assert len(configinit._init_errors.errors) == 1
        error = configinit._init_errors.errors[0]
        assert str(error).startswith('autoconfig loading not specified')

    def test_autoconfig_warning_custom(self, init_patch, args, tmp_path, monkeypatch):
        """A custom --config-py path suppresses the autoconfig warning."""
        config_py_path = tmp_path / 'config.py'
        config_py_path.touch()
        args.config_py = str(config_py_path)
        monkeypatch.setattr(configinit.standarddir, 'config_py', (lambda: str(config_py_path)))
        configinit.early_init(args)

    def test_custom_non_existing_file(self, init_patch, args, tmp_path, caplog, monkeypatch):
        """A missing custom config.py is reported as FileNotFoundError."""
        config_py_path = tmp_path / 'config.py'
        assert not config_py_path.exists()
        args.config_py = str(config_py_path)
        monkeypatch.setattr(configinit.standarddir, 'config_py', (lambda: str(config_py_path)))
        with caplog.at_level(logging.ERROR):
            configinit.early_init(args)
        assert len(configinit._init_errors.errors) == 1
        error = configinit._init_errors.errors[0]
        assert isinstance(error.exception, FileNotFoundError)

    @pytest.mark.parametrize('byte', [b'\x00', b'\xda'])
    def test_state_init_errors(self, init_patch, args, data_tmpdir, byte):
        """A corrupt state file is collected as an init error."""
        state_file = data_tmpdir / 'state'
        state_file.write_binary(byte)
        configinit.early_init(args)
        assert configinit._init_errors.errors

    def test_invalid_change_filter(self, init_patch, args):
        config.change_filter('foobar')
        with pytest.raises(configexc.NoOptionError):
            configinit.early_init(args)

    def test_temp_settings_valid(self, init_patch, args):
        args.temp_settings = [('colors.completion.fg', 'magenta')]
        configinit.early_init(args)
        assert config.instance.get_obj('colors.completion.fg') == 'magenta'

    def test_temp_settings_invalid(self, caplog, init_patch, message_mock, args):
        """Invalid temp settings become an error message, not a crash."""
        args.temp_settings = [('foo', 'bar')]
        with caplog.at_level(logging.ERROR):
            configinit.early_init(args)
        msg = message_mock.getmsg()
        assert msg.level == usertypes.MessageLevel.error
        assert msg.text == "set: NoOptionError - No option 'foo'"
def biwrap(wrapper):
    """Make *wrapper* usable both as ``@wrapper`` and ``@wrapper(**opts)``.

    When the decorated callable is invoked with the target function as a
    positional argument, *wrapper* is applied immediately; otherwise the
    call's arguments are captured with functools.partial and the result is
    used as the actual decorator.

    NOTE(review): the bare '(wrapper)' line above 'def enhanced' was a
    mangled decorator; reconstructed as @functools.wraps(wrapper).
    """
    @functools.wraps(wrapper)
    def enhanced(*args, **kwargs):
        # Heuristic: when applied to a bound method, args[0] is the instance
        # and exposes the wrapper by name, so the real target is args[1].
        is_bound_method = hasattr(args[0], wrapper.__name__) if args else False
        count = 1 if is_bound_method else 0
        if len(args) > count:
            # Direct use: @wrapper
            return wrapper(*args, **kwargs)
        # Parameterized use: @wrapper(**opts) — defer application.
        return functools.partial(wrapper, *args, **kwargs)
    return enhanced
class ArchivedSong(models.Model):
    """A previously queued song, with a request counter and cache state."""

    # Unique source URL identifying the song.
    url = models.CharField(max_length=2000, unique=True)
    artist = models.CharField(max_length=1000)
    title = models.CharField(max_length=1000)
    duration = models.FloatField()
    # How many times this song was requested.
    counter = models.IntegerField()
    cached = models.BooleanField()

    def __str__(self) -> str:
        return f'{self.title} ({self.url}): {str(self.counter)}'

    def displayname(self) -> str:
        """Human-readable "artist - title" name."""
        return song_utils.displayname(self.artist, self.title)

    def get_metadata(self) -> 'Metadata':
        """Metadata dict as expected by the player."""
        return {'artist': self.artist, 'title': self.title, 'duration': self.duration, 'external_url': self.url, 'cached': self.cached}

    class Meta():
        # Trigram indexes speed up fuzzy search, but only exist on PostgreSQL.
        indexes = (
            [GinIndex(OpClass('artist', 'gin_trgm_ops'), name='core_archivedsong_artist_trgm'),
             GinIndex(OpClass('title', 'gin_trgm_ops'), name='core_archivedsong_title_trgm')]
            if (connection.vendor == 'postgresql') else []
        )
def train_model(train_source, train_target, dev_source, dev_target, experiment_directory, resume=False):
    """Train a seq2seq model on parallel files and return it with its vocabs.

    Returns a tuple ``(seq2seq, input_vocab, output_vocab)``. Training can be
    interrupted with Ctrl-C; the partially trained model is still returned.
    """
    train = Seq2SeqDataset.from_file(train_source, train_target)
    train.build_vocab(300, 6000)
    dev = Seq2SeqDataset.from_file(dev_source, dev_target, share_fields_from=train)
    input_vocab = train.src_field.vocab
    output_vocab = train.tgt_field.vocab
    # Perplexity loss; padding positions are masked via the pad index.
    weight = torch.ones(len(output_vocab))
    pad = output_vocab.stoi[train.tgt_field.pad_token]
    loss = Perplexity(weight, pad)
    # NOTE(review): this guard was 'if False' in the original — presumably a
    # disabled CUDA switch; kept disabled to preserve behavior.
    if False:
        loss.cuda()
    seq2seq = None
    optimizer = None
    if not resume:
        # Scheduler is created but unused here; training manages its own LR.
        (seq2seq, optimizer, _scheduler) = initialize_model(train, input_vocab, output_vocab)
    trainer = SupervisedTrainer(loss=loss, batch_size=32, checkpoint_every=50, print_every=10, experiment_directory=experiment_directory)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended monotonic replacement for elapsed-time measurement.
    start = time.perf_counter()
    try:
        seq2seq = trainer.train(seq2seq, train, n_epochs=10, dev_data=dev, optimizer=optimizer, teacher_forcing_ratio=0.5, resume=resume)
    except KeyboardInterrupt:
        # Allow graceful interruption; return whatever has been trained.
        pass
    elapsed = time.perf_counter() - start
    logging.info('Training time: %.2fs', elapsed)
    return (seq2seq, input_vocab, output_vocab)
def prepare_exp_name(stats_dict):
exp_name = []
output_dir = stats_dict.pop('output_dir', None)
if (output_dir is not None):
output_dir = Path(output_dir)
if output_dir.stem.startswith('version_'):
exp_name += [output_dir.parent.stem, output_dir.stem.replace('_', '-')]
else:
exp_name.append(output_dir.stem)
ckpt_name = stats_dict.pop('ckpt_path', None)
if ((ckpt_name is not None) and (ckpt_name != '')):
ckpt_name = Path(ckpt_name)
exp_name.append(ckpt_name.stem.replace('_', '-'))
exp_name = '_'.join(exp_name)
try:
(head, tail) = re.split('fold-[0-9]+', exp_name, 1)
mid = re.findall('fold-[0-9]+', exp_name)[0]
assert (((head + mid) + tail) == exp_name)
if ((len(head) > 0) and (head[(- 1)] == '-')):
head = (head[:(- 1)] + '_')
if ((len(tail) > 0) and (tail[0] == '-')):
tail = ('_' + tail[1:])
exp_name = ((head + mid) + tail)
except IndexError:
pass
except ValueError:
pass
stats_dict['exp_name'] = exp_name |
def local_files(code):
    """Return installed dictionary filenames for *code*, newest version first."""
    pattern = os.path.join(dictionary_dir(), '{}*.bdic'.format(code))
    versioned = []
    for path in glob.glob(pattern):
        parsed = version(path)
        if parsed is None:
            # Skip files whose name does not carry a parseable version.
            continue
        name = os.path.basename(path)
        log.config.debug('Found file for dict {}: {}'.format(code, name))
        versioned.append((parsed, name))
    versioned.sort(reverse=True)
    return [name for (_, name) in versioned]
def write_tfrecord_from_npy_single_channel(class_npy_file, class_label, output_path):
    """Write a TFRecord of JPEG-encoded RGB images from a single-channel .npy.

    Each row of the array is reshaped to a square grayscale image, converted
    to RGB, JPEG-encoded, and written with *class_label*. Returns the number
    of images written.
    """
    def load_image(img):
        # Rows are flattened square images; recover the side length.
        side = int(np.sqrt(img.shape[0]))
        img = Image.fromarray(img.reshape((side, side)))
        img = img.convert('RGB')
        return img

    with tf.io.gfile.GFile(class_npy_file, 'rb') as f:
        imgs = np.load(f)
    # np.bool was removed in NumPy 1.24; np.bool_ is the dtype scalar type.
    if imgs.dtype == np.bool_:
        # Scale boolean masks to full 8-bit intensity.
        imgs = imgs.astype(np.uint8)
        imgs *= 255
    writer = tf.python_io.TFRecordWriter(output_path)
    for image in imgs:
        img = load_image(image)
        buf = io.BytesIO()
        img.save(buf, format='JPEG')
        buf.seek(0)
        write_example(buf.getvalue(), class_label, writer)
    writer.close()
    return len(imgs)
def mock_layout():
    """Build a NonCallableMock TextLayout whose shader program fabricates
    IndexedVertexList mocks on demand."""
    fake = NonCallableMock(spec=layout.TextLayout)
    fake.foreground_decoration_group = NonCallableMock()
    fake.attach_mock(Mock(), 'push_handlers')
    program = NonCallableMock(spec=ShaderProgram)
    fake.foreground_decoration_group.attach_mock(program, 'program')

    def _vertex_list(count, mode, batch=None, group=None, colors=None, visible=None):
        # Mimic the real program: expose the colors payload and visibility.
        vlist = NonCallableMock(spec=IndexedVertexList)
        vlist.colors = ListSlicesAsTuple(colors[1])
        vlist.visible = (1, 1)
        return vlist

    program.vertex_list = _vertex_list
    return fake
class AdvertisementMixin():
    """Admin helpers shared by advertisement model admins."""

    # Upper bound for the inline image preview, in pixels.
    MAX_IMAGE_WIDTH = 120

    def ad_image(self, obj):
        """Render an <img> preview for obj.image, or '' when unset."""
        if not obj.image:
            return ''
        return mark_safe(f'<img src="{obj.image.url}" style="max-width: {self.MAX_IMAGE_WIDTH}px" />')

    def ctr(self, obj):
        """Click-through rate formatted to three decimals."""
        return '{:.3f}%'.format(obj.ctr())

    def get_queryset(self, request):
        """Apply select_related per the admin's list_select_related setting."""
        queryset = super().get_queryset(request)
        if self.list_select_related is True:
            return queryset.select_related()
        if self.list_select_related:
            return queryset.select_related(*self.list_select_related)
        return queryset
def _get_data_from_provider(inputs, batch_size, split_name, is_training=True, load_image=False):
    """Batch landmark (and optionally image) tensors from a dataset provider.

    Builds a named tf.train.batch queue over the provider's 'landmarks'
    tensor, plus 'images' when load_image is set, and passes through
    'dataset_size'.

    NOTE(review): is_training is accepted but never used in this body —
    confirm whether callers rely on it.
    """
    # Always batch landmarks; optionally batch the raw images alongside them.
    input_tuple = [inputs['landmarks']]
    if load_image:
        input_tuple.append(inputs['images'])
    tmp_outputs = tf.train.batch(input_tuple, batch_size=batch_size, num_threads=64, capacity=(batch_size * 4), name=('batching_queues/%s' % split_name))
    outputs = dict()
    outputs['dataset_size'] = inputs['dataset_size']
    if load_image:
        outputs['landmarks'] = tmp_outputs[0]
        outputs['images'] = tmp_outputs[1]
    else:
        # NOTE(review): tf.train.batch returns a list when given a list, so
        # this branch stores a one-element list while the branch above stores
        # bare tensors — confirm downstream callers expect this asymmetry
        # (otherwise this should likely be tmp_outputs[0]).
        outputs['landmarks'] = tmp_outputs
    return outputs
class NameExpr(RefExpr):
    """Reference expression for a bare name (e.g. ``x`` or ``len``)."""

    __slots__ = ('name', 'is_special_form')

    __match_args__ = ('name', 'node')

    def __init__(self, name: str) -> None:
        super().__init__()
        # The referenced name exactly as written in the source.
        self.name = name
        # True when the name is part of a special form (e.g. a type alias
        # target) rather than an ordinary value reference.
        self.is_special_form = False

    def accept(self, visitor: ExpressionVisitor[T]) -> T:
        return visitor.visit_name_expr(self)

    def serialize(self) -> JsonDict:
        # Name expressions are never serialized on their own; reaching this
        # indicates a bug in the caller.
        assert False, f'Serializing NameExpr: {self}'
class GuiImportCargosCommand(wx.Command):
    """Undoable GUI command that adds imported cargo items to a fit."""

    def __init__(self, fitID, cargos):
        wx.Command.__init__(self, True, 'Import Cargos')
        self.internalHistory = InternalCommandHistory()
        self.fitID = fitID
        # Aggregate duplicate item IDs into a single amount; the mutation
        # info from the import tuples is not used here.
        self.cargos = {}
        for (itemID, amount, mutation) in cargos:
            self.cargos[itemID] = self.cargos.get(itemID, 0) + amount

    def Do(self):
        """Submit one add-cargo command per item; succeed if any did."""
        results = []
        for (itemID, amount) in self.cargos.items():
            cargo_cmd = CalcAddCargoCommand(fitID=self.fitID, cargoInfo=CargoInfo(itemID=itemID, amount=amount))
            results.append(self.internalHistory.submit(cargo_cmd))
        eos.db.commit()
        wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
        return any(results)

    def Undo(self):
        """Roll back every submitted command and notify the UI."""
        success = self.internalHistory.undoAll()
        eos.db.commit()
        wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))
        return success
# NOTE(review): the bare '_module()' line was a mangled decorator (NameError
# as written); reconstructed as the registry decorator — confirm upstream.
@BACKBONES.register_module()
class ResNet3dSlowOnly(ResNet3dPathway):
    """SlowOnly backbone: the slow pathway of SlowFast used standalone,
    i.e. a ResNet3dPathway with lateral connections disabled."""

    def __init__(self, *args, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), **kwargs):
        super().__init__(*args, lateral=lateral, conv1_kernel=conv1_kernel, conv1_stride_t=conv1_stride_t, pool1_stride_t=pool1_stride_t, inflate=inflate, **kwargs)
        # SlowOnly must never build lateral (fast->slow) connections.
        assert not self.lateral

    def forward(self, x):
        """Stem + residual stages; returns the final feature map."""
        x = self.conv1(x)
        x = self.maxpool(x)
        for layer_name in self.res_layers:
            x = getattr(self, layer_name)(x)
        return x
class PlaySteerVehicle(Packet):
    """Serverbound "Steer Vehicle" packet (packet id 29)."""

    id = 29
    to = 0

    def __init__(self, sideways: float, forward: float, flags: int) -> None:
        super().__init__()
        self.sideways = sideways
        self.forward = forward
        self.flags = flags

    # Fixed: decode takes ``cls`` but had lost its @classmethod decorator,
    # so calling it on the class would misbind the buffer argument.
    @classmethod
    def decode(cls, buf: Buffer) -> PlaySteerVehicle:
        """Decode two floats (sideways, forward) and one flag byte."""
        return cls(buf.unpack('f'), buf.unpack('f'), buf.unpack('B'))
class Command(LabelCommand):
    """Management command that ensures an Organization with the given name exists."""

    label = 'Organization name'

    def handle_label(self, label, **options):
        """Create (or fetch) the organization named *label* and log the outcome."""
        org, created = Organization.objects.get_or_create(name=label)
        message = '%s organization created.' if created else '%s organization already created.'
        logger.info(message, org)
class mit_b2(MixVisionTransformer):
    """MiT-B2 backbone: a MixVisionTransformer preset with depths (3, 4, 6, 3)."""

    def __init__(self, **kwargs):
        # NOTE(review): **kwargs is accepted for signature compatibility but is
        # not forwarded, matching the original behaviour.
        super(mit_b2, self).__init__(
            patch_size=4,
            embed_dims=[64, 128, 320, 512],
            num_heads=[1, 2, 5, 8],
            mlp_ratios=[4, 4, 4, 4],
            qkv_bias=True,
            norm_layer=partial(nn.LayerNorm, eps=1e-06),
            depths=[3, 4, 6, 3],
            sr_ratios=[8, 4, 2, 1],
            drop_rate=0.0,
            drop_path_rate=0.1)
class FastSelfAttnFunc(torch.autograd.Function):
    """Fused multi-head self-attention backed by fmhalib kernels.

    The qkv input projection and the output projection are folded into the
    fused kernel. ``forward`` returns ``(output, S_dmask)``; ``backward``
    returns the input gradient plus gradients for the four projection
    parameters. Batches smaller than 4 use the ``_nl`` kernel variants.
    """

    # forward/backward of a torch.autograd.Function must be staticmethods.
    @staticmethod
    def forward(ctx, input, cu_seqlens, p_dropout, max_s, is_training, num_heads, head_dim,
                recompute, in_proj_weight, in_proj_bias, out_proj_weight, out_proj_bias):
        # cu_seqlens holds batch_size + 1 cumulative sequence offsets.
        batch_size = cu_seqlens.numel() - 1
        if batch_size < 4:
            output, qkv, context, S_dmask = fmhalib.full_fwd_nl(
                input, in_proj_weight, in_proj_bias, out_proj_weight, out_proj_bias,
                cu_seqlens, p_dropout, max_s, is_training, head_dim, num_heads, None)
        else:
            output, qkv, context, S_dmask = fmhalib.full_fwd(
                input, in_proj_weight, in_proj_bias, out_proj_weight, out_proj_bias,
                cu_seqlens, p_dropout, max_s, is_training, head_dim, num_heads, None)
        ctx.save_for_backward(context, qkv, input, S_dmask, in_proj_weight, out_proj_weight,
                              in_proj_bias, out_proj_bias)
        ctx.cu_seqlens = cu_seqlens
        ctx.p_dropout = p_dropout
        ctx.max_s = max_s
        ctx.num_heads = num_heads
        ctx.head_dim = head_dim
        ctx.recompute = recompute
        return (output, S_dmask)

    @staticmethod
    def backward(ctx, dout, dsoftmax):
        batch_size = ctx.cu_seqlens.numel() - 1
        (context, qkv, input, S_dmask, in_proj_weight, out_proj_weight,
         in_proj_bias, out_proj_bias) = ctx.saved_tensors
        if batch_size < 4:
            (d_input, in_proj_weight_grad, in_proj_bias_grad, out_proj_weight_grad,
             out_proj_bias_grad) = fmhalib.full_bwd_nl(
                dout, qkv, context, S_dmask, input, in_proj_weight, in_proj_bias,
                out_proj_weight, out_proj_bias, ctx.cu_seqlens, ctx.p_dropout,
                ctx.head_dim, ctx.num_heads, ctx.max_s)
        else:
            (d_input, in_proj_weight_grad, in_proj_bias_grad, out_proj_weight_grad,
             out_proj_bias_grad) = fmhalib.full_bwd(
                dout, qkv, context, S_dmask, input, in_proj_weight, in_proj_bias,
                out_proj_weight, out_proj_bias, ctx.cu_seqlens, ctx.p_dropout,
                ctx.head_dim, ctx.num_heads, ctx.max_s)
        # Release saved state eagerly to free GPU memory.
        del ctx.cu_seqlens
        del ctx.p_dropout
        del ctx.max_s
        del ctx.head_dim
        del ctx.num_heads
        del ctx.recompute
        del context, S_dmask, qkv
        # BUG FIX: the original returned the undefined name ``input_grad``
        # (NameError at runtime); the computed input gradient is ``d_input``.
        return (d_input, None, None, None, None, None, None,
                in_proj_weight_grad, in_proj_bias_grad, out_proj_weight_grad, out_proj_bias_grad)
def Casestudy(model, data_loader, emodict, args, path='Data'):
    """Run *model* over a dataset and dump (true, predicted) label-name pairs to JSON.

    Args:
        model: Trained classifier; called as ``model(feat, lens)`` and expected
            to return log-probabilities as its first output.
        data_loader: Dict with 'feat' and 'label' batch lists.
        emodict: Vocabulary with an ``index2word`` mapping for label names.
        args: Namespace; ``args.gpu`` selects a CUDA device when not None.
        path: Output prefix; results go to ``<path>_Case.json``.

    Returns:
        1 on completion (legacy success flag).
    """
    model.eval()
    feats, labels = data_loader['feat'], data_loader['label']
    use_gpu = args.gpu is not None
    if use_gpu:
        # Device selection and model transfer happen once, not per batch
        # (the original repeated this inside the loop).
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
        # BUG FIX: 'cuda: 0' (with a space) is not a valid device string.
        device = torch.device('cuda:0')
        model.cuda(device)
    label_preds = []
    for bz in range(len(labels)):
        feat, lens = Utils.ToTensor(feats[bz], is_len=True)
        label = Utils.ToTensor(labels[bz])
        feat = Variable(feat)
        label = Variable(label)
        if use_gpu:
            feat = feat.cuda(device)
            label = label.cuda(device)
        log_probs = model(feat, lens)[0]
        emo_pred = torch.argmax(log_probs, dim=1)
        emo_true = label.view(label.size(0))
        # Pair up human-readable true/predicted label names for this batch.
        label_pred = []
        for lb in range(emo_true.size(0)):
            true_idx = emo_true[lb].item()
            pred_idx = emo_pred[lb].item()
            label_pred.append((emodict.index2word[true_idx], emodict.index2word[pred_idx]))
        label_preds.append(label_pred)
    Utils.saveToJson(path + '_Case.json', label_preds)
    return 1
class ResNet(container.SequentialDiffEq):
    """Residual conv stack packaged as a SequentialDiffEq.

    Layout: 3x3 conv into the intermediate width, ``n_resblocks`` BasicBlocks,
    then GroupNorm + ReLU and a 1x1 conv back to ``dim`` channels.
    """

    def __init__(self, dim, intermediate_dim, n_resblocks, conv_block=None):
        super(ResNet, self).__init__()
        if conv_block is None:
            conv_block = basic.ConcatCoordConv2d
        self.dim = dim
        self.intermediate_dim = intermediate_dim
        self.n_resblocks = n_resblocks
        layers = [conv_block(dim, intermediate_dim, ksize=3, stride=1, padding=1, bias=False)]
        layers.extend(BasicBlock(intermediate_dim, conv_block) for _ in range(n_resblocks))
        layers.extend([
            nn.GroupNorm(NGROUPS, intermediate_dim, eps=0.0001),
            nn.ReLU(inplace=True),
            conv_block(intermediate_dim, dim, ksize=1, bias=False),
        ])
        # Re-run the Sequential initializer with the assembled layer list
        # (mirrors the original double-__init__ pattern).
        super(ResNet, self).__init__(*layers)

    def __repr__(self):
        return '{name}({dim}, intermediate_dim={intermediate_dim}, n_resblocks={n_resblocks})'.format(
            name=self.__class__.__name__, **self.__dict__)
def test_create_project(gl, user):
    """Projects created as admin and via sudo appear in the right listings."""
    admin_project = gl.projects.create({'name': 'admin_project'})
    assert isinstance(admin_project, gitlab.v4.objects.Project)
    assert admin_project in gl.projects.list(search='admin_project')

    sudo_project = gl.projects.create({'name': 'sudo_project'}, sudo=user.id)

    created = gl.projects.list()
    created_gen = gl.projects.list(iterator=True)
    owned = gl.projects.list(owned=True)

    # Both projects are visible globally, but only the admin-owned one is "owned".
    assert (admin_project in created) and (sudo_project in created)
    assert (admin_project in owned) and (sudo_project not in owned)
    # The iterator form must agree with the eager list.
    assert len(created) == len(list(created_gen))

    admin_project.delete()
    sudo_project.delete()
def get_task_head(cfg):
    """Build the loss for *cfg* and wrap it in the matching task head.

    Returns:
        Tuple ``(head, task)`` where ``task`` is the string reported by
        ``build_loss`` ('metric', 'regression', or anything else for
        classification).
    """
    loss_obj, task = build_loss(cfg)
    if task == 'metric':
        return Metric(loss_obj, cfg.MODEL.INITIAL_NORMALIZATION_FACTOR), task
    if task == 'regression':
        return Regression(loss_obj, cfg.MODEL.EMBED_DIM), task
    # Anything else falls back to a classification head.
    return Classification(loss_obj, embed_dim=cfg.MODEL.EMBED_DIM,
                          num_classes=cfg.DATASET.NUM_CLASSES), task
def write_result_q05(results_dict, output_directory='./', filetype=None):
    """Write precision, AUC and the 2x2 confusion matrix to q05-metrics-results.txt.

    ``output_directory`` is prepended verbatim (it should end with a path
    separator). ``filetype`` is accepted for interface compatibility but unused.
    """
    out_path = f'{output_directory}q05-metrics-results.txt'
    cm = results_dict['confusion_matrix']
    with open(out_path, 'w') as outfile:
        outfile.write(f"Precision: {results_dict['precision']:f}\n")
        outfile.write(f"AUC: {results_dict['auc']:f}\n")
        outfile.write('Confusion Matrix:\n')
        outfile.write(f'{cm[(0, 0)]:8.1f} {cm[(0, 1)]:8.1f}\n'
                      f'{cm[(1, 0)]:8.1f} {cm[(1, 1)]:8.1f}\n')
def compute_cost(num_spin_orbs: int, lambda_tot: float, num_aux: int, kmesh: list[int],
                 dE_for_qpe: float = 0.0016, chi: int = 10) -> ResourceEstimates:
    """Two-pass resource estimation.

    The first pass runs with a placeholder step count (20000) just to learn the
    true number of QPE steps; the second pass re-costs with that converged
    count and yields the final estimates.
    """
    kx, ky, kz = kmesh[0], kmesh[1], kmesh[2]
    num_steps = _compute_cost(num_spin_orbs, lambda_tot, num_aux, dE_for_qpe, chi,
                              20000, kx, ky, kz)[0]
    toffolis_per_step, total_toffolis, logical_qubits = _compute_cost(
        num_spin_orbs, lambda_tot, num_aux, dE_for_qpe, chi, num_steps, kx, ky, kz)[:3]
    return ResourceEstimates(toffolis_per_step=toffolis_per_step,
                             total_toffolis=total_toffolis,
                             logical_qubits=logical_qubits)
class ScannerSubscriptionSamples(Object):
    """Canned ScannerSubscription presets for the TWS market scanner API.

    None of these factories touches instance state, so they are exposed as
    static methods (the original definitions took no ``self`` and would have
    broken when called on an instance).
    """

    @staticmethod
    def HotUSStkByVolume():
        """US major-exchange stocks ranked by trading volume."""
        scanSub = ScannerSubscription()
        scanSub.instrument = 'STK'
        scanSub.locationCode = 'STK.US.MAJOR'
        scanSub.scanCode = 'HOT_BY_VOLUME'
        return scanSub

    @staticmethod
    def TopPercentGainersIbis():
        """IBIS-listed European stocks ranked by percent gain."""
        scanSub = ScannerSubscription()
        scanSub.instrument = 'STOCK.EU'
        scanSub.locationCode = 'STK.EU.IBIS'
        scanSub.scanCode = 'TOP_PERC_GAIN'
        return scanSub

    @staticmethod
    def MostActiveFutSoffex():
        """Most active SOFFEX futures."""
        scanSub = ScannerSubscription()
        scanSub.instrument = 'FUT.EU'
        scanSub.locationCode = 'FUT.EU.SOFFEX'
        scanSub.scanCode = 'MOST_ACTIVE'
        return scanSub

    @staticmethod
    def HighOptVolumePCRatioUSIndexes():
        """US indexes with high option-volume put/call ratios."""
        scanSub = ScannerSubscription()
        scanSub.instrument = 'IND.US'
        scanSub.locationCode = 'IND.US'
        scanSub.scanCode = 'HIGH_OPT_VOLUME_PUT_CALL_RATIO'
        return scanSub
class STS17Crosslingual(AbsTaskSTS, CrosslingualTask):
    """Crosslingual STS task wrapping the mteb/sts17-crosslingual-sts dataset."""

    def description(self):
        """Return the task-metadata dict consumed by the benchmark runner."""
        return {
            'name': 'STS17',
            'hf_hub_name': 'mteb/sts17-crosslingual-sts',
            'description': 'STS 2017 dataset',
            # BUG FIX: the original 'reference' literal was truncated, leaving a
            # syntax error. NOTE(review): restored to the SemEval-2017 Task 1
            # page -- confirm against upstream mteb.
            'reference': 'https://alt.qcri.org/semeval2017/task1/',
            'type': 'STS',
            'category': 's2s',
            'eval_splits': ['test'],
            'eval_langs': _LANGUAGES,
            'main_score': 'cosine_spearman',
            'min_score': 0,
            'max_score': 5,
            'revision': 'af5e6fb845001ecf41f4c1e033ce921939a2a68d',
        }
def insert_import(import_stmt, test_case, file_input):
    """Insert *import_stmt* into *file_input*, after the last existing import.

    When the module has no imports yet, the statement goes directly before
    *test_case*. The test case's leading prefix (whitespace/comments) is moved
    onto the new import so the layout stays intact.
    """
    existing_imports = get_import_nodes(file_input)
    if existing_imports:
        anchor = existing_imports[-1].parent
        position = file_input.children.index(anchor) + 1
    else:
        position = file_input.children.index(test_case)
    import_stmt.prefix = test_case.prefix
    test_case.prefix = ''
    file_input.insert_child(position, import_stmt)
class CurComp(BaseSignalExpr):
    """Signal expression referring to the current component itself."""

    def __init__(s, comp, comp_id):
        super().__init__(comp.get_metadata(StructuralRTLIRGenL0Pass.rtlir_type))
        s.comp_id = comp_id

    def __eq__(s, other):
        if not isinstance(other, CurComp):
            return False
        return s.rtype == other.rtype and s.comp_id == other.comp_id

    def __hash__(s):
        # Hash mirrors __eq__: type, RTLIR type, and the component id.
        return hash((type(s), s.rtype, s.comp_id))

    def get_component_id(s):
        return s.comp_id
def test_dialog_checkboxes(skip_qtbot: pytestqt.qtbot.QtBot) -> None:
    """Clicking each checkbox flips the matching cosmetic-patch field."""
    cosmetic_patches = SuperMetroidCosmeticPatches()
    dialog = SuperCosmeticPatchesDialog(None, cosmetic_patches)
    skip_qtbot.addWidget(dialog)
    defaults = SuperMetroidCosmeticPatches()

    for field_name, checkbox in dialog.checkboxes.items():
        skip_qtbot.mouseClick(checkbox, QtCore.Qt.MouseButton.LeftButton)
        flipped = not getattr(defaults, field_name)
        assert getattr(dialog.cosmetic_patches, field_name) == flipped
def _get_stage_fn(stage_args):
    """Pop 'stage_type' from *stage_args* and return (stage constructor, remaining args).

    Dark stages do not accept the CSP-only kwargs, so those are stripped for
    that type. *stage_args* is mutated in place, as in the original.
    """
    stage_type = stage_args.pop('stage_type')
    assert stage_type in ('dark', 'csp', 'cs3')
    if stage_type == 'dark':
        for unsupported in ('expand_ratio', 'cross_linear', 'down_growth'):
            stage_args.pop(unsupported, None)
        return DarkStage, stage_args
    if stage_type == 'csp':
        return CrossStage, stage_args
    return CrossStage3, stage_args
class TestEncodingComparisonOperator():
    """Tests for ops.EncodingComparisonOperator: target/guide bookkeeping and __call__.

    Each test defines a minimal concrete TestOperator subclass whose repr/score
    hooks are simple arithmetic so the operator's plumbing can be checked
    exactly.
    """

    def test_set_target_guide(self):
        """Setting a guide after the target image stores both the raw and encoded guides."""
        class TestOperator(ops.EncodingComparisonOperator):
            # Target repr doubles the encoding; ctx is the encoding's norm.
            def target_enc_to_repr(self, image):
                repr = (image * 2.0)
                ctx = torch.norm(image)
                return (repr, ctx)

            def input_enc_to_repr(self, image, ctx):
                pass

            def calculate_score(self, input_repr, target_repr, ctx):
                pass

        torch.manual_seed(0)
        image = torch.rand(1, 3, 32, 32)
        guide = torch.rand(1, 1, 32, 32)
        encoder = enc.SequentialEncoder((nn.Conv2d(3, 3, 1),))
        enc_guide = encoder.propagate_guide(guide)
        test_op = TestOperator(encoder)
        test_op.set_target_image(image)
        # No guide yet right after setting the image.
        assert (not test_op.has_target_guide)
        test_op.set_target_guide(guide)
        assert test_op.has_target_guide
        actual = test_op.target_guide
        desired = guide
        ptu.assert_allclose(actual, desired)
        actual = test_op.target_enc_guide
        desired = enc_guide
        ptu.assert_allclose(actual, desired)
        # The stored target image must be untouched by guide assignment.
        actual = test_op.target_image
        desired = image
        ptu.assert_allclose(actual, desired)

    def test_set_target_guide_without_recalc(self):
        """With recalc_repr=False the stored target representation is left as-is."""
        class TestOperator(ops.EncodingComparisonOperator):
            def target_enc_to_repr(self, image):
                repr = (image * 2.0)
                ctx = torch.norm(image)
                return (repr, ctx)

            def input_enc_to_repr(self, image, ctx):
                pass

            def calculate_score(self, input_repr, target_repr, ctx):
                pass

        torch.manual_seed(0)
        image = torch.rand(1, 3, 32, 32)
        guide = torch.rand(1, 1, 32, 32)
        encoder = enc.SequentialEncoder((nn.Conv2d(3, 3, 1),))
        test_op = TestOperator(encoder)
        test_op.set_target_image(image)
        # Snapshot the repr before assigning the guide.
        desired = test_op.target_repr.clone()
        test_op.set_target_guide(guide, recalc_repr=False)
        actual = test_op.target_repr
        ptu.assert_allclose(actual, desired)

    def test_set_target_image(self):
        """Setting the target image computes repr and ctx via target_enc_to_repr."""
        class TestOperator(ops.EncodingComparisonOperator):
            def target_enc_to_repr(self, image):
                repr = (image * 2.0)
                ctx = torch.norm(image)
                return (repr, ctx)

            def input_enc_to_repr(self, image, ctx):
                pass

            def calculate_score(self, input_repr, target_repr, ctx):
                pass

        torch.manual_seed(0)
        image = torch.rand(1, 3, 128, 128)
        encoder = enc.SequentialEncoder((nn.Conv2d(3, 3, 1),))
        test_op = TestOperator(encoder)
        assert (not test_op.has_target_image)
        test_op.set_target_image(image)
        assert test_op.has_target_image
        actual = test_op.target_image
        desired = image
        ptu.assert_allclose(actual, desired)
        # repr is the encoding doubled, per the subclass above.
        actual = test_op.target_repr
        desired = (encoder(image) * 2.0)
        ptu.assert_allclose(actual, desired)
        # ctx is the norm of the encoding.
        actual = test_op.ctx
        desired = torch.norm(encoder(image))
        ptu.assert_allclose(actual, desired)

    def test_call(self):
        """__call__ encodes input and target and feeds both reprs to calculate_score."""
        class TestOperator(ops.EncodingComparisonOperator):
            def target_enc_to_repr(self, image):
                repr = (image + 1.0)
                return (repr, None)

            def input_enc_to_repr(self, image, ctx):
                return (image + 2.0)

            def calculate_score(self, input_repr, target_repr, ctx):
                return (input_repr * target_repr)

        torch.manual_seed(0)
        target_image = torch.rand(1, 3, 128, 128)
        input_image = torch.rand(1, 3, 128, 128)
        encoder = enc.SequentialEncoder((nn.Conv2d(3, 3, 1),))
        test_op = TestOperator(encoder)
        test_op.set_target_image(target_image)
        actual = test_op(input_image)
        desired = ((encoder(target_image) + 1.0) * (encoder(input_image) + 2.0))
        ptu.assert_allclose(actual, desired)

    def test_call_no_target(self):
        """Calling the operator without a target image must raise RuntimeError."""
        class TestOperator(ops.EncodingComparisonOperator):
            def target_enc_to_repr(self, image):
                pass

            def input_enc_to_repr(self, image, ctx):
                pass

            def calculate_score(self, input_repr, target_repr, ctx):
                pass

        torch.manual_seed(0)
        input_image = torch.rand(1, 3, 128, 128)
        encoder = enc.SequentialEncoder((nn.Conv2d(3, 3, 1),))
        test_op = TestOperator(encoder)
        with pytest.raises(RuntimeError):
            test_op(input_image)

    def test_call_batch_size_mismatch(self):
        """A larger input batch is reconciled with a single-image target."""
        class TestOperator(ops.EncodingComparisonOperator):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.batch_size_equal = False

            def target_enc_to_repr(self, enc):
                return (enc, None)

            def input_enc_to_repr(self, enc, ctx):
                return enc

            def calculate_score(self, input_repr, target_repr, ctx):
                # Record whether the operator equalized the batch sizes.
                input_batch_size = input_repr.size()[0]
                target_batch_size = target_repr.size()[0]
                self.batch_size_equal = (input_batch_size == target_batch_size)
                return 0.0

        torch.manual_seed(0)
        target_image = torch.rand(1, 1, 1, 1)
        input_image = torch.rand(2, 1, 1, 1)
        encoder = enc.SequentialEncoder((nn.Conv2d(1, 1, 1),))
        test_op = TestOperator(encoder)
        test_op.set_target_image(target_image)
        test_op(input_image)
        assert test_op.batch_size_equal

    def test_call_batch_size_error(self):
        """A multi-image target with a smaller input batch must raise RuntimeError."""
        class TestOperator(ops.EncodingComparisonOperator):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.batch_size_equal = False

            def target_enc_to_repr(self, enc):
                return (enc, None)

            def input_enc_to_repr(self, enc, ctx):
                return enc

            def calculate_score(self, input_repr, target_repr, ctx):
                pass

        torch.manual_seed(0)
        target_image = torch.rand(2, 1, 1, 1)
        input_image = torch.rand(1, 1, 1, 1)
        encoder = enc.SequentialEncoder((nn.Conv2d(1, 1, 1),))
        test_op = TestOperator(encoder)
        test_op.set_target_image(target_image)
        with pytest.raises(RuntimeError):
            test_op(input_image)

    def test_call_guided(self):
        """With target and input guides set, both encodings are guide-masked before repr."""
        class TestOperator(ops.EncodingComparisonOperator):
            def target_enc_to_repr(self, image):
                repr = (image + 1.0)
                return (repr, None)

            def input_enc_to_repr(self, image, ctx):
                return (image + 2.0)

            def calculate_score(self, input_repr, target_repr, ctx):
                return (input_repr * target_repr)

        torch.manual_seed(0)
        target_image = torch.rand(1, 3, 32, 32)
        input_image = torch.rand(1, 3, 32, 32)
        target_guide = torch.rand(1, 1, 32, 32)
        input_guide = torch.rand(1, 1, 32, 32)
        encoder = enc.SequentialEncoder((nn.Conv2d(3, 3, 1),))
        target_enc_guide = encoder.propagate_guide(target_guide)
        input_enc_guide = encoder.propagate_guide(input_guide)
        test_op = TestOperator(encoder)
        test_op.set_target_guide(target_guide)
        test_op.set_target_image(target_image)
        test_op.set_input_guide(input_guide)
        actual = test_op(input_image)
        desired = ((TestOperator.apply_guide(encoder(target_image), target_enc_guide) + 1.0) * (TestOperator.apply_guide(encoder(input_image), input_enc_guide) + 2.0))
        ptu.assert_allclose(actual, desired)

    def test_non_persistent_images(self):
        """state_dict round-trips with strict=True even though images are non-persistent buffers."""
        class TestOperator(ops.EncodingComparisonOperator):
            def target_enc_to_repr(self, enc):
                return (enc, None)

            def input_enc_to_repr(self, enc, ctx):
                pass

            def calculate_score(self, input_repr, target_repr, ctx):
                pass

        torch.manual_seed(0)
        target_image = torch.rand(1, 3, 32, 32)
        target_guide = torch.rand(1, 1, 32, 32)
        input_guide = torch.rand(1, 1, 32, 32)
        encoder = enc.SequentialEncoder((nn.Conv2d(3, 3, 1),))
        test_op = TestOperator(encoder)
        test_op.set_target_guide(target_guide)
        test_op.set_target_image(target_image)
        test_op.set_input_guide(input_guide)
        state_dict = test_op.state_dict()
        new_test_op = TestOperator(encoder)
        # strict=True must not complain about the image/guide buffers.
        new_test_op.load_state_dict(state_dict, strict=True)
def let_me_upload(file_path):
    """Copy *file_path* to a sibling file carrying a randomly chosen extension.

    Files of 30 MB or more draw from the "big" suffix list; smaller files may
    also get document/media suffixes. A pickled trailer holding the original
    file name (padded toward 512 bytes) is appended so the name can be
    recovered later. Returns the new file path.
    """
    size_mb = os.path.getsize(file_path) / 1024 / 1024
    file_name = os.path.basename(file_path)
    big_candidates = ['zip', 'rar', 'apk', 'ipa', 'exe', 'pdf', '7z', 'tar', 'deb', 'dmg', 'rpm', 'flac']
    small_candidates = big_candidates + ['doc', 'epub', 'mobi', 'mp3', 'ppt', 'pptx']
    # Both draws happen unconditionally (kept for RNG-sequence compatibility).
    big_pick = choice(big_candidates)
    small_pick = choice(small_candidates)
    suffix = small_pick if size_mb < 30 else big_pick
    stem = '.'.join(file_path.split('.')[:-1])
    new_file_path = stem + '.' + suffix
    with open(new_file_path, 'wb') as dst, open(file_path, 'rb') as src:
        while chunk := src.read(4096):
            dst.write(chunk)
        # Append the recovery trailer after the raw content.
        padding = 512 - len(file_name.encode('utf-8')) - 42
        trailer = pickle.dumps({'name': file_name, 'padding': b'\x00' * padding}, protocol=4)
        dst.write(trailer)
    return new_file_path
class FixedFieldTest(BaseFieldTestMixin, NumberTestMixin, FieldTestCase):
    """Tests for the Fixed (fixed-precision number) field."""

    field_class = fields.Fixed

    def test_defaults(self):
        field = fields.Fixed()
        assert not field.required
        assert field.__schema__ == {'type': 'number'}

    def test_with_default(self):
        field = fields.Fixed(default=0.5)
        assert not field.required
        assert field.__schema__ == {'type': 'number', 'default': 0.5}

    def test_fixed(self):
        # Precision argument controls the number of decimal places rendered.
        five_places = fields.Fixed(5)
        four_places = fields.Fixed(4)
        self.assert_field(five_places, PI, '3.14159')
        self.assert_field(four_places, PI, '3.1416')
        self.assert_field(four_places, 3, '3.0000')
        self.assert_field(four_places, '03', '3.0000')
        self.assert_field(four_places, '03.0', '3.0000')

    def test_zero(self):
        self.assert_field(fields.Fixed(), '0', '0.00000')

    def test_infinite(self):
        field = fields.Fixed()
        for bad_value in ('+inf', '-inf'):
            self.assert_field_raises(field, bad_value)

    def test_nan(self):
        self.assert_field_raises(fields.Fixed(), 'NaN')
def test_complex(tmpdir):
    """A complex-valued GTiff raster round-trips through write/read unchanged."""
    tif_path = str(tmpdir.join('complex.tif'))
    expected = np.ones((2, 2), dtype=complex_)
    profile = {'driver': 'GTiff', 'width': 2, 'height': 2, 'count': 1, 'dtype': complex_}
    with rasterio.open(tif_path, 'w', **profile) as dst:
        dst.write(expected, 1)
    with rasterio.open(tif_path) as src:
        actual = src.read(1)
    assert np.array_equal(expected, actual)
def main(options=None, args=None):
    """Diff the current interface configuration against the last stored one.

    Prints an overview of all "real" (non-local, non-tunnel) adapters, then
    warns about added, removed, and changed interfaces and host/domain-name
    changes. ``options.maxage`` bounds how stale the "current" snapshot may be.
    """
    tdb = ops.db.get_tdb()
    if options is None:
        maxage = datetime.timedelta(seconds=0)
    else:
        maxage = datetime.timedelta(seconds=options.maxage)
    last_ifconfig = ops.networking.ifconfig.get_ifconfig(maxage=datetime.timedelta.max)
    cur_ifconfig = ops.networking.ifconfig.get_ifconfig(maxage=maxage)

    def _real_ifaces(ifconfig):
        # Skip loopback/local and tunnel-encapsulation pseudo interfaces.
        return [i for i in ifconfig.interfaceitem
                if i.type.lower() not in ['local', 'tunnel encapsulation']]

    iface_adds = []
    iface_removes = []
    iface_changes = []
    for old_iface in _real_ifaces(last_ifconfig):
        # BUG FIX: filter() returns a lazy iterator on Python 3; the original
        # called len() and indexed it (Python 2 semantics). Materialize instead.
        match_iface = [i for i in cur_ifconfig.interfaceitem if i.address == old_iface.address]
        if len(match_iface) == 0:
            iface_removes.append(old_iface)
        else:
            adds, removes = compare_interface_ips(old_iface, match_iface[0])
            # One change entry is recorded per differing attribute, so the same
            # pair may appear several times -- preserved from the original.
            if (len(adds) + len(removes)) > 0:
                iface_changes.append((old_iface, match_iface[0]))
            if old_iface.name != match_iface[0].name:
                iface_changes.append((old_iface, match_iface[0]))
            if old_iface.dhcpenabled != match_iface[0].dhcpenabled:
                iface_changes.append((old_iface, match_iface[0]))
            if old_iface.gateway.ip != match_iface[0].gateway.ip:
                iface_changes.append((old_iface, match_iface[0]))
            if old_iface.enabled != match_iface[0].enabled:
                iface_changes.append((old_iface, match_iface[0]))
    for new_iface in _real_ifaces(cur_ifconfig):
        match_iface = [i for i in last_ifconfig.interfaceitem if i.address == new_iface.address]
        if len(match_iface) == 0:
            iface_adds.append(new_iface)

    # Build a row per (interface, IP) pair for the overview table.
    pretty_ip_list = []
    for iface in _real_ifaces(cur_ifconfig):
        for ipaddr in iface.ipaddress:
            dhcpinfo = iface.dhcp.ip if iface.dhcpenabled else 'Off'
            pretty_ip_list.append({'description': iface.description, 'ip': ipaddr.ip,
                                   'mac': iface.address, 'gateway': iface.gateway.ip,
                                   'netmask': iface.subnetmask, 'dhcp': ('%s' % dhcpinfo),
                                   'name': iface.name})
    if cur_ifconfig.fixeddataitem.domainname != '':
        fqdn = ('%s.%s' % (cur_ifconfig.fixeddataitem.hostname, cur_ifconfig.fixeddataitem.domainname))
    else:
        fqdn = cur_ifconfig.fixeddataitem.hostname
    print(('FQDN: %s' % fqdn))
    print(('DNS Servers: %s' % ', '.join(
        x.ip for x in cur_ifconfig.fixeddataitem.dnsservers.dnsserver)))
    ops.info(('Showing all non-local and non-tunnel encapsulation adapter information, see command %d for full interface list' % cur_ifconfig.commandmetadata.id))
    ops.pprint.pprint(pretty_ip_list,
                      header=['Description', 'MAC', 'IP', 'Netmask', 'Gateway', 'DHCP Server', 'Name'],
                      dictorder=['description', 'mac', 'ip', 'netmask', 'gateway', 'dhcp', 'name'])
    if ((last_ifconfig.fixeddataitem.hostname != cur_ifconfig.fixeddataitem.hostname)
            or (last_ifconfig.fixeddataitem.domainname != cur_ifconfig.fixeddataitem.domainname)):
        ops.warn(('Host and/or domain name have changed, was %s.%s, not %s.%s' % (
            last_ifconfig.fixeddataitem.hostname, last_ifconfig.fixeddataitem.domainname,
            cur_ifconfig.fixeddataitem.hostname, cur_ifconfig.fixeddataitem.domainname)))
    if len(iface_adds) > 0:
        ops.warn('New interfaces found')
        ops.warn('')
        for iface in iface_adds:
            print_iface(iface)
    if len(iface_removes) > 0:
        ops.warn('Interfaces removed')
        ops.warn('')
        for iface in iface_removes:
            print_iface(iface)
    if len(iface_changes) > 0:
        ops.warn('Interface changes')
        ops.warn('')
        i = 1
        for pair in iface_changes:
            ops.warn(('Change %d' % i))
            ops.warn('Old version')
            print_iface(pair[0])
            ops.warn('New version')
            print_iface(pair[1])
            i += 1
def calculate_d_to_volume(dose_grid, label, volume, volume_in_cc=False):
    """Return the dose received by at least *volume* (%) of the labelled region.

    The dose grid is first resampled onto the label geometry. When
    ``volume_in_cc`` is True, *volume* is interpreted in cubic centimetres and
    converted to a percentage of the structure volume (capped at 100%).
    """
    resampled = sitk.Resample(dose_grid, label, sitk.Transform(), sitk.sitkLinear)
    dose_array = sitk.GetArrayFromImage(resampled)
    mask_array = sitk.GetArrayFromImage(label)
    if volume_in_cc:
        structure_volume = (mask_array > 0).sum() * np.prod(label.GetSpacing())
        # cc -> mm^3 (x1000), normalized by structure volume, as a percentage.
        volume = min(volume * 1000 / structure_volume * 100, 100)
    return np.percentile(dose_array[mask_array > 0], 100 - volume)
class JsonLexer(Lexer):
    """
    Lexer for JSON data structures.

    Hand-written scanner (no regex state machine): it walks the text one
    character at a time, tracking which token kind is currently open. String
    tokens are queued so that a following ':' can retroactively re-label them
    as object keys (Name.Tag). Javascript-style comments are tolerated.
    """

    name = 'JSON'
    # NOTE(review): the original URL literal was truncated (unterminated
    # string); restored to the JSON homepage used by upstream Pygments.
    url = 'https://www.json.org'
    aliases = ['json', 'json-object']
    filenames = ['*.json', '*.jsonl', '*.ndjson', 'Pipfile.lock']
    mimetypes = ['application/json', 'application/json-object', 'application/x-ndjson', 'application/jsonl', 'application/json-seq']
    version_added = '1.5'

    # Character classes. BUG FIX: the digit characters were missing from
    # `integers` and `hexadecimals`, which made every numeric literal and
    # \uXXXX escape lex as Error; restored per upstream Pygments.
    integers = set('-0123456789')
    floats = set('.eE+')
    constants = set('truefalsenull')  # true|false|null
    hexadecimals = set('0123456789abcdefABCDEF')
    punctuations = set('{}[],')
    whitespaces = {' ', '\n', '\r', '\t'}

    def get_tokens_unprocessed(self, text):
        """Yield ``(index, tokentype, value)`` triples for *text*."""
        in_string = False
        in_escape = False
        in_unicode_escape = 0
        in_whitespace = False
        in_constant = False
        in_number = False
        in_float = False
        in_punctuation = False
        in_comment_single = False
        in_comment_multiline = False
        expecting_second_comment_opener = False
        expecting_second_comment_closer = False
        start = 0
        queue = []
        for stop, character in enumerate(text):
            if in_string:
                if in_unicode_escape:
                    if character in self.hexadecimals:
                        in_unicode_escape -= 1
                        if not in_unicode_escape:
                            in_escape = False
                    else:
                        in_unicode_escape = 0
                        in_escape = False
                elif in_escape:
                    if character == 'u':
                        in_unicode_escape = 4
                    else:
                        in_escape = False
                elif character == '\\':
                    in_escape = True
                elif character == '"':
                    # Queue the string: a later ':' may re-label it as a key.
                    queue.append((start, String.Double, text[start:stop + 1]))
                    in_string = False
                    in_escape = False
                    in_unicode_escape = 0
                continue
            elif in_whitespace:
                if character in self.whitespaces:
                    continue
                if queue:
                    queue.append((start, Whitespace, text[start:stop]))
                else:
                    yield start, Whitespace, text[start:stop]
                in_whitespace = False
            elif in_constant:
                if character in self.constants:
                    continue
                yield start, Keyword.Constant, text[start:stop]
                in_constant = False
            elif in_number:
                if character in self.integers:
                    continue
                elif character in self.floats:
                    in_float = True
                    continue
                if in_float:
                    yield start, Number.Float, text[start:stop]
                else:
                    yield start, Number.Integer, text[start:stop]
                in_number = False
                in_float = False
            elif in_punctuation:
                if character in self.punctuations:
                    continue
                yield start, Punctuation, text[start:stop]
                in_punctuation = False
            elif in_comment_single:
                if character != '\n':
                    continue
                if queue:
                    queue.append((start, Comment.Single, text[start:stop]))
                else:
                    yield start, Comment.Single, text[start:stop]
                in_comment_single = False
            elif in_comment_multiline:
                if character == '*':
                    expecting_second_comment_closer = True
                elif expecting_second_comment_closer:
                    expecting_second_comment_closer = False
                    if character == '/':
                        if queue:
                            queue.append((start, Comment.Multiline, text[start:stop + 1]))
                        else:
                            yield start, Comment.Multiline, text[start:stop + 1]
                        in_comment_multiline = False
                continue
            elif expecting_second_comment_opener:
                expecting_second_comment_opener = False
                if character == '/':
                    in_comment_single = True
                    continue
                elif character == '*':
                    in_comment_multiline = True
                    continue
                # A lone '/' is an error; flush the queue first.
                yield from queue
                queue.clear()
                yield start, Error, text[start:stop]
            # A new token starts at this character.
            start = stop
            if character == '"':
                in_string = True
            elif character in self.whitespaces:
                in_whitespace = True
            elif character in {'f', 'n', 't'}:  # first letters of false/null/true
                yield from queue
                queue.clear()
                in_constant = True
            elif character in self.integers:
                yield from queue
                queue.clear()
                in_number = True
            elif character == ':':
                # ':' means the queued string(s) were object keys.
                for _start, _token, _text in queue:
                    if _token is String.Double:
                        yield _start, Name.Tag, _text
                    else:
                        yield _start, _token, _text
                queue.clear()
                in_punctuation = True
            elif character in self.punctuations:
                yield from queue
                queue.clear()
                in_punctuation = True
            elif character == '/':
                # Possible comment opener; confirmed on the next character.
                expecting_second_comment_opener = True
            else:
                yield from queue
                queue.clear()
                yield start, Error, character
        # Flush whatever token was still open at end of input.
        yield from queue
        if in_string:
            yield start, Error, text[start:]
        elif in_float:
            yield start, Number.Float, text[start:]
        elif in_number:
            yield start, Number.Integer, text[start:]
        elif in_constant:
            yield start, Keyword.Constant, text[start:]
        elif in_whitespace:
            yield start, Whitespace, text[start:]
        elif in_punctuation:
            yield start, Punctuation, text[start:]
        elif in_comment_single:
            yield start, Comment.Single, text[start:]
        elif in_comment_multiline:
            yield start, Error, text[start:]
        elif expecting_second_comment_opener:
            yield start, Error, text[start:]
def main(unused_argv):
    """Evaluate a mip-NeRF checkpoint over the test set.

    Renders every test image with fixed defocus parameters, reports PSNR, and
    (unless --eval_once) loops waiting for new checkpoints while logging a
    showcase image to TensorBoard.
    """
    # Defocus/camera parameters used by every render call below.
    a = 0.0
    f = 0.1
    l = 3.6
    train_coc = 1
    config = utils.load_config()
    dataset = datasets.get_dataset('test', FLAGS.data_dir, config)
    # BUG FIX: random.PRNGKey requires an explicit seed argument;
    # the original called it with none.
    model, init_variables = models.construct_mipnerf(random.PRNGKey(0), dataset.peek())
    optimizer = flax.optim.Adam(config.lr_init).create(init_variables)
    state = utils.TrainState(optimizer=optimizer)
    del optimizer, init_variables

    def render_eval_fn(variables, _, rays, a, f, l, train_coc):
        return jax.lax.all_gather(
            model.apply(variables, random.PRNGKey(0), rays, randomized=False,
                        white_bkgd=config.white_bkgd, a=a, f=f, l=l, train_coc=train_coc),
            axis_name='batch')

    render_eval_pfn = jax.pmap(render_eval_fn, in_axes=(None, None, 0, None, None, None, None),
                               donate_argnums=(2,), axis_name='batch')
    last_step = 0
    out_dir = path.join(FLAGS.train_dir, 'path_renders' if config.render_path else 'test_preds')
    if not FLAGS.eval_once:
        summary_writer = tensorboard.SummaryWriter(path.join(FLAGS.train_dir, 'eval'))
    while True:
        state = checkpoints.restore_checkpoint(FLAGS.train_dir, state)
        step = int(state.optimizer.state.step)
        if step <= last_step:
            continue
        if FLAGS.save_output and (not utils.isdir(out_dir)):
            utils.makedirs(out_dir)
        psnr_values = []
        avg_values = []
        if not FLAGS.eval_once:
            showcase_index = random.randint(random.PRNGKey(step), (), 0, dataset.size)
        for idx in range(dataset.size):
            print(f'Evaluating {idx + 1}/{dataset.size}')
            batch = next(dataset)
            pred_color, pred_distance, pred_acc = models.render_image(
                functools.partial(render_eval_pfn, state.optimizer.target), batch['rays'],
                None, chunk=FLAGS.chunk, a=a, f=f, l=l, train_coc=train_coc)
            if jax.host_id() != 0:
                continue
            if (not FLAGS.eval_once) and (idx == showcase_index):
                showcase_color = pred_color
                showcase_acc = pred_acc
                if not config.render_path:
                    showcase_gt = batch['pixels']
            if not config.render_path:
                psnr = float(math.mse_to_psnr(((pred_color - batch['pixels']) ** 2).mean()))
                print(f'PSNR={psnr:.4f}')
                psnr_values.append(psnr)
            if FLAGS.save_output and (config.test_render_interval > 0):
                if (idx % config.test_render_interval) == 0:
                    utils.save_img_uint8(pred_color, path.join(out_dir, 'color_{:03d}.png'.format(idx)))
                    utils.save_img_float32(pred_distance, path.join(out_dir, 'distance_{:03d}.tiff'.format(idx)))
                    utils.save_img_float32(pred_acc, path.join(out_dir, 'acc_{:03d}.tiff'.format(idx)))
        print('AVG_PSNR: ', np.mean(np.array(psnr_values)))
        if (not FLAGS.eval_once) and (jax.host_id() == 0):
            summary_writer.image('pred_color', showcase_color, step)
            summary_writer.image('pred_acc', showcase_acc, step)
            if not config.render_path:
                summary_writer.scalar('psnr', np.mean(np.array(psnr_values)), step)
                summary_writer.image('target', showcase_gt, step)
        if FLAGS.save_output and (not config.render_path) and (jax.host_id() == 0):
            # BUG FIX: the original bound the file handle to `f`, clobbering
            # the focal-length parameter used by render calls on the next
            # iteration of the while loop.
            with utils.open_file(path.join(out_dir, f'psnrs_{step}.txt'), 'w') as psnr_file:
                psnr_file.write(' '.join(str(v) for v in psnr_values))
        if FLAGS.eval_once:
            break
        if int(step) >= config.max_steps:
            break
        last_step = step
class loss_mse(nn.Module):
    """Squared mean-residual loss.

    Flattens each sample, averages the raw (signed) residual per sample,
    squares it, then averages over the batch. Note this is the square of the
    mean error, not the conventional element-wise MSE.
    """

    def __init__(self):
        super(loss_mse, self).__init__()

    def forward(self, pred, truth):
        flat_size = pred.shape[1] * pred.shape[2] * pred.shape[3]
        residual = pred.view(-1, flat_size) - truth.view(-1, flat_size)
        per_sample_mean = residual.mean(dim=1)
        return (per_sample_mean ** 2).mean(dim=0)
class TIconTheme(TestCase):
    """Sanity-check that the bundled application icons resolve in the GTK theme."""

    def test_icon_theme(self):
        theme = Gtk.IconTheme.get_default()
        theme.append_search_path(quodlibet.get_image_dir())
        expected_icons = [
            'io.github.quodlibet.QuodLibet',
            'io.github.quodlibet.ExFalso',
            'quodlibet-missing-cover',
        ]
        for icon_name in expected_icons:
            self.assertTrue(theme.has_icon(icon_name))
# BUG FIX: the class uses dataclasses.field(...) declarations but had lost its
# @dataclass decorator; without it the attributes are bare Field objects and
# no generated __init__ exists.
@dataclasses.dataclass
class FTDataArguments():
    """Arguments describing the data files and preprocessing for fine-tuning."""

    train_file: str = dataclasses.field(default=None, metadata={'help': 'A csv or a json file containing the training data.'})
    eval_file: Optional[str] = dataclasses.field(default=None, metadata={'help': 'A csv or a json file containing the validation data.'})
    test_file: Optional[str] = dataclasses.field(default=None, metadata={'help': 'A csv or a json file containing the test data.'})
    infer_file: Optional[str] = dataclasses.field(default=None, metadata={'help': 'A csv or a json file containing the data to predict on.'})
    task_name: Optional[str] = dataclasses.field(default=None, metadata={'help': 'The name of the task to train on.'})
    label_list: Optional[List[str]] = dataclasses.field(default=None, metadata={'help': 'The list of labels for the task.'})
    max_length: Optional[int] = dataclasses.field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    pad_to_max_length: Optional[bool] = dataclasses.field(default=False, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
def add_imported_function_or_module(self, item):
    """Register *item* for profiling.

    Functions are added directly, classes contribute every plain function in
    their namespace, and modules are added wholesale. Anything else is
    silently ignored (no counting is enabled in that case).
    """
    if inspect.isfunction(item):
        self.add_function(item)
    elif inspect.isclass(item):
        for member in item.__dict__.values():
            if inspect.isfunction(member):
                self.add_function(member)
    elif inspect.ismodule(item):
        self.add_module(item)
    else:
        return
    self.enable_by_count()
class DefaultWildcard:
    """Occurrence wildcard matching by exactness and name/object/type/instance checks."""

    def __init__(self, project):
        self.project = project

    def get_name(self):
        return 'default'

    def matches(self, suspect, arg=''):
        """Return True when *suspect* passes both the exactness and object checks."""
        args = parse_arg(arg)
        # Generator keeps the original short-circuit order: exactness first.
        return all(check(args, suspect)
                   for check in (self._check_exact, self._check_object))

    def _check_object(self, args, suspect):
        kind = None
        expected = None
        unsure = args.get('unsure', False)
        # The last matching key wins, mirroring the original scan order.
        for check in ('name', 'object', 'type', 'instance'):
            if check in args:
                kind = check
                expected = args[check]
        if expected is None:
            return True
        checker = _CheckObject(self.project, expected, kind, unsure=unsure)
        return checker(suspect.pymodule, suspect.node)

    def _check_exact(self, args, suspect):
        node = suspect.node
        if args.get('exact'):
            # Exact mode: the node must be a Name matching the suspect's name.
            if not isinstance(node, ast.Name) or node.id != suspect.name:
                return False
        elif not isinstance(node, ast.expr):
            return False
        return True
def load_svhns(data_dir, use_augmentation='base', use_consistency=False, aux_take_amount=None, aux_data_filename='/cluster/scratch/rarade/svhns/ti_500K_pseudo_labeled.pickle', validation=False):
data_dir = re.sub('svhns', 'svhn', data_dir)
test_transform = transforms.Compose([transforms.ToTensor()])
train_transform = test_transform
train_dataset = SemiSupervisedSVHN(base_dataset='svhn', root=data_dir, train=True, download=True, transform=train_transform, aux_data_filename=aux_data_filename, add_aux_labels=True, aux_take_amount=aux_take_amount, validation=validation)
test_dataset = SemiSupervisedSVHN(base_dataset='svhn', root=data_dir, train=False, download=True, transform=test_transform)
if validation:
val_dataset = torchvision.datasets.SVHN(root=data_dir, split='train', download=True, transform=test_transform)
val_dataset = torch.utils.data.Subset(val_dataset, np.arange(0, 1024))
return (train_dataset, test_dataset, val_dataset)
return (train_dataset, test_dataset) |
def run_and_save(n: int, n_paulis: int, n_sweeps: int, n_shots: int, save_dir: str, use_engine: bool) -> None:
logging.info('Beginning quantum-enhanced circuit generation.')
system_pairs = run_config.qubit_pairs()
system_pairs = system_pairs[:n]
rand_source = np.random.RandomState(1234)
logging.info('Generating pauli strings.')
paulis = np.array(['X', 'Y', 'Z', 'I'])
pauli_strings = rand_source.choice(a=paulis, size=(n_paulis, n), replace=True)
for pauli in pauli_strings:
logging.info(f'Processing pauli: {pauli}')
(circuit, sweeps, basis_arr) = build_circuit(system_pairs, pauli, n_shots, rand_source)
all_results = []
for b in range(0, n_shots, n_sweeps):
results = run_config.execute_sweep(circuit, sweeps[b:(b + n_sweeps)], use_engine)
batch_results = []
for (j, single_circuit_samples) in enumerate(results):
qubit_order = [f'q{i}' for i in range(n)]
out0 = single_circuit_samples.data[qubit_order].to_numpy()
batch_results.append(np.squeeze(out0))
batch_results = np.array(batch_results)
all_results.append(batch_results)
all_results = np.concatenate(all_results)
file_name = 'Q-size-{}-pauli-{}'.format(n, ''.join((t for t in pauli)))
basis_file_name = 'Q-size-{}-pauli-{}-basis'.format(n, ''.join((t for t in pauli)))
np.save(os.path.join(save_dir, file_name), all_results)
np.save(os.path.join(save_dir, basis_file_name), basis_arr)
logging.debug(('Saved: ' + file_name)) |
class Dancer():
states = ['start', 'left_food_left', 'left', 'right_food_right']
def __init__(self, name, beat):
self.my_name = name
self.my_beat = beat
self.moves_done = 0
async def on_enter_start(self):
self.moves_done += 1
async def wait(self):
print(f'{self.my_name} stepped {self.state}')
(await asyncio.sleep(self.my_beat))
async def dance(self):
while (self.moves_done < 5):
(await self.step()) |
class TMP4HasTags64Bit(TMP4, TMP4HasTagsMixin):
original = os.path.join(DATA_DIR, 'truncated-64bit.mp4')
def test_has_covr(self):
pass
def test_bitrate(self):
self.failUnlessEqual(self.audio.info.bitrate, 128000)
def test_length(self):
self.failUnlessAlmostEqual(0.325, self.audio.info.length, 3)
def faad(self):
pass |
_dtype_float_test(only64=True, additional_kwargs={'method_tol': [('rk4', (1e-08, 1e-05)), ('rk38', (1e-08, 1e-05)), ('rk45', (1e-08, 1e-05)), ('rk23', (1e-06, 0.0001)), ('euler', (0.05, 0.0001))], 'clss': [IVPModule, IVPNNModule]})
def test_ivp_methods(dtype, device, method_tol, clss):
torch.manual_seed(100)
random.seed(100)
nr = 2
nb = 3
nt = 5
t0 = 0.0
t1 = 0.2
a = torch.nn.Parameter(torch.rand((nr,), dtype=dtype, device=device).requires_grad_())
b = torch.nn.Parameter(torch.randn((nr,), dtype=dtype, device=device).requires_grad_())
c = torch.randn((nr,), dtype=dtype, device=device).requires_grad_()
ts = torch.linspace(t0, t1, nt, dtype=dtype, device=device).requires_grad_()
y0 = torch.rand((nb, nr), dtype=dtype, device=device).requires_grad_()
ts1 = ts.unsqueeze((- 1)).unsqueeze((- 1))
(method, (rtol, atol)) = method_tol
fwd_options = {'method': method}
def getoutput(a, b, c, ts, y0):
module = clss(a, b)
yt = solve_ivp(module.forward, ts, y0, params=(c,), **fwd_options)
return yt
yt = getoutput(a, b, c, ts, y0)
yt_true = (y0 * torch.exp(((- ((((0.5 * a) * (ts1 + t0)) + b) + c)) * (ts1 - t0))))
assert torch.allclose(yt, yt_true, rtol=rtol, atol=atol) |
class TestPdbBreakpoint(utt.InferShapeTester):
def setup_method(self):
super().setup_method()
self.input1 = fmatrix()
self.input2 = fscalar()
self.output = dot((self.input1 - self.input2), (self.input1 - self.input2).transpose())
self.breakpointOp = PdbBreakpoint('Sum of output too high')
self.condition = gt(self.output.sum(), 1000)
(self.monitored_input1, self.monitored_input2, self.monitored_output) = self.breakpointOp(self.condition, self.input1, self.input2, self.output)
def test_infer_shape(self):
input1_value = np.arange(6).reshape(2, 3).astype('float32')
input2_value = 10.0
self._compile_and_check([self.input1, self.input2], [self.monitored_input1, self.monitored_input2, self.monitored_output], [input1_value, input2_value], PdbBreakpoint)
def test_grad(self):
input1_value = np.arange(9).reshape(3, 3).astype('float32')
input2_value = 10.0
grads = [grad(self.monitored_input1.sum(), self.input1), grad(self.monitored_input2.sum(), self.input2)]
fct = function([self.input1, self.input2], (grads + [self.monitored_input1]))
gradients = fct(input1_value, input2_value)[:(- 1)]
expected_gradients = [np.ones((3, 3), dtype='float32'), np.array(1.0, dtype='float32')]
for i in range(len(gradients)):
np.testing.assert_allclose(gradients[i], expected_gradients[i])
def test_fprop(self):
input1_value = np.arange(9).reshape(3, 3).astype('float32')
input2_value = 10.0
fct = function([self.input1, self.input2], [self.monitored_input1, self.monitored_input2])
output = fct(input1_value, input2_value)
np.testing.assert_allclose(output[0], input1_value)
np.testing.assert_allclose(output[1], input2_value)
def test_connection_pattern(self):
node = self.monitored_output.owner
connection_pattern = self.breakpointOp.connection_pattern(node)
expected_pattern = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
assert (connection_pattern == expected_pattern) |
class InputDataFields(object):
image = 'image'
original_image = 'original_image'
key = 'key'
source_id = 'source_id'
filename = 'filename'
groundtruth_image_classes = 'groundtruth_image_classes'
groundtruth_boxes = 'groundtruth_boxes'
groundtruth_classes = 'groundtruth_classes'
groundtruth_label_types = 'groundtruth_label_types'
groundtruth_is_crowd = 'groundtruth_is_crowd'
groundtruth_area = 'groundtruth_area'
groundtruth_difficult = 'groundtruth_difficult'
proposal_boxes = 'proposal_boxes'
proposal_objectness = 'proposal_objectness'
groundtruth_instance_masks = 'groundtruth_instance_masks'
groundtruth_instance_classes = 'groundtruth_instance_classes'
groundtruth_keypoints = 'groundtruth_keypoints'
groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
groundtruth_label_scores = 'groundtruth_label_scores' |
def fcn8sd_resnetd101b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, multi_output=True).features
del backbone[(- 1)]
return get_fcn8sd(backbone=backbone, num_classes=num_classes, aux=aux, model_name='fcn8sd_resnetd101b_voc', **kwargs) |
def test_copy(caplog):
caplog.set_level(logging.INFO)
lg = logger.copy()
nesting = lg.nesting
count = 3
with lg.indent(count):
logger2 = lg.copy()
name = uniqstr()
with lg.indent(7):
lg.report('make', '/some/report')
logger2.report('call', name)
assert (logger2.nesting == (nesting + count))
assert any((match_report(r, activity='call', content=name, spacing=(ReportFormatter.SPACING * ((nesting + count) + 1))) for r in caplog.records)) |
class Position(object):
__slots__ = ('_underlying_position',)
def __init__(self, underlying_position):
object.__setattr__(self, '_underlying_position', underlying_position)
def __getattr__(self, attr):
return getattr(self._underlying_position, attr)
def __setattr__(self, attr, value):
raise AttributeError('cannot mutate Position objects')
def sid(self):
return self.asset
def __repr__(self):
return ('Position(%r)' % {k: getattr(self, k) for k in ('asset', 'amount', 'cost_basis', 'last_sale_price', 'last_sale_date')})
__getitem__ = _deprecated_getitem_method('position', {'sid', 'amount', 'cost_basis', 'last_sale_price', 'last_sale_date'}) |
def _find_vc2017():
root = (os.environ.get('ProgramFiles(x86)') or os.environ.get('ProgramFiles'))
if (not root):
return (None, None)
try:
path = subprocess.check_output([os.path.join(root, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe'), '-latest', '-prerelease', '-requires', 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64', '-property', 'installationPath', '-products', '*'], encoding='mbcs', errors='strict').strip()
except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
return (None, None)
path = os.path.join(path, 'VC', 'Auxiliary', 'Build')
if os.path.isdir(path):
return (15, path)
return (None, None) |
def venv_health_check(venv: Venv, package_name: Optional[str]=None) -> Tuple[(VenvProblems, str)]:
venv_dir = venv.root
python_path = venv.python_path.resolve()
if (package_name is None):
package_name = venv.main_package_name
if (not python_path.is_file()):
return (VenvProblems(invalid_interpreter=True), f''' package {red(bold(venv_dir.name))} has invalid interpreter {str(python_path)}
{hazard}''')
if (not venv.package_metadata):
return (VenvProblems(missing_metadata=True), f''' package {red(bold(venv_dir.name))} has missing internal pipx metadata.
{hazard}''')
if (venv_dir.name != canonicalize_name(venv_dir.name)):
return (VenvProblems(bad_venv_name=True), f''' package {red(bold(venv_dir.name))} needs its internal data updated.
{hazard}''')
if (venv.package_metadata[package_name].package_version == ''):
return (VenvProblems(not_installed=True), f''' package {red(bold(package_name))} {red('is not installed')} in the venv {venv_dir.name}
{hazard}''')
return (VenvProblems(), '') |
class BetaLayer(nn.Module):
def __init__(self, latent_size, stock_size, factor_size, hidden_size=64):
super(BetaLayer, self).__init__()
self.factor_size = factor_size
self.stock_size = stock_size
self.beta_layer = MLP(input_size=latent_size, output_size=factor_size, hidden_size=hidden_size, activation=nn.LeakyReLU(), out_activation=nn.LeakyReLU())
def forward(self, latent_features):
beta = self.beta_layer(latent_features)
return beta |
.parametrize('case', [CaseReducesInx3OutComp, CaseIfBasicComp, CaseIfDanglingElseInnerComp, CaseIfDanglingElseOutterComp, CaseElifBranchComp, CaseNestedIfComp, CaseForLoopEmptySequenceComp, CaseForRangeLowerUpperStepPassThroughComp, CaseIfExpInForStmtComp, CaseIfExpBothImplicitComp, CaseIfBoolOpInForStmtComp, CaseIfTmpVarInForStmtComp, CaseFixedSizeSliceComp, CaseLambdaConnectComp, CaseLambdaConnectWithListComp, CaseBoolTmpVarComp, CaseTmpVarInUpdateffComp])
def test_verilog_behavioral_L2(case):
run_test(case, case.DUT()) |
def load_model_weights(weights_collection, model, dataset, classes, include_top):
weights = find_weights(weights_collection, model.name, dataset, include_top)
if weights:
weights = weights[0]
if (include_top and (weights['classes'] != classes)):
raise ValueError('If using `weights` and `include_top` as true, `classes` should be {}'.format(weights['classes']))
weights_path = get_file(weights['name'], weights['url'], cache_subdir='models', md5_hash=weights['md5'])
model.load_weights(weights_path, by_name=True)
else:
raise ValueError((('There is no weights for such configuration: ' + 'model = {}, dataset = {}, '.format(model.name, dataset)) + 'classes = {}, include_top = {}.'.format(classes, include_top))) |
def make_seg_list(utt_index_list, utt_list, utt_len_list, seg_len, seg_shift, if_seg_rand, utt2label=None):
seg_list = []
for utt_index in utt_index_list:
utt_id = utt_list[utt_index]
utt_len = utt_len_list[utt_index]
label = (utt2label[utt_id] if utt2label else None)
n_segs = (((utt_len - seg_len) // seg_shift) + 1)
if if_seg_rand:
start_f_list = np.random.choice(xrange(((utt_len - seg_len) + 1)), n_segs)
else:
start_f_list = (np.arange(n_segs) * seg_shift)
for f in start_f_list:
seg_list.append(Segment(utt_id, f, (f + seg_len), label))
return seg_list |
class GCM(ModeWithInitializationVector, ModeWithAuthenticationTag):
name = 'GCM'
_MAX_ENCRYPTED_BYTES = (((2 ** 39) - 256) // 8)
_MAX_AAD_BYTES = ((2 ** 64) // 8)
def __init__(self, initialization_vector: bytes, tag: (bytes | None)=None, min_tag_length: int=16):
utils._check_byteslike('initialization_vector', initialization_vector)
if ((len(initialization_vector) < 8) or (len(initialization_vector) > 128)):
raise ValueError('initialization_vector must be between 8 and 128 bytes (64 and 1024 bits).')
self._initialization_vector = initialization_vector
if (tag is not None):
utils._check_bytes('tag', tag)
if (min_tag_length < 4):
raise ValueError('min_tag_length must be >= 4')
if (len(tag) < min_tag_length):
raise ValueError('Authentication tag must be {} bytes or longer.'.format(min_tag_length))
self._tag = tag
self._min_tag_length = min_tag_length
def tag(self) -> (bytes | None):
return self._tag
def initialization_vector(self) -> bytes:
return self._initialization_vector
def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None:
_check_aes_key_length(self, algorithm)
if (not isinstance(algorithm, BlockCipherAlgorithm)):
raise UnsupportedAlgorithm('GCM requires a block cipher algorithm', _Reasons.UNSUPPORTED_CIPHER)
block_size_bytes = (algorithm.block_size // 8)
if ((self._tag is not None) and (len(self._tag) > block_size_bytes)):
raise ValueError('Authentication tag cannot be more than {} bytes.'.format(block_size_bytes)) |
_cache(maxsize=2)
def make_unicode_string(archbits: int):
native_type = struct.get_native_type(archbits)
Struct = struct.get_aligned_struct(archbits)
class UNICODE_STRING(Struct):
_fields_ = (('Length', ctypes.c_uint16), ('MaximumLength', ctypes.c_uint16), ('Buffer', native_type))
return UNICODE_STRING |
.parametrize('command_and_args, text, output_contains, first_match', [('mutex', '', 'the optional positional', None), ('mutex', '--fl', '', '--flag '), ('mutex --flag', '', 'the flag arg', None), ('mutex pos_val', '--fl', '', None), ('mutex pos_val --flag', '', 'f/--flag: not allowed with argument optional_pos', None), ('mutex --flag flag_val', '', 'the last arg', None), ('mutex --flag flag_val', '--oth', '', None), ('mutex --flag flag_val --other', '', '-o/--other_flag: not allowed with argument -f/--flag', None), ('mutex --flag flag_val --flag', '', 'the flag arg', None)])
def test_complete_mutex_group(ac_app, command_and_args, text, output_contains, first_match, capsys):
line = '{} {}'.format(command_and_args, text)
endidx = len(line)
begidx = (endidx - len(text))
assert (first_match == complete_tester(text, line, begidx, endidx, ac_app))
(out, err) = capsys.readouterr()
assert (output_contains in out) |
def test_builder_no_amd():
existing = DockerSchema2ManifestList(Bytes.for_string_or_unicode(MANIFESTLIST_BYTES))
builder = DockerSchema2ManifestListBuilder()
for (index, manifest) in enumerate(existing.manifests(retriever)):
builder.add_manifest(manifest.manifest_obj, 'intel386', 'os')
built = builder.build()
assert (len(built.manifests(retriever)) == 2)
assert (built.amd64_linux_manifest_digest is None) |
class TestPower(TestCase):
def test_power_ttest(self):
assert np.isclose(power_ttest(d=0.5, n=20, contrast='one-sample', alternative='greater'), 0.6951493)
assert np.isclose(power_ttest(d=0.5, n=20, contrast='paired', alternative='greater'), 0.6951493)
assert np.isclose(power_ttest(d=0.5, power=0.8, contrast='one-sample', alternative='greater'), 26.13753)
assert np.isclose(power_ttest(n=20, power=0.8, contrast='one-sample', alternative='greater'), 0.5769185)
assert np.isclose(power_ttest(d=0.5, n=20, power=0.8, alpha=None, alternative='greater', contrast='one-sample'), 0., rtol=0.001)
assert np.isclose(power_ttest(d=0.5, n=20, contrast='one-sample', alternative='less'), 7.083752e-05)
assert np.isclose(power_ttest(d=(- 0.5), n=20, contrast='one-sample', alternative='less'), 0.6951493)
assert np.isclose(power_ttest(d=0.5, n=20, contrast='paired', alternative='less'), 7.083752e-05)
assert np.isclose(power_ttest(d=(- 0.5), power=0.8, contrast='one-sample', alternative='less'), 26.13753)
assert np.isclose(power_ttest(n=20, power=0.8, contrast='one-sample', alternative='less'), (- 0.5769185))
assert np.isclose(power_ttest(d=(- 0.5), n=20, power=0.8, alpha=None, alternative='less', contrast='one-sample'), 0., rtol=0.001)
assert np.isclose(power_ttest(d=0.5, n=20, contrast='one-sample'), 0.5645044, rtol=0.001)
assert np.isclose(power_ttest(d=0.5, power=0.8, contrast='one-sample'), 33.36713)
assert np.isclose(power_ttest(n=20, power=0.8, contrast='one-sample'), 0.6604413)
assert np.isclose(power_ttest(d=0.5, n=20, power=0.8, alpha=None, contrast='one-sample'), 0.1798043, rtol=0.01)
assert np.isclose(power_ttest(d=0.5, n=20, alternative='greater'), 0.4633743)
assert np.isclose(power_ttest(d=0.5, power=0.8, alternative='greater'), 50.1508)
assert np.isclose(power_ttest(n=20, power=0.8, alternative='greater'), 0.8006879)
assert np.isclose(power_ttest(d=0.5, n=20, power=0.8, alpha=None, alternative='greater'), 0.2315111, rtol=0.1)
assert np.isclose(power_ttest(d=(- 0.5), n=20, alternative='less'), 0.4633743)
assert np.isclose(power_ttest(d=(- 0.5), power=0.8, alternative='less'), 50.1508)
assert np.isclose(power_ttest(n=20, power=0.8, alternative='less'), (- 0.8006879))
assert np.isclose(power_ttest(d=(- 0.5), n=20, power=0.8, alpha=None, alternative='less'), 0.2315111, rtol=0.1)
assert np.isclose(power_ttest(d=0.5, n=20), 0.337939, rtol=0.001)
assert np.isclose(power_ttest(d=0.5, power=0.8), 63.76561)
assert np.isclose(power_ttest(n=20, power=0.8), 0.9091587, rtol=0.001)
assert np.isclose(power_ttest(d=0.5, n=20, power=0.8, alpha=None), 0.4430163, rtol=0.1)
with pytest.raises(ValueError):
power_ttest(d=0.5)
def test_power_ttest2n(self):
assert np.isclose(power_ttest2n(nx=20, ny=18, d=0.5, alternative='greater'), 0.4463552)
assert np.isclose(power_ttest2n(nx=20, ny=18, power=0.8, alternative='greater'), 0.8234684)
assert np.isclose(power_ttest2n(nx=20, ny=18, d=0.5, power=0.8, alpha=None, alternative='greater'), 0.2444025, rtol=0.1)
assert np.isclose(power_ttest2n(nx=20, ny=18, d=0.5, alternative='less'), 0.)
assert np.isclose(power_ttest2n(nx=20, ny=18, d=(- 0.5), alternative='less'), 0.4463552)
assert np.isclose(power_ttest2n(nx=20, ny=18, power=0.8, alternative='less'), (- 0.8234684))
assert np.isclose(power_ttest2n(nx=20, ny=18, d=0.5, power=0.8, alpha=None, alternative='less'), 0.989896, rtol=0.1)
assert np.isclose(power_ttest2n(nx=20, ny=18, d=0.5), 0.3223224, rtol=0.001)
assert np.isclose(power_ttest2n(nx=20, ny=18, power=0.8), 0.9354168)
assert np.isclose(power_ttest2n(nx=20, ny=18, d=0.5, power=0.8, alpha=None), 0.46372, rtol=0.1)
with pytest.raises(ValueError):
power_ttest2n(nx=20, ny=20)
def test_power_anova(self):
eta = 0.0727003
assert np.isclose(power_anova(eta_squared=eta, k=4, n=20), 0.5149793)
assert np.isclose(power_anova(eta_squared=eta, n=20, power=0.8), 10.70313)
assert np.isclose(power_anova(eta_squared=eta, k=4, power=0.8), 35.75789)
assert np.isclose(power_anova(k=4, n=20, power=0.8, alpha=0.05), 0.1254838, rtol=0.001)
assert np.isclose(power_anova(eta_squared=eta, k=4, n=20, power=0.8, alpha=None), 0.2268337)
with pytest.raises(ValueError):
power_anova(eta_squared=eta, k=2)
def test_power_rm_anova(self):
eta = 0.2
eta2 = 0.
assert np.isclose(power_rm_anova(eta_squared=eta, m=4, n=20, epsilon=1, corr=0.5), 0.999807)
assert np.isclose(power_rm_anova(eta_squared=eta, m=3, n=20, epsilon=0.9, corr=0.5), 0.9968277)
assert np.isclose(power_rm_anova(eta_squared=eta2, m=3, n=10, epsilon=1, corr=0.7), 0.5271828)
assert np.isclose(power_rm_anova(eta_squared=eta2, m=2, n=18, epsilon=1, corr=0.6), 0.6089353)
assert np.isclose(power_rm_anova(eta_squared=eta2, m=3, n=10, corr=(- 0.5)), 0.13818, rtol=0.0001)
assert np.isclose(power_rm_anova(eta_squared=eta2, n=30, power=0.9), 3.)
assert np.isclose(power_rm_anova(eta_squared=eta2, n=20, power=0.8), 5.)
assert (np.ceil(power_rm_anova(eta_squared=eta, m=3, power=0.8)) == 9)
assert (np.ceil(power_rm_anova(eta_squared=eta2, m=4, power=0.8)) == 24)
assert (np.ceil(power_rm_anova(eta2, m=3, power=0.9, corr=0.8)) == 16)
assert (np.ceil(power_rm_anova(eta2, 3, power=0.9, epsilon=0.9)) == 39)
assert np.isclose(power_rm_anova(n=20, m=3, power=0.8), 0.0800112)
assert np.isclose(power_rm_anova(n=20, m=2, power=0.9), 0.)
assert np.isclose(power_rm_anova(n=50, m=4, power=0.95, corr=0.7), 0.)
assert np.isclose(power_rm_anova(eta_squared=eta, m=3, n=50, power=0.95, alpha=None), 3.652063e-09)
assert np.isclose(power_rm_anova(eta_squared=eta2, m=2, n=100, power=0.95, corr=0.6, alpha=None), 0.0001797, rtol=0.0001)
with pytest.raises(ValueError):
power_rm_anova(eta_squared=eta, m=2)
def test_power_corr(self):
assert np.isclose(power_corr(r=0.5, n=20), 0.6378746)
assert np.isclose(power_corr(r=0.5, power=0.8), 28.24841)
assert np.isclose(power_corr(n=20, power=0.8), 0.5821478)
assert np.isclose(power_corr(r=0.5, n=20, power=0.8, alpha=None), 0.1377332, rtol=0.001)
assert np.isclose(power_corr(r=0.5, n=20, alternative='greater'), 0.7509873)
assert np.isclose(power_corr(r=(- 0.1), n=20, alternative='greater'), 0.)
assert np.isclose(power_corr(r=0.5, power=0.8, alternative='greater'), 22.60907)
assert np.isclose(power_corr(n=20, power=0.8, alternative='greater'), 0.5286949)
assert np.isclose(power_corr(r=(- 0.5), n=20, alternative='less'), 0.7509873)
assert np.isclose(power_corr(r=(- 0.1), n=20, alternative='less'), 0.1118106)
assert np.isclose(power_corr(r=0.1, n=20, alternative='less'), 0.)
assert np.isclose(power_corr(r=(- 0.5), power=0.8, alternative='less'), 22.60907)
assert np.isclose(power_corr(n=20, power=0.8, alternative='less'), (- 0.5286949))
with pytest.raises(ValueError):
power_corr(r=0.5)
with pytest.warns(UserWarning):
power_corr(r=0.5, n=4)
power_corr(power=0.8, n=4)
def test_power_chi2(self):
assert np.isclose(power_chi2(dof=1, w=0.3, n=20), 0.2686618)
assert np.isclose(power_chi2(dof=2, w=0.3, n=100), 0.7706831)
assert np.isclose(power_chi2(dof=1, w=0.3, power=0.8), 87.20954)
assert np.isclose(power_chi2(dof=3, w=0.3, power=0.8), 121.1396)
assert np.isclose(power_chi2(dof=4, n=50, power=0.8), 0.4885751)
assert np.isclose(power_chi2(dof=1, n=50, power=0.8), 0.3962023)
assert np.isclose(power_chi2(dof=1, w=0.3, n=100, power=0.8, alpha=None), 0., atol=0.001)
with pytest.raises(ValueError):
power_chi2(1, w=0.3) |
class ResNet101vd(nn.Module):
def __init__(self, cout=64, idx=0):
super(ResNet101vd, self).__init__()
self.cout = cout
self.idx = idx
self.resnet101vd = ResNet(channels=[64, 128, 256, 512], cout=cout, idx=idx, block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, bool_DeformableConv2d=False)
def forward(self, x):
x = self.resnet101vd(x)
return x |
.parametrize('language, feature_keyword, scenario_keyword', [('en', 'Feature', 'Scenario'), ('de', 'Funktionalitat', 'Szenario')])
def test_creating_language_agnostic_parser(language, feature_keyword, scenario_keyword, core):
parser = FeatureParser(core, '/', 1, language=language)
assert (parser.keywords.feature == feature_keyword)
assert (parser.keywords.scenario == scenario_keyword) |
def main(data_dir, client, bc, config):
benchmark(read_tables, data_dir, bc, dask_profile=config['dask_profile'])
query_1 = '\n SELECT\n CAST(wcs_user_sk AS INTEGER) AS wcs_user_sk,\n CAST(wcs_item_sk AS INTEGER) AS wcs_item_sk,\n (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec\n FROM web_clickstreams\n WHERE wcs_item_sk IS NOT NULL\n AND wcs_user_sk IS NOT NULL\n ORDER BY wcs_user_sk\n '
wcs_result = bc.sql(query_1)
session_df = wcs_result.map_partitions(get_distinct_sessions, keep_cols=['wcs_user_sk', 'wcs_item_sk'], time_out=q02_session_timeout_inSec)
del wcs_result
session_df = session_df.persist()
wait(session_df)
bc.create_table('session_df', session_df)
last_query = f'''
WITH item_df AS (
SELECT wcs_user_sk, session_id
FROM session_df
WHERE wcs_item_sk = {q02_item_sk}
)
SELECT sd.wcs_item_sk as item_sk_1,
count(sd.wcs_item_sk) as cnt
FROM session_df sd
INNER JOIN item_df id
ON sd.wcs_user_sk = id.wcs_user_sk
AND sd.session_id = id.session_id
AND sd.wcs_item_sk <> {q02_item_sk}
GROUP BY sd.wcs_item_sk
ORDER BY cnt desc
LIMIT {q02_limit}
'''
result = bc.sql(last_query)
result['item_sk_2'] = q02_item_sk
result_order = ['item_sk_1', 'item_sk_2', 'cnt']
result = result[result_order]
del session_df
bc.drop_table('session_df')
return result |
.route('/items/<content_type>/<heading>/')
def items(content_type: str, heading: str) -> None:
if (heading == 'alphabet'):
alphabet(content_type)
elif (heading == 'genres'):
genres(content_type)
elif (heading == 'search'):
search(content_type)
else:
data = {'type': (None if (content_type == 'all') else content_type.rstrip('s'))}
data.update(plugin.kwargs)
exclude_anime = (plugin.settings.exclude_anime == 'true')
if (heading == 'sort'):
data.update(plugin.sorting_params)
response = plugin.items.get('items', data=data, exclude_anime=exclude_anime)
else:
response = plugin.items.get(f'items/{heading}', data=data, exclude_anime=exclude_anime)
render_items(response.items, content_type)
render_pagination(response.pagination) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.