code stringlengths 281 23.7M |
|---|
class UnetCond5DS(nn.Module):
    """Conditional U-Net with five stride-2 downsampling stages.

    The decoder blocks are conditional-batch-norm up-convolutions that all
    receive the same conditioning vector ``cond`` alongside skip features
    from the matching encoder stage.
    """

    def __init__(self, input_nc=3, output_nc=3, nf=64, cond_dim=256, up_mode='upconv', use_dropout=False, return_lowres=False):
        super(UnetCond5DS, self).__init__()
        assert (up_mode in ('upconv', 'upsample'))
        self.return_lowres = return_lowres
        # Encoder: five stride-2 convolutions; channels grow up to 8*nf.
        self.conv1 = Conv2DBlock(input_nc, nf, 4, 2, 1, use_bias=False, use_bn=False, use_relu=False)
        self.conv2 = Conv2DBlock(nf, 2 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv3 = Conv2DBlock(2 * nf, 4 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv4 = Conv2DBlock(4 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv5 = Conv2DBlock(8 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=False)
        # Decoder: input channels double (except the first block) because the
        # skip connection is concatenated before each up-convolution.
        self.upconv1 = UpConv2DBlockCBNCond(8 * nf, 8 * nf, 4, 2, 1, cond_dim=cond_dim, up_mode=up_mode)
        self.upconv2 = UpConv2DBlockCBNCond(8 * nf * 2, 4 * nf, 4, 2, 1, cond_dim=cond_dim, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv3 = UpConv2DBlockCBNCond(4 * nf * 2, 2 * nf, 4, 2, 1, cond_dim=cond_dim, up_mode=up_mode, use_dropout=use_dropout)
        self.upconvC4 = UpConv2DBlockCBNCond(2 * nf * 2, nf, 4, 2, 1, cond_dim=cond_dim, up_mode=up_mode)
        self.upconvC5 = UpConv2DBlockCBNCond(nf * 2, output_nc, 4, 2, 1, cond_dim=cond_dim, use_bn=False, use_bias=True, up_mode=up_mode)

    def forward(self, x, cond):
        # Encoder pass, keeping every intermediate for the skip connections.
        enc1 = self.conv1(x)
        enc2 = self.conv2(enc1)
        enc3 = self.conv3(enc2)
        enc4 = self.conv4(enc3)
        bottleneck = self.conv5(enc4)
        # Decoder pass: condition vector plus matching encoder feature map.
        dec = self.upconv1(bottleneck, cond, enc4)
        dec = self.upconv2(dec, cond, enc3)
        dec = self.upconv3(dec, cond, enc2)
        dec = self.upconvC4(dec, cond, enc1)
        out = self.upconvC5(dec, cond)
        return out
@with_fixtures(SqlAlchemyFixture, WebFixture)  # NOTE(review): decorator arrived mangled as '_fixtures(...)'; restored -- confirm against the project's fixture API
def test_setting_cookies_on_response(sql_alchemy_fixture, web_fixture):
    """A UserSession writes its session-key cookie (and, when secured,
    the secure-salt cookie) onto the response with the configured
    name, path and max-age."""
    from http.cookies import SimpleCookie

    fixture = web_fixture

    @stubclass(UserSession)  # NOTE(review): a bare '(UserSession)' expression preceded this class in the source -- looks like a mangled decorator; restored as @stubclass, confirm
    class UserSessionStub(UserSession):
        __tablename__ = 'usersessionstub'
        __mapper_args__ = {'polymorphic_identity': 'usersessionstub'}
        id = Column(Integer, ForeignKey('usersession.id'), primary_key=True)
        secured = False

        def is_secured(self):
            return self.secured

    with sql_alchemy_fixture.persistent_test_classes(UserSessionStub):
        user_session = UserSessionStub()

        class ResponseStub(Response):
            @property  # accessed as `response.cookies[...]` below, so this must be a property
            def cookies(self):
                # The source line was truncated ('cookies ='); a SimpleCookie
                # jar accumulates every Set-Cookie header on the response.
                cookies = SimpleCookie()
                for (header, value) in self.headerlist:
                    if (header == 'Set-Cookie'):
                        cookies.load(value)
                return cookies

        # Unsecured session: only the plain session-key cookie is set.
        user_session.secured = False
        response = ResponseStub()
        user_session.set_session_key(response)
        session_cookie = response.cookies[fixture.config.web.session_key_name]
        assert (session_cookie.value == urllib.parse.quote(user_session.as_key()))
        assert (session_cookie['path'] == '/')
        assert (not session_cookie['max-age'])
        assert (fixture.config.web.secure_key_name not in response.cookies)

        # Secured session: the secure-salt cookie is additionally set,
        # marked 'secure' and given an idle lifetime.
        user_session.secured = True
        response = ResponseStub()
        user_session.set_session_key(response)
        assert (fixture.config.web.session_key_name in response.cookies)
        secure_cookie = response.cookies[fixture.config.web.secure_key_name]
        assert (user_session.secure_salt == secure_cookie.value)
        assert (secure_cookie['path'] == '/')
        assert (secure_cookie['max-age'] == ('%s' % fixture.config.web.idle_secure_lifetime))
        assert ('secure' in secure_cookie)
def create_manifest_label(manifest_id, key, value, source_type_name, media_type_name=None):
    """Create a Label and attach it to the given manifest.

    Returns the created Label, or None if the manifest no longer exists.

    Raises:
        InvalidLabelKeyException: if the key is empty, or is invalid/reserved
            for sources other than 'manifest'.
        InvalidMediaTypeException: if ``media_type_name`` is unknown.
    """
    if not key:
        raise InvalidLabelKeyException('Missing key on label')

    # Only labels sourced from the manifest itself may use reserved keys.
    if source_type_name != 'manifest' and not validate_label_key(key):
        raise InvalidLabelKeyException('Key `%s` is invalid or reserved' % key)

    # Default the media type, upgrading to JSON when the value parses as JSON.
    if media_type_name is None:
        media_type_name = 'text/plain'
        if is_json(value):
            media_type_name = 'application/json'

    try:
        media_type_id = Label.media_type.get_id(media_type_name)
    except MediaType.DoesNotExist:
        # The underlying lookup failure adds nothing for callers; suppress it.
        raise InvalidMediaTypeException() from None

    source_type_id = Label.source_type.get_id(source_type_name)

    try:
        manifest = Manifest.select(Manifest, Repository).join(Repository).where(Manifest.id == manifest_id).get()
    except Manifest.DoesNotExist:
        return None

    repository = manifest.repository
    with db_transaction():
        label = Label.create(key=key, value=value, source_type=source_type_id, media_type=media_type_id)
        # Link row only; its handle was previously bound to an unused local.
        ManifestLabel.create(manifest=manifest_id, label=label, repository=repository)
    return label
class KombuConsumerWorker(ConsumerMixin, PumpWorker):
    """Pump worker that drains Kombu queues into a local work queue.

    Each delivered message is handed to ``work_queue.put`` via the
    consumer's ``on_message`` hook; any extra keyword arguments are
    forwarded verbatim to the Consumer constructor.
    """

    def __init__(self, connection: kombu.Connection, queues: Sequence[kombu.Queue], work_queue: WorkQueue, serializer: Optional[KombuSerializer]=None, **kwargs: Any):
        self.connection = connection
        self.queues = queues
        self.work_queue = work_queue
        self.serializer = serializer
        self.kwargs = kwargs

    def get_consumers(self, Consumer: kombu.Consumer, channel: Channel) -> Sequence[kombu.Consumer]:
        options = dict(queues=self.queues, on_message=self.work_queue.put, **self.kwargs)
        if self.serializer:
            # Restrict accepted content types to the configured serializer.
            options['accept'] = [self.serializer.name]
        return [Consumer(**options)]

    def stop(self) -> None:
        # ConsumerMixin polls this flag and exits its consume loop.
        logger.debug('Closing KombuConsumerWorker.')
        self.should_stop = True
class IterativeOperatorWInfo(LinearOperator):
    """LinearOperator that applies the iterative algorithm ``alg`` to ``A``
    and records the algorithm's metadata from the most recent call."""

    def __init__(self, A: LinearOperator, alg: Algorithm):
        super().__init__(A.dtype, A.shape)
        self.A = A
        self.alg = alg
        # Refreshed by every _matmat invocation.
        self.info = {}

    def _matmat(self, X):
        # The algorithm returns the result plus an info dict; keep the latter.
        result, self.info = self.alg(self.A, X)
        return result

    def __str__(self):
        return f'{self.alg}({str(self.A)})'
class ListSearcher(Searcher):
    """Searcher that replays a fixed grid of configurations in order."""

    def __init__(self, param_grid):
        # Materialize the whole grid up front; suggestions are served FIFO.
        self._configurations = list(ParameterGrid(param_grid))
        Searcher.__init__(self)

    def suggest(self, trial_id):
        # Implicitly yields None once the grid is exhausted.
        if not self._configurations:
            return None
        return self._configurations.pop(0)

    def on_trial_complete(self, **kwargs):
        # The schedule is fixed; trial results carry no information for us.
        pass
def dataloader_didemo_train(args, tokenizer):
    """Build the DiDeMo training DataLoader for distributed training.

    Returns (dataloader, dataset_size, sampler).  Shuffling is delegated to
    the DistributedSampler, which is why the DataLoader ``shuffle`` argument
    evaluates to False here (``train_sampler`` is never None).
    """
    didemo_dataset = DiDeMo_DataLoader(subset='train', data_path=args.data_path, features_path=args.features_path, max_words=args.max_words, feature_framerate=args.feature_framerate, tokenizer=tokenizer, max_frames=args.max_frames, frame_order=args.train_frame_order, slice_framepos=args.slice_framepos)
    # One sampler per process; each rank sees a disjoint shard of the data.
    train_sampler = torch.utils.data.distributed.DistributedSampler(didemo_dataset)
    # batch_size is the global batch divided across the available GPUs.
    dataloader = DataLoader(didemo_dataset, batch_size=(args.batch_size // args.n_gpu), num_workers=args.num_thread_reader, pin_memory=True, shuffle=(train_sampler is None), sampler=train_sampler, drop_last=True)
    return (dataloader, len(didemo_dataset), train_sampler)
class GD32VF1xxUsart(QlConnectivityPeripheral):
    """GD32VF1xx USART peripheral model (register file shown here)."""

    class Type(ctypes.Structure):
        # Memory-mapped USART register layout; one 32-bit word per register.
        _fields_ = [('STAT', ctypes.c_uint32), ('DATA', ctypes.c_uint32), ('BAUD', ctypes.c_uint32), ('CTL0', ctypes.c_uint32), ('CTL1', ctypes.c_uint32), ('CTL2', ctypes.c_uint32), ('GP', ctypes.c_uint32)]

    def __init__(self, ql, label):
        super().__init__(ql, label)
        # STAT reset value 192 (0xC0) -- presumably the transmitter-ready
        # status bits set at reset; TODO confirm against the GD32VF103 manual.
        self.instance = self.struct(STAT=192, DATA=0, BAUD=0, CTL0=0, CTL1=0, CTL2=0, GP=0)
@given(simple_typeddicts(total=False, not_required=True, typeddict_cls=(None if (not is_py38) else ExtensionsTypedDict)), booleans())  # NOTE(review): the '@given' decorator name was lost in the source (bare strategy tuple); restored -- confirm
def test_required(cls_and_instance: Tuple[(type, Dict)], detailed_validation: bool) -> None:
    """Round-tripping a (possibly NotRequired) TypedDict instance through
    the converter must reproduce the original instance exactly."""
    c = mk_converter(detailed_validation=detailed_validation)
    (cls, instance) = cls_and_instance
    unstructured = c.unstructure(instance, unstructure_as=cls)
    restructured = c.structure(unstructured, cls)
    assert (restructured == instance)
def save_config():
    """Write the active arguments to <save_path>/config.txt as CLI flags.

    Values whose string form contains 'False' are skipped entirely, values
    containing 'True' are written as a bare flag, and everything else is
    written as '--key value '.  No-op when running in test mode.
    """
    if config.test:
        return
    if not os.path.exists(config.save_path):
        os.makedirs(config.save_path)
    with open((config.save_path + '/config.txt'), 'w') as the_file:
        for key, val in config.args.__dict__.items():
            rendered = str(val)
            if 'False' in rendered:
                # store-false flags are represented by their absence
                continue
            if 'True' in rendered:
                the_file.write('--{} '.format(key))
            else:
                the_file.write('--{} {} '.format(key, val))
class SquadTextLengthPreprocessor(Preprocessor):
    """Truncate paragraph text in place, for the question's own paragraph
    and every distractor paragraph.

    NOTE(review): despite the parameter name, this slices *characters*
    (``par_text[:threshold]``), not tokens -- confirm that is intended.
    """

    def __init__(self, num_tokens_th):
        self.num_tokens_th = num_tokens_th

    def preprocess(self, question: SquadQuestionWithDistractors):
        limit = self.num_tokens_th
        for paragraph in (question.distractors + [question.paragraph]):
            paragraph.par_text = paragraph.par_text[:limit]
        return question
def main() -> None:
    """CLI entry point: 'compile' pins current versions; 'upgrade' bumps
    every package (after confirmation) or only the ones named."""
    parser = ArgumentParser(prog=SCRIPT_NAME)
    parser.add_argument('-v', '--verbose', action='store_true', default=False)
    parser.add_argument('-n', '--dry-run', action='store_true', default=False)
    commands = parser.add_subparsers(title='Sub-commands', required=True, dest='command')
    commands.add_parser('compile', help='Compile source files. Keep current versions unless changed requirements force newer versions.')
    upgrade_parser = commands.add_parser('upgrade', help='Compile source files and upgrade package versions. Optionally specify package names to upgrade only those.')
    upgrade_parser.add_argument('--pre', action='store_true', default=False, help='Use pre-release versions of packages if available.')
    upgrade_parser.add_argument('packages', metavar='package', nargs='*')
    parsed = parser.parse_args()

    _ensure_pip_tools()

    if parsed.command == 'compile':
        compile_source(verbose=parsed.verbose, dry_run=parsed.dry_run)
    elif parsed.command == 'upgrade':
        packages = set(parsed.packages)
        if packages:
            # Targeted upgrade of just the named packages.
            if parsed.pre:
                print("Warning: Using the '--pre' option can cause unintended upgrades to prerelease versions of unrelated packages. This is due to constraints in the underlying tools (pip-compile / pip) that don't currently allow constraining pre-releases to only specific packages.\nPlease carefully inspect the generated output files!")
            upgrade_source(packages, verbose=parsed.verbose, dry_run=parsed.dry_run, pre=parsed.pre)
        else:
            # Upgrading everything is destructive enough to need confirmation.
            resp = input('Are you sure you want to upgrade ALL packages? [y/N] ')
            if resp.lower() != 'y':
                print('Aborting')
                sys.exit(1)
            compile_source(upgrade_all=True, verbose=parsed.verbose, dry_run=parsed.dry_run)
class SnekIOTests(TestCase):
    """Unit tests for snekio path sanitisation and FileAttachment parsing."""

    def test_safe_path(self) -> None:
        # Relative, non-traversing paths pass through unchanged.
        table = [('', ''), ('foo', 'foo'), ('foo/bar', 'foo/bar'), ('foo/bar.ext', 'foo/bar.ext')]
        for given, wanted in table:
            self.assertEqual(snekio.safe_path(given), wanted)

    def test_safe_path_raise(self):
        # Traversal and absolute paths are rejected with descriptive errors.
        table = [
            ('../foo', IllegalPathError, "File path '../foo' may not traverse beyond root"),
            ('/foo', IllegalPathError, "File path '/foo' must be relative"),
        ]
        for given, error, message in table:
            with self.assertRaises(error) as caught:
                snekio.safe_path(given)
            self.assertEqual(str(caught.exception), message)

    def test_file_from_dict(self):
        # 'content' is optional and base64-decoded when present.
        table = [
            ({'path': 'foo', 'content': ''}, FileAttachment('foo', b'')),
            ({'path': 'foo'}, FileAttachment('foo', b'')),
            ({'path': 'foo', 'content': 'Zm9v'}, FileAttachment('foo', b'foo')),
            ({'path': 'foo/bar.ext', 'content': 'Zm9v'}, FileAttachment('foo/bar.ext', b'foo')),
        ]
        for payload, wanted in table:
            self.assertEqual(FileAttachment.from_dict(payload), wanted)

    def test_file_from_dict_error(self):
        # Invalid base64 is reported together with the offending file path.
        table = [
            ({'path': 'foo', 'content': '9'}, ParsingError, "Invalid base64 encoding for file 'foo'"),
            ({'path': 'var/a.txt', 'content': '1='}, ParsingError, "Invalid base64 encoding for file 'var/a.txt'"),
        ]
        for payload, error, message in table:
            with self.assertRaises(error) as caught:
                FileAttachment.from_dict(payload)
            self.assertEqual(str(caught.exception), message)
def main():
    """Profile one or all open_clip models and print a results table.

    --model is either 'all' or a comma-separated list of model names;
    results are sorted by GMACs and optionally written to --results-file.
    """
    args = parser.parse_args()
    if args.model == 'all':
        parsed_model = open_clip.list_models()
    else:
        parsed_model = args.model.split(',')

    results = [profile_model(m) for m in parsed_model]
    if not results:
        # Guard: an empty model list previously crashed on results[0].keys().
        print('No models profiled.')
        return

    df = pd.DataFrame(results, columns=results[0].keys())
    df = df.sort_values('gmacs')
    print(df)
    if args.results_file:
        df.to_csv(args.results_file, index=False)
def get_arg_value(name_or_pos: Argument, arguments: BoundArgs) -> t.Any:
    """Look up a bound argument's value by position or by name.

    Args:
        name_or_pos: positional index (int) or parameter name (str).
        arguments: mapping of parameter names to bound values, in call order.

    Returns:
        The matching argument value.

    Raises:
        ValueError: if the index is out of bounds or the name is unknown.
        TypeError: if ``name_or_pos`` is neither an int nor a str.
    """
    if isinstance(name_or_pos, int):
        arg_values = tuple(arguments.items())
        arg_pos = name_or_pos
        try:
            (name, value) = arg_values[arg_pos]
            return value
        except IndexError:
            # The IndexError adds nothing for callers; suppress the chain.
            raise ValueError(f'Argument position {arg_pos} is out of bounds.') from None
    elif isinstance(name_or_pos, str):
        arg_name = name_or_pos
        try:
            return arguments[arg_name]
        except KeyError:
            raise ValueError(f"Argument {arg_name!r} doesn't exist.") from None
    else:
        raise TypeError("'arg' must either be an int (positional index) or a str (keyword).")
class MainFrame(wx.Frame):
    """Top-level pypilot client window.

    Shows every value published by the pypilot server in a scrolled grid,
    builds an editing control (text box, checkbox, slider/spin, choice or
    reset button) matching each value's type, and polls the connection on
    a 500 ms wx.Timer.
    """

    def __init__(self):
        wx.Frame.__init__(self, None, title='pypilot client', size=(1000, 600))
        # Optional server host from the command line.
        host = ''
        if (len(sys.argv) > 1):
            host = sys.argv[1]
        self.client = pypilotClient(host)
        self.connected = False
        ssizer = wx.FlexGridSizer(0, 1, 0, 0)
        ssizer.AddGrowableRow(0)
        ssizer.AddGrowableCol(0)
        ssizer.SetFlexibleDirection(wx.BOTH)
        ssizer.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
        self.scrolledWindow = wx.ScrolledWindow(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, (wx.HSCROLL | wx.VSCROLL))
        self.scrolledWindow.SetScrollRate(5, 5)
        # Refresh(None) also initializes self.values/self.controls/self.sliderrange.
        self.Refresh(None)
        ssizer.Add(self.scrolledWindow, 1, (wx.EXPAND | wx.ALL), 5)
        bsizer = wx.FlexGridSizer(1, 0, 0, 0)
        self.bRefresh = wx.Button(self, wx.ID_ANY, _('Refresh'))
        self.bRefresh.Bind(wx.EVT_BUTTON, self.Refresh)
        bsizer.Add(self.bRefresh)
        self.bScope = wx.Button(self, wx.ID_ANY, _('Scope'))
        # Launch the scope viewer as a separate process, forwarding argv.
        self.bScope.Bind(wx.EVT_BUTTON, (lambda event: subprocess.Popen((['python', ((os.path.abspath(os.path.dirname(__file__)) + '/') + 'scope_wx.py')] + sys.argv[1:]))))
        bsizer.Add(self.bScope)
        self.bClose = wx.Button(self, wx.ID_ANY, 'Close')
        self.bClose.Bind(wx.EVT_BUTTON, exit)
        bsizer.Add(self.bClose)
        ssizer.Add(bsizer, 1, wx.EXPAND)
        self.SetSizer(ssizer)
        self.Layout()
        # Poll the server connection twice a second.
        self.timer = wx.Timer(self, wx.ID_ANY)
        self.timer.Start(500)
        self.Bind(wx.EVT_TIMER, self.receive_messages, id=wx.ID_ANY)

    def layout_widgets(self, value_list):
        """Create a label plus a type-appropriate edit control for every
        server value that is not laid out yet.

        Each control's callback is built inside an immediately-invoked
        ``proc()`` so that ``name`` (and the slider range) bind to the
        current iteration rather than to the loop variable.
        """
        sizer = self.scrolledWindow.GetSizer()
        if (not sizer):
            sizer = wx.FlexGridSizer(0, 3, 0, 0)
            sizer.AddGrowableCol(2)
            sizer.SetFlexibleDirection(wx.BOTH)
            sizer.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
        for name in sorted(value_list):
            t = value_list[name]['type']
            watch = True
            if (t == 'SensorValue'):
                # SensorValues update fast; watch at a limited period
                # -- presumably seconds^-1 or a period; confirm against pypilotClient.watch.
                watch = 10
            self.client.watch(name, watch)
        for name in sorted(value_list):
            if (name in self.values):
                continue
            sizer.Add(wx.StaticText(self.scrolledWindow, wx.ID_ANY, name), 0, wx.ALL, 5)
            self.values[name] = wx.StaticText(self.scrolledWindow, wx.ID_ANY)
            sizer.Add(self.values[name], 0, wx.ALL, 5)
            t = value_list[name]['type']
            if (t == 'Property'):
                # Free-form text entry.
                tb = wx.TextCtrl(self.scrolledWindow, wx.ID_ANY)
                sizer.Add(tb)
                self.controls[name] = tb
            elif (t == 'BooleanProperty'):
                def proc():
                    cb = wx.CheckBox(self.scrolledWindow, wx.ID_ANY, '')
                    sizer.Add(cb, 0, wx.EXPAND)
                    self.controls[name] = cb
                    cbname = name
                    def oncheck(event):
                        self.client.set(cbname, cb.GetValue())
                    cb.Bind(wx.EVT_CHECKBOX, oncheck)
                proc()
            elif ((t == 'RangeProperty') or (t == 'RangeSetting')):
                useSlider = True
                def proc():
                    r = (value_list[name]['min'], value_list[name]['max'])
                    if useSlider:
                        # Slider works in 0..1000 and is rescaled to [min, max].
                        s = wx.Slider(self.scrolledWindow)
                        s.SetRange(0, 1000)
                    else:
                        s = wx.SpinCtrlDouble(self.scrolledWindow)
                        s.SetRange(r[0], r[1])
                        s.SetIncrement(min(1, ((r[1] - r[0]) / 100.0)))
                        s.SetDigits((((- math.log(s.GetIncrement())) / math.log(10)) + 1))
                    sizer.Add(s, 0, wx.EXPAND)
                    self.controls[name] = s
                    sname = name
                    def onspin(event):
                        if useSlider:
                            v = (((s.GetValue() / 1000.0) * (r[1] - r[0])) + r[0])
                            self.client.set(sname, v)
                        else:
                            self.client.set(sname, s.GetValue())
                    if useSlider:
                        s.Bind(wx.EVT_SLIDER, onspin)
                        self.sliderrange[name] = r
                    else:
                        s.Bind(wx.EVT_SPINCTRLDOUBLE, onspin)
                proc()
            elif (t == 'EnumProperty'):
                def proc():
                    c = wx.Choice(self.scrolledWindow, wx.ID_ANY)
                    for choice in value_list[name]['choices']:
                        c.Append(str(choice))
                    sizer.Add(c, 0, wx.EXPAND)
                    self.controls[name] = c
                    cname = name
                    def onchoice(event):
                        self.client.set(cname, str(c.GetStringSelection()))
                    c.Bind(wx.EVT_CHOICE, onchoice)
                proc()
            elif (t == 'ResettableValue'):
                def proc():
                    b = wx.Button(self.scrolledWindow, wx.ID_ANY, _('Reset'))
                    sizer.Add(b, 0, wx.EXPAND)
                    bname = name
                    def onclick(event):
                        self.client.set(bname, 0)
                    b.Bind(wx.EVT_BUTTON, onclick)
                proc()
            else:
                # Read-only value: placeholder keeps the 3-column grid aligned.
                sizer.Add(wx.StaticText(self.scrolledWindow, wx.ID_ANY, ''))
        self.scrolledWindow.SetSizer(sizer)
        self.scrolledWindow.Layout()
        sizer.Fit(self.scrolledWindow)

    def Refresh(self, event):
        """Drop the connection and clear all widgets; also serves as the
        initial state setup (called from __init__ with event=None)."""
        if self.client.connection:
            self.client.disconnect()
        sizer = self.scrolledWindow.GetSizer()
        if sizer:
            sizer.Clear(True)
        self.values = {}
        self.controls = {}
        self.sliderrange = {}

    def receive_messages(self, event):
        """Timer callback: track connection state, lay out any new values,
        and push every received update into its label/control."""
        if (self.client.connection != self.connected):
            self.connected = self.client.connection
            if self.connected:
                self.SetTitle(('pypilot client - ' + _('Connected')))
            else:
                self.SetTitle(('pypilot client - ' + _('Disconnected')))
        value_list = self.client.list_values()
        if value_list:
            self.layout_widgets(value_list)
            # Fit to the new contents but keep the user's window size.
            size = self.GetSize()
            self.Fit()
            self.SetSize(size)
        # Drain every pending message from the server.
        while True:
            result = self.client.receive()
            if (not result):
                break
            for name in result:
                value = round3(result[name])
                strvalue = str(value)
                if (len(strvalue) > 50):
                    strvalue = (strvalue[:47] + '...')
                self.values[name].SetLabel(strvalue)
                if (name in self.controls):
                    # Control classes are matched by their type string because
                    # the concrete class differs across wxPython builds.
                    try:
                        t = str(type(self.controls[name]))
                        if ((t == "<class 'wx._controls.Choice'>") or (t == "<class 'wx._core.Choice'>")):
                            if (not self.controls[name].SetStringSelection(str(value))):
                                print(_('warning, invalid choice value specified'))
                        elif ((t == "<class 'wx._controls.Slider'>") or (t == "<class 'wx._core.Slider'>")):
                            # Rescale [min, max] back into the slider's 0..1000.
                            r = self.sliderrange[name]
                            self.controls[name].SetValue(int(((float((value - r[0])) / (r[1] - r[0])) * 1000)))
                        else:
                            self.controls[name].SetValue(value)
                    except:
                        # NOTE(review): bare except deliberately falls back to the
                        # string form for controls that reject the raw value.
                        self.controls[name].SetValue(str(value))
def define_D(input_nc, size, ndf, which_model_netD, n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=None):
    """Construct, place and initialize a PatchGAN discriminator.

    Args:
        input_nc: number of input channels.
        size: unused in this function; kept for interface compatibility.
        ndf: base number of discriminator filters.
        which_model_netD: 'basic' (fixed 3 layers) or 'n_layers' (uses n_layers_D).
        n_layers_D: layer count for the 'n_layers' variant.
        norm: normalization type name passed to get_norm_layer.
        use_sigmoid: append a sigmoid output layer.
        init_type: weight initialization scheme for init_weights.
        gpu_ids: list of GPU ids; empty/None means CPU.

    Raises:
        NotImplementedError: for an unrecognized which_model_netD.
    """
    # None replaces the old mutable default ([]); behavior is unchanged.
    if gpu_ids is None:
        gpu_ids = []
    netD = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert torch.cuda.is_available()
    if which_model_netD == 'basic':
        # 'basic' deliberately fixes n_layers=3, ignoring n_layers_D.
        netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'n_layers':
        netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % which_model_netD)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
def ircformat(color, text):
    """Wrap *text* in mIRC formatting codes.

    *color* may contain '_' (italic), '*' (bold) and/or a color name that is
    looked up in IRC_COLOR_MAP; an empty spec returns the text unchanged.
    """
    if len(color) < 1:
        return text
    add = sub = ''
    if '_' in color:
        # \x1d toggles italics.
        add += '\x1d'
        sub = '\x1d' + sub
        color = color.strip('_')
    if '*' in color:
        # \x02 toggles bold.
        add += '\x02'
        sub = '\x02' + sub
        color = color.strip('*')
    if len(color) > 0:
        # \x03NN selects a two-digit mIRC color.
        add += ('\x03' + str(IRC_COLOR_MAP[color]).zfill(2))
        sub = '\x03' + sub
    return add + text + sub
    # NOTE(review): an unreachable HTML-style return ('<'+add+'>'+text+...)
    # followed the line above in the source; removed as dead code.
class ResUnetGenerator(nn.Module):
    """Residual U-Net generator built from nested skip-connection blocks.

    The network is assembled from the innermost block outward.  Learning
    rates are seeded from the module-level ``opt`` (NOTE(review): global
    dependency -- confirm ``opt`` is configured before construction).
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(ResUnetGenerator, self).__init__()
        # Innermost bottleneck block.
        block = ResUnetSkipConnectionBlock((ngf * 8), (ngf * 8), input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        # Extra blocks at the deepest resolution (with optional dropout).
        for _ in range(num_downs - 5):
            block = ResUnetSkipConnectionBlock((ngf * 8), (ngf * 8), input_nc=None, submodule=block, norm_layer=norm_layer, use_dropout=use_dropout)
        # Progressively shallower blocks, halving the channel count each step.
        block = ResUnetSkipConnectionBlock((ngf * 4), (ngf * 8), input_nc=None, submodule=block, norm_layer=norm_layer)
        block = ResUnetSkipConnectionBlock((ngf * 2), (ngf * 4), input_nc=None, submodule=block, norm_layer=norm_layer)
        block = ResUnetSkipConnectionBlock(ngf, (ngf * 2), input_nc=None, submodule=block, norm_layer=norm_layer)
        block = ResUnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True, norm_layer=norm_layer)
        self.model = block
        self.old_lr = opt.lr
        self.old_lr_gmm = (0.1 * opt.lr)

    def forward(self, input):
        return self.model(input)
class DescribeCT_R():
    """Unit tests for the CT_R (<w:r>) run element."""

    @pytest.mark.parametrize(('initial_cxml', 'text', 'expected_cxml'), [('w:r', 'foobar', 'w:r/w:t"foobar"'), ('w:r', 'foobar ', 'w:r/w:t{xml:space=preserve}"foobar "'), ('w:r/(w:rPr/w:rStyle{w:val=emphasis}, w:cr)', 'foobar', 'w:r/(w:rPr/w:rStyle{w:val=emphasis}, w:cr, w:t"foobar")')])  # NOTE(review): decorator prefix was mangled ('.parametrize'); restored as pytest.mark.parametrize -- confirm pytest is imported in this file
    def it_can_add_a_t_preserving_edge_whitespace(self, initial_cxml: str, text: str, expected_cxml: str):
        r = cast(CT_R, element(initial_cxml))
        expected_xml = xml(expected_cxml)
        r.add_t(text)
        assert (r.xml == expected_xml)

    def it_can_assemble_the_text_in_the_run(self):
        # br/cr map to '\n', noBreakHyphen to '-', (p)tab to '\t'.
        cxml = 'w:r/(w:br,w:cr,w:noBreakHyphen,w:ptab,w:t"foobar",w:tab)'
        r = cast(CT_R, element(cxml))
        assert (r.text == '\n\n-\tfoobar\t')
def export_data(data, dtype, file):
    """Write data[dtype] (an iterable of dicts) to *file* as CSV.

    The header row is taken from the first dict's keys; all records are
    assumed to share that key order.
    """
    records = data[dtype]
    # 'with' guarantees the file is closed even if writing a row fails
    # (the original left the handle open on exceptions).
    with open(file, 'w') as data_file:
        csv_writer = csv.writer(data_file)
        wrote_header = False
        for record in records:
            if not wrote_header:
                csv_writer.writerow(record.keys())
                wrote_header = True
            csv_writer.writerow(record.values())
class RowIndex(tk.Canvas):
    """Row-index canvas of the sheet widget (class continues beyond this view).

    All configuration (colors, alignment, callbacks, feature flags) arrives
    via **kwargs from the parent sheet.
    """

    def __init__(self, *args, **kwargs):
        tk.Canvas.__init__(self, kwargs['parentframe'], background=kwargs['index_bg'], highlightthickness=0)
        self.parentframe = kwargs['parentframe']
        # Sibling widgets; wired up later by the parent sheet.
        self.MT = None
        self.CH = None
        self.TL = None
        self.popup_menu_loc = None
        # Cell-editor / dropdown state.
        self.extra_begin_edit_cell_func = None
        self.extra_end_edit_cell_func = None
        self.text_editor = None
        self.text_editor_id = None
        self.text_editor_loc = None
        self.b1_pressed_loc = None
        self.existing_dropdown_canvas_id = None
        self.existing_dropdown_window = None
        self.closed_dropdown = None
        # Alternating slices used when trimming centre-aligned text.
        self.centre_alignment_text_mod_indexes = (slice(1, None), slice(None, (- 1)))
        self.c_align_cyc = cycle(self.centre_alignment_text_mod_indexes)
        self.being_drawn_rect = None
        # User-supplied event hooks.
        self.extra_motion_func = None
        self.extra_b1_press_func = None
        self.extra_b1_motion_func = None
        self.extra_b1_release_func = None
        self.extra_rc_func = None
        self.selection_binding_func = None
        self.shift_selection_binding_func = None
        self.ctrl_selection_binding_func = None
        self.drag_selection_binding_func = None
        self.ri_extra_begin_drag_drop_func = None
        self.ri_extra_end_drag_drop_func = None
        self.extra_double_b1_func = None
        self.row_height_resize_func = None
        # Feature flags and interaction state.
        self.new_row_width = 0
        self.cell_options = {}
        self.options = {}
        self.drag_and_drop_enabled = False
        self.dragged_row = None
        self.width_resizing_enabled = False
        self.height_resizing_enabled = False
        self.double_click_resizing_enabled = False
        self.row_selection_enabled = False
        self.rc_insert_row_enabled = False
        self.rc_delete_row_enabled = False
        self.edit_cell_enabled = False
        self.visible_row_dividers = {}
        self.row_width_resize_bbox = tuple()
        self.rsz_w = None
        self.rsz_h = None
        self.currently_resizing_width = False
        self.currently_resizing_height = False
        self.ri_rc_popup_menu = None
        # Canvas-item pools: disp_* items are currently displayed,
        # hidd_* items are hidden and recycled for reuse.
        self.disp_text = {}
        self.disp_high = {}
        self.disp_grid = {}
        self.disp_fill_sels = {}
        self.disp_bord_sels = {}
        self.disp_resize_lines = {}
        self.disp_dropdown = {}
        self.disp_checkbox = {}
        self.hidd_text = {}
        self.hidd_high = {}
        self.hidd_grid = {}
        self.hidd_fill_sels = {}
        self.hidd_bord_sels = {}
        self.hidd_resize_lines = {}
        self.hidd_dropdown = {}
        self.hidd_checkbox = {}
        # Appearance/behavior configuration from the parent.
        self.row_drag_and_drop_perform = kwargs['row_drag_and_drop_perform']
        self.index_fg = kwargs['index_fg']
        self.index_grid_fg = kwargs['index_grid_fg']
        self.index_border_fg = kwargs['index_border_fg']
        self.index_selected_cells_bg = kwargs['index_selected_cells_bg']
        self.index_selected_cells_fg = kwargs['index_selected_cells_fg']
        self.index_selected_rows_bg = kwargs['index_selected_rows_bg']
        self.index_selected_rows_fg = kwargs['index_selected_rows_fg']
        self.index_hidden_rows_expander_bg = kwargs['index_hidden_rows_expander_bg']
        self.index_bg = kwargs['index_bg']
        self.drag_and_drop_bg = kwargs['drag_and_drop_bg']
        self.resizing_line_fg = kwargs['resizing_line_fg']
        self.align = kwargs['row_index_align']
        self.show_default_index_for_empty = kwargs['show_default_index_for_empty']
        self.auto_resize_width = kwargs['auto_resize_width']
        self.default_index = kwargs['default_row_index'].lower()
        self.basic_bindings()
def basic_bindings(self, enable=True):
if enable:
self.bind('<Motion>', self.mouse_motion)
self.bind('<ButtonPress-1>', self.b1_press)
self.bind('<B1-Motion>', self.b1_motion)
self.bind('<ButtonRelease-1>', self.b1_release)
self.bind('<Double-Button-1>', self.double_b1)
self.bind(rc_binding, self.rc)
else:
self.unbind('<Motion>')
self.unbind('<ButtonPress-1>')
self.unbind('<B1-Motion>')
self.unbind('<ButtonRelease-1>')
self.unbind('<Double-Button-1>')
self.unbind(rc_binding)
    def set_width(self, new_width, set_TL=False):
        """Set the canvas width, optionally propagating it to the top-left
        widget.  config() can fail (e.g. during teardown); in that case the
        method aborts silently without touching the TL widget.
        """
        self.current_width = new_width
        try:
            self.config(width=new_width)
        except Exception:
            return
        if set_TL:
            self.TL.set_dimensions(new_w=new_width)
def enable_bindings(self, binding):
if (binding == 'row_width_resize'):
self.width_resizing_enabled = True
elif (binding == 'row_height_resize'):
self.height_resizing_enabled = True
elif (binding == 'double_click_row_resize'):
self.double_click_resizing_enabled = True
elif (binding == 'row_select'):
self.row_selection_enabled = True
elif (binding == 'drag_and_drop'):
self.drag_and_drop_enabled = True
def disable_bindings(self, binding):
if (binding == 'row_width_resize'):
self.width_resizing_enabled = False
elif (binding == 'row_height_resize'):
self.height_resizing_enabled = False
elif (binding == 'double_click_row_resize'):
self.double_click_resizing_enabled = False
elif (binding == 'row_select'):
self.row_selection_enabled = False
elif (binding == 'drag_and_drop'):
self.drag_and_drop_enabled = False
    def rc(self, event):
        """Right-click handler: select/deselect rows and pop up the
        appropriate context menu.

        NOTE(review): when the click lands below the last row and
        rc_popup_menus_enabled is set, ``popup_menu`` is assigned but ``r``
        never is, so ``self.popup_menu_loc = r`` below would raise
        NameError -- confirm whether that branch is reachable.
        """
        self.mouseclick_outside_editor_or_dropdown_all_canvases(inside=True)
        self.focus_set()
        popup_menu = None
        if (self.MT.identify_row(y=event.y, allow_end=False) is None):
            # Click outside any row: clear the selection, offer the empty menu.
            self.MT.deselect('all')
            if self.MT.rc_popup_menus_enabled:
                popup_menu = self.MT.empty_rc_popup_menu
        elif (self.row_selection_enabled and (not self.currently_resizing_width) and (not self.currently_resizing_height)):
            r = self.MT.identify_row(y=event.y)
            if (r < (len(self.MT.row_positions) - 1)):
                if self.MT.row_selected(r):
                    if self.MT.rc_popup_menus_enabled:
                        popup_menu = self.ri_rc_popup_menu
                else:
                    # Row not selected yet: select it first (if permitted).
                    if (self.MT.single_selection_enabled and self.MT.rc_select_enabled):
                        self.select_row(r, redraw=True)
                    elif (self.MT.toggle_selection_enabled and self.MT.rc_select_enabled):
                        self.toggle_select_row(r, redraw=True)
                    if self.MT.rc_popup_menus_enabled:
                        popup_menu = self.ri_rc_popup_menu
        if (self.extra_rc_func is not None):
            self.extra_rc_func(event)
        if (popup_menu is not None):
            self.popup_menu_loc = r
            popup_menu.tk_popup(event.x_root, event.y_root)
    def ctrl_b1_press(self, event=None):
        """Ctrl+click: add the clicked row to the selection, or begin a drag
        of the already-selected block; falls back to a plain click when
        ctrl-select is disabled."""
        self.mouseclick_outside_editor_or_dropdown_all_canvases(inside=True)
        if ((self.drag_and_drop_enabled or self.row_selection_enabled) and self.MT.ctrl_select_enabled and (self.rsz_h is None) and (self.rsz_w is None)):
            r = self.MT.identify_row(y=event.y)
            if (r < (len(self.MT.row_positions) - 1)):
                r_selected = self.MT.row_selected(r)
                if ((not r_selected) and self.row_selection_enabled):
                    self.add_selection(r, set_as_current=True)
                    self.MT.main_table_redraw_grid_and_text(redraw_header=True, redraw_row_index=True)
                    if (self.ctrl_selection_binding_func is not None):
                        self.ctrl_selection_binding_func(SelectionBoxEvent('ctrl_select_rows', (r, (r + 1))))
                elif r_selected:
                    # Start dragging the contiguous run of selected rows around r.
                    self.dragged_row = DraggedRowColumn(dragged=r, to_move=get_seq_without_gaps_at_index(sorted(self.MT.get_selected_rows()), r))
        elif (not self.MT.ctrl_select_enabled):
            self.b1_press(event)
    def ctrl_shift_b1_press(self, event):
        """Ctrl+Shift+click: extend the row selection from the current anchor
        to the clicked row, or begin dragging the selected block.

        NOTE(review): if ``r`` equals the anchor row neither extension branch
        assigns ``func_event``, which would raise NameError at the callback
        below -- confirm that case cannot occur.
        """
        self.mouseclick_outside_editor_or_dropdown_all_canvases(inside=True)
        y = event.y
        r = self.MT.identify_row(y=y)
        if ((self.drag_and_drop_enabled or self.row_selection_enabled) and self.MT.ctrl_select_enabled and (self.rsz_h is None) and (self.rsz_w is None)):
            if (r < (len(self.MT.row_positions) - 1)):
                r_selected = self.MT.row_selected(r)
                if ((not r_selected) and self.row_selection_enabled):
                    currently_selected = self.MT.currently_selected()
                    if (currently_selected and (currently_selected.type_ == 'row')):
                        # Extend the box from the anchor row to the clicked row.
                        min_r = int(currently_selected.row)
                        if (r > min_r):
                            self.MT.create_selected(min_r, 0, (r + 1), (len(self.MT.col_positions) - 1), 'rows')
                            func_event = tuple(range(min_r, (r + 1)))
                        elif (r < min_r):
                            self.MT.create_selected(r, 0, (min_r + 1), (len(self.MT.col_positions) - 1), 'rows')
                            func_event = tuple(range(r, (min_r + 1)))
                    else:
                        # No row anchor: just add the clicked row.
                        self.add_selection(r, set_as_current=True)
                        func_event = (r,)
                    self.MT.main_table_redraw_grid_and_text(redraw_header=True, redraw_row_index=True)
                    if (self.ctrl_selection_binding_func is not None):
                        self.ctrl_selection_binding_func(SelectionBoxEvent('ctrl_select_rows', func_event))
                elif r_selected:
                    self.dragged_row = DraggedRowColumn(dragged=r, to_move=get_seq_without_gaps_at_index(sorted(self.MT.get_selected_rows()), r))
        elif (not self.MT.ctrl_select_enabled):
            self.shift_b1_press(event)
    def shift_b1_press(self, event):
        """Shift+click: replace the selection with a box from the current
        anchor row to the clicked row, or begin dragging the selected block.

        NOTE(review): if ``r`` equals the anchor row neither extension branch
        assigns ``func_event`` (NameError risk at the callback) -- confirm.
        """
        self.mouseclick_outside_editor_or_dropdown_all_canvases(inside=True)
        y = event.y
        r = self.MT.identify_row(y=y)
        if ((self.drag_and_drop_enabled or self.row_selection_enabled) and (self.rsz_h is None) and (self.rsz_w is None)):
            if (r < (len(self.MT.row_positions) - 1)):
                r_selected = self.MT.row_selected(r)
                if ((not r_selected) and self.row_selection_enabled):
                    currently_selected = self.MT.currently_selected()
                    if (currently_selected and (currently_selected.type_ == 'row')):
                        min_r = int(currently_selected.row)
                        # Unlike ctrl-shift, plain shift replaces the selection.
                        self.MT.delete_selection_rects(delete_current=False)
                        if (r > min_r):
                            self.MT.create_selected(min_r, 0, (r + 1), (len(self.MT.col_positions) - 1), 'rows')
                            func_event = tuple(range(min_r, (r + 1)))
                        elif (r < min_r):
                            self.MT.create_selected(r, 0, (min_r + 1), (len(self.MT.col_positions) - 1), 'rows')
                            func_event = tuple(range(r, (min_r + 1)))
                    else:
                        self.select_row(r)
                        func_event = (r,)
                    self.MT.main_table_redraw_grid_and_text(redraw_header=True, redraw_row_index=True)
                    if (self.shift_selection_binding_func is not None):
                        self.shift_selection_binding_func(SelectionBoxEvent('shift_select_rows', func_event))
                elif r_selected:
                    self.dragged_row = DraggedRowColumn(dragged=r, to_move=get_seq_without_gaps_at_index(sorted(self.MT.get_selected_rows()), r))
    def create_resize_line(self, x1, y1, x2, y2, width, fill, tag):
        """Draw a resize guide line, recycling a pooled hidden canvas item
        when one is available instead of creating a new one.

        NOTE(review): as written, only newly created lines are registered in
        disp_resize_lines; recycled items are not re-registered -- confirm
        whether the registration was meant to apply to both branches.
        """
        if self.hidd_resize_lines:
            # Reuse a pooled line item: move it and restore its appearance.
            (t, sh) = self.hidd_resize_lines.popitem()
            self.coords(t, x1, y1, x2, y2)
            if sh:
                self.itemconfig(t, width=width, fill=fill, tag=tag)
            else:
                self.itemconfig(t, width=width, fill=fill, tag=tag, state='normal')
            self.lift(t)
        else:
            t = self.create_line(x1, y1, x2, y2, width=width, fill=fill, tag=tag)
            self.disp_resize_lines[t] = True
def delete_resize_lines(self):
self.hidd_resize_lines.update(self.disp_resize_lines)
self.disp_resize_lines = {}
for (t, sh) in self.hidd_resize_lines.items():
if sh:
self.itemconfig(t, state='hidden')
self.hidd_resize_lines[t] = False
def check_mouse_position_height_resizers(self, x, y):
for (r, (x1, y1, x2, y2)) in self.visible_row_dividers.items():
if ((x >= x1) and (y >= y1) and (x <= x2) and (y <= y2)):
return r
return None
    def mouse_motion(self, event):
        """Track the pointer to set the resize cursors (height divider or
        width-resize zone), the hand cursor over selected rows, and reset
        the cursor state otherwise."""
        if ((not self.currently_resizing_height) and (not self.currently_resizing_width)):
            x = self.canvasx(event.x)
            y = self.canvasy(event.y)
            mouse_over_resize = False
            mouse_over_selected = False
            # Height-resize cursor when hovering over a visible row divider.
            if (self.height_resizing_enabled and (not mouse_over_resize)):
                r = self.check_mouse_position_height_resizers(x, y)
                if (r is not None):
                    (self.rsz_h, mouse_over_resize) = (r, True)
                    if (self.MT.current_cursor != 'sb_v_double_arrow'):
                        self.config(cursor='sb_v_double_arrow')
                        self.MT.current_cursor = 'sb_v_double_arrow'
                else:
                    self.rsz_h = None
            # Width-resize cursor when inside the index's width-resize bbox;
            # the bbox may be empty, hence the try/except.
            if (self.width_resizing_enabled and (not mouse_over_resize)):
                try:
                    (x1, y1, x2, y2) = (self.row_width_resize_bbox[0], self.row_width_resize_bbox[1], self.row_width_resize_bbox[2], self.row_width_resize_bbox[3])
                    if ((x >= x1) and (y >= y1) and (x <= x2) and (y <= y2)):
                        (self.rsz_w, mouse_over_resize) = (True, True)
                        if (self.MT.current_cursor != 'sb_h_double_arrow'):
                            self.config(cursor='sb_h_double_arrow')
                            self.MT.current_cursor = 'sb_h_double_arrow'
                    else:
                        self.rsz_w = None
                except Exception:
                    self.rsz_w = None
            # Hand cursor over an already-selected row.
            if (not mouse_over_resize):
                if self.MT.row_selected(self.MT.identify_row(event, allow_end=False)):
                    mouse_over_selected = True
                    if (self.MT.current_cursor != 'hand2'):
                        self.config(cursor='hand2')
                        self.MT.current_cursor = 'hand2'
            if ((not mouse_over_resize) and (not mouse_over_selected)):
                self.MT.reset_mouse_motion_creations()
        if (self.extra_motion_func is not None):
            self.extra_motion_func(event)
def double_b1(self, event=None):
    """Handle a double left-click on the row index.

    Priority: auto-fit the row height (double-click on a divider),
    auto-fit the index width (double-click on the width-resize strip),
    otherwise select the clicked row and open its cell editor/widget
    when enabled.
    """
    self.mouseclick_outside_editor_or_dropdown_all_canvases(inside=True)
    self.focus_set()
    if (self.double_click_resizing_enabled and self.height_resizing_enabled and (self.rsz_h is not None) and (not self.currently_resizing_height)):
        # rsz_h is the divider below the row being resized.
        row = (self.rsz_h - 1)
        old_height = (self.MT.row_positions[self.rsz_h] - self.MT.row_positions[(self.rsz_h - 1)])
        new_height = self.set_row_height(row)
        self.MT.allow_auto_resize_rows = False
        self.MT.main_table_redraw_grid_and_text(redraw_header=True, redraw_row_index=True)
        if ((self.row_height_resize_func is not None) and (old_height != new_height)):
            self.row_height_resize_func(ResizeEvent('row_height_resize', row, old_height, new_height))
    elif (self.width_resizing_enabled and (self.rsz_h is None) and self.rsz_w):
        self.set_width_of_index_to_text()
    elif (self.row_selection_enabled and (self.rsz_h is None) and (self.rsz_w is None)):
        r = self.MT.identify_row(y=event.y)
        if (r < (len(self.MT.row_positions) - 1)):
            # Select the row (single vs toggle mode), then open an editor
            # if the cell shows a dropdown/checkbox or editing is enabled.
            if self.MT.single_selection_enabled:
                self.select_row(r, redraw=True)
            elif self.MT.toggle_selection_enabled:
                self.toggle_select_row(r, redraw=True)
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
            if (self.get_cell_kwargs(datarn, key='dropdown') or self.get_cell_kwargs(datarn, key='checkbox') or self.edit_cell_enabled):
                self.open_cell(event)
    self.rsz_h = None
    self.mouse_motion(event)
    if (self.extra_double_b1_func is not None):
        self.extra_double_b1_func(event)
def b1_press(self, event=None):
    """Handle left-button press: begin a height/width resize, a row
    drag-and-drop, or a fresh row selection.

    Sets up state consumed later by b1_motion / b1_release.
    """
    self.MT.unbind('<MouseWheel>')
    self.focus_set()
    self.closed_dropdown = self.mouseclick_outside_editor_or_dropdown_all_canvases(inside=True)
    x = self.canvasx(event.x)
    y = self.canvasy(event.y)
    r = self.MT.identify_row(y=event.y)
    self.b1_pressed_loc = r
    if (self.check_mouse_position_height_resizers(x, y) is None):
        self.rsz_h = None
    # NOTE(review): due to `not` precedence this clears rsz_w only when x
    # is left of the resize strip AND within its y-range; it looks like
    # the intent was "not inside the whole bbox" - confirm before changing.
    if ((not (x >= self.row_width_resize_bbox[0])) and (y >= self.row_width_resize_bbox[1]) and (x <= self.row_width_resize_bbox[2]) and (y <= self.row_width_resize_bbox[3])):
        self.rsz_w = None
    if (self.height_resizing_enabled and (self.rsz_h is not None)):
        # Begin a row-height drag: guide lines at the moving divider
        # ('rhl') and at the row's fixed top edge ('rhl2').
        self.currently_resizing_height = True
        y = self.MT.row_positions[self.rsz_h]
        line2y = self.MT.row_positions[(self.rsz_h - 1)]
        (x1, y1, x2, y2) = self.MT.get_canvas_visible_area()
        self.create_resize_line(0, y, self.current_width, y, width=1, fill=self.resizing_line_fg, tag='rhl')
        self.MT.create_resize_line(x1, y, x2, y, width=1, fill=self.resizing_line_fg, tag='rhl')
        self.create_resize_line(0, line2y, self.current_width, line2y, width=1, fill=self.resizing_line_fg, tag='rhl2')
        self.MT.create_resize_line(x1, line2y, x2, line2y, width=1, fill=self.resizing_line_fg, tag='rhl2')
    elif (self.width_resizing_enabled and (self.rsz_h is None) and self.rsz_w):
        # Begin an index-width drag with a vertical guide line.
        self.currently_resizing_width = True
        (x1, y1, x2, y2) = self.MT.get_canvas_visible_area()
        x = int(event.x)
        if (x < self.MT.min_column_width):
            x = int(self.MT.min_column_width)
        self.new_row_width = x
        self.create_resize_line(x, y1, x, y2, width=1, fill=self.resizing_line_fg, tag='rwl')
    elif (self.MT.identify_row(y=event.y, allow_end=False) is None):
        # Click below the last row clears all selections.
        self.MT.deselect('all')
    elif (self.row_selection_enabled and (self.rsz_h is None) and (self.rsz_w is None)):
        r = self.MT.identify_row(y=event.y)
        if (r < (len(self.MT.row_positions) - 1)):
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
            if (self.MT.row_selected(r) and (not self.event_over_dropdown(r, datarn, event, y)) and (not self.event_over_checkbox(r, datarn, event, y))):
                # Pressing an already-selected row begins drag-and-drop of
                # the contiguous selected block containing it.
                self.dragged_row = DraggedRowColumn(dragged=r, to_move=get_seq_without_gaps_at_index(sorted(self.MT.get_selected_rows()), r))
            else:
                # Otherwise start a fresh row selection box.
                self.being_drawn_rect = (r, 0, (r + 1), (len(self.MT.col_positions) - 1), 'rows')
                if self.MT.single_selection_enabled:
                    self.select_row(r, redraw=True)
                elif self.MT.toggle_selection_enabled:
                    self.toggle_select_row(r, redraw=True)
    if (self.extra_b1_press_func is not None):
        self.extra_b1_press_func(event)
def b1_motion(self, event):
    """Handle left-drag: live height/width resize preview, row
    drag-and-drop indicator, or drag-selection of rows, depending on
    state established in b1_press."""
    (x1, y1, x2, y2) = self.MT.get_canvas_visible_area()
    if (self.height_resizing_enabled and (self.rsz_h is not None) and self.currently_resizing_height):
        # Redraw the height guide lines, only within min/max row height.
        y = self.canvasy(event.y)
        size = (y - self.MT.row_positions[(self.rsz_h - 1)])
        if ((size >= self.MT.min_row_height) and (size < self.MT.max_row_height)):
            self.delete_all_resize_and_ctrl_lines(ctrl_lines=False)
            line2y = self.MT.row_positions[(self.rsz_h - 1)]
            self.create_resize_line(0, y, self.current_width, y, width=1, fill=self.resizing_line_fg, tag='rhl')
            self.MT.create_resize_line(x1, y, x2, y, width=1, fill=self.resizing_line_fg, tag='rhl')
            self.create_resize_line(0, line2y, self.current_width, line2y, width=1, fill=self.resizing_line_fg, tag='rhl2')
            self.MT.create_resize_line(x1, line2y, x2, line2y, width=1, fill=self.resizing_line_fg, tag='rhl2')
    elif (self.width_resizing_enabled and (self.rsz_w is not None) and self.currently_resizing_width):
        # Move the width guide line; new_row_width is applied on release.
        evx = event.x
        self.delete_all_resize_and_ctrl_lines(ctrl_lines=False)
        if (evx > self.current_width):
            # Pointer is past the index edge: draw the line on the main table.
            x = self.MT.canvasx((evx - self.current_width))
            if (evx > self.MT.max_index_width):
                evx = int(self.MT.max_index_width)
                x = self.MT.canvasx((evx - self.current_width))
            self.new_row_width = evx
            self.MT.create_resize_line(x, y1, x, y2, width=1, fill=self.resizing_line_fg, tag='rwl')
        else:
            x = evx
            if (x < self.MT.min_column_width):
                x = int(self.MT.min_column_width)
            self.new_row_width = x
            self.create_resize_line(x, y1, x, y2, width=1, fill=self.resizing_line_fg, tag='rwl')
    if (self.drag_and_drop_enabled and self.row_selection_enabled and (self.rsz_h is None) and (self.rsz_w is None) and (self.dragged_row is not None) and self.MT.anything_selected(exclude_cells=True, exclude_columns=True)):
        # Dragging selected rows: show the drop-position indicator.
        y = self.canvasy(event.y)
        if ((y > 0) and (y < self.MT.row_positions[(- 1)])):
            self.show_drag_and_drop_indicators(self.drag_and_drop_motion(event), x1, x2, self.dragged_row.to_move[0], self.dragged_row.to_move[(- 1)])
    elif (self.MT.drag_selection_enabled and self.row_selection_enabled and (self.rsz_h is None) and (self.rsz_w is None)):
        # Extend the row selection box from the anchor row to the hovered row.
        need_redraw = False
        end_row = self.MT.identify_row(y=event.y)
        currently_selected = self.MT.currently_selected()
        if ((end_row < (len(self.MT.row_positions) - 1)) and currently_selected):
            if (currently_selected.type_ == 'row'):
                start_row = currently_selected.row
                if (end_row >= start_row):
                    rect = (start_row, 0, (end_row + 1), (len(self.MT.col_positions) - 1), 'rows')
                    func_event = tuple(range(start_row, (end_row + 1)))
                elif (end_row < start_row):
                    rect = (end_row, 0, (start_row + 1), (len(self.MT.col_positions) - 1), 'rows')
                    func_event = tuple(range(end_row, (start_row + 1)))
                if (self.being_drawn_rect != rect):
                    # Only rebuild the box when it actually changed.
                    need_redraw = True
                    self.MT.delete_selection_rects(delete_current=False)
                    self.MT.create_selected(*rect)
                    self.being_drawn_rect = rect
                    if (self.drag_selection_binding_func is not None):
                        self.drag_selection_binding_func(SelectionBoxEvent('drag_select_rows', func_event))
        if self.scroll_if_event_offscreen(event):
            need_redraw = True
        if need_redraw:
            self.MT.main_table_redraw_grid_and_text(redraw_header=False, redraw_row_index=True)
    if (self.extra_b1_motion_func is not None):
        self.extra_b1_motion_func(event)
def ctrl_b1_motion(self, event):
    """Handle ctrl+left-drag: drag-and-drop of selected rows or an
    additive (ctrl) drag-selection; falls back to plain b1_motion when
    ctrl-selection is disabled."""
    (x1, y1, x2, y2) = self.MT.get_canvas_visible_area()
    if (self.drag_and_drop_enabled and self.row_selection_enabled and (self.rsz_h is None) and (self.rsz_w is None) and (self.dragged_row is not None) and self.MT.anything_selected(exclude_cells=True, exclude_columns=True)):
        y = self.canvasy(event.y)
        if ((y > 0) and (y < self.MT.row_positions[(- 1)])):
            self.show_drag_and_drop_indicators(self.drag_and_drop_motion(event), x1, x2, self.dragged_row.to_move[0], self.dragged_row.to_move[(- 1)])
    elif (self.MT.ctrl_select_enabled and self.row_selection_enabled and self.MT.drag_selection_enabled and (self.rsz_h is None) and (self.rsz_w is None)):
        need_redraw = False
        end_row = self.MT.identify_row(y=event.y)
        currently_selected = self.MT.currently_selected()
        if ((end_row < (len(self.MT.row_positions) - 1)) and currently_selected):
            if (currently_selected.type_ == 'row'):
                start_row = currently_selected.row
                if (end_row >= start_row):
                    rect = (start_row, 0, (end_row + 1), (len(self.MT.col_positions) - 1), 'rows')
                    func_event = tuple(range(start_row, (end_row + 1)))
                elif (end_row < start_row):
                    rect = (end_row, 0, (start_row + 1), (len(self.MT.col_positions) - 1), 'rows')
                    func_event = tuple(range(end_row, (start_row + 1)))
                if (self.being_drawn_rect != rect):
                    need_redraw = True
                    # Unlike plain drag-select, only the in-progress box is
                    # replaced so earlier ctrl-selections are preserved.
                    if (self.being_drawn_rect is not None):
                        self.MT.delete_selected(*self.being_drawn_rect)
                    self.MT.create_selected(*rect)
                    self.being_drawn_rect = rect
                    if (self.drag_selection_binding_func is not None):
                        self.drag_selection_binding_func(SelectionBoxEvent('drag_select_rows', func_event))
        if self.scroll_if_event_offscreen(event):
            need_redraw = True
        if need_redraw:
            self.MT.main_table_redraw_grid_and_text(redraw_header=False, redraw_row_index=True)
    elif (not self.MT.ctrl_select_enabled):
        self.b1_motion(event)
def drag_and_drop_motion(self, event):
    """While dragging rows, auto-scroll near the widget's edges and
    return the canvas y coordinate for the drop-indicator line."""
    y = event.y
    hend = self.winfo_height()
    ycheck = self.yview()
    if ((y >= (hend - 0)) and (len(ycheck) > 1) and (ycheck[1] < 1)):
        # Pointer at/below the bottom edge: scroll down, faster when
        # the pointer is further outside the widget.
        if (y >= (hend + 15)):
            self.MT.yview_scroll(2, 'units')
            self.yview_scroll(2, 'units')
        else:
            self.MT.yview_scroll(1, 'units')
            self.yview_scroll(1, 'units')
        self.fix_yview()
        self.MT.y_move_synced_scrolls('moveto', self.MT.yview()[0])
        self.MT.main_table_redraw_grid_and_text(redraw_row_index=True)
    elif ((y <= 0) and (len(ycheck) > 1) and (ycheck[0] > 0)):
        # Pointer at/above the top edge: scroll up, faster further out.
        if (y >= (- 15)):
            self.MT.yview_scroll((- 1), 'units')
            self.yview_scroll((- 1), 'units')
        else:
            self.MT.yview_scroll((- 2), 'units')
            self.yview_scroll((- 2), 'units')
        self.fix_yview()
        self.MT.y_move_synced_scrolls('moveto', self.MT.yview()[0])
        self.MT.main_table_redraw_grid_and_text(redraw_row_index=True)
    row = self.MT.identify_row(y=event.y)
    if ((row >= self.dragged_row.to_move[0]) and (row <= self.dragged_row.to_move[(- 1)])):
        # Hovering inside the dragged block: indicator stays at its top.
        ypos = self.MT.row_positions[self.dragged_row.to_move[0]]
    elif (row < self.dragged_row.to_move[0]):
        ypos = self.MT.row_positions[row]
    else:
        # Dropping below the block: indicator goes under the hovered row.
        ypos = (self.MT.row_positions[(row + 1)] if ((len(self.MT.row_positions) - 1) > row) else self.MT.row_positions[row])
    return ypos
def show_drag_and_drop_indicators(self, ypos, x1, x2, start_row, end_row):
    """Draw the horizontal drop-position line on both canvases and an
    outline around the block of rows being moved."""
    self.delete_all_resize_and_ctrl_lines()
    color = self.drag_and_drop_bg
    # Drop-position line across the index, then across the main table.
    self.create_resize_line(0, ypos, self.current_width, ypos, width=3, fill=color, tag='dd')
    self.MT.create_resize_line(x1, ypos, x2, ypos, width=3, fill=color, tag='dd')
    last_col = len(self.MT.col_positions) - 1
    # Persistent outline around the rows being dragged.
    self.MT.show_ctrl_outline(
        start_cell=(0, start_row),
        end_cell=(last_col, end_row + 1),
        dash=(),
        outline=color,
        delete_on_timer=False,
    )
def delete_all_resize_and_ctrl_lines(self, ctrl_lines=True):
    """Hide resize guide lines on both this canvas and the main table,
    and optionally the ctrl-outline rectangles too."""
    self.delete_resize_lines()
    self.MT.delete_resize_lines()
    if ctrl_lines:
        self.MT.delete_ctrl_outlines()
def scroll_if_event_offscreen(self, event):
    """Scroll one unit when a drag event has left the visible area.

    Returns True when a scroll happened and the caller should redraw.
    """
    ycheck = self.yview()
    below = event.y > self.winfo_height() and len(ycheck) > 1 and ycheck[1] < 1
    above = (
        not below
        and event.y < 0
        and self.canvasy(self.winfo_height()) > 0
        and ycheck
        and ycheck[0] > 0
    )
    if not (below or above):
        return False
    step = 1 if below else -1
    try:
        # Preserve the original per-direction call order (table first when
        # scrolling down, index first when scrolling up).
        if below:
            self.MT.yview_scroll(step, 'units')
            self.yview_scroll(step, 'units')
        else:
            self.yview_scroll(step, 'units')
            self.MT.yview_scroll(step, 'units')
    except Exception:
        pass
    self.fix_yview()
    self.MT.y_move_synced_scrolls('moveto', self.MT.yview()[0])
    return True
def fix_yview(self):
    """Clamp the vertical scroll fractions back inside [0, 1]."""
    position = self.yview()
    if not position:
        return
    if position[0] < 0:
        self.MT.set_yviews('moveto', 0)
    if len(position) > 1 and position[1] > 1:
        self.MT.set_yviews('moveto', 1)
def event_over_dropdown(self, r, datarn, event, canvasy):
    """Return True when the click at (event.x, canvasy) lands on the
    dropdown arrow of displayed row r (data row datarn)."""
    in_text_band = canvasy < self.MT.row_positions[r] + self.MT.txt_h
    arrow_zone = event.x > self.current_width - self.MT.txt_h - 4
    return bool(in_text_band and self.get_cell_kwargs(datarn, key='dropdown') and arrow_zone)
def event_over_checkbox(self, r, datarn, event, canvasy):
    """Return True when the click at (event.x, canvasy) lands on the
    checkbox of displayed row r (data row datarn)."""
    if not canvasy < self.MT.row_positions[r] + self.MT.txt_h:
        return False
    if not self.get_cell_kwargs(datarn, key='checkbox'):
        return False
    return event.x < self.MT.txt_h + 4
def b1_release(self, event=None):
    """Handle left-button release: commit a height/width resize, finish a
    row drag-and-drop, or open a cell widget that was pressed; then
    reset all transient press/drag state."""
    if (self.being_drawn_rect is not None):
        # Replace the in-progress selection box with its final version.
        self.MT.delete_selected(*self.being_drawn_rect)
        to_sel = tuple(self.being_drawn_rect)
        self.being_drawn_rect = None
        self.MT.create_selected(*to_sel)
    self.MT.bind('<MouseWheel>', self.MT.mousewheel)
    if (self.height_resizing_enabled and (self.rsz_h is not None) and self.currently_resizing_height):
        # Commit the row height from the guide line's final position.
        self.currently_resizing_height = False
        new_row_pos = int(self.coords('rhl')[1])
        self.delete_all_resize_and_ctrl_lines(ctrl_lines=False)
        old_height = (self.MT.row_positions[self.rsz_h] - self.MT.row_positions[(self.rsz_h - 1)])
        size = (new_row_pos - self.MT.row_positions[(self.rsz_h - 1)])
        if (size < self.MT.min_row_height):
            new_row_pos = ceil((self.MT.row_positions[(self.rsz_h - 1)] + self.MT.min_row_height))
        elif (size > self.MT.max_row_height):
            new_row_pos = floor((self.MT.row_positions[(self.rsz_h - 1)] + self.MT.max_row_height))
        # Shift every following row boundary by the height delta.
        increment = (new_row_pos - self.MT.row_positions[self.rsz_h])
        self.MT.row_positions[(self.rsz_h + 1):] = [(e + increment) for e in islice(self.MT.row_positions, (self.rsz_h + 1), len(self.MT.row_positions))]
        self.MT.row_positions[self.rsz_h] = new_row_pos
        self.MT.allow_auto_resize_rows = False
        new_height = (self.MT.row_positions[self.rsz_h] - self.MT.row_positions[(self.rsz_h - 1)])
        self.MT.recreate_all_selection_boxes()
        self.MT.main_table_redraw_grid_and_text(redraw_header=True, redraw_row_index=True)
        if ((self.row_height_resize_func is not None) and (old_height != new_height)):
            self.row_height_resize_func(ResizeEvent('row_height_resize', (self.rsz_h - 1), old_height, new_height))
    elif (self.width_resizing_enabled and (self.rsz_w is not None) and self.currently_resizing_width):
        # Commit the new index width chosen during the drag.
        self.currently_resizing_width = False
        self.delete_all_resize_and_ctrl_lines(ctrl_lines=False)
        self.set_width(self.new_row_width, set_TL=True)
        self.MT.main_table_redraw_grid_and_text(redraw_header=True, redraw_row_index=True)
    if (self.drag_and_drop_enabled and self.MT.anything_selected(exclude_cells=True, exclude_columns=True) and self.row_selection_enabled and (self.rsz_h is None) and (self.rsz_w is None) and (self.dragged_row is not None)):
        # Finish a row drag-and-drop: move the rows when dropped outside
        # the dragged block and not everything is selected.
        self.delete_all_resize_and_ctrl_lines()
        y = event.y
        r = self.MT.identify_row(y=y)
        orig_selected = self.dragged_row.to_move
        # NOTE(review): `r != self.dragged_row` compares an int against the
        # DraggedRowColumn object, so it is always True - presumably
        # self.dragged_row.dragged was intended; confirm before changing.
        if ((r != self.dragged_row) and (r is not None) and ((r < self.dragged_row.to_move[0]) or (r > self.dragged_row.to_move[(- 1)])) and (len(orig_selected) != (len(self.MT.row_positions) - 1))):
            rm1start = orig_selected[0]
            totalrows = len(orig_selected)
            extra_func_success = True
            if (r >= (len(self.MT.row_positions) - 1)):
                r -= 1
            if (self.ri_extra_begin_drag_drop_func is not None):
                try:
                    self.ri_extra_begin_drag_drop_func(BeginDragDropEvent('begin_row_index_drag_drop', tuple(orig_selected), int(r)))
                except Exception:
                    # A raising user callback vetoes the move.
                    extra_func_success = False
            if extra_func_success:
                (new_selected, dispset) = self.MT.move_rows_adjust_options_dict(r, rm1start, totalrows, move_data=self.row_drag_and_drop_perform)
                if self.MT.undo_enabled:
                    # Undo entries are pickled and compressed.
                    self.MT.undo_storage.append(zlib.compress(pickle.dumps(('move_rows', orig_selected, new_selected))))
                self.MT.main_table_redraw_grid_and_text(redraw_header=True, redraw_row_index=True)
                if (self.ri_extra_end_drag_drop_func is not None):
                    self.ri_extra_end_drag_drop_func(EndDragDropEvent('end_row_index_drag_drop', orig_selected, new_selected, int(r)))
                self.parentframe.emit_event('<<SheetModified>>')
    elif ((self.b1_pressed_loc is not None) and (self.rsz_w is None) and (self.rsz_h is None)):
        # Plain click released on the same row it was pressed on: open a
        # dropdown/checkbox when the press landed on one.
        r = self.MT.identify_row(y=event.y)
        if ((r is not None) and (r < (len(self.MT.row_positions) - 1)) and (r == self.b1_pressed_loc) and (self.b1_pressed_loc != self.closed_dropdown)):
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
            canvasy = self.canvasy(event.y)
            if (self.event_over_dropdown(r, datarn, event, canvasy) or self.event_over_checkbox(r, datarn, event, canvasy)):
                self.open_cell(event)
        else:
            self.mouseclick_outside_editor_or_dropdown_all_canvases(inside=True)
        self.b1_pressed_loc = None
        self.closed_dropdown = None
    self.dragged_row = None
    self.currently_resizing_width = False
    self.currently_resizing_height = False
    self.rsz_w = None
    self.rsz_h = None
    self.mouse_motion(event)
    if (self.extra_b1_release_func is not None):
        self.extra_b1_release_func(event)
def readonly_index(self, rows=None, readonly=True):
    """Set or clear the 'readonly' flag in cell_options for index rows.

    Args:
        rows: a single row int or an iterable of row ints; None (the
            default) means no rows, matching the old `rows=[]` behavior.
            The default was changed from a mutable `[]` to the None
            sentinel (standard Python idiom; the old default shared one
            list object across calls).
        readonly: True adds the flag, False removes it if present.
    """
    if rows is None:
        rows_ = []
    elif isinstance(rows, int):
        rows_ = [rows]
    else:
        rows_ = rows
    if not readonly:
        for r in rows_:
            # Remove the flag only when it exists; leave other options alone.
            if r in self.cell_options and 'readonly' in self.cell_options[r]:
                del self.cell_options[r]['readonly']
    else:
        for r in rows_:
            if r not in self.cell_options:
                self.cell_options[r] = {}
            self.cell_options[r]['readonly'] = True
def toggle_select_row(self, row, add_selection=True, redraw=True, run_binding_func=True, set_as_current=True):
    """Toggle the selection state of a row.

    A selected row is deselected; an unselected row is either added to
    the existing selection (add_selection=True) or made the sole
    selection otherwise.
    """
    if self.MT.row_selected(row):
        # Both modes deselect an already-selected row identically.
        self.MT.deselect(r=row, redraw=redraw)
    elif add_selection:
        self.add_selection(r=row, redraw=redraw, run_binding_func=run_binding_func, set_as_current=set_as_current)
    else:
        self.select_row(row, redraw=redraw)
def select_row(self, r, redraw=False):
    """Make row r the sole selection, optionally redraw, then fire the
    selection binding (if one is registered)."""
    mt = self.MT
    mt.delete_selection_rects()
    last_col = len(mt.col_positions) - 1
    mt.create_selected(r, 0, r + 1, last_col, 'rows')
    mt.set_currently_selected(r, 0, type_='row')
    if redraw:
        mt.main_table_redraw_grid_and_text(redraw_header=True, redraw_row_index=True)
    if self.selection_binding_func is not None:
        self.selection_binding_func(SelectRowEvent('select_row', int(r)))
def add_selection(self, r, redraw=False, run_binding_func=True, set_as_current=True):
    """Add row r to the selection without clearing existing boxes."""
    mt = self.MT
    if set_as_current:
        mt.set_currently_selected(r, 0, type_='row')
    mt.create_selected(r, 0, r + 1, len(mt.col_positions) - 1, 'rows')
    if redraw:
        mt.main_table_redraw_grid_and_text(redraw_header=False, redraw_row_index=True)
    if run_binding_func and self.selection_binding_func is not None:
        self.selection_binding_func(('select_row', r))
def get_cell_dimensions(self, datarn):
    """Measure the (width, height) in pixels needed to show data row
    datarn's index text.

    Empty text falls back to the default index width / minimum row
    height; the width grows by one text-height when the cell shows a
    dropdown arrow or checkbox.
    """
    text = self.get_valid_cell_data_as_str(datarn, fix=False)
    mt = self.MT
    if not text:
        width, height = mt.default_index_width, mt.min_row_height
    else:
        # Measure by rendering the text on the shared hidden canvas item.
        mt.txt_measure_canvas.itemconfig(mt.txt_measure_canvas_text, text=text, font=mt.index_font)
        left, top, right, bottom = mt.txt_measure_canvas.bbox(mt.txt_measure_canvas_text)
        width = right - left + 7
        height = bottom - top + 5
    has_widget = self.get_cell_kwargs(datarn, key='dropdown') or self.get_cell_kwargs(datarn, key='checkbox')
    if has_widget:
        return (width + mt.txt_h, height)
    return (width, height)
def set_row_height(self, row, height=None, only_set_if_too_small=False, recreate=True, return_new_height=False, displayed_only=False):
    """Set (or just compute) the height of a displayed row.

    Args:
        row: displayed row number.
        height: explicit pixel height; when None the height is measured
            from the row's index cell and its data cells' text.
        only_set_if_too_small: never shrink the row, only grow it.
        recreate: rebuild selection boxes after changing positions.
        return_new_height: compute and return the height without
            modifying MT.row_positions.
        displayed_only: measure only the currently visible columns.

    Returns:
        The (clamped) height in pixels.
    """
    r_norm = (row + 1)
    r_extra = (row + 2)
    min_rh = self.MT.min_row_height
    datarn = (row if self.MT.all_rows_displayed else self.MT.displayed_rows[row])
    if (height is None):
        # Choose which data columns to measure.
        if self.MT.all_columns_displayed:
            if displayed_only:
                (x1, y1, x2, y2) = self.MT.get_canvas_visible_area()
                (start_col, end_col) = self.MT.get_visible_columns(x1, x2)
            else:
                (start_col, end_col) = (0, (len(self.MT.data[row]) if self.MT.data else 0))
            iterable = range(start_col, end_col)
        else:
            if displayed_only:
                (x1, y1, x2, y2) = self.MT.get_canvas_visible_area()
                (start_col, end_col) = self.MT.get_visible_columns(x1, x2)
            else:
                (start_col, end_col) = (0, len(self.MT.displayed_columns))
            iterable = self.MT.displayed_columns[start_col:end_col]
        # Start from the index cell's height, then take the max over the
        # row's data cells, clamping each measurement to min/max.
        new_height = int(min_rh)
        (w_, h) = self.get_cell_dimensions(datarn)
        if (h < min_rh):
            h = int(min_rh)
        elif (h > self.MT.max_row_height):
            h = int(self.MT.max_row_height)
        if (h > new_height):
            new_height = h
        if self.MT.data:
            for datacn in iterable:
                txt = self.MT.get_valid_cell_data_as_str(datarn, datacn, get_displayed=True)
                if txt:
                    h = (self.MT.get_txt_h(txt) + 5)
                else:
                    h = min_rh
                if (h < min_rh):
                    h = int(min_rh)
                elif (h > self.MT.max_row_height):
                    h = int(self.MT.max_row_height)
                if (h > new_height):
                    new_height = h
    else:
        new_height = int(height)
    # Final clamp (mainly for an explicitly supplied height).
    if (new_height < min_rh):
        new_height = int(min_rh)
    elif (new_height > self.MT.max_row_height):
        new_height = int(self.MT.max_row_height)
    if (only_set_if_too_small and (new_height <= (self.MT.row_positions[(row + 1)] - self.MT.row_positions[row]))):
        return (self.MT.row_positions[(row + 1)] - self.MT.row_positions[row])
    if (not return_new_height):
        # Apply: move this row's bottom boundary and shift all later rows.
        new_row_pos = (self.MT.row_positions[row] + new_height)
        increment = (new_row_pos - self.MT.row_positions[r_norm])
        self.MT.row_positions[r_extra:] = [(e + increment) for e in islice(self.MT.row_positions, r_extra, len(self.MT.row_positions))]
        self.MT.row_positions[r_norm] = new_row_pos
        if recreate:
            self.MT.recreate_all_selection_boxes()
    return new_height
def set_width_of_index_to_text(self, text=None):
    """Resize the index column to fit the given text, or (text=None) the
    widest displayed row-index entry, then redraw."""
    if (((text is None) and (not self.MT._row_index) and isinstance(self.MT._row_index, list)) or (isinstance(self.MT._row_index, int) and (self.MT._row_index >= len(self.MT.data)))):
        # Nothing to measure: the index is an empty list, or it is a data
        # column number that points past the data.
        return
    # Shorthands for the shared hidden text-measuring canvas item.
    qconf = self.MT.txt_measure_canvas.itemconfig
    qbbox = self.MT.txt_measure_canvas.bbox
    qtxtm = self.MT.txt_measure_canvas_text
    new_width = int(self.MT.min_column_width)
    self.fix_index()
    if (text is not None):
        if text:
            qconf(qtxtm, text=text)
            b = qbbox(qtxtm)
            w = ((b[2] - b[0]) + 10)
            if (w > new_width):
                new_width = w
        else:
            w = self.MT.default_index_width
    else:
        # Measure every (displayed) row's index cell.
        if self.MT.all_rows_displayed:
            if isinstance(self.MT._row_index, list):
                iterable = range(len(self.MT._row_index))
            else:
                iterable = range(len(self.MT.data))
        else:
            iterable = self.MT.displayed_rows
        if isinstance(self.MT._row_index, list):
            for datarn in iterable:
                (w, h_) = self.get_cell_dimensions(datarn)
                if (w < self.MT.min_column_width):
                    w = int(self.MT.min_column_width)
                elif (w > self.MT.max_index_width):
                    w = int(self.MT.max_index_width)
                # Leave room for the checkbox/dropdown glyph.
                if self.get_cell_kwargs(datarn, key='checkbox'):
                    w += (self.MT.txt_h + 6)
                elif self.get_cell_kwargs(datarn, key='dropdown'):
                    w += (self.MT.txt_h + 4)
                if (w > new_width):
                    new_width = w
        elif isinstance(self.MT._row_index, int):
            # The index is a data column number: measure that column's cells.
            datacn = self.MT._row_index
            for datarn in iterable:
                txt = self.MT.get_valid_cell_data_as_str(datarn, datacn, get_displayed=True)
                if txt:
                    qconf(qtxtm, text=txt)
                    b = qbbox(qtxtm)
                    w = ((b[2] - b[0]) + 10)
                else:
                    w = self.MT.default_index_width
                if (w < self.MT.min_column_width):
                    w = int(self.MT.min_column_width)
                elif (w > self.MT.max_index_width):
                    w = int(self.MT.max_index_width)
                if (w > new_width):
                    new_width = w
    if (new_width == self.MT.min_column_width):
        # Add a little slack so the narrowest case isn't cramped.
        new_width = (self.MT.min_column_width + 10)
    self.set_width(new_width, set_TL=True)
    self.MT.main_table_redraw_grid_and_text(redraw_header=True, redraw_row_index=True)
def set_height_of_all_rows(self, height=None, only_set_if_too_small=False, recreate=True):
    """Rebuild MT.row_positions giving every row either its auto-fitted
    height (height is None) or the supplied fixed pixel height."""
    n_rows = len(self.MT.data)
    if height is None:
        heights = (
            self.set_row_height(rn, only_set_if_too_small=only_set_if_too_small, recreate=False, return_new_height=True)
            for rn in range(n_rows)
        )
    else:
        heights = (height for _ in range(n_rows))
    # Running sum of heights, starting at 0 (row boundaries).
    positions = [0]
    for h in heights:
        positions.append(positions[-1] + h)
    self.MT.row_positions = positions
    if recreate:
        self.MT.recreate_all_selection_boxes()
def align_cells(self, rows=None, align='global'):
    """Set (or clear) the per-row text alignment for index cells.

    Args:
        rows: a single row int or an iterable of rows; None (the default)
            means no rows. The default was changed from a mutable `[]` to
            the None sentinel (standard Python idiom); behavior for
            callers is unchanged. The dead `else: rows = rows` branch was
            also removed.
        align: an alignment value, or 'global' to delete the per-row
            override so the sheet-wide alignment applies again.
    """
    if rows is None:
        rows = []
    elif isinstance(rows, int):
        rows = [rows]
    if align == 'global':
        for r in rows:
            if r in self.cell_options and 'align' in self.cell_options[r]:
                del self.cell_options[r]['align']
    else:
        for r in rows:
            if r not in self.cell_options:
                self.cell_options[r] = {}
            self.cell_options[r]['align'] = align
def auto_set_index_width(self, end_row):
    """Auto-size the index column to fit the default label for end_row.

    Only applies when no custom row index is set and auto-resize is on.
    Returns True when the width was changed.
    """
    has_default_index = not self.MT._row_index and not isinstance(self.MT._row_index, int)
    if not (has_default_index and self.auto_resize_width):
        return False
    # Build the widest label this default-index mode would display.
    if self.default_index == 'letters':
        sample = f'{num2alpha(end_row)}'
    elif self.default_index == 'numbers':
        sample = f'{end_row}'
    elif self.default_index == 'both':
        sample = f'{(end_row + 1)} {num2alpha(end_row)}'
    else:
        return False
    new_w = self.MT.get_txt_w(sample, font=self.MT.index_font) + 20
    # Resize only when clearly too wide (>15px slack) or too narrow (>5px short).
    if self.current_width - new_w > 15 or new_w - self.current_width > 5:
        self.set_width(new_w, set_TL=True)
        return True
    return False
def redraw_highlight_get_text_fg(self, fr, sr, r, c_2, c_3, selections, datarn):
    """Draw row r's index-cell highlight (if any) and pick its text color.

    Args:
        fr, sr: top and bottom canvas y of the index cell.
        r: displayed row; datarn: data row (for per-cell options).
        c_2, c_3: '#RRGGBB' selected-cells / selected-rows backgrounds.
        selections: mapping of selection kind ('rows'/'cells') to the
            displayed rows in that kind of selection.

    Returns:
        (text_fg, redrawn) - redrawn is True when a highlight was drawn.
    """
    redrawn = False
    kwargs = self.get_cell_kwargs(datarn, key='highlight')
    if kwargs:
        # kwargs[0] is the highlight bg, kwargs[1] the highlight fg;
        # either may be None.
        if (kwargs[0] is not None):
            # Resolve a named color to its hex value.
            c_1 = (kwargs[0] if kwargs[0].startswith('#') else Color_Map_[kwargs[0]])
        if (('rows' in selections) and (r in selections['rows'])):
            tf = (self.index_selected_rows_fg if ((kwargs[1] is None) or self.MT.display_selected_fg_over_highlights) else kwargs[1])
            if (kwargs[0] is not None):
                # Average the highlight color with the selected-rows bg,
                # channel by channel.
                fill = ((f'#{int(((int(c_1[1:3], 16) + int(c_3[1:3], 16)) / 2)):02X}' + f'{int(((int(c_1[3:5], 16) + int(c_3[3:5], 16)) / 2)):02X}') + f'{int(((int(c_1[5:], 16) + int(c_3[5:], 16)) / 2)):02X}')
        elif (('cells' in selections) and (r in selections['cells'])):
            tf = (self.index_selected_cells_fg if ((kwargs[1] is None) or self.MT.display_selected_fg_over_highlights) else kwargs[1])
            if (kwargs[0] is not None):
                # Average the highlight color with the selected-cells bg.
                fill = ((f'#{int(((int(c_1[1:3], 16) + int(c_2[1:3], 16)) / 2)):02X}' + f'{int(((int(c_1[3:5], 16) + int(c_2[3:5], 16)) / 2)):02X}') + f'{int(((int(c_1[5:], 16) + int(c_2[5:], 16)) / 2)):02X}')
        else:
            tf = (self.index_fg if (kwargs[1] is None) else kwargs[1])
            if (kwargs[0] is not None):
                fill = kwargs[0]
        if (kwargs[0] is not None):
            redrawn = self.redraw_highlight(0, (fr + 1), (self.current_width - 1), sr, fill=fill, outline=(self.index_fg if (self.get_cell_kwargs(datarn, key='dropdown') and self.MT.show_dropdown_borders) else ''), tag='s')
    elif (not kwargs):
        # No per-cell highlight: foreground comes from selection state only.
        if (('rows' in selections) and (r in selections['rows'])):
            tf = self.index_selected_rows_fg
        elif (('cells' in selections) and (r in selections['cells'])):
            tf = self.index_selected_cells_fg
        else:
            tf = self.index_fg
    return (tf, redrawn)
def redraw_highlight(self, x1, y1, x2, y2, fill, outline, tag):
    """Draw (or recycle) a highlight rectangle; always returns True."""
    box = (x1, y1, x2, y2)
    if not self.hidd_high:
        # No hidden rectangle to recycle - create a fresh canvas item.
        iid = self.create_rectangle(box, fill=fill, outline=outline, tag=tag)
    else:
        iid, was_showing = self.hidd_high.popitem()
        self.coords(iid, box)
        if was_showing:
            self.itemconfig(iid, fill=fill, outline=outline)
        else:
            self.itemconfig(iid, fill=fill, outline=outline, tag=tag, state='normal')
    self.disp_high[iid] = True
    return True
def redraw_gridline(self, points, fill, width, tag):
    """Draw (or recycle) a grid polyline on the index canvas."""
    if not self.hidd_grid:
        iid = self.create_line(points, fill=fill, width=width, tag=tag)
    else:
        # Recycle a hidden line; re-enable it only if it was hidden.
        iid, was_showing = self.hidd_grid.popitem()
        self.coords(iid, points)
        options = dict(fill=fill, width=width, tag=tag)
        if not was_showing:
            options['state'] = 'normal'
        self.itemconfig(iid, **options)
    self.disp_grid[iid] = True
def redraw_dropdown(self, x1, y1, x2, y2, fill, outline, tag, draw_outline=True, draw_arrow=True, dd_is_open=False):
    """Draw (or recycle) a dropdown cell's border and arrow glyph.

    The arrow is a 3-point polyline; its vertical orientation flips when
    dd_is_open is True.
    """
    if (draw_outline and self.MT.show_dropdown_borders):
        self.redraw_highlight((x1 + 1), (y1 + 1), x2, y2, fill='', outline=self.index_fg, tag=tag)
    if draw_arrow:
        # Vertical placement: center the arrow within the first text line,
        # nudging when it would spill out of the line's band.
        topysub = floor((self.MT.half_txt_h / 2))
        mid_y = (y1 + floor((self.MT.min_row_height / 2)))
        if (((mid_y + topysub) + 1) >= ((y1 + self.MT.txt_h) - 1)):
            mid_y -= 1
        if (((mid_y - topysub) + 2) <= ((y1 + 4) + topysub)):
            mid_y -= 1
            ty1 = (((mid_y + topysub) + 1) if dd_is_open else ((mid_y - topysub) + 3))
            ty2 = (((mid_y - topysub) + 3) if dd_is_open else ((mid_y + topysub) + 1))
            ty3 = (((mid_y + topysub) + 1) if dd_is_open else ((mid_y - topysub) + 3))
        else:
            ty1 = (((mid_y + topysub) + 1) if dd_is_open else ((mid_y - topysub) + 2))
            ty2 = (((mid_y - topysub) + 2) if dd_is_open else ((mid_y + topysub) + 1))
            ty3 = (((mid_y + topysub) + 1) if dd_is_open else ((mid_y - topysub) + 2))
        # Horizontal placement: right-aligned in the cell; adjust tx1 so
        # the two arrow segments are symmetric about tx2.
        tx1 = ((x2 - self.MT.txt_h) + 1)
        tx2 = ((x2 - self.MT.half_txt_h) - 1)
        tx3 = (x2 - 3)
        if ((tx2 - tx1) > (tx3 - tx2)):
            tx1 += ((tx2 - tx1) - (tx3 - tx2))
        elif ((tx2 - tx1) < (tx3 - tx2)):
            tx1 -= ((tx3 - tx2) - (tx2 - tx1))
        points = (tx1, ty1, tx2, ty2, tx3, ty3)
        if self.hidd_dropdown:
            # Recycle a pooled arrow item.
            (t, sh) = self.hidd_dropdown.popitem()
            self.coords(t, points)
            if sh:
                self.itemconfig(t, fill=fill)
            else:
                self.itemconfig(t, fill=fill, tag=tag, state='normal')
            self.lift(t)
        else:
            t = self.create_line(points, fill=fill, width=2, capstyle=tk.ROUND, joinstyle=tk.ROUND, tag=tag)
        self.disp_dropdown[t] = True
def redraw_checkbox(self, x1, y1, x2, y2, fill, outline, tag, draw_check=False):
    """Draw (or recycle) a checkbox outline in the cell, plus the inner
    'check' polygon when draw_check is True.

    Note: fill/outline are deliberately swapped for the outer box versus
    the inner check polygon.
    """
    points = self.MT.get_checkbox_points(x1, y1, x2, y2)
    if self.hidd_checkbox:
        # Recycle a pooled polygon item.
        (t, sh) = self.hidd_checkbox.popitem()
        self.coords(t, points)
        if sh:
            self.itemconfig(t, fill=outline, outline=fill)
        else:
            self.itemconfig(t, fill=outline, outline=fill, tag=tag, state='normal')
        self.lift(t)
    else:
        t = self.create_polygon(points, fill=outline, outline=fill, tag=tag, smooth=True)
    self.disp_checkbox[t] = True
    if draw_check:
        # Inset the check polygon a few pixels inside the box.
        x1 = (x1 + 4)
        y1 = (y1 + 4)
        x2 = (x2 - 3)
        y2 = (y2 - 3)
        points = self.MT.get_checkbox_points(x1, y1, x2, y2, radius=4)
        if self.hidd_checkbox:
            (t, sh) = self.hidd_checkbox.popitem()
            self.coords(t, points)
            if sh:
                self.itemconfig(t, fill=fill, outline=outline)
            else:
                self.itemconfig(t, fill=fill, outline=outline, tag=tag, state='normal')
            self.lift(t)
        else:
            t = self.create_polygon(points, fill=fill, outline=outline, tag=tag, smooth=True)
        self.disp_checkbox[t] = True
    def redraw_grid_and_text(self, last_row_line_pos, scrollpos_top, y_stop, start_row, end_row, scrollpos_bot, row_pos_exists):
        """Redraw the visible slice of the row index.

        Draws horizontal grid lines, highlights, dropdown/checkbox widgets and
        (possibly clipped) cell text for display rows start_row..end_row.
        Canvas items from the previous frame are recycled through the
        hidd_*/disp_* pools; anything not reused is hidden at the end.
        """
        try:
            self.configure(scrollregion=(0, 0, self.current_width, ((last_row_line_pos + self.MT.empty_vertical) + 2)))
        except Exception:
            # Canvas may be mid-destruction; nothing to draw.
            return
        # Move everything drawn last frame into the hidden pools for reuse.
        self.hidd_text.update(self.disp_text)
        self.disp_text = {}
        self.hidd_high.update(self.disp_high)
        self.disp_high = {}
        self.hidd_grid.update(self.disp_grid)
        self.disp_grid = {}
        self.hidd_dropdown.update(self.disp_dropdown)
        self.disp_dropdown = {}
        self.hidd_checkbox.update(self.disp_checkbox)
        self.disp_checkbox = {}
        self.visible_row_dividers = {}
        draw_y = self.MT.row_positions[start_row]
        xend = (self.current_width - 6)
        # Thin strip at the right edge used for drag-resizing the index width.
        self.row_width_resize_bbox = ((self.current_width - 2), scrollpos_top, self.current_width, scrollpos_bot)
        if ((self.MT.show_horizontal_grid or self.height_resizing_enabled) and row_pos_exists):
            # Build one long polyline covering every horizontal row divider.
            points = [(self.current_width - 1), (y_stop - 1), (self.current_width - 1), (scrollpos_top - 1), (- 1), (scrollpos_top - 1)]
            for r in range((start_row + 1), end_row):
                draw_y = self.MT.row_positions[r]
                if self.height_resizing_enabled:
                    # Remember a small grab zone around each divider for mouse resizing.
                    self.visible_row_dividers[r] = (1, (draw_y - 2), xend, (draw_y + 2))
                points.extend(((- 1), draw_y, self.current_width, draw_y, (- 1), draw_y, (- 1), (self.MT.row_positions[(r + 1)] if ((len(self.MT.row_positions) - 1) > r) else draw_y)))
            self.redraw_gridline(points=points, fill=self.index_grid_fg, width=1, tag='h')
        # Resolve selection colours to hex (they may be tk colour names).
        c_2 = (self.index_selected_cells_bg if self.index_selected_cells_bg.startswith('#') else Color_Map_[self.index_selected_cells_bg])
        c_3 = (self.index_selected_rows_bg if self.index_selected_rows_bg.startswith('#') else Color_Map_[self.index_selected_rows_bg])
        font = self.MT.index_font
        selections = self.get_redraw_selections(start_row, end_row)
        dd_coords = self.get_existing_dropdown_coords()
        for r in range(start_row, (end_row - 1)):
            rtopgridln = self.MT.row_positions[r]
            rbotgridln = self.MT.row_positions[(r + 1)]
            if ((rbotgridln - rtopgridln) < self.MT.txt_h):
                # Row too short to show any text.
                continue
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
            (fill, dd_drawn) = self.redraw_highlight_get_text_fg(rtopgridln, rbotgridln, r, c_2, c_3, selections, datarn)
            if ((datarn in self.cell_options) and ('align' in self.cell_options[datarn])):
                align = self.cell_options[datarn]['align']
            else:
                align = self.align
            dropdown_kwargs = self.get_cell_kwargs(datarn, key='dropdown')
            # Per alignment: compute the text anchor x (draw_x) and the max text
            # width (mw), leaving room for a dropdown arrow when present.
            if (align == 'w'):
                draw_x = 3
                if dropdown_kwargs:
                    mw = ((self.current_width - self.MT.txt_h) - 2)
                    self.redraw_dropdown(0, rtopgridln, (self.current_width - 1), (rbotgridln - 1), fill=fill, outline=fill, tag='dd', draw_outline=(not dd_drawn), draw_arrow=(mw >= 5), dd_is_open=(dd_coords == r))
                else:
                    mw = (self.current_width - 2)
            elif (align == 'e'):
                if dropdown_kwargs:
                    mw = ((self.current_width - self.MT.txt_h) - 2)
                    draw_x = ((self.current_width - 5) - self.MT.txt_h)
                    self.redraw_dropdown(0, rtopgridln, (self.current_width - 1), (rbotgridln - 1), fill=fill, outline=fill, tag='dd', draw_outline=(not dd_drawn), draw_arrow=(mw >= 5), dd_is_open=(dd_coords == r))
                else:
                    mw = (self.current_width - 2)
                    draw_x = (self.current_width - 3)
            elif (align == 'center'):
                if dropdown_kwargs:
                    mw = ((self.current_width - self.MT.txt_h) - 2)
                    draw_x = ceil(((self.current_width - self.MT.txt_h) / 2))
                    self.redraw_dropdown(0, rtopgridln, (self.current_width - 1), (rbotgridln - 1), fill=fill, outline=fill, tag='dd', draw_outline=(not dd_drawn), draw_arrow=(mw >= 5), dd_is_open=(dd_coords == r))
                else:
                    mw = (self.current_width - 1)
                    draw_x = floor((self.current_width / 2))
            checkbox_kwargs = self.get_cell_kwargs(datarn, key='checkbox')
            if ((not dropdown_kwargs) and checkbox_kwargs and (mw > 2)):
                # Shift the text right to make room for the checkbox square.
                box_w = (self.MT.txt_h + 1)
                mw -= box_w
                if (align == 'w'):
                    draw_x += (box_w + 1)
                elif (align == 'center'):
                    draw_x += (ceil((box_w / 2)) + 1)
                    mw -= 1
                else:
                    mw -= 3
                try:
                    draw_check = (self.MT._row_index[datarn] if isinstance(self.MT._row_index, (list, tuple)) else self.MT.data[datarn][self.MT._row_index])
                except Exception:
                    draw_check = False
                self.redraw_checkbox(2, (rtopgridln + 2), (self.MT.txt_h + 3), ((rtopgridln + self.MT.txt_h) + 3), fill=(fill if (checkbox_kwargs['state'] == 'normal') else self.index_grid_fg), outline='', tag='cb', draw_check=draw_check)
            lns = self.get_valid_cell_data_as_str(datarn, fix=False).split('\n')
            if (lns == ['']):
                if self.show_default_index_for_empty:
                    lns = (get_n2a(datarn, self.default_index),)
                else:
                    continue
            draw_y = (rtopgridln + self.MT.fl_ins)
            if (mw > 5):
                draw_y = (rtopgridln + self.MT.fl_ins)
                # Skip text lines scrolled off the top of the row.
                start_ln = int(((scrollpos_top - rtopgridln) / self.MT.xtra_lines_increment))
                if (start_ln < 0):
                    start_ln = 0
                draw_y += (start_ln * self.MT.xtra_lines_increment)
                if ((((draw_y + self.MT.half_txt_h) - 1) <= rbotgridln) and (len(lns) > start_ln)):
                    for txt in islice(lns, start_ln, None):
                        # Reuse a pooled text item when one is available.
                        if self.hidd_text:
                            (iid, showing) = self.hidd_text.popitem()
                            self.coords(iid, draw_x, draw_y)
                            if showing:
                                self.itemconfig(iid, text=txt, fill=fill, font=font, anchor=align)
                            else:
                                self.itemconfig(iid, text=txt, fill=fill, font=font, anchor=align, state='normal')
                            self.tag_raise(iid)
                        else:
                            iid = self.create_text(draw_x, draw_y, text=txt, fill=fill, font=font, anchor=align, tag='t')
                        self.disp_text[iid] = True
                        wd = self.bbox(iid)
                        wd = (wd[2] - wd[0])
                        if (wd > mw):
                            # Text overflows: take a proportional first cut, then
                            # trim character-by-character on the side that keeps
                            # the aligned edge visible.
                            if ((align == 'w') and dropdown_kwargs):
                                txt = txt[:int((len(txt) * (mw / wd)))]
                                self.itemconfig(iid, text=txt)
                                wd = self.bbox(iid)
                                while ((wd[2] - wd[0]) > mw):
                                    txt = txt[:(- 1)]
                                    self.itemconfig(iid, text=txt)
                                    wd = self.bbox(iid)
                            elif ((align == 'e') and (dropdown_kwargs or checkbox_kwargs)):
                                txt = txt[(len(txt) - int((len(txt) * (mw / wd)))):]
                                self.itemconfig(iid, text=txt)
                                wd = self.bbox(iid)
                                while ((wd[2] - wd[0]) > mw):
                                    txt = txt[1:]
                                    self.itemconfig(iid, text=txt)
                                    wd = self.bbox(iid)
                            elif ((align == 'center') and (dropdown_kwargs or checkbox_kwargs)):
                                # Centre-aligned: trim alternately from both ends.
                                tmod = ceil(((len(txt) - int((len(txt) * (mw / wd)))) / 2))
                                txt = txt[(tmod - 1):(- tmod)]
                                self.itemconfig(iid, text=txt)
                                wd = self.bbox(iid)
                                self.c_align_cyc = cycle(self.centre_alignment_text_mod_indexes)
                                while ((wd[2] - wd[0]) > mw):
                                    txt = txt[next(self.c_align_cyc)]
                                    self.itemconfig(iid, text=txt)
                                    wd = self.bbox(iid)
                                self.coords(iid, draw_x, draw_y)
                        draw_y += self.MT.xtra_lines_increment
                        if (((draw_y + self.MT.half_txt_h) - 1) > rbotgridln):
                            # No vertical room for further lines in this row.
                            break
        # Hide any pooled items that were not reused this frame.
        for dct in (self.hidd_text, self.hidd_high, self.hidd_grid, self.hidd_dropdown, self.hidd_checkbox):
            for (iid, showing) in dct.items():
                if showing:
                    self.itemconfig(iid, state='hidden')
                    dct[iid] = False
def get_redraw_selections(self, startr, endr):
d = defaultdict(list)
for item in chain(self.find_withtag('cells'), self.find_withtag('rows')):
tags = self.gettags(item)
d[tags[0]].append(tuple((int(e) for e in tags[1].split('_') if e)))
d2 = {}
if ('cells' in d):
d2['cells'] = {r for r in range(startr, endr) for (r1, c1, r2, c2) in d['cells'] if ((r1 <= r) and (r2 > r))}
if ('rows' in d):
d2['rows'] = {r for r in range(startr, endr) for (r1, c1, r2, c2) in d['rows'] if ((r1 <= r) and (r2 > r))}
return d2
    def open_cell(self, event=None, ignore_existing_editor=False):
        """Open the appropriate editor for the currently selected index cell:
        a dropdown, a checkbox toggle, or the plain text editor."""
        # Bail out when nothing is selected, or an editor is already open and
        # we were not told to ignore it.
        if ((not self.MT.anything_selected()) or ((not ignore_existing_editor) and (self.text_editor_id is not None))):
            return
        currently_selected = self.MT.currently_selected()
        if (not currently_selected):
            return
        r = int(currently_selected[0])
        datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
        if self.get_cell_kwargs(datarn, key='readonly'):
            return
        elif (self.get_cell_kwargs(datarn, key='dropdown') or self.get_cell_kwargs(datarn, key='checkbox')):
            # Widget cells only react to events that are meant to open them.
            if self.MT.event_opens_dropdown_or_checkbox(event):
                if self.get_cell_kwargs(datarn, key='dropdown'):
                    self.open_dropdown_window(r, event=event)
                elif self.get_cell_kwargs(datarn, key='checkbox'):
                    self.click_checkbox(r, datarn)
        elif self.edit_cell_enabled:
            self.open_text_editor(event=event, r=r, dropdown=False)
def get_cell_align(self, r):
datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
if ((datarn in self.cell_options) and ('align' in self.cell_options[datarn])):
align = self.cell_options[datarn]['align']
else:
align = self.align
return align
    def open_text_editor(self, event=None, r=0, text=None, state='normal', dropdown=False):
        """Open an in-cell text editor over index row *r*.

        Returns True when an editor was opened, False when the triggering
        event should not open one (or the user hook vetoed it), and None when
        an already-open editor was merely given new text.
        """
        # NOTE(review): the *text* parameter is discarded immediately — the
        # initial editor text is always derived from the event/cell below.
        text = None
        extra_func_key = '??'
        # Decide the initial text from how the editor was triggered.
        if ((event is None) or self.MT.event_opens_dropdown_or_checkbox(event)):
            if (event is not None):
                if (hasattr(event, 'keysym') and (event.keysym == 'Return')):
                    extra_func_key = 'Return'
                elif (hasattr(event, 'keysym') and (event.keysym == 'F2')):
                    extra_func_key = 'F2'
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
            text = self.get_cell_data(datarn, none_to_empty_str=True, redirect_int=True)
        elif ((event is not None) and ((hasattr(event, 'keysym') and (event.keysym == 'BackSpace')) or (event.keycode in (8, )))):
            extra_func_key = 'BackSpace'
            text = ''
        elif ((event is not None) and ((hasattr(event, 'char') and event.char.isalpha()) or (hasattr(event, 'char') and event.char.isdigit()) or (hasattr(event, 'char') and (event.char in symbols_set)))):
            # A printable key starts editing with that character.
            extra_func_key = event.char
            text = event.char
        else:
            return False
        self.text_editor_loc = r
        # Give the user "begin edit" hook a chance to veto or replace the text.
        if (self.extra_begin_edit_cell_func is not None):
            try:
                text = self.extra_begin_edit_cell_func(EditIndexEvent(r, extra_func_key, text, 'begin_edit_index'))
            except Exception:
                return False
            if (text is None):
                return False
            else:
                text = (text if isinstance(text, str) else f'{text}')
        text = ('' if (text is None) else text)
        if self.MT.cell_auto_resize_enabled:
            self.set_row_height_run_binding(r)
        # NOTE(review): text_editor_loc was just set to r above, so this
        # condition reduces to "an editor object already exists" — confirm.
        if ((r == self.text_editor_loc) and (self.text_editor is not None)):
            self.text_editor.set_text(((self.text_editor.get() + '') if (not isinstance(text, str)) else text))
            return
        if (self.text_editor is not None):
            self.destroy_text_editor()
        # Scroll the row into view before measuring its on-screen geometry.
        if (not self.MT.see(r=r, c=0, keep_yscroll=True, check_cell_visibility=True)):
            self.MT.refresh()
        self.text_editor_loc = r
        x = 0
        y = (self.MT.row_positions[r] + 1)
        w = (self.current_width + 1)
        h = (self.MT.row_positions[(r + 1)] - y)
        datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
        if (text is None):
            text = self.get_cell_data(datarn, none_to_empty_str=True, redirect_int=True)
        (bg, fg) = (self.index_bg, self.index_fg)
        self.text_editor = TextEditor(self, text=text, font=self.MT.index_font, state=state, width=w, height=h, border_color=self.MT.table_selected_cells_border_fg, show_border=False, bg=bg, fg=fg, popup_menu_font=self.MT.popup_menu_font, popup_menu_fg=self.MT.popup_menu_fg, popup_menu_bg=self.MT.popup_menu_bg, popup_menu_highlight_bg=self.MT.popup_menu_highlight_bg, popup_menu_highlight_fg=self.MT.popup_menu_highlight_fg, align=self.get_cell_align(r), r=r, newline_binding=self.text_editor_newline_binding)
        self.text_editor.update_idletasks()
        self.text_editor_id = self.create_window((x, y), window=self.text_editor, anchor='nw')
        if (not dropdown):
            self.text_editor.textedit.focus_set()
            self.text_editor.scroll_to_bottom()
        # Alt-Return (Option-Return on macOS) grows the editor by one line.
        self.text_editor.textedit.bind('<Alt-Return>', (lambda _x: self.text_editor_newline_binding(r=r)))
        if (USER_OS == 'darwin'):
            self.text_editor.textedit.bind('<Option-Return>', (lambda _x: self.text_editor_newline_binding(r=r)))
        for (key, func) in self.MT.text_editor_user_bound_keys.items():
            self.text_editor.textedit.bind(key, func)
        # Tab/Return commit; Escape cancels; FocusOut commits unless this
        # editor backs a dropdown.
        self.text_editor.textedit.bind('<Tab>', (lambda _x: self.close_text_editor((r, 'Tab'))))
        self.text_editor.textedit.bind('<Return>', (lambda _x: self.close_text_editor((r, 'Return'))))
        if (not dropdown):
            self.text_editor.textedit.bind('<FocusOut>', (lambda _x: self.close_text_editor((r, 'FocusOut'))))
        self.text_editor.textedit.bind('<Escape>', (lambda _x: self.close_text_editor((r, 'Escape'))))
        return True
    def text_editor_newline_binding(self, r=0, c=0, event=None, check_lines=True):
        """Grow the open text editor on row *r* by one text line (bound to
        Alt-Return), re-anchoring any open dropdown list beneath it."""
        if self.height_resizing_enabled:
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
            curr_height = self.text_editor.winfo_height()
            # Only grow when the text no longer fits (unless check_lines=False).
            if ((not check_lines) or (self.MT.get_lines_cell_height((self.text_editor.get_num_lines() + 1), font=self.MT.index_font) > curr_height)):
                new_height = (curr_height + self.MT.xtra_lines_increment)
                space_bot = self.MT.get_space_bot(r)
                # Clamp growth to the space available below the row.
                if (new_height > space_bot):
                    new_height = space_bot
                if (new_height != curr_height):
                    self.set_row_height(r, new_height)
                    self.MT.refresh()
                    self.text_editor.config(height=new_height)
                    self.coords(self.text_editor_id, 0, (self.MT.row_positions[r] + 1))
                    kwargs = self.get_cell_kwargs(datarn, key='dropdown')
                    if kwargs:
                        # Re-position the dropdown list under the taller editor.
                        text_editor_h = self.text_editor.winfo_height()
                        (win_h, anchor) = self.get_dropdown_height_anchor(r, text_editor_h)
                        if (anchor == 'nw'):
                            self.coords(kwargs['canvas_id'], 0, ((self.MT.row_positions[r] + text_editor_h) - 1))
                            self.itemconfig(kwargs['canvas_id'], anchor=anchor, height=win_h)
                        elif (anchor == 'sw'):
                            self.coords(kwargs['canvas_id'], 0, self.MT.row_positions[r])
                            self.itemconfig(kwargs['canvas_id'], anchor=anchor, height=win_h)
    def refresh_open_window_positions(self):
        """Re-position/resize any open text editor and dropdown list after
        scrolling or row-height changes."""
        if (self.text_editor is not None):
            r = self.text_editor_loc
            self.text_editor.config(height=(self.MT.row_positions[(r + 1)] - self.MT.row_positions[r]))
            self.coords(self.text_editor_id, 0, self.MT.row_positions[r])
        if (self.existing_dropdown_window is not None):
            r = self.get_existing_dropdown_coords()
            if (self.text_editor is None):
                # No editor: measure from the row itself and keep the current anchor.
                text_editor_h = (self.MT.row_positions[(r + 1)] - self.MT.row_positions[r])
                anchor = self.itemcget(self.existing_dropdown_canvas_id, 'anchor')
                win_h = 0
            else:
                text_editor_h = self.text_editor.winfo_height()
                (win_h, anchor) = self.get_dropdown_height_anchor(r, text_editor_h)
            # NOTE(review): win_h is computed but never applied here — only the
            # y-coordinate is updated; confirm whether height should be set too.
            if (anchor == 'nw'):
                self.coords(self.existing_dropdown_canvas_id, 0, ((self.MT.row_positions[r] + text_editor_h) - 1))
            elif (anchor == 'sw'):
                self.coords(self.existing_dropdown_canvas_id, 0, self.MT.row_positions[r])
def bind_cell_edit(self, enable=True):
if enable:
self.edit_cell_enabled = True
else:
self.edit_cell_enabled = False
def bind_text_editor_destroy(self, binding, r):
self.text_editor.textedit.bind('<Return>', (lambda _x: binding((r, 'Return'))))
self.text_editor.textedit.bind('<FocusOut>', (lambda _x: binding((r, 'FocusOut'))))
self.text_editor.textedit.bind('<Escape>', (lambda _x: binding((r, 'Escape'))))
self.text_editor.textedit.focus_set()
def destroy_text_editor(self, reason=None):
self.text_editor_loc = None
try:
self.delete(self.text_editor_id)
except Exception:
pass
try:
self.text_editor.destroy()
except Exception:
pass
self.text_editor = None
self.text_editor_id = None
if (reason == 'Escape'):
self.focus_set()
    def close_text_editor(self, editor_info=None):
        """Commit or cancel the open text editor; editor_info is (row, reason).

        Returns 'break' to stop tkinter from processing the event further, or
        None when the editor was cancelled with Escape.
        """
        focused = self.focus_get()
        # Clicking into the editor's right-click menu must not close the editor.
        try:
            if (focused == self.text_editor.textedit.rc_popup_menu):
                return 'break'
        except Exception:
            pass
        if ((focused is None) and editor_info):
            return 'break'
        if (editor_info[1] == 'Escape'):
            self.destroy_text_editor('Escape')
            self.close_dropdown_window()
            return
        text_editor_value = self.text_editor.get()
        self.destroy_text_editor()
        r = editor_info[0]
        datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
        # Three commit paths: no user hook / hook without validation / hook
        # with validation (where the hook may replace the value).
        if ((self.extra_end_edit_cell_func is None) and self.input_valid_for_cell(datarn, text_editor_value)):
            self.set_cell_data_undo(r, datarn=datarn, value=text_editor_value, check_input_valid=False)
        elif ((self.extra_end_edit_cell_func is not None) and (not self.MT.edit_cell_validation) and self.input_valid_for_cell(datarn, text_editor_value)):
            self.set_cell_data_undo(r, datarn=datarn, value=text_editor_value, check_input_valid=False)
            self.extra_end_edit_cell_func(EditIndexEvent(r, editor_info[1], f'{text_editor_value}', 'end_edit_index'))
        elif ((self.extra_end_edit_cell_func is not None) and self.MT.edit_cell_validation):
            validation = self.extra_end_edit_cell_func(EditIndexEvent(r, editor_info[1], f'{text_editor_value}', 'end_edit_index'))
            if (validation is not None):
                text_editor_value = validation
            if self.input_valid_for_cell(datarn, text_editor_value):
                self.set_cell_data_undo(r, datarn=datarn, value=text_editor_value, check_input_valid=False)
        self.close_dropdown_window(r)
        self.MT.recreate_all_selection_boxes()
        self.MT.refresh()
        if (editor_info[1] != 'FocusOut'):
            self.focus_set()
        return 'break'
    def get_dropdown_height_anchor(self, r, text_editor_h=None):
        """Estimate a pixel height for row *r*'s dropdown list and return
        (height, anchor); the anchor is always 'nw'."""
        win_h = 5
        datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
        # Sum the display height of at most the first six values.
        for (i, v) in enumerate(self.get_cell_kwargs(datarn, key='dropdown')['values']):
            v_numlines = len((v.split('\n') if isinstance(v, str) else f'{v}'.split('\n')))
            if (v_numlines > 1):
                win_h += ((self.MT.fl_ins + (v_numlines * self.MT.xtra_lines_increment)) + 5)
            else:
                win_h += self.MT.min_row_height
            if (i == 5):
                break
        if (win_h > 500):
            win_h = 500
        # NOTE(review): space below is measured from row 0, not row r — confirm
        # this is intentional for the index column.
        space_bot = self.MT.get_space_bot(0, text_editor_h)
        win_h2 = int(win_h)
        # Shrink to fit the available space, but never below one text line.
        if (win_h > space_bot):
            win_h = (space_bot - 1)
        if (win_h < (self.MT.txt_h + 5)):
            win_h = (self.MT.txt_h + 5)
        elif (win_h > win_h2):
            win_h = win_h2
        return (win_h, 'nw')
def dropdown_text_editor_modified(self, dd_window, event, modified_func):
if modified_func:
modified_func(event)
dd_window.search_and_see(event)
    def open_dropdown_window(self, r, datarn=None, event=None):
        """Open the dropdown list for index row *r*; editable ('normal' state)
        dropdowns are backed by a text editor for live filtering."""
        self.destroy_text_editor('Escape')
        self.destroy_opened_dropdown_window()
        if (datarn is None):
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
        kwargs = self.get_cell_kwargs(datarn, key='dropdown')
        if (kwargs['state'] == 'normal'):
            # Editable dropdown: a text editor must open first.
            if (not self.open_text_editor(event=event, r=r, dropdown=True)):
                return
        (win_h, anchor) = self.get_dropdown_height_anchor(r)
        window = self.MT.parentframe.dropdown_class(self.MT.winfo_toplevel(), r, 0, width=self.current_width, height=win_h, font=self.MT.index_font, colors={'bg': self.MT.popup_menu_bg, 'fg': self.MT.popup_menu_fg, 'highlight_bg': self.MT.popup_menu_highlight_bg, 'highlight_fg': self.MT.popup_menu_highlight_fg}, outline_color=self.MT.popup_menu_fg, values=kwargs['values'], close_dropdown_window=self.close_dropdown_window, search_function=kwargs['search_function'], arrowkey_RIGHT=self.MT.arrowkey_RIGHT, arrowkey_LEFT=self.MT.arrowkey_LEFT, align='w', single_index='r')
        ypos = self.MT.row_positions[(r + 1)]
        kwargs['canvas_id'] = self.create_window((0, ypos), window=window, anchor=anchor)
        if (kwargs['state'] == 'normal'):
            # Typing in the editor live-filters the dropdown list.
            self.text_editor.textedit.bind('<<TextModified>>', (lambda _x: self.dropdown_text_editor_modified(window, DropDownModifiedEvent('IndexComboboxModified', r, 0, self.text_editor.get()), kwargs['modified_function'])))
            self.update_idletasks()
            try:
                self.after(1, (lambda : self.text_editor.textedit.focus()))
                # NOTE(review): scroll_to_bottom() is *called* here and its
                # return value passed to after(); likely meant to be a callback.
                self.after(2, self.text_editor.scroll_to_bottom())
            except Exception:
                return
            redraw = False
        else:
            # Read-only dropdown: close it when it loses focus.
            window.bind('<FocusOut>', (lambda _x: self.close_dropdown_window(r)))
            self.update_idletasks()
            window.focus_set()
            redraw = True
        self.existing_dropdown_window = window
        kwargs['window'] = window
        self.existing_dropdown_canvas_id = kwargs['canvas_id']
        if redraw:
            self.MT.main_table_redraw_grid_and_text(redraw_header=False, redraw_row_index=True, redraw_table=False)
    def close_dropdown_window(self, r=None, selection=None, redraw=True):
        """Close the open dropdown; when *selection* is given, commit it to
        index row *r* first (firing the select/end-edit callbacks)."""
        if ((r is not None) and (selection is not None)):
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
            kwargs = self.get_cell_kwargs(datarn, key='dropdown')
            if (kwargs['select_function'] is not None):
                kwargs['select_function'](EditIndexEvent(r, 'IndexComboboxSelected', f'{selection}', 'end_edit_index'))
            # Same three commit paths as close_text_editor: no hook / hook with
            # validation (which may replace the value) / hook without validation.
            if (self.extra_end_edit_cell_func is None):
                self.set_cell_data_undo(r, datarn=datarn, value=selection, redraw=(not redraw))
            elif ((self.extra_end_edit_cell_func is not None) and self.MT.edit_cell_validation):
                validation = self.extra_end_edit_cell_func(EditIndexEvent(r, 'IndexComboboxSelected', f'{selection}', 'end_edit_index'))
                if (validation is not None):
                    selection = validation
                self.set_cell_data_undo(r, datarn=datarn, value=selection, redraw=(not redraw))
            elif ((self.extra_end_edit_cell_func is not None) and (not self.MT.edit_cell_validation)):
                self.set_cell_data_undo(r, datarn=datarn, value=selection, redraw=(not redraw))
                self.extra_end_edit_cell_func(EditIndexEvent(r, 'IndexComboboxSelected', f'{selection}', 'end_edit_index'))
            self.focus_set()
            self.MT.recreate_all_selection_boxes()
        self.destroy_text_editor('Escape')
        self.destroy_opened_dropdown_window(r)
        if redraw:
            self.MT.refresh()
def get_existing_dropdown_coords(self):
if (self.existing_dropdown_window is not None):
return int(self.existing_dropdown_window.r)
return None
def mouseclick_outside_editor_or_dropdown(self, inside=False):
closed_dd_coords = self.get_existing_dropdown_coords()
if ((self.text_editor_loc is not None) and (self.text_editor is not None)):
self.close_text_editor((self.text_editor_loc, 'ButtonPress-1'))
if (closed_dd_coords is not None):
self.destroy_opened_dropdown_window(closed_dd_coords)
if inside:
self.MT.main_table_redraw_grid_and_text(redraw_header=False, redraw_row_index=True, redraw_table=False)
return closed_dd_coords
def mouseclick_outside_editor_or_dropdown_all_canvases(self, inside=False):
self.CH.mouseclick_outside_editor_or_dropdown()
self.MT.mouseclick_outside_editor_or_dropdown()
return self.mouseclick_outside_editor_or_dropdown(inside)
    def destroy_opened_dropdown_window(self, r=None, datarn=None):
        """Destroy the open dropdown's widget and canvas item, then reset the
        stored ids in that cell's dropdown options."""
        if ((r is None) and (datarn is None) and (self.existing_dropdown_window is not None)):
            r = self.get_existing_dropdown_coords()
        if ((r is not None) or (datarn is not None)):
            if (datarn is None):
                datarn_ = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
            else:
                # NOTE(review): assigns r here (which may be None) rather than
                # datarn — confirm this is intended.
                datarn_ = r
        else:
            datarn_ = None
        # Best-effort teardown: item/window may already be gone.
        try:
            self.delete(self.existing_dropdown_canvas_id)
        except Exception:
            pass
        self.existing_dropdown_canvas_id = None
        try:
            self.existing_dropdown_window.destroy()
        except Exception:
            pass
        self.existing_dropdown_window = None
        kwargs = self.get_cell_kwargs(datarn_, key='dropdown')
        if kwargs:
            # Reset stored handles to sentinel strings.
            kwargs['canvas_id'] = 'no dropdown open'
            kwargs['window'] = 'no dropdown open'
            try:
                # NOTE(review): deletes by the sentinel string just assigned,
                # i.e. a (nonexistent) canvas tag — confirm intent.
                self.delete(kwargs['canvas_id'])
            except Exception:
                pass
    def set_cell_data_undo(self, r=0, datarn=None, value='', cell_resize=True, undo=True, redraw=True, check_input_valid=True):
        """Write *value* into index cell *datarn* (display row *r*), recording
        an undo snapshot and optionally resizing/redrawing the row."""
        if (datarn is None):
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
        if isinstance(self.MT._row_index, int):
            # Index redirects to a data column: delegate to the main table.
            # NOTE(review): passes undo=True unconditionally, ignoring the
            # *undo* parameter — confirm.
            self.MT.set_cell_data_undo(r=r, c=self.MT._row_index, datarn=datarn, value=value, undo=True)
        else:
            self.fix_index(datarn)
            if ((not check_input_valid) or self.input_valid_for_cell(datarn, value)):
                if (self.MT.undo_enabled and undo):
                    # Undo snapshot: zlib-compressed pickle of (event name, old
                    # value, selection boxes, current selection).
                    self.MT.undo_storage.append(zlib.compress(pickle.dumps(('edit_index', {datarn: self.MT._row_index[datarn]}, self.MT.get_boxes(include_current=False), self.MT.currently_selected()))))
                self.set_cell_data(datarn=datarn, value=value)
        if (cell_resize and self.MT.cell_auto_resize_enabled):
            self.set_row_height_run_binding(r, only_set_if_too_small=False)
        if redraw:
            self.MT.refresh()
        self.parentframe.emit_event('<<SheetModified>>')
    def set_cell_data(self, datarn=None, value=''):
        """Store *value* in index cell *datarn* (no undo entry, no redraw)."""
        if isinstance(self.MT._row_index, int):
            # Index redirects to a data column: write into the table data.
            self.MT.set_cell_data(datarn=datarn, datacn=self.MT._row_index, value=value)
        else:
            self.fix_index(datarn)
            if self.get_cell_kwargs(datarn, key='checkbox'):
                # Checkbox cells coerce the stored value to bool when possible.
                self.MT._row_index[datarn] = try_to_bool(value)
            else:
                self.MT._row_index[datarn] = value
def input_valid_for_cell(self, datarn, value):
if self.get_cell_kwargs(datarn, key='readonly'):
return False
if self.get_cell_kwargs(datarn, key='checkbox'):
return is_bool_like(value)
if self.cell_equal_to(datarn, value):
return False
kwargs = self.get_cell_kwargs(datarn, key='dropdown')
if (kwargs and kwargs['validate_input'] and (value not in kwargs['values'])):
return False
return True
def cell_equal_to(self, datarn, value):
self.fix_index(datarn)
if isinstance(self.MT._row_index, list):
return (self.MT._row_index[datarn] == value)
elif isinstance(self.MT._row_index, int):
return self.MT.cell_equal_to(datarn, self.MT._row_index, value)
def get_cell_data(self, datarn, get_displayed=False, none_to_empty_str=False, redirect_int=False):
if get_displayed:
return self.get_valid_cell_data_as_str(datarn, fix=False)
if (redirect_int and isinstance(self.MT._row_index, int)):
return self.MT.get_cell_data(datarn, self.MT._row_index, none_to_empty_str=True)
if (isinstance(self.MT._row_index, int) or (not self.MT._row_index) or (datarn >= len(self.MT._row_index)) or ((self.MT._row_index[datarn] is None) and none_to_empty_str)):
return ''
return self.MT._row_index[datarn]
def get_valid_cell_data_as_str(self, datarn, fix=True) -> str:
kwargs = self.get_cell_kwargs(datarn, key='dropdown')
if kwargs:
if (kwargs['text'] is not None):
return f"{kwargs['text']}"
else:
kwargs = self.get_cell_kwargs(datarn, key='checkbox')
if kwargs:
return f"{kwargs['text']}"
if isinstance(self.MT._row_index, int):
return self.MT.get_valid_cell_data_as_str(datarn, self.MT._row_index, get_displayed=True)
if fix:
self.fix_index(datarn)
try:
return ('' if (self.MT._row_index[datarn] is None) else f'{self.MT._row_index[datarn]}')
except Exception:
return ''
def get_value_for_empty_cell(self, datarn, r_ops=True):
if self.get_cell_kwargs(datarn, key='checkbox', cell=r_ops):
return False
kwargs = self.get_cell_kwargs(datarn, key='dropdown', cell=r_ops)
if (kwargs and kwargs['validate_input'] and kwargs['values']):
return kwargs['values'][0]
return ''
def get_empty_index_seq(self, end, start=0, r_ops=True):
return [self.get_value_for_empty_cell(datarn, r_ops=r_ops) for datarn in range(start, end)]
def fix_index(self, datarn=None, fix_values=tuple()):
if isinstance(self.MT._row_index, int):
return
if isinstance(self.MT._row_index, float):
self.MT._row_index = int(self.MT._row_index)
return
if (not isinstance(self.MT._row_index, list)):
try:
self.MT._row_index = list(self.MT._row_index)
except Exception:
self.MT._row_index = []
if (isinstance(datarn, int) and (datarn >= len(self.MT._row_index))):
self.MT._row_index.extend(self.get_empty_index_seq(end=(datarn + 1), start=len(self.MT._row_index)))
if fix_values:
for (rn, v) in enumerate(islice(self.MT._row_index, fix_values[0], fix_values[1])):
if (not self.input_valid_for_cell(rn, v)):
self.MT._row_index[rn] = self.get_value_for_empty_cell(rn)
def set_row_height_run_binding(self, r, only_set_if_too_small=True):
old_height = (self.MT.row_positions[(r + 1)] - self.MT.row_positions[r])
new_height = self.set_row_height(r, only_set_if_too_small=only_set_if_too_small)
if ((self.row_height_resize_func is not None) and (old_height != new_height)):
self.row_height_resize_func(ResizeEvent('row_height_resize', r, old_height, new_height))
    def click_checkbox(self, r, datarn=None, undo=True, redraw=True):
        """Toggle the checkbox in index row *r* and fire the check/end-edit
        callbacks."""
        if (datarn is None):
            datarn = (r if self.MT.all_rows_displayed else self.MT.displayed_rows[r])
        kwargs = self.get_cell_kwargs(datarn, key='checkbox')
        if (kwargs['state'] == 'normal'):
            # Non-bool current values reset to False rather than toggling.
            if isinstance(self.MT._row_index, list):
                value = ((not self.MT._row_index[datarn]) if isinstance(self.MT._row_index[datarn], bool) else False)
            elif isinstance(self.MT._row_index, int):
                value = ((not self.MT.data[datarn][self.MT._row_index]) if isinstance(self.MT.data[datarn][self.MT._row_index], bool) else False)
            else:
                value = False
            # NOTE(review): the *undo* parameter is not forwarded here; the
            # default (undo=True) is always used — confirm.
            self.set_cell_data_undo(r, datarn=datarn, value=value, cell_resize=False)
            if (kwargs['check_function'] is not None):
                kwargs['check_function']((r, 0, 'IndexCheckboxClicked', (self.MT._row_index[datarn] if isinstance(self.MT._row_index, list) else self.MT.get_cell_data(datarn, self.MT._row_index))))
            if (self.extra_end_edit_cell_func is not None):
                self.extra_end_edit_cell_func(EditIndexEvent(r, 'Return', (self.MT._row_index[datarn] if isinstance(self.MT._row_index, list) else self.MT.get_cell_data(datarn, self.MT._row_index)), 'end_edit_index'))
        if redraw:
            self.MT.refresh()
def checkbox_index(self, **kwargs):
self.destroy_opened_dropdown_window()
if (('dropdown' in self.options) or ('checkbox' in self.options)):
self.delete_options_dropdown_and_checkbox()
if ('checkbox' not in self.options):
self.options['checkbox'] = {}
self.options['checkbox'] = get_checkbox_dict(**kwargs)
total_rows = self.MT.total_data_rows()
if isinstance(self.MT._row_index, int):
for datarn in range(total_rows):
self.MT.set_cell_data(datarn=datarn, datacn=self.MT._row_index, value=kwargs['checked'])
else:
for datarn in range(total_rows):
self.set_cell_data(datarn=datarn, value=kwargs['checked'])
def dropdown_index(self, **kwargs):
self.destroy_opened_dropdown_window()
if (('dropdown' in self.options) or ('checkbox' in self.options)):
self.delete_options_dropdown_and_checkbox()
if ('dropdown' not in self.options):
self.options['dropdown'] = {}
self.options['dropdown'] = get_dropdown_dict(**kwargs)
total_rows = self.MT.total_data_rows()
value = (kwargs['set_value'] if (kwargs['set_value'] is not None) else (kwargs['values'][0] if kwargs['values'] else ''))
if isinstance(self.MT._row_index, int):
for datarn in range(total_rows):
self.MT.set_cell_data(datarn=datarn, datacn=self.MT._row_index, value=value)
else:
for datarn in range(total_rows):
self.set_cell_data(datarn=datarn, value=value)
def create_checkbox(self, datarn=0, **kwargs):
if ((datarn in self.cell_options) and (('dropdown' in self.cell_options[datarn]) or ('checkbox' in self.cell_options[datarn]))):
self.delete_cell_options_dropdown_and_checkbox(datarn)
if (datarn not in self.cell_options):
self.cell_options[datarn] = {}
self.cell_options[datarn]['checkbox'] = get_checkbox_dict(**kwargs)
self.set_cell_data(datarn=datarn, value=kwargs['checked'])
def create_dropdown(self, datarn, **kwargs):
if ((datarn in self.cell_options) and (('dropdown' in self.cell_options[datarn]) or ('checkbox' in self.cell_options[datarn]))):
self.delete_cell_options_dropdown_and_checkbox(datarn)
if (datarn not in self.cell_options):
self.cell_options[datarn] = {}
self.cell_options[datarn]['dropdown'] = get_dropdown_dict(**kwargs)
self.set_cell_data(datarn=datarn, value=(kwargs['set_value'] if (kwargs['set_value'] is not None) else (kwargs['values'][0] if kwargs['values'] else '')))
def get_cell_kwargs(self, datarn, key='dropdown', cell=True, entire=True):
if (cell and (datarn in self.cell_options) and (key in self.cell_options[datarn])):
return self.cell_options[datarn][key]
if (entire and (key in self.options)):
return self.options[key]
return {}
def delete_options_dropdown(self):
self.destroy_opened_dropdown_window()
if ('dropdown' in self.options):
del self.options['dropdown']
def delete_options_checkbox(self):
if ('checkbox' in self.options):
del self.options['checkbox']
def delete_options_dropdown_and_checkbox(self):
self.delete_options_dropdown()
self.delete_options_checkbox()
def delete_cell_options_dropdown(self, datarn):
self.destroy_opened_dropdown_window(datarn=datarn)
if ((datarn in self.cell_options) and ('dropdown' in self.cell_options[datarn])):
del self.cell_options[datarn]['dropdown']
def delete_cell_options_checkbox(self, datarn):
if ((datarn in self.cell_options) and ('checkbox' in self.cell_options[datarn])):
del self.cell_options[datarn]['checkbox']
def delete_cell_options_dropdown_and_checkbox(self, datarn):
self.delete_cell_options_dropdown(datarn)
self.delete_cell_options_checkbox(datarn) |
def main():
    """Regenerate the ordered-distributions module from the upstream manifest
    and write it to OUTPUT_FILE."""
    # NOTE(review): the right-hand side of this assignment is missing in the
    # source as given — it presumably fetched the manifest over HTTP (the
    # response object is used like an httpx/requests response below); restore
    # the fetch expression before use.
    response =
    response.raise_for_status()
    contents = response.text
    distributions = defaultdict(list)
    # ordering_data[i] is an insertion-ordered "set" (dict keys -> None) of
    # identifiers per distribution type; used below to emit them reversed.
    ordering_data = defaultdict(dict)
    for (i, distribution_type) in enumerate(('DEFAULT_CPYTHON_DISTRIBUTIONS', 'DEFAULT_PYPY_DISTRIBUTIONS')):
        for (identifier, data, source) in parse_distributions(contents, distribution_type):
            ordering_data[i][identifier] = None
            distributions[identifier].append((data, source))
    ordered = [identifier for identifiers in ordering_data.values() for identifier in reversed(identifiers)]
    # Emit the generated module line by line.
    output = ['from __future__ import annotations', '', '# fmt: off', 'ORDERED_DISTRIBUTIONS: tuple[str, ...] = (']
    output.extend((f' {identifier!r},' for identifier in ordered))
    output.extend((')', 'DISTRIBUTIONS: dict[str, dict[tuple[str, ...], str]] = {'))
    for (identifier, data) in distributions.items():
        output.append(f' {identifier!r}: {{')
        for (d, source) in data:
            output.extend((f' {d!r}:', f' {source!r},'))
        output.append(' },')
    output.extend(('}', ''))
    output = '\n'.join(output)
    with open(OUTPUT_FILE, 'w', encoding='utf-8') as f:
        f.write(output)
def set_obj_goal(self, obj_goal):
    """Set the goal object and derive the env's place / pre-place poses from it.

    Side effects: mutates self._env.PLACE_POSE and PRE_PLACE_POSE, and draws the
    goal mesh's AABB in the physics debug visualizer.
    """
    self._obj_goal = obj_goal
    # Place pose is the goal object's current pose in the simulator.
    self._env.PLACE_POSE = pp.get_pose(self._obj_goal)
    # Pre-place pose: 0.2 m above the place pose, in world coordinates.
    c = safepicking.geometry.Coordinate(*self._env.PLACE_POSE)
    c.translate([0, 0, 0.2], wrt='world')
    self._env.PRE_PLACE_POSE = c.pose
    # Visualize the placed object's bounding box (red) for debugging.
    visual_file = pp.get_visual_data(self._obj_goal)[0].meshAssetFileName.decode()
    mesh = trimesh.load(visual_file)
    mesh.apply_transform(safepicking.geometry.transformation_matrix(*self._env.PLACE_POSE))
    pp.draw_aabb(mesh.bounds, color=(1, 0, 0, 1))
class _Project():
    """Base class for rope-style projects: resource lookup, module resolution
    and observer bookkeeping shared by concrete projects and the no-project stub.

    NOTE(review): this appears to be decompiled rope source.  ``history`` and
    ``pycore`` below are accessed as *attributes* throughout this very class
    (``self.pycore.builtin_module``, ``self.history.do``), so their lost
    ``@property`` decorators (upstream: ``@property`` + ``@utils.saveit``) are
    restored here with simple lazy caching.
    """

    prefs: Prefs

    def __init__(self, fscommands):
        self.observers = []
        self.fscommands = fscommands
        self.prefs = Prefs()
        self.data_files = _DataFiles(self)
        self._custom_source_folders = []

    def get_resource(self, resource_name):
        """Return the File or Folder named *resource_name*; raise if it does not exist."""
        path = self._get_resource_path(resource_name)
        if (not os.path.exists(path)):
            raise exceptions.ResourceNotFoundError(('Resource <%s> does not exist' % resource_name))
        elif os.path.isfile(path):
            return File(self, resource_name)
        elif os.path.isdir(path):
            return Folder(self, resource_name)
        else:
            # Exists but is neither file nor directory (socket, device, ...).
            raise exceptions.ResourceNotFoundError(('Unknown resource ' + resource_name))

    def get_module(self, name, folder=None):
        """Return the pymodule for *name*, preferring builtins; raise ModuleNotFoundError."""
        pymod = self.pycore.builtin_module(name)
        if (pymod is not None):
            return pymod
        module = self.find_module(name, folder)
        if (module is None):
            raise ModuleNotFoundError(('Module %s not found' % name))
        return self.pycore.resource_to_pyobject(module)

    def get_python_path_folders(self):
        """Resources for configured 'python_path' entries plus sys.path, skipping missing ones."""
        result = []
        for src in (self.prefs.get('python_path', []) + sys.path):
            with contextlib.suppress(exceptions.ResourceNotFoundError):
                src_folder = get_no_project().get_resource(src)
                result.append(src_folder)
        return result

    def get_source_folders(self):
        """Custom source folders followed by the ones discovered under the project root."""
        if (self.root is None):
            return []
        result = list(self._custom_source_folders)
        result.extend(self.pycore._find_source_folders(self.root))
        return result

    def validate(self, folder):
        """Ask every registered observer to validate *folder*."""
        # Iterate a copy so observers may unregister themselves during validation.
        for observer in list(self.observers):
            observer.validate(folder)

    def add_observer(self, observer):
        """Register a resource-change observer."""
        self.observers.append(observer)

    def remove_observer(self, observer):
        """Unregister *observer* if it is currently registered."""
        if (observer in self.observers):
            self.observers.remove(observer)

    def do(self, changes, task_handle=taskhandle.DEFAULT_TASK_HANDLE):
        """Apply *changes* through the (cached) undo/redo history."""
        self.history.do(changes, task_handle=task_handle)

    def get_pymodule(self, resource, force_errors=False):
        """Return the analyzed module object for *resource*."""
        return self.pycore.resource_to_pyobject(resource, force_errors)

    def get_pycore(self):
        return self.pycore

    def get_file(self, path):
        """Return a File for *path* (no existence check)."""
        return File(self, path)

    def get_folder(self, path):
        """Return a Folder for *path* (no existence check)."""
        return Folder(self, path)

    def get_prefs(self):
        return self.prefs

    def get_relative_module(self, name, folder, level):
        """Resolve a relative import (*level* dots) and return its pymodule."""
        module = self.find_relative_module(name, folder, level)
        if (module is None):
            raise ModuleNotFoundError(('Module %s not found' % name))
        return self.pycore.resource_to_pyobject(module)

    def find_module(self, modname, folder=None) -> Optional[File]:
        """Search source folders, python-path folders, then *folder* for *modname*."""
        for src in self.get_source_folders():
            module = _find_module_in_folder(src, modname)
            if (module is not None):
                return module
        for src in self.get_python_path_folders():
            module = _find_module_in_folder(src, modname)
            if (module is not None):
                return module
        if (folder is not None):
            module = _find_module_in_folder(folder, modname)
            if (module is not None):
                return module
        return None

    def find_relative_module(self, modname, folder, level):
        """Walk *level*-1 parents up from *folder*, then resolve *modname* there."""
        for i in range((level - 1)):
            folder = folder.parent
        if (modname == ''):
            return folder
        else:
            return _find_module_in_folder(folder, modname)

    def is_ignored(self, resource):
        """Hook for subclasses; the base project ignores nothing."""
        return False

    def _get_resource_path(self, name):
        # Abstract hook: subclasses map a resource name to a filesystem path.
        pass

    @property
    def history(self):
        """Undo/redo history -- lazily created and cached so state persists across calls."""
        if getattr(self, '_history', None) is None:
            self._history = history.History(self)
        return self._history

    @property
    def pycore(self):
        """Python-analysis core -- lazily created and cached."""
        if getattr(self, '_pycore', None) is None:
            self._pycore = pycore.PyCore(self)
        return self._pycore

    def close(self):
        # NOTE(review): message suggests this base implementation is only meant
        # for the no-project singleton; confirm subclasses override close().
        warnings.warn('Cannot close a NoProject', DeprecationWarning, stacklevel=2)

    ropefolder = None
class FC6_TestCase(FC3_TestCase):
    """Kickstart FC6 'reboot' command tests: FC6 adds the --eject flag over FC3."""
    def runTest(self):
        # FC6 behavior is a superset of FC3: run the inherited checks first.
        FC3_TestCase.runTest(self)
        # Default construction leaves --eject off.
        cmd = FC6_Reboot()
        self.assertFalse(cmd.eject)
        # Parsing 'reboot --eject' sets the reboot action, the eject flag,
        # and round-trips back to the same kickstart text.
        cmd = self.assert_parse('reboot --eject')
        self.assertEqual(cmd.action, KS_REBOOT)
        self.assertEqual(cmd.eject, True)
        self.assertEqual(str(cmd), '# Reboot after installation\nreboot --eject\n')
def test_marker_without_description(pytester: Pytester) -> None:
    """--strict-markers must accept a marker registered without a description."""
    pytester.makefile('.cfg', setup='\n [tool:pytest]\n markers=slow\n ')
    pytester.makeconftest("\n import pytest\n pytest.mark.xfail('FAIL')\n ")
    # Move the conftest into a subdirectory so it is collected as a nested one.
    nested_dir = pytester.mkdir('ft1_dummy')
    conftest_path = pytester.path.joinpath('conftest.py')
    conftest_path.replace(nested_dir.joinpath('conftest.py'))
    result = pytester.runpytest('--strict-markers')
    # No tests exist; we only assert the run itself did not error out.
    result.assert_outcomes()
class DigitalPoleZeroResponse(FrequencyResponse):
    """Discrete-time pole-zero-gain response evaluated on the z-plane.

    Requires the sampling interval ``deltat`` so frequencies can be mapped to
    normalized angular frequency.
    """
    # z-plane zeros, poles, overall gain, and the sampling interval [s].
    zeros = List.T(Complex.T())
    poles = List.T(Complex.T())
    constant = Complex.T(default=(1.0 + 0j))
    deltat = Float.T()
    def __init__(self, zeros=None, poles=None, constant=(1.0 + 0j), deltat=None, **kwargs):
        """Create a digital pole-zero response; ``deltat`` is mandatory."""
        if (zeros is None):
            zeros = []
        if (poles is None):
            poles = []
        if (deltat is None):
            raise ValueError('Sampling interval `deltat` must be given for DigitalPoleZeroResponse.')
        FrequencyResponse.__init__(self, zeros=aslist(zeros), poles=aslist(poles), constant=constant, deltat=deltat, **kwargs)
    def check_sampling_rate(self):
        """Raise InvalidResponseError if the sampling interval is zero (rate undefined)."""
        if (self.deltat == 0.0):
            raise InvalidResponseError('Invalid digital response: sampling rate undefined.')
    def get_fmax(self):
        """Return the Nyquist frequency [Hz]."""
        self.check_sampling_rate()
        return (0.5 / self.deltat)
    def evaluate(self, freqs):
        """Evaluate the transfer function at ``freqs`` [Hz] via scipy's freqz_zpk."""
        self.check_sampling_rate()
        # freqz_zpk expects normalized angular frequency: 2*pi*f*deltat.
        return signal.freqz_zpk(self.zeros, self.poles, self.constant, (freqs * ((2.0 * math.pi) * self.deltat)))[1]
    def is_scalar(self):
        """True if the response has no poles and no zeros (pure gain)."""
        return ((len(self.zeros) == 0) and (len(self.poles) == 0))
    def get_scalar(self):
        """Return the gain if this is a pure-gain response, else raise IsNotScalar."""
        if self.is_scalar():
            return self.constant
        else:
            raise IsNotScalar()
    def to_digital(self, deltat):
        """Convert to a numerator/denominator (b, a) DigitalFilterResponse."""
        self.check_sampling_rate()
        from scipy.signal import zpk2tf
        (b, a) = zpk2tf(self.zeros, self.poles, self.constant)
        return DigitalFilterResponse(b, a, deltat)
    def summary(self):
        """Short human-readable description: gain, or pole/zero counts with fmax."""
        if self.is_scalar():
            return str_gain(self.get_scalar())
        return ('dpz{%i,%i,%s}' % (len(self.poles), len(self.zeros), str_fmax_failsafe(self)))
class TestBasic(TestCase):
    """Tests for pySMT's Annotations container and SMT-LIB annotation parsing.

    NOTE(review): relies on the pySMT TestCase base providing ``self.env``
    and on SMTLIB_DIR fixture files -- confirm when running standalone.
    """
    def test_basic(self):
        """Adding annotations makes them retrievable by key and by reverse lookup."""
        ann = Annotations()
        a = Symbol('a')
        next_a = Symbol('next(a)')
        init_a = Symbol('init(a)')
        ann.add(a, 'next', next_a)
        ann.add(a, 'init', init_a)
        ann.add(a, 'related', next_a)
        ann.add(a, 'related', init_a)
        self.assertIn(a, ann)
        self.assertEqual(set([next_a]), ann.annotations(a)['next'])
        self.assertEqual(set([init_a]), ann.annotations(a)['init'])
        self.assertEqual(set([init_a, next_a]), ann.annotations(a)['related'])
        self.assertEqual(set([a]), ann.all_annotated_formulae('next'))
        self.assertEqual(set([a]), ann.all_annotated_formulae('init'))
        self.assertEqual(set([a]), ann.all_annotated_formulae('related'))
        self.assertEqual(set(), ann.all_annotated_formulae('non-existent'))
    def test_remove(self):
        """remove() deletes a formula and all of its annotations."""
        ann = Annotations()
        a = Symbol('a')
        next_a = Symbol('next(a)')
        init_a = Symbol('init(a)')
        ann.add(a, 'next', next_a)
        ann.add(a, 'init', init_a)
        ann.add(a, 'related', next_a)
        ann.add(a, 'related', init_a)
        self.assertIn(a, ann)
        ann.remove(a)
        self.assertNotIn(a, ann)
        self.assertEqual(None, ann.annotations(a))
        self.assertEqual(set([]), ann.all_annotated_formulae('next'))
        self.assertEqual(set([]), ann.all_annotated_formulae('init'))
        self.assertEqual(set([]), ann.all_annotated_formulae('related'))
        self.assertEqual(set(), ann.all_annotated_formulae('non-existent'))
    def test_remove_annotation(self):
        """remove_annotation() deletes a single key while keeping the others."""
        ann = Annotations()
        a = Symbol('a')
        next_a = Symbol('next(a)')
        init_a = Symbol('init(a)')
        ann.add(a, 'next', next_a)
        ann.add(a, 'init', init_a)
        ann.add(a, 'related', next_a)
        ann.add(a, 'related', init_a)
        ann.remove_annotation(a, 'next')
        self.assertNotIn('next', ann.annotations(a))
        self.assertEqual(set([init_a]), ann.annotations(a)['init'])
        self.assertEqual(set([init_a, next_a]), ann.annotations(a)['related'])
        self.assertEqual(set([]), ann.all_annotated_formulae('next'))
        self.assertEqual(set([a]), ann.all_annotated_formulae('init'))
        self.assertEqual(set([a]), ann.all_annotated_formulae('related'))
        self.assertEqual(set(), ann.all_annotated_formulae('non-existent'))
    def test_remove_value(self):
        """remove_value() deletes one value under a key, leaving the rest intact."""
        ann = Annotations()
        a = Symbol('a')
        next_a = Symbol('next(a)')
        init_a = Symbol('init(a)')
        ann.add(a, 'next', next_a)
        ann.add(a, 'init', init_a)
        ann.add(a, 'related', next_a)
        ann.add(a, 'related', init_a)
        self.assertNotEqual(ann.annotations(a)['init'], ann.annotations(a)['related'])
        ann.remove_value(a, 'related', next_a)
        self.assertEqual(ann.annotations(a)['related'], ann.annotations(a)['init'])
    def test_vmt(self):
        """Annotations survive parsing a VMT file and support has_annotation queries."""
        parser = SmtLibParser()
        fname = os.path.join(SMTLIB_DIR, 'small_set/vmt/c432_0f.vmt')
        script = parser.get_script_fname(fname)
        ann = script.annotations
        self.assertIn('A_1__AT0 ->', str(ann))
        a1 = Symbol('A_1__AT0')
        self.assertIn(a1, ann)
        self.assertTrue(ann.has_annotation(a1, 'next'))
        self.assertFalse(ann.has_annotation(a1, 'non-existent'))
        self.assertTrue(ann.has_annotation(a1, 'next', 'A_1__AT1'))
        self.assertFalse(ann.has_annotation(a1, 'next', 'non-existent'))
        self.assertIn('A_1__AT1', ann.annotations(a1)['next'])
        self.assertIn('A_1__AT1', ann[a1]['next'])
        curr_a1 = ann.all_annotated_formulae('next', 'A_1__AT1')
        self.assertEqual(curr_a1, set([a1]))
    def test_interpreting_annotations(self):
        """A ':next' annotation value naming a declared symbol is kept as its name string."""
        source = '(declare-fun |"v__AT0"| () Bool)\n(declare-fun |"v__AT1"| () Bool)\n(define-fun .def_1 () Bool (! |"v__AT0"| :next |"v__AT1"|))\n'
        buf = StringIO(source)
        parser = SmtLibParser()
        script = parser.get_script(buf)
        ann = script.annotations
        v0 = self.env.formula_manager.get_symbol('"v__AT0"')
        v1_str = next(iter(ann[v0]['next']))
        self.env.formula_manager.get_symbol(v1_str)
        self.assertEqual(v1_str, '"v__AT1"')
    def test_complex_annotations_values(self):
        """A compound s-expression annotation value is preserved verbatim as text."""
        source = '(declare-fun |"v__AT0"| () Bool)\n(define-fun .def_1 () Bool (! |"v__AT0"| :next (+ 1 meaningless)))\n'
        buf = StringIO(source)
        parser = SmtLibParser()
        script = parser.get_script(buf)
        ann = script.annotations
        v0 = self.env.formula_manager.get_symbol('"v__AT0"')
        v1_str = next(iter(ann[v0]['next']))
        self.assertEqual(v1_str, '(+ 1 meaningless)')
    def test_annotations_colon_values(self):
        """A keyword-looking token after the annotation key parses as a value, not a new key."""
        source = '(declare-fun |"v__AT0"| () Bool)\n(define-fun .def_1 () Bool (! |"v__AT0"| :next :this_is_considered_a_value))\n'
        buf = StringIO(source)
        parser = SmtLibParser()
        script = parser.get_script(buf)
        ann = script.annotations
        v0 = self.env.formula_manager.get_symbol('"v__AT0"')
        v1_str = next(iter(ann[v0]['next']))
        self.assertEqual(v1_str, ':this_is_considered_a_value')
class TriGamma(UnaryScalarOp):
    """Elementwise trigamma function psi'(x): second derivative of ln(Gamma(x))."""

    @staticmethod
    def st_impl(x):
        # NOTE(review): upstream declares this @staticmethod; the decorator was
        # evidently lost in decompilation and is restored here (it is invoked
        # as TriGamma.st_impl(x) below).
        return scipy.special.polygamma(1, x)

    def impl(self, x):
        """Python-side implementation delegated to SciPy."""
        return TriGamma.st_impl(x)

    def L_op(self, inputs, outputs, outputs_gradients):
        """Gradient: d/dx psi'(x) = psi''(x) = polygamma(2, x)."""
        (x,) = inputs
        (g_out,) = outputs_gradients
        # NOTE(review): upstream checks ``x.type in complex_types`` -- confirm
        # whether the missing ``.type`` here is a transcription error.
        if (x in complex_types):
            raise NotImplementedError('gradient not implemented for complex types')
        return [(g_out * polygamma(2, x))]

    def c_support_code(self, **kwargs):
        # C implementation: recurrence up to z >= 5, then the asymptotic series.
        return '\n // For GPU support\n #ifdef WITHIN_KERNEL\n #define DEVICE WITHIN_KERNEL\n #else\n #define DEVICE\n #endif\n\n #ifndef ga_double\n #define ga_double double\n #endif\n\n #ifndef _TRIGAMMAFUNCDEFINED\n #define _TRIGAMMAFUNCDEFINED\n\n DEVICE double _tri_gamma(ga_double x) {\n\n double a = 0.0001;\n double b = 5.0;\n double b2 = 0.;\n double b4 = -0.;\n double b6 = 0.;\n double b8 = -0.;\n double value;\n double y;\n double z;\n\n if (x <= 0) {\n return 0.0;\n }\n\n if ( x <= a ) {\n value = 1.0 / x / x;\n return value;\n }\n\n value = 0.0;\n z = x;\n\n while ( z < b ) {\n value += 1.0 / z / z;\n z += 1.0;\n }\n\n y = 1.0 / z / z;\n\n value += 0.5 * y + (1.0 + y * (b2 + y * (b4 + y * (b6 + y * b8 )))) / z;\n\n return value;\n }\n #endif\n '

    def c_code(self, node, name, inp, out, sub):
        """Emit C code calling _tri_gamma; floating-point inputs only."""
        (x,) = inp
        (z,) = out
        if (node.inputs[0].type in float_types):
            return f'''{z} =
 _tri_gamma({x});'''
        raise NotImplementedError('only floating point is implemented')
def _parse_baseplate_script_args() -> Tuple[(argparse.Namespace, List[str])]:
parser = argparse.ArgumentParser(description='Run a function with app configuration loaded.', formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--debug', action='store_true', default=False, help='enable extra-verbose debug logging')
parser.add_argument('--app-name', default='main', metavar='NAME', help='name of app to load from config_file (default: main)')
parser.add_argument('config_file', type=argparse.FileType('r'), help='path to a configuration file')
parser.add_argument('entrypoint', type=str, help='function to call, e.g. module.path:fn_name')
return parser.parse_known_args(sys.argv[1:]) |
def main(_):
    """Build pet_train/pet_val TFRecords from the Oxford-IIIT Pet dataset layout."""
    data_dir = FLAGS.data_dir
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
    logging.info('Reading from Pet dataset.')
    image_dir = os.path.join(data_dir, 'images')
    annotations_dir = os.path.join(data_dir, 'annotations')
    examples_path = os.path.join(annotations_dir, 'trainval.txt')
    examples_list = dataset_util.read_examples_list(examples_path)
    # Fixed seed keeps the train/val split reproducible across runs.
    random.seed(42)
    random.shuffle(examples_list)
    # 70/30 train/validation split.
    num_examples = len(examples_list)
    num_train = int((0.7 * num_examples))
    train_examples = examples_list[:num_train]
    val_examples = examples_list[num_train:]
    logging.info('%d training and %d validation examples.', len(train_examples), len(val_examples))
    train_output_path = os.path.join(FLAGS.output_dir, 'pet_train.record')
    val_output_path = os.path.join(FLAGS.output_dir, 'pet_val.record')
    create_tf_record(train_output_path, label_map_dict, annotations_dir, image_dir, train_examples)
    create_tf_record(val_output_path, label_map_dict, annotations_dir, image_dir, val_examples)
class CueInput(reahl.web.ui.WrappedInput):
    """An input wrapped together with a 'cue' widget shown alongside it.

    The input and the cue are placed in a shared Div; client-side behavior is
    attached via the bootstrapcueinput jQuery plugin.
    """
    def __init__(self, html_input, cue_widget):
        super().__init__(html_input)
        # The wrapping Div becomes this input's HTML representation.
        div = self.add_child(Div(self.view))
        self.set_html_representation(div)
        div.append_class('reahl-bootstrapcueinput')
        cue_widget.append_class('reahl-bootstrapcue')
        div.add_child(html_input)
        self.cue_widget = div.add_child(cue_widget)
    def get_js(self, context=None):
        # Activate the client-side cue behavior for all cue inputs on the page.
        js = ['$(".reahl-bootstrapcueinput").bootstrapcueinput();']
        return (super().get_js(context=context) + js)
    def includes_label(self):
        # Delegate: whether a label is rendered is decided by the wrapped input.
        return self.input_widget.includes_label
# NOTE(review): the original line was a bare `.parametrize(...)` -- the
# `@pytest.mark` prefix was evidently lost in decompilation and is restored.
@pytest.mark.parametrize('trust_enabled,tuf_root', [(True, QUAY_TUF_ROOT), (False, DISABLED_TUF_ROOT)])
def test_trust_disabled(trust_enabled, tuf_root):
    """_get_tuf_root honors the repository's trust_enabled flag."""
    (app, principal) = app_with_principal()
    with app.test_request_context('/'):
        principal.set_identity(read_identity('namespace', 'repo'))
        actual = _get_tuf_root(Mock(trust_enabled=trust_enabled), 'namespace', 'repo')
        assert (actual == tuf_root), ('should be %s, but was %s' % (tuf_root, actual))
class GroupEpicNoteAwardEmojiManager(NoUpdateMixin, RESTManager):
    """Manager for award emoji on a note of a group epic (no update endpoint)."""
    _path = '/groups/{group_id}/epics/{epic_iid}/notes/{note_id}/award_emoji'
    _obj_cls = GroupEpicNoteAwardEmoji
    _from_parent_attrs = {'group_id': 'group_id', 'epic_iid': 'epic_iid', 'note_id': 'id'}
    _create_attrs = RequiredOptional(required=('name',))
    def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> GroupEpicNoteAwardEmoji:
        """Retrieve a single award emoji; the cast only narrows the return type."""
        return cast(GroupEpicNoteAwardEmoji, super().get(id=id, lazy=lazy, **kwargs))
def load_json(p):
    """Read one summarization JSON file and return (article, abstract) token lists.

    Each result is a list of sentences, each sentence a list of lowercased
    tokens produced by whitespace-splitting and the shared clean() pass.
    """
    with p.open('r', encoding='utf-8') as f:
        payload = json.load(f)

    def _tokenize(sentences):
        # Lowercase + whitespace-split, then re-join and run clean() before the
        # final split -- mirrors the two-pass normalization of the dataset prep.
        lowered = [[token.lower() for token in sentence.strip().split()] for sentence in sentences]
        return [clean(' '.join(tokens)).split() for tokens in lowered]

    return (_tokenize(payload['article']), _tokenize(payload['abstract']))
class CommonTime_Tests(unittest.TestCase):
    """Tests for gpstk.CommonTime arithmetic and the times() generator.

    NOTE(review): the `.next()` calls below are Python-2 style; confirm whether
    the SWIG-generated gpstk iterator exposes .next() under Python 3 or whether
    these should be next(times).
    """
    def test(self):
        """Day/second arithmetic and string formatting of CommonTime."""
        a = gpstk.CommonTime()
        a.addDays(1234)
        b = gpstk.CommonTime(a)
        b.addSeconds(123.4)
        # Subtraction of two CommonTimes yields seconds as a float.
        c = (b - a)
        self.assertAlmostEqual(1234.0, a.getDays())
        self.assertEqual('0001234 0. UNK', str(a))
        self.assertAlmostEqual(1234., b.getDays())
        self.assertEqual('0001234 0. UNK', str(b))
        self.assertAlmostEqual(123.4, c)
    def test_exception(self):
        """Subtracting times in different time systems raises InvalidRequest."""
        a = gpstk.CommonTime(gpstk.TimeSystem('GPS'))
        b = gpstk.CommonTime(gpstk.TimeSystem('GLO'))
        self.assertRaises(gpstk.InvalidRequest, a.__sub__, b)
    def test_times_gen(self):
        """times() yields evenly spaced instants and stops after the end time."""
        start = gpstk.CommonTime()
        start.addSeconds(100.0)
        end = gpstk.CommonTime()
        end.addSeconds(900.0)
        times = gpstk.times(start, end, seconds=200.0)
        self.assertEqual(100.0, times.next().getSecondOfDay())
        self.assertEqual(300.0, times.next().getSecondOfDay())
        self.assertEqual(500.0, times.next().getSecondOfDay())
        self.assertEqual(700.0, times.next().getSecondOfDay())
        self.assertEqual(900.0, times.next().getSecondOfDay())
        self.assertRaises(StopIteration, times.next)
    def test_times_list(self):
        """times() materialized as a list; without a step it yields just the endpoints."""
        start = gpstk.CommonTime()
        start.addSeconds(100.0)
        end = gpstk.CommonTime()
        end.addSeconds(900.0)
        times = list(gpstk.times(start, end, seconds=200.0))
        self.assertEqual(100.0, times[0].getSecondOfDay())
        self.assertEqual(300.0, times[1].getSecondOfDay())
        self.assertEqual(500.0, times[2].getSecondOfDay())
        self.assertEqual(700.0, times[3].getSecondOfDay())
        self.assertEqual(900.0, times[4].getSecondOfDay())
        times = list(gpstk.times(start, end))
        self.assertEqual(2, len(times))
        self.assertEqual(times[0], start)
        self.assertEqual(times[1], end)
def main():
    """Build and run a demo scene: UI canvas, button, checkbox, FPS text,
    a render-target view of a second camera, and a rotating cube."""
    scene = SceneManager.AddScene('Scene')
    # UI canvas attached to the main camera.
    canvas = GameObject('Canvas')
    scene.mainCamera.canvas = canvas.AddComponent(Canvas)
    scene.Add(canvas)
    # Logo image, drawn slightly in front (negative depth) and movable.
    imgObject = GameObject('Image', canvas)
    rectTransform = imgObject.AddComponent(RectTransform)
    rectTransform.offset = RectOffset.Rectangle(100)
    imgObject.AddComponent(Mover2D).rectTransform = rectTransform
    img = imgObject.AddComponent(Image2D)
    img.depth = (- 0.1)
    img.texture = Texture2D(resolver.getPath('examples/example8/logo.png'))
    scene.Add(imgObject)
    # Clickable button wired to a CallbackReceiver.
    buttonFont = FontLoader.ChooseFont(['Cascadia Code Regular', 'Consolas'], 18)
    (rect, button, text) = Gui.MakeButton('Button', scene, '-> Click me', buttonFont)
    rect.transform.ReparentTo(canvas.transform)
    rect.offset = RectOffset(Vector2(40, 25), Vector2(190, 50))
    receiver = button.AddComponent(CallbackReceiver)
    button.callback = Event(receiver.Callback)
    # Checkbox plus a label whose text tracks its on/off state.
    (rect, checkbox) = Gui.MakeCheckBox('Checkbox', scene)
    rect.transform.ReparentTo(canvas.transform)
    rect.offset = RectOffset(Vector2(300, 50), Vector2(325, 75))
    label = GameObject('Label')
    text = label.AddComponent(Text)
    text.text = 'Off'
    text.color = RGB(0, 0, 0)
    label.AddComponent(RectTransform).offset = RectOffset(Vector2(330, 50), Vector2(425, 75))
    label.transform.ReparentTo(canvas.transform)
    scene.Add(label)
    tracker = rect.AddComponent(CheckboxTracker)
    tracker.text = text
    tracker.check = checkbox
    # FPS counter anchored to the bottom-right corner.
    t = GameObject('Text', canvas)
    rect = t.AddComponent(RectTransform)
    rect.anchors.SetPoint(Vector2(1, 0))
    rect.offset.min = Vector2((- 150), 25)
    text = t.AddComponent(Text)
    text.text = '60'
    text.color = RGB(0, 0, 0)
    t.AddComponent(FPSTracker).text = text
    scene.Add(t)
    # Secondary camera looking at the origin, rendered into a UI RenderTarget.
    cam = GameObject('Camera')
    cam.transform.position = Vector3((- 5), 2, (- 5))
    cam.transform.LookAtPoint(Vector3.zero())
    camera = cam.AddComponent(Camera)
    camera.shadows = False
    scene.Add(cam)
    target = GameObject('Target', canvas)
    rect = target.AddComponent(RectTransform)
    rect.anchors.min = Vector2(0.6, 0.6)
    rect.anchors.max = Vector2(1, 1)
    target.AddComponent(RenderTarget).source = camera
    scene.Add(target)
    label = GameObject('Label', canvas)
    rect = label.AddComponent(RectTransform)
    rect.anchors.min = Vector2(0.6, 0.55)
    rect.anchors.max = Vector2(1, 0.6)
    text = label.AddComponent(Text)
    text.text = 'RenderTarget'
    text.color = RGB(0, 0, 0)
    scene.Add(label)
    # Rotating red cube in the 3D scene proper.
    cube = GameObject('Cube')
    renderer = cube.AddComponent(MeshRenderer)
    renderer.mesh = Mesh.cube(2)
    renderer.mat = Material(RGB(255, 0, 0))
    cube.AddComponent(Rotator)
    scene.Add(cube)
    SceneManager.LoadScene(scene)
class _EntityConditionFactory():
    """Factory dispatching an XML element to the matching entity-condition parser."""

    @staticmethod
    def parse_entity_condition(element):
        """Parse *element* into the entity condition whose tag it contains.

        Raises NotAValidElement when no known condition tag is present.

        NOTE(review): upstream marks this @staticmethod (it takes no self);
        the decorator appears lost in decompilation and is restored here.
        """
        # Checked in the same order as the original if/elif chain.
        condition_types = (
            ('EndOfRoadCondition', EndOfRoadCondition),
            ('CollisionCondition', CollisionCondition),
            ('OffroadCondition', OffroadCondition),
            ('TimeHeadwayCondition', TimeHeadwayCondition),
            ('TimeToCollisionCondition', TimeToCollisionCondition),
            ('AccelerationCondition', AccelerationCondition),
            ('StandStillCondition', StandStillCondition),
            ('SpeedCondition', SpeedCondition),
            ('RelativeSpeedCondition', RelativeSpeedCondition),
            ('TraveledDistanceCondition', TraveledDistanceCondition),
            ('ReachPositionCondition', ReachPositionCondition),
            ('DistanceCondition', DistanceCondition),
            ('RelativeDistanceCondition', RelativeDistanceCondition),
        )
        for tag, condition_cls in condition_types:
            if (element.find(tag) is not None):
                return condition_cls.parse(element)
        raise NotAValidElement('element ', element, 'is not a valid entity condition')
class LayerDepwiseDecode(nn.Module):
    """Depthwise-separable decode block: depthwise conv -> 1x1 pointwise conv -> ReLU."""

    def __init__(self, in_channel, out_channel, kernel_size=3, stride=1):
        super(LayerDepwiseDecode, self).__init__()
        # groups=in_channel makes the first conv depthwise (one filter per channel).
        depthwise = nn.Conv2d(in_channels=in_channel, out_channels=in_channel, kernel_size=kernel_size, stride=stride, padding=1, groups=in_channel)
        pointwise = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=1, stride=stride)
        self.layer = nn.Sequential(depthwise, pointwise, nn.ReLU(inplace=True))

    def forward(self, x):
        return self.layer(x)
def format_time(time: (((datetime.time | datetime.datetime) | float) | None)=None, format: (_PredefinedTimeFormat | str)='medium', tzinfo: (datetime.tzinfo | None)=None, locale: ((Locale | str) | None)=LC_TIME) -> str:
    """Format a time value according to *format* for the given *locale*.

    *time* may be a datetime/time, a POSIX timestamp, or None ("now");
    *tzinfo* converts it before formatting.
    """
    # Only a full datetime carries a date part; bare times get no reference date.
    reference_date = time.date() if isinstance(time, datetime.datetime) else None
    time = _get_time(time, tzinfo)
    locale = Locale.parse(locale)
    # Named presets are resolved to the locale's concrete time pattern.
    if format in {'full', 'long', 'medium', 'short'}:
        format = get_time_format(format, locale=locale)
    return parse_pattern(format).apply(time, locale, reference_date=reference_date)
def test_align_left_multiline():
    """align_left pads each line of multiline text, preserving ANSI styling."""
    # Plain text: every line is padded with the fill char up to the width.
    text = 'foo\nshoes'
    fill_char = '-'
    width = 7
    aligned = cu.align_left(text, fill_char=fill_char, width=width)
    assert (aligned == 'foo----\nshoes--')
    # Styled text: colors must be reset around fills and re-applied per line.
    reset_all = str(ansi.TextStyle.RESET_ALL)
    blue = str(ansi.Fg.BLUE)
    red = str(ansi.Fg.RED)
    green = str(ansi.Fg.GREEN)
    fg_reset = str(ansi.Fg.RESET)
    text = f'''{blue}foo{red}moo
shoes{fg_reset}'''
    fill_char = f'{green}-{fg_reset}'
    width = 7
    aligned = cu.align_left(text, fill_char=fill_char, width=width)
    # Expected: each line wrapped in RESET_ALL, with green fill appended and
    # the second line re-opened in the color active at its start (red).
    expected = f'''{reset_all}{blue}foo{red}moo{reset_all}{green}-{fg_reset}{reset_all}
'''
    expected += f'{reset_all}{red}shoes{fg_reset}{reset_all}{green}--{fg_reset}{reset_all}'
    assert (aligned == expected)
class MLP(nn.Module):
    """Plain multilayer perceptron: a stack of (Linear -> activation -> dropout)
    blocks with an optional linear head."""
    class Block(nn.Module):
        """One MLP block: Linear -> activation -> Dropout."""
        def __init__(self, *, d_in: int, d_out: int, bias: bool, activation: str, dropout: float) -> None:
            super().__init__()
            self.linear = nn.Linear(d_in, d_out, bias)
            # Activation module is built from its name by the project helper.
            self.activation = make_module(activation)
            self.dropout = nn.Dropout(dropout)
        def forward(self, x: Tensor) -> Tensor:
            return self.dropout(self.activation(self.linear(x)))
    # The output head is just a linear layer.
    Head = nn.Linear
    def __init__(self, *, d_in: int, d_out: Optional[int], n_blocks: int, d_layer: int, activation: str, dropout: float) -> None:
        assert (n_blocks > 0)
        super().__init__()
        # First block maps d_in -> d_layer; subsequent blocks are d_layer -> d_layer.
        self.blocks = nn.Sequential(*[MLP.Block(d_in=(d_layer if block_i else d_in), d_out=d_layer, bias=True, activation=activation, dropout=dropout) for block_i in range(n_blocks)])
        # Head is omitted when d_out is None (backbone-only usage).
        self.head = (None if (d_out is None) else MLP.Head(d_layer, d_out))
    def d_out(self) -> int:
        # NOTE(review): upstream variants define this as a @property; here it is
        # a plain method, so callers must invoke .d_out() -- confirm intended.
        return (self.blocks[(- 1)].linear.out_features if (self.head is None) else self.head.out_features)
    def forward(self, x: Tensor) -> Tensor:
        x = self.blocks(x)
        if (self.head is not None):
            x = self.head(x)
        return x
def logSetup(filename, log_size, daemon):
    """Configure and return the 'TinyHTTPProxy' logger.

    An explicit *filename* wins; otherwise daemons log to the default rotating
    file and foreground runs log to stderr.  *log_size* is the rotation size in MiB.
    """
    logger = logging.getLogger('TinyHTTPProxy')
    logger.setLevel(logging.INFO)
    max_bytes = (log_size * (1 << 20))
    if filename:
        handler = logging.handlers.RotatingFileHandler(filename, maxBytes=max_bytes, backupCount=5)
    elif daemon:
        # Daemons have no useful stderr; fall back to the default log file.
        handler = logging.handlers.RotatingFileHandler(DEFAULT_LOG_FILENAME, maxBytes=max_bytes, backupCount=5)
    else:
        handler = logging.StreamHandler()
    fmt = logging.Formatter('[%(asctime)-12s.%(msecs)03d] %(levelname)-8s {%(name)s %(threadName)s} %(message)s', '%Y-%m-%d %H:%M:%S')
    handler.setFormatter(fmt)
    logger.addHandler(handler)
    return logger
class KeySequence():
    """A sequence of key presses backed by chunks of QKeySequence.

    Qt's QKeySequence holds at most four keys, so longer sequences are stored
    as a list of QKeySequence objects of at most _MAX_LEN keys each.
    """
    _MAX_LEN = 4

    def __init__(self, *keys: KeyInfo) -> None:
        self._sequences: List[QKeySequence] = []
        for sub in utils.chunk(keys, self._MAX_LEN):
            try:
                args = [info.to_qt() for info in sub]
            except InvalidKeyError as e:
                raise KeyParseError(keystr=None, error=f'Got invalid key: {e}')
            sequence = QKeySequence(*args)
            self._sequences.append(sequence)
        if keys:
            assert self
        self._validate()

    def __str__(self) -> str:
        parts = []
        for info in self:
            parts.append(str(info))
        return ''.join(parts)

    def __iter__(self) -> Iterator[KeyInfo]:
        """Iterate over all KeyInfo objects across the chunked sequences."""
        sequences = cast(List[Iterable[_KeyInfoType]], self._sequences)
        for combination in itertools.chain.from_iterable(sequences):
            (yield KeyInfo.from_qt(combination))

    def __repr__(self) -> str:
        return utils.get_repr(self, keys=str(self))

    # Ordering/equality/hash delegate to the underlying QKeySequence list.
    def __lt__(self, other: 'KeySequence') -> bool:
        return (self._sequences < other._sequences)

    def __gt__(self, other: 'KeySequence') -> bool:
        return (self._sequences > other._sequences)

    def __le__(self, other: 'KeySequence') -> bool:
        return (self._sequences <= other._sequences)

    def __ge__(self, other: 'KeySequence') -> bool:
        return (self._sequences >= other._sequences)

    def __eq__(self, other: object) -> bool:
        if (not isinstance(other, KeySequence)):
            return NotImplemented
        return (self._sequences == other._sequences)

    def __ne__(self, other: object) -> bool:
        if (not isinstance(other, KeySequence)):
            return NotImplemented
        return (self._sequences != other._sequences)

    def __hash__(self) -> int:
        return hash(tuple(self._sequences))

    def __len__(self) -> int:
        return sum((len(seq) for seq in self._sequences))

    def __bool__(self) -> bool:
        return bool(self._sequences)

    # NOTE(review): upstream marks the next two defs with @typing.overload;
    # without the decorators they are simply shadowed by the final def below,
    # which is the runtime implementation.
    def __getitem__(self, item: int) -> KeyInfo:
        ...

    def __getitem__(self, item: slice) -> 'KeySequence':
        ...

    def __getitem__(self, item: Union[(int, slice)]) -> Union[(KeyInfo, 'KeySequence')]:
        """Index or slice the sequence; slices return a new KeySequence."""
        infos = list(self)
        if isinstance(item, slice):
            return self.__class__(*infos[item])
        else:
            return infos[item]

    def _validate(self, keystr: str=None) -> None:
        """Reject unknown/empty keys; *keystr* is only used for error messages."""
        try:
            for info in self:
                if ((info.key < Qt.Key.Key_Space) or (info.key >= Qt.Key.Key_unknown)):
                    raise KeyParseError(keystr, 'Got invalid key!')
        except InvalidKeyError as e:
            raise KeyParseError(keystr, f'Got invalid key: {e}')
        for seq in self._sequences:
            if (not seq):
                raise KeyParseError(keystr, 'Got invalid key!')

    def matches(self, other: 'KeySequence') -> QKeySequence.SequenceMatch:
        """Compare this (typed) sequence against *other* (configured binding)."""
        # A longer typed sequence can never match a shorter binding.
        if (len(self._sequences) > len(other._sequences)):
            return QKeySequence.SequenceMatch.NoMatch
        for (entered, configured) in zip(self._sequences, other._sequences):
            match = entered.matches(configured)
            if (match != QKeySequence.SequenceMatch.ExactMatch):
                return match
        if (len(self._sequences) == len(other._sequences)):
            return QKeySequence.SequenceMatch.ExactMatch
        elif (len(self._sequences) < len(other._sequences)):
            return QKeySequence.SequenceMatch.PartialMatch
        else:
            raise utils.Unreachable('self={!r} other={!r}'.format(self, other))

    def append_event(self, ev: QKeyEvent) -> 'KeySequence':
        """Return a new KeySequence with the key from *ev* appended (normalized)."""
        try:
            key = Qt.Key(ev.key())
        except ValueError as e:
            raise KeyParseError(None, f'Got invalid key: {e}')
        _assert_plain_key(key)
        _assert_plain_modifier(ev.modifiers())
        key = _remap_unicode(key, ev.text())
        modifiers: _ModifierType = ev.modifiers()
        if (key == _NIL_KEY):
            raise KeyParseError(None, 'Got nil key!')
        # GroupSwitch is a layout artifact, never part of a binding.
        modifiers = _unset_modifier_bits(modifiers, Qt.KeyboardModifier.GroupSwitchModifier)
        # Shift+Backtab is reported instead of plain Tab; normalize it.
        if ((modifiers & Qt.KeyboardModifier.ShiftModifier) and (key == Qt.Key.Key_Backtab)):
            key = Qt.Key.Key_Tab
        # Drop a lone Shift for printable lowercase text (it is already encoded
        # in the character itself).
        shift_modifier = Qt.KeyboardModifier.ShiftModifier
        if ((modifiers == shift_modifier) and _is_printable(key) and (not ev.text().isupper())):
            modifiers = Qt.KeyboardModifier.NoModifier
        if utils.is_mac:
            # On macOS Qt swaps Ctrl and Meta; swap them back unless both are set.
            if ((modifiers & Qt.KeyboardModifier.ControlModifier) and (modifiers & Qt.KeyboardModifier.MetaModifier)):
                pass
            elif (modifiers & Qt.KeyboardModifier.ControlModifier):
                modifiers = _unset_modifier_bits(modifiers, Qt.KeyboardModifier.ControlModifier)
                modifiers |= Qt.KeyboardModifier.MetaModifier
            elif (modifiers & Qt.KeyboardModifier.MetaModifier):
                modifiers = _unset_modifier_bits(modifiers, Qt.KeyboardModifier.MetaModifier)
                modifiers |= Qt.KeyboardModifier.ControlModifier
        infos = list(self)
        infos.append(KeyInfo(key, modifiers))
        return self.__class__(*infos)

    def strip_modifiers(self) -> 'KeySequence':
        """Return a copy with the Keypad modifier stripped from every key."""
        modifiers = Qt.KeyboardModifier.KeypadModifier
        infos = [info.with_stripped_modifiers(modifiers) for info in self]
        return self.__class__(*infos)

    def with_mappings(self, mappings: Mapping[('KeySequence', 'KeySequence')]) -> 'KeySequence':
        """Return a copy with single keys replaced according to *mappings*."""
        infos: List[KeyInfo] = []
        for info in self:
            key_seq = KeySequence(info)
            if (key_seq in mappings):
                infos += mappings[key_seq]
            else:
                infos.append(info)
        return self.__class__(*infos)

    @classmethod
    def parse(cls, keystr: str) -> 'KeySequence':
        """Parse a keystring like '<Ctrl-x>y' into a KeySequence.

        NOTE(review): upstream decorates this with @classmethod (it uses
        ``cls`` and is invoked as KeySequence.parse(...)); without the
        decorator, calling it on the class would pass the keystring as ``cls``
        and crash -- the decorator is restored here.
        """
        new = cls()
        strings = list(_parse_keystring(keystr))
        for sub in utils.chunk(strings, cls._MAX_LEN):
            sequence = QKeySequence(', '.join(sub))
            new._sequences.append(sequence)
        if keystr:
            assert new, keystr
        new._validate(keystr)
        return new
def _str2bool(value):
    """argparse ``type=`` helper that parses boolean CLI values correctly.

    ``type=bool`` is a classic argparse pitfall: any non-empty string is
    truthy, so ``--flag False`` silently became True. This accepts the usual
    spellings; argparse reports the ValueError as a usage error.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise ValueError('boolean value expected, got %r' % (value,))


def main():
    """Entry point: parse CLI args, build the lane/object detection model and
    config, load a checkpoint, run evaluation, and write per-metric results
    to a text file in the experiment log directory.
    """
    # Transformer/head sizes for the "large" model variant.
    large_parameters = dict()
    large_parameters['hidden_dim'] = 256
    large_parameters['dim_feedforward'] = 512
    large_parameters['class_embed_dim'] = 256
    large_parameters['class_embed_num'] = 3
    large_parameters['box_embed_dim'] = 256
    large_parameters['box_embed_num'] = 3
    large_parameters['endpoint_embed_dim'] = 256
    large_parameters['endpoint_embed_num'] = 3
    large_parameters['assoc_embed_dim'] = 256
    large_parameters['assoc_embed_last_dim'] = 128
    large_parameters['assoc_embed_num'] = 3
    large_parameters['assoc_classifier_dim'] = 256
    large_parameters['assoc_classifier_num'] = 3
    num_queries = 100
    num_enc_layers = 4
    num_dec_layers = 4
    parser = ArgumentParser()
    # Boolean flags use _str2bool instead of the buggy type=bool (see helper above).
    # NOTE(review): several help strings below look copy-pasted ('whether it is
    # on dgx'); left unchanged since help text is user-visible behavior.
    parser.add_argument('--split_pe', type=_str2bool, default=split_pe, help='whether it is on dgx')
    parser.add_argument('--object_refinement', type=_str2bool, default=object_refinement, help='whether it is on dgx')
    parser.add_argument('--only_bev_pe', type=_str2bool, default=only_bev_pe, help='whether it is on dgx')
    parser.add_argument('--bev_pe', type=_str2bool, default=apply_bev_pe, help='whether it is on dgx')
    parser.add_argument('--abs_bev', type=_str2bool, default=abs_bev, help='whether it is on dgx')
    parser.add_argument('--apply_poly_loss', type=_str2bool, default=apply_poly_loss, help='whether it is on dgx')
    parser.add_argument('--objects', type=_str2bool, default=True, help='whether estimate objects')
    parser.add_argument('--num_object_queries', default=100, type=int, help='Number of query slots')
    parser.add_argument('--num_object_classes', default=num_object_classes, type=int, help='Num object classes')
    parser.add_argument('--num_spline_points', default=3, type=int, help='Num object classes')
    parser.add_argument('--frozen_weights', type=str, default=None, help='Path to the pretrained model. If set, only the mask head will be trained')
    parser.add_argument('--backbone', default='resnet50', type=str, help='Name of the convolutional backbone to use')
    parser.add_argument('--dilation', type=_str2bool, default=True, help='If true, we replace stride with dilation in the last convolutional block (DC5)')
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), help='Type of positional embedding to use on top of the image features')
    parser.add_argument('--enc_layers', default=num_enc_layers, type=int, help='Number of encoding layers in the transformer')
    parser.add_argument('--dec_layers', default=num_dec_layers, type=int, help='Number of decoding layers in the transformer')
    parser.add_argument('--dim_feedforward', default=large_parameters['dim_feedforward'], type=int, help='Intermediate size of the feedforward layers in the transformer blocks')
    parser.add_argument('--hidden_dim', default=large_parameters['hidden_dim'], type=int, help='Size of the embeddings (dimension of the transformer)')
    parser.add_argument('--dropout', default=0.0, type=float, help='Dropout applied in the transformer')
    parser.add_argument('--nheads', default=4, type=int, help="Number of attention heads inside the transformer's attentions")
    parser.add_argument('--num_queries', default=num_queries, type=int, help='Number of query slots')
    parser.add_argument('--pre_norm', action='store_true')
    parser.add_argument('--masks', type=_str2bool, default=False, help='Train segmentation head if the flag is provided')
    parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', help='Disables auxiliary decoding losses (loss at each layer)')
    # Matching costs (Hungarian matcher) for objects and static elements.
    parser.add_argument('--set_obj_cost_class', default=3, type=float, help='Class coefficient in the matching cost')
    parser.add_argument('--set_obj_cost_center', default=2, type=float, help='Class coefficient in the matching cost')
    parser.add_argument('--set_obj_cost_len', default=1, type=float, help='Class coefficient in the matching cost')
    parser.add_argument('--set_obj_cost_orient', default=1, type=float, help='Class coefficient in the matching cost')
    parser.add_argument('--set_cost_class', default=2, type=float, help='Class coefficient in the matching cost')
    parser.add_argument('--set_cost_bbox', default=1, type=float, help='L1 box coefficient in the matching cost')
    parser.add_argument('--set_cost_end', default=1, type=float, help='L1 endpoint coefficient in the matching cost')
    parser.add_argument('--set_cost_giou', default=1, type=float, help='giou box coefficient in the matching cost')
    # Loss coefficients.
    parser.add_argument('--object_detection_loss_coef', default=3, type=float)
    parser.add_argument('--object_center_loss_coef', default=3, type=float)
    parser.add_argument('--object_len_loss_coef', default=1, type=float)
    parser.add_argument('--object_orient_loss_coef', default=2, type=float)
    parser.add_argument('--object_refine_loss_coef', default=1, type=float)
    parser.add_argument('--polyline_loss_coef', default=2, type=float)
    parser.add_argument('--mask_loss_coef', default=1, type=float)
    parser.add_argument('--dice_loss_coef', default=1, type=float)
    parser.add_argument('--assoc_loss_coef', default=1, type=float)
    parser.add_argument('--detection_loss_coef', default=2, type=float)
    parser.add_argument('--endpoints_loss_coef', default=1, type=float)
    parser.add_argument('--bbox_loss_coef', default=2, type=float)
    parser.add_argument('--focal_loss_coef', default=0.1, type=float)
    parser.add_argument('--loss_end_match_coef', default=1, type=float)
    parser.add_argument('--giou_loss_coef', default=2, type=float)
    parser.add_argument('--visible_loss_coef', default=1, type=float)
    parser.add_argument('--eos_coef', default=0.3, type=float, help='Relative classification weight of the no-object class')
    parser.add_argument('--object_eos_coef', default=0.1, type=float, help='Relative classification weight of the no-object class')
    # Dataset / runtime settings.
    parser.add_argument('--dataset_file', default='coco')
    parser.add_argument('--coco_path', type=str)
    parser.add_argument('--coco_panoptic_path', type=str)
    parser.add_argument('--remove_difficult', action='store_true')
    parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--eval', default=False, action='store_true')
    parser.add_argument('--num_workers', default=2, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    args = parser.parse_args()
    print('GOT ARGS ')
    logging.error(str(args))
    config = get_configuration(args)
    logdir = create_experiment(config, name=('tr_lanefinder_' + str(config.train_dataset)))
    config.save_logdir = logdir
    config.n_control_points = args.num_spline_points
    config.freeze()
    device = torch.device(args.device)
    (model, criterion, postprocessors) = build(args, config, large_parameters)
    model.to(device)
    if (config.train_dataset == 'nuscenes'):
        (train_loader, train_dataset, val_loader, val_dataset) = data_factory.build_nuscenes_dataloader(config, args, val=True)
    else:
        (train_loader, train_dataset, val_loader, val_dataset) = data_factory.build_argoverse_dataloader(config, args, val=True)
    # Resume from a fixed checkpoint; base_dir is a module-level path.
    (epoch, best_iou, iteration) = load_checkpoint(os.path.join(base_dir, 'maxi_poly_loss_split_True_refineTrue', 'keep', 'latest.pth'), model)
    logging.error('LOADED MY CHECKPOINT')
    freeze_backbone_layers(model)
    thresh = 0.3
    val_con = evaluate(val_loader, model, criterion, postprocessors, BinaryConfusionMatrix(1, args.num_object_classes), config, args, thresh)
    (static_res_dict, object_res_dict) = val_con.get_res_dict
    # Append per-metric results for both static elements and objects.
    file1 = open(os.path.join(logdir, (('val_res_thresh_' + str(thresh)) + '.txt')), 'a')
    for k in static_res_dict.keys():
        logging.error(((str(k) + ' : ') + str(static_res_dict[k])))
        file1.write((((str(k) + ' : ') + str(static_res_dict[k])) + ' \n'))
    for k in object_res_dict.keys():
        logging.error(((str(k) + ' : ') + str(object_res_dict[k])))
        file1.write((((str(k) + ' : ') + str(object_res_dict[k])) + ' \n'))
    file1.close()
class QueryScheduler():
    """Schedules outgoing mDNS PTR queries for a set of browsed service types.

    Sends an initial burst of startup queries with exponentially-growing
    spacing, then maintains a min-heap of per-alias refresh queries so that
    PTR records are re-queried before they expire.
    """
    __slots__ = ('_zc', '_types', '_addr', '_port', '_multicast', '_first_random_delay_interval', '_min_time_between_queries_millis', '_loop', '_startup_queries_sent', '_next_scheduled_for_alias', '_query_heap', '_next_run', '_clock_resolution_millis', '_question_type')

    def __init__(self, zc: 'Zeroconf', types: Set[str], addr: Optional[str], port: int, multicast: bool, delay: int, first_random_delay_interval: Tuple[(int, int)], question_type: Optional[DNSQuestionType]) -> None:
        self._zc = zc
        self._types = types
        self._addr = addr
        self._port = port
        self._multicast = multicast
        self._first_random_delay_interval = first_random_delay_interval
        self._min_time_between_queries_millis = delay
        self._loop: Optional[asyncio.AbstractEventLoop] = None
        self._startup_queries_sent = 0
        # alias -> its single currently-scheduled PTR refresh query.
        self._next_scheduled_for_alias: Dict[(str, _ScheduledPTRQuery)] = {}
        # Min-heap of scheduled queries, ordered by scheduled time.
        self._query_heap: list[_ScheduledPTRQuery] = []
        self._next_run: Optional[asyncio.TimerHandle] = None
        # Monotonic-clock resolution in ms, used as slack when draining the heap.
        self._clock_resolution_millis = (time.get_clock_info('monotonic').resolution * 1000)
        self._question_type = question_type

    def start(self, loop: asyncio.AbstractEventLoop) -> None:
        """Begin the startup-query phase after a random initial delay."""
        start_delay = millis_to_seconds(random.randint(*self._first_random_delay_interval))
        self._loop = loop
        self._next_run = loop.call_later(start_delay, self._process_startup_queries)

    def stop(self) -> None:
        """Cancel the pending timer and discard all scheduled queries."""
        if (self._next_run is not None):
            self._next_run.cancel()
            self._next_run = None
        self._next_scheduled_for_alias.clear()
        self._query_heap.clear()

    def _schedule_ptr_refresh(self, pointer: DNSPointer, expire_time_millis: float_, refresh_time_millis: float_) -> None:
        """Create and queue a refresh query for `pointer`."""
        # TTL may arrive as float; _ScheduledPTRQuery stores it as int.
        ttl = (int(pointer.ttl) if isinstance(pointer.ttl, float) else pointer.ttl)
        scheduled_ptr_query = _ScheduledPTRQuery(pointer.alias, pointer.name, ttl, expire_time_millis, refresh_time_millis)
        self._schedule_ptr_query(scheduled_ptr_query)

    def _schedule_ptr_query(self, scheduled_query: _ScheduledPTRQuery) -> None:
        """Track the query per-alias and push it onto the heap."""
        self._next_scheduled_for_alias[scheduled_query.alias] = scheduled_query
        heappush(self._query_heap, scheduled_query)

    def cancel_ptr_refresh(self, pointer: DNSPointer) -> None:
        """Mark any pending refresh for this pointer cancelled (lazy heap removal)."""
        scheduled = self._next_scheduled_for_alias.pop(pointer.alias, None)
        if scheduled:
            scheduled.cancelled = True

    def reschedule_ptr_first_refresh(self, pointer: DNSPointer) -> None:
        """(Re)schedule the first refresh for `pointer`.

        Skips rescheduling if an existing query is within the minimum
        time-between-queries window of the new refresh time.
        """
        current = self._next_scheduled_for_alias.get(pointer.alias)
        refresh_time_millis = pointer.get_expiration_time(_EXPIRE_REFRESH_TIME_PERCENT)
        if (current is not None):
            if ((- self._min_time_between_queries_millis) <= (refresh_time_millis - current.when_millis) <= self._min_time_between_queries_millis):
                return
            # Existing query is too far off target: cancel and replace it.
            current.cancelled = True
            del self._next_scheduled_for_alias[pointer.alias]
        expire_time_millis = pointer.get_expiration_time(100)
        self._schedule_ptr_refresh(pointer, expire_time_millis, refresh_time_millis)

    def schedule_rescue_query(self, query: _ScheduledPTRQuery, now_millis: float_, additional_percentage: float_) -> None:
        """Queue a follow-up "rescue" query an extra TTL-percentage later.

        Not scheduled if the record would already be expired by then.
        """
        ttl_millis = (query.ttl * 1000)
        additional_wait = (ttl_millis * additional_percentage)
        next_query_time = (now_millis + additional_wait)
        if (next_query_time >= query.expire_time_millis):
            return
        scheduled_ptr_query = _ScheduledPTRQuery(query.alias, query.name, query.ttl, query.expire_time_millis, next_query_time)
        self._schedule_ptr_query(scheduled_ptr_query)

    def _process_startup_queries(self) -> None:
        """Send one startup query burst; reschedule with exponential spacing."""
        if TYPE_CHECKING:
            assert (self._loop is not None)
        if self._zc.done:
            return
        now_millis = current_time_millis()
        # First startup round may use QU questions (see async_send_ready_queries).
        self.async_send_ready_queries((self._startup_queries_sent == 0), now_millis, self._types)
        self._startup_queries_sent += 1
        if (self._startup_queries_sent >= STARTUP_QUERIES):
            # Startup phase done: switch to the steady-state refresh loop.
            self._next_run = self._loop.call_at(millis_to_seconds((now_millis + self._min_time_between_queries_millis)), self._process_ready_types)
            return
        # Back off quadratically (1s, 4s, 9s, ...) between startup rounds.
        self._next_run = self._loop.call_later((self._startup_queries_sent ** 2), self._process_startup_queries)

    def _process_ready_types(self) -> None:
        """Drain due queries from the heap, send them, and schedule the next run."""
        if TYPE_CHECKING:
            assert (self._loop is not None)
        if self._zc.done:
            return
        now_millis = current_time_millis()
        ready_types: Set[str] = set()
        next_scheduled: Optional[_ScheduledPTRQuery] = None
        # Treat anything due within one clock tick as ready now.
        end_time_millis = (now_millis + self._clock_resolution_millis)
        schedule_rescue: List[_ScheduledPTRQuery] = []
        while self._query_heap:
            query = self._query_heap[0]
            if query.cancelled:
                # Lazily discard queries cancelled via cancel_ptr_refresh().
                heappop(self._query_heap)
                continue
            if (query.when_millis > end_time_millis):
                next_scheduled = query
                break
            query = heappop(self._query_heap)
            ready_types.add(query.name)
            del self._next_scheduled_for_alias[query.alias]
            schedule_rescue.append(query)
        for query in schedule_rescue:
            self.schedule_rescue_query(query, now_millis, RESCUE_RECORD_RETRY_TTL_PERCENTAGE)
        if ready_types:
            self.async_send_ready_queries(False, now_millis, ready_types)
        # Never run again sooner than the minimum inter-query interval.
        next_time_millis = (now_millis + self._min_time_between_queries_millis)
        if ((next_scheduled is not None) and (next_scheduled.when_millis > next_time_millis)):
            next_when_millis = next_scheduled.when_millis
        else:
            next_when_millis = next_time_millis
        self._next_run = self._loop.call_at(millis_to_seconds(next_when_millis), self._process_ready_types)

    def async_send_ready_queries(self, first_request: bool, now_millis: float_, ready_types: Set[str]) -> None:
        """Build and send PTR queries for `ready_types`.

        The very first request defaults to QU questions unless an explicit
        question type was configured.
        """
        question_type = (QU_QUESTION if ((self._question_type is None) and first_request) else self._question_type)
        outs = generate_service_query(self._zc, now_millis, ready_types, self._multicast, question_type)
        if outs:
            for out in outs:
                self._zc.async_send(out, self._addr, self._port)
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    """Parametrize `class_info` with every *Manager class found in gitlab.v4.objects.

    Classes defined in gitlab.base (the abstract bases) are excluded.
    """
    manager_classes = set()
    for _, module in inspect.getmembers(gitlab.v4.objects, inspect.ismodule):
        for name, value in inspect.getmembers(module, inspect.isclass):
            if value.__module__ == 'gitlab.base':
                continue
            if not name.endswith('Manager'):
                continue
            manager_classes.add(ClassInfo(name=name, type=value))
    metafunc.parametrize('class_info', sorted(manager_classes))
_module
class FocalLoss(nn.Module):
    """Weighted sigmoid focal loss module.

    Only the sigmoid variant is implemented; calling with use_sigmoid False
    raises NotImplementedError (and construction asserts it is True).
    """

    def __init__(self, use_sigmoid=True, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0):
        super(FocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
        """Compute the loss; `reduction_override` replaces the configured reduction."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        if not self.use_sigmoid:
            raise NotImplementedError
        loss = sigmoid_focal_loss(pred, target, weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction, avg_factor=avg_factor)
        return self.loss_weight * loss
class BaseModel():
    """Abstract base for train/test models.

    Defines the common interface (input handling, forward/test hooks,
    checkpoint saving, LR scheduling) that concrete models override.
    Fix: the class previously defined `save` twice — a required-arg no-op
    stub immediately shadowed by the real implementation; the dead stub
    has been removed (runtime behavior is unchanged, later def won).
    """

    def name(self):
        """Lower-cased class name, used as a label for logs/checkpoints."""
        return self.__class__.__name__.lower()

    def initialize(self, opt):
        """Store options and derive tensor type and checkpoint directory."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # Use CUDA tensors when GPU ids were supplied.
        self.Tensor = (torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor)
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self._count = 0

    def set_input(self, input):
        self.input = input

    def forward(self):
        """Training forward pass; subclasses override."""
        pass

    def test(self):
        """Inference pass (no backprop); subclasses override."""
        pass

    def get_image_paths(self):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        return self.input

    def get_current_errors(self):
        return {}

    def update_learning_rate(self):
        """Step all LR schedulers; requires _init_optimizer to have been called."""
        for scheduler in self.schedulers:
            scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print(('learning rate = %.7f' % lr))

    def print_optimizer_param(self):
        print(self.optimizers[(- 1)])

    def save(self, label=None):
        """Save the state dict; named by epoch/iteration when no label is given."""
        epoch = self.epoch
        iterations = self.iterations
        if (label is None):
            model_name = os.path.join(self.save_dir, ('model' + ('_%03d_%08d.pt' % (epoch, iterations))))
        else:
            model_name = os.path.join(self.save_dir, ((('model' + '_') + label) + '.pt'))
        torch.save(self.state_dict(), model_name)

    def _init_optimizer(self, optimizers):
        """Register optimizers, reset schedulers, and seed initial lr/weight decay."""
        self.optimizers = optimizers
        self.schedulers = []
        for optimizer in self.optimizers:
            util.set_opt_param(optimizer, 'initial_lr', self.opt.lr)
            util.set_opt_param(optimizer, 'weight_decay', self.opt.wd)
class TestHistoryProgress():
    """Tests for history.HistoryProgress (progress-dialog wrapper)."""

    def progress(self):
        # NOTE(review): consumed as the `progress` argument below, so this is
        # presumably a @pytest.fixture whose decorator is outside this view
        # (or was stripped) — confirm.
        return history.HistoryProgress()

    def test_no_start(self, progress):
        """tick()/finish() without start() must not create a dialog."""
        progress.tick()
        assert (progress._value == 1)
        progress.finish()
        assert (progress._progress is None)

    def test_gui(self, qtbot, progress):
        """start() shows an indeterminate dialog; set_maximum/finish update/hide it."""
        progress.start('Hello World')
        dialog = progress._progress
        qtbot.add_widget(dialog)
        progress.tick()
        assert dialog.isVisible()
        assert (dialog.labelText() == 'Hello World')
        assert (dialog.minimum() == 0)
        assert (dialog.value() == 1)
        assert (dialog.minimumDuration() == 0)
        # maximum() == 0 is Qt's indeterminate ("busy") state.
        assert (dialog.maximum() == 0)
        progress.set_maximum(42)
        assert (dialog.maximum() == 42)
        progress.finish()
        assert (not dialog.isVisible())
def test_imapwidget_keyring_error(fake_qtile, monkeypatch, fake_window, patched_imap):
    """ImapWidget must surface a keyring failure as an error string from poll()."""
    patched_imap.keyring.valid = False
    widget = patched_imap.ImapWidget(user='qtile')
    bar = FakeBar([widget], window=fake_window)
    widget._configure(fake_qtile, bar)
    assert widget.poll() == 'Gnome Keyring Error'
class ExportDialog(QtWidgets.QWidget):
    """Dialog for exporting a scene (or a sub-item) via the registered exporters.

    Shows a tree of exportable items, a list of export formats, and the
    selected exporter's parameter tree; highlights the current export target
    with a dashed yellow rectangle in the scene.
    """

    def __init__(self, scene):
        QtWidgets.QWidget.__init__(self)
        self.setVisible(False)
        self.setWindowTitle('Export')
        # Tracks whether the dialog has been positioned once (see show()).
        self.shown = False
        self.currentExporter = None
        self.scene = scene
        # Dashed-yellow rectangle overlaid on the scene to mark the export target.
        self.selectBox = QtWidgets.QGraphicsRectItem()
        self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.PenStyle.DashLine))
        self.selectBox.hide()
        self.scene.addItem(self.selectBox)
        self.ui = ui_template.Ui_Form()
        self.ui.setupUi(self)
        self.ui.closeBtn.clicked.connect(self.close)
        self.ui.exportBtn.clicked.connect(self.exportClicked)
        self.ui.copyBtn.clicked.connect(self.copyClicked)
        self.ui.itemTree.currentItemChanged.connect(self.exportItemChanged)
        self.ui.formatList.currentItemChanged.connect(self.exportFormatChanged)

    def show(self, item=None):
        """Show the dialog, optionally pre-selecting `item` (or its enclosing plot)."""
        if (item is not None):
            # Walk up the item tree until we reach a ViewBox or PlotItem.
            while ((not isinstance(item, ViewBox)) and (not isinstance(item, PlotItem)) and (item is not None)):
                item = item.parentItem()
            # Prefer the enclosing PlotItem over a bare ViewBox.
            if (isinstance(item, ViewBox) and isinstance(item.parentItem(), PlotItem)):
                item = item.parentItem()
            self.updateItemList(select=item)
        self.setVisible(True)
        self.activateWindow()
        self.raise_()
        self.selectBox.setVisible(True)
        if (not self.shown):
            # First show: center the dialog over the scene's view widget.
            self.shown = True
            vcenter = self.scene.getViewWidget().geometry().center()
            x = max(0, int((vcenter.x() - (self.width() / 2))))
            y = max(0, int((vcenter.y() - (self.height() / 2))))
            self.move(x, y)

    def updateItemList(self, select=None):
        """Rebuild the export-target tree, rooted at 'Entire Scene'."""
        self.ui.itemTree.clear()
        si = QtWidgets.QTreeWidgetItem(['Entire Scene'])
        si.gitem = self.scene
        self.ui.itemTree.addTopLevelItem(si)
        self.ui.itemTree.setCurrentItem(si)
        si.setExpanded(True)
        for child in self.scene.items():
            if (child.parentItem() is None):
                self.updateItemTree(child, si, select=select)

    def updateItemTree(self, item, treeItem, select=None):
        """Recursively add exportable items (ViewBox/PlotItem) under `treeItem`."""
        si = None
        if isinstance(item, ViewBox):
            si = QtWidgets.QTreeWidgetItem(['ViewBox'])
        elif isinstance(item, PlotItem):
            si = QtWidgets.QTreeWidgetItem(['Plot'])
        if (si is not None):
            si.gitem = item
            treeItem.addChild(si)
            treeItem = si
            if (si.gitem is select):
                self.ui.itemTree.setCurrentItem(si)
        for ch in item.childItems():
            self.updateItemTree(ch, treeItem, select=select)

    def exportItemChanged(self, item, prev):
        """Move the selection box to the newly chosen item and refresh formats."""
        if (item is None):
            return
        if (item.gitem is self.scene):
            # Whole scene: use the visible rect of the first view.
            newBounds = self.scene.views()[0].viewRect()
        else:
            newBounds = item.gitem.sceneBoundingRect()
        self.selectBox.setRect(newBounds)
        self.selectBox.show()
        self.updateFormatList()

    def updateFormatList(self):
        """Repopulate the exporter list, keeping the current selection if possible."""
        current = self.ui.formatList.currentItem()
        self.ui.formatList.clear()
        gotCurrent = False
        for exp in exporters.listExporters():
            item = FormatExportListWidgetItem(exp, QtCore.QCoreApplication.translate('Exporter', exp.Name))
            self.ui.formatList.addItem(item)
            if (item is current):
                self.ui.formatList.setCurrentRow((self.ui.formatList.count() - 1))
                gotCurrent = True
        if (not gotCurrent):
            self.ui.formatList.setCurrentRow(0)

    def exportFormatChanged(self, item, prev):
        """Instantiate the chosen exporter and show its parameter tree."""
        if (item is None):
            self.currentExporter = None
            self.ui.paramTree.clear()
            return
        expClass = item.expClass
        exp = expClass(item=self.ui.itemTree.currentItem().gitem)
        params = exp.parameters()
        if (params is None):
            self.ui.paramTree.clear()
        else:
            self.ui.paramTree.setParameters(params)
        self.currentExporter = exp
        # Copy-to-clipboard is only offered by exporters that support it.
        self.ui.copyBtn.setEnabled(exp.allowCopy)

    def exportClicked(self):
        # Hide the selection box so it does not appear in the exported image.
        self.selectBox.hide()
        self.currentExporter.export()

    def copyClicked(self):
        self.selectBox.hide()
        self.currentExporter.export(copy=True)

    def close(self):
        """Hide the dialog and the selection box (does not destroy the widget)."""
        self.selectBox.setVisible(False)
        self.setVisible(False)

    def closeEvent(self, event):
        self.close()
        super().closeEvent(event)
class nnUNetDataset(object):
    """Dict-like dataset over preprocessed nnU-Net cases on disk.

    Each case identifier maps to its .npz data file and .pkl properties
    file (plus an optional previous-stage segmentation). Properties are
    loaded eagerly only for small datasets; otherwise lazily on access.
    """

    def __init__(self, folder: str, case_identifiers: List[str]=None, num_images_properties_loading_threshold: int=0, folder_with_segs_from_previous_stage: str=None):
        super().__init__()
        if (case_identifiers is None):
            case_identifiers = get_case_identifiers(folder)
        case_identifiers.sort()
        self.dataset = {}
        for c in case_identifiers:
            self.dataset[c] = {}
            self.dataset[c]['data_file'] = join(folder, ('%s.npz' % c))
            self.dataset[c]['properties_file'] = join(folder, ('%s.pkl' % c))
            if (folder_with_segs_from_previous_stage is not None):
                self.dataset[c]['seg_from_prev_stage_file'] = join(folder_with_segs_from_previous_stage, ('%s.npz' % c))
        # Small datasets: load all properties up front to avoid per-item pickle reads.
        if (len(case_identifiers) <= num_images_properties_loading_threshold):
            for i in self.dataset.keys():
                self.dataset[i]['properties'] = load_pickle(self.dataset[i]['properties_file'])
        # Opt-in via env var: cache memory-mapped npy handles across accesses.
        self.keep_files_open = (('nnUNet_keep_files_open' in os.environ.keys()) and (os.environ['nnUNet_keep_files_open'].lower() in ('true', '1', 't')))

    def __getitem__(self, key):
        # Shallow copy, so a lazily loaded 'properties' entry is not cached
        # on self.dataset via the returned dict.
        ret = {**self.dataset[key]}
        if ('properties' not in ret.keys()):
            ret['properties'] = load_pickle(ret['properties_file'])
        return ret

    def __setitem__(self, key, value):
        return self.dataset.__setitem__(key, value)

    def keys(self):
        return self.dataset.keys()

    def __len__(self):
        return self.dataset.__len__()

    def items(self):
        return self.dataset.items()

    def values(self):
        return self.dataset.values()

    def load_case(self, key):
        """Return (data, seg, properties) for a case.

        Prefers an already-open handle, then an unpacked .npy (memory-mapped,
        read-only), then falls back to reading the .npz archive. A previous-
        stage segmentation, if configured, is stacked onto seg as an extra
        channel.
        """
        entry = self[key]
        if ('open_data_file' in entry.keys()):
            data = entry['open_data_file']
        elif isfile((entry['data_file'][:(- 4)] + '.npy')):
            data = np.load((entry['data_file'][:(- 4)] + '.npy'), 'r')
            if self.keep_files_open:
                self.dataset[key]['open_data_file'] = data
        else:
            data = np.load(entry['data_file'])['data']
        if ('open_seg_file' in entry.keys()):
            seg = entry['open_seg_file']
        elif isfile((entry['data_file'][:(- 4)] + '_seg.npy')):
            seg = np.load((entry['data_file'][:(- 4)] + '_seg.npy'), 'r')
            if self.keep_files_open:
                self.dataset[key]['open_seg_file'] = seg
        else:
            seg = np.load(entry['data_file'])['seg']
        if ('seg_from_prev_stage_file' in entry.keys()):
            if isfile((entry['seg_from_prev_stage_file'][:(- 4)] + '.npy')):
                seg_prev = np.load((entry['seg_from_prev_stage_file'][:(- 4)] + '.npy'), 'r')
            else:
                seg_prev = np.load(entry['seg_from_prev_stage_file'])['seg']
            seg = np.vstack((seg, seg_prev[None]))
        return (data, seg, entry['properties'])
def get_shared_secrets_along_route(payment_path_pubkeys: Sequence[bytes], session_key: bytes) -> Sequence[bytes]:
    """Derive the per-hop ECDH shared secrets for an onion route.

    Starting from `session_key`, each hop's secret is the ECDH of the
    current ephemeral key with that hop's pubkey; the ephemeral key is then
    blinded for the next hop.
    """
    shared_secrets = []
    eph_privkey = session_key
    for hop_pubkey in payment_path_pubkeys:
        secret = get_ecdh(eph_privkey, hop_pubkey)
        shared_secrets.append(secret)
        # Rotate: ek_{i+1} = ek_i * SHA256(eph_pubkey || secret) mod curve order.
        eph_pubkey = ecc.ECPrivkey(eph_privkey).get_public_key_bytes()
        blinding = int.from_bytes(sha256(eph_pubkey + secret), byteorder='big')
        rotated = (int.from_bytes(eph_privkey, byteorder='big') * blinding) % ecc.CURVE_ORDER
        eph_privkey = rotated.to_bytes(32, byteorder='big')
    return shared_secrets
class TestModelStatsCalculator(unittest.TestCase):
    """Unit tests for the model-stats helper functions in `ms`."""

    def test_compute_compression_ratio(self):
        """Ratio from a mocked compressed cost vs. a given network cost."""
        logger.debug(self.id())
        network_cost = cc.Cost(50, 100)
        # Patch the cost calculator so the compressed-network cost is fixed.
        with unittest.mock.patch('aimet_common.cost_calculator.CostCalculator.compute_network_cost') as mock_func:
            mock_func.return_value = cc.Cost(40, 50)
            ratio = ms.compute_compression_ratio(None, aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, network_cost)
            self.assertEqual(0.2, ratio)

    def test_compute_objective_score(self):
        """Objective score for known perf/compression/error-margin inputs."""
        obj_score = ms.compute_objective_score(model_perf=0.2, compression_score=1.2, error_margin=1, baseline_perf=1)
        self.assertEqual(0.8, obj_score)
class Transaction():
    """Computes the install/update/uninstall operations that turn the
    currently installed package set into the resolved result set."""

    def __init__(self, current_packages: list[Package], result_packages: list[tuple[(Package, int)]], installed_packages: (list[Package] | None)=None, root_package: (Package | None)=None) -> None:
        # current_packages: what the previous lock state contained.
        # result_packages: (package, priority) pairs from the new resolution.
        self._current_packages = current_packages
        self._result_packages = result_packages
        if (installed_packages is None):
            installed_packages = []
        self._installed_packages = installed_packages
        self._root_package = root_package

    def calculate_operations(self, with_uninstalls: bool=True, synchronize: bool=False, *, skip_directory: bool=False) -> list[Operation]:
        """Return operations sorted by priority (desc), then name and version.

        with_uninstalls: also remove packages dropped from the result set.
        synchronize: additionally remove anything installed that is not in
            the result set (except pip, the root package, and packages
            already queued for uninstall).
        skip_directory: do not (re)install directory-sourced packages.
        """
        from poetry.installation.operations import Install
        from poetry.installation.operations import Uninstall
        from poetry.installation.operations import Update
        operations: list[Operation] = []
        for (result_package, priority) in self._result_packages:
            installed = False
            for installed_package in self._installed_packages:
                if (result_package.name == installed_package.name):
                    installed = True
                    # Update when the version changed, or when the source
                    # differs in a way that makes it a different package.
                    if ((result_package.version != installed_package.version) or ((installed_package.source_type or (result_package.source_type != 'legacy')) and (not result_package.is_same_package_as(installed_package)))):
                        operations.append(Update(installed_package, result_package, priority=priority))
                    else:
                        operations.append(Install(result_package).skip('Already installed'))
                    break
            if (not (installed or (skip_directory and (result_package.source_type == 'directory')))):
                operations.append(Install(result_package, priority=priority))
        if with_uninstalls:
            uninstalls: set[str] = set()
            # Remove currently tracked packages that dropped out of the result.
            for current_package in self._current_packages:
                found = any(((current_package.name == result_package.name) for (result_package, _) in self._result_packages))
                if (not found):
                    for installed_package in self._installed_packages:
                        if (installed_package.name == current_package.name):
                            uninstalls.add(installed_package.name)
                            operations.append(Uninstall(current_package))
            if synchronize:
                result_package_names = {result_package.name for (result_package, _) in self._result_packages}
                # pip must never be synchronized away.
                preserved_package_names = ({'pip'} - result_package_names)
                for installed_package in self._installed_packages:
                    if (installed_package.name in uninstalls):
                        continue
                    if (self._root_package and (installed_package.name == self._root_package.name)):
                        continue
                    if (installed_package.name in preserved_package_names):
                        continue
                    if (installed_package.name not in result_package_names):
                        uninstalls.add(installed_package.name)
                        operations.append(Uninstall(installed_package))
        return sorted(operations, key=(lambda o: ((- o.priority), o.package.name, o.package.version)))
class RegNetYLayer(nn.Module):
    """RegNet Y block: 1x1 conv -> grouped 3x3 conv -> SE -> 1x1 conv, plus a
    residual connection (projected when shape or stride changes)."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        needs_projection = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        if needs_projection:
            self.shortcut = RegNetShortCut(in_channels, out_channels, stride=stride)
        else:
            self.shortcut = nn.Identity()
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        out = self.layer(hidden_state)
        out += self.shortcut(hidden_state)
        return self.activation(out)
class PlayVehicleMoveClientBound(Packet):
    """Clientbound vehicle-move packet (id 43): absolute position plus look angles."""

    id = 43
    to = 1

    def __init__(self, x: float, y: float, z: float, yaw: float, pitch: float) -> None:
        super().__init__()
        self.x = x
        self.y = y
        self.z = z
        self.yaw = yaw
        self.pitch = pitch

    def encode(self) -> bytes:
        # Three doubles for position, two floats for rotation.
        fields = (('d', self.x), ('d', self.y), ('d', self.z), ('f', self.yaw), ('f', self.pitch))
        return b''.join(Buffer.pack(fmt, value) for (fmt, value) in fields)
class ViewAdminForm(forms.ModelForm):
    """Admin form for View objects; delegates validation to project validators."""

    # Required here even if the model field allows it to be blank.
    uri_path = forms.SlugField(required=True)

    class Meta():
        model = View
        fields = ['uri', 'uri_prefix', 'uri_path', 'comment', 'locked', 'catalogs', 'sites', 'editors', 'groups', 'template', 'title_lang1', 'title_lang2', 'title_lang3', 'title_lang4', 'title_lang5', 'help_lang1', 'help_lang2', 'help_lang3', 'help_lang4', 'help_lang5', 'available']

    def clean(self):
        # Each validator is instantiated with the instance and called with
        # the cleaned data; they raise ValidationError on failure.
        ViewUniqueURIValidator(self.instance)(self.cleaned_data)
        ViewLockedValidator(self.instance)(self.cleaned_data)
# NOTE(review): the bare string expression below looks like the argument of a
# stripped step decorator (e.g. @then('the reported width of the cell is
# {width}')) — confirm against the original file.
('the reported width of the cell is {width}')
def then_the_reported_width_of_the_cell_is_width(context, width):
    """Step: assert the current cell reports the expected width (None or 1 inch)."""
    expected_width = {'None': None, '1 inch': Inches(1)}[width]
    actual_width = context.cell.width
    assert (actual_width == expected_width), ('expected %s, got %s' % (expected_width, actual_width))
# NOTE(review): the leading '.parametrize(...)' appears to have lost its
# '@pytest.mark' prefix in extraction — confirm against the original file.
.parametrize('M, a, p, size', [(np.array(10, dtype=np.int64), np.array(0.5, dtype=config.floatX), np.array(0.5, dtype=config.floatX), None), (np.array(10, dtype=np.int64), np.array(0.5, dtype=config.floatX), np.array(0.5, dtype=config.floatX), []), (np.array(10, dtype=np.int64), np.array(0.5, dtype=config.floatX), np.array(0.5, dtype=config.floatX), [2, 3]), (np.full((1, 2), 10, dtype=np.int64), np.array(0.5, dtype=config.floatX), np.array(0.5, dtype=config.floatX), None)])
def test_betabinom_samples(M, a, p, size):
    """Compare betabinom samples against the op's own rng_fn for several sizes."""
    compare_sample_values(betabinom, M, a, p, size=size, test_fn=(lambda *args, size=None, random_state=None, **kwargs: betabinom.rng_fn(random_state, *(args + (size,)))))
def downsample(img0, size, filter=None):
    """Downsample a square image tensor so its last dimension becomes `size`.

    With `filter`, applies upfirdn2d.downsample2d repeatedly (factor 2 per
    pass); otherwise uses bilinear interpolation. Returns the input unchanged
    when no reduction is needed (factor <= 1).
    """
    factor = img0.size(-1) // size
    if factor <= 1:
        return img0
    if filter is None:
        return F.interpolate(img0, (size, size), mode='bilinear', align_corners=False)
    from third_party.stylegan2_official_ops import upfirdn2d
    for _ in range(int(math.log2(factor))):
        img0 = upfirdn2d.downsample2d(img0, filter, down=2)
    return img0
class FullConvolutionFunction(Function):
    """autograd Function bridging to the SCN FullConvolution C++ kernels.

    NOTE(review): forward/backward take `ctx` but carry no @staticmethod
    decorator here — presumably decorated or wrapped outside this view
    (or stripped in extraction); confirm.
    """

    def forward(ctx, input_features, weight, bias, input_metadata, output_metadata, input_spatial_size, output_spatial_size, dimension, filter_size, filter_stride):
        """Run the sparse full convolution; saves tensors/metadata for backward."""
        output_features = input_features.new()
        ctx.input_metadata = input_metadata
        ctx.output_metadata = output_metadata
        ctx.dimension = dimension
        ctx.save_for_backward(input_features, input_spatial_size, weight, bias, output_spatial_size, filter_size, filter_stride)
        # The C++ op fills output_features in place and returns a multiply-add
        # count, accumulated into the module-level FLOP counter.
        sparseconvnet.forward_pass_multiplyAdd_count += sparseconvnet.SCN.FullConvolution_updateOutput(input_spatial_size, output_spatial_size, filter_size, filter_stride, input_metadata, output_metadata, input_features, output_features, weight, bias)
        sparseconvnet.forward_pass_hidden_states += output_features.nelement()
        return output_features

    def backward(ctx, grad_output):
        """Compute grads for input features, weight and bias; None for the rest."""
        (input_features, input_spatial_size, weight, bias, output_spatial_size, filter_size, filter_stride) = ctx.saved_tensors
        grad_input = grad_output.new()
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        # Fills grad_input / grad_weight / grad_bias in place.
        sparseconvnet.SCN.FullConvolution_backward(input_spatial_size, output_spatial_size, filter_size, filter_stride, ctx.input_metadata, ctx.output_metadata, input_features, grad_input, grad_output.contiguous(), weight, grad_weight, grad_bias)
        # One gradient slot per forward argument; non-tensor args get None.
        return (grad_input, grad_weight, optionalTensorReturn(grad_bias), None, None, None, None, None, None, None)
class WarmUpLR(_LRScheduler):
    """Linear warm-up scheduler.

    Scales each base learning rate by `last_epoch / total_iters`, so the LR
    grows linearly from 0 to the base rate over `total_iters` steps.
    """

    def __init__(self, optimizer, total_iters, last_epoch=-1):
        self.total_iters = total_iters
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        # The 1e-08 term guards against division by zero when total_iters == 0.
        return [(base_lr * self.last_epoch) / (self.total_iters + 1e-08) for base_lr in self.base_lrs]
class NormalImport(ImportInfo):
    """Represents a plain ``import a, b as c`` statement."""

    def __init__(self, names_and_aliases):
        # Sequence of (module_name, alias_or_None) pairs, in statement order.
        self.names_and_aliases = names_and_aliases

    def get_imported_primaries(self, context):
        """Return the names this import binds: the alias when present,
        otherwise the module name itself."""
        return [alias if alias else name
                for name, alias in self.names_and_aliases]

    def get_import_statement(self):
        """Render the statement back to source, e.g. ``import os, numpy as np``."""
        stmt = 'import '
        for name, alias in self.names_and_aliases:
            stmt += ('%s as %s' % (name, alias)) if alias else name
            stmt += ', '
        # Drop the trailing ", " separator.
        return stmt[:(- 2)]

    def is_empty(self):
        return (len(self.names_and_aliases) == 0)
def one_round(ql: Qiling, key: bytes, key_address):
    """Install one candidate key, emulate the check stub, and report success.

    Writes the expanded key at *key_address*, runs the six-byte verification
    routine, then reads the 512-byte sector the stub leaves on the stack
    (sp + 544): the key is accepted only if every byte equals 0x37 ('7').
    """
    ql.mem.write(key_address, generate_key(key))
    ql.run(begin=verfication_start_ip, end=(verfication_start_ip + 6))
    sector = ql.mem.read((ql.arch.regs.sp + 544), 512)
    return all(byte == 55 for byte in sector)
def _lex(term, others, operator, matrix):
    """Normalize operands and build a lex / lex-matrix ordering constraint.

    Depending on the shapes of ``term``/``others``, produces ``lists`` — a
    rectangular list of equal-length flat lists — then wraps it in
    ConstraintLexMatrix (when ``matrix``) or ConstraintLex.
    """
    if (len(others) == 0):
        # Single 2-D operand: each row of ``term`` becomes one list.
        lists = [flatten(l) for l in term]
        assert is_matrix(lists, Variable)
    elif (not is_1d_list(term, Variable)):
        # Two multi-dimensional operands compared element-wise after flattening.
        (l1, l2) = (flatten(term), flatten(others))
        assert (len(l1) == len(l2))
        lists = [l1, l2]
    elif ((len(others) == 1) and is_1d_list(others[0], int)):
        # A variable list against a constant integer tuple (plain lex only).
        assert (matrix is False)
        lists = ([flatten(term)] + [flatten(others[0])])
    else:
        # A variable list against one or more further variable lists.
        assert all((is_1d_list(l, Variable) for l in others))
        lists = ([flatten(term)] + [flatten(l) for l in others])
    assert is_matrix(lists)
    assert all(((len(l) == len(lists[0])) for l in lists))
    # Row index 1 may hold ints (constant-tuple case); all other rows must be Variables.
    assert all((checkType(l, ([int, Variable] if (i == 1) else [Variable])) for (i, l) in enumerate(lists)))
    checkType(operator, TypeOrderedOperator)
    return (ECtr(ConstraintLexMatrix(lists, operator)) if matrix else ECtr(ConstraintLex(lists, operator)))
class PIXELTrainer(Trainer):
    """Trainer variant whose loss computation mirrors the stock HF recipe."""

    def compute_loss(self, model, inputs, return_outputs=False):
        """Run *model* on *inputs* and derive the training loss.

        When a label smoother is configured and ``inputs`` contains labels,
        the labels are popped and smoothed against the model outputs;
        otherwise the loss reported by the model itself is used.
        """
        use_smoothing = (self.label_smoother is not None) and ('labels' in inputs)
        labels = inputs.pop('labels') if use_smoothing else None
        outputs = model(**inputs)
        # Cache past state for models that expose it (e.g. XLNet-style mems).
        if self.args.past_index >= 0:
            self._past = outputs[self.args.past_index]
        if labels is None:
            loss = outputs['loss'] if isinstance(outputs, dict) else outputs[0]
        else:
            loss = self.label_smoother(outputs, labels)
        return (loss, outputs) if return_outputs else loss
# NOTE(review): these bare names look like decorator lines whose '@require_...'
# prefixes were stripped in this dump (e.g. @require_torch, @require_sentencepiece,
# @require_tokenizers) — restore from the upstream test file.
_torch
_sentencepiece
_tokenizers
class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon):
    """Integration tests for HF Trainer on a tiny synthetic regression task.

    ``setUp`` trains two reference models (default seed and seed=314) whose
    (a, b) parameters later tests compare against via ``check_trained_model``.
    """
    def setUp(self):
        super().setUp()
        args = TrainingArguments('..')
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size
        # Reference run with the default seed.
        trainer = get_regression_trainer(learning_rate=0.1)
        trainer.train()
        self.default_trained_model = (trainer.model.a, trainer.model.b)
        # Reference run with an alternate seed.
        trainer = get_regression_trainer(learning_rate=0.1, seed=314)
        trainer.train()
        self.alternate_trained_model = (trainer.model.a, trainer.model.b)
    def check_trained_model(self, model, alternate_seed=False):
        """Assert *model*'s (a, b) parameters match the chosen reference run."""
        (a, b) = (self.alternate_trained_model if alternate_seed else self.default_trained_model)
        self.assertTrue(torch.allclose(model.a, a))
        self.assertTrue(torch.allclose(model.b, b))
    def test_reproducible_training(self):
        # Same seed must reproduce the reference parameters exactly.
        trainer = get_regression_trainer(learning_rate=0.1)
        trainer.train()
        self.check_trained_model(trainer.model)
        trainer = get_regression_trainer(learning_rate=0.1, seed=314)
        trainer.train()
        self.check_trained_model(trainer.model, alternate_seed=True)
    def test_trainer_with_datasets(self):
        import datasets
        np.random.seed(42)
        x = np.random.normal(size=(64,)).astype(np.float32)
        y = (((2.0 * x) + 3.0) + np.random.normal(scale=0.1, size=(64,)))
        # Base case: a datasets.Dataset with plain numpy columns.
        train_dataset = datasets.Dataset.from_dict({'input_x': x, 'label': y})
        model = RegressionModel()
        args = TrainingArguments('./regression', learning_rate=0.1)
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)
        # Same dataset, pre-formatted as torch tensors.
        train_dataset.set_format(type='torch', dtype=torch.float32)
        model = RegressionModel()
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)
        # An extra, unused column must be ignored by the Trainer.
        z = np.random.normal(size=(64,)).astype(np.float32)
        train_dataset = datasets.Dataset.from_dict({'input_x': x, 'label': y, 'extra': z})
        model = RegressionModel()
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)
    def test_model_init(self):
        train_dataset = RegressionDataset()
        args = TrainingArguments('./regression', learning_rate=0.1)
        trainer = Trainer(args=args, train_dataset=train_dataset, model_init=(lambda : RegressionModel()))
        trainer.train()
        self.check_trained_model(trainer.model)
        # Re-training re-initializes the model via model_init; result must match.
        trainer.train()
        self.check_trained_model(trainer.model)
        # Changing the seed before re-training must match the alternate reference.
        trainer.args.seed = 314
        trainer.train()
        self.check_trained_model(trainer.model, alternate_seed=True)
    def test_gradient_accumulation(self):
        # accumulation=2 with half the batch size must equal the reference run.
        trainer = get_regression_trainer(gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1)
        trainer.train()
        self.check_trained_model(trainer.model)
    def test_training_loss(self):
        n_gpus = max(1, get_gpu_count())
        # With logging at every step, mean of logged losses == reported train_loss.
        trainer = get_regression_trainer(logging_steps=(64 / (8 * n_gpus)))
        trainer.train()
        log_history = trainer.state.log_history
        losses = [log['loss'] for log in log_history if ('loss' in log)]
        train_loss = log_history[(- 1)]['train_loss']
        self.assertAlmostEqual((sum(losses) / len(losses)), train_loss, places=4)
        # train_loss must be independent of the logging interval.
        trainer = get_regression_trainer(logging_steps=5)
        trainer.train()
        log_history = trainer.state.log_history
        new_train_loss = log_history[(- 1)]['train_loss']
        self.assertAlmostEqual(train_loss, new_train_loss, places=4)
    def test_custom_optimizer(self):
        train_dataset = RegressionDataset()
        args = TrainingArguments('./regression')
        model = RegressionModel()
        optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=(lambda x: 1.0))
        trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
        trainer.train()
        # A different LR must diverge from the default reference run.
        (a, b) = self.default_trained_model
        self.assertFalse(torch.allclose(trainer.model.a, a))
        self.assertFalse(torch.allclose(trainer.model.b, b))
        self.assertEqual(trainer.optimizer.state_dict()['param_groups'][0]['lr'], 1.0)
    def test_adafactor_lr_none(self):
        from transformers.optimization import Adafactor, AdafactorSchedule
        train_dataset = RegressionDataset()
        args = TrainingArguments('./regression')
        model = RegressionModel()
        # lr=None puts Adafactor in relative-step mode (LR computed internally).
        optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
        lr_scheduler = AdafactorSchedule(optimizer)
        trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
        trainer.train()
        (a, b) = self.default_trained_model
        self.assertFalse(torch.allclose(trainer.model.a, a))
        self.assertFalse(torch.allclose(trainer.model.b, b))
        self.assertGreater(trainer.optimizer.state_dict()['param_groups'][0]['lr'], 0)
    # NOTE(review): stripped decorators (likely @require_torch_gpu / @require_torch_bf16).
    _torch_gpu
    _torch_bf16
    def test_mixed_bf16(self):
        # bf16 training must still match the fp32 reference closely.
        trainer = get_regression_trainer(learning_rate=0.1, bf16=True)
        trainer.train()
        self.check_trained_model(trainer.model)
        # bf16 + apex backend is an unsupported combination.
        with self.assertRaises(ValueError):
            trainer = get_regression_trainer(learning_rate=0.1, bf16=True, half_precision_backend='apex')
    # NOTE(review): stripped decorators (likely @require_torch_gpu / @require_torch_tf32).
    _torch_gpu
    _torch_tf32
    def test_tf32(self):
        trainer = get_regression_trainer(learning_rate=0.1, tf32=True)
        trainer.train()
        self.check_trained_model(trainer.model)
class ColorBufferImage(BufferImage):
    """pyglet BufferImage backed by an RGBA color buffer."""
    gl_format = GL_RGBA  # OpenGL pixel format used when reading the buffer
    format = 'RGBA'      # pyglet image-format string
    def get_texture(self, rectangle=False):
        """Copy the buffer contents into a new GL_TEXTURE_2D and return it."""
        texture = Texture.create(self.width, self.height, GL_TEXTURE_2D, GL_RGBA, blank_data=False)
        self.blit_to_texture(texture.target, texture.level, self.anchor_x, self.anchor_y, 0)
        return texture
    def blit_to_texture(self, target, level, x, y, z):
        """Copy this buffer region into the currently bound texture at (x, y).

        Reads from ``self.gl_buffer``; the anchor offset is subtracted so the
        image's anchor lands at the requested coordinates. ``z`` is unused
        for 2-D targets.
        """
        glReadBuffer(self.gl_buffer)
        glCopyTexSubImage2D(target, level, (x - self.anchor_x), (y - self.anchor_y), self.x, self.y, self.width, self.height)
class MegaupNet(SimpleDownloader):
    """Megaup.net downloader plugin for pyLoad.

    Free download flow: locate the obfuscated in-page script that builds the
    download form, strip its browser-only tail, run it through the JS engine
    to recover the form parameters, then follow the redirect URL.
    """
    __name__ = 'MegaupNet'
    __type__ = 'downloader'
    __version__ = '0.03'
    __status__ = 'testing'
    # TODO(review): the original pattern literal was truncated in this dump
    # (`__pattern__ = '` with no closing quote); restore from the upstream plugin.
    __pattern__ = r'https?://(?:www\.)?megaup\.net/\w+'
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Megaup.net downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
    NAME_PATTERN = 'File: (?P<N>.+?)<'
    SIZE_PATTERN = 'Size: (?P<S>[\\d.,]+) (?P<U>[\\w^_]+)'
    OFFLINE_PATTERN = 'The file you are trying to download is no longer available!'
    WAIT_PATTERN = 'var seconds = (\\d+);'
    LINK_PATTERN = 'window.location.replace\\("(.+?)"\\)'

    def handle_free(self, pyfile):
        """Recover the free-download link by executing the page's obfuscated JS."""
        # Find the single <script> containing the deobfuscation function.
        s = [x for x in re.findall('<script[\\s\\S]*?>([\\s\\S]*?)</script>', self.data, re.I) if ('function DeObfuscate_String_and_Create_Form_With_Mhoa_URL' in x)]
        if (len(s) != 1):
            self.fail(self._('deobfuscate function not found'))
        # Minimal fake DOM so the script's window/document probes don't crash.
        init = 'window = {\n innerWidth: 1280,\n innerHeight: 567,\n };\n var document = {\n documentElement: {clientWidth: 1280, clientHeight: 567},\n body: {clientWidth: 1280, clientHeight: 567}\n };'
        deobfuscate_script = (init + s[0])
        # Replace the browser-only tail with a JSON return of the form fields.
        deobfuscate_script = re.sub('if\\s*\\(width_trinh_duyet[\\s\\S]*', 'return JSON.stringify({idurl:url_da_encrypt, idfilename:FileName, idfilesize:FileSize})};', deobfuscate_script)
        m = re.search('if \\(seconds == 0\\)[\\s\\S]+?(DeObfuscate_String_and_Create_Form_With_Mhoa_URL\\(.+?\\);)', self.data)
        if (m is None):
            self.fail(self._('deobfuscate function call not found'))
        deobfuscate_script += m.group(1)
        json_data = eval_js(deobfuscate_script)
        if (not json_data.startswith('{')):
            self.fail(self._('Unexpected response, expected JSON data'))
        params = json.loads(json_data)
        # TODO(review): the request URL literal was lost in this dump
        # (`self.load(' get=params)`); restore the exact endpoint from the
        # upstream plugin before shipping.
        self.data = self.load('https://megaup.net/mdownload/', get=params)
        m = re.search(self.LINK_PATTERN, self.data)
        if (m is not None):
            self.link = m.group(1)
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR decay (gamma at each milestone) with an initial warm-up.

    During the first ``warmup_iters`` steps the rate is scaled by a factor
    computed by ``_get_warmup_factor_at_iter``; afterwards the standard
    MultiStep decay applies: ``base_lr * gamma ** (#milestones passed)``.
    """

    def __init__(self, optimizer: torch.optim.Optimizer, milestones: List[int], gamma: float=0.1, warmup_factor: float=0.001, warmup_iters: int=1000, warmup_method: str='linear', last_epoch: int=(- 1)):
        if (not (list(milestones) == sorted(milestones))):
            # Bug fix: the message was previously passed as two ValueError args
            # ('...Got {}', milestones) and never formatted.
            raise ValueError('Milestones should be a list of increasing integers. Got {}'.format(milestones))
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        """Return the LR for each param group at the current step."""
        warmup_factor = _get_warmup_factor_at_iter(self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor)
        # bisect_right counts how many milestones have already been passed.
        return [((base_lr * warmup_factor) * (self.gamma ** bisect_right(self.milestones, self.last_epoch))) for base_lr in self.base_lrs]

    def _compute_values(self) -> List[float]:
        # The closed-form schedule above is also valid mid-stream.
        return self.get_lr()
def test_paramdeclaration():
    """ParameterDeclarations: equality, XML round-trip and version validation.

    NOTE(review): the parameters below reuse the same name ('myparam1',
    'myparam2') with different types — presumably deliberate so pardec and
    pardec2 mirror each other, but confirm it is not a copy-paste slip.
    """
    pardec = OSC.ParameterDeclarations()
    pardec.add_parameter(OSC.Parameter('myparam1', OSC.ParameterType.boolean, 'true'))
    pardec.add_parameter(OSC.Parameter('myparam1', OSC.ParameterType.double, '0.01'))
    # Identical declarations built independently must compare equal.
    pardec2 = OSC.ParameterDeclarations()
    pardec2.add_parameter(OSC.Parameter('myparam1', OSC.ParameterType.boolean, 'true'))
    pardec2.add_parameter(OSC.Parameter('myparam1', OSC.ParameterType.double, '0.01'))
    # Serializing to XML and parsing back must round-trip.
    pardec3 = OSC.ParameterDeclarations.parse(pardec.get_element())
    prettyprint(pardec.get_element())
    assert (pardec == pardec2)
    assert (pardec == pardec3)
    # Declarations with different parameter names must compare unequal.
    pardec4 = OSC.ParameterDeclarations()
    pardec4.add_parameter(OSC.Parameter('myparam2', OSC.ParameterType.int, '1'))
    pardec4.add_parameter(OSC.Parameter('myparam2', OSC.ParameterType.double, '0.01'))
    assert (pardec4 != pardec)
    # Element must validate against all supported schema versions.
    assert (version_validation('ParameterDeclarations', pardec, 0) == ValidationResponse.OK)
    assert (version_validation('ParameterDeclarations', pardec, 1) == ValidationResponse.OK)
    assert (version_validation('ParameterDeclarations', pardec, 2) == ValidationResponse.OK)
def _flatten_obs(obs):
assert (isinstance(obs, list) or isinstance(obs, tuple))
assert (len(obs) > 0)
if isinstance(obs[0], dict):
import collections
assert isinstance(obs, collections.OrderedDict)
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs) |
class Encoding(BaseType):
    """Config type for a character-encoding name (e.g. 'utf-8').

    An empty string maps to None; any other value must name a codec known
    to the ``codecs`` module.
    """
    def to_py(self, value: _StrUnset) -> _StrUnsetNone:
        """Validate *value* and return its Python form.

        Raises configexc.ValidationError when the codec cannot be looked up.
        """
        self._basic_py_validation(value, str)
        if isinstance(value, usertypes.Unset):
            # The unset sentinel passes straight through.
            return value
        elif (not value):
            # Empty string means "no encoding configured".
            return None
        try:
            codecs.lookup(value)
        except LookupError:
            raise configexc.ValidationError(value, 'is not a valid encoding!')
        return value
class Generator(nn.Module):
    """MelGAN-style waveform generator.

    Maps ``in_channels`` conditioning feature maps to a single tanh-bounded
    audio channel, upsampling 8 x 5 x 5 = 200x through three
    UpsampleNet/ResStack stages. All convolutions are weight-normalized.
    """
    def __init__(self, in_channels):
        super(Generator, self).__init__()
        self.generator = nn.Sequential(nn.ReflectionPad1d(3), nn.utils.weight_norm(nn.Conv1d(in_channels, 512, kernel_size=7)), nn.LeakyReLU(0.2, True), UpsampleNet(512, 256, 8), ResStack(256), nn.LeakyReLU(0.2, True), UpsampleNet(256, 128, 5), ResStack(128), nn.LeakyReLU(0.2, True), UpsampleNet(128, 64, 5), ResStack(64), nn.LeakyReLU(0.2, True), nn.ReflectionPad1d(3), nn.utils.weight_norm(nn.Conv1d(64, 1, kernel_size=7)), nn.Tanh())
        # Print the trainable parameter count at construction time.
        self.num_params()
    def forward(self, conditions):
        """Generate a waveform from conditioning features (B, C, T)."""
        return self.generator(conditions)
    def num_params(self):
        """Print the number of trainable parameters, in millions."""
        parameters = filter((lambda p: p.requires_grad), self.parameters())
        parameters = (sum([np.prod(p.size()) for p in parameters]) / 1000000)
        print(('Trainable Parameters: %.3f million' % parameters))
    def remove_weight_norm(self):
        """Strip weight normalization from every submodule (for inference)."""
        def _remove_weight_norm(m):
            try:
                torch.nn.utils.remove_weight_norm(m)
            except ValueError:
                # Module had no weight norm applied; skip it.
                return
        self.apply(_remove_weight_norm)
def ssim(img1, img2, window_size=11, size_average=True, mask=None, sigma=0.5):
    """Compute SSIM between two images after collapsing them to grayscale.

    Both inputs are averaged over their channel dimension (dim 1), a
    Gaussian window is built for the resulting single channel, moved to the
    inputs' device/dtype, and the comparison is delegated to ``_ssim``.
    """
    gray1 = img1.mean(1).unsqueeze(1)
    gray2 = img2.mean(1).unsqueeze(1)
    _, channel, _, _ = gray1.size()
    window = create_window(window_size, channel, sigma)
    if gray1.is_cuda:
        window = window.cuda(gray1.get_device())
    window = window.type_as(gray1)
    return _ssim(gray1, gray2, window, window_size, channel, size_average, mask=mask)
class TrainRegSet(torch.utils.data.Dataset):
    """Keypoint-regression training set.

    Reads image file names from ``data_train.csv`` (second column, header
    skipped) and keypoints from ``data_train.json``; images are resized,
    tensorized and normalized to [-1, 1].
    """

    def __init__(self, data_root, image_size):
        super().__init__()
        self.data_root = data_root
        # Bug fix: the CSV was opened inside a comprehension and never closed;
        # use a context manager so the handle is released deterministically.
        with open(os.path.join(data_root, 'data_train.csv')) as f:
            self.img_file = [l.split(',')[1].strip() for l in f][1:]
        with open(os.path.join(data_root, 'data_train.json'), 'r') as f:
            self.keypoints = json.load(f)
        self.keypoints = [self.keypoints[i]['keypoints'] for i in range(len(self.keypoints))]
        # roll(..., dims=-1) moves the last coordinate to the front of each
        # keypoint triple — NOTE(review): presumably (x, y, v) -> (v, x, y);
        # confirm against the JSON layout.
        self.keypoints = torch.tensor(self.keypoints).roll(shifts=1, dims=(- 1))
        self.transform = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    def __getitem__(self, idx):
        """Return {'img': normalized tensor, 'keypoints': tensor} for *idx*."""
        img = Image.open(os.path.join(self.data_root, 'img', self.img_file[idx]))
        sample = {'img': self.transform(img), 'keypoints': self.keypoints[idx]}
        return sample

    def __len__(self):
        return len(self.img_file)
class Widgets(object):
    """Builds the four edge/corner resize-grip frames of a frameless window.

    Each method wires one edge of *Form*: a 10px strip with corner squares
    whose cursors advertise the resize direction.

    NOTE(review): ``self.top`` / ``self.bottom`` attributes shadow the
    methods of the same name once called — kept as-is since external code
    may reference these attribute names.
    """

    def top(self, Form):
        """Create the top strip: NW corner, horizontal bar, NE corner."""
        if (not Form.objectName()):
            Form.setObjectName(u'Form')
        self.container_top = QFrame(Form)
        self.container_top.setObjectName(u'container_top')
        self.container_top.setGeometry(QRect(0, 0, 500, 10))
        self.container_top.setMinimumSize(QSize(0, 10))
        # Bug fix: the dump read "QSize(, 10)" — Qt Designer emits
        # QWIDGETSIZE_MAX (16777215) for an unbounded dimension.
        self.container_top.setMaximumSize(QSize(16777215, 10))
        self.container_top.setFrameShape(QFrame.Shape.NoFrame)
        self.container_top.setFrameShadow(QFrame.Shadow.Raised)
        self.top_layout = QHBoxLayout(self.container_top)
        self.top_layout.setSpacing(0)
        self.top_layout.setObjectName(u'top_layout')
        self.top_layout.setContentsMargins(0, 0, 0, 0)
        # Fixed 10x10 north-west corner grip (diagonal resize cursor).
        self.top_left = QFrame(self.container_top)
        self.top_left.setObjectName(u'top_left')
        self.top_left.setMinimumSize(QSize(10, 10))
        self.top_left.setMaximumSize(QSize(10, 10))
        self.top_left.setCursor(QCursor(Qt.CursorShape.SizeFDiagCursor))
        self.top_left.setStyleSheet(u'background-color: rgb(33, 37, 43);')
        self.top_left.setFrameShape(QFrame.Shape.NoFrame)
        self.top_left.setFrameShadow(QFrame.Shadow.Raised)
        self.top_layout.addWidget(self.top_left)
        # Stretchable middle bar (vertical resize cursor).
        self.top = QFrame(self.container_top)
        self.top.setObjectName(u'top')
        self.top.setCursor(QCursor(Qt.CursorShape.SizeVerCursor))
        self.top.setStyleSheet(u'background-color: rgb(85, 255, 255);')
        self.top.setFrameShape(QFrame.Shape.NoFrame)
        self.top.setFrameShadow(QFrame.Shadow.Raised)
        self.top_layout.addWidget(self.top)
        # Fixed 10x10 north-east corner grip.
        self.top_right = QFrame(self.container_top)
        self.top_right.setObjectName(u'top_right')
        self.top_right.setMinimumSize(QSize(10, 10))
        self.top_right.setMaximumSize(QSize(10, 10))
        self.top_right.setCursor(QCursor(Qt.CursorShape.SizeBDiagCursor))
        self.top_right.setStyleSheet(u'background-color: rgb(33, 37, 43);')
        self.top_right.setFrameShape(QFrame.Shape.NoFrame)
        self.top_right.setFrameShadow(QFrame.Shadow.Raised)
        self.top_layout.addWidget(self.top_right)

    def bottom(self, Form):
        """Create the bottom strip: SW corner, horizontal bar, SE corner."""
        if (not Form.objectName()):
            Form.setObjectName(u'Form')
        self.container_bottom = QFrame(Form)
        self.container_bottom.setObjectName(u'container_bottom')
        self.container_bottom.setGeometry(QRect(0, 0, 500, 10))
        self.container_bottom.setMinimumSize(QSize(0, 10))
        # Bug fix: same truncated "QSize(, 10)" as in top(); see note there.
        self.container_bottom.setMaximumSize(QSize(16777215, 10))
        self.container_bottom.setFrameShape(QFrame.Shape.NoFrame)
        self.container_bottom.setFrameShadow(QFrame.Shadow.Raised)
        self.bottom_layout = QHBoxLayout(self.container_bottom)
        self.bottom_layout.setSpacing(0)
        self.bottom_layout.setObjectName(u'bottom_layout')
        self.bottom_layout.setContentsMargins(0, 0, 0, 0)
        self.bottom_left = QFrame(self.container_bottom)
        self.bottom_left.setObjectName(u'bottom_left')
        self.bottom_left.setMinimumSize(QSize(10, 10))
        self.bottom_left.setMaximumSize(QSize(10, 10))
        self.bottom_left.setCursor(QCursor(Qt.CursorShape.SizeBDiagCursor))
        self.bottom_left.setStyleSheet(u'background-color: rgb(33, 37, 43);')
        self.bottom_left.setFrameShape(QFrame.Shape.NoFrame)
        self.bottom_left.setFrameShadow(QFrame.Shadow.Raised)
        self.bottom_layout.addWidget(self.bottom_left)
        self.bottom = QFrame(self.container_bottom)
        self.bottom.setObjectName(u'bottom')
        self.bottom.setCursor(QCursor(Qt.CursorShape.SizeVerCursor))
        self.bottom.setStyleSheet(u'background-color: rgb(85, 170, 0);')
        self.bottom.setFrameShape(QFrame.Shape.NoFrame)
        self.bottom.setFrameShadow(QFrame.Shadow.Raised)
        self.bottom_layout.addWidget(self.bottom)
        self.bottom_right = QFrame(self.container_bottom)
        self.bottom_right.setObjectName(u'bottom_right')
        self.bottom_right.setMinimumSize(QSize(10, 10))
        self.bottom_right.setMaximumSize(QSize(10, 10))
        self.bottom_right.setCursor(QCursor(Qt.CursorShape.SizeFDiagCursor))
        self.bottom_right.setStyleSheet(u'background-color: rgb(33, 37, 43);')
        self.bottom_right.setFrameShape(QFrame.Shape.NoFrame)
        self.bottom_right.setFrameShadow(QFrame.Shadow.Raised)
        self.bottom_layout.addWidget(self.bottom_right)

    def left(self, Form):
        """Create the left vertical grip strip."""
        if (not Form.objectName()):
            Form.setObjectName(u'Form')
        self.leftgrip = QFrame(Form)
        self.leftgrip.setObjectName(u'left')
        self.leftgrip.setGeometry(QRect(0, 10, 10, 480))
        self.leftgrip.setMinimumSize(QSize(10, 0))
        self.leftgrip.setCursor(QCursor(Qt.CursorShape.SizeHorCursor))
        self.leftgrip.setStyleSheet(u'background-color: rgb(255, 121, 198);')
        self.leftgrip.setFrameShape(QFrame.Shape.NoFrame)
        self.leftgrip.setFrameShadow(QFrame.Shadow.Raised)

    def right(self, Form):
        """Create the right vertical grip strip (also sizes the Form)."""
        if (not Form.objectName()):
            Form.setObjectName(u'Form')
        Form.resize(500, 500)
        self.rightgrip = QFrame(Form)
        self.rightgrip.setObjectName(u'right')
        self.rightgrip.setGeometry(QRect(0, 0, 10, 500))
        self.rightgrip.setMinimumSize(QSize(10, 0))
        self.rightgrip.setCursor(QCursor(Qt.CursorShape.SizeHorCursor))
        self.rightgrip.setStyleSheet(u'background-color: rgb(255, 0, 127);')
        self.rightgrip.setFrameShape(QFrame.Shape.NoFrame)
        self.rightgrip.setFrameShadow(QFrame.Shadow.Raised)
# NOTE(review): this bare call looks like a stripped decorator — upstream
# fairseq registers the class with @register_optimizer('lamb').
_optimizer('lamb')
class FairseqLAMB(FairseqOptimizer):
    """LAMB optimizer wrapper around apex's FusedLAMB.

    NOTE(review): upstream, ``add_args`` is a ``@staticmethod`` and
    ``optimizer_config`` a ``@property``; the decorators appear to have been
    stripped in this dump (``**self.optimizer_config`` below only works when
    the property decorator is present).
    """
    def __init__(self, args, params):
        super().__init__(args)
        try:
            from apex.optimizers import FusedLAMB
            self._optimizer = FusedLAMB(params, **self.optimizer_config)
        except ImportError:
            # apex is an optional dependency; fail with an actionable message.
            raise ImportError('Please install apex to use LAMB optimizer')
    def add_args(parser):
        """Add LAMB-specific command-line arguments to *parser*."""
        parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B', help='betas for LAMB optimizer')
        parser.add_argument('--lamb-eps', type=float, default=1e-08, metavar='D', help='epsilon for LAMB optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
    def optimizer_config(self):
        """Return kwargs for FusedLAMB derived from the parsed args.

        ``--lamb-betas`` arrives as a string and is turned into a tuple via
        ``eval`` — NOTE(review): eval on a CLI value; trusted input assumed.
        """
        return {'lr': self.args.lr[0], 'betas': eval(self.args.lamb_betas), 'eps': self.args.lamb_eps, 'weight_decay': self.args.weight_decay}
def combine_trk_cat(split, dataset, method, suffix, num_hypo):
    """Merge per-category tracking results into combined per-sequence files.

    For each hypothesis index, concatenates every category's ``data_<h>``
    per-sequence result files and its ``trk_withid_<h>`` per-frame files
    into a single ``<method>_<split>_<suffix>`` results folder.
    """
    file_path = os.path.dirname(os.path.realpath(__file__))
    root_dir = os.path.join(file_path, '../../results', dataset)
    (_, det_id2str, _, seq_list, _) = get_subfolder_seq(dataset, split)
    config_path = os.path.join(file_path, ('../../configs/%s.yml' % dataset))
    (cfg, _) = Config(config_path)
    # NOTE(review): this log handle is opened but never written or closed.
    log = os.path.join(root_dir, ('%s_%s_%s' % (method, split, suffix)), 'combine_log.txt')
    mkdir_if_missing(log)
    log = open(log, 'w+')
    # One source subfolder per object category.
    subset = [('%s_%s_%s_%s' % (method, cat, split, suffix)) for cat in cfg.cat_list]
    for hypo_index in range(num_hypo):
        data_suffix = ('_%d' % hypo_index)
        frame_dict = find_all_frames(root_dir, subset, data_suffix, seq_list)
        # Combine the per-sequence 'data' result files across categories.
        save_root = os.path.join(root_dir, ('%s_%s_%s' % (method, split, suffix)), ('data' + data_suffix))
        mkdir_if_missing(save_root)
        for seq_tmp in seq_list:
            file_list_tmp = list()
            for subset_tmp in subset:
                file_tmp = os.path.join(root_dir, subset_tmp, ('data' + data_suffix), (seq_tmp + '.txt'))
                file_list_tmp.append(file_tmp)
            save_path_tmp = os.path.join(save_root, (seq_tmp + '.txt'))
            combine_files(file_list_tmp, save_path_tmp)
        # Combine the per-frame 'trk_withid' files (unsorted; only files that exist).
        save_root = os.path.join(root_dir, ('%s_%s_%s' % (method, split, suffix)), ('trk_withid' + data_suffix))
        for seq_tmp in seq_list:
            save_dir = os.path.join(save_root, seq_tmp)
            mkdir_if_missing(save_dir)
            for frame_tmp in frame_dict[seq_tmp]:
                file_list_tmp = list()
                for subset_tmp in subset:
                    file_tmp = os.path.join(root_dir, subset_tmp, ('trk_withid' + data_suffix), seq_tmp, (frame_tmp + '.txt'))
                    if is_path_exists(file_tmp):
                        file_list_tmp.append(file_tmp)
                save_path_tmp = os.path.join(save_dir, (frame_tmp + '.txt'))
                combine_files(file_list_tmp, save_path_tmp, sort=False)
class TemporalBottleneck(nn.Module):
    """Wrap a torchvision ResNet Bottleneck, inserting a TAM after its first stage.

    The wrapped block's conv/bn/relu/downsample modules are reused verbatim;
    the only change is a temporal aggregation module (TAM) applied between
    the 1x1 reduction and the 3x3 convolution.
    """

    def __init__(self, net, n_segment=8, t_kernel_size=3, t_stride=1, t_padding=1):
        super(TemporalBottleneck, self).__init__()
        self.net = net
        assert isinstance(net, torchvision.models.resnet.Bottleneck)
        self.n_segment = n_segment
        self.tam = TAM(in_channels=net.conv1.out_channels, n_segment=n_segment, kernel_size=t_kernel_size, stride=t_stride, padding=t_padding)

    def forward(self, x):
        """Bottleneck forward with TAM between conv1 and conv2 stages."""
        shortcut = self.net.downsample(x) if self.net.downsample is not None else x
        y = self.net.relu(self.net.bn1(self.net.conv1(x)))
        y = self.tam(y)
        y = self.net.relu(self.net.bn2(self.net.conv2(y)))
        y = self.net.bn3(self.net.conv3(y))
        y += shortcut
        return self.net.relu(y)
def parse_options():
    """Parse the script's command-line flags.

    Supports ``-h/--help``, ``-s/--solver NAME`` (default 'm22') and a
    repeatable ``-v/--verbose``; returns ``(solver, verbosity, positionals)``.
    Exits with status 1 on a malformed option, 0 after printing help.
    """
    try:
        parsed, positionals = getopt.getopt(sys.argv[1:], 'hs:v', ['help', 'solver=', 'verbose'])
    except getopt.GetoptError as err:
        sys.stderr.write(str(err).capitalize())
        usage()
        sys.exit(1)
    solver_name = 'm22'
    verbosity = 0
    for flag, value in parsed:
        if flag in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif flag in ('-s', '--solver'):
            solver_name = str(value)
        elif flag in ('-v', '--verbose'):
            # Each -v bumps the verbosity level.
            verbosity += 1
        else:
            assert False, 'Unhandled option: {0} {1}'.format(flag, value)
    return (solver_name, verbosity, positionals)
class MultiLatentRPN(RPN):
    """RPN head that fuses predictions from several backbone levels.

    One LatentDepthwiseRPN per input level (registered as ``rpn2``,
    ``rpn3``, ...). Per-level cls/loc maps are fused by plain averaging or,
    when ``weighted``, by a softmax over learned weights combined with each
    level's own ``layer_weight``.
    """
    def __init__(self, anchor_num, in_channels, weighted=False):
        super(MultiLatentRPN, self).__init__()
        self.weighted = weighted
        # Sub-RPN names start at 'rpn2' to mirror backbone stage numbering.
        for i in range(len(in_channels)):
            self.add_module(('rpn' + str((i + 2))), LatentDepthwiseRPN(anchor_num, in_channels[i], in_channels[i]))
        if self.weighted:
            self.cls_weight = nn.Parameter(torch.ones(len(in_channels)))
            self.loc_weight = nn.Parameter(torch.ones(len(in_channels)))
    def log_softmax(self, cls):
        """Reshape (B, 2A, H, W) logits to (B, A, H, W, 2) and log-softmax the pair."""
        (b, a2, h, w) = cls.size()
        cls = cls.view(b, 2, (a2 // 2), h, w)
        cls = cls.permute(0, 2, 3, 4, 1).contiguous()
        cls = F.log_softmax(cls, dim=4)
        return cls
    def avg(self, lst):
        """Unweighted mean of a list of tensors."""
        return (sum(lst) / len(lst))
    def weighted_avg(self, lst, weight):
        """Weighted sum of a list of tensors."""
        s = 0
        for i in range(len(weight)):
            s += (lst[i] * weight[i])
        return s
    def get_grads(self, cls_feas, loc_feas, label_cls, label_loc, label_loc_weight):
        """Return per-level gradients of the RPN loss w.r.t. the input features.

        Detaches features and the final conv weights, recomputes the fused
        loss, backprops, and returns the (scaled) input-feature gradients.
        Mutates ``cls_feas``/``loc_feas`` in place with the detached,
        grad-requiring copies.
        """
        cls = []
        loc = []
        (cls_lws, loc_lws) = ([], [])
        for (idx, (cls_fea, loc_fea)) in enumerate(zip(cls_feas, loc_feas), start=2):
            rpn = getattr(self, ('rpn' + str(idx)))
            # Detach so only the feature gradients are produced, not the weights'.
            cls_fea = cls_fea.data.detach()
            cls_fea.requires_grad = True
            c = F.conv2d(cls_fea, weight=rpn.cls.last_weights.detach(), bias=rpn.cls.last_bias.detach())
            loc_fea = loc_fea.data.detach()
            loc_fea.requires_grad = True
            l = F.conv2d(loc_fea, weight=rpn.loc.last_weights.detach(), bias=rpn.loc.last_bias.detach())
            cls.append(c)
            loc.append(l)
            cls_feas[(idx - 2)] = cls_fea
            loc_feas[(idx - 2)] = loc_fea
            clw = rpn.cls.layer_weight
            llw = rpn.loc.layer_weight
            cls_lws.append(clw)
            loc_lws.append(llw)
        if self.weighted:
            cls_weight = F.softmax((self.cls_weight + torch.cat(cls_lws).detach()), 0)
            loc_weight = F.softmax((self.loc_weight + torch.cat(loc_lws).detach()), 0)
        if self.weighted:
            (cls, loc) = (self.weighted_avg(cls, cls_weight), self.weighted_avg(loc, loc_weight))
        else:
            (cls, loc) = (self.avg(cls), self.avg(loc))
        cls = self.log_softmax(cls)
        cls_loss = select_cross_entropy_loss(cls, label_cls)
        loc_loss = weight_l1_loss(loc, label_loc, label_loc_weight)
        loss = ((cfg.TRAIN.CLS_WEIGHT * cls_loss) + (cfg.TRAIN.LOC_WEIGHT * loc_loss))
        loss.backward()
        cls_grads = []
        loc_grads = []
        # NOTE(review): the * 10000 factor presumably counteracts tiny
        # gradient magnitudes downstream — confirm the consumer's scaling.
        for (idx, (cls_fea, loc_fea)) in enumerate(zip(cls_feas, loc_feas)):
            cls_grads.append((cls_fea.grad.data.detach() * 10000))
            loc_grads.append((loc_fea.grad.data.detach() * 10000))
        return (cls_grads, loc_grads)
    def update_weights(self, cls_feas, loc_feas, label_cls):
        """Let each sub-RPN update its latent weights; return the summed KL terms."""
        kl = 0
        for (idx, (cls_fea, loc_fea)) in enumerate(zip(cls_feas, loc_feas), start=2):
            rpn = getattr(self, ('rpn' + str(idx)))
            kl1 = rpn.cls.update_weight(cls_fea, label_cls)
            kl2 = rpn.loc.update_weight(loc_fea, label_cls)
            kl = ((kl + kl1) + kl2)
        return kl
    def get_cls_loc(self, cls_feas, loc_feas):
        """Fuse per-level predictions using the current (updated) last weights."""
        cls = []
        loc = []
        (cls_lws, loc_lws) = ([], [])
        for (idx, (cls_fea, loc_fea)) in enumerate(zip(cls_feas, loc_feas), start=2):
            rpn = getattr(self, ('rpn' + str(idx)))
            c = F.conv2d(cls_fea, weight=rpn.cls.last_weights, bias=rpn.cls.last_bias)
            l = F.conv2d(loc_fea, weight=rpn.loc.last_weights, bias=rpn.loc.last_bias)
            cls.append(c)
            loc.append(l)
            clw = rpn.cls.layer_weight
            llw = rpn.loc.layer_weight
            cls_lws.append(clw)
            loc_lws.append(llw)
        if self.weighted:
            cls_weight = F.softmax((self.cls_weight + torch.cat(cls_lws)), 0)
            loc_weight = F.softmax((self.loc_weight + torch.cat(loc_lws)), 0)
            return (self.weighted_avg(cls, cls_weight), self.weighted_avg(loc, loc_weight))
        else:
            return (self.avg(cls), self.avg(loc))
    def get_cls_loc0(self, cls_feas, loc_feas):
        """Fuse per-level predictions using the initial ('0') last weights."""
        cls = []
        loc = []
        for (idx, (cls_fea, loc_fea)) in enumerate(zip(cls_feas, loc_feas), start=2):
            rpn = getattr(self, ('rpn' + str(idx)))
            c = F.conv2d(cls_fea, weight=rpn.cls.last_weights0, bias=rpn.cls.last_bias0)
            l = F.conv2d(loc_fea, weight=rpn.loc.last_weights0, bias=rpn.loc.last_bias0)
            cls.append(c)
            loc.append(l)
        if self.weighted:
            # Initial fusion ignores the per-level layer_weights.
            cls_weight = F.softmax(self.cls_weight, 0)
            loc_weight = F.softmax(self.loc_weight, 0)
            return (self.weighted_avg(cls, cls_weight), self.weighted_avg(loc, loc_weight))
        else:
            return (self.avg(cls), self.avg(loc))
    def forward(self, z_fs, x_fs):
        """Run each sub-RPN on its (template, search) feature pair.

        Returns the raw per-level cls/loc feature lists (fusion happens in
        the get_cls_loc* helpers).
        """
        cls_feas = []
        loc_feas = []
        for (idx, (z_f, x_f)) in enumerate(zip(z_fs, x_fs), start=2):
            rpn = getattr(self, ('rpn' + str(idx)))
            (cls_fea, loc_fea) = rpn(z_f, x_f)
            cls_feas.append(cls_fea)
            loc_feas.append(loc_fea)
        return (cls_feas, loc_feas)
class MV2Block(nn.Module):
    """MobileNetV2-style inverted residual block.

    Pointwise expansion (skipped when ``expansion == 1``), 3x3 depthwise
    convolution, and pointwise projection to ``out`` channels, with a
    residual connection when stride is 1 and channel counts match.
    """

    def __init__(self, inp, out, stride=1, expansion=4):
        super().__init__()
        self.stride = stride
        hidden_dim = inp * expansion
        self.use_res_connection = stride == 1 and inp == out
        if expansion == 1:
            # hidden_dim == inp here, so the depthwise conv consumes the input directly.
            layers = [
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=self.stride, padding=1, groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.SiLU(),
                nn.Conv2d(hidden_dim, out, kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out),
            ]
        else:
            # NOTE(review): the depthwise conv below hard-codes stride=1
            # (ignoring self.stride), and a SiLU precedes the final BatchNorm —
            # both are unusual for an MV2 block; confirm against the trained
            # checkpoints before changing.
            layers = [
                nn.Conv2d(inp, hidden_dim, kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.SiLU(),
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.SiLU(),
                nn.Conv2d(hidden_dim, out, kernel_size=1, stride=1, bias=False),
                nn.SiLU(),
                nn.BatchNorm2d(out),
            ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        y = self.conv(x)
        return x + y if self.use_res_connection else y
def render_image(state, messages, wumpus, creature):
    """Render the current battle board to PNG bytes.

    Composites the wumpus/creature sprites (white silhouette pass, then the
    normal sprite), the frame, name/attack text, the message log, and
    "spent" health-bar shading. *state* ('good'/'bad'/'best'), when truthy,
    overlays the matching end screen with a closing message.
    """
    board = Image.open(io.BytesIO(images['board'])).convert('RGBA')
    for img in (wumpus.images + creature.images):
        # Silhouette pass: desaturate, blow out brightness, and composite
        # three times to thicken the white outline.
        i = Image.open(io.BytesIO(images[img])).convert('RGBA')
        i = ImageEnhance.Color(i).enhance(0.0)
        i = ImageEnhance.Brightness(i).enhance(100.0)
        board = Image.alpha_composite(board, i)
        board = Image.alpha_composite(board, i)
        board = Image.alpha_composite(board, i)
        # Normal sprite pass on top of the silhouette.
        i = Image.open(io.BytesIO(images[img])).convert('RGBA')
        board = Image.alpha_composite(board, i)
    # Frame overlay goes on last so it clips the sprites.
    i = Image.open(io.BytesIO(images['frame'])).convert('RGBA')
    board = Image.alpha_composite(board, i)
    draw = ImageDraw.Draw(board)
    font = ImageFont.truetype(fonts['blk'], 10)
    draw.text((228, 139), creature.name, (0, 0, 0), font=font)
    font = ImageFont.truetype(fonts['med'], 9)
    draw.text((363, 139), f'{creature.attack[0]}-{creature.attack[1]}ATK', (0, 0, 0), font=font, anchor='ra')
    font = ImageFont.truetype(fonts['blk'], 11)
    for (i, msg) in enumerate(messages):
        draw.text((46, (184 + (i * 12))), msg, (0, 0, 0), font=font)
    # Grey out the spent (right-hand) portion of each 140x10 health bar.
    for health_bar in [((30, 47), wumpus), ((227, 152), creature)]:
        health_size = (140, 10)
        health_pos = health_bar[0]
        # Width of the remaining (colored) part, proportional to hp.
        health_delta = int(max(((140 * health_bar[1].hp) / health_bar[1].hp_max), 0))
        crop_coords = ((health_pos[0] + health_delta), health_pos[1], (health_pos[0] + health_size[0]), (health_pos[1] + health_size[1]))
        health = board.crop(crop_coords)
        health = ImageEnhance.Color(health).enhance(0.0)
        health = ImageEnhance.Brightness(health).enhance(2.0)
        board.paste(health, crop_coords[:2])
    if state:
        i = Image.open(io.BytesIO(images[f'end_{state}'])).convert('RGBA')
        board = Image.alpha_composite(board, i)
        draw = ImageDraw.Draw(board)
        font = ImageFont.truetype(fonts['med'], 14)
        msg = {'good': f'''You successfully
defeated the {creature.name}!''', 'bad': f'''You got your rear-end
handed to you!''', 'best': f'''You successfully
befriended the {creature.name}!'''}[state]
        # Center each line of the closing message horizontally.
        for (i, line) in enumerate(msg.split('\n')):
            draw.text(((395 // 2), (105 + (i * 15))), line, (255, 255, 255), font=font, anchor='ma')
    out = io.BytesIO()
    board.save(out, 'PNG')
    return out.getvalue()
def test_excinfo_getstatement():
    """Check ExceptionInfo's traceback records the expected (0-based) linenos.

    WARNING: the expected values are hard-coded offsets from the
    ``co_firstlineno`` of ``f`` and ``g`` — do not insert or delete lines
    inside ``g``'s body or between ``def f`` and the ``f()`` call below.
    """
    def g():
        raise ValueError
    def f():
        g()
    # This comment restores the one-line gap the original source had between
    # the definition of f and the try block (required by the +4 offset below).
    try:
        f()
    except ValueError:
        excinfo = _pytest._code.ExceptionInfo.from_current()
    # Expected: the f() call in the try, the g() call in f, the raise in g
    # (TracebackEntry.lineno is 0-based, hence the -1 adjustments).
    linenumbers = [((f.__code__.co_firstlineno - 1) + 4), ((f.__code__.co_firstlineno - 1) + 1), ((g.__code__.co_firstlineno - 1) + 1)]
    values = list(excinfo.traceback)
    foundlinenumbers = [x.lineno for x in values]
    assert (foundlinenumbers == linenumbers)
class MakeAnyNonExplicit(TrivialSyntheticTypeTranslator):
    """Type translator that downgrades explicit ``Any`` to ``special_form``.

    Used so that user-written ``Any`` annotations are not treated as
    explicit when re-analyzed; all other types pass through unchanged.
    """

    def visit_any(self, t: AnyType) -> Type:
        if t.type_of_any != TypeOfAny.explicit:
            return t
        return t.copy_modified(TypeOfAny.special_form)

    def visit_type_alias_type(self, t: TypeAliasType) -> Type:
        # Recurse into the alias's type arguments.
        return t.copy_modified(args=[arg.accept(self) for arg in t.args])
class PhysPkgReader():
    """Factory selecting the physical package reader suited to *pkg_file*."""

    def __new__(cls, pkg_file):
        """Return a ``_DirPkgReader`` or ``_ZipPkgReader`` instance.

        File-like objects are always treated as zip streams; string paths
        are probed as a directory first, then as a zip file. Raises
        PackageNotFoundError when a string path matches neither.
        """
        if not isinstance(pkg_file, str):
            return super(PhysPkgReader, cls).__new__(_ZipPkgReader)
        if os.path.isdir(pkg_file):
            reader_cls = _DirPkgReader
        elif is_zipfile(pkg_file):
            reader_cls = _ZipPkgReader
        else:
            raise PackageNotFoundError(("Package not found at '%s'" % pkg_file))
        return super(PhysPkgReader, cls).__new__(reader_cls)
class TestIncompleteExp(unittest.TestCase):
    """Tests for ``grammar.IncompleteExp``: a type template with unfilled fields.

    An incomplete expression (e.g. ``Example[{foo}]``) rejects set-algebra and
    comparison operations until its fields are filled via ``__getitem__``.
    """
    def IncompleteExp(self, name, fields):
        # Helper: build a template expression and assert it came back incomplete.
        expr = MockTemplate(name, fields)
        self.assertIsInstance(expr, grammar.IncompleteExp)
        return expr
    def test_construction_sanity(self):
        expr = MockTemplate('foo')
        # Constructing IncompleteExp directly from a fieldless template must fail.
        with self.assertRaisesRegex(ValueError, 'no fields'):
            expr = grammar.IncompleteExp(expr.template)
    def test_mod(self):
        # Predicates (%) are not allowed on incomplete expressions.
        with self.assertRaisesRegex(TypeError, 'predicate'):
            (self.IncompleteExp('foo', ('a',)) % ...)
    def test_or(self):
        # Unions (|) are not allowed on incomplete expressions.
        with self.assertRaisesRegex(TypeError, 'union'):
            (self.IncompleteExp('foo', ('a',)) | ...)
    def test_and(self):
        # Intersections (&) are not allowed on incomplete expressions.
        with self.assertRaisesRegex(TypeError, 'intersect'):
            (self.IncompleteExp('foo', ('a',)) & ...)
    def test_repr(self):
        # Unfilled fields render inside braces.
        self.assertEqual(repr(self.IncompleteExp('Example', ('foo',))), 'Example[{foo}]')
        self.assertEqual(repr(self.IncompleteExp('Example', ('f', 'b'))), 'Example[{f}, {b}]')
    def test_le(self):
        expr_a = self.IncompleteExp('Foo', ('a',))
        expr_b = self.IncompleteExp('Bar', ('b',))
        with self.assertRaisesRegex(TypeError, 'missing arguments'):
            (expr_a <= expr_b)
    def test_ge(self):
        expr_a = self.IncompleteExp('Foo', ('a',))
        expr_b = self.IncompleteExp('Bar', ('b',))
        with self.assertRaisesRegex(TypeError, 'missing arguments'):
            (expr_a >= expr_b)
    def test_in(self):
        expr_a = self.IncompleteExp('Foo', ('a',))
        with self.assertRaisesRegex(TypeError, 'missing arguments'):
            (... in expr_a)
    def test_field_w_typeexp(self):
        # Filling the single field with a complete expression yields a TypeExp
        # and the field name is passed through template validation.
        expr_a = self.IncompleteExp('Foo', ('baz',))
        expr_inner = MockTemplate('Bar')
        result = expr_a[expr_inner]
        self.assertEqual(repr(result), 'Foo[Bar]')
        self.assertIsInstance(result, grammar.TypeExp)
        self.assertEqual(expr_a.template.test_data['validate_field'], 'baz')
    def test_field_w_incompleteexp(self):
        expr_a = self.IncompleteExp('Foo', ('a',))
        expr_b = self.IncompleteExp('Bar', ('b',))
        with self.assertRaisesRegex(TypeError, 'complete type expression'):
            expr_a[expr_b]
    def test_field_w_nonsense(self):
        expr_a = self.IncompleteExp('Foo', ('a',))
        with self.assertRaisesRegex(TypeError, 'complete type expression'):
            expr_a[...]
    def test_field_wrong_length(self):
        # The number of supplied arguments must match the number of fields.
        X = MockTemplate('X')
        C = self.IncompleteExp('C', ['foo', 'bar'])
        with self.assertRaisesRegex(TypeError, '1'):
            C[X]
        C = self.IncompleteExp('C', ['foo'])
        with self.assertRaisesRegex(TypeError, '2'):
            C[(X, X)]
    def test_field_nested_expression(self):
        X = MockTemplate('X')
        C = self.IncompleteExp('C', ['foo', 'bar'])
        self.assertEqual(repr(C[(X, C[(C[(X, X)], X)])]), 'C[X, C[C[X, X], X]]')
    def test_field_invalid_member(self):
        C = self.IncompleteExp('C', ['foo'])
        InvalidMember = MockTemplate('InvalidMember')
        with self.assertRaisesRegex(TypeError, 'InvalidMember'):
            C[InvalidMember]
    def test_field_union(self):
        X = MockTemplate('X')
        Y = MockTemplate('Y')
        Z = MockTemplate('Z')
        C = self.IncompleteExp('C', ['foo'])
        result = C[((X | Y) | Z)]
        self.assertEqual(repr(result), 'C[X | Y | Z]')
    def test_field_invalid_union(self):
        X = MockTemplate('X')
        InvalidMember = MockTemplate('InvalidMember')
        Z = MockTemplate('Z')
        C = self.IncompleteExp('C', ['foo'])
        with self.assertRaisesRegex(TypeError, 'InvalidMember'):
            C[((X | InvalidMember) | Z)]
    def test_field_insane(self):
        # Invalid members must be detected even inside nested intersections.
        X = MockTemplate('X')
        Y = MockTemplate('Y')
        Z = MockTemplate('Z')
        InvalidIntersection = grammar.IntersectionExp(members=(MockTemplate('InvalidMember'), Y))
        C = self.IncompleteExp('C', ['foo'])
        with self.assertRaisesRegex(TypeError, 'InvalidMember'):
            C[((X | InvalidIntersection) | Z)]
    def test_iter_symbols(self):
        expr = self.IncompleteExp('Example', ('foo',))
        self.assertEqual(list(expr.iter_symbols()), ['Example'])
    def test_is_concrete(self):
        # An expression with unfilled fields is never concrete.
        expr = self.IncompleteExp('Example', ('foo',))
        self.assertFalse(expr.is_concrete())
    def test_pickle(self):
        # Round-tripping through pickle must preserve equality.
        expr = self.IncompleteExp('Example', ('foo',))
        clone = pickle.loads(pickle.dumps(expr))
        self.assertEqual(expr, clone)
    def test_proxy(self):
        # Unknown attributes are proxied through to the underlying template.
        expr = self.IncompleteExp('Example', ('foo',))
        self.assertIs(expr.example(), ...)
        self.assertTrue(expr.template.test_data['example'])
    def test_eq_nonsense(self):
        # Comparing with an unrelated object defers via NotImplemented.
        expr_a = self.IncompleteExp('Example', ('foo',))
        self.assertEqual(expr_a.__eq__(...), NotImplemented)
    def test_hash_eq_equals(self):
        expr_a = self.IncompleteExp('Example', ('foo',))
        expr_b = self.IncompleteExp('Example', ('foo',))
        self.assertEqual(hash(expr_a), hash(expr_b))
        self.assertEqual(expr_a, expr_b)
        self.assertTrue(expr_a.equals(expr_b))
    def test_not_hash_eq_equals_field_mismatch(self):
        # Same name but different field names: must not compare/hash equal.
        expr_a = self.IncompleteExp('Example', ('foo',))
        expr_b = self.IncompleteExp('Example', ('something_else',))
        self.assertNotEqual(hash(expr_a), hash(expr_b))
        self.assertNotEqual(expr_a, expr_b)
        self.assertFalse(expr_a.equals(expr_b))
def main(input_csv, output_dir, anno_file, num_jobs=24, is_bsn_case=False):
    """Download ActivityNet clips listed in ``input_csv`` and prune annotations.

    Args:
        input_csv (str): CSV file listing the videos to download.
        output_dir (str): Directory where downloaded clips are stored.
        anno_file (str): Annotation JSON to filter down to downloaded clips.
        num_jobs (int): Number of parallel download jobs; 1 runs sequentially.
        is_bsn_case (bool): Whether the annotations follow the BSN layout.
    """
    youtube_ids = parse_activitynet_annotations(input_csv, is_bsn_case)
    # exist_ok avoids a race between the existence check and the mkdir.
    os.makedirs(output_dir, exist_ok=True)
    if (num_jobs == 1):
        status_list = [download_clip_wrapper(index, output_dir) for index in youtube_ids]
    else:
        status_list = Parallel(n_jobs=num_jobs)((delayed(download_clip_wrapper)(index, output_dir) for index in youtube_ids))
    mmcv.dump(status_list, 'download_report.json')
    annotation = mmcv.load(anno_file)
    downloaded = {status[0]: status[1] for status in status_list}
    # Keep only annotations whose clip downloaded successfully; ids that never
    # appeared in the report (fix: previously a KeyError) are dropped too.
    annotation = {k: v for (k, v) in annotation.items() if downloaded.get(k, False)}
    if is_bsn_case:
        # Back up the original annotation file before overwriting it.
        # os.replace is portable and avoids shelling out (`os.system('mv ...')`
        # was unsafe for paths with spaces/metacharacters).
        anno_file_bak = anno_file.replace('.json', '_bak.json')
        os.replace(anno_file, anno_file_bak)
    mmcv.dump(annotation, anno_file)
class InterceptingSocket():
    """Socket wrapper that can delay or drop ``sendall``/``shutdown`` calls.

    Every other attribute access is forwarded untouched to the wrapped socket,
    making this a drop-in stand-in for fault-injection tests.
    """

    def __init__(self, socket):
        self.socket = socket
        # Seconds to sleep before the corresponding call, or None for no delay.
        self.delay_sendall = None
        self.delay_shutdown = None
        # When True, the corresponding call is swallowed instead of forwarded.
        self.drop_sendall = False
        self.drop_shutdown = False

    def __getattr__(self, name):
        # Delegate anything we don't override to the real socket.
        return getattr(self.socket, name)

    def sendall(self, bytes, flags=0):
        delay = self.delay_sendall
        if delay is not None:
            time.sleep(delay)
        if self.drop_sendall:
            return
        self.socket.sendall(bytes, flags)

    def shutdown(self, how):
        delay = self.delay_shutdown
        if delay is not None:
            time.sleep(delay)
        if self.drop_shutdown:
            return
        self.socket.shutdown(how)
class ObjectIdentityTestCase(TestCase):
    """Tests that pipeline terms are memoized: constructing a term twice with
    equal arguments returns the *same* object, while differing arguments
    produce distinct objects.
    """

    def assertSameObject(self, *objs):
        """Assert every argument is the identical object."""
        first = objs[0]
        for obj in objs:
            self.assertIs(first, obj)

    def assertDifferentObjects(self, *objs):
        """Assert no two arguments are the identical object."""
        id_counts = Counter(map(id, objs))
        ((most_common_id, count),) = id_counts.most_common(1)
        if (count > 1):
            dupe = [o for o in objs if (id(o) == most_common_id)][0]
            self.fail(('%s appeared %d times in %s' % (dupe, count, objs)))

    def test_instance_caching(self):
        self.assertSameObject(*gen_equivalent_factors())
        self.assertIs(SomeFactor(window_length=(SomeFactor.window_length + 1)), SomeFactor(window_length=(SomeFactor.window_length + 1)))
        self.assertIs(SomeFactor(dtype=float64_dtype), SomeFactor(dtype=float64_dtype))
        self.assertIs(SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]), SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]))
        mask = (SomeFactor() + SomeOtherFactor())
        self.assertIs(SomeFactor(mask=mask), SomeFactor(mask=mask))

    def test_instance_caching_multiple_outputs(self):
        self.assertIs(MultipleOutputs(), MultipleOutputs())
        self.assertIs(MultipleOutputs(), MultipleOutputs(outputs=MultipleOutputs.outputs))
        self.assertIs(MultipleOutputs(outputs=[MultipleOutputs.outputs[1], MultipleOutputs.outputs[0]]), MultipleOutputs(outputs=[MultipleOutputs.outputs[1], MultipleOutputs.outputs[0]]))
        # Unpacking a multi-output factor yields its cached output slots.
        multiple_outputs = MultipleOutputs()
        (alpha, beta) = MultipleOutputs()
        self.assertIs(alpha, multiple_outputs.alpha)
        self.assertIs(beta, multiple_outputs.beta)

    def test_instance_caching_of_slices(self):
        my_asset = Asset(1, exchange_info=ExchangeInfo('TEST FULL', 'TEST', 'US'))
        f = GenericCustomFactor()
        f_slice = f[my_asset]
        self.assertIs(f_slice, type(f_slice)(GenericCustomFactor(), my_asset))
        filt = GenericFilter()
        filt_slice = filt[my_asset]
        self.assertIs(filt_slice, type(filt_slice)(GenericFilter(), my_asset))
        c = GenericClassifier()
        c_slice = c[my_asset]
        self.assertIs(c_slice, type(c_slice)(GenericClassifier(), my_asset))

    def test_instance_non_caching(self):
        f = SomeFactor()
        # Any differing constructor argument must produce a new instance.
        self.assertIsNot(f, SomeFactor(window_length=(SomeFactor.window_length + 1)))
        self.assertIsNot(f, SomeFactor(dtype=datetime64ns_dtype))
        self.assertIsNot(f, SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]))

    def test_instance_non_caching_redefine_class(self):
        # Redefining a class with the same name must not hit the old cache.
        orig_foobar_instance = SomeFactorAlias()
        class SomeFactor(Factor):
            dtype = float64_dtype
            window_length = 5
            inputs = [SomeDataSet.foo, SomeDataSet.bar]
        self.assertIsNot(orig_foobar_instance, SomeFactor())

    def test_instance_non_caching_multiple_outputs(self):
        multiple_outputs = MultipleOutputs()
        self.assertIsNot(MultipleOutputs(), MultipleOutputs(outputs=['beta', 'gamma']))
        self.assertIsNot(multiple_outputs, MultipleOutputs(outputs=[MultipleOutputs.outputs[1], MultipleOutputs.outputs[0]]))
        orig_beta = multiple_outputs.beta
        (beta, gamma) = MultipleOutputs(outputs=['beta', 'gamma'])
        self.assertIsNot(beta, orig_beta)

    def test_instance_caching_binops(self):
        f = SomeFactor()
        g = SomeOtherFactor()
        # Identical binary expressions must resolve to the same cached term.
        for (lhs, rhs) in product([f, g], [f, g]):
            self.assertIs((lhs + rhs), (lhs + rhs))
            self.assertIs((lhs - rhs), (lhs - rhs))
            self.assertIs((lhs * rhs), (lhs * rhs))
            self.assertIs((lhs / rhs), (lhs / rhs))
            self.assertIs((lhs ** rhs), (lhs ** rhs))
            self.assertIs((1 + rhs), (1 + rhs))
            self.assertIs((rhs + 1), (rhs + 1))
            self.assertIs((1 - rhs), (1 - rhs))
            self.assertIs((rhs - 1), (rhs - 1))
            self.assertIs((2 * rhs), (2 * rhs))
            self.assertIs((rhs * 2), (rhs * 2))
            self.assertIs((2 / rhs), (2 / rhs))
            self.assertIs((rhs / 2), (rhs / 2))
            self.assertIs((2 ** rhs), (2 ** rhs))
            self.assertIs((rhs ** 2), (rhs ** 2))
        self.assertIs(((f + g) + (f + g)), ((f + g) + (f + g)))

    def test_instance_caching_unary_ops(self):
        f = SomeFactor()
        self.assertIs((- f), (- f))
        self.assertIs((- (- f)), (- (- f)))
        self.assertIs((- (- (- f))), (- (- (- f))))

    def test_instance_caching_math_funcs(self):
        f = SomeFactor()
        for funcname in NUMEXPR_MATH_FUNCS:
            method = getattr(f, funcname)
            self.assertIs(method(), method())

    def test_instance_caching_grouped_transforms(self):
        f = SomeFactor()
        c = GenericClassifier()
        m = GenericFilter()
        for meth in (f.demean, f.zscore, f.rank):
            self.assertIs(meth(), meth())
            self.assertIs(meth(groupby=c), meth(groupby=c))
            self.assertIs(meth(mask=m), meth(mask=m))
            self.assertIs(meth(groupby=c, mask=m), meth(groupby=c, mask=m))

    class SomeFactorParameterized(SomeFactor):
        # Fixture factor that declares two user-supplied parameters.
        params = ('a', 'b')

    def test_parameterized_term(self):
        f = self.SomeFactorParameterized(a=1, b=2)
        self.assertEqual(f.params, {'a': 1, 'b': 2})
        g = self.SomeFactorParameterized(a=1, b=3)
        h = self.SomeFactorParameterized(a=2, b=2)
        self.assertDifferentObjects(f, g, h)
        # Keyword order must not affect identity.
        f2 = self.SomeFactorParameterized(a=1, b=2)
        f3 = self.SomeFactorParameterized(b=2, a=1)
        self.assertSameObject(f, f2, f3)
        self.assertEqual(f.params['a'], 1)
        self.assertEqual(f.params['b'], 2)
        self.assertEqual(f.window_length, SomeFactor.window_length)
        self.assertEqual(f.inputs, tuple(SomeFactor.inputs))

    def test_parameterized_term_non_hashable_arg(self):
        with assert_raises(TypeError) as e:
            self.SomeFactorParameterized(a=[], b=1)
        assert_equal(str(e.exception), "SomeFactorParameterized expected a hashable value for parameter 'a', but got [] instead.")
        with assert_raises(TypeError) as e:
            self.SomeFactorParameterized(a=1, b=[])
        assert_equal(str(e.exception), "SomeFactorParameterized expected a hashable value for parameter 'b', but got [] instead.")
        with assert_raises(TypeError) as e:
            self.SomeFactorParameterized(a=[], b=[])
        assert_regex(str(e.exception), "SomeFactorParameterized expected a hashable value for parameter '(a|b)', but got \\[\\] instead\\.")

    def test_parameterized_term_default_value(self):
        defaults = {'a': 'default for a', 'b': 'default for b'}
        class F(Factor):
            params = defaults
            inputs = (SomeDataSet.foo,)
            dtype = 'f8'
            window_length = 5
        assert_equal(F().params, defaults)
        assert_equal(F(a='new a').params, assoc(defaults, 'a', 'new a'))
        assert_equal(F(b='new b').params, assoc(defaults, 'b', 'new b'))
        assert_equal(F(a='new a', b='new b').params, {'a': 'new a', 'b': 'new b'})

    def test_parameterized_term_default_value_with_not_specified(self):
        # A NotSpecified default makes the parameter mandatory.
        defaults = {'a': 'default for a', 'b': NotSpecified}
        class F(Factor):
            params = defaults
            inputs = (SomeDataSet.foo,)
            dtype = 'f8'
            window_length = 5
        pattern = "F expected a keyword parameter 'b'\\."
        with assert_raises_regex(TypeError, pattern):
            F()
        with assert_raises_regex(TypeError, pattern):
            F(a='new a')
        assert_equal(F(b='new b').params, assoc(defaults, 'b', 'new b'))
        assert_equal(F(a='new a', b='new b').params, {'a': 'new a', 'b': 'new b'})

    def test_bad_input(self):
        class SomeFactor(Factor):
            dtype = float64_dtype
        class SomeFactorDefaultInputs(SomeFactor):
            inputs = (SomeDataSet.foo, SomeDataSet.bar)
        class SomeFactorDefaultLength(SomeFactor):
            window_length = 10
        class SomeFactorNoDType(SomeFactor):
            window_length = 10
            inputs = (SomeDataSet.foo,)
            dtype = NotSpecified
        with self.assertRaises(TermInputsNotSpecified):
            SomeFactor(window_length=1)
        with self.assertRaises(TermInputsNotSpecified):
            SomeFactorDefaultLength()
        with self.assertRaises(NonPipelineInputs):
            SomeFactor(window_length=1, inputs=[2])
        with self.assertRaises(WindowLengthNotSpecified):
            SomeFactor(inputs=(SomeDataSet.foo,))
        with self.assertRaises(WindowLengthNotSpecified):
            SomeFactorDefaultInputs()
        with self.assertRaises(DTypeNotSpecified):
            SomeFactorNoDType()
        with self.assertRaises(NotDType):
            SomeFactor(dtype=1)
        with self.assertRaises(NoDefaultMissingValue):
            SomeFactor(dtype=int64_dtype)
        with self.assertRaises(UnsupportedDType):
            SomeFactor(dtype=complex128_dtype)
        with self.assertRaises(TermOutputsEmpty):
            MultipleOutputs(outputs=[])

    def test_bad_output_access(self):
        with self.assertRaises(AttributeError) as e:
            SomeFactor().not_an_attr
        errmsg = str(e.exception)
        self.assertEqual(errmsg, "'SomeFactor' object has no attribute 'not_an_attr'")
        mo = MultipleOutputs()
        with self.assertRaises(AttributeError) as e:
            mo.not_an_attr
        errmsg = str(e.exception)
        expected = "Instance of MultipleOutputs has no output named 'not_an_attr'. Possible choices are: ('alpha', 'beta')."
        self.assertEqual(errmsg, expected)
        with self.assertRaises(ValueError) as e:
            (alpha, beta) = GenericCustomFactor()
        errmsg = str(e.exception)
        self.assertEqual(errmsg, 'GenericCustomFactor does not have multiple outputs.')
        # Output names that shadow existing methods are allowed and win.
        conflicting_output_names = ['zscore', 'some_method']
        mo = MultipleOutputs(outputs=conflicting_output_names)
        for name in conflicting_output_names:
            self.assertIsInstance(getattr(mo, name), RecarrayField)
        disallowed_output_names = ['inputs', '_init', '__add__']
        for name in disallowed_output_names:
            with self.assertRaises(InvalidOutputName):
                GenericCustomFactor(outputs=[name])

    def test_require_super_call_in_validate(self):
        class MyFactor(Factor):
            inputs = ()
            dtype = float64_dtype
            window_length = 0
            def _validate(self):
                # Fix: this override lost its body during extraction, which
                # made the class a syntax error. It must deliberately NOT call
                # super()._validate() to trigger the assertion below.
                "Woops, I didn't call super()!"
        with self.assertRaises(AssertionError) as e:
            MyFactor()
        errmsg = str(e.exception)
        self.assertEqual(errmsg, 'Term._validate() was not called.\nThis probably means that you overrode _validate without calling super().')

    def test_latest_on_different_dtypes(self):
        factor_dtypes = (float64_dtype, datetime64ns_dtype)
        for column in TestingDataSet.columns:
            if (column.dtype == bool_dtype):
                self.assertIsInstance(column.latest, Filter)
            elif ((column.dtype == int64_dtype) or (column.dtype.kind in ('O', 'S', 'U'))):
                self.assertIsInstance(column.latest, Classifier)
            elif (column.dtype in factor_dtypes):
                self.assertIsInstance(column.latest, Factor)
            else:
                self.fail(('Unknown dtype %s for column %s' % (column.dtype, column)))
            self.assertIs(column.missing_value, column.latest.missing_value)

    def test_failure_timing_on_bad_dtypes(self):
        # Bare Column construction is lazy and must not raise...
        Column(dtype=int64_dtype)
        # ...the failure surfaces when the column is bound to a DataSet.
        with self.assertRaises(NoDefaultMissingValue) as e:
            class BadDataSet(DataSet):
                bad_column = Column(dtype=int64_dtype)
                float_column = Column(dtype=float64_dtype)
                int_column = Column(dtype=int64_dtype, missing_value=3)
        self.assertTrue(str(e.exception.args[0]).startswith("Failed to create Column with name 'bad_column'"))
        Column(dtype=complex128_dtype)
        with self.assertRaises(UnsupportedDType):
            class BadDataSetComplex(DataSet):
                bad_column = Column(dtype=complex128_dtype)
                float_column = Column(dtype=float64_dtype)
                int_column = Column(dtype=int64_dtype, missing_value=3)
def load_backbone_pretrained(model, backbone):
    """Initialise ``model`` with backbone weights when no full checkpoint is used.

    Loads from the configured local file if it exists, otherwise falls back to
    the model-zoo URL for ``backbone`` (if one is registered). Does nothing
    unless we are training, backbone pretraining is enabled, and no full
    pretrained model path was configured.
    """
    should_load = ((cfg.PHASE == 'train') and cfg.TRAIN.BACKBONE_PRETRAINED and (not cfg.TRAIN.PRETRAINED_MODEL_PATH))
    if not should_load:
        return
    local_path = cfg.TRAIN.BACKBONE_PRETRAINED_PATH
    if os.path.isfile(local_path):
        logging.info('Load backbone pretrained model from {}'.format(local_path))
        msg = model.load_state_dict(torch.load(local_path), strict=False)
        logging.info(msg)
    elif (backbone not in model_urls):
        logging.info('{} has no pretrained model'.format(backbone))
        return
    else:
        logging.info('load backbone pretrained model from url..')
        msg = model.load_state_dict(model_zoo.load_url(model_urls[backbone]), strict=False)
        logging.info(msg)
def split_tensors(n, x):
    """Split ``x`` into ``n`` interleaved parts along its first dimension.

    Tensors are reshaped and unbound so element ``i`` of part ``j`` is
    ``x[i * n + j]``; lists/tuples are mapped recursively; ``None`` expands to
    a list of ``n`` Nones; anything else is returned unchanged.
    """
    if torch.is_tensor(x):
        # The leading dimension must divide evenly into n interleaved groups.
        assert x.shape[0] % n == 0
        grouped = x.reshape(x.shape[0] // n, n, *x.shape[1:])
        return grouped.unbind(1)
    if type(x) in (list, tuple):
        return [split_tensors(n, item) for item in x]
    if x is None:
        return [None] * n
    return x
def main():
    """Export a trained EfficientDet Pascal-VOC checkpoint to a frozen .pb graph.

    Builds the inference model for ``phi=1``, loads the ``.h5`` weights, freezes
    the current Keras session, and writes the resulting GraphDef to disk.
    """
    phi = 1
    weighted_bifpn = False
    model_path = 'checkpoints/2019-12-03/pascal_05_0.6283_1.1975_0.8029.h5'
    # The 20 Pascal VOC object classes.
    classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    num_classes = len(classes)
    score_threshold = 0.5
    # Fix: removed the unused image_sizes/image_size locals; the input
    # resolution appears to be derived from `phi` inside `efficientdet`
    # itself (phi=1 corresponds to 640px) — confirm against the model code.
    (model, prediction_model) = efficientdet(phi=phi, weighted_bifpn=weighted_bifpn, num_classes=num_classes, score_threshold=score_threshold)
    prediction_model.load_weights(model_path, by_name=True)
    frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in prediction_model.outputs])
    tf.train.write_graph(frozen_graph, './checkpoints/2019-12-03/', 'pascal_05.pb', as_text=False)
class Tickers():
    """Container managing multiple ``Ticker`` instances keyed by symbol."""

    def __repr__(self):
        return f"yfinance.Tickers object <{','.join(self.symbols)}>"

    def __init__(self, tickers, session=None):
        # Accept either a list of symbols or a comma/whitespace separated string.
        if not isinstance(tickers, list):
            tickers = tickers.replace(',', ' ').split()
        self.symbols = [symbol.upper() for symbol in tickers]
        self.tickers = {symbol: Ticker(symbol, session=session) for symbol in self.symbols}

    def history(self, period='1mo', interval='1d', start=None, end=None, prepost=False, actions=True, auto_adjust=True, repair=False, proxy=None, threads=True, group_by='column', progress=True, timeout=10, **kwargs):
        """Alias for :meth:`download`; see that method for parameter details."""
        return self.download(period, interval, start, end, prepost, actions, auto_adjust, repair, proxy, threads, group_by, progress, timeout, **kwargs)

    def download(self, period='1mo', interval='1d', start=None, end=None, prepost=False, actions=True, auto_adjust=True, repair=False, proxy=None, threads=True, group_by='column', progress=True, timeout=10, **kwargs):
        """Fetch price history for every symbol, caching it on each Ticker."""
        data = multi.download(self.symbols, start=start, end=end, actions=actions, auto_adjust=auto_adjust, repair=repair, period=period, interval=interval, prepost=prepost, proxy=proxy, group_by='ticker', threads=threads, progress=progress, timeout=timeout, **kwargs)
        for symbol in self.symbols:
            self.tickers.get(symbol, {})._history = data[symbol]
        if group_by == 'column':
            # Rearrange the column MultiIndex from (ticker, field) to (field, ticker).
            data.columns = data.columns.swaplevel(0, 1)
            data.sort_index(level=0, axis=1, inplace=True)
        return data

    def news(self):
        """Return the latest news items for each symbol."""
        return {symbol: [item for item in Ticker(symbol).news] for symbol in self.symbols}
class NovoGrad(Optimizer):
    """Implements the NovoGrad optimization algorithm.

    NovoGrad keeps a single second-moment scalar per parameter tensor (the
    squared gradient norm) rather than per-weight statistics, combined with
    first-moment momentum and decoupled weight decay.

    Args:
        params: iterable of parameters (or parameter groups) to optimize.
        grad_averaging (bool): scale the normalized gradient by (1 - beta1).
        lr (float): learning rate.
        betas (tuple): (beta1, beta2) momentum/second-moment coefficients.
        eps (float): denominator term for numerical stability.
        weight_decay (float): decoupled weight decay coefficient.
    """

    def __init__(self, params, grad_averaging=False, lr=0.1, betas=(0.95, 0.98), eps=1e-08, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(NovoGrad, self).__init__(params, defaults)
        self._lr = lr
        self._beta1 = betas[0]
        self._beta2 = betas[1]
        self._eps = eps
        self._wd = weight_decay
        self._grad_averaging = grad_averaging
        self._momentum_initialized = False

    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss from ``closure``, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        # Lazily initialize per-parameter state from the first gradient.
        if not self._momentum_initialized:
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is None:
                        continue
                    state = self.state[p]
                    grad = p.grad.data
                    if grad.is_sparse:
                        raise RuntimeError('NovoGrad does not support sparse gradients')
                    v = torch.norm(grad) ** 2
                    m = grad / (torch.sqrt(v) + self._eps) + self._wd * p.data
                    state['step'] = 0
                    state['v'] = v
                    state['m'] = m
                    state['grad_ema'] = None
            self._momentum_initialized = True
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                state = self.state[p]
                state['step'] += 1
                (step, v, m) = (state['step'], state['v'], state['m'])
                grad_ema = state['grad_ema']
                grad = p.grad.data
                g2 = torch.norm(grad) ** 2
                grad_ema = g2 if grad_ema is None else grad_ema * self._beta2 + g2 * (1.0 - self._beta2)
                # NOTE: this normalizes p.grad in place, matching the
                # reference implementation's behavior.
                grad *= 1.0 / (torch.sqrt(grad_ema) + self._eps)
                if self._grad_averaging:
                    grad *= (1.0 - self._beta1)
                g2 = torch.norm(grad) ** 2
                v = self._beta2 * v + (1.0 - self._beta2) * g2
                m = self._beta1 * m + (grad / (torch.sqrt(v) + self._eps) + self._wd * p.data)
                bias_correction1 = 1 - self._beta1 ** step
                bias_correction2 = 1 - self._beta2 ** step
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                (state['v'], state['m']) = (v, m)
                state['grad_ema'] = grad_ema
                # Fix: the positional form `add_(-step_size, m)` relied on the
                # deprecated Tensor.add_(Number, Tensor) overload, which has
                # been removed from PyTorch; use the alpha keyword instead.
                p.data.add_(m, alpha=-step_size)
        return loss
class MetricLogger(object):
    """Aggregates named ``SmoothedValue`` meters and prints training progress.

    Meters are created lazily on first update and can be read back either via
    ``self.meters[name]`` or as plain attributes (``logger.loss``).
    """

    def __init__(self, delimiter='\t'):
        # Meters are created on demand; `delimiter` separates them in __str__.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record new metric values; tensors are converted to Python scalars."""
        for (k, v) in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Fall back to meters so `logger.loss` resolves to `logger.meters['loss']`.
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """Synchronize all meters across distributed processes."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a pre-configured meter under ``name``."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None, length_total=None):
        """Yield items from ``iterable``, printing progress every ``print_freq`` steps.

        Args:
            iterable: the data source to iterate over.
            print_freq (int): iterations between progress lines.
            header (str): optional prefix for each progress line.
            length_total (int): total iteration count; defaults to
                ``len(iterable)`` (pass explicitly for generators).
        """
        i = 0
        if (length_total is None):
            length_total = len(iterable)
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        space_fmt = ((':' + str(len(str(length_total)))) + 'd')
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}', 'max mem: {memory:.0f}'])
        else:
            log_msg = self.delimiter.join([header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}'])
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            data_time.update((time.time() - end))
            (yield obj)
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (length_total - 1))):
                eta_seconds = (iter_time.global_avg * (length_total - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    try:
                        print(log_msg.format(i, length_total, eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                    except Exception as exc:
                        # Fix: a leftover `import pdb; pdb.set_trace()` lived
                        # here and would hang unattended training runs; report
                        # the formatting error and keep iterating instead.
                        print('{} logging failed at iteration {}: {}'.format(header, i, exc))
                else:
                    print(log_msg.format(i, length_total, eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / length_total)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.