code stringlengths 281 23.7M |
|---|
class Canvas(QLabel):
    """Interactive drawing surface for a simple paint application.

    Mouse events are dispatched by name to per-mode handlers
    (``<mode>_mousePressEvent`` etc.), so each tool is a family of methods
    following that naming scheme.  Shape and polygon tools paint their live
    preview with a XOR raster composition mode from a timer callback: drawing
    the previous preview again erases it, and decrementing the dash offset
    animates the dashed outline ("marching ants").
    """

    mode = 'rectangle'
    # Colors: primary follows the left button, secondary the right button /
    # fill brush.
    primary_color = QColor(Qt.black)
    secondary_color = None
    primary_color_updated = pyqtSignal(str)
    secondary_color_updated = pyqtSignal(str)
    # NOTE(review): class-level mutable dict, shared by all Canvas instances —
    # fine for a single-canvas app; confirm if several canvases are created.
    config = {'size': 1, 'fill': True, 'font': QFont('Times'), 'fontsize': 12, 'bold': False, 'italic': False, 'underline': False}
    active_color = None
    preview_pen = None
    timer_event = None
    current_stamp = None

    def initialize(self):
        """Prepare background/eraser colors and reset to a blank canvas."""
        self.background_color = QColor(self.secondary_color) if self.secondary_color else QColor(Qt.white)
        self.eraser_color = QColor(self.secondary_color) if self.secondary_color else QColor(Qt.white)
        self.eraser_color.setAlpha(100)  # semi-transparent eraser stroke
        self.reset()

    def reset(self):
        """Create a fresh pixmap and fill it with the background color."""
        self.setPixmap(QPixmap(*CANVAS_DIMENSIONS))
        self.pixmap().fill(self.background_color)

    def set_primary_color(self, hex):
        self.primary_color = QColor(hex)

    def set_secondary_color(self, hex):
        self.secondary_color = QColor(hex)

    def set_config(self, key, value):
        self.config[key] = value

    def set_mode(self, mode):
        """Switch tool mode, flushing any active preview and per-tool state."""
        # Let the current tool finalize (erase) its preview first.
        self.timer_cleanup()
        self.active_shape_fn = None
        self.active_shape_args = ()
        self.origin_pos = None
        self.current_pos = None
        self.last_pos = None
        self.history_pos = None
        self.last_history = []
        self.current_text = ''
        self.last_text = ''
        self.last_config = {}
        self.dash_offset = 0
        self.locked = False
        self.mode = mode

    def reset_mode(self):
        self.set_mode(self.mode)

    def on_timer(self):
        """Periodic hook: run the active tool's preview callback, if any."""
        if self.timer_event:
            self.timer_event()

    def timer_cleanup(self):
        """Run the active preview callback one last time with final=True.

        Clears self.timer_event *before* the final call so the callback cannot
        re-trigger itself.
        """
        if self.timer_event:
            timer_event = self.timer_event
            self.timer_event = None
            timer_event(final=True)

    # --- Generic event dispatch: route to '<mode>_mouse...Event' handlers ---

    def mousePressEvent(self, e):
        fn = getattr(self, '%s_mousePressEvent' % self.mode, None)
        if fn:
            return fn(e)

    def mouseMoveEvent(self, e):
        fn = getattr(self, '%s_mouseMoveEvent' % self.mode, None)
        if fn:
            return fn(e)

    def mouseReleaseEvent(self, e):
        fn = getattr(self, '%s_mouseReleaseEvent' % self.mode, None)
        if fn:
            return fn(e)

    def mouseDoubleClickEvent(self, e):
        fn = getattr(self, '%s_mouseDoubleClickEvent' % self.mode, None)
        if fn:
            return fn(e)

    def generic_mousePressEvent(self, e):
        """Record the press position and pick the active color by button."""
        self.last_pos = e.pos()
        if e.button() == Qt.LeftButton:
            self.active_color = self.primary_color
        else:
            self.active_color = self.secondary_color

    def generic_mouseReleaseEvent(self, e):
        self.last_pos = None

    # --- Polygon selection tool ---

    def selectpoly_mousePressEvent(self, e):
        # BUGFIX: the original compared the bound method `e.button` (not its
        # result `e.button()`) to Qt.RightButton, which is always False — so a
        # locked selection could never be restarted with the right button.
        if not self.locked or e.button() == Qt.RightButton:
            self.active_shape_fn = 'drawPolygon'
            self.preview_pen = SELECTION_PEN
            self.generic_poly_mousePressEvent(e)

    def selectpoly_timerEvent(self, final=False):
        self.generic_poly_timerEvent(final)

    def selectpoly_mouseMoveEvent(self, e):
        if not self.locked:
            self.generic_poly_mouseMoveEvent(e)

    def selectpoly_mouseDoubleClickEvent(self, e):
        # Double-click closes the polygon and freezes the selection.
        self.current_pos = e.pos()
        self.locked = True

    def selectpoly_copy(self):
        """Return the selected polygonal region as a masked pixmap copy."""
        self.timer_cleanup()
        pixmap = self.pixmap().copy()
        bitmap = QBitmap(*CANVAS_DIMENSIONS)
        bitmap.clear()  # fully transparent mask to start
        p = QPainter(bitmap)
        userpoly = QPolygon(self.history_pos + [self.current_pos])
        p.setPen(QPen(Qt.color1))
        p.setBrush(QBrush(Qt.color1))
        p.drawPolygon(userpoly)
        p.end()
        pixmap.setMask(bitmap)
        return pixmap.copy(userpoly.boundingRect())

    # --- Rectangle selection tool ---

    def selectrect_mousePressEvent(self, e):
        self.active_shape_fn = 'drawRect'
        self.preview_pen = SELECTION_PEN
        self.generic_shape_mousePressEvent(e)

    def selectrect_timerEvent(self, final=False):
        self.generic_shape_timerEvent(final)

    def selectrect_mouseMoveEvent(self, e):
        if not self.locked:
            self.current_pos = e.pos()

    def selectrect_mouseReleaseEvent(self, e):
        self.current_pos = e.pos()
        self.locked = True

    def selectrect_copy(self):
        """Return the selected rectangular region of the canvas."""
        self.timer_cleanup()
        return self.pixmap().copy(QRect(self.origin_pos, self.current_pos))

    # --- Eraser tool ---

    def eraser_mousePressEvent(self, e):
        self.generic_mousePressEvent(e)

    def eraser_mouseMoveEvent(self, e):
        if self.last_pos:
            p = QPainter(self.pixmap())
            p.setPen(QPen(self.eraser_color, 30, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
            p.drawLine(self.last_pos, e.pos())
            self.last_pos = e.pos()
            self.update()

    def eraser_mouseReleaseEvent(self, e):
        self.generic_mouseReleaseEvent(e)

    # --- Stamp tool ---

    def stamp_mousePressEvent(self, e):
        """Paint the current stamp centered on the click position."""
        p = QPainter(self.pixmap())
        stamp = self.current_stamp
        p.drawPixmap(e.x() - stamp.width() // 2, e.y() - stamp.height() // 2, stamp)
        self.update()

    # --- Pen tool ---

    def pen_mousePressEvent(self, e):
        self.generic_mousePressEvent(e)

    def pen_mouseMoveEvent(self, e):
        if self.last_pos:
            p = QPainter(self.pixmap())
            p.setPen(QPen(self.active_color, self.config['size'], Qt.SolidLine, Qt.SquareCap, Qt.RoundJoin))
            p.drawLine(self.last_pos, e.pos())
            self.last_pos = e.pos()
            self.update()

    def pen_mouseReleaseEvent(self, e):
        self.generic_mouseReleaseEvent(e)

    # --- Brush tool (wider, round-capped pen) ---

    def brush_mousePressEvent(self, e):
        self.generic_mousePressEvent(e)

    def brush_mouseMoveEvent(self, e):
        if self.last_pos:
            p = QPainter(self.pixmap())
            p.setPen(QPen(self.active_color, self.config['size'] * BRUSH_MULT, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
            p.drawLine(self.last_pos, e.pos())
            self.last_pos = e.pos()
            self.update()

    def brush_mouseReleaseEvent(self, e):
        self.generic_mouseReleaseEvent(e)

    # --- Spray tool: Gaussian scatter of single-pixel dots ---

    def spray_mousePressEvent(self, e):
        self.generic_mousePressEvent(e)

    def spray_mouseMoveEvent(self, e):
        if self.last_pos:
            p = QPainter(self.pixmap())
            p.setPen(QPen(self.active_color, 1))
            for n in range(self.config['size'] * SPRAY_PAINT_N):
                xo = random.gauss(0, self.config['size'] * SPRAY_PAINT_MULT)
                yo = random.gauss(0, self.config['size'] * SPRAY_PAINT_MULT)
                p.drawPoint(e.x() + xo, e.y() + yo)
            self.update()

    def spray_mouseReleaseEvent(self, e):
        self.generic_mouseReleaseEvent(e)

    # --- Text tool ---

    def keyPressEvent(self, e):
        """Accumulate typed text while the text tool is active."""
        if self.mode == 'text':
            if e.key() == Qt.Key_Backspace:
                self.current_text = self.current_text[:-1]
            else:
                self.current_text = self.current_text + e.text()

    def text_mousePressEvent(self, e):
        # First left click: anchor the text and start the preview timer.
        if e.button() == Qt.LeftButton and self.current_pos is None:
            self.current_pos = e.pos()
            self.current_text = ''
            self.timer_event = self.text_timerEvent
        # Second left click: commit the text permanently to the pixmap.
        elif e.button() == Qt.LeftButton:
            self.timer_cleanup()
            p = QPainter(self.pixmap())
            p.setRenderHints(QPainter.Antialiasing)
            font = build_font(self.config)
            p.setFont(font)
            pen = QPen(self.primary_color, 1, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin)
            p.setPen(pen)
            p.drawText(self.current_pos, self.current_text)
            self.update()
            self.reset_mode()
        # Right click while positioned: abort the text entry.
        elif e.button() == Qt.RightButton and self.current_pos:
            self.reset_mode()

    def text_timerEvent(self, final=False):
        """XOR-draw the pending text: erase last preview, draw the new one."""
        p = QPainter(self.pixmap())
        p.setCompositionMode(QPainter.RasterOp_SourceXorDestination)
        pen = PREVIEW_PEN
        p.setPen(pen)
        if self.last_text:
            # Redraw with the *previous* config so the XOR exactly cancels it.
            font = build_font(self.last_config)
            p.setFont(font)
            p.drawText(self.current_pos, self.last_text)
        if not final:
            font = build_font(self.config)
            p.setFont(font)
            p.drawText(self.current_pos, self.current_text)
        self.last_text = self.current_text
        self.last_config = self.config.copy()
        self.update()

    # --- Flood fill tool ---

    def fill_mousePressEvent(self, e):
        """Flood-fill the 4-connected region under the cursor."""
        if e.button() == Qt.LeftButton:
            self.active_color = self.primary_color
        else:
            self.active_color = self.secondary_color
        # Read from an immutable snapshot while painting onto the pixmap, so
        # already-filled pixels still compare equal to the original target.
        image = self.pixmap().toImage()
        w, h = image.width(), image.height()
        x, y = e.x(), e.y()
        target_color = image.pixel(x, y)
        have_seen = set()
        queue = [(x, y)]

        def get_cardinal_points(have_seen, center_pos):
            # Unvisited in-bounds neighbors (N/S/E/W) of center_pos.
            points = []
            cx, cy = center_pos
            for x, y in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
                xx, yy = cx + x, cy + y
                if xx >= 0 and xx < w and yy >= 0 and yy < h and (xx, yy) not in have_seen:
                    points.append((xx, yy))
                    have_seen.add((xx, yy))
            return points

        p = QPainter(self.pixmap())
        p.setPen(QPen(self.active_color))
        while queue:
            x, y = queue.pop()
            if image.pixel(x, y) == target_color:
                p.drawPoint(QPoint(x, y))
                queue.extend(get_cardinal_points(have_seen, (x, y)))
        self.update()

    # --- Color dropper tool ---

    def dropper_mousePressEvent(self, e):
        """Sample the pixel color under the cursor into primary/secondary."""
        c = self.pixmap().toImage().pixel(e.pos())
        hex = QColor(c).name()
        if e.button() == Qt.LeftButton:
            self.set_primary_color(hex)
            self.primary_color_updated.emit(hex)
        elif e.button() == Qt.RightButton:
            self.set_secondary_color(hex)
            self.secondary_color_updated.emit(hex)

    # --- Generic two-point shape machinery (rect, ellipse, rounded rect) ---

    def generic_shape_mousePressEvent(self, e):
        self.origin_pos = e.pos()
        self.current_pos = e.pos()
        self.timer_event = self.generic_shape_timerEvent

    def generic_shape_timerEvent(self, final=False):
        """Animate the dashed XOR preview between origin and cursor."""
        p = QPainter(self.pixmap())
        p.setCompositionMode(QPainter.RasterOp_SourceXorDestination)
        pen = self.preview_pen
        pen.setDashOffset(self.dash_offset)
        p.setPen(pen)
        if self.last_pos:
            # Erase the previous preview by redrawing it (XOR).
            getattr(p, self.active_shape_fn)(QRect(self.origin_pos, self.last_pos), *self.active_shape_args)
        if not final:
            self.dash_offset -= 1  # advance the marching-ants animation
            pen.setDashOffset(self.dash_offset)
            p.setPen(pen)
            getattr(p, self.active_shape_fn)(QRect(self.origin_pos, self.current_pos), *self.active_shape_args)
        self.update()
        self.last_pos = self.current_pos

    def generic_shape_mouseMoveEvent(self, e):
        self.current_pos = e.pos()

    def generic_shape_mouseReleaseEvent(self, e):
        """Commit the final shape with the primary pen (and fill brush)."""
        if self.last_pos:
            self.timer_cleanup()
            p = QPainter(self.pixmap())
            p.setPen(QPen(self.primary_color, self.config['size'], Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin))
            if self.config['fill']:
                p.setBrush(QBrush(self.secondary_color))
            getattr(p, self.active_shape_fn)(QRect(self.origin_pos, e.pos()), *self.active_shape_args)
            self.update()
        self.reset_mode()

    # --- Line tool ---

    def line_mousePressEvent(self, e):
        self.origin_pos = e.pos()
        self.current_pos = e.pos()
        self.preview_pen = PREVIEW_PEN
        self.timer_event = self.line_timerEvent

    def line_timerEvent(self, final=False):
        p = QPainter(self.pixmap())
        p.setCompositionMode(QPainter.RasterOp_SourceXorDestination)
        pen = self.preview_pen
        p.setPen(pen)
        if self.last_pos:
            p.drawLine(self.origin_pos, self.last_pos)
        if not final:
            p.drawLine(self.origin_pos, self.current_pos)
        self.update()
        self.last_pos = self.current_pos

    def line_mouseMoveEvent(self, e):
        self.current_pos = e.pos()

    def line_mouseReleaseEvent(self, e):
        if self.last_pos:
            self.timer_cleanup()
            p = QPainter(self.pixmap())
            p.setPen(QPen(self.primary_color, self.config['size'], Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
            p.drawLine(self.origin_pos, e.pos())
            self.update()
        self.reset_mode()

    # --- Generic multi-point machinery (polyline, polygon, selectpoly) ---

    def generic_poly_mousePressEvent(self, e):
        # Left click appends a vertex; right click (with vertices) finishes.
        if e.button() == Qt.LeftButton:
            if self.history_pos:
                self.history_pos.append(e.pos())
            else:
                self.history_pos = [e.pos()]
                self.current_pos = e.pos()
                self.timer_event = self.generic_poly_timerEvent
        elif e.button() == Qt.RightButton and self.history_pos:
            self.timer_cleanup()
            self.reset_mode()

    def generic_poly_timerEvent(self, final=False):
        """Animate the dashed XOR preview of the in-progress poly shape."""
        p = QPainter(self.pixmap())
        p.setCompositionMode(QPainter.RasterOp_SourceXorDestination)
        pen = self.preview_pen
        pen.setDashOffset(self.dash_offset)
        p.setPen(pen)
        if self.last_history:
            getattr(p, self.active_shape_fn)(*self.last_history)
        if not final:
            self.dash_offset -= 1
            pen.setDashOffset(self.dash_offset)
            p.setPen(pen)
            getattr(p, self.active_shape_fn)(*(self.history_pos + [self.current_pos]))
        self.update()
        self.last_pos = self.current_pos
        self.last_history = self.history_pos + [self.current_pos]

    def generic_poly_mouseMoveEvent(self, e):
        self.current_pos = e.pos()

    def generic_poly_mouseDoubleClickEvent(self, e):
        """Commit the poly shape on double click."""
        self.timer_cleanup()
        p = QPainter(self.pixmap())
        p.setPen(QPen(self.primary_color, self.config['size'], Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
        if self.secondary_color:
            p.setBrush(QBrush(self.secondary_color))
        getattr(p, self.active_shape_fn)(*(self.history_pos + [e.pos()]))
        self.update()
        self.reset_mode()

    # --- Concrete poly/shape tools, delegating to the generic machinery ---

    def polyline_mousePressEvent(self, e):
        self.active_shape_fn = 'drawPolyline'
        self.preview_pen = PREVIEW_PEN
        self.generic_poly_mousePressEvent(e)

    def polyline_timerEvent(self, final=False):
        self.generic_poly_timerEvent(final)

    def polyline_mouseMoveEvent(self, e):
        self.generic_poly_mouseMoveEvent(e)

    def polyline_mouseDoubleClickEvent(self, e):
        self.generic_poly_mouseDoubleClickEvent(e)

    def rect_mousePressEvent(self, e):
        self.active_shape_fn = 'drawRect'
        self.active_shape_args = ()
        self.preview_pen = PREVIEW_PEN
        self.generic_shape_mousePressEvent(e)

    def rect_timerEvent(self, final=False):
        self.generic_shape_timerEvent(final)

    def rect_mouseMoveEvent(self, e):
        self.generic_shape_mouseMoveEvent(e)

    def rect_mouseReleaseEvent(self, e):
        self.generic_shape_mouseReleaseEvent(e)

    def polygon_mousePressEvent(self, e):
        self.active_shape_fn = 'drawPolygon'
        self.preview_pen = PREVIEW_PEN
        self.generic_poly_mousePressEvent(e)

    def polygon_timerEvent(self, final=False):
        self.generic_poly_timerEvent(final)

    def polygon_mouseMoveEvent(self, e):
        self.generic_poly_mouseMoveEvent(e)

    def polygon_mouseDoubleClickEvent(self, e):
        self.generic_poly_mouseDoubleClickEvent(e)

    def ellipse_mousePressEvent(self, e):
        self.active_shape_fn = 'drawEllipse'
        self.active_shape_args = ()
        self.preview_pen = PREVIEW_PEN
        self.generic_shape_mousePressEvent(e)

    def ellipse_timerEvent(self, final=False):
        self.generic_shape_timerEvent(final)

    def ellipse_mouseMoveEvent(self, e):
        self.generic_shape_mouseMoveEvent(e)

    def ellipse_mouseReleaseEvent(self, e):
        self.generic_shape_mouseReleaseEvent(e)

    def roundrect_mousePressEvent(self, e):
        self.active_shape_fn = 'drawRoundedRect'
        self.active_shape_args = (25, 25)  # x/y corner radii
        self.preview_pen = PREVIEW_PEN
        self.generic_shape_mousePressEvent(e)

    def roundrect_timerEvent(self, final=False):
        self.generic_shape_timerEvent(final)

    def roundrect_mouseMoveEvent(self, e):
        self.generic_shape_mouseMoveEvent(e)

    def roundrect_mouseReleaseEvent(self, e):
        self.generic_shape_mouseReleaseEvent(e)
def get_bar():
    """Assemble and return the single 20px-high qtile status bar."""
    widgets = [
        widget.GroupBox(font=font, fontsize=fontsize, active=foreground,
                        urgent_border=alert, padding=0, borderwidth=3,
                        margin_x=3, margin_y=0),
        widget.Sep(),
        widget.CurrentLayout(**font_params),
        widget.Sep(),
        widget.WindowName(**font_params),
        Metrics(**font_params),
        widget.Systray(icon_size=15),
        widget.Sep(foreground='#000000'),
        widget.Clock(format='%c', **font_params),
    ]
    return bar.Bar(widgets, 20)
class Migration(migrations.Migration):
    """Drop the obsolete ``Condition`` model from the ``domain`` app."""

    dependencies = [
        ('domain', '0008_meta'),
    ]

    operations = [
        # Relational fields are removed first so the model itself can be
        # deleted cleanly afterwards.
        migrations.RemoveField(model_name='condition', name='attribute_entity'),
        migrations.RemoveField(model_name='condition', name='source_attribute'),
        migrations.RemoveField(model_name='condition', name='target_option'),
        migrations.DeleteModel(name='Condition'),
    ]
def save_model(path, epoch, model, optimizer=None):
    """Serialize a training checkpoint to *path* with ``torch.save``.

    Unwraps ``DataParallel`` so stored state-dict keys have no ``module.``
    prefix and the checkpoint loads directly into an unwrapped model.

    Args:
        path: Destination file path for the checkpoint.
        epoch: Epoch counter stored alongside the weights.
        model: ``torch.nn.Module`` (optionally wrapped in ``DataParallel``).
        optimizer: Optional optimizer whose state is also checkpointed.
    """
    if isinstance(model, torch.nn.DataParallel):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    data = {'epoch': epoch, 'state_dict': state_dict}
    if optimizer is not None:  # idiomatic identity test (was `not (x is None)`)
        data['optimizer'] = optimizer.state_dict()
    torch.save(data, path)
class TimelapseFramesExperiment(Experiment):
    """Experiment that trains a conditional VAE to predict the next frame of a
    timelapse (watercolor) video, conditioned on previous frames.

    NOTE(review): heavily coupled to project helpers (``sequence_extractor``,
    ``cvae_class``, ``vis_utils``, ``dataset_utils``); the comments below only
    state what is visible in this block.
    """

    def get_model_name(self):
        """Build, cache on ``self.model_name`` and return a descriptive model
        name encoding dataset, architecture and key hyperparameters."""
        exp_name = 'TLF'
        exp_name += '_{}'.format(self.dataset.display_name)
        exp_name += '_{}'.format(self.arch_params['model_arch'])
        exp_name += '_nprev{}'.format(self.combined_data_params['n_prev_frames'])
        exp_name += '_recon-{}'.format(self.arch_params['recon_fn'])
        if ('vae' in self.arch_params['model_arch']):
            # VAE variants additionally encode latent size and decoder depth.
            exp_name += '_latent{}'.format(self.arch_params['latent_dim'])
            exp_name += '_ndec{}'.format(len(self.arch_params['enc_params']['nf_dec']))
        self.model_name = super(TimelapseFramesExperiment, self).get_model_name(exp_name)
        return self.model_name

    def __init__(self, data_params, arch_params, exp_root='C:\\Research\\experiments', prompt_delete_existing=True, prompt_update_name=True, do_logging=True, loaded_from_dir=None):
        """Set up datasets, input/output tensor shapes and input names, then
        delegate the remaining experiment bookkeeping to the base class.

        Args:
            data_params: dict or list of dicts of per-dataset parameters.
            arch_params: architecture hyperparameters (must include
                'latent_dim', 'recon_fn', 'model_arch', 'enc_params', ...).
            exp_root: root directory for experiment output.
            loaded_from_dir: presumably an existing experiment dir to resume
                from — forwarded to the base class unchanged.
        """
        self.arch_params = arch_params
        self.latent_dim = self.arch_params['latent_dim']
        # Normalize a single params dict into a one-element list.
        if (not (type(data_params) == list)):
            data_params = [data_params]
        for dp in data_params:
            if ('true_starts_file' not in dp):
                dp['true_starts_file'] = None
        self.datasets = [datasets.WatercolorsDataset(params=dp) for dp in data_params]
        if (len(self.datasets) == 1):
            self.dataset = self.datasets[0]
        else:
            self.dataset = dataset_utils.combine_dataset_names(self.datasets)
        # NOTE(review): only the first dataset's params are used as the
        # "combined" params — assumes all datasets share them; confirm.
        self.combined_data_params = data_params[0]
        self.crop_shape = tuple(self.combined_data_params['pad_to_shape'])
        self.gt_input_frames = [(- 1)]
        self.n_input_frames = (len(self.gt_input_frames) + self.combined_data_params['n_prev_frames'])
        self.n_pred_frames = self.combined_data_params['n_pred_frames']
        self.n_chans = 3  # RGB frames
        if ('activation' not in self.arch_params):
            self.arch_params['activation'] = None
        self.epoch_count = 0
        self.iter_count = 0
        self.n_chans = 3
        # Conditioning inputs: one (H, W, 3) tensor per input frame.
        self.cond_input_frames_shapes = [(self.crop_shape[:(- 1)] + (c,)) for c in ([self.n_chans] * self.n_input_frames)]
        self.cond_input_names = ['last_frame', 'prev_frame']
        # Autoencoder inputs = conditioning frames + the current (target) frame.
        self.ae_input_frames_shapes = (self.cond_input_frames_shapes + [self.crop_shape])
        self.ae_input_names = (self.cond_input_names + ['curr_frame'])
        self.ae_input_names = [('ae_' + input_name) for input_name in self.ae_input_names]
        self.cond_input_names = [('cond_' + input_name) for input_name in self.cond_input_names]
        self.pred_frame_shape = self.crop_shape
        super(TimelapseFramesExperiment, self).__init__(data_params, arch_params, exp_root=exp_root, prompt_delete_existing=prompt_delete_existing, prompt_update_name=prompt_update_name, do_logging=do_logging, loaded_from_dir=loaded_from_dir)
        self.logger.debug('Autoencoder input shapes')
        self.logger.debug(self.ae_input_frames_shapes)
        self.logger.debug('Conditioning input shapes')
        self.logger.debug(self.cond_input_frames_shapes)

    def load_data(self, load_n=None):
        """Load/combine video data, extract training/validation frame
        sequences, and write sequence/video-name manifests to the exp dir.

        Args:
            load_n: optional cap on how many videos to load; manifests are
                only written for a full load (``load_n is None``).

        Returns:
            0 (return value unused by visible callers).
        """
        self.dataset = dataset_utils.combine_dataset_vids(self.datasets, self.dataset, load_n=load_n, _print=self.logger.debug)
        self._print('Combined dataset has {} train vids: {}...'.format(len(self.dataset.vids_train), [vd['vid_name'] for vd in self.dataset.vids_train[:min(5, len(self.dataset.vids_train))]]))
        load_n_prev_frames = self.combined_data_params['n_prev_frames']
        (self.seq_infos_train, self.seq_infos_valid) = sequence_extractor.extract_sequences_by_index_from_datasets(self.datasets, self.dataset, _print=self.logger.debug, n_prev_frames=load_n_prev_frames, seq_len=(load_n_prev_frames + 1), do_filter_by_prev_attn=False, include_nonadj_seqs=False, include_starter_seq=True, do_prune_unused=True)
        self.logger.debug('Loaded {} sequences: train seq infos: {}...{}'.format(len(self.seq_infos_train), self.seq_infos_train[:min(5, len(self.seq_infos_train))], self.seq_infos_train[(- min(5, len(self.seq_infos_train))):]))
        # "Starter" sequences are those whose first frame index is None.
        self._print('Loaded total starter seqs: {}'.format(len([i for (i, seq_info) in enumerate(self.seq_infos_train) if (seq_info[1][0] is None)])))
        if (load_n is None):
            # Persist which image files make up each train/valid sequence.
            seqs_imfiles_train = [','.join([(self.dataset.vids_train[seq_info[0]]['im_files'][fi] if (fi is not None) else 'blank') for fi in seq_info[1]]) for seq_info in self.seq_infos_train]
            with open(os.path.join(self.exp_dir, 'train_seq.txt'), 'w') as f:
                f.writelines([(vn + '\n') for vn in seqs_imfiles_train])
            seqs_imfiles_valid = [','.join([(self.dataset.vids_valid[seq_info[0]]['im_files'][fi] if (fi is not None) else 'blank') for fi in seq_info[1]]) for seq_info in self.seq_infos_valid]
            with open(os.path.join(self.exp_dir, 'valid_seq.txt'), 'w') as f:
                f.writelines([(vn + '\n') for vn in seqs_imfiles_valid])
            # Also persist the sorted, de-duplicated video names per split.
            vidnames_train = list(sorted(list(set([self.dataset.vids_train[vi]['vid_name'] for (vi, _, _) in self.seq_infos_train]))))
            vidnames_valid = list(sorted(list(set([self.dataset.vids_valid[vi]['vid_name'] for (vi, _, _) in self.seq_infos_valid]))))
            with open(os.path.join(self.exp_dir, 'train_vidnames.txt'), 'w') as f:
                f.writelines([(vn + '\n') for vn in vidnames_train])
            with open(os.path.join(self.exp_dir, 'valid_vidnames.txt'), 'w') as f:
                f.writelines([(vn + '\n') for vn in vidnames_valid])
        self.frame_shape = self.dataset.vids_data[0]['frames'].shape[1:]
        super(TimelapseFramesExperiment, self).load_data()
        return 0

    def _make_model_targets(self, Y):
        """Clip ground-truth frames to the valid intensity range and build the
        list of training targets expected by the trainer model.

        The VGG-based reconstruction losses add one extra copy of Y because
        the model then has an additional reconstruction output.
        """
        if (not self.combined_data_params['normalize_frames']):
            Y = np.clip(Y, 0.0, 1.0)
        else:
            Y = np.clip(Y, (- 1.0), 1.0)
        generator_labels = [Y, Y, self.zeros_latent, self.zeros_latent]
        if ((self.arch_params['recon_fn'] == 'l2-vgg') or (self.arch_params['recon_fn'] == 'l1-vgg')):
            generator_labels = ([Y] + generator_labels)
        return generator_labels

    def create_models(self, eval=False, verbose=True):
        """Instantiate the CVAE and its trainer/tester wrappers.

        Returns:
            The list of created sub-models.
        """
        self.do_eval = eval
        self.models = []
        if ('concat' in self.arch_params['condition_by']):
            # Number of downsampling scales needed to reduce the largest
            # spatial dim to ~3 px when conditioning by concatenation.
            n_concat_scales = int(np.ceil(np.log2((np.max(self.crop_shape[:(- 1)]) / 3.0))))
        else:
            n_concat_scales = None
        if (('l2-vgg' in self.arch_params['recon_fn']) or ('l1-vgg' in self.arch_params['recon_fn'])):
            self.n_recon_outputs = 2
        else:
            self.n_recon_outputs = 1
        self.logger.debug('Creating per-frame VAE with {} recon outputs!'.format(self.n_recon_outputs))
        self.latent_dim = self.arch_params['latent_dim']
        self.cvae = cvae_class.CVAE(ae_input_shapes=self.ae_input_frames_shapes, ae_input_names=self.ae_input_names, conditioning_input_shapes=self.cond_input_frames_shapes, conditioning_input_names=self.cond_input_names, source_im_idx=1, n_concat_scales=n_concat_scales, output_shape=self.pred_frame_shape, n_outputs=self.n_recon_outputs, condition_on_image=True, transform_latent_dim=self.latent_dim, transform_enc_params=self.arch_params['enc_params'], dec_params=self.arch_params['enc_params'], transform_activation=self.arch_params['activation'], conditioning_type=self.arch_params['condition_by'])
        self.cvae.create_modules()
        self.cvae.create_train_wrapper()
        self.models += self.cvae.get_models()
        self.models_to_print = self.models[:]
        self.models_to_print.append(self.cvae.trainer_model)
        self.trainer_model = self.cvae.trainer_model
        self.tester_model = self.cvae.tester_model
        super(TimelapseFramesExperiment, self).create_models()
        return self.models

    def create_generators(self, batch_size):
        """Create train (augmented, shuffled) and test (fixed-order) batch
        generators, plus the zero latent targets for VAE losses."""
        self.batch_size = batch_size
        self.train_gen = sequence_extractor.generate_random_frames_sequences(vids_data_list=self.dataset.vids_train, seq_infos=self.seq_infos_train, batch_size=batch_size, randomize=True, crop_shape=self.crop_shape, crop_type=self.combined_data_params['crop_type'], do_normalize_frames=True, do_aug=True, return_ids=True)
        self.test_gen = sequence_extractor.generate_random_frames_sequences(vids_data_list=self.dataset.vids_valid, seq_infos=self.seq_infos_valid, batch_size=batch_size, randomize=False, crop_shape=self.crop_shape, crop_type=self.combined_data_params['crop_type'], do_normalize_frames=True, do_aug=False, return_ids=True)
        if ('vae' in self.arch_params['model_arch']):
            self.zeros_latent = np.zeros((self.batch_size, self.latent_dim))

    def compile_models(self, run_options=None, run_metadata=None):
        """Resolve the reconstruction loss and compile the trainer model with
        the Nadam optimizer at the configured learning rate."""
        (recon_fn, recon_fn_name) = utils.parse_loss_name(ln=self.arch_params['recon_fn'], normalize_input=self.combined_data_params['normalize_frames'], pred_shape=self.pred_frame_shape, logger=self.logger)
        (self.loss_names, self.loss_functions, self.loss_weights) = self.cvae.get_losses(transform_reg_fn=None, transform_reg_lambda=None, transform_reg_name=None, recon_loss_fn=recon_fn, recon_loss_weight=([self.arch_params['recon_lambda']] * self.n_recon_outputs), recon_loss_name=recon_fn_name)
        self.trainer_model.compile(optimizer=Nadam(lr=self.arch_params['lr']), loss=self.loss_functions, loss_weights=self.loss_weights)
        super(TimelapseFramesExperiment, self).compile_models()

    def make_train_results_im(self):
        """Visualization image for the most recent training batch."""
        return self._make_results_im(X=self.X_train_batch, gt_frames=self.Y_train_batch, seqs_frame_files=self.seq_imfiles_train_batch)

    def make_test_results_im(self):
        """Visualization image for the most recent validation batch."""
        return self._make_results_im(X=self.X_test_batch, gt_frames=self.Y_test_batch, seqs_frame_files=self.seq_imfiles_test_batch)

    def _make_results_im(self, X, gt_frames, seqs_frame_files=None):
        """Assemble a side-by-side results image: labeled inputs, predicted
        and ground-truth diffs (vs. the previous frame), prediction and GT.

        Args:
            X: model input batch (AE inputs followed by conditioning inputs).
            gt_frames: ground-truth target frames for the batch.
            seqs_frame_files: optional comma-joined frame filenames per
                sequence, used to label the GT images.
        """
        if (seqs_frame_files is not None):
            # Reduce each frame path to the trailing token of its basename.
            seqs_frame_names = []
            for seq_frame_files in seqs_frame_files:
                frame_files = seq_frame_files.split(',')
                frame_names = [os.path.splitext(os.path.basename(f))[0].split('-')[(- 1)] for f in frame_files]
                seqs_frame_names.append(frame_names)
        else:
            seqs_frame_names = None
        preds = self.trainer_model.predict(X)
        if (not isinstance(preds, list)):
            preds = [preds]
        pred = preds[0]  # first output is the reconstruction
        ae_inputs = X[:len(self.ae_input_names)]
        cond_inputs = X[len(self.ae_input_names):(len(self.ae_input_names) + len(self.cond_input_names))]
        cond_input_im = np.concatenate([vis_utils.label_ims(cond_input, self.cond_input_names[ii]) for (ii, cond_input) in enumerate(cond_inputs)], axis=1)
        ae_input_im = np.concatenate([vis_utils.label_ims(ae_input, self.ae_input_names[ii]) for (ii, ae_input) in enumerate(ae_inputs)], axis=1)
        input_ims = np.concatenate(vis_utils.pad_images_to_size([ae_input_im, cond_input_im], ignore_axes=1), axis=1)
        if (seqs_frame_names is not None):
            batch_size = gt_frames.shape[0]
            gt_labels = ['gt-{}'.format(seqs_frame_names[ei][(- 1)]) for ei in range(batch_size)]
        else:
            gt_labels = 'gt'
        prev_frame = cond_inputs[1]
        pred_im = vis_utils.label_ims(pred, 'pred')
        pred_diff_im = vis_utils.label_ims((pred - prev_frame), 'pred_diff')
        gt_im = vis_utils.label_ims(gt_frames, gt_labels)
        gt_diff_im = vis_utils.label_ims((gt_frames - prev_frame), 'gt_diff')
        out_ims_list = [input_ims, pred_diff_im, gt_diff_im, pred_im, gt_im]
        out_im = np.concatenate(vis_utils.pad_images_to_size(out_ims_list, ignore_axes=1), axis=1)
        return out_im

    def get_n_train(self):
        """Number of training sequences."""
        return len(self.seq_infos_train)

    def get_n_test(self):
        """Number of validation sequences."""
        return len(self.seq_infos_valid)

    def train_on_batch(self):
        """Train on one generated batch.

        Returns:
            (losses, loss_names) with names prefixed by 'train_'.
        """
        (X, Y, seq_imfiles) = next(self.train_gen)
        generator_labels = self._make_model_targets(Y)
        losses = self.trainer_model.train_on_batch(X, generator_labels)
        if (not isinstance(losses, list)):
            losses = [losses]
        loss_names = [('train_' + ln) for ln in self.loss_names]
        # Cache the batch for later visualization via make_train_results_im.
        self.X_train_batch = X
        self.seq_imfiles_train_batch = seq_imfiles
        if isinstance(generator_labels, list):
            self.Y_train_batch = generator_labels[0]
        else:
            self.Y_train_batch = generator_labels
        assert (len(losses) == len(loss_names))
        self.iter_count += 1
        return (losses, loss_names)

    def test_batches(self):
        """Evaluate on all validation batches and average the losses.

        Returns:
            (mean_losses_as_list, loss_names) with names prefixed 'valid_'.
        """
        n_test_batches = int(np.ceil((self.get_n_test() / float(self.batch_size))))
        self.logger.debug('Validating {} batches...'.format(n_test_batches))
        for bi in range(n_test_batches):
            (X, Y, seq_imfiles) = next(self.test_gen)
            generator_labels = self._make_model_targets(Y)
            losses = self.trainer_model.evaluate(X, generator_labels, verbose=False)
            if (not isinstance(losses, list)):
                losses = [losses]
            # Running mean: each batch contributes losses / n_test_batches.
            if (bi == 0):
                test_losses = (np.asarray(losses) / float(n_test_batches))
            else:
                test_losses += (np.asarray(losses) / float(n_test_batches))
        loss_names = [('valid_' + ln) for ln in self.loss_names]
        # Cache the last batch for later visualization.
        self.X_test_batch = X
        self.seq_imfiles_test_batch = seq_imfiles
        if isinstance(generator_labels, list):
            self.Y_test_batch = generator_labels[0]
        else:
            self.Y_test_batch = generator_labels
        return (test_losses.tolist(), loss_names)

    def save_exp_info(self, exp_dir, figures_dir, models_dir, logs_dir):
        """Delegate experiment-info persistence to the base class."""
        super(TimelapseFramesExperiment, self).save_exp_info(exp_dir, figures_dir, models_dir, logs_dir)

    def save_models(self, epoch, iter_count=None):
        """Delegate model checkpointing to the base class."""
        super(TimelapseFramesExperiment, self).save_models(epoch, iter_count=iter_count)

    def print_models(self, save_figs=False, figs_dir=None):
        """Delegate model summary printing to the base class."""
        super(TimelapseFramesExperiment, self)._print_models(save_figs=save_figs, figs_dir=figs_dir)

    def load_models(self, load_epoch=None, stop_on_missing=True, init_layers=False):
        """Load model weights and sync the epoch counter to the loaded epoch.

        NOTE(review): stop_on_missing / init_layers are accepted but not
        forwarded to the base class here — confirm this is intentional.
        """
        start_epoch = super(TimelapseFramesExperiment, self).load_models(load_epoch)
        self.epoch_count = start_epoch
        return start_epoch

    def update_epoch_count(self, e):
        """Increment the epoch counter (the argument ``e`` is unused)."""
        self.epoch_count += 1
        return 0
def test_complete_headers(test_model_01):
    """Every expected INP section must be detected in test model 01."""
    headers = swmmio.utils.text.get_inp_sections_details(test_model_01.inp.path)
    print(list(headers.keys()))
    expected_sections = [
        'TITLE', 'OPTIONS', 'EVAPORATION', 'RAINGAGES', 'SUBCATCHMENTS',
        'SUBAREAS', 'INFILTRATION', 'JUNCTIONS', 'OUTFALLS', 'STORAGE',
        'CONDUITS', 'PUMPS', 'WEIRS', 'XSECTIONS', 'INFLOWS', 'CURVES',
        'TIMESERIES', 'REPORT', 'TAGS', 'MAP', 'COORDINATES', 'VERTICES',
        'POLYGONS', 'SYMBOLS',
    ]
    assert all(section in headers for section in expected_sections)
def get_feed_items(count=10):
    """Return up to *count* active items activated within the last 90 days.

    Excludes section-less items, prefetches related objects, and orders by
    creation date then related date (newest first).

    Fix: the original called ``datetime.datetime.now()`` twice, so the upper
    and lower bounds of the 90-day window were anchored to two slightly
    different instants; the window is now anchored to a single timestamp.
    """
    now = datetime.datetime.now()
    return (
        Item.objects
        .filter(
            status='active',
            activated_at__lte=now,
            activated_at__gte=now - datetime.timedelta(days=90),
        )
        .exclude(section=None)
        .prefetch_related('issue', 'section', 'tags')
        .order_by('-created_at', '-related_to_date')[:count]
    )
class EggInfoWithJS(egg_info):
    """``egg_info`` command that builds the JS frontend bundle when missing.

    The npm build is skipped when the static assets already exist or when
    running on Read the Docs (signalled by the READTHEDOCS env var).
    """

    def run(self) -> None:
        static_path = os.path.join(NAME, STATIC_FOLDER)
        # Original used an inverted `if ...: pass / else:`; flattened here to
        # a single positive guard with identical behavior.
        if not (os.path.exists(static_path) or 'READTHEDOCS' in os.environ):
            js_path = 'sqllineagejs'
            # On Windows npm is npm.cmd, which needs shell resolution.
            use_shell = platform.system() == 'Windows'
            subprocess.check_call(shlex.split('npm install'), cwd=js_path, shell=use_shell)
            subprocess.check_call(shlex.split('npm run build'), cwd=js_path, shell=use_shell)
            shutil.move(os.path.join(js_path, STATIC_FOLDER), static_path)
        super().run()
def test_a_decorated_singleton_is_shared_among_child_injectors():
    """Sibling child injectors must resolve the same singleton instance,
    i.e. the singleton scope lives on the shared parent."""
    root = Injector()
    first_child = root.create_child_injector()
    second_child = root.create_child_injector()
    instance_a = first_child.get(SingletonB)
    instance_b = second_child.get(SingletonB)
    assert instance_a is instance_b
class TestSeSolve():
H0 = ((0.2 * np.pi) * qutip.sigmaz())
H1 = (np.pi * qutip.sigmax())
tlist = np.linspace(0, 20, 200)
args = {'alpha': 0.5}
w_a = 0.35
a = 0.5
.parametrize(['unitary_op'], [pytest.param(None, id='state'), pytest.param(qutip.qeye(2), id='unitary')])
.parametrize(['H', 'analytical'], [pytest.param(H1, (lambda t, _: t), id='const_H'), pytest.param((lambda t, alpha: ((np.pi * qutip.sigmax()) * np.exp(((- alpha) * t)))), _analytic, id='func_H'), pytest.param([[H1, (lambda t, args: np.exp(((- args['alpha']) * t)))]], _analytic, id='list_func_H'), pytest.param([[H1, 'exp(-alpha*t)']], _analytic, id='list_str_H'), pytest.param([[H1, np.exp(((- args['alpha']) * tlist))]], _analytic, id='list_array_H'), pytest.param(qutip.QobjEvo([[H1, 'exp(-alpha*t)']], args=args), _analytic, id='QobjEvo_H')])
def test_sesolve(self, H, analytical, unitary_op):
tol = 0.005
psi0 = qutip.basis(2, 0)
options = {'progress_bar': None}
if (unitary_op is None):
output = sesolve(H, psi0, self.tlist, [qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()], args=self.args, options=options)
(sx, sy, sz) = (output.expect[0], output.expect[1], output.expect[2])
else:
output = sesolve(H, unitary_op, self.tlist, args=self.args, options=options)
sx = [qutip.expect(qutip.sigmax(), (U * psi0)) for U in output.states]
sy = [qutip.expect(qutip.sigmay(), (U * psi0)) for U in output.states]
sz = [qutip.expect(qutip.sigmaz(), (U * psi0)) for U in output.states]
sx_analytic = np.zeros(np.shape(self.tlist))
sy_analytic = np.array([(- np.sin(((2 * np.pi) * analytical(t, self.args['alpha'])))) for t in self.tlist])
sz_analytic = np.array([np.cos(((2 * np.pi) * analytical(t, self.args['alpha']))) for t in self.tlist])
np.testing.assert_allclose(sx, sx_analytic, atol=tol)
np.testing.assert_allclose(sy, sy_analytic, atol=tol)
np.testing.assert_allclose(sz, sz_analytic, atol=tol)
.parametrize(['state_type'], [pytest.param('ket', id='ket'), pytest.param('unitary', id='unitary')])
def test_sesolve_normalization(self, state_type):
H = qutip.Qobj([[1, (- 0.1j)], [(- 0.1j), 1]])
psi0 = qutip.basis(2, 0)
options = {'normalize_output': True, 'progress_bar': None}
if (state_type == 'ket'):
output = sesolve(H, psi0, self.tlist, e_ops=[], options=options)
norms = [state.norm() for state in output.states]
np.testing.assert_allclose(norms, [1.0 for _ in self.tlist], atol=1e-15)
else:
U = qutip.qeye(2)
output = sesolve(H, U, self.tlist, e_ops=[], options=options)
norms = [state.norm() for state in output.states]
assert all(((norm > 2) for norm in norms[1:]))
.parametrize(['unitary_op'], [pytest.param(None, id='state'), pytest.param(qutip.qeye(2), id='unitary')])
.parametrize('method', all_ode_method, ids=all_ode_method)
def test_sesolve_method(self, method, unitary_op):
tol = 0.005
psi0 = qutip.basis(2, 0)
options = {'method': method, 'progress_bar': None}
H = [[self.H1, 'exp(-alpha*t)']]
if (unitary_op is None):
output = sesolve(H, psi0, self.tlist, [qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()], args=self.args, options=options)
(sx, sy, sz) = (output.expect[0], output.expect[1], output.expect[2])
else:
output = sesolve(H, unitary_op, self.tlist, args=self.args, options=options)
sx = [qutip.expect(qutip.sigmax(), (U * psi0)) for U in output.states]
sy = [qutip.expect(qutip.sigmay(), (U * psi0)) for U in output.states]
sz = [qutip.expect(qutip.sigmaz(), (U * psi0)) for U in output.states]
sx_analytic = np.zeros(np.shape(self.tlist))
sy_analytic = np.array([np.sin((((- 2) * np.pi) * _analytic(t, self.args['alpha']))) for t in self.tlist])
sz_analytic = np.array([np.cos(((2 * np.pi) * _analytic(t, self.args['alpha']))) for t in self.tlist])
np.testing.assert_allclose(sx, sx_analytic, atol=tol)
np.testing.assert_allclose(sy, sy_analytic, atol=tol)
np.testing.assert_allclose(sz, sz_analytic, atol=tol)
.parametrize('normalize', [True, False], ids=['Normalized', ''])
.parametrize(['H', 'args'], [pytest.param((H0 + H1), {}, id='const_H'), pytest.param((lambda t, a, w_a: (((((a * t) * 0.2) * np.pi) * qutip.sigmaz()) + ((np.cos((w_a * t)) * np.pi) * qutip.sigmax()))), {'a': a, 'w_a': w_a}, id='func_H'), pytest.param([[H0, (lambda t, args: (args['a'] * t))], [H1, (lambda t, args: np.cos((args['w_a'] * t)))]], {'a': a, 'w_a': w_a}, id='list_func_H'), pytest.param([H0, [H1, 'cos(w_a*t)']], {'w_a': w_a}, id='list_str_H')])
def test_compare_evolution(self, H, normalize, args, tol=5e-05):
psi0 = qutip.basis(2, 0)
U0 = qutip.qeye(2)
options = {'store_states': True, 'normalize_output': normalize, 'progress_bar': None}
out_s = sesolve(H, psi0, self.tlist, [qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()], options=options, args=args)
(xs, ys, zs) = (out_s.expect[0], out_s.expect[1], out_s.expect[2])
xss = [qutip.expect(qutip.sigmax(), U) for U in out_s.states]
yss = [qutip.expect(qutip.sigmay(), U) for U in out_s.states]
zss = [qutip.expect(qutip.sigmaz(), U) for U in out_s.states]
np.testing.assert_allclose(xs, xss, atol=tol)
np.testing.assert_allclose(ys, yss, atol=tol)
np.testing.assert_allclose(zs, zss, atol=tol)
if normalize:
tol = 0.0005
out_u = sesolve(H, U0, self.tlist, options=options, args=args)
xu = [qutip.expect(qutip.sigmax(), (U * psi0)) for U in out_u.states]
yu = [qutip.expect(qutip.sigmay(), (U * psi0)) for U in out_u.states]
zu = [qutip.expect(qutip.sigmaz(), (U * psi0)) for U in out_u.states]
np.testing.assert_allclose(xs, xu, atol=tol)
np.testing.assert_allclose(ys, yu, atol=tol)
np.testing.assert_allclose(zs, zu, atol=tol)
def test_sesolver_args(self):
options = {'progress_bar': None}
solver_obj = SESolver(qutip.QobjEvo([self.H0, [self.H1, 'a']], args={'a': 1}), options=options)
res = solver_obj.run(qutip.basis(2, 1), [0, 1, 2, 3], e_ops=[qutip.num(2)], args={'a': 0})
np.testing.assert_allclose(res.expect[0], 1)
def test_sesolver_pickling(self):
e_ops = [qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()]
options = {'progress_bar': None}
solver_obj = SESolver((self.H0 + self.H1), options=options)
solver_copy = pickle.loads(pickle.dumps(solver_obj))
(sx, sy, sz) = solver_obj.run(qutip.basis(2, 1), [0, 1, 2, 3], e_ops=e_ops).expect
(csx, csy, csz) = solver_copy.run(qutip.basis(2, 1), [0, 1, 2, 3], e_ops=e_ops).expect
np.testing.assert_allclose(sx, csx)
np.testing.assert_allclose(sy, csy)
np.testing.assert_allclose(sz, csz)
.parametrize('method', all_ode_method, ids=all_ode_method)
def test_sesolver_stepping(self, method):
options = {'method': method, 'atol': 1e-07, 'rtol': 1e-08, 'progress_bar': None}
solver_obj = SESolver(qutip.QobjEvo([self.H1, (lambda t, a: a)], args={'a': 0.25}), options=options)
solver_obj.start(qutip.basis(2, 0), 0)
sr2 = ((- (2 ** 0.5)) / 2)
state = solver_obj.step(1)
np.testing.assert_allclose(qutip.expect(qutip.sigmax(), state), 0.0, atol=2e-06)
np.testing.assert_allclose(qutip.expect(qutip.sigmay(), state), (- 1), atol=2e-06)
np.testing.assert_allclose(qutip.expect(qutip.sigmaz(), state), 0.0, atol=2e-06)
state = solver_obj.step(2, args={'a': 0.125})
np.testing.assert_allclose(qutip.expect(qutip.sigmax(), state), 0.0, atol=2e-06)
np.testing.assert_allclose(qutip.expect(qutip.sigmay(), state), sr2, atol=2e-06)
np.testing.assert_allclose(qutip.expect(qutip.sigmaz(), state), sr2, atol=2e-06)
solver_obj.options = {'method': 'adams', 'atol': 1e-07, 'rtol': 1e-08, 'progress_bar': None}
state = solver_obj.step(3, args={'a': 0})
np.testing.assert_allclose(qutip.expect(qutip.sigmax(), state), 0.0, atol=2e-06)
np.testing.assert_allclose(qutip.expect(qutip.sigmay(), state), sr2, atol=2e-06)
np.testing.assert_allclose(qutip.expect(qutip.sigmaz(), state), sr2, atol=2e-06) |
class Post():
    """GraphQL blog-post type with locale-aware text fields.

    NOTE(review): the ``@strawberry.type`` decorator is presumably applied
    where this class is registered with the schema -- confirm against the
    schema module.
    """
    # Localized fields resolve through make_localized_resolver, which is
    # expected to pick the translation for the current request's locale
    # (assumption -- confirm against make_localized_resolver's definition).
    id: strawberry.ID
    author: BlogPostAuthor
    title: str = strawberry.field(resolver=make_localized_resolver('title'))
    slug: str = strawberry.field(resolver=make_localized_resolver('slug'))
    excerpt: str = strawberry.field(resolver=make_localized_resolver('excerpt'))
    content: str = strawberry.field(resolver=make_localized_resolver('content'))
    image: Optional[str]
    published: datetime

    def __init__(self, id: strawberry.ID, author: BlogPostAuthor, title: str, slug: str, excerpt: str, content: str, published: datetime, image: Optional[str]) -> None:
        """Store all field values verbatim on the instance."""
        self.id = id
        self.author = author
        self.title = title
        self.slug = slug
        self.excerpt = excerpt
        self.content = content
        self.published = published
        self.image = image
def _get_density_and_strength_from_npz(npz):
    """Collect per-bar densities and 16-slot beat-strength rows from a token
    sequence.

    Each 'bar' token opens a new row: its density value plus a zeroed
    16-element strength vector.  Every following 'beat' token writes its
    strength into the most recent row at the beat's tick position (a bar
    token is therefore expected before the first beat token).

    Returns a pair of numpy arrays ``(densities, strengths)``.
    """
    densities = []
    strengths = []
    for token in npz:
        if _is_bar_word(token):
            densities.append(_get_density(token))
            strengths.append([0] * 16)
        elif _is_beat_word(token):
            strength, tick = _get_strength_and_tick(token)
            strengths[-1][tick] = strength
    return np.asarray(densities), np.asarray(strengths)
def animate(callback_val):
    """GLUT timer callback: advance the simulation by the pending timesteps,
    track the achieved update rate, and re-arm the timer to keep playback on
    pace with wall-clock time.

    callback_val: opaque integer passed through glutTimerFunc (unused).
    """
    global prev_time
    global updates_per_sec
    global world
    # NOTE(review): with counter_decay = 0 the exponential smoothing below
    # degenerates to updates_per_sec = update_count -- confirm whether a
    # nonzero decay factor was intended.
    counter_decay = 0
    if animating:
        num_steps = get_num_timesteps()
        curr_time = get_curr_time()
        time_elapsed = (curr_time - prev_time)
        prev_time = curr_time
        # Negative playback speed steps the simulation backwards.
        timestep = ((- update_timestep) if (playback_speed < 0) else update_timestep)
        for i in range(num_steps):
            update_world(world, timestep)
        # Updates per second; time_elapsed is in milliseconds here
        # (assumption from the 0.001 factor -- confirm get_curr_time units).
        update_count = (num_steps / (0.001 * time_elapsed))
        if np.isfinite(update_count):
            updates_per_sec = ((counter_decay * updates_per_sec) + ((1 - counter_decay) * update_count))
            world.env.set_updates_per_sec(updates_per_sec)
        # Re-arm the timer, discounting the time this update itself took.
        timer_step = calc_display_anim_time(num_steps)
        update_dur = (get_curr_time() - curr_time)
        timer_step -= update_dur
        timer_step = np.maximum(timer_step, 0)
        glutTimerFunc(int(timer_step), animate, 0)
        glutPostRedisplay()
    if world.env.is_done():
        shutdown()
    return
class MaskRCNNFPNFeatureExtractor(nn.Module):
    """Mask-head feature extractor: an ROI pooler followed by a tower of
    3x3 convolutions with ReLU activations."""

    def __init__(self, cfg):
        """Build the pooler and the conv tower from the mask-head config."""
        super(MaskRCNNFPNFeatureExtractor, self).__init__()
        head_cfg = cfg.MODEL.ROI_MASK_HEAD
        resolution = head_cfg.POOLER_RESOLUTION
        self.pooler = Pooler(output_size=(resolution, resolution),
                             scales=head_cfg.POOLER_SCALES,
                             sampling_ratio=head_cfg.POOLER_SAMPLING_RATIO)
        use_gn = head_cfg.USE_GN
        dilation = head_cfg.DILATION
        in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
        # Register each conv as a named submodule and remember the names so
        # forward() can iterate them in order.
        self.blocks = []
        for layer_idx, out_channels in enumerate(head_cfg.CONV_LAYERS, 1):
            layer_name = 'mask_fcn{}'.format(layer_idx)
            conv = make_conv3x3(in_channels, out_channels,
                                dilation=dilation, stride=1, use_gn=use_gn)
            self.add_module(layer_name, conv)
            in_channels = out_channels
            self.blocks.append(layer_name)

    def forward(self, x, proposals):
        """Pool ROI features for the proposals, then run the ReLU conv tower."""
        features = self.pooler(x, proposals)
        for layer_name in self.blocks:
            features = F.relu(getattr(self, layer_name)(features))
        return features
class BoosterInfo():
    """Serializable snapshot of a fitted Booster: item ID, active state, and
    per-side-effect active flags."""

    def __init__(self, itemID, state=None, sideEffects=None):
        self.itemID = itemID            # type ID of the booster item
        self.state = state              # active flag, or None if unspecified
        self.sideEffects = sideEffects  # {effectID: active} mapping, or None

    @classmethod
    def fromBooster(cls, booster):
        """Build a snapshot from a live Booster; None maps to None.

        Fixed: this alternate constructor uses ``cls(...)`` and must be a
        classmethod; previously the decorator was missing, so
        ``BoosterInfo.fromBooster(booster)`` would have bound ``booster`` to
        ``cls`` and failed.
        """
        if booster is None:
            return None
        info = cls(itemID=booster.itemID, state=booster.active,
                   sideEffects={se.effectID: se.active for se in booster.sideEffects})
        return info

    def toBooster(self):
        """Reconstruct a Booster from this snapshot.

        Returns None (after logging) if the stored item ID no longer maps to
        a valid booster item.
        """
        item = Market.getInstance().getItem(self.itemID, eager=('attributes', 'group.category'))
        try:
            booster = Booster(item)
        except ValueError:
            pyfalog.warning('Invalid item: {}'.format(self.itemID))
            return None
        if self.state is not None:
            booster.active = self.state
        if self.sideEffects is not None:
            # Unknown effect IDs keep the booster's own default state.
            for sideEffect in booster.sideEffects:
                sideEffect.active = self.sideEffects.get(sideEffect.effectID, sideEffect.active)
        return booster

    def __repr__(self):
        return makeReprStr(self, ['itemID', 'state', 'sideEffects'])
def test(venv_client):
    """End to end: unknown users 404, a created user round-trips its nickname."""
    # No users yet -> 404.
    assert venv_client.get('/users/1').status_code == 404
    nickname = str(uuid.uuid4())
    created = venv_client.post('/users', json=dict(name=nickname))
    created.raise_for_status()
    payload = created.json()
    fetched = venv_client.get('/users/{}'.format(payload['id'])).json()
    assert fetched['nickname'] == nickname
def test_ket2dm():
    """ket2dm must build the same projector from a ket or its dual bra, and
    reject operator input with a TypeError."""
    dims = 5
    ket = qutip.coherent(dims, 2)
    bra = ket.dag()
    dm_from_ket = qutip.ket2dm(ket)
    dm_from_bra = qutip.ket2dm(bra)
    # |psi><psi| is a rank-one projector with unit overlap on |psi>.
    assert qutip.expect(dm_from_ket, ket) == pytest.approx(1.0)
    assert qutip.isoper(dm_from_ket)
    assert dm_from_ket == ket * bra
    assert dm_from_ket == dm_from_bra
    with pytest.raises(TypeError) as excinfo:
        qutip.ket2dm(dm_from_ket)
    assert str(excinfo.value) == 'Input is not a ket or bra vector.'
def export_graph(nodes):
    """Serialize a synset graph to a list of plain dicts, ordered by WordNet ID.

    Each entry carries ``wn_id``, ``words`` and sorted ``children_ids`` /
    ``parents_ids``.  Raises ValueError if two nodes share a WordNet ID or if
    any node links to a parent/child outside of *nodes*.
    """
    by_id = {synset.wn_id: synset for synset in nodes}
    known_ids = set(by_id)
    if len(known_ids) != len(nodes):
        raise ValueError('Duplicate WordNet IDs in the same graph')
    exported = []
    for wn_id in sorted(known_ids):
        synset = by_id[wn_id]
        child_ids = {child.wn_id for child in synset.children}
        if not child_ids.issubset(known_ids):
            raise ValueError('Synset has children outside of the graph')
        parent_ids = {parent.wn_id for parent in synset.parents}
        if not parent_ids.issubset(known_ids):
            raise ValueError('Synset has parents outside of the graph')
        exported.append(dict(wn_id=wn_id, words=synset.words,
                             children_ids=sorted(child_ids),
                             parents_ids=sorted(parent_ids)))
    return exported
def create_pickup_database(game_enum: RandovaniaGame):
    """Write and return a minimal pickup database for *game_enum*: one
    'Powerful Weapon' standard pickup in a 'weapon' category with an
    'ammo-based' broad category."""
    weapon_category = PickupCategory(name='weapon', long_name='Weapon',
                                     hint_details=('a ', 'weapon'),
                                     hinted_as_major=True)
    ammo_category = PickupCategory(name='ammo-based', long_name='Ammo-Based',
                                   hint_details=('an ', 'ammo-based item'),
                                   hinted_as_major=False)
    powerful_weapon = StandardPickupDefinition(
        game=game_enum,
        name='Powerful Weapon',
        pickup_category=weapon_category,
        broad_category=ammo_category,
        model_name='Powerful',
        offworld_models=frozendict(),
        progression=('Weapon',),
        default_shuffled_count=1,
        default_starting_count=0,
        preferred_location_category=LocationCategory.MAJOR,
    )
    pickup_db = PickupDatabase(
        pickup_categories={'weapon': weapon_category, 'ammo-based': ammo_category},
        standard_pickups={'Powerful Weapon': powerful_weapon},
        ammo_pickups={},
        default_pickups={},
        default_offworld_model='Powerful',
    )
    default_database.write_pickup_database_for_game(pickup_db, game_enum)
    return pickup_db
def main(_):
    """Evaluate a trained checkpoint: restore EMA shadow weights, compute
    top-1/top-5 accuracy over the test split, print the results, and append
    them to a text file under FLAGS.eval_dir."""
    with tf.Graph().as_default():
        # Input pipeline and inference graph (inference mode only).
        (images, labels) = utils.prepare_testdata(FLAGS.dataset_dir, FLAGS.batch_size)
        (logits, _) = network.inference(images, FLAGS.num_classes, for_training=False, feature_name=FLAGS.feature_name)
        top_1_op = tf.nn.in_top_k(logits, labels, 1)
        top_5_op = tf.nn.in_top_k(logits, labels, 5)
        # Restore the exponential-moving-average shadow variables rather than
        # the raw trained weights.
        var_averages = tf.train.ExponentialMovingAverage(FLAGS.ema_decay)
        var_to_restore = var_averages.variables_to_restore()
        saver = tf.train.Saver(var_to_restore)
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
        model_checkpoint_path = ckpt.model_checkpoint_path
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            saver.restore(sess, model_checkpoint_path)
            # The global step is encoded in the checkpoint file name suffix.
            global_step = ckpt.model_checkpoint_path.split('/')[(- 1)].split('-')[(- 1)]
            print(('Successfully loaded model from %s at step=%s.' % (model_checkpoint_path, global_step)))
            # TF1 queue-runner input pipeline: start reader threads under a
            # coordinator so they can be shut down cleanly on error.
            coord = tf.train.Coordinator()
            try:
                threads = []
                for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                    threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))
                num_iter = int(math.ceil((FLAGS.num_examples / FLAGS.batch_size)))
                print(('num_iter = ' + str(num_iter)))
                count_top_1 = count_top_5 = 0.0
                total_sample_count = (num_iter * FLAGS.batch_size)
                step = 0
                print(('%s: starting evaluation on (%s).' % (datetime.now(), 'test')))
                start_time = time.time()
                while ((step < num_iter) and (not coord.should_stop())):
                    (top_1, top_5) = sess.run([top_1_op, top_5_op])
                    count_top_1 += np.sum(top_1)
                    count_top_5 += np.sum(top_5)
                    step += 1
                    if ((step % 20) == 0):
                        # Periodic throughput report every 20 batches.
                        duration = (time.time() - start_time)
                        sec_per_batch = (duration / 20.0)
                        examples_per_sec = (FLAGS.batch_size / sec_per_batch)
                        print(('%s: [%d batches out of %d] (%.1f examples/sec; %.3f sec/batch)' % (datetime.now(), step, num_iter, examples_per_sec, sec_per_batch)))
                        start_time = time.time()
                precision_at_1 = (count_top_1 / total_sample_count)
                recall_at_5 = (count_top_5 / total_sample_count)
                print(('%s: precision 1 = %.4f recall 5 = %.4f [%d examples]' % (datetime.now(), precision_at_1, recall_at_5, total_sample_count)))
                # Append the results (checkpoint path + metrics) to the log file.
                file_path = (FLAGS.eval_dir + FLAGS.save_txt)
                text_file = open(file_path, 'a')
                text_file.write(FLAGS.checkpoint_path)
                text_file.write('\n')
                text_file.write(('%s: precision 1 = %.4f recall 5 = %.4f' % (datetime.now(), precision_at_1, recall_at_5)))
                text_file.write('\n')
                text_file.close()
            except Exception as e:
                coord.request_stop(e)
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)
def RegisterSignalsFor(model):
    """Wire Django model signals for *model* to Dispatcher events.

    Returns a mapping from lifecycle name ('creating', 'created', ...) to the
    fully-qualified event name that gets dispatched for it.
    """
    base_name = 'events::db::{}'.format(GetPathFromClass(model))
    eventsDict = {event: '{}::{}'.format(base_name, event) for event in events}

    def pre_save_hook(sender, instance, *args, **kwargs):
        # No primary key yet means the row is about to be created.
        if instance.id is None:
            Dispatcher.Dispatch(eventsDict['creating'], instance)
        else:
            Dispatcher.Dispatch(eventsDict['updating'], instance)
        gevent.sleep()  # cooperative yield so greenlet listeners can run

    def post_save_hook(sender, instance, created, *args, **kwargs):
        if created:
            Dispatcher.Dispatch(eventsDict['created'], instance)
        else:
            Dispatcher.Dispatch(eventsDict['updated'], instance)
        gevent.sleep()

    def pre_delete_hook(sender, instance, *args, **kwargs):
        Dispatcher.Dispatch(eventsDict['deleting'], instance)
        gevent.sleep()

    def post_delete_hook(sender, instance, *args, **kwargs):
        Dispatcher.Dispatch(eventsDict['deleted'], instance)
        gevent.sleep()

    # weak=False keeps the closures alive; a fresh uuid per connection avoids
    # Django's dispatch_uid-based de-duplication.
    pre_save.connect(pre_save_hook, sender=model, weak=False, dispatch_uid=str(uuid.uuid4()))
    post_save.connect(post_save_hook, sender=model, weak=False, dispatch_uid=str(uuid.uuid4()))
    pre_delete.connect(pre_delete_hook, sender=model, weak=False, dispatch_uid=str(uuid.uuid4()))
    post_delete.connect(post_delete_hook, sender=model, weak=False, dispatch_uid=str(uuid.uuid4()))
    return eventsDict
class XAUDIO2_PERFORMANCE_DATA(ctypes.Structure):
    """ctypes mirror of the XAudio2 XAUDIO2_PERFORMANCE_DATA statistics
    struct (voice counts, latency, memory use, glitch count, ...)."""
    _fields_ = [
        ('AudioCyclesSinceLastQuery', c_uint64),
        ('TotalCyclesSinceLastQuery', c_uint64),
        ('MinimumCyclesPerQuantum', UINT32),
        ('MaximumCyclesPerQuantum', UINT32),
        ('MemoryUsageInBytes', UINT32),
        ('CurrentLatencyInSamples', UINT32),
        ('GlitchesSinceEngineStarted', UINT32),
        ('ActiveSourceVoiceCount', UINT32),
        ('TotalSourceVoiceCount', UINT32),
        ('ActiveSubmixVoiceCount', UINT32),
        ('ActiveResamplerCount', UINT32),
        ('ActiveMatrixMixCount', UINT32),
        ('ActiveXmaSourceVoices', UINT32),
        ('ActiveXmaStreams', UINT32),
    ]

    def __repr__(self):
        # Summarize only the fields that matter for quick diagnostics.
        template = ('XAUDIO2PerformanceData(active_voices={}, total_voices={}, '
                    'glitches={}, latency={} samples, memory_usage={} bytes)')
        return template.format(self.ActiveSourceVoiceCount,
                               self.TotalSourceVoiceCount,
                               self.GlitchesSinceEngineStarted,
                               self.CurrentLatencyInSamples,
                               self.MemoryUsageInBytes)
def test_charclass_union() -> None:
    """Union of character classes reduces to the expected class, for every
    combination of plain and negated operands."""
    cases = [
        ('[ab]', '[bc]', '[abc]'),
        ('[ab]', '[^bc]', '[^c]'),
        ('[^ab]', '[bc]', '[^a]'),
        ('[^ab]', '[^bc]', '[^b]'),
    ]
    for left, right, expected in cases:
        assert (parse(left) | parse(right)).reduce() == parse(expected)
class ExamplesTests(TestCasePlus):
    """End-to-end smoke tests for the Flax example scripts.

    Each test builds a CLI argv for one script, patches sys.argv, runs the
    script's main() against tiny fixture data, then checks the resulting
    metrics against intentionally loose thresholds.
    """

    def test_run_glue(self):
        """Text classification (run_glue.py) on the MRPC fixture."""
        # Mirror the script's logging to stdout so failures show the log.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)

    def test_run_clm(self):
        """Causal language modeling (run_clm_flax.py) on the sample text."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 100)

    def test_run_summarization(self):
        """Summarization (run_summarization.py) on the XSum fixture; checks
        ROUGE scores on the test split."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split='test')
            self.assertGreaterEqual(result['test_rouge1'], 10)
            self.assertGreaterEqual(result['test_rouge2'], 2)
            self.assertGreaterEqual(result['test_rougeL'], 7)
            self.assertGreaterEqual(result['test_rougeLsum'], 7)

    def test_run_mlm(self):
        """Masked language modeling (run_mlm.py) on the sample text."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 42)

    def test_run_t5_mlm(self):
        """T5 span-corruption pretraining (run_t5_mlm_flax.py)."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.42)

    def test_run_ner(self):
        """Token classification (run_flax_ner.py) on the CoNLL fixture."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        # Multi-GPU runs need more epochs to reach the threshold.
        epochs = (7 if (get_gpu_count() > 1) else 2)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            self.assertGreaterEqual(result['eval_f1'], 0.3)

    def test_run_qa(self):
        """Question answering (run_qa.py) on the SQuAD v2 fixture."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)
class TestParameterize():
    """Tests for callable ``ids=`` in parametrize/fixtures: when the id
    function returns None, pytest falls back to its default id for that value.

    Fixed: the embedded test sources had their decorator prefixes truncated
    (bare ``.parametrize`` / bare ``(params=...)`` lines, which are syntax
    errors in the generated files); restored ``@pytest.mark.parametrize`` and
    ``@pytest.fixture``.
    """

    def test_idfn_marker(self, pytester: Pytester) -> None:
        """idfn via @pytest.mark.parametrize: 'spam'/'ham' ids are used."""
        pytester.makepyfile(
            """
            import pytest

            def idfn(param):
                if param == 0:
                    return 'spam'
                elif param == 1:
                    return 'ham'
                else:
                    return None

            @pytest.mark.parametrize('a,b', [(0, 2), (1, 2)], ids=idfn)
            def test_params(a, b):
                pass
            """
        )
        res = pytester.runpytest('--collect-only')
        res.stdout.fnmatch_lines(['*spam-2*', '*ham-2*'])

    def test_idfn_fixture(self, pytester: Pytester) -> None:
        """idfn via @pytest.fixture(params=..., ids=idfn)."""
        pytester.makepyfile(
            """
            import pytest

            def idfn(param):
                if param == 0:
                    return 'spam'
                elif param == 1:
                    return 'ham'
                else:
                    return None

            @pytest.fixture(params=[0, 1], ids=idfn)
            def a(request):
                return request.param

            @pytest.fixture(params=[1, 2], ids=idfn)
            def b(request):
                return request.param

            def test_params(a, b):
                pass
            """
        )
        res = pytester.runpytest('--collect-only')
        res.stdout.fnmatch_lines(['*spam-2*', '*ham-2*'])
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel's weights as a TF1 checkpoint under ckpt_dir.

    Variable names are rewritten from the PyTorch naming scheme to the
    original TF BERT scheme, and weight matrices listed in
    ``tensors_to_transpose`` are transposed to match TF's layout.
    """
    # PyTorch stores these as (out, in); TF BERT expects the transpose.
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
    # Ordered rename rules applied left to right to each parameter name.
    var_map = (('layer.', 'layer_'), ('word_embeddings.weight', 'word_embeddings'), ('position_embeddings.weight', 'position_embeddings'), ('token_type_embeddings.weight', 'token_type_embeddings'), ('.', '/'), ('LayerNorm/weight', 'LayerNorm/gamma'), ('LayerNorm/bias', 'LayerNorm/beta'), ('weight', 'kernel'))
    if (not os.path.isdir(ckpt_dir)):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Apply every rename rule in order, then prefix with the bert/ scope.
        for (patt, repl) in iter(var_map):
            name = name.replace(patt, repl)
        return 'bert/{}'.format(name)

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        # Create a zero-initialized TF variable matching the tensor's
        # dtype/shape; the actual values are assigned afterwards.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any([(x in var_name) for x in tensors_to_transpose]):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            # Read back and compare as a sanity check on the copy.
            tf_weight = session.run(tf_var)
            print('Successfully created {}: {}'.format(tf_name, np.allclose(tf_weight, torch_tensor)))
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, (model_name.replace('-', '_') + '.ckpt')))
class Migration(migrations.Migration):
    """Initial migration: creates the Event model (created/modified
    timestamps, optional start/end and lat/long, i18n title/slug/content,
    FK to Conference, unique together on (slug, conference))."""
    initial = True
    dependencies = [('conferences', '0011_auto__2340')]
    operations = [migrations.CreateModel(name='Event', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('start', models.DateTimeField(blank=True, null=True, verbose_name='start')), ('end', models.DateTimeField(blank=True, null=True, verbose_name='end')), ('latitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9, null=True, verbose_name='latitude')), ('longitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9, null=True, verbose_name='longitude')), ('map_link', models.URLField(blank=True, verbose_name='map link')), ('title', i18n.fields.I18nCharField(verbose_name='title')), ('slug', i18n.fields.I18nCharField(verbose_name='slug')), ('content', i18n.fields.I18nTextField(verbose_name='content')), ('conference', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='conferences.Conference', verbose_name='conference'))], options={'verbose_name': 'Event', 'verbose_name_plural': 'Events', 'unique_together': {('slug', 'conference')}})]
def test_bsmgrid(tmpdir):
    """End-to-end yadage run of the BSM-grid scaffold workflow: write a spec
    file pointing at the bundled test data, invoke the steering CLI, and
    check it exits cleanly and produces the summary output.

    NOTE(review): the ``signal``, ``data`` and ``bkg`` keyword arguments are
    not referenced by the format template below -- presumably leftovers;
    confirm before removing.
    """
    spec = "dataarg: {workdir}\ndataopts:\n pathbase: {pathbase}\n subinits:\n signals:\n run_points_0: siginputs/sigpoint0\n run_points_1: siginputs/sigpoint1\n run_points_2: siginputs/sigpoint2\n data: datainputs\n backgrounds:\n run_bkgs_0: bkginputs/bkg_sample_0\n run_bkgs_1: bkginputs/bkg_sample_1\n run_bkgs_2: bkginputs/bkg_sample_2\n run_bkgs_3: bkginputs/bkg_sample_3\nworkflow: workflow.yml\ntoplevel: {toplevel}\nplugins: []\nbackend: 'foregroundasync'\nbackendopts: {{}}\n".format(pathbase=os.path.abspath('tests/testspecs/bsm_grid_scaffold/basedata'), workdir=('local:' + os.path.join(str(tmpdir), 'workdir')), signal=os.path.abspath('tests/testspecs/bsm_grid_scaffold/basedata/siginputs'), data=os.path.abspath('tests/testspecs/bsm_grid_scaffold/basedata/datainputs'), bkg=os.path.abspath('tests/testspecs/bsm_grid_scaffold/basedata/bkginputs'), toplevel=os.path.abspath('tests/testspecs/bsm_grid_scaffold/workflow'))
    f = tmpdir.join('spec.yml')
    f.write(spec)
    runner = CliRunner()
    result = runner.invoke(yadage.steering.main, ['-f', str(f)])
    assert (result.exit_code == 0)
    # The workflow must have produced the final summary file.
    assert tmpdir.join('workdir/inference/summary_plots/output.txt').read()
def read_setup_file(filename):
    """Reverse-engineer an old-style module Setup file and return a list of
    Extension instances describing the modules it defines.

    Each non-variable line is makefile-var-expanded and tokenized; the first
    word is the module name, and the remaining words (source files, -I/-D/-U,
    -l/-L/-R, linker pass-throughs, object/library files) are mapped onto the
    corresponding Extension fields.  Unrecognized words produce a warning.
    """
    from distutils.sysconfig import parse_makefile, expand_makefile_vars, _variable_rx
    from distutils.text_file import TextFile
    from distutils.util import split_quoted
    # Makefile-style VAR=value assignments, usable in module lines.
    vars = parse_makefile(filename)
    file = TextFile(filename, strip_comments=1, skip_blanks=1, join_lines=1, lstrip_ws=1, rstrip_ws=1)
    try:
        extensions = []
        while True:
            line = file.readline()
            if (line is None):  # end of file
                break
            if _variable_rx.match(line):  # already handled by parse_makefile
                continue
            if (line[0] == line[(- 1)] == '*'):
                file.warn(("'%s' lines not handled yet" % line))
                continue
            line = expand_makefile_vars(line, vars)
            words = split_quoted(line)
            # First word names the module; the rest describe how to build it.
            module = words[0]
            ext = Extension(module, [])
            append_next_word = None
            for word in words[1:]:
                if (append_next_word is not None):
                    # Previous word (-rpath/-Xlinker/-Xcompiler/-u) consumes
                    # this one as its argument.
                    append_next_word.append(word)
                    append_next_word = None
                    continue
                suffix = os.path.splitext(word)[1]
                switch = word[0:2]
                value = word[2:]
                if (suffix in ('.c', '.cc', '.cpp', '.cxx', '.c++', '.m', '.mm')):
                    ext.sources.append(word)
                elif (switch == '-I'):
                    ext.include_dirs.append(value)
                elif (switch == '-D'):
                    equals = value.find('=')
                    if (equals == (- 1)):  # bare "-DFOO" -- no value
                        ext.define_macros.append((value, None))
                    else:
                        # "-DFOO=bar": the macro value starts right after '='.
                        # Fixed off-by-one: the old `equals + 2` slice dropped
                        # the first character of the macro value.
                        ext.define_macros.append((value[0:equals], value[(equals + 1):]))
                elif (switch == '-U'):
                    ext.undef_macros.append(value)
                elif (switch == '-C'):  # only here 'cause makesetup has it!
                    ext.extra_compile_args.append(word)
                elif (switch == '-l'):
                    ext.libraries.append(value)
                elif (switch == '-L'):
                    ext.library_dirs.append(value)
                elif (switch == '-R'):
                    ext.runtime_library_dirs.append(value)
                elif (word == '-rpath'):
                    append_next_word = ext.runtime_library_dirs
                elif (word == '-Xlinker'):
                    append_next_word = ext.extra_link_args
                elif (word == '-Xcompiler'):
                    append_next_word = ext.extra_compile_args
                elif (switch == '-u'):
                    ext.extra_link_args.append(word)
                    if (not value):
                        # "-u symbol" form: the symbol follows as its own word.
                        append_next_word = ext.extra_link_args
                elif (suffix in ('.a', '.so', '.sl', '.o', '.dylib')):
                    # Pre-built object/library files are linked in as-is.
                    ext.extra_objects.append(word)
                else:
                    file.warn(("unrecognized argument '%s'" % word))
            extensions.append(ext)
    finally:
        file.close()
    return extensions
def Saveddata():
    """Lazily import the eos saveddata classes and return them keyed by the
    short names used elsewhere in the app."""
    print('Building Saveddata')
    # Imported here (not at module level) to defer the heavy eos import.
    from eos.saveddata.ship import Ship
    from eos.saveddata.fit import Fit
    from eos.saveddata.character import Character
    from eos.saveddata.module import Module
    from eos.const import FittingModuleState
    from eos.saveddata.citadel import Citadel
    from eos.saveddata.booster import Booster
    return {
        'Structure': Citadel,
        'Ship': Ship,
        'Fit': Fit,
        'Character': Character,
        'Module': Module,
        'State': FittingModuleState,
        'Booster': Booster,
    }
def get_gt_bnd(gt):
    """Compute 1-pixel-style boundary masks for a batch of ground-truth masks.

    For each image, every label value is dilated with a radius-2 disk and the
    dilation ring (dilated minus original) is marked as boundary.

    NOTE(review): ``gt`` is binarized with ``gt > 0`` before the per-label
    loop, so ``_mask.max()`` is at most 1 and only one "label" is ever
    processed -- if per-instance labels were intended, the binarization
    should probably be dropped; confirm against callers.
    """
    gt = (gt > 0).astype(np.uint8).copy()
    bnd = np.zeros_like(gt).astype(np.uint8)
    for i in range(gt.shape[0]):  # batch dimension
        _mask = gt[i]
        for j in range(1, (_mask.max() + 1)):
            _gt = (_mask == j).astype(np.uint8).copy()
            # Morphological dilation with a radius-2 disk structuring element.
            _gt_dil = dilation(_gt, disk(2))
            bnd[i][((_gt_dil - _gt) == 1)] = 1
    return bnd
def main():
    """Parse training options, persist them to the experiment directory, and
    start training."""
    opts = TrainOptions().parse()
    os.makedirs(opts.exp_dir, exist_ok=True)
    opts_dict = vars(opts)
    pprint.pprint(opts_dict)
    # Save the resolved options alongside the experiment for reproducibility.
    opts_path = os.path.join(opts.exp_dir, 'opt.json')
    with open(opts_path, 'w') as handle:
        json.dump(opts_dict, handle, indent=4, sort_keys=True)
    Coach(opts).train()
class Html():
    """Writes a static HTML gallery page: one flex row per image showing the
    image, its prompt, its generation settings, and an optional init-image
    link."""

    def __init__(self, html_file, prompt_file_fullpath):
        # html_file: output path for the gallery page.
        # prompt_file_fullpath: companion prompt file linked from the header.
        self.html_file_name = html_file
        self.prompt_file_fullpath = prompt_file_fullpath
        self.f = None
        self.init_html()

    def init_html(self):
        """Open the output file and emit the document head and intro block."""
        self.f = open(self.html_file_name, 'w')
        self.write('<!DOCTYPE html>')
        self.write('<html>')
        self.write('<head>')
        self.write(' <title>AI Art Metadata Explorer</title>')
        self.write(' <link rel="stylesheet" href="gallery.css">')
        self.write('</head>')
        self.write('<body>')
        self.write('<div class="intro">')
        self.write(' <div>')
        # NOTE(review): the href below appears truncated (no URL before
        # target="_blank") -- confirm the intended link.
        self.write(' Metadata gallery of images created with <a href=" target="_blank">AI Art Generator</a>.')
        self.write(' </div>')
        self.write(' <div class="small">')
        self.write(((((' Companion prompt file generated as <a href="' + self.prompt_file_fullpath) + '">') + self.prompt_file_fullpath) + '</a>.'))
        self.write(' </div>')
        self.write('</div>')
        self.write('<div class="flex-column">')

    def add_image_section(self, dir, file, height, prompt, settings, initimg, initstr):
        """Emit one gallery row: linked image plus prompt/settings, and an
        init-image line when initimg is non-empty."""
        # Strip a Windows-style directory prefix from the init image name.
        ifn = initimg
        if ('\\' in initimg):
            ifn = initimg.rsplit('\\', 1)[1]
        fullpath = ((dir + '\\') + file)
        self.write(' <div class="flex-row">')
        self.write(' <div>')
        self.write(((' <a href="' + fullpath) + '">'))
        self.write(((((((' <img src="' + fullpath) + '" alt="') + file) + '" height="') + str(height)) + '">'))
        self.write(' </a>')
        self.write(' </div>')
        self.write(' <div class="flex-info">')
        self.write(' <div>')
        self.write((' ' + prompt))
        self.write(' </div>')
        self.write(' <div class="bottom">')
        self.write((' ' + settings))
        if (ifn != ''):
            imgtxt = (((((('<div><a href="' + initimg) + '">') + ifn) + '</a> used as init image ') + str(initstr)) + ' strength</div>')
            self.write((' ' + imgtxt))
        self.write(' </div>')
        self.write(' </div>')
        self.write(' </div>')

    def cleanup(self, exec_time):
        """Emit the footer (generation time/date), close the document and the
        file handle."""
        footer_text = ((((('Generated in ' + str(exec_time)) + ' milliseconds on ') + str(date.today())) + ' at ') + time.strftime('%H:%M:%S'))
        self.write('</div>')
        self.write('<div class="footer">')
        self.write(footer_text)
        self.write('</div>')
        self.write('</body>')
        self.write('</html>')
        self.f.close()
        print(('Created gallery file as ' + self.html_file_name))

    def write(self, text):
        """Write one line (with trailing newline) to the output file."""
        self.f.write((text + '\n'))
def process(datas, dataset, mode):
    """Run process_one over *datas* and dump the results to
    ./loss/<dataset>/word/<mode>_loss.json, creating the directory if needed.
    """
    results = [process_one(data) for data in datas]
    out_dir = './loss/{}/word/'.format(dataset)
    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs(out_dir, exist_ok=True)
    with open('./loss/{}/word/{}_loss.json'.format(dataset, mode), 'w') as file_obj:
        json.dump(results, file_obj)
@pytest.mark.parametrize('rng', [np.random.RandomState(123), np.random.default_rng(123)])
def test_GeneratorSharedVariable(rng):
    """shared(rng) must copy the generator by default and with borrow=False,
    alias it with borrow=True, and all copies must produce the original's
    next draw.

    Fixed: restored the ``@pytest.mark.parametrize`` decorator marker, which
    had been truncated to a bare ``.parametrize`` call.
    """
    s_rng_default = shared(rng)
    s_rng_True = shared(rng, borrow=True)
    s_rng_False = shared(rng, borrow=False)
    # Default and borrow=False deep-copy; borrow=True aliases the object.
    assert (s_rng_default.container.storage[0] is not rng)
    assert (s_rng_False.container.storage[0] is not rng)
    assert (s_rng_True.container.storage[0] is rng)
    if hasattr(rng, 'randn'):
        # Legacy RandomState API.
        v = rng.randn()
        v0 = s_rng_default.container.storage[0].randn()
        v1 = s_rng_False.container.storage[0].randn()
    else:
        # New Generator API.
        v = rng.standard_normal()
        v0 = s_rng_default.container.storage[0].standard_normal()
        v1 = s_rng_False.container.storage[0].standard_normal()
    assert (v == v0 == v1)
@torch.no_grad()
def calculate_fid_given_paths(paths, img_size=256, batch_size=50, real_loader=None, real_mu=None, real_cov=None):
    """Compute the FID between the two image folders in *paths*.

    When real-side statistics (real_loader/real_mu/real_cov) from a previous
    call are supplied, only the second (generated) path is re-evaluated.
    Returns ``(fid_value, real_loader, real_mu, real_cov)`` so the real-side
    statistics can be reused across calls.

    Fixed: restored the ``@torch.no_grad()`` decorator (the original header
    had been truncated to a bare ``_grad()`` call), so inception activations
    are computed without building autograd graphs; also deduplicated the
    twice-repeated activation-statistics loop into a helper.
    """
    print(('Calculating FID given paths %s and %s...' % (paths[0], paths[1])))
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    inception = InceptionV3().eval().to(device)

    def _stats(loader):
        # Mean and covariance of inception activations over one loader.
        actvs = []
        for x in tqdm(loader, total=len(loader)):
            actvs.append(inception(x.to(device)))
        actvs = torch.cat(actvs, dim=0).cpu().detach().numpy()
        return np.mean(actvs, axis=0), np.cov(actvs, rowvar=False)

    loaders = [get_eval_loader(path, img_size, batch_size) for path in paths]
    if (real_loader is None):
        # First call: evaluate both the real and the generated set.
        (mu, cov) = ([], [])
        for loader in loaders:
            m, c = _stats(loader)
            mu.append(m)
            cov.append(c)
        real_loader = loaders[0]
        real_mu = mu[0]
        real_cov = cov[0]
    else:
        # Reuse cached real-side statistics; evaluate only the generated set.
        (mu, cov) = ([real_mu], [real_cov])
        m, c = _stats(loaders[1])
        mu.append(m)
        cov.append(c)
    fid_value = frechet_distance(mu[0], cov[0], mu[1], cov[1])
    return (fid_value, real_loader, real_mu, real_cov)
def gen_rr_src01_template(num_nops_src0, num_nops_src1, num_nops_dest, reg_src0, reg_src1, inst, src0, src1, result):
    """Render an assembly test template for a reg-reg instruction.

    Loads src0/src1 into the given registers via csrr (with configurable
    nop padding after each step), runs `inst` into x3, then checks x3
    against `result` through proc2mngr.
    """
    template = '\n\n    # Move src0 value into register\n    csrr {reg_src0}, mngr2proc < {src0}\n    {nops_src0}\n\n    # Move src1 value into register\n    csrr {reg_src1}, mngr2proc < {src1}\n    {nops_src1}\n\n    # Instruction under test\n    {inst} x3, {reg_src0}, {reg_src1}\n    {nops_dest}\n\n    # Check the result\n    csrw proc2mngr, x3 > {result}\n\n  '
    # Explicit field mapping instead of **locals() so the template's inputs
    # are visible at a glance.
    fields = {
        'reg_src0': reg_src0,
        'reg_src1': reg_src1,
        'inst': inst,
        'src0': src0,
        'src1': src1,
        'result': result,
        'nops_src0': gen_nops(num_nops_src0),
        'nops_src1': gen_nops(num_nops_src1),
        'nops_dest': gen_nops(num_nops_dest),
    }
    return template.format(**fields)
class GINEConvLayer(nn.Module):
    """Graph Isomorphism Network layer with edge features (GINE).

    Wraps ``pyg_nn.GINEConv`` around a two-layer MLP and applies
    ReLU + dropout, with an optional residual connection.
    """

    def __init__(self, dim_in, dim_out, dropout, residual):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dropout = dropout
        self.residual = residual
        mlp = nn.Sequential(
            pyg_nn.Linear(dim_in, dim_out),
            nn.ReLU(),
            pyg_nn.Linear(dim_out, dim_out),
        )
        self.model = pyg_nn.GINEConv(mlp)

    def forward(self, batch):
        """Update batch.x in place from node features + edge attributes."""
        shortcut = batch.x
        out = self.model(batch.x, batch.edge_index, batch.edge_attr)
        out = F.relu(out)
        out = F.dropout(out, p=self.dropout, training=self.training)
        if self.residual:
            out = shortcut + out
        batch.x = out
        return batch
class TrainOptions():
    """Command-line options for E2Style training.

    Builds an ArgumentParser with all training hyper-parameters; call
    `parse()` to obtain the parsed namespace.
    """

    def __init__(self):
        self.parser = ArgumentParser()
        self.initialize()

    @staticmethod
    def _str2bool(value):
        """Parse a boolean command-line value.

        argparse's `type=bool` is a well-known trap: bool('False') is True
        because any non-empty string is truthy.  Accept the usual spellings
        and raise ValueError otherwise (argparse reports ValueError as an
        invalid argument).
        """
        if isinstance(value, bool):
            return value
        lowered = str(value).lower()
        if lowered in ('true', '1', 'yes', 'y'):
            return True
        if lowered in ('false', '0', 'no', 'n'):
            return False
        raise ValueError('Boolean value expected, got %r' % (value,))

    def initialize(self):
        """Register all command-line arguments."""
        self.parser.add_argument('--exp_dir', type=str, help='Path to experiment output directory')
        self.parser.add_argument('--dataset_type', default='ffhq_encode', type=str, help='Type of dataset/experiment to run')
        self.parser.add_argument('--training_stage', default=1, type=int, help='Training the E2Style encoder for stage i')
        # BUG FIX: was `type=bool`, under which '--is_training False' parsed as True.
        self.parser.add_argument('--is_training', default=True, type=self._str2bool, help='Training or testing')
        self.parser.add_argument('--input_nc', default=3, type=int, help='Number of input image channels to the E2Style encoder')
        self.parser.add_argument('--label_nc', default=0, type=int, help='Number of input label channels to the E2Style encoder')
        self.parser.add_argument('--batch_size', default=4, type=int, help='Batch size for training')
        self.parser.add_argument('--test_batch_size', default=2, type=int, help='Batch size for testing and inference')
        self.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers')
        self.parser.add_argument('--test_workers', default=2, type=int, help='Number of test/inference dataloader workers')
        self.parser.add_argument('--learning_rate', default=0.0001, type=float, help='Optimizer learning rate')
        self.parser.add_argument('--optim_name', default='ranger', type=str, help='Which optimizer to use')
        # BUG FIX: was `type=bool` (same pitfall as --is_training).
        self.parser.add_argument('--train_decoder', default=False, type=self._str2bool, help='Whether to train the decoder model')
        self.parser.add_argument('--start_from_latent_avg', action='store_true', help='Whether to add average latent vector to generate codes from encoder.')
        self.parser.add_argument('--learn_in_w', action='store_true', help='Whether to learn in w space insteaf of w+')
        self.parser.add_argument('--lpips_lambda', default=0.8, type=float, help='LPIPS loss multiplier factor')
        self.parser.add_argument('--id_lambda', default=0.1, type=float, help='ID loss multiplier factor')
        self.parser.add_argument('--parse_lambda', default=1.0, type=float, help='Mulit-Parse loss multiplier factor')
        self.parser.add_argument('--l2_lambda', default=1.0, type=float, help='L2 loss multiplier factor')
        self.parser.add_argument('--w_norm_lambda', default=0, type=float, help='W-norm loss multiplier factor')
        self.parser.add_argument('--lpips_lambda_crop', default=0, type=float, help='LPIPS loss multiplier factor for inner image region')
        self.parser.add_argument('--l2_lambda_crop', default=0, type=float, help='L2 loss multiplier factor for inner image region')
        self.parser.add_argument('--stylegan_weights', default=model_paths['stylegan_ffhq'], type=str, help='Path to StyleGAN model weights')
        self.parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to E2Style model checkpoint')
        self.parser.add_argument('--max_steps', default=500000, type=int, help='Maximum number of training steps')
        self.parser.add_argument('--image_interval', default=100, type=int, help='Interval for logging train images during training')
        self.parser.add_argument('--board_interval', default=50, type=int, help='Interval for logging metrics to tensorboard')
        self.parser.add_argument('--val_interval', default=1000, type=int, help='Validation interval')
        self.parser.add_argument('--save_interval', default=None, type=int, help='Model checkpoint interval')
        self.parser.add_argument('--resize_factors', type=str, default=None, help='For super-res, comma-separated resize factors to use for inference.')

    def parse(self):
        """Parse sys.argv and return the options namespace."""
        opts = self.parser.parse_args()
        return opts
def parse_file(file_name):
    """Summarize per-protocol packet lengths from a CSV capture file.

    The CSV is expected to have a header row; column index 4 holds the
    protocol key and column index 5 the packet length.  The file is deleted
    after parsing.

    Returns:
        dict mapping protocol -> [packet_count, mean_packet_length, total_bytes]
        (mean is truncated to int, matching the original behavior).
    """
    prot_dict = {}
    with open(file_name) as csvfile:
        reader = csv.reader(csvfile)
        next(reader, None)  # skip header
        # Single pass: the original implementation read the file twice, once
        # just to (re)create empty lists per key — setdefault does both at once.
        for row in reader:
            prot_dict.setdefault(row[4], []).append(int(row[5]))
    for (key, lengths) in prot_dict.items():
        total = sum(lengths)
        # Replace the raw length list with [count, mean length, total bytes].
        prot_dict[key] = [len(lengths), int(total / len(lengths)), total]
    os.remove(file_name)
    return prot_dict
class GetChatAdminsWithInviteLinks():
    """Pyrogram client mixin exposing `get_chat_admins_with_invite_links`."""

    async def get_chat_admins_with_invite_links(self: 'pyrogram.Client', chat_id: Union[(int, str)]):
        """Return the chat admins that have created invite links.

        Parameters:
            chat_id: unique identifier (int) or username (str) of the chat.

        Returns:
            ``types.List`` of ``types.ChatAdminWithInviteLinks``.
        """
        # Raw MTProto call; resolve_peer maps chat_id to an InputPeer first.
        r = (await self.invoke(raw.functions.messages.GetAdminsWithInvites(peer=(await self.resolve_peer(chat_id)))))
        # Index returned users by id so _parse can look up each admin's user object.
        users = {i.id: i for i in r.users}
        return types.List((types.ChatAdminWithInviteLinks._parse(self, admin, users) for admin in r.admins))
class AdaptiveDiffusionPipeline():
    """Adaptive student/teacher text-to-image diffusion pipeline.

    A cheap `student` model renders every prompt; a reward `estimator`
    scores the result, and only images scoring below a chosen percentile
    threshold are refined by the more expensive `teacher` model (img2img).
    """

    def __init__(self, estimator, student, teacher):
        self.estimator = estimator
        # Percentile -> score-threshold map; filled by calc_score_percentiles().
        self.score_percentiles = None
        self.student = student
        self.teacher = teacher

    def calc_score_percentiles(self, file_path, n_samples, num_inference_steps_student, prompts_path=None):
        """Load cached score percentiles from `file_path`, or compute them.

        When computing: renders `n_samples` prompts (from the `caption`
        column of the CSV at `prompts_path`) with the student, scores each
        with the estimator, and stores the 10th..90th percentiles of the
        scores as JSON at `file_path`.
        """
        if os.path.exists(file_path):
            print(f'Loading score percentiles from {file_path}')
            with open(f'{file_path}') as f:
                data = json.load(f)
            self.score_percentiles = {}
            # JSON object keys are strings; convert back to int percentiles.
            for key in data:
                self.score_percentiles[int(key)] = data[key]
        else:
            print(f'Calculating score percentiles on {n_samples} samples and saving as {file_path}')
            prompts = list(pd.read_csv(prompts_path)['caption'])[:n_samples]
            scores = []
            for prompt in prompts:
                student_out = self.student(prompt=prompt, num_inference_steps=num_inference_steps_student, guidance_scale=0.0).images[0]
                score = self.estimator.score(prompt, student_out)
                scores.append(score)
            score_percentiles = {}
            k_list = [10, 20, 30, 40, 50, 60, 70, 80, 90]
            for k in k_list:
                score_percentiles[k] = np.percentile(scores, k)
            self.score_percentiles = score_percentiles
            with open(f'{file_path}', 'w') as fp:
                json.dump(self.score_percentiles, fp)

    def __call__(self, prompt, num_inference_steps_student=2, student_guidance=0.0, num_inference_steps_teacher=4, teacher_guidance=8.0, sigma=0.4, k=50, seed=0):
        """Generate an image for `prompt`, escalating to the teacher if needed.

        The student renders first; if the estimator's reward falls below the
        k-th percentile threshold, the teacher refines the student image via
        img2img with strength `sigma`.  Requires calc_score_percentiles()
        to have been called first.
        """
        generator = torch.Generator(device='cuda').manual_seed(seed)
        # NOTE(review): presumably sized so ~num_inference_steps_teacher
        # effective steps remain after img2img crops the schedule by
        # `strength=sigma` — confirm against the teacher pipeline.
        num_all_steps = int(((num_inference_steps_teacher / sigma) + 1))
        chosen_threshold = self.score_percentiles[k]
        student_out = self.student(prompt=prompt, num_inference_steps=num_inference_steps_student, generator=generator, guidance_scale=student_guidance).images[0].resize((1024, 1024))
        reward = self.estimator.score(prompt, student_out)
        if (reward < chosen_threshold):
            final_out = self.teacher(prompt=prompt, image=student_out, num_inference_steps=num_all_steps, guidance_scale=teacher_guidance, strength=sigma).images[0]
        else:
            final_out = student_out
        return final_out
class DropEmAndF1(object):
    """Running exact-match / F1 accumulator for the DROP dataset.

    Each call scores one prediction against all gold annotations (taking
    the best match) and adds it to the running totals; `get_metric`
    returns the averages.
    """

    def __init__(self) -> None:
        self.reset()

    def __call__(self, prediction: Union[(str, List)], ground_truths: List):
        """Score one prediction against every gold annotation and accumulate."""
        gold_strings = [answer_json_to_strings(annotation)[0] for annotation in ground_truths]
        (em, f1) = metric_max_over_ground_truths(drop_em_and_f1, prediction, gold_strings)
        self._total_em += em
        self._total_f1 += f1
        self._count += 1

    def get_metric(self, reset: bool=False) -> Tuple[(float, float)]:
        """Return (exact_match, f1) averaged over all scored predictions."""
        if self._count > 0:
            em_avg = self._total_em / self._count
            f1_avg = self._total_f1 / self._count
        else:
            em_avg = 0
            f1_avg = 0
        if reset:
            self.reset()
        return (em_avg, f1_avg)

    def reset(self):
        """Clear all accumulated statistics."""
        self._total_em = 0.0
        self._total_f1 = 0.0
        self._count = 0

    def __str__(self):
        return f'DropEmAndF1(em={self._total_em}, f1={self._total_f1})'
class TolerantPullParser(_AbstractParser, sgmllib.SGMLParser):
    """Pull parser built on sgmllib's error-tolerant SGML parser.

    NOTE(review): `sgmllib` only exists on Python 2 — this block is from a
    legacy codebase.  Unknown tags are queued as Token objects on the
    shared token stack instead of raising.
    """

    def __init__(self, *args, **kwds):
        sgmllib.SGMLParser.__init__(self)
        _AbstractParser.__init__(self, *args, **kwds)

    def unknown_starttag(self, tag, attrs):
        # Unescape entity references in attribute values before queueing.
        attrs = self.unescape_attrs(attrs)
        self._tokenstack.append(Token('starttag', tag, attrs))

    def unknown_endtag(self, tag):
        self._tokenstack.append(Token('endtag', tag))
class proc_t(ctypes.Structure):
    """ctypes mirror of the XNU kernel ``struct proc`` (64-bit layout).

    Used with the Qiling emulator: `ql.mem` reads/writes the raw structure
    at `base` in emulated guest memory.  The field list must match the
    kernel's layout byte-for-byte, so do not reorder or retype fields.
    """

    class lck_spin_t(ctypes.Structure):
        # Opaque kernel spinlock storage (10 machine words).
        _fields_ = (('opaque', (ctypes.c_ulong * 10)),)
    _fields_ = (('p_list', list_entry), ('p_pid', ctypes.c_int32), ('task', POINTER64), ('p_pptr', POINTER64), ('p_ppid', ctypes.c_int32), ('p_pgrpid', ctypes.c_int32), ('p_uid', ctypes.c_uint32), ('p_gid', ctypes.c_uint32), ('p_ruid', ctypes.c_uint32), ('p_rgid', ctypes.c_uint32), ('p_svuid', ctypes.c_uint32), ('p_svgid', ctypes.c_uint32), ('p_uniqueid', ctypes.c_uint64), ('p_puniqueid', ctypes.c_uint64), ('p_mlock', lck_mtx_t), ('p_stat', ctypes.c_char), ('p_shutdownstate', ctypes.c_char), ('p_kdebug', ctypes.c_char), ('p_btrace', ctypes.c_char), ('p_pglist', list_entry), ('p_sibling', list_entry), ('p_children', list_head), ('p_uthlist', tailq_head), ('p_hash', list_entry), ('p_evlist', tailq_head), ('p_fdmlock', lck_mtx_t), ('p_ucred_mlock', lck_mtx_t), ('p_ucred', POINTER64), ('p_fd', POINTER64), ('p_stats', POINTER64), ('p_limit', POINTER64), ('p_sigacts', POINTER64), ('p_siglist', ctypes.c_int), ('p_slock', lck_spin_t), ('p_olimit', POINTER64), ('p_flag', ctypes.c_uint), ('p_lflag', ctypes.c_uint), ('p_listflag', ctypes.c_uint), ('p_ladvflag', ctypes.c_uint), ('p_refcount', ctypes.c_int), ('p_childrencnt', ctypes.c_int), ('p_parentref', ctypes.c_int), ('p_oppid', ctypes.c_int32), ('p_xstat', ctypes.c_uint), ('p_xhighbits', ctypes.c_uint8), ('p_realtimer', itimerval_t), ('p_rtime', timeval_t), ('p_vtimer_user', itimerval_t), ('p_vtimer_prof', itimerval_t), ('p_rlim_cpu', timeval_t), ('p_debugger', ctypes.c_int), ('sigwait', ctypes.c_int), ('sigwait_thread', POINTER64), ('exit_thread', POINTER64), ('p_vforkcnt', ctypes.c_int), ('p_vforkact', POINTER64), ('p_fpdrainwait', ctypes.c_int), ('p_contproc', ctypes.c_int32), ('si_pid', ctypes.c_int32), ('si_status', ctypes.c_uint), ('si_code', ctypes.c_uint), ('si_uid', ctypes.c_uint32), ('vm_shm', POINTER64), ('p_dtrace_argv', ctypes.c_uint64), ('p_dtrace_envp', ctypes.c_uint64), ('p_dtrace_sprlock', lck_mtx_t), ('p_dtrace_probes', ctypes.c_int), ('p_dtrace_count', ctypes.c_uint), ('p_dtrace_stop', ctypes.c_uint8), 
    ('p_dtrace_ptss_pages', POINTER64), ('p_dtrace_ptss_free_list', POINTER64), ('p_dtrace_helpers', POINTER64), ('p_dtrace_lazy_dofs', POINTER64), ('p_argslen', ctypes.c_uint), ('p_argc', ctypes.c_int), ('user_stack', ctypes.c_uint64), ('p_textvp', POINTER64), ('p_textoff', ctypes.c_int64), ('p_sigmask', ctypes.c_uint32), ('p_sigignore', ctypes.c_uint32), ('p_sigcatch', ctypes.c_uint32), ('p_priority', ctypes.c_ubyte), ('p_resv0', ctypes.c_ubyte), ('p_nice', ctypes.c_char), ('p_resv1', ctypes.c_ubyte), ('p_comm', (ctypes.c_char * (16 + 1))), ('p_name', (ctypes.c_char * ((2 * 16) + 1))), ('p_pgrp', POINTER64), ('p_csflags', ctypes.c_uint32), ('p_pcaction', ctypes.c_uint32), ('p_uuid', (ctypes.c_uint8 * 16)), ('p_cputype', ctypes.c_int), ('p_cpusubtype', ctypes.c_int), ('p_aio_total_count', ctypes.c_int), ('p_aio_active_count', ctypes.c_int), ('p_aio_activeq', tailq_head), ('p_aio_doneq', tailq_head), ('p_klist', slist_head), ('p_ru', POINTER64), ('p_sigwaitcnt', ctypes.c_int), ('p_signalholder', POINTER64), ('p_transholder', POINTER64), ('p_acflag', ctypes.c_ushort), ('p_vfs_iopolicy', ctypes.c_ushort), ('p_threadstart', ctypes.c_uint64), ('p_wqthread', ctypes.c_uint64), ('p_pthsize', ctypes.c_int), ('p_pth_tsd_offset', ctypes.c_uint32), ('p_stack_addr_hint', ctypes.c_uint64), ('p_wqptr', POINTER64), ('p_start', timeval_t), ('p_rcall', POINTER64), ('p_ractive', ctypes.c_int), ('p_idversion', ctypes.c_int), ('p_pthhash', POINTER64), ('was_throttled', ctypes.c_uint64), ('did_throttle', ctypes.c_uint64), ('p_dispatchqueue_offset', ctypes.c_uint64), ('p_dispatchqueue_serialno_offset', ctypes.c_uint64), ('p_return_to_kernel_offset', ctypes.c_uint64), ('p_mach_thread_self_offset', ctypes.c_uint64), ('vm_pressure_last_notify_tstamp', timeval_t), ('p_memstat_list', tailq_entry), ('p_memstat_state', ctypes.c_uint32), ('p_memstat_effectivepriority', ctypes.c_int32), ('p_memstat_requestedpriority', ctypes.c_int32), ('p_memstat_dirty', ctypes.c_uint32), ('p_memstat_userdata', 
    ctypes.c_uint64), ('p_memstat_idledeadline', ctypes.c_uint64), ('p_memstat_idle_start', ctypes.c_uint64), ('p_memstat_idle_delta', ctypes.c_uint64), ('p_memstat_memlimit', ctypes.c_int32), ('p_memstat_memlimit_active', ctypes.c_int32), ('p_memstat_memlimit_inactive', ctypes.c_int32), ('p_responsible_pid', ctypes.c_int32), ('p_user_faults', ctypes.c_uint32), ('p_exit_reason', POINTER64), ('p_user_data', ctypes.c_uint64))

    def __init__(self, ql, base):
        # ql: Qiling instance; base: guest address of the structure.
        self.ql = ql
        self.base = base

    def updateToMem(self):
        """Write this structure's bytes back to emulated memory at `base`."""
        self.ql.mem.write(self.base, bytes(self))

    def loadFromMem(self):
        """Read the structure from emulated memory; return a fresh instance."""
        data = self.ql.mem.read(self.base, ctypes.sizeof(self))
        newObj = type(self).from_buffer(data)
        newObj.ql = self.ql
        newObj.base = self.base
        return newObj
class Effect2143(BaseEffect):
    """shipBonusMC2: Minmatar Cruiser bonus to Target Painter signature radius.

    Passive hull effect.  NOTE: by this codebase's convention `handler` is
    called unbound, so it deliberately takes no `self`.
    """
    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # Boost signatureRadiusBonus of all fitted Target Painter modules by
        # the hull's shipBonusMC2 attribute, scaled per Minmatar Cruiser skill.
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Target Painter')), 'signatureRadiusBonus', ship.getModifiedItemAttr('shipBonusMC2'), skill='Minmatar Cruiser', **kwargs)
class CommunityManagersTest(TestCase):
    """Checks the custom Post model managers (all/public/private)."""

    def test_post_manager(self):
        hidden = Post.objects.create(content='private post', media_type=Post.MEDIA_TEXT, status=Post.STATUS_PRIVATE)
        visible = Post.objects.create(content='public post', media_type=Post.MEDIA_TEXT, status=Post.STATUS_PUBLIC)
        identity = lambda obj: obj
        # NOTE(review): all() appears to order newest-first here — confirm
        # against the model's Meta ordering.
        self.assertQuerysetEqual(Post.objects.all(), [visible, hidden], identity)
        self.assertQuerysetEqual(Post.objects.public(), [visible], identity)
        self.assertQuerysetEqual(Post.objects.private(), [hidden], identity)
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Build and initialize a generator network.

    Args:
        input_nc / output_nc: input and output channel counts.
        ngf: base number of generator filters.
        which_model_netG: one of 'resnet_9blocks', 'resnet_6blocks',
            'unet_128', 'unet_256'.
        norm: normalization layer type name.
        use_dropout: enable dropout inside the generator.
        init_type / init_gain / gpu_ids: forwarded to init_net.

    Raises:
        NotImplementedError: for an unrecognized model name.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    # Lazy builders so only the selected architecture is constructed.
    builders = {
        'resnet_9blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9),
        'resnet_6blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6),
        'unet_128': lambda: UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout),
        'unet_256': lambda: UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout),
    }
    if which_model_netG not in builders:
        raise NotImplementedError(('Generator model name [%s] is not recognized' % which_model_netG))
    netG = builders[which_model_netG]()
    return init_net(netG, init_type, init_gain, gpu_ids)
class FileItemObject(ops.data.DszObject):
    """Wrapper around a Dsz file item, including its hash sub-objects."""

    def __init__(self, dszpath='', cmdid=None, parent=None, debug=False):
        self.dszparent = parent
        self.opsclass = fileitem
        self.update(dszpath, cmdid, debug)

    def update(self, dszpath='', cmdid=None, debug=False):
        """Re-read the file's hash sub-objects from the Dsz data store."""
        self.filehash = []
        for ahash in dsz.cmd.data.ObjectGet(dszpath, 'hash', dsz.TYPE_OBJECT):
            # BUG FIX: `debug` was previously hard-coded to True here,
            # silently ignoring the caller's parameter.
            self.filehash.append(ops.data.DszObject(ahash, cmdid, filehash, 'dir', debug=debug))

    def _getFullPath(self):
        # Full path = parent directory path + this item's name.
        return os.path.join(self.dszparent.path, self.name)
    fullpath = property(_getFullPath)
def mean_tour_len_edges(x_edges_values, y_pred_edges):
    """Mean predicted tour length over a batch of graphs.

    Args:
        x_edges_values: edge distance tensor of shape (batch, n, n).
        y_pred_edges: per-edge class logits of shape (batch, n, n, voc);
            the argmax over the last dimension selects tour edges (class 1).

    Returns:
        float: average tour length.  The adjacency matrix is symmetric, so
        every tour edge is counted twice — hence the division by 2.
    """
    probs = F.softmax(y_pred_edges, dim=-1)
    adjacency = probs.argmax(dim=3)
    weighted = adjacency.float() * x_edges_values.float()
    # Sum the selected edge weights per graph, halving the double count.
    tour_lengths = weighted.sum(dim=(1, 2)) / 2
    return tour_lengths.sum().to(dtype=torch.float).item() / tour_lengths.numel()
class TokenNetworkState(State):
    """State of one token network and this node's channels within it.

    NOTE(review): instances appear to be created through dataclass
    machinery (`field(...)` defaults, `__post_init__`) — the decorator is
    not visible in this chunk; confirm upstream.
    """
    address: TokenNetworkAddress
    token_address: TokenAddress
    # channel id -> channel state; excluded from repr to keep logs readable.
    channelidentifiers_to_channels: Dict[(ChannelID, NettingChannelState)] = field(repr=False, default_factory=dict)
    # partner address -> ids of all channels with that partner.
    partneraddresses_to_channelidentifiers: Dict[(Address, List[ChannelID])] = field(repr=False, default_factory=(lambda : defaultdict(list)))

    def __post_init__(self) -> None:
        typecheck(self.address, T_Address)
        typecheck(self.token_address, T_Address)
        # Re-wrap as a defaultdict: a deserialized instance may arrive with a
        # plain dict, and later code relies on auto-created empty lists.
        self.partneraddresses_to_channelidentifiers = defaultdict(list, self.partneraddresses_to_channelidentifiers)
class GuiToggleBoosterStatesCommand(wx.Command):
    """Undoable GUI command that toggles the state of booster(s) on a fit."""

    def __init__(self, fitID, mainPosition, positions):
        wx.Command.__init__(self, True, 'Toggle Booster States')
        self.internalHistory = InternalCommandHistory()
        self.fitID = fitID
        self.mainPosition = mainPosition
        self.positions = positions

    def _recalcAndNotify(self):
        # Shared post-processing for Do/Undo (previously duplicated in both):
        # persist, recalculate the fit, and tell the main frame to refresh.
        eos.db.flush()
        sFit = Fit.getInstance()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), GE.FitChanged(fitIDs=(self.fitID,)))

    def Do(self):
        """Apply the toggle via the calc-layer command; return success."""
        cmd = CalcToggleBoosterStatesCommand(fitID=self.fitID, mainPosition=self.mainPosition, positions=self.positions)
        success = self.internalHistory.submit(cmd)
        self._recalcAndNotify()
        return success

    def Undo(self):
        """Revert everything submitted through the internal history."""
        success = self.internalHistory.undoAll()
        self._recalcAndNotify()
        return success
def test_get_children() -> None:
    """PEP 695 type parameters appear among `get_children()` of defs/classes."""
    func_node = extract_node('def func[T]() -> T: ...')
    func_children = tuple(func_node.get_children())
    # Index 2 is expected to be the TypeVar node — position follows astroid's
    # child ordering for generic functions.
    assert isinstance(func_children[2], TypeVar)
    class_node = extract_node('class MyClass[T]: ...')
    class_children = tuple(class_node.get_children())
    # For a generic class the type parameter is the first child.
    assert isinstance(class_children[0], TypeVar)
class MainWindow(QMainWindow, Ui_MainWindow):
    """Main application window for ZapZap (WhatsApp desktop client).

    Hosts the web pages (`Home`) and the settings panel in a stacked widget,
    owns the tray icon, and mediates theme switching, notifications and
    window-state persistence.
    """
    isFullScreen = False
    isHideMenuBar = False

    def __init__(self, parent=None):
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.app = parent
        self.settings = QSettings(zapzap.__appname__, zapzap.__appname__)
        self.scd = None  # custom client-side decoration, created on demand
        MenuBar(self)
        self.tray = TrayIcon(self)
        self.zapHome = Home()
        self.zapSettings = Settings()
        # Wire settings-panel signals to their window-level handlers.
        self.zapSettings.emitDisableUser.connect(self.emitDisableUser)
        self.zapSettings.emitDeleteUser.connect(self.emitDeleteUser)
        self.zapSettings.emitEditUser.connect(self.emitEditUser)
        self.zapSettings.emitNewtUser.connect(self.emitNewUser)
        self.zapSettings.emitSetSpellChecker.connect(self.emitSetSpellChecker)
        self.zapSettings.emitDisableSpellChecker.connect(self.emitDisableSpellChecker)
        self.zapSettings.emitNotifications.connect(self.emitNotifications)
        self.zapSettings.emitQuit.connect((lambda x=None: self.closeEvent(x)))
        self.zapSettings.emitGoHome.connect((lambda : self.main_stacked.setCurrentIndex(0)))
        self.zapSettings.emitKeepBackground.connect(self.actionHide_on_close.setChecked)
        self.zapSettings.emitDisableTrayIcon.connect(self.tray.setVisible)
        self.zapSettings.emitSetHideMenuBar.connect(self.setHideMenuBar)
        self.zapSettings.emitUpdateUIDecoration.connect(self.updateSCD)
        self.zapSettings.emitUpdateTheme.connect(self.setThemeApp)
        self.zapSettings.updateUsersShortcuts()
        self.main_stacked.insertWidget(0, self.zapHome)
        self.main_stacked.insertWidget(1, self.zapSettings)
        # Poll the system theme once a second while the theme mode is 'auto'.
        self.timer = QTimer()
        self.timer.setInterval(1000)
        self.timer.timeout.connect(self.syncThemeSys)
        self.current_theme = -1
        self.setZapDecoration()

    def emitDisableSpellChecker(self, flag):
        self.zapHome.disableSpellChecker(flag)

    def emitSetSpellChecker(self, lang):
        self.zapHome.setSpellChecker(lang)

    def emitNewUser(self, user):
        self.zapHome.addNewUser(user)
        self.zapSettings.updateUsersShortcuts()

    def emitDeleteUser(self, user):
        self.zapHome.delUserPage(user)
        self.zapSettings.updateUsersShortcuts()

    def emitDisableUser(self, user):
        self.zapHome.disableUserPage(user)
        self.zapSettings.updateUsersShortcuts()

    def emitEditUser(self, user):
        self.zapHome.editUserPage(user)

    def emitNotifications(self):
        """Mirror the unread-notification count in the title and tray icon."""
        qtd = self.zapHome.getSizeNotifications()
        if qtd > 0:
            self.setWindowTitle(zapzap.__appname__ + ' (' + str(qtd) + ')')
        else:
            self.setWindowTitle(zapzap.__appname__)
        self.tray.showIconNotification(qtd)

    def actionEsc(self, closeAll=False):
        """Esc: close conversation(s) on the home page, or leave settings."""
        if self.main_stacked.currentIndex() == 0:
            self.zapHome.closeConversation(closeAll)
        else:
            self.main_stacked.setCurrentIndex(0)

    def updateSCD(self):
        if self.scd is not None:
            self.scd.headDefinitions()

    def setZapDecoration(self):
        """Enable the custom client-side decoration when configured."""
        self.headbar.hide()
        if self.settings.value('system/zapzap_decoration', False, bool):
            self.scd = UIDecoration(self)
            self.zFile.setMenu(self.menuFile)
            self.zView.setMenu(self.menuView)
            self.zChat.setMenu(self.menuChat)
            self.zHelp.setMenu(self.menuHelp)

    def syncThemeSys(self):
        """Timer slot: follow system theme changes in 'auto' mode."""
        theme = getSystemTheme()
        if self.current_theme != theme:
            self.current_theme = theme
            self.setThemeApp('auto')

    def setThemeApp(self, theme):
        """Apply 'light'/'dark'/'auto' theme; 'auto' keeps polling the system."""
        if theme == 'auto':
            theme = getSystemTheme()
            self.timer.start()
        else:
            self.timer.stop()
        if theme == 'light':
            self.app.setStyleSheet(getThemeLight())
            self.zapHome.setThemePages(theme)
        elif theme == 'dark':
            self.app.setStyleSheet(getThemeDark())
            self.zapHome.setThemePages(theme)

    def xdgOpenChat(self, url):
        self.zapHome.openChat(url)

    def openNewChatPopup(self):
        """Ask for a phone number and open a WhatsApp chat with it."""
        dialog = OpenChatPopup(self)
        r = dialog.exec_()
        if r == 1:
            number = dialog.numberPhone.text()
            if number != '':
                # BUG FIX: the original URL literal was corrupted (unterminated
                # string).  wa.me is WhatsApp's click-to-chat endpoint — confirm
                # against upstream zapzap sources.
                url = 'https://wa.me/' + number
                self.zapHome.openChat(url)

    def reload_Service(self):
        self.zapHome.reloadPage()

    def openTraySettings(self):
        """Tray action: bring the window forward and show the settings page."""
        if self.app.activeWindow() is None:
            self.show()
            self.app.activateWindow()
        self.main_stacked.setCurrentIndex(1)

    def openSettings(self):
        """Toggle between the home page (index 0) and settings (index 1)."""
        if self.main_stacked.currentIndex() == 1:
            self.main_stacked.setCurrentIndex(0)
        else:
            self.main_stacked.setCurrentIndex(1)
            self.zapSettings.goPageHome()

    def openDonations(self):
        self.main_stacked.setCurrentIndex(1)
        self.zapSettings.goPageDonations()

    def openAbout_Zapzap(self):
        self.main_stacked.setCurrentIndex(1)
        self.zapSettings.goPageHelp()

    def loadSettings(self):
        """Restore theme, menu-bar state and window geometry from QSettings."""
        theme_mode = self.settings.value('system/theme', 'auto', str)
        self.setThemeApp(theme_mode)
        self.isHideMenuBar = self.settings.value('main/hideMenuBar', False, bool)
        self.setHideMenuBar()
        self.actionHide_on_close.setChecked(self.settings.value('system/keep_background', True, bool))
        self.restoreGeometry(self.settings.value('main/geometry', QByteArray()))
        self.restoreState(self.settings.value('main/windowState', QByteArray()))

    def saveSettings(self):
        self.settings.setValue('main/geometry', self.saveGeometry())
        self.settings.setValue('main/windowState', self.saveState())
        self.zapHome.saveSettings()

    def closeEvent(self, event):
        """Hide to tray when 'keep in background' is set; otherwise quit."""
        isBack = self.settings.value('system/keep_background', True, bool)
        if isBack and event:
            self.actionEsc(closeAll=True)
            self.hide()
            event.ignore()
        else:
            self.saveSettings()
            self.hide()
            self.app.quit()

    def onTrayIconActivated(self, reason):
        if (reason == QSystemTrayIcon.ActivationReason.Trigger) or (reason == QSystemTrayIcon.ActivationReason.MiddleClick):
            self.on_show()

    def on_show(self):
        """Tray click: toggle window visibility; always land on the home page."""
        if self.app.activeWindow() is not None:
            self.hide()
        else:
            self.show()
            self.raise_()
            self.app.activateWindow()
            self.main_stacked.setCurrentIndex(0)

    def setDefault_size_page(self):
        self.zapHome.setZoomFactor()

    def zoomIn(self):
        self.zapHome.setZoomFactor(0.1)

    def zoomOut(self):
        self.zapHome.setZoomFactor(-0.1)

    def setFullSreen(self):
        # Method name typo kept: it is part of the public interface.
        if not self.isFullScreen:
            self.showFullScreen()
        else:
            self.showNormal()
        self.isFullScreen = not self.isFullScreen

    def setHideMenuBar(self):
        """Toggle menu-bar visibility (custom decoration uses zapBoxMenu)."""
        if self.settings.value('system/zapzap_decoration', False, bool):
            self.menubar.setMaximumHeight(0)
            if self.isHideMenuBar:
                self.zapBoxMenu.hide()
            else:
                self.zapBoxMenu.show()
        else:
            if self.isHideMenuBar:
                self.menubar.setMaximumHeight(0)
            else:
                # BUG FIX: setMaximumHeight() requires an argument; restore the
                # bar by lifting the cap to QWIDGETSIZE_MAX (16777215).
                self.menubar.setMaximumHeight(16777215)
        # NOTE(review): the setting is saved *before* the flag flips below, so
        # the stored value lags one toggle behind — behavior preserved as-is.
        self.settings.setValue('main/hideMenuBar', self.isHideMenuBar)
        self.zapSettings.menubar.setChecked(self.isHideMenuBar)
        self.isHideMenuBar = not self.isHideMenuBar
def preceding_text(pattern):
    """Return a cached Condition that is true when the text before the
    cursor on the current line matches `pattern`.

    Conditions are memoized per pattern in `_preceding_text_cache` so the
    same filter object is reused across key bindings.
    """
    cached = _preceding_text_cache.get(pattern)
    if cached is not None:
        return cached
    compiled = re.compile(pattern)

    def _preceding_text():
        app = get_app()
        before_cursor = app.current_buffer.document.current_line_before_cursor
        return bool(compiled.match(before_cursor))

    condition = Condition(_preceding_text)
    _preceding_text_cache[pattern] = condition
    return condition
class IntRangeTest(object):
    """Tests for inputs.int_range bounds checking."""

    def test_valid_range(self):
        validate = inputs.int_range(1, 5)
        assert validate(3) == 3

    def test_inclusive_range(self):
        # The upper bound itself is accepted.
        validate = inputs.int_range(1, 5)
        assert validate(5) == 5

    def test_lower(self):
        validate = inputs.int_range(0, 5)
        with pytest.raises(ValueError):
            validate(-1)

    def test_higher(self):
        validate = inputs.int_range(0, 5)
        with pytest.raises(ValueError):
            validate(6)

    def test_schema(self):
        expected = {'type': 'integer', 'minimum': 1, 'maximum': 5}
        assert inputs.int_range(1, 5).__schema__ == expected
def GetData(url):
    """GET `url` and return (status_code, body text).

    Returns the sentinel pair ('Timeout', 'Timeout') on connection
    failures or timeouts; other request exceptions propagate.
    """
    network_errors = (
        requests.exceptions.ReadTimeout,
        requests.exceptions.ConnectTimeout,
        requests.exceptions.ConnectionError,
    )
    try:
        resp = requests.get(url, headers=headers, timeout=5)
    except network_errors:
        return ('Timeout', 'Timeout')
    resp.encoding = 'utf-8'
    return (resp.status_code, resp.text)
class ArgSortOp(Op):
    """Graph op returning the indices that would sort a tensor along an axis.

    Mirrors ``numpy.argsort``; `kind` selects the sort algorithm and `order`
    the comparison fields for structured arrays.
    """
    __props__ = ('kind', 'order')

    def __init__(self, kind, order=None):
        self.kind = kind
        self.order = order

    def __str__(self):
        return (self.__class__.__name__ + f'{{{self.kind}, {self.order}}}')

    def make_node(self, input, axis=(- 1)):
        """Build the Apply node; output has the input's shape, dtype int64."""
        input = as_tensor_variable(input)
        axis = as_tensor_variable(axis)
        return Apply(self, [input, axis], [TensorType(dtype='int64', shape=input.type.shape)()])

    def perform(self, node, inputs, output_storage):
        """Execute via numpy.argsort, coercing axis to a plain int."""
        a = inputs[0]
        axis = inputs[1]
        if (axis is not None):
            # Reject fractional axes before the int() cast silently truncates.
            if (axis != int(axis)):
                raise ValueError('sort axis must be an integer or None')
            axis = int(axis)
        z = output_storage[0]
        z[0] = _asarray(np.argsort(a, axis, self.kind, self.order), dtype=node.outputs[0].dtype)

    def infer_shape(self, fgraph, node, inputs_shapes):
        if _variable_is_none(node.inputs[1]):
            # axis=None flattens the input, so the output is 1-D with as many
            # elements as the input has in total.
            return [(mul(*inputs_shapes[0]),)]
        assert (node.inputs[0].ndim == node.outputs[0].ndim)
        assert (inputs_shapes[1] == ())
        return [inputs_shapes[0]]

    def grad(self, inputs, output_grads):
        # argsort yields integer indices: the gradient w.r.t. the input is
        # zero, and it is undefined w.r.t. the (non-differentiable) axis.
        (inp, axis) = inputs
        inp_grad = inp.zeros_like()
        axis_grad = grad_undefined(self, 1, axis, 'argsort is not defined for non-integer axes so argsort(x, axis+eps) is undefined')
        return [inp_grad, axis_grad]
    # The following is a bare string statement in the original source
    # (a disabled R_op implementation); kept verbatim.
    '\n    def R_op(self, inputs, eval_points):\n        # R_op can receive None as eval_points.\n        # That mean there is no diferientiable path through that input\n        # If this imply that you cannot compute some outputs,\n        # return None for those.\n        if eval_points[0] is None:\n            return eval_points\n        return self.grad(inputs, eval_points)\n    '
def perm_entropy(x, order=3, delay=1, normalize=False):
    """Permutation entropy of a time series.

    Args:
        x: 1-D time series.
        order: embedding dimension (number of samples per pattern).
        delay: embedding delay; a list/array/range of delays averages the
            entropy over each delay value.
        normalize: divide by log2(order!) so the result lies in [0, 1].

    Returns:
        float: the (optionally normalized) permutation entropy.
    """
    if isinstance(delay, (list, np.ndarray, range)):
        # Average the entropy over each requested embedding delay.
        return np.mean([perm_entropy(x, order=order, delay=d, normalize=normalize) for d in delay])
    assert (delay > 0), 'delay must be greater than zero.'
    x = np.array(x)
    # Encode each embedded vector's ordinal pattern as a unique integer:
    # dot the argsort indices with powers of `order`.
    weights = np.power(order, range(order))
    patterns = _embed(x, order=order, delay=delay).argsort(kind='quicksort')
    codes = np.multiply(patterns, weights).sum(1)
    (_, counts) = np.unique(codes, return_counts=True)
    probs = np.true_divide(counts, counts.sum())
    pe = -_xlogx(probs).sum()
    if normalize:
        pe /= np.log2(factorial(order))
    return pe
def print_asm(asm_code):
    """Print assembly code, collapsing each run of blank lines to one.

    `asm_code` may be a single string or a list of strings; every string is
    split into lines before printing.
    """
    sources = [asm_code] if isinstance(asm_code, str) else asm_code
    lines = []
    for chunk in sources:
        lines.extend(chunk.splitlines())
    in_blank_run = False
    for line in lines:
        if line.strip():
            in_blank_run = False
            print(line)
        else:
            # Only the first blank line of a run is printed.
            if not in_blank_run:
                print(line)
            in_blank_run = True
class HP6633A(HP6632A):
    """Hewlett-Packard HP6633A DC power supply driver (HP6632A variant)."""

    def __init__(self, adapter, name='Hewlett Packard HP6633A', **kwargs):
        super().__init__(adapter, name, **kwargs)
        # NOTE(review): these three lists are computed but never used or
        # assigned to attributes — presumably they were meant to override the
        # parent's property validator ranges with HP6633A limits; confirm
        # against the HP6632A base class before relying on them.
        current_values = [0, limits['HP6633A']['Cur_lim']]
        OVP_values = [0, limits['HP6633A']['OVP_lim']]
        voltage_values = [0, limits['HP6633A']['Volt_lim']]
def get_files(path, relative_to='fairseq'):
    """Recursively list files under `path` with paths relative to `relative_to`.

    Symlinked directories are followed; compiled bytecode (*.pyc) is skipped.
    """
    collected = []
    for (dirpath, _dirs, filenames) in os.walk(path, followlinks=True):
        rel_dir = os.path.relpath(dirpath, relative_to)
        collected.extend(
            os.path.join(rel_dir, fname)
            for fname in filenames
            if not fname.endswith('.pyc')
        )
    return collected
class Exclusive(ContextDecorator):
    """Context manager/decorator serializing access to a wrapped object.

    A per-object reentrant lock is created lazily, keyed by id(wrapped),
    and shared by every Exclusive wrapping the same object.
    NOTE(review): locks are never evicted, and id() values can be reused
    once the wrapped object is garbage collected — confirm wrapped objects
    are long-lived.
    """
    _locks = {}
    _locks_creation_lock = threading.Lock()

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def get_lock(self):
        """Return (creating on first use) the RLock for the wrapped object."""
        key = id(self._wrapped)
        # Creation is guarded by a class-wide lock so two threads cannot
        # race to install different RLocks for the same object.
        with Exclusive._locks_creation_lock:
            return Exclusive._locks.setdefault(key, threading.RLock())

    def __enter__(self):
        self.get_lock().acquire()
        return self._wrapped

    def __exit__(self, *exc):
        self.get_lock().release()
def decode_raiden_event_to_internal(abi: ABI, chain_id: ChainID, log_event: LogReceipt) -> DecodedEvent:
    """Decode a raw web3 log entry into Raiden's internal DecodedEvent.

    Renames web3's camelCase metadata keys to snake_case and converts all
    address-valued event arguments to canonical (binary) form, dispatching
    on the event type.

    Raises:
        UnknownRaidenEventType: if the log matches no event in `abi`.
    """
    decoded_event = decode_event(abi, log_event)
    if (not decoded_event):
        raise UnknownRaidenEventType()
    # Copy into plain, mutable dicts before renaming keys in place.
    data = dict(decoded_event)
    args = dict(decoded_event['args'])
    data['args'] = args
    # snake_case replacements for web3's camelCase metadata fields.
    data['block_number'] = log_event['blockNumber']
    data['transaction_hash'] = log_event['transactionHash']
    data['block_hash'] = bytes(log_event['blockHash'])
    del data['blockNumber']
    del data['transactionHash']
    del data['blockHash']
    assert data['block_number'], 'The event must have the block_number'
    assert data['transaction_hash'], 'The event must have the transaction hash field'
    assert data['block_hash'], 'The event must have the block_hash'
    event = data['event']
    # Normalize the address-typed arguments of each known event type.
    if (event == EVENT_TOKEN_NETWORK_CREATED):
        args['token_network_address'] = to_canonical_address(args['token_network_address'])
        args['token_address'] = to_canonical_address(args['token_address'])
    elif (event == ChannelEvent.OPENED):
        args['participant1'] = to_canonical_address(args['participant1'])
        args['participant2'] = to_canonical_address(args['participant2'])
    elif (event == ChannelEvent.DEPOSIT):
        args['participant'] = to_canonical_address(args['participant'])
    elif (event == ChannelEvent.WITHDRAW):
        args['participant'] = to_canonical_address(args['participant'])
    elif (event == ChannelEvent.BALANCE_PROOF_UPDATED):
        args['closing_participant'] = to_canonical_address(args['closing_participant'])
    elif (event == ChannelEvent.CLOSED):
        args['closing_participant'] = to_canonical_address(args['closing_participant'])
    elif (event == ChannelEvent.UNLOCKED):
        args['receiver'] = to_canonical_address(args['receiver'])
        args['sender'] = to_canonical_address(args['sender'])
    elif (event == EVENT_REGISTERED_SERVICE):
        # Rename 'service' to the internal 'service_address' key as well.
        args['service_address'] = to_canonical_address(args.pop('service'))
        assert ('valid_till' in args), f"{EVENT_REGISTERED_SERVICE} without 'valid_till'"
    return DecodedEvent(chain_id=chain_id, originating_contract=to_canonical_address(log_event['address']), event_data=data, block_number=log_event['blockNumber'], block_hash=BlockHash(log_event['blockHash']), transaction_hash=TransactionHash(log_event['transactionHash']))
class PornovkaCz(BaseDownloader):
    """Pornovka.cz downloader plugin for pyLoad."""
    __name__ = 'PornovkaCz'
    __type__ = 'downloader'
    __version__ = '0.02'
    __status__ = 'testing'
    # BUG FIX: the original pattern literal was corrupted (unterminated
    # string, a syntax error).  Restored with a plausible site-URL regex —
    # confirm against the upstream plugin.
    __pattern__ = r'https?://(?:www\.)?pornovka\.cz/.+'
    __config__ = [('enabled', 'bool', 'Activated', True)]
    __description__ = 'Pornovka.cz downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('ondrej', '')]
    # Extracts the video title from the page's <h1>.
    NAME_PATTERN = '<h1>([^<]+)'

    def setup(self):
        self.resume_download = True
        self.multi_dl = True

    def process(self, pyfile):
        """Resolve the page -> data-url -> video url chain, then download."""
        pornovka_resp = self.load(pyfile.url)
        data_url = re.findall('data-url="([^"]+)', pornovka_resp)
        if (not data_url):
            self.error(self._('Data url not found'))
        data_resp = self.load(data_url[0])
        video_url = re.findall('src=.([^\'"]+).></video>', data_resp)
        if (not video_url):
            self.error(self._('Video url not found'))
        # File name = page title + the video file's extension.
        self.pyfile.name = re.search(self.NAME_PATTERN, pornovka_resp).group(1)
        self.pyfile.name += ('.' + video_url[0].split('.')[(- 1)])
        self.log_info(self._('Downloading file...'))
        self.download(video_url[0])
class SpanEntityScore(object):
    """Accumulates gold/predicted entity spans and reports precision, recall
    and F1, both overall and per entity type.

    A span is any hashable tuple whose first element is a label id resolvable
    through ``id2label``.
    """

    def __init__(self, id2label):
        self.id2label = id2label
        self.reset()

    def reset(self):
        """Clear all accumulated spans."""
        self.origins = []   # gold spans
        self.founds = []    # predicted spans
        self.rights = []    # predicted spans that match a gold span

    def compute(self, origin, found, right):
        """Return (recall, precision, f1) for the given counts, 0 on empty denominators."""
        recall = right / origin if origin != 0 else 0
        precision = right / found if found != 0 else 0
        if recall + precision == 0:
            f1 = 0.0
        else:
            f1 = 2 * precision * recall / (precision + recall)
        return recall, precision, f1

    def result(self):
        """Return (overall metrics dict, per-entity-type metrics dict)."""
        per_class = {}
        gold_counts = Counter(self.id2label[span[0]] for span in self.origins)
        pred_counts = Counter(self.id2label[span[0]] for span in self.founds)
        hit_counts = Counter(self.id2label[span[0]] for span in self.rights)
        for label, gold in gold_counts.items():
            pred = pred_counts.get(label, 0)
            hit = hit_counts.get(label, 0)
            recall, precision, f1 = self.compute(gold, pred, hit)
            per_class[label] = {'acc': round(precision, 4), 'recall': round(recall, 4), 'f1': round(f1, 4)}
        recall, precision, f1 = self.compute(len(self.origins), len(self.founds), len(self.rights))
        return {'acc': precision, 'recall': recall, 'f1': f1}, per_class

    def update(self, true_subject, pred_subject):
        """Accumulate one example's gold and predicted spans."""
        self.origins.extend(true_subject)
        self.founds.extend(pred_subject)
        self.rights.extend(span for span in pred_subject if span in true_subject)
def apply_transformation(x_source, x_transformation, output_shape, conditioning_input_shape, transform_name, flow_indexing='xy', color_transform_type='WB'):
    """Reshape a decoded color transform and combine it with the source image.

    Returns (im_out, x_transformation): the color-transformed image reshaped
    to ``output_shape``, and the reshaped transform tensor itself.
    Raises NotImplementedError for transform types other than 'delta'/'mult'.
    """
    # Number of spatial dims; kept for parity with the original (unused below).
    n_dims = len(conditioning_input_shape) - 1
    transform_shape = x_transformation.get_shape().as_list()[1:]
    x_transformation = Reshape(transform_shape, name='{}_dec_out'.format(transform_name))(x_transformation)
    print('Applying color transform {}'.format(color_transform_type))
    if color_transform_type == 'delta':
        combined = Add()([x_source, x_transformation])
    elif color_transform_type == 'mult':
        combined = Multiply()([x_source, x_transformation])
    else:
        raise NotImplementedError('Only color transform types delta and mult are supported!')
    im_out = Reshape(output_shape, name='color_transformer')(combined)
    return im_out, x_transformation
# NOTE(review): this call and the two bare calls before forward() below look
# like decorator applications whose '@' prefix was lost during extraction
# (huggingface add_start_docstrings helpers) - confirm against upstream.
_start_docstrings('The bare Cvt Model transformer outputting raw hidden-states without any specific head on top.', CVT_START_DOCSTRING)
class CvtModel(CvtPreTrainedModel):
    """Bare CvT (Convolutional vision Transformer): runs the encoder over pixel
    values and returns raw hidden states without any task-specific head."""
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.encoder = CvtEncoder(config)
        # Standard HF post-init: weight initialization and final processing.
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; heads_to_prune maps layer index -> head indices."""
        for (layer, heads) in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    _start_docstrings_to_model_forward(CVT_INPUTS_DOCSTRING)
    _code_sample_docstrings(processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCLSToken, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(Tuple, BaseModelOutputWithCLSToken)]:
        """Encode pixel_values; returns BaseModelOutputWithCLSToken, or a plain
        tuple when return_dict is False. Raises ValueError on missing input."""
        # Fall back to config defaults when the flags are not given explicitly.
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        if (pixel_values is None):
            raise ValueError('You have to specify pixel_values')
        encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = encoder_outputs[0]
        if (not return_dict):
            return ((sequence_output,) + encoder_outputs[1:])
        return BaseModelOutputWithCLSToken(last_hidden_state=sequence_output, cls_token_value=encoder_outputs.cls_token_value, hidden_states=encoder_outputs.hidden_states)
class transform(ctypes.Array):
    """A 7-float ctypes array packing a 3D position followed by a quaternion.

    Layout: [px, py, pz, qx, qy, qz, qw]; the quaternion default (0,0,0,1)
    appears to be identity in xyzw order - TODO confirm convention.
    """
    # ctypes.Array subclass configuration: 7 single-precision floats.
    _length_ = 7
    _shape_ = (7,)
    _type_ = ctypes.c_float
    def __init__(self, p=(0.0, 0.0, 0.0), q=(0.0, 0.0, 0.0, 1.0)):
        # vec3/quat come from elsewhere in this module; presumably they convert
        # the sequences into ctypes-compatible arrays - confirm.
        self[0:3] = vec3(*p)
        self[3:7] = quat(*q)
    def p(self):
        """Return the position component (first three floats)."""
        return self[0:3]
    def q(self):
        """Return the quaternion component (last four floats)."""
        return self[3:7]
# NOTE(review): the bare string below looks like a behave step decorator whose
# '@given(...)' wrapper was lost during extraction - confirm against upstream.
('a font having {vertAlign_state} vertical alignment')
def given_a_font_having_vertAlign_state(context, vertAlign_state):
    """Load the fixture document's style matching the requested vertical
    alignment state and expose its font on the behave context."""
    style_name = {'inherited': 'Normal', 'subscript': 'Subscript', 'superscript': 'Superscript'}[vertAlign_state]
    document = Document(test_docx('txt-font-props'))
    context.font = document.styles[style_name].font
def test_cmd_error_throws_with_save_true_executable_not_found():
    """cmd step with save=True raises FileNotFoundError for a missing executable.

    Also verifies no 'cmdOut' key is written and that, on POSIX, the raised
    error names the missing command.
    """
    cmd = get_cmd('tests/testfiles/cmds/xxx', 'tests\\testfiles\\cmds\\xxx')
    with pytest.raises(FileNotFoundError) as err:
        context = Context({'cmd': {'run': cmd, 'save': True}})
        pypyr.steps.cmd.run_step(context)
    assert ('cmdOut' not in context)
    if (not is_windows):
        # BUG FIX: the original line was a bare comparison expression, which
        # evaluated and discarded the result without asserting anything.
        assert err.value.filename == cmd
class AUC(BaseMetric):
    """Area-under-ROC-curve metric for a single named binary label."""

    def __init__(self, label_name):
        self._label_name = label_name

    def eval(self, predict, labels_map):
        """Compute ROC AUC of ``predict`` against the configured label.

        Returns NaN when the label column is all zeros or all ones, because
        AUC is undefined without both classes present.
        """
        label = labels_map[self._label_name]
        positives = np.sum(label)
        if positives == 0 or positives == label.size:
            return MetricResult(result=float('nan'))
        score = roc_auc_score(y_true=label, y_score=predict)
        return MetricResult(result=score, meta={'#': predict.size})

    def required_label_names(self):
        """Label columns this metric needs from the batch."""
        return [self._label_name]
def add_action(name=None, location='\\', action_type='Execute', **kwargs):
    """Add an action to a scheduled task via the Windows Task Scheduler COM API.

    Follows the module's convention of returning error *strings* rather than
    raising.  If ``task_definition`` is supplied in kwargs the action is added
    to it in place and the caller saves; otherwise the task named ``name``
    under ``location`` is looked up and the definition is saved here.
    """
    logging.debug('Adding an action to the task...')
    save_definition = False
    if kwargs.get('task_definition', False):
        # Caller supplied a definition: modify it in place; caller will save.
        task_definition = kwargs.get('task_definition')
    else:
        # No definition supplied: find the existing task and save afterwards.
        save_definition = True
        if (not name):
            return 'Required parameter "name" not passed'
        # BUG FIX: this lookup originally ran unconditionally, clobbering a
        # caller-supplied task_definition; it belongs on this else-branch only.
        if (name in list_tasks(location)):
            pythoncom.CoInitialize()
            task_service = win32com.client.Dispatch('Schedule.Service')
            task_service.Connect()
            task_folder = task_service.GetFolder(location)
            task_definition = task_folder.GetTask(name).Definition
        else:
            return '{0} not found'.format(name)
    task_action = task_definition.Actions.Create(action_types[action_type])
    if (action_types[action_type] == TASK_ACTION_EXEC):
        # Execute action: requires 'cmd'; 'arguments'/'start_in' optional.
        task_action.Id = 'Execute_ID1'
        if kwargs.get('cmd', False):
            task_action.Path = kwargs.get('cmd')
        else:
            return 'Required parameter "cmd" not found'
        task_action.Arguments = kwargs.get('arguments', '')
        task_action.WorkingDirectory = kwargs.get('start_in', '')
    elif (action_types[action_type] == TASK_ACTION_SEND_EMAIL):
        # Email action: requires 'server', 'from' and at least one of 'to'/'cc'.
        task_action.Id = 'Email_ID1'
        if kwargs.get('server', False):
            task_action.Server = kwargs.get('server')
        else:
            return 'Required parameter "server" not found'
        if kwargs.get('from', False):
            task_action.From = kwargs.get('from')
        else:
            return 'Required parameter "from" not found'
        if (kwargs.get('to', False) or kwargs.get('cc', False)):
            if kwargs.get('to'):
                task_action.To = kwargs.get('to')
            if kwargs.get('cc'):
                task_action.Cc = kwargs.get('cc')
        else:
            return 'Required parameter "to" or "cc" not found'
        if kwargs.get('reply_to'):
            task_action.ReplyTo = kwargs.get('reply_to')
        if kwargs.get('bcc'):
            task_action.Bcc = kwargs.get('bcc')
        if kwargs.get('subject'):
            task_action.Subject = kwargs.get('subject')
        if kwargs.get('body'):
            task_action.Body = kwargs.get('body')
        if kwargs.get('attachments'):
            task_action.Attachments = kwargs.get('attachments')
    elif (action_types[action_type] == TASK_ACTION_SHOW_MESSAGE):
        # Message-box action: requires 'title' and 'message'.
        task_action.Id = 'Message_ID1'
        if kwargs.get('title', False):
            task_action.Title = kwargs.get('title')
        else:
            return 'Required parameter "title" not found'
        if kwargs.get('message', False):
            task_action.MessageBody = kwargs.get('message')
        else:
            return 'Required parameter "message" not found'
    if save_definition:
        # Re-save with the task's existing principal; password is not needed.
        return _save_task_definition(name=name, task_folder=task_folder, task_definition=task_definition, user_name=task_definition.Principal.UserID, password=None, logon_type=task_definition.Principal.LogonType)
class AnsiState(object):
    """Tracks terminal text attributes (bold/inverse/foreground/background)
    and converts to and from a Windows console attribute word."""

    # ANSI color name -> Windows 3-bit color index.
    trtable = {'black': 0, 'red': 4, 'green': 2, 'yellow': 6, 'blue': 1, 'magenta': 5, 'cyan': 3, 'white': 7}
    # Reverse mapping: Windows index -> color name.
    revtable = dict(zip(trtable.values(), trtable.keys()))

    def __init__(self, bold=False, inverse=False, color='white', background='black', backgroundbold=False):
        self.bold = bold
        self.inverse = inverse
        self.color = color
        self.background = background
        self.backgroundbold = backgroundbold

    def get_winattr(self):
        """Pack the current state into a Windows console attribute word."""
        flags = (8 if self.bold else 0) | (128 if self.backgroundbold else 0) | (16384 if self.inverse else 0)
        return flags | self.trtable[self.color] | (self.trtable[self.background] << 4)

    def set_winattr(self, attr):
        """Unpack a Windows console attribute word into this state."""
        self.bold = (attr & 8) != 0
        self.backgroundbold = (attr & 128) != 0
        self.inverse = (attr & 16384) != 0
        self.color = self.revtable[attr & 7]
        self.background = self.revtable[(attr & 112) >> 4]

    winattr = property(get_winattr, set_winattr)

    def __repr__(self):
        return ('AnsiState(bold=%s,inverse=%s,color=%9s,background=%9s,backgroundbold=%s)# 0x%x' % (self.bold, self.inverse, ('"%s"' % self.color), ('"%s"' % self.background), self.backgroundbold, self.winattr))

    def copy(self):
        """Return an independent AnsiState with the same attribute values."""
        return AnsiState(bold=self.bold, inverse=self.inverse, color=self.color, background=self.background, backgroundbold=self.backgroundbold)
def get_image_paths_from_dir(fdir):
    """Recursively collect all file paths under ``fdir``.

    Entries are visited in sorted order within each directory; subdirectories
    are expanded depth-first in place, so the result interleaves files and
    directory contents in lexicographic order per level.
    """
    image_paths = []
    for entry in sorted(os.listdir(fdir)):
        full_path = os.path.join(fdir, entry)
        if os.path.isdir(full_path):
            image_paths.extend(get_image_paths_from_dir(full_path))
        else:
            image_paths.append(full_path)
    return image_paths
class TestOrderFactory(unittest.TestCase):
    """Tests for OrderFactory order-sizing logic (equity and crypto tickers).

    Fixture: portfolio value 1000.0, share price 10.0, and an existing
    position of 10 units in each ticker.
    """

    # BUG FIX: unittest requires setUpClass to be a classmethod; without the
    # decorator it is fetched as a plain function and called with no
    # arguments, raising TypeError before any test runs.
    @classmethod
    def setUpClass(cls):
        cls.ticker = BloombergTicker('AAPL US Equity')
        cls.crypto_ticker = BinanceTicker('BTC', 'BUSD')
        cls.current_portfolio_value = 1000.0
        cls.share_price = 10.0
        position = Mock(spec=Position)
        position.quantity.return_value = 10.0
        position.ticker.return_value = cls.ticker
        crypto_position = Mock(spec=Position)
        crypto_position.quantity.return_value = 10.0
        crypto_position.ticker.return_value = cls.crypto_ticker
        broker = Mock(spec=Broker)
        broker.get_portfolio_value.return_value = cls.current_portfolio_value
        broker.get_positions.return_value = [position, crypto_position]
        data_handler = Mock(spec=DataHandler)
        # Every ticker resolves to the same fixed share price.
        data_handler.get_last_available_price.side_effect = (lambda tickers, _: QFSeries(([cls.share_price] * len(tickers)), index=tickers))
        cls.order_factory = OrderFactory(broker, data_handler)

    def test_order(self):
        quantity = 5
        execution_style = MarketOrder()
        time_in_force = TimeInForce.GTC
        orders = self.order_factory.orders({self.ticker: quantity}, execution_style, time_in_force)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, time_in_force))

    def test_order_target(self):
        quantity = (- 5)
        execution_style = StopOrder(4.2)
        time_in_force = TimeInForce.DAY
        orders = self.order_factory.target_orders({self.ticker: 5}, execution_style, time_in_force)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, time_in_force))

    def test_order_value(self):
        value = 100.0
        quantity = float(floor((100.0 / self.share_price)))
        execution_style = StopOrder(4.2)
        time_in_force = TimeInForce.DAY
        orders = self.order_factory.value_orders({self.ticker: value}, execution_style, time_in_force)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, time_in_force))

    def test_order_percent(self):
        percentage = 0.5
        execution_style = StopOrder(4.2)
        time_in_force = TimeInForce.GTC
        quantity = float(floor(((percentage * self.current_portfolio_value) / self.share_price)))
        orders = self.order_factory.percent_orders({self.ticker: percentage}, execution_style, time_in_force)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, time_in_force))

    def test_order_target_value(self):
        execution_style = StopOrder(4.2)
        time_in_force = TimeInForce.GTC
        quantity = 4
        orders = self.order_factory.target_value_orders({self.ticker: 140.0}, execution_style, time_in_force)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, time_in_force))

    def test_order_target_percent(self):
        quantity = 40
        execution_style = StopOrder(4.2)
        time_in_force = TimeInForce.GTC
        orders = self.order_factory.target_percent_orders({self.ticker: 0.5}, execution_style, time_in_force)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, time_in_force))

    def test_order_target_tolerance1(self):
        quantity = 11
        execution_style = MarketOrder()
        time_in_force = TimeInForce.DAY
        tolerance = {self.ticker: 2}
        orders = self.order_factory.target_orders({self.ticker: quantity}, execution_style, time_in_force, tolerance)
        self.assertEqual(orders, [])

    def test_order_target_tolerance1a(self):
        quantity = 100
        execution_style = MarketOrder()
        time_in_force = TimeInForce.DAY
        tolerance = {self.ticker: 91}
        orders = self.order_factory.target_orders({self.ticker: quantity}, execution_style, time_in_force, tolerance)
        self.assertEqual(orders, [])

    def test_order_target_tolerance2(self):
        quantity = 12
        execution_style = MarketOrder()
        time_in_force = TimeInForce.DAY
        tolerance = {self.ticker: 2}
        orders = self.order_factory.target_orders({self.ticker: quantity}, execution_style, time_in_force, tolerance)
        self.assertEqual(orders, [])

    def test_order_target_tolerance3(self):
        quantity = 15
        execution_style = MarketOrder()
        time_in_force = TimeInForce.DAY
        tolerance = {self.ticker: 2}
        orders = self.order_factory.target_orders({self.ticker: quantity}, execution_style, time_in_force, tolerance)
        trade_quantity = 5
        self.assertEqual(orders[0], Order(self.ticker, trade_quantity, execution_style, time_in_force))

    def test_order_target_tolerance3a(self):
        quantity = 150
        execution_style = MarketOrder()
        time_in_force = TimeInForce.DAY
        tolerance = {self.ticker: 139}
        orders = self.order_factory.target_orders({self.ticker: quantity}, execution_style, time_in_force, tolerance)
        trade_quantity = 140
        self.assertEqual(orders[0], Order(self.ticker, trade_quantity, execution_style, time_in_force))

    def test_order_target_value_tolerance1(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance = 5.0
        target_value = 113.0
        tolerance_percentage = (tolerance / target_value)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        quantity = 1
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, tif))

    def test_order_target_value_tolerance2(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance = 5.0
        target_value = 219.0
        tolerance_percentage = (tolerance / target_value)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        quantity = 11
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, tif))

    def test_order_target_value_tolerance3(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance = 11.0
        target_value = 110.0
        tolerance_percentage = (tolerance / target_value)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        self.assertEqual(orders, [])

    def test_order_target_value_tolerance4(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance = 11.0
        target_value = 110.999
        tolerance_percentage = (tolerance / target_value)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        self.assertEqual(orders, [])

    def test_order_target_value_tolerance5(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance = 10.0
        target_value = 90.1
        tolerance_percentage = (tolerance / target_value)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        self.assertEqual(orders, [])

    def test_order_target_value_tolerance6(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance = 10.0
        target_value = 89.9
        tolerance_percentage = (tolerance / target_value)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        quantity = (- 2)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, tif))

    def test_order_target_value_tolerance7(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance = 9
        target_value = 90.9
        tolerance_percentage = (tolerance / target_value)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        quantity = (- 1)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, tif))

    def test_order_target_value_tolerance8(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance = 10.0
        target_value = 45.0
        tolerance_percentage = (tolerance / target_value)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        quantity = (- 6)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, tif))

    def test_order_target_value_tolerance9(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        target_value = 9.0
        tolerance_percentage = (1 / 9)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        quantity = (- 10)
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, tif))

    def test_order_target_value_tolerance10(self):
        execution_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance = 10.99
        target_value = 111.0
        tolerance_percentage = (tolerance / target_value)
        orders = self.order_factory.target_value_orders({self.ticker: target_value}, execution_style, tif, tolerance_percentage)
        quantity = 1
        self.assertEqual(orders[0], Order(self.ticker, quantity, execution_style, tif))

    def test_order_target_percent_tolerance1(self):
        ex_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance_percentage = (1 / 12)
        target_value = 0.12
        orders = self.order_factory.target_percent_orders({self.ticker: target_value}, ex_style, tif, tolerance_percentage)
        quantity = 2
        self.assertEqual(orders[0], Order(self.ticker, quantity, ex_style, tif))

    def test_crypto_order_target_percent_tolerance1(self):
        ex_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance_percentage = (1 / 12)
        target_value = 0.12
        orders = self.order_factory.target_percent_orders({self.crypto_ticker: target_value}, ex_style, tif, tolerance_percentage)
        quantity = 2
        self.assertEqual(orders[0], Order(self.crypto_ticker, quantity, ex_style, tif))

    def test_order_target_percent_tolerance2(self):
        ex_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance_percentage = (1 / 11)
        target_value = 0.11
        orders = self.order_factory.target_percent_orders({self.ticker: target_value}, ex_style, tif, tolerance_percentage)
        self.assertEqual(orders, [])

    def test_order_target_percent_tolerance3(self):
        ex_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance_percentage = (1 / 9)
        target_value = 0.09
        orders = self.order_factory.target_percent_orders({self.ticker: target_value}, ex_style, tif, tolerance_percentage)
        self.assertEqual(orders, [])

    def test_order_target_percent_tolerance4(self):
        ex_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance_percentage = (1 / 8)
        target_value = 0.08
        orders = self.order_factory.target_percent_orders({self.ticker: target_value}, ex_style, tif, tolerance_percentage)
        quantity = (- 2)
        self.assertEqual(orders[0], Order(self.ticker, quantity, ex_style, tif))

    def test_order_target_percent_tolerance5(self):
        ex_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance_percentage = (0.5 / 9)
        target_value = 0.09
        orders = self.order_factory.target_percent_orders({self.ticker: target_value}, ex_style, tif, tolerance_percentage)
        quantity = (- 1)
        self.assertEqual(orders[0], Order(self.ticker, quantity, ex_style, tif))

    def test_order_target_percent_tolerance6(self):
        ex_style = MarketOrder()
        tif = TimeInForce.DAY
        tolerance_percentage = (2 / 50)
        target_value = 0.5
        orders = self.order_factory.target_percent_orders({self.ticker: target_value}, ex_style, tif, tolerance_percentage)
        quantity = 40
        self.assertEqual(orders[0], Order(self.ticker, quantity, ex_style, tif))

    def test_crypto_order(self):
        quantity = 0.5
        execution_style = MarketOrder()
        time_in_force = TimeInForce.GTC
        orders = self.order_factory.orders({self.crypto_ticker: quantity}, execution_style, time_in_force)
        self.assertEqual(orders[0], Order(self.crypto_ticker, quantity, execution_style, time_in_force))

    def test_crypto_order_target(self):
        quantity = (- 4.5)
        execution_style = StopOrder(4.2)
        time_in_force = TimeInForce.DAY
        orders = self.order_factory.target_orders({self.crypto_ticker: 5.5}, execution_style, time_in_force)
        self.assertEqual(orders[0], Order(self.crypto_ticker, quantity, execution_style, time_in_force))

    def test_crypto_order_value(self):
        # Crypto quantities are not floored to whole units.
        value = 100.0
        quantity = (100.0 / self.share_price)
        execution_style = StopOrder(4.2)
        time_in_force = TimeInForce.DAY
        orders = self.order_factory.value_orders({self.crypto_ticker: value}, execution_style, time_in_force)
        self.assertEqual(orders[0], Order(self.crypto_ticker, quantity, execution_style, time_in_force))
class COW():
    """Copy-on-write page cache over a backing image file.

    4096-byte pages are copied from ``imagefd`` into the cache file ``fh`` on
    first write; ``self.pages`` lists each cached page's base offset, and a
    page's position in the cache file is its index in that list times 4096.
    Attributes (fh, imagefd, pages, logger, addr, seek_lock) are set up
    elsewhere - presumably by a subclass or factory; confirm.
    """

    def basepages(self, offset, length):
        """Split (offset, length) into (page_base, in_page_offset, chunk_len) tuples."""
        basepages = []
        head = offset % 4096
        # BUG FIX: clamp the first chunk to the requested length; the original
        # always extended it to the end of the page, over-reading short requests.
        first = min(length, 4096 - head)
        basepages.append((offset - head, head, first))
        length -= first
        # BUG FIX: advance to the next page *boundary*; the original added 4096
        # to the raw offset, yielding misaligned page bases whenever offset was
        # not page-aligned.
        offset += (4096 - head)
        while (length >= 4096):
            basepages.append((offset, 0, 4096))
            length -= 4096
            offset += 4096
        if (length > 0):
            basepages.append((offset, 0, length))
        return basepages

    def read(self, offset, length):
        """Read ``length`` bytes at ``offset``, preferring cached pages."""
        basepages = self.basepages(offset, length)
        self.logger.debug('{0} reading {1} bytes from {2}. Pages: {3}'.format(self.addr, length, hex(offset), len(basepages)))
        parts = []
        for (major, minor, chunk) in basepages:
            if (major in self.pages):
                # Cached page: locate it by index in the cache file.
                off = self.pages.index(major)
                self.fh.seek((off * 4096) + minor)
                parts.append(self.fh.read(chunk))
            else:
                # Uncached: read straight from the backing image (serialized).
                self.seek_lock.acquire()
                self.imagefd.seek(major + minor)
                parts.append(self.imagefd.read(chunk))
                self.seek_lock.release()
        return ''.join(parts)

    def write(self, offset, data):
        """Write ``data`` at ``offset``, copying pages into the cache on first touch."""
        basepages = self.basepages(offset, len(data))
        self.logger.debug('{0} writing {1} bytes to {2}. Pages: {3}'.format(self.addr, len(data), hex(offset), len(basepages)))
        for (major, minor, chunk) in basepages:
            if (major not in self.pages):
                # First write to this page: copy it from the image to the cache.
                self.seek_lock.acquire()
                self.imagefd.seek(major)
                cpdata = self.imagefd.read(4096)
                self.seek_lock.release()
                self.fh.seek(0, 2)
                self.fh.write(cpdata)
                self.pages.append(major)
            # BUG FIX: the cached branch originally seeked to (major + minor)
            # in the cache file, but the cache is indexed by page position, so
            # it must seek to (index * 4096 + minor) exactly as read() does.
            off = self.pages.index(major)
            self.fh.seek((off * 4096) + minor)
            self.fh.write(data[:chunk])
            data = data[chunk:]
class Delete(_base_nodes.AssignTypeNode, _base_nodes.Statement):
    """AST node for the ``del`` statement; children are the deletion targets."""

    _astroid_fields = ('targets',)

    def __init__(self, lineno: int, col_offset: int, parent: NodeNG, *, end_lineno: (int | None), end_col_offset: (int | None)) -> None:
        # Targets are populated later via postinit(), once children are built.
        self.targets: list[NodeNG] = []
        super().__init__(lineno=lineno, col_offset=col_offset, end_lineno=end_lineno, end_col_offset=end_col_offset, parent=parent)

    def postinit(self, targets: list[NodeNG]) -> None:
        """Attach the nodes being deleted."""
        self.targets = targets

    def get_children(self):
        """Yield each deletion target in order."""
        for target in self.targets:
            yield target
class _PositionFactory():
    """Dispatches parsing of a position XML element to the matching position class."""

    def parse_position(element):
        """Return the parsed position object for ``element``.

        Raises NotAValidElement when no known position child is present.
        """
        # Ordered (xpath, parser-class) pairs; first match wins, preserving the
        # original elif-chain precedence.
        dispatch = (
            ('WorldPosition', WorldPosition),
            ('RelativeWorldPosition', RelativeWorldPosition),
            ('RelativeObjectPosition', RelativeObjectPosition),
            ('RoadPosition', RoadPosition),
            ('RelativeRoadPosition', RelativeRoadPosition),
            ('LanePosition', LanePosition),
            ('RelativeLanePosition', RelativeLanePosition),
            ('RoutePosition/InRoutePosition/FromCurrentEntity', RoutePositionOfCurrentEntity),
            ('RoutePosition/InRoutePosition/FromRoadCoordinates', RoutePositionInRoadCoordinates),
            ('RoutePosition/InRoutePosition/FromLaneCoordinates', RoutePositionInLaneCoordinates),
            ('TrajectoryPosition', TrajectoryPosition),
            ('GeoPosition', GeoPosition),
        )
        for xpath, position_cls in dispatch:
            if element.findall(xpath):
                return position_cls.parse(element)
        raise NotAValidElement('element ', element, 'is not a valid position')
class TestBatchProcess(CommandTest):
    """Tests for the batch-command processor."""

    def test_batch_commands(self):
        """Run the example batch file, then destroy a created object without a prompt."""
        self.call(batchprocess.CmdBatchCommands(), 'example_batch_cmds', 'Running Batch-command processor - Automatic mode for example_batch_cmds')
        # Temporarily disable the destroy confirmation prompt.
        confirm = building.CmdDestroy.confirm
        building.CmdDestroy.confirm = False
        # BUG FIX: restore the class attribute even if the call fails, so a
        # failure here cannot leak `confirm=False` into other tests.
        try:
            self.call(building.CmdDestroy(), 'button', 'button was destroyed.')
        finally:
            building.CmdDestroy.confirm = confirm
class FixedWordEmbedder(WordEmbedder):
    """Word embedder backed by fixed pre-trained vectors.

    Embedding-matrix layout: index 0 = null/pad (zero vector), 1 = unk
    (optionally trainable), then special tokens, then vocabulary words.
    Out-of-vocabulary lookups fall back to the lower-cased form, then unk.
    """

    def __init__(self, vec_name: str, word_vec_init_scale: float=0.05, learn_unk: bool=True, keep_probs: float=1, keep_word: float=1, shrink_embed: bool=False, cpu=False):
        self.keep_word = keep_word
        self.keep_probs = keep_probs
        self.word_vec_init_scale = word_vec_init_scale
        self.learn_unk = learn_unk
        self.vec_name = vec_name
        self.cpu = cpu
        self.shrink_embed = shrink_embed
        # Built lazily by init(); excluded from pickling (see __getstate__).
        self._word_to_ix = None
        self._word_emb_mat = None
        self._special_tokens = None

    def set_vocab(self, _, loader: ResourceLoader, special_tokens: List[str]):
        # Sorted for a deterministic special-token -> index assignment.
        if (special_tokens is not None):
            self._special_tokens = sorted(special_tokens)

    def is_vocab_set(self):
        return True

    def question_word_to_ix(self, word, is_train):
        """Map a question word to its index, falling back to lowercase then unk (1)."""
        ix = self._word_to_ix.get(word, 1)
        if (ix == 1):
            return self._word_to_ix.get(word.lower(), 1)
        else:
            return ix

    def context_word_to_ix(self, word, is_train):
        """Map a context word to its index, falling back to lowercase then unk (1)."""
        ix = self._word_to_ix.get(word, 1)
        if (ix == 1):
            return self._word_to_ix.get(word.lower(), 1)
        else:
            return ix

    def version(self):
        return 1

    def init(self, loader: ResourceLoader, voc: Iterable[str]):
        """Build the embedding matrix, optionally pinning it to the CPU device."""
        if self.cpu:
            with tf.device('/cpu:0'):
                self._init(loader, voc)
        else:
            self._init(loader, voc)

    def _init(self, loader: ResourceLoader, voc: Iterable[str]):
        if (voc is not None):
            word_to_vec = loader.load_word_vec(self.vec_name, voc)
        else:
            word_to_vec = loader.load_word_vec(self.vec_name)
            voc = set(word_to_vec.keys())
        self._word_to_ix = {}
        dim = next(iter(word_to_vec.values())).shape[0]
        # Index 0: null/pad (all zeros); index 1: unk (optionally trainable).
        null_embed = tf.zeros((1, dim), dtype=tf.float32)
        unk_embed = tf.get_variable(shape=(1, dim), name='unk_embed', dtype=np.float32, trainable=self.learn_unk, initializer=tf.random_uniform_initializer((- self.word_vec_init_scale), self.word_vec_init_scale))
        ix = 2
        matrix_list = [null_embed, unk_embed]
        if ((self._special_tokens is not None) and (len(self._special_tokens) > 0)):
            print(('Building embeddings for %d special_tokens' % len(self._special_tokens)))
            tok_embed = tf.get_variable(shape=(len(self._special_tokens), dim), name='token_embed', dtype=np.float32, trainable=True, initializer=tf.random_uniform_initializer((- self.word_vec_init_scale), self.word_vec_init_scale))
            matrix_list.append(tok_embed)
            for token in self._special_tokens:
                self._word_to_ix[token] = ix
                ix += 1
        mat = []
        for word in voc:
            if (word in self._word_to_ix):
                continue
            if (word in word_to_vec):
                mat.append(word_to_vec[word])
                self._word_to_ix[word] = ix
                ix += 1
            else:
                # No exact-case vector: index the lowercase form instead.
                lower = word.lower()
                if ((lower in word_to_vec) and (lower not in self._word_to_ix)):
                    mat.append(word_to_vec[lower])
                    self._word_to_ix[lower] = ix
                    ix += 1
        print(('Had pre-trained word embeddings for %d of %d words' % (len(mat), len(voc))))
        matrix_list.append(tf.constant(value=np.vstack(mat)))
        self._word_emb_mat = tf.concat(matrix_list, axis=0)

    def embed(self, is_train, *word_ix):
        """Look up embeddings for (ix, len) pairs, with optional train-time dropout."""
        if any(((len(x) != 2) for x in word_ix)):
            raise ValueError()
        mat = self._word_emb_mat
        if (self.keep_probs < 1):
            mat = tf.cond(is_train, (lambda : tf.nn.dropout(mat, self.keep_probs)), (lambda : mat))
        if (self.keep_word < 1):
            # Whole-word dropout: a noise shape of (vocab, 1) drops entire rows.
            mat = tf.cond(is_train, (lambda : tf.nn.dropout(mat, self.keep_word, (mat.shape.as_list()[0], 1))), (lambda : mat))
        if self.cpu:
            with tf.device('/cpu:0'):
                return [tf.nn.embedding_lookup(self._word_emb_mat, x[0]) for x in word_ix]
        else:
            return [tf.nn.embedding_lookup(self._word_emb_mat, x[0]) for x in word_ix]

    def __getstate__(self):
        state = dict(self.__dict__)
        # The TF matrix and vocab index are rebuilt by init(); don't pickle them.
        state['_word_emb_mat'] = None
        state['_word_to_ix'] = None
        # BUG FIX: the original stored the bound method `self.version` instead
        # of calling it, so the pickled version field was a method, not an int.
        return dict(version=self.version(), state=state)

    def __setstate__(self, state):
        # Backfill attributes added after older pickles were written.
        if ('state' in state):
            if ('cpu' not in state['state']):
                state['state']['cpu'] = False
            if ('keep_probs' not in state['state']):
                state['state']['keep_probs'] = 1.0
            if ('keep_word' not in state['state']):
                state['state']['keep_word'] = 1.0
            if ('_special_tokens' not in state['state']):
                state['state']['_special_tokens'] = []
        super().__setstate__(state)
def make_data_sampler(dataset, shuffle, distributed):
    """Build a sampler for ``dataset``.

    Distributed runs get the project's DistributedSampler; otherwise a
    RandomSampler when shuffling, or a SequentialSampler.
    """
    if distributed:
        return samplers.DistributedSampler(dataset, shuffle=shuffle)
    sampler_cls = (torch.utils.data.sampler.RandomSampler if shuffle
                   else torch.utils.data.sampler.SequentialSampler)
    return sampler_cls(dataset)
def add_input_options(command):
    """Attach the SMILES-input click options to ``command`` and wrap it so the
    raw option values are collapsed into a single ``input_options`` kwarg."""
    def attach(*args, **kwargs):
        # click.option returns a decorator; apply it to the command in place.
        click.option(*args, **kwargs)(command)

    attach('--in', '-i', 'in_format', type=click.Choice(['smi', 'smi.gz']), help="Input structuture format (one of 'smi', 'smi.gz'). If not specified, use the filename extension or default to 'smi'.")
    attach('--delimiter', default='whitespace', type=click.Choice(['whitespace', 'to-eol', 'comma', 'tab', 'space', 'native']), help="SMILES file delimiter style (one of 'whitespace' (default), 'to-eol', 'comma', 'tab', 'space', or 'native')")
    attach('--has-header', is_flag=True, default=False, help='Skip the first line, which is the header line')

    def make_input_options_wrapper(**kwargs):
        # Collapse the three raw option values into one SmiInputOptions object.
        kwargs['input_options'] = SmiInputOptions(format=kwargs.pop('in_format'), delimiter=kwargs.pop('delimiter'), has_header=kwargs.pop('has_header'))
        return command(**kwargs)

    set_click_attrs(make_input_options_wrapper, command)
    return make_input_options_wrapper
def datafiles_retrivedatabundle(config):
    """List the concrete output files of every data bundle selected for download.

    Outputs containing '*' are kept only when they denote a directory
    (trailing '/'); duplicates are removed via a set, so order is arbitrary.
    """
    tutorial = config['tutorial']
    countries = config['countries']
    config_enable = config['enable']
    config_bundles = load_databundle_config(config['databundles'])
    selected = get_best_bundles(countries, config_bundles, tutorial, config_enable)
    outputs = set()
    for bundlename in selected:
        for output in config['databundles'][bundlename]['output']:
            if ('*' not in output) or output.endswith('/'):
                outputs.add(output)
    return list(outputs)
# NOTE(review): the leading '.parametrize(...)' looks like a
# '@pytest.mark.parametrize(...)' decorator whose '@pytest.mark' prefix was
# lost during extraction - confirm against the upstream test module.
.parametrize(['dev', 'lines'], [(False, [f'a==1.2.3 ; {MARKER_PY27.union(MARKER_PY36_38)}']), (True, [f'a==1.2.3 ; {MARKER_PY27.union(MARKER_PY36_38).union(MARKER_PY36)}', f'b==4.5.6 ; {MARKER_PY}'])])
def test_exporter_can_export_requirements_txt_with_nested_packages_and_markers_any(tmp_path: Path, poetry: Poetry, dev: bool, lines: list[str]) -> None:
    """Exported requirements must carry the union of environment markers for a
    package reachable through several dependency paths (main vs dev groups)."""
    poetry.locker.mock_lock_data({'package': [{'name': 'a', 'version': '1.2.3', 'optional': False, 'python-versions': '*'}, {'name': 'b', 'version': '4.5.6', 'optional': False, 'python-versions': '*', 'dependencies': {'a': '>=1.2.3'}}], 'metadata': {'python-versions': '*', 'content-hash': '', 'files': {'a': [], 'b': []}}})
    root = poetry.package.with_dependency_groups([], only=True)
    # 'a' is a main dep constrained to python<3.8; dev dep 'b' also pulls in 'a'.
    root.add_dependency(Factory.create_dependency(name='a', constraint={'version': '^1.2.3', 'python': '<3.8'}))
    root.add_dependency(Factory.create_dependency(name='b', constraint={'version': '^4.5.6'}, groups=['dev']))
    poetry._package = root
    exporter = Exporter(poetry, NullIO())
    if dev:
        exporter.only_groups([MAIN_GROUP, 'dev'])
    exporter.export('requirements.txt', tmp_path, 'requirements.txt')
    with (tmp_path / 'requirements.txt').open(encoding='utf-8') as f:
        content = f.read()
    assert (content.strip() == '\n'.join(lines))
def enable_sanitized_heap(ql, fault_rate=0):
    """Swap the emulated OS heap for a sanitizing wrapper that aborts on any
    detected heap violation (OOB, overflow, bad free, use-after-free)."""
    sanitized = QlSanitizedMemoryHeap(ql, ql.os.heap, fault_rate=fault_rate)
    # Every violation handler aborts emulation with a descriptive message.
    sanitized.oob_handler = lambda *args: my_abort('Out-of-bounds read detected')
    sanitized.bo_handler = lambda *args: my_abort('Buffer overflow/underflow detected')
    sanitized.bad_free_handler = lambda *args: my_abort('Double free or bad free detected')
    sanitized.uaf_handler = lambda *args: my_abort('Use-after-free detected')
    # Pre-allocate one page before installing the heap; purpose is not evident
    # from this file - presumably warms up the sanitizer's bookkeeping. Confirm.
    sanitized.alloc(4096)
    ql.os.heap = sanitized
    ql.loader.dxe_context.heap = sanitized
class Block(nn.Module):
    """Transformer block: pre-norm attention then pre-norm MLP, each wrapped in
    a residual connection with stochastic depth (DropPath)."""

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, linear=False):
        super().__init__()
        # Submodules are registered in the same order as the original so that
        # state_dict layout and init RNG consumption are unchanged.
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio, linear=linear)
        # Stochastic depth: identity when drop_path == 0.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden_dim, act_layer=act_layer, drop=drop, linear=linear)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Truncated-normal linear init, unit LayerNorm, fan-out normal for convs."""
        if isinstance(module, nn.Linear):
            trunc_normal_(module.weight, std=0.02)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.LayerNorm):
            nn.init.constant_(module.bias, 0)
            nn.init.constant_(module.weight, 1.0)
        elif isinstance(module, nn.Conv2d):
            fan_out = (module.kernel_size[0] * module.kernel_size[1] * module.out_channels) // module.groups
            module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, x, H, W):
        x = x + self.drop_path(self.attn(self.norm1(x), H, W))
        x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
        return x
def get_qroam_cost(data_size: int, bitsize: int, adjoint: bool=False) -> int:
    """Return the optimal QROAM lookup cost for a table of `data_size` entries.

    Minimizes cost(k) = data_size/2**k + bitsize*(2**k - 1) — or, for the
    adjoint (uncomputation), data_size/2**k + 2**k — over integer k near the
    analytic optimum k* = 0.5*log2(data_size[/bitsize]).

    Args:
        data_size: number of entries in the lookup table.
        bitsize: output register width (unused for the adjoint).
        adjoint: if True, compute the cheaper adjoint/uncomputation cost.

    Returns:
        The minimized cost, rounded up to an int. (Fixed: the original
        annotation claimed Tuple[int, int], but a single int was returned.)
    """
    if adjoint:
        k = 0.5 * np.log2(data_size)
        value = lambda k: (data_size / (2 ** k)) + (2 ** k)
    else:
        k = 0.5 * np.log2(data_size / bitsize)
        assert k >= 0
        value = lambda k: (data_size / (2 ** k)) + (bitsize * ((2 ** k) - 1))
    # Evaluate at both integers bracketing the analytic optimum; keep the best.
    k_int = np.array([np.floor(k), np.ceil(k)])
    k_opt = k_int[np.argmin(value(k_int))]
    val_opt = np.ceil(value(k_opt))
    return int(val_opt)
def validate_pathname_binary_tuple(data: Tuple[(str, IOBase)]):
    """Validate that `data` is a (pathname, binary stream) 2-tuple.

    Args:
        data: expected to be a tuple of (str pathname, IOBase or
            StreamWrapper stream).

    Raises:
        TypeError: if `data` is not a tuple, is not length 2, or either
            element has the wrong type.
    """
    if not isinstance(data, tuple):
        raise TypeError(f'pathname binary data should be tuple type, but it is type {type(data)}')
    if len(data) != 2:
        raise TypeError(f'pathname binary stream tuple length should be 2, but got {len(data)}')
    if not isinstance(data[0], str):
        raise TypeError(f'pathname within the tuple should have string type pathname, but it is type {type(data[0])}')
    if (not isinstance(data[1], IOBase)) and (not isinstance(data[1], StreamWrapper)):
        # Fixed: the message read 'IOBase orits subclasses' (missing space).
        raise TypeError(f'binary stream within the tuple should have IOBase or its subclasses as type, but it is type {type(data[1])}')
def test_mouse_release_event_when_scale_action(view, item):
    """Releasing the mouse after a scale drag pushes exactly one ScaleItemsBy
    undo command with the accumulated factor and clears the item's scale state.
    """
    view.scene.addItem(item)
    event = MagicMock()
    event.scenePos.return_value = QtCore.QPointF(20, 90)
    # Simulate an in-progress scale drag along the diagonal, anchored at the
    # item's bottom-right corner.
    item.scale_active = True
    item.event_direction = QtCore.QPointF(1, 1) / math.sqrt(2)
    item.event_anchor = QtCore.QPointF(100, 80)
    item.event_start = QtCore.QPointF(10, 10)
    item.scale_orig_factor = 1
    view.scene.undo_stack = MagicMock(push=MagicMock())
    with patch.object(item, 'bounding_rect_unselected', return_value=QtCore.QRectF(0, 0, 100, 80)):
        item.mouseReleaseEvent(event)
    view.scene.undo_stack.push.assert_called_once()
    args = view.scene.undo_stack.push.call_args_list[0][0]
    cmd = args[0]
    # Fixed: the original called isinstance(...) as a bare expression, so the
    # type check never actually asserted anything.
    assert isinstance(cmd, commands.ScaleItemsBy)
    assert cmd.items == [item]
    assert cmd.factor == approx(1.5, 0.01)
    assert cmd.anchor == QtCore.QPointF(100, 80)
    assert cmd.ignore_first_redo is True
    assert item.scale_active is False
    event.accept.assert_called_once_with()
class Registry(object):
    """Simple class registry mapping class names to class objects.

    Classes are added with `register_module` (usable as a decorator) and
    retrieved by name with `get`.
    """

    def __init__(self, name):
        # Registry name, used for error messages and repr.
        self._name = name
        # Maps class __name__ -> class object.
        self._module_dict = dict()

    def __repr__(self):
        format_str = (self.__class__.__name__ + '(name={}, items={})'.format(self._name, list(self._module_dict.keys())))
        return format_str

    def name(self):
        """Return the registry's name."""
        return self._name

    def module_dict(self):
        """Return the underlying name -> class mapping."""
        return self._module_dict

    def get(self, key):
        """Return the registered class for `key`, or None if absent."""
        return self._module_dict.get(key, None)

    def _register_module(self, module_class):
        """Register `module_class` under its __name__.

        Raises:
            TypeError: if `module_class` is not a class.
            KeyError: if a class of the same name is already registered.
        """
        if not inspect.isclass(module_class):
            raise TypeError('module must be a class, but got {}'.format(type(module_class)))
        module_name = module_class.__name__
        if module_name in self._module_dict:
            # Fixed: the original formatted `self.name` — a bound method
            # object, since `name` is not a property — into the message
            # instead of the registry's name string.
            raise KeyError('{} is already registered in {}'.format(module_name, self._name))
        self._module_dict[module_name] = module_class

    def register_module(self, cls):
        """Register `cls` and return it, so this works as a class decorator."""
        self._register_module(cls)
        return cls
class ScdocLexer(RegexLexer):
    """Pygments lexer for scdoc, a simple man page generator.

    Fixed: the `url` attribute was a truncated, unterminated string literal
    (a syntax error); restored to the scdoc project homepage.
    """
    name = 'scdoc'
    url = 'https://git.sr.ht/~sircmpwn/scdoc'
    aliases = ['scdoc', 'scd']
    filenames = ['*.scd', '*.scdoc']
    version_added = '2.5'
    flags = re.MULTILINE
    tokens = {'root': [('^(;.+\\n)', bygroups(Comment)), ('^(#)([^#].+\\n)', bygroups(Generic.Heading, Text)), ('^(#{2})(.+\\n)', bygroups(Generic.Subheading, Text)), ('^(\\s*)([*-])(\\s)(.+\\n)', bygroups(Text, Keyword, Text, using(this, state='inline'))), ('^(\\s*)(\\.+\\.)( .+\\n)', bygroups(Text, Keyword, using(this, state='inline'))), ('^(\\s*>\\s)(.+\\n)', bygroups(Keyword, Generic.Emph)), ('^(```\\n)([\\w\\W]*?)(^```$)', bygroups(String, Text, String)), include('inline')], 'inline': [('\\\\.', Text), ('(\\s)(_[^_]+_)(\\W|\\n)', bygroups(Text, Generic.Emph, Text)), ('(\\s)(\\*[^*]+\\*)(\\W|\\n)', bygroups(Text, Generic.Strong, Text)), ('`[^`]+`', String.Backtick), ('[^\\\\\\s]+', Text), ('.', Text)]}

    def analyse_text(text):
        """Heuristic score for auto-detection: emphasis markers are a weak
        signal; a man-page preamble on the first line is a strong one.
        (No `self` by Pygments convention — analyse_text is treated as static.)
        """
        result = 0
        if '*' in text:
            result += 0.01
        if '_' in text:
            result += 0.01
        # The first line of scdoc files is the manual page preamble,
        # e.g. `progname(1) "section title"`.
        first_line = text.partition('\n')[0]
        scdoc_preamble_pattern = '^.*\\([1-7]\\)( "[^"]+"){0,2}$'
        if re.search(scdoc_preamble_pattern, first_line):
            result += 0.5
        return result
def auth_handler() -> Tuple[(str, bool)]:
    """Collect a 2FA code from the user via a background input thread.

    Polls the shared `user_input` cell once a second for up to two minutes;
    raises TimeoutError if nothing arrives. Returns the code together with
    the remember-device flag (always True).
    """
    code = user_input[0]

    # Read the code on a daemon thread so the poll loop below can time out
    # even if the user never types anything.
    prompt_thread = Thread(target=get_auth_code, args=(user_input,))
    prompt_thread.daemon = True
    prompt_thread.start()

    for _ in range(120):
        sleep(1)
        if user_input[0]:
            code = user_input[0]
            user_input[0] = None
            break

    if not code:
        raise TimeoutError(' .')

    remember_device = True
    return (code, remember_device)
(cc=STDCALL, params={'SystemRoutineName': PUNICODE_STRING})
def hook_MmGetSystemRoutineAddress(ql: Qiling, address: int, params):
SystemRoutineName = params['SystemRoutineName']
routine_name = (SystemRoutineName and utils.read_punicode_string(ql, SystemRoutineName))
if routine_name:
for dll_name in ('ntoskrnl.exe', 'ntkrnlpa.exe', 'hal.dll'):
if (dll_name in ql.loader.import_address_table):
if (routine_name in ql.loader.import_address_table[dll_name]):
return ql.loader.import_address_table[dll_name][routine_name]
if (routine_name in hook_only_routine_address):
index = hook_only_routine_address.index(routine_name)
for dll_name in ('ntoskrnl.exe', 'ntkrnlpa.exe', 'hal.dll'):
image = ql.loader.get_image_by_name(dll_name)
if image:
new_function_address = ((image.base + index) + 1)
ql.loader.import_symbols[new_function_address] = {'name': SystemRoutineName.encode(), 'ordinal': (- 1)}
return new_function_address
return 0 |
class EquivalentRectangularIndex():
    """Equivalent rectangular index (ERI) of each geometry in a GeoDataFrame.

    ERI = sqrt(area / bbox_area) * (bbox_perimeter / perimeter), where bbox
    is the geometry's minimum rotated rectangle. The per-geometry result is
    stored in `self.series`.
    """

    def __init__(self, gdf, areas=None, perimeters=None):
        self.gdf = gdf

        # Resolve perimeters: a named column, the geometry length by default,
        # or an explicit array-like passed through unchanged.
        if isinstance(perimeters, str):
            perimeters = gdf[perimeters]
        elif perimeters is None:
            perimeters = gdf.geometry.length
        self.perimeters = perimeters

        # Resolve areas the same way.
        if isinstance(areas, str):
            areas = gdf[areas]
        elif areas is None:
            areas = gdf.geometry.area
        self.areas = areas

        # Minimum rotated rectangle of each geometry.
        bbox = shapely.minimum_rotated_rectangle(gdf.geometry)
        eri = np.sqrt(areas / bbox.area) * (bbox.length / perimeters)
        self.series = pd.Series(eri, index=gdf.index)
class InputFeedRNNDecoder(RNNDecoderBase):
    """RNN decoder with input feeding: the previous step's attentional output
    is concatenated to the current word embedding before entering the RNN.

    NOTE(review): the per-step concatenation forces a Python-level loop over
    time steps, so this decoder cannot use a fused multi-step RNN kernel.
    """

    def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
        """Run the decoder over all target time steps.

        Args:
            tgt: target embeddings input; indexed as (len, batch, ...) —
                presumably (tgt_len, batch, nfeats); confirm in RNNDecoderBase.
            memory_bank: encoder outputs, transposed to (batch, src_len, dim)
                before attention.
            memory_lengths: optional source lengths for attention masking.

        Returns:
            (dec_state, dec_outs, attns): the final hidden state, the list of
            per-step decoder outputs, and a dict of attention lists.
        """
        # Previous step's attentional output, fed back as extra RNN input.
        input_feed = self.state['input_feed'].squeeze(0)
        (input_feed_batch, _) = input_feed.size()
        (_, tgt_batch, _) = tgt.size()
        # Sanity check: input-feed batch must match the target batch.
        aeq(tgt_batch, input_feed_batch)
        dec_outs = []
        attns = {}
        # Pre-create the attention accumulators that this configuration uses.
        if (self.attn is not None):
            attns['std'] = []
        if ((self.copy_attn is not None) or self._reuse_copy_attn):
            attns['copy'] = []
        if self._coverage:
            attns['coverage'] = []
        emb = self.embeddings(tgt)
        assert (emb.dim() == 3)
        dec_state = self.state['hidden']
        coverage = (self.state['coverage'].squeeze(0) if (self.state['coverage'] is not None) else None)
        # Process one time step at a time; emb.split(1) yields (1, batch, dim).
        for emb_t in emb.split(1):
            # Concatenate the word embedding with the fed-back output.
            decoder_input = torch.cat([emb_t.squeeze(0), input_feed], 1)
            (rnn_output, dec_state) = self.rnn(decoder_input, dec_state)
            if self.attentional:
                (decoder_output, p_attn) = self.attn(rnn_output, memory_bank.transpose(0, 1), memory_lengths=memory_lengths)
                attns['std'].append(p_attn)
            else:
                decoder_output = rnn_output
            if (self.context_gate is not None):
                # Context gate mixes the RNN output with the attended output.
                decoder_output = self.context_gate(decoder_input, rnn_output, decoder_output)
            decoder_output = self.dropout(decoder_output)
            # This step's output becomes the next step's input feed.
            input_feed = decoder_output
            dec_outs += [decoder_output]
            if self._coverage:
                # NOTE(review): p_attn is only bound when self.attentional is
                # true, so coverage with a non-attentional decoder would raise
                # UnboundLocalError here — presumably coverage implies
                # attention; confirm upstream.
                coverage = (p_attn if (coverage is None) else (p_attn + coverage))
                attns['coverage'] += [coverage]
            if (self.copy_attn is not None):
                (_, copy_attn) = self.copy_attn(decoder_output, memory_bank.transpose(0, 1))
                attns['copy'] += [copy_attn]
            elif self._reuse_copy_attn:
                # Reuse standard attention as the copy attention.
                attns['copy'] = attns['std']
        return (dec_state, dec_outs, attns)

    def _build_rnn(self, rnn_type, input_size, hidden_size, num_layers, dropout):
        """Build a stacked step-wise RNN cell; SRU cannot be combined with
        input feeding."""
        assert (rnn_type != 'SRU'), "SRU doesn't support input feed! Please set -input_feed 0!"
        stacked_cell = (StackedLSTM if (rnn_type == 'LSTM') else StackedGRU)
        return stacked_cell(num_layers, input_size, hidden_size, dropout)

    def _input_size(self):
        """RNN input size: word embedding plus the fed-back hidden state."""
        return (self.embeddings.embedding_size + self.hidden_size)

    def update_dropout(self, dropout):
        # Propagate a new dropout probability to every dropout-bearing module.
        self.dropout.p = dropout
        self.rnn.dropout.p = dropout
        self.embeddings.update_dropout(dropout)
def test_track_iter_progress():
    """track_iter_progress renders a 3-wide progress bar while yielding the
    wrapped items, and the wrapped loop sees every item in order."""
    out = StringIO()
    results = [sleep_1s(value) for value in mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out)]
    assert (out.getvalue() == '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert (results == [1, 2, 3])
class FusedEmbeddingCollection(EmbeddingCollectionInterface, FusedOptimizerModule):
    """Unpooled embedding collection whose optimizer update is fused into the
    backward pass of the batched embedding lookup kernels.

    Tables are grouped by data type; each group is served by one
    _BatchedFusedEmbeddingLookups module. All tables must share a single
    embedding dimension. Only 'cuda' and 'meta' devices are supported.
    """

    def __init__(self, tables: List[EmbeddingConfig], optimizer_type: Type[torch.optim.Optimizer], optimizer_kwargs: Dict[(str, Any)], device: Optional[torch.device]=None, need_indices: bool=False, location: Optional[EmbeddingLocation]=None) -> None:
        """Build the fused lookup modules for `tables`.

        Args:
            tables: embedding table configs; all must share embedding_dim.
            optimizer_type: torch optimizer type to fuse into the backward.
            optimizer_kwargs: kwargs for that optimizer.
            device: target device; defaults to CPU, then asserted cuda/meta.
            need_indices: if True, forward() attaches the original lookup
                indices as JaggedTensor weights.
            location: embedding memory location; inferred from `device` when
                None.

        Raises:
            ValueError: if the optimizer cannot be fused, embedding dims
                differ, or no EmbeddingLocation can be determined.
        """
        super().__init__()
        self._optimizer_type = optimizer_type
        self._optimizer_kwargs = optimizer_kwargs
        # Translate the torch optimizer into its fused-kernel equivalent.
        emb_optim_and_kwargs = convert_optimizer_type_and_kwargs(optimizer_type, optimizer_kwargs)
        if (emb_optim_and_kwargs is None):
            raise ValueError(f'Cannot fuse optimizer_type={optimizer_type} with kwargs {optimizer_kwargs}')
        (emb_optim_type, emb_opt_kwargs) = emb_optim_and_kwargs
        # GPU-resident locations require a cuda (or meta) device.
        if (location in [EmbeddingLocation.DEVICE, EmbeddingLocation.MANAGED, EmbeddingLocation.MANAGED_CACHING]):
            assert ((device is not None) and (device.type in ['cuda', 'meta'])), f'Using location={location} requires device=cuda or meta'
        if (device is None):
            device = torch.device('cpu')
        assert (device.type in ['cuda', 'meta']), 'FusedEmbeddingCollection is only supported for device in [CUDA, meta] currently. There are plans to support device=CPU.'
        # Infer a default location from the device type when not specified.
        if (location is None):
            if (device.type in ['cpu', 'meta']):
                location = EmbeddingLocation.HOST
            elif (device.type == 'cuda'):
                location = EmbeddingLocation.DEVICE
            else:
                raise ValueError('EmbeddingLocation could not be set')
        self._embedding_configs = tables
        self._need_indices: bool = need_indices
        self._embedding_dim: int = (- 1)
        self._emb_modules: List[nn.Module] = []
        # Tables grouped by data type; one fused lookup module per group.
        self._key_to_tables: Dict[(DataType, List[EmbeddingConfig])] = defaultdict(list)
        seen_features = set()
        # Feature names served by more than one table; forward() suffixes
        # their outputs with the table name to disambiguate.
        self._shared_features: Set[str] = set()
        for table in tables:
            key = table.data_type
            self._key_to_tables[key].append(table)
            # Enforce one embedding dimension across all tables.
            if (self._embedding_dim == (- 1)):
                self._embedding_dim = table.embedding_dim
            elif (self._embedding_dim != table.embedding_dim):
                raise ValueError((('All tables in a EmbeddingCollection are required to have same embedding dimension.' + f" Violating case: {table}'s embedding_dim {table.embedding_dim} !=") + f' {self._embedding_dim}'))
            for feature in table.feature_names:
                if (feature in seen_features):
                    self._shared_features.add(feature)
                else:
                    seen_features.add(feature)
        optims = []
        for (key, tables) in self._key_to_tables.items():
            data_type = key
            emb_module = _BatchedFusedEmbeddingLookups(cast(List[BaseEmbeddingConfig], tables), data_type=data_type, pooling=PoolingType.NONE, optimizer_type=emb_optim_type, optimizer_kwargs=emb_opt_kwargs, device=device, embedding_location=location)
            self._emb_modules.append(emb_module)
            params: Dict[(str, torch.Tensor)] = {}
            for (param_key, weight) in emb_module.fused_optimizer().params.items():
                params[f'embeddings.{param_key}'] = weight
            optims.append(('', emb_module.fused_optimizer()))
        # Expose the per-module fused optimizers as one combined optimizer.
        self._optim: CombinedOptimizer = CombinedOptimizer(optims)
        self._embedding_names: List[str] = list(itertools.chain(*get_embedding_names_by_table(self._embedding_configs)))
        self._embedding_names_by_table: List[List[str]] = get_embedding_names_by_table(self._embedding_configs)
        # Mirror each table's weight under `embeddings.<table>.weight` for
        # state_dict compatibility.
        self.embeddings: nn.ModuleDict = nn.ModuleDict()
        for ((_key, tables), emb_module) in zip(self._key_to_tables.items(), self._emb_modules):
            for (embedding_config, weight) in zip(tables, emb_module.split_embedding_weights()):
                self.embeddings[embedding_config.name] = torch.nn.Module()
                self.embeddings[embedding_config.name].register_parameter('weight', torch.nn.Parameter(weight))

    def forward(self, features: KeyedJaggedTensor) -> Dict[(str, JaggedTensor)]:
        """Look up embeddings for every feature in `features`.

        Returns:
            Dict mapping feature name (table-name-suffixed for shared
            features) to a JaggedTensor of per-id embeddings; when
            need_indices is set, the original indices ride along as the
            JaggedTensor weights.
        """
        assert (features is not None)
        feature_dict = features.to_dict()
        feature_embeddings: Dict[(str, JaggedTensor)] = {}
        # One batched lookup per data-type group / fused module.
        for (emb_op, (_key, tables)) in zip(self._emb_modules, self._key_to_tables.items()):
            indicies = []
            lengths = []
            offsets = []
            feature_names = []
            feature_lengths = []
            feature_values = []
            splits = []
            for table in tables:
                for feature in table.feature_names:
                    f = feature_dict[feature]
                    indicies.append(f.values())
                    lengths.append(f.lengths())
                    # Disambiguate features served by multiple tables.
                    if (feature in self._shared_features):
                        feature = f'{feature}{table.name}'
                    feature_names.append(feature)
                    feature_values.append(f.values())
                    feature_lengths.append(f.lengths())
                    # Rows this feature contributes — used below to split the
                    # concatenated lookup result back per feature.
                    splits.append(torch.sum(feature_lengths[(- 1)]))
            # Concatenate all features' indices for a single batched lookup.
            indicies = torch.cat(indicies)
            lengths = torch.cat(lengths)
            offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            lookups = emb_op(indicies.int(), offsets.int(), weights=None)
            lookups = torch.split(lookups, split_size_or_sections=splits)
            for (feature, lookup, feature_length, values) in zip(feature_names, lookups, feature_lengths, feature_values):
                feature_embeddings[feature] = JaggedTensor(values=lookup, lengths=feature_length, weights=(values if self.need_indices() else None))
        return feature_embeddings

    def _get_name(self) -> str:
        # Friendly module name used by nn.Module.__repr__.
        return 'FusedEmbeddingCollection'

    def embedding_configs(self) -> List[EmbeddingConfig]:
        """Return the table configs this collection was built from."""
        return self._embedding_configs

    def embedding_names_by_table(self) -> List[List[str]]:
        """Return embedding (feature) names grouped per table."""
        return self._embedding_names_by_table

    def embedding_dim(self) -> int:
        """Return the shared embedding dimension of all tables."""
        return self._embedding_dim

    def optimizer_type(self) -> Type[torch.optim.Optimizer]:
        """Return the (unfused) optimizer type requested at construction."""
        return self._optimizer_type

    def optimizer_kwargs(self) -> Dict[(str, Any)]:
        """Return the optimizer kwargs requested at construction."""
        return self._optimizer_kwargs

    def fused_optimizer(self) -> KeyedOptimizer:
        """Return the combined fused optimizer over all lookup modules."""
        return self._optim

    def need_indices(self) -> bool:
        """Whether forward() attaches lookup indices as JaggedTensor weights."""
        return self._need_indices
class SingleFileConstraint(ValidationConstraint):
    """Constraint that rejects inputs containing more than one file."""

    def __init__(self, error_message=None):
        # Fall back to the default message when none (or an empty one) given.
        error_message = error_message if error_message else _('$label can only accept a single file')
        super().__init__(error_message=error_message)

    def validate_input(self, unparsed_input):
        # The constraint object doubles as the raised exception.
        if len(unparsed_input) > 1:
            raise self
def run(train_path, test_path):
    """Train the image classification model, evaluate it, and plot curves.

    Reads image/train/model settings from the config, trains with on-the-fly
    augmentation (resuming from 'checkpoint.chk' when present), evaluates on
    the test set, saves the model, and writes the model diagram and
    loss/accuracy plots to disk.

    Args:
        train_path: directory of training images.
        test_path: directory of test images.
    """
    height = int(get_option('image', 'height'))
    width = int(get_option('image', 'width'))
    classes = int(get_option('image', 'classes'))
    epochs = int(get_option('train', 'epochs'))
    batch_size = int(get_option('train', 'batch_size'))
    save_path = get_option('model', 'save_path')
    # NOTE(review): 'heigth' matches ImageModel.build's (misspelled) keyword —
    # do not "fix" it here without also changing the callee.
    model = ImageModel.build(width=width, heigth=height, classes=classes)
    init_lr = 0.001
    decay = 0.0
    opt = Adam(lr=init_lr, decay=decay)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    train_matrix = image_matrix(train_path)
    train_label = image_label(train_path)
    test_matrix = image_matrix(test_path)
    test_label = image_label(test_path)
    # Light augmentation to reduce overfitting.
    datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)
    # Resume from the last checkpoint if one exists.
    if os.path.exists('checkpoint.chk'):
        model.load_weights('checkpoint.chk')
    result = model.fit_generator(datagen.flow(train_matrix, train_label, batch_size=batch_size), validation_data=(test_matrix, test_label), epochs=epochs, verbose=1, callbacks=[ModelCheckpoint('checkpoint.chk', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)])
    score = model.evaluate(test_matrix, test_label, batch_size=32)
    print((' %s' % score))
    model.save(save_path)
    plot_model(model, to_file='model.png')
    # Plot loss/accuracy curves for train and validation.
    # (Removed: the original computed unused locals `n` and `aranges` here.)
    plt.style.use('ggplot')
    plt.figure()
    plt.plot(result.history['loss'], label='train_loss')
    plt.plot(result.history['acc'], label='train_acc')
    plt.plot(result.history['val_loss'], label='val_loss')
    plt.plot(result.history['val_acc'], label='val_acc')
    plt.title('Image recognition')
    plt.xlabel('Epochs')
    plt.ylabel('loss/acc')
    plt.legend(loc='lower left')
    plt.savefig('reco')
    tellMeResult(score)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.