code stringlengths 281 23.7M |
|---|
class Geod_NaN_Issue112_Test(unittest.TestCase):
    """Regression test (issue #112): Geod.inv/fwd must propagate NaN for
    NaN inputs and for out-of-range latitudes (|lat| > 90).

    NaN-ness is asserted via the IEEE-754 property that NaN is the only
    value unequal to itself: ``x != x`` is True iff x is NaN, and
    ``x == x`` asserts that x is NOT NaN.
    """
    def test_geod_nans(self):
        g = Geod(ellps='clrk66')
        # inv() with a NaN latitude: all three outputs must be NaN.
        (azi1, azi2, s12) = g.inv(43, 10, float('nan'), 20)
        self.assertTrue((azi1 != azi1))
        self.assertTrue((azi2 != azi2))
        self.assertTrue((s12 != s12))
        # inv() with a NaN longitude: all three outputs must be NaN.
        (azi1, azi2, s12) = g.inv(43, 10, 53, float('nan'))
        self.assertTrue((azi1 != azi1))
        self.assertTrue((azi2 != azi2))
        self.assertTrue((s12 != s12))
        # inv() with latitude 91 (out of range): treated as invalid -> NaN.
        (azi1, azi2, s12) = g.inv(43, 10, 53, 91)
        self.assertTrue((azi1 != azi1))
        self.assertTrue((azi2 != azi2))
        self.assertTrue((s12 != s12))
        # fwd() with a NaN azimuth.
        (lon2, lat2, azi2) = g.fwd(43, 10, float('nan'), 1000000.0)
        self.assertTrue((lon2 != lon2))
        self.assertTrue((lat2 != lat2))
        self.assertTrue((azi2 != azi2))
        # fwd() with a NaN distance.
        (lon2, lat2, azi2) = g.fwd(43, 10, 20, float('nan'))
        self.assertTrue((lon2 != lon2))
        self.assertTrue((lat2 != lat2))
        self.assertTrue((azi2 != azi2))
        # fwd() with a NaN latitude.
        (lon2, lat2, azi2) = g.fwd(43, float('nan'), 20, 1000000.0)
        self.assertTrue((lon2 != lon2))
        self.assertTrue((lat2 != lat2))
        self.assertTrue((azi2 != azi2))
        # fwd() with latitude 91 (out of range).
        (lon2, lat2, azi2) = g.fwd(43, 91, 20, 1000000.0)
        self.assertTrue((lon2 != lon2))
        self.assertTrue((lat2 != lat2))
        self.assertTrue((azi2 != azi2))
        # fwd() with a NaN longitude: only the output longitude is NaN;
        # latitude and azimuth do not depend on the input longitude.
        (lon2, lat2, azi2) = g.fwd(float('nan'), 10, 20, 1000000.0)
        self.assertTrue((lon2 != lon2))
        self.assertTrue((lat2 == lat2))
        self.assertTrue((azi2 == azi2))
def get_environment_pseudosmiles_from_smarts(smarts):
    """Convert an environment SMARTS string into a pseudo-SMILES string.

    Each atom term matched by ``_smarts_atom_pat`` is rewritten in place:
    the element expression may be mapped through the aromatic/aliphatic
    symbol lookup tables, explicit H counts and charge are preserved, and
    '(~*)' stubs are appended for connections that the pattern declares
    but that are not present as explicit bonds.  Matches are processed
    right-to-left so earlier match offsets stay valid while the string is
    spliced.
    """
    matches = list(_smarts_atom_pat.finditer(smarts))
    matches.reverse()
    pat = Chem.MolFromSmarts(smarts)
    # Sanity check: one regex match per atom in the parsed pattern.
    assert (pat.GetNumAtoms() == len(matches)), (smarts, matches)
    smiles = smarts
    # Iterate atoms in the same (reversed) order as the regex matches.
    for (match, pat_atom) in zip(matches, reversed(pat.GetAtoms())):
        bonds = pat_atom.GetBonds()
        # Match groups (inferred from usage -- confirm against
        # _smarts_atom_pat): 1=element term, 2=total connection count,
        # 3=hydrogen count, 4=charge, 5=attachment label, 6=ring closures.
        hcount = int(match.group(3))
        # Connections not accounted for by explicit H's or drawn bonds.
        extra_count = ((int(match.group(2)) - hcount) - len(bonds))
        element_term = match.group(1)
        if (element_term[0] == '#'):
            # Atomic-number term like '#6': use an aromatic symbol if any
            # bond in the pattern is aromatic (':'), else aliphatic...
            if any(((bond.GetSmarts() == ':') for bond in bonds)):
                element_term = _aromatic_symbol_lookup[element_term]
            elif (extra_count < 2):
                element_term = _aliphatic_symbol_lookup[element_term]
            else:
                # ...unless there are 2+ extra connections: keep '#n' as-is.
                pass
        else:
            pass
        if (hcount == 0):
            hydrogens = ''
        elif (hcount == 1):
            hydrogens = 'H'
        else:
            hydrogens = ('H' + str(hcount))
        charge = match.group(4)
        # '+0' means uncharged; omit it from the output term.
        if (charge == '+0'):
            charge = ''
        attachment_label = match.group(5)
        if (attachment_label is None):
            attachment_label = ''
        closures = match.group(6)
        # Rebuild the bracket atom and append one '(~*)' per extra connection.
        smiles_term = ((((((('[' + element_term) + hydrogens) + charge) + attachment_label) + ']') + closures) + ('(~*)' * extra_count))
        smiles = ((smiles[:match.start()] + smiles_term) + smiles[match.end():])
    return smiles
def patch_forward_method(func, src_type, dst_type, convert_output=True):
    """Wrap ``func`` so its tensor arguments are cast from ``src_type`` to
    ``dst_type`` before the call and, when ``convert_output`` is true, the
    result is cast back from ``dst_type`` to ``src_type``.
    """
    def new_forward(*args, **kwargs):
        cast_args = cast_tensor_type(args, src_type, dst_type)
        cast_kwargs = cast_tensor_type(kwargs, src_type, dst_type)
        result = func(*cast_args, **cast_kwargs)
        if not convert_output:
            return result
        return cast_tensor_type(result, dst_type, src_type)
    return new_forward
class FindBar():
    """A hideable Qt find/search toolbar: an editable combo box with
    history, a pattern/regular-expression checkbox, next/prev arrow
    buttons, an indeterminate busy progress bar and a close button.
    The actual searching is delegated to the supplied ``finder`` object.
    """
    def __init__(self, parent, finder, is_reg_expr=False):
        self.finder = finder
        self.context = []
        # Last submitted search, used by NextPrev() to detect changed input.
        self.last_value = None
        self.last_pattern = None
        label = QLabel('Find:')
        label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.textbox = QComboBox()
        self.textbox.setEditable(True)
        self.textbox.currentIndexChanged.connect(self.ValueChanged)
        # Range (0, 0) makes the progress bar indeterminate ("busy" mode).
        self.progress = QProgressBar()
        self.progress.setRange(0, 0)
        self.progress.hide()
        if is_reg_expr:
            self.pattern = QCheckBox('Regular Expression')
        else:
            self.pattern = QCheckBox('Pattern')
        self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.next_button = QToolButton()
        self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
        self.next_button.released.connect((lambda : self.NextPrev(1)))
        self.prev_button = QToolButton()
        self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
        self.prev_button.released.connect((lambda : self.NextPrev((- 1))))
        self.close_button = QToolButton()
        self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
        self.close_button.released.connect(self.Deactivate)
        self.hbox = QHBoxLayout()
        self.hbox.setContentsMargins(0, 0, 0, 0)
        self.hbox.addWidget(label)
        self.hbox.addWidget(self.textbox)
        self.hbox.addWidget(self.progress)
        self.hbox.addWidget(self.pattern)
        self.hbox.addWidget(self.next_button)
        self.hbox.addWidget(self.prev_button)
        self.hbox.addWidget(self.close_button)
        self.bar = QWidget()
        self.bar.setLayout(self.hbox)
        self.bar.hide()
    def Widget(self):
        """Return the container widget so callers can embed the bar."""
        return self.bar
    def Activate(self):
        """Show the bar and focus the text box with its text selected."""
        self.bar.show()
        self.textbox.lineEdit().selectAll()
        self.textbox.setFocus()
    def Deactivate(self):
        """Hide the bar."""
        self.bar.hide()
    def Busy(self):
        """Switch to the 'search in progress' state."""
        self.textbox.setEnabled(False)
        self.pattern.hide()
        self.next_button.hide()
        self.prev_button.hide()
        self.progress.show()
    def Idle(self):
        """Restore the interactive state after a search finishes."""
        self.textbox.setEnabled(True)
        self.progress.hide()
        self.pattern.show()
        self.next_button.show()
        self.prev_button.show()
    def Find(self, direction):
        """Run a search and remember what was searched for."""
        value = self.textbox.currentText()
        pattern = self.pattern.isChecked()
        self.last_value = value
        self.last_pattern = pattern
        self.finder.Find(value, direction, pattern, self.context)
    def ValueChanged(self):
        """Combo-box selection changed: sync the checkbox state and search.

        Each history item stores the pattern-checkbox state as its item
        data, so re-selecting an old search restores how it was run.
        """
        value = self.textbox.currentText()
        pattern = self.pattern.isChecked()
        index = self.textbox.currentIndex()
        data = self.textbox.itemData(index)
        # NOTE(review): ``data == None`` should be ``data is None`` (PEP 8);
        # behaviour is the same for plain Python item data.
        if (data == None):
            self.textbox.setItemData(index, pattern)
        else:
            self.pattern.setChecked(data)
        self.Find(0)
    def NextPrev(self, direction):
        """Search again, adding new search strings to the history."""
        value = self.textbox.currentText()
        pattern = self.pattern.isChecked()
        if (value != self.last_value):
            index = self.textbox.findText(value)
            if (index < 0):
                index = self.textbox.count()
                self.textbox.addItem(value, pattern)
                self.textbox.setCurrentIndex(index)
                # setCurrentIndex() triggers ValueChanged(), which searches.
                return
            else:
                self.textbox.setItemData(index, pattern)
        elif (pattern != self.last_pattern):
            index = self.textbox.currentIndex()
            self.textbox.setItemData(index, pattern)
        self.Find(direction)
    def NotFound(self):
        """Tell the user the current search string was not found."""
        QMessageBox.information(self.bar, 'Find', (("'" + self.textbox.currentText()) + "' not found"))
def get_placeholder_loop(placeholder_string, embedder, is_sd):
    """Interactively prompt for a replacement placeholder string.

    Keeps asking until the user enters a string that maps to exactly one
    tokenizer token; returns ``(replacement_string, token)``.
    """
    replacement = None
    while True:
        if replacement is None:
            replacement = input(f'Placeholder string {placeholder_string} was already used. Please enter a replacement string: ')
        else:
            replacement = input(f"Placeholder string '{replacement}' maps to more than a single token. Please enter another string: ")
        if is_sd:
            token = get_clip_token_for_string(embedder.tokenizer, replacement)
        else:
            token = get_bert_token_for_string(embedder.tknz_fn, replacement)
        if token is not None:
            return (replacement, token)
class Document(_BaseThumbedMedium):
    """A general file with optional thumbnail, file name and MIME type
    (thin data wrapper over the base thumbnailed-media class).
    """
    # Restrict instances to these extra attributes (plus base-class slots).
    __slots__ = ('file_name', 'mime_type')
    def __init__(self, file_id: str, file_unique_id: str, file_name: Optional[str]=None, mime_type: Optional[str]=None, file_size: Optional[int]=None, thumbnail: Optional[PhotoSize]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(file_id=file_id, file_unique_id=file_unique_id, file_size=file_size, thumbnail=thumbnail, api_kwargs=api_kwargs)
        # Instances are frozen after construction; _unfrozen() (from the
        # base class) temporarily permits attribute assignment.
        with self._unfrozen():
            self.mime_type: Optional[str] = mime_type
            self.file_name: Optional[str] = file_name
def ql_qnx_msg_io_lseek(ql: Qiling, coid, smsg, sparts, rmsg, rparts, *args, **kw):
    """Handle a QNX _IO_LSEEK message: validate the 16-byte request read
    from ``smsg``, perform the seek on the connection's file descriptor
    and write the 8-byte result back to ``rmsg``.  Returns 0 on success.
    """
    # Request layout: u16 type, u16 combine_len, s16 whence, u16 zero, u64 offset.
    msg_type, combine_len, whence, zero, offset = unpack('<HHhHQ', ql.mem.read(smsg, 16))
    assert (c_int32(sparts).value, c_int32(rparts).value) == (-16, -8), 'input/output sizes are wrong'
    assert (msg_type, combine_len, zero) == (265, 16, 0), 'io_stat message is wrong'
    if whence not in lseek_whence:
        raise NotImplementedError('unknown lseek direction')
    fd = ql.os.connections[coid].fd
    ql.log.debug(f'msg_io_lseek(coid = {coid} => fd = {fd}, offset = {offset}, whence = {lseek_whence[whence]})')
    result = ql_syscall_lseek(ql, fd, offset, whence)
    # Reply is the resulting file offset as a 64-bit value.
    ql.mem.write_ptr(rmsg, result, 8)
    return 0
def silent(input_filepath: Union[(str, Path)], threshold: float=0.001) -> bool:
    """Return True if the audio file's mean norm is below ``threshold``.

    Parameters
    ----------
    input_filepath : path to the audio file (validated first).
    threshold : mean-norm level below which the file counts as silent.

    Returns True when the mean norm is NaN (no measurable signal) or is
    strictly less than ``threshold``.
    """
    validate_input_file(input_filepath)
    stat_dictionary = stat(input_filepath)
    mean_norm = stat_dictionary['Mean norm']
    # BUG FIX: the original used ``mean_norm is not float('nan')`` — an
    # identity comparison against a freshly-created float is always True,
    # so the NaN branch could never trigger.  Per IEEE 754, NaN is the
    # only value unequal to itself.
    if mean_norm != mean_norm:  # NaN check
        return True
    return mean_norm < threshold
class UniformPolicy(BasePolicy):
    """Policy that samples actions uniformly from ``action_range``,
    independently of the observations (TF1-style Keras models).
    """
    def __init__(self, input_shapes, output_shape, action_range=((- 1.0), 1.0)):
        super(UniformPolicy, self).__init__()
        # Name-mangled call to Serializable.__initialize: records the
        # constructor arguments for later (de)serialization.
        self._Serializable__initialize(locals())
        self.inputs = [tf.keras.layers.Input(shape=input_shape) for input_shape in input_shapes]
        self._action_range = action_range
        # Concatenate all observation inputs along the last axis.
        x = tf.keras.layers.Lambda((lambda x: tf.concat(x, axis=(- 1))))(self.inputs)
        # Uniform samples in [low, high); only the batch size is read from x.
        actions = tf.keras.layers.Lambda((lambda x: tf.random.uniform((tf.shape(x)[0], output_shape[0]), *action_range)))(x)
        self.actions_model = tf.keras.Model(self.inputs, actions)
        self.actions_input = tf.keras.Input(shape=output_shape)
        # Constant per-batch-row value tiled to the batch size.
        # NOTE(review): this computes log((high - low) / 2), i.e. +log of the
        # half-width, not the uniform log-density -log(high - low) — confirm
        # the intended convention.  ``tf.log`` is the TF1.x API.
        log_pis = tf.keras.layers.Lambda((lambda x: tf.tile(tf.log([((action_range[1] - action_range[0]) / 2.0)])[None], (tf.shape(x)[0], 1))))(self.actions_input)
        self.log_pis_model = tf.keras.Model((*self.inputs, self.actions_input), log_pis)
    def get_weights(self):
        # The policy has no trainable state.
        return []
    def set_weights(self, *args, **kwargs):
        # No-op: nothing to set.
        return
    def trainable_variables(self):
        return []
    def reset(self):
        pass
    def actions(self, conditions):
        """Symbolic action samples for the given observation tensors."""
        return self.actions_model(conditions)
    def log_pis(self, conditions, actions):
        """Symbolic (constant) log-pi values for the given actions."""
        return self.log_pis_model([*conditions, actions])
    def actions_np(self, conditions):
        """Numpy action samples for the given observation arrays."""
        return self.actions_model.predict(conditions)
    def log_pis_np(self, conditions, actions):
        """Numpy (constant) log-pi values for the given actions."""
        return self.log_pis_model.predict([*conditions, actions])
    def get_diagnostics(self, conditions):
        # No diagnostics for a stateless uniform policy.
        return OrderedDict({})
class UpvoteEntry(Action, Mutation):
    """Mutation toggling the sender's upvote on an entry.

    Branches, in order: (1) entry already upvoted -> remove the upvote and
    revert its karma effect; (2) entry currently downvoted -> switch the
    vote to an upvote; (3) vote limit exceeded -> return the reason as
    feedback; (4) otherwise record a new upvote and apply karma changes.
    """
    def mutate(_root, entry, sender, upvoted, downvoted, in_upvoted, in_downvoted, constants, exceeded, reason):
        response = UpvoteEntry(feedback=None)
        # NOTE(review): ``karma`` is a baseline unpacked from ``constants``
        # and the assignments below *overwrite* the users' karma with
        # ``karma ± rate`` rather than incrementing their current value —
        # confirm this is intended.
        (karma, cost, downvote_rate, upvote_rate) = constants
        if in_upvoted:
            # Toggling an existing upvote off: refund the sender's cost and
            # take back the author's gain.
            upvoted.remove(entry)
            if sender.is_karma_eligible:
                sender.karma = (karma + cost)
                entry.author.karma = (karma - upvote_rate)
                sender.save(update_fields=['karma'])
                entry.author.save(update_fields=['karma'])
            return response
        if in_downvoted:
            # Switching a downvote to an upvote: author recovers the
            # downvote penalty and gains the upvote reward.
            downvoted.remove(entry)
            upvoted.add(entry)
            if sender.is_karma_eligible:
                entry.author.karma = (karma + (downvote_rate + upvote_rate))
                entry.author.save(update_fields=['karma'])
            return response
        if exceeded:
            # Vote rate limit hit; surface the reason to the client.
            return UpvoteEntry(feedback=reason)
        # Fresh upvote: sender pays the cost, author gains the reward.
        upvoted.add(entry)
        if sender.is_karma_eligible:
            sender.karma = (karma - cost)
            entry.author.karma = (karma + upvote_rate)
            sender.save(update_fields=['karma'])
            entry.author.save(update_fields=['karma'])
        return response
class ToggleButton(PushButton):
    """A push button that latches: each press flips its pressed state and
    dispatches an ``on_toggle`` event with the new state.
    """
    def _get_release_image(self, x, y):
        # Hover image while the pointer is still over the button,
        # otherwise the normal (depressed) image.
        if self._check_hit(x, y):
            return self._hover_img
        return self._depressed_img
    def on_mouse_press(self, x, y, buttons, modifiers):
        if not self.enabled:
            return
        if not self._check_hit(x, y):
            return
        # Flip the latched state and show the matching sprite image.
        self._pressed = not self._pressed
        if self._pressed:
            self._sprite.image = self._pressed_img
        else:
            self._sprite.image = self._get_release_image(x, y)
        self.dispatch_event('on_toggle', self._pressed)
    def on_mouse_release(self, x, y, buttons, modifiers):
        # While latched down, releasing the mouse keeps the pressed image.
        if (not self.enabled) or self._pressed:
            return
        self._sprite.image = self._get_release_image(x, y)
class UTCDateTimeAttribute(Attribute[datetime]):
    """Attribute storing a datetime as a fixed-width UTC string."""
    attr_type = STRING

    def serialize(self, value):
        """Serialize ``value`` to a 31-character UTC datetime string.

        Naive datetimes are assumed to already be in UTC.
        """
        if (value.tzinfo is None):
            value = value.replace(tzinfo=timezone.utc)
        fmt = value.astimezone(timezone.utc).strftime(DATETIME_FORMAT).zfill(31)
        return fmt

    def deserialize(self, value):
        """Parse a stored string back into an aware UTC datetime."""
        return self._fast_parse_utc_date_string(value)

    # BUG FIX: this helper takes only ``date_string`` and must be a
    # @staticmethod — without the decorator, the call
    # ``self._fast_parse_utc_date_string(value)`` would bind ``self`` to
    # ``date_string`` and raise a TypeError for the extra argument.
    @staticmethod
    def _fast_parse_utc_date_string(date_string: str) -> datetime:
        """Fast-path parser for strings produced by :meth:`serialize`.

        Validates the fixed layout by position instead of calling
        ``strptime``; raises ValueError on any mismatch.
        """
        _int = int  # localize for speed in the hot path
        try:
            date_string = date_string.zfill(31)
            if ((len(date_string) != 31) or (date_string[4] != '-') or (date_string[7] != '-') or (date_string[10] != 'T') or (date_string[13] != ':') or (date_string[16] != ':') or (date_string[19] != '.') or (date_string[26:31] != '+0000')):
                raise ValueError("Datetime string '{}' does not match format '{}'".format(date_string, DATETIME_FORMAT))
            return datetime(_int(date_string[0:4]), _int(date_string[5:7]), _int(date_string[8:10]), _int(date_string[11:13]), _int(date_string[14:16]), _int(date_string[17:19]), _int(date_string[20:26]), timezone.utc)
        except (TypeError, ValueError):
            raise ValueError("Datetime string '{}' does not match format '{}'".format(date_string, DATETIME_FORMAT))
def create_repitched_txt_from_ultrastar_data(input_file: str, note_numbers: list[int], output_repitched_ultrastar: str) -> None:
    """Rewrite an UltraStar txt file with new pitches for the normal notes.

    Parameters
    ----------
    input_file : path of the source UltraStar txt file.
    note_numbers : new pitch value for each normal note, in file order.
    output_repitched_ultrastar : destination path for the rewritten file.
    """
    # BUG FIX: the original used a plain string here, so the literal text
    # '{PRINT_ULTRASTAR}' and '{input_file}' was printed; it must be an f-string.
    print(f'{PRINT_ULTRASTAR} Creating repitched ultrastar txt -> {input_file}_repitch.txt')
    with open(input_file, 'r', encoding=FILE_ENCODING) as file:
        txt = file.readlines()
    i = 0
    with open(output_repitched_ultrastar, 'w', encoding=FILE_ENCODING) as file:
        for line in txt:
            if line.startswith(f'#{UltrastarTxtNoteTypeTag.NORMAL} '):
                # Split into alternating non-space/space runs so original
                # spacing is preserved exactly when the parts are rejoined.
                parts = re.findall('\\S+|\\s+', line)
                # parts[6] is the pitch field (tokens and separators alternate).
                parts[6] = str(note_numbers[i])
                delimiter = ''
                file.write(delimiter.join(parts))
                i += 1
            else:
                file.write(line)
# NOTE(review): the bare name below looks like the residue of a mangled
# decorator (e.g. a ``@pytest.mark...`` line referencing ``_layout_config``
# lost its prefix during extraction) — restore it from the original source.
_layout_config
def test_window_types(manager):
    """Dialog windows float and keep focus; notifications also float but
    must not steal focus from the dialog."""
    if ((manager.backend.name == 'wayland') and (not has_wayland_notifications)):
        pytest.skip('Notification tests for Wayland need gtk-layer-shell')
    manager.test_window('one')
    # A window opened with floating=True should take focus.
    manager.test_window('dialog', floating=True)
    assert_focused(manager, 'dialog')
    # Opening a notification must leave focus on the dialog.
    manager.test_notification('notification')
    assert (manager.c.group.info()['focus'] == 'dialog')
    # Both special windows must be floating.
    for window in manager.c.windows():
        if (window['name'] in ('dialog', 'notification')):
            assert window['floating']
def dual_ascent_step(model, X, lambda1, lambda2, rho, alpha, h, rho_max):
    """One dual-ascent step of an augmented-Lagrangian scheme (NOTEARS-style).

    Minimizes reconstruction loss + acyclicity penalty + L1/L2 regularizers
    over the model parameters with L-BFGS-B, multiplying the penalty weight
    ``rho`` by 10 until the acyclicity value ``h_new`` drops to at most
    0.25 * h (or ``rho_max`` is reached), then updates the multiplier
    ``alpha``.  Returns ``(rho, alpha, h_new)``.
    """
    h_new = None
    optimizer = LBFGSBScipy(model.parameters())
    X_torch = torch.from_numpy(X)
    while (rho < rho_max):
        # Closure re-evaluated by the optimizer; it reads the *current*
        # value of ``rho`` from the enclosing scope on every call.
        def closure():
            optimizer.zero_grad()
            X_hat = model(X_torch)
            loss = squared_loss(X_hat, X_torch)
            h_val = model.h_func()
            # Augmented-Lagrangian terms: 0.5*rho*h^2 + alpha*h.
            penalty = ((((0.5 * rho) * h_val) * h_val) + (alpha * h_val))
            l2_reg = ((0.5 * lambda2) * model.l2_reg())
            l1_reg = (lambda1 * model.fc1_l1_reg())
            primal_obj = (((loss + penalty) + l2_reg) + l1_reg)
            primal_obj.backward()
            return primal_obj
        optimizer.step(closure)
        with torch.no_grad():
            h_new = model.h_func().item()
        # Insufficient progress on acyclicity: strengthen the penalty.
        if (h_new > (0.25 * h)):
            rho *= 10
        else:
            break
    # Dual update of the Lagrange multiplier.
    alpha += (rho * h_new)
    return (rho, alpha, h_new)
class PresetChannel(Channel):
    """Channel exposing the load/tune capacity presets of the instrument.

    NOTE(review): ``values = values`` rebinds a module-level ``values``
    object as a class attribute — confirm the shadowing is intentional.
    """
    values = values
    # Both controls issue the same query ('GU...') and differ only in which
    # reply bytes they decode (d[2:4] vs d[4:6]) and in the set command's
    # function byte (\x01 vs \x02).  Valid values are whole percents 0-100.
    load_capacity = Instrument.control('GU\x00{ch:c}\x00\x00', 'TD{ch:c}\x01\x00%c', 'Control the percentage of full-scale value of the load capacity preset.', preprocess_reply=(lambda d: struct.unpack('>H', d[2:4])), validator=strict_discrete_set, values=range(101))
    tune_capacity = Instrument.control('GU\x00{ch:c}\x00\x00', 'TD{ch:c}\x02\x00%c', 'Control the percentage of full-scale value of the tune capacity preset.', preprocess_reply=(lambda d: struct.unpack('>H', d[4:6])), validator=strict_discrete_set, values=range(101))
def check_accumulator_overflow(model: torch.nn.Module, quant_bw: int, accum_bw: int):
    """Scan every Conv2d layer of ``model`` for possible accumulator overflow.

    For each convolution, the fraction of the ``accum_bw``-bit accumulator
    range used at ``quant_bw``-bit quantization is computed from the layer
    weights; layers that exceed the range are logged.  Returns a tuple of
    (name of the layer using the most accumulator range, that fraction).
    """
    worst_usage = 0
    worst_layer = None
    for layer_name, layer in model.named_modules():
        if not isinstance(layer, torch.nn.Conv2d):
            continue
        exceeded, usage = get_conv_accum_bounds(layer.weight.detach().numpy(), quant_bw, accum_bw)
        if usage > worst_usage:
            worst_usage = usage
            worst_layer = layer_name
        if exceeded:
            logger.info('Possible accumulator overflow for layer: %s', layer_name)
    # usage >= 1 means some layer could overflow the accumulator.
    if worst_usage < 1:
        logger.info('No overflow detected. Layer %s had the most accumulator range used: %f%%', worst_layer, worst_usage * 100)
    else:
        logger.info('Overflow detected. Layer %s had the most accumulator range used: %f%%', worst_layer, worst_usage * 100)
    return (worst_layer, worst_usage)
class PuzzleWidget(QWidget):
    """Drag-and-drop jigsaw board: a 400x400 widget divided into 80x80
    squares.

    Pieces are exchanged via the custom MIME type 'image/x-puzzle-piece'
    (a QPixmap followed by a QPoint streamed through QDataStream).  When
    all 25 pieces sit on their home squares, ``puzzleCompleted`` is emitted.
    """
    puzzleCompleted = pyqtSignal()

    def __init__(self, parent=None):
        super(PuzzleWidget, self).__init__(parent)
        # Parallel lists: pixmap, board rect and home grid location per piece.
        self.piecePixmaps = []
        self.pieceRects = []
        self.pieceLocations = []
        self.highlightedRect = QRect()
        self.inPlace = 0  # number of pieces currently on their home square
        self.setAcceptDrops(True)
        self.setMinimumSize(400, 400)
        self.setMaximumSize(400, 400)

    def clear(self):
        """Remove every piece and repaint an empty board."""
        self.pieceLocations = []
        self.piecePixmaps = []
        self.pieceRects = []
        self.highlightedRect = QRect()
        self.inPlace = 0
        self.update()

    def dragEnterEvent(self, event):
        """Accept drags only for the puzzle-piece MIME type."""
        if event.mimeData().hasFormat('image/x-puzzle-piece'):
            event.accept()
        else:
            event.ignore()

    def dragLeaveEvent(self, event):
        """Clear the drop highlight when the drag leaves the widget."""
        updateRect = self.highlightedRect
        self.highlightedRect = QRect()
        self.update(updateRect)
        event.accept()

    def dragMoveEvent(self, event):
        """Highlight the target square if it is a valid, empty drop spot."""
        updateRect = self.highlightedRect.united(self.targetSquare(event.pos()))
        if (event.mimeData().hasFormat('image/x-puzzle-piece') and (self.findPiece(self.targetSquare(event.pos())) == (- 1))):
            self.highlightedRect = self.targetSquare(event.pos())
            event.setDropAction(Qt.MoveAction)
            event.accept()
        else:
            self.highlightedRect = QRect()
            event.ignore()
        self.update(updateRect)

    def dropEvent(self, event):
        """Place a dropped piece onto the (empty) target square."""
        if (event.mimeData().hasFormat('image/x-puzzle-piece') and (self.findPiece(self.targetSquare(event.pos())) == (- 1))):
            pieceData = event.mimeData().data('image/x-puzzle-piece')
            dataStream = QDataStream(pieceData, QIODevice.ReadOnly)
            square = self.targetSquare(event.pos())
            pixmap = QPixmap()
            location = QPoint()
            ((dataStream >> pixmap) >> location)
            self.pieceLocations.append(location)
            self.piecePixmaps.append(pixmap)
            self.pieceRects.append(square)
            # BUG FIX: was ``self.hightlightedRect = QRect()`` (typo), which
            # created a stray attribute and left the drop highlight in place.
            self.highlightedRect = QRect()
            self.update(square)
            event.setDropAction(Qt.MoveAction)
            event.accept()
            # A piece with home grid location (c, r) belongs on square (80c, 80r).
            if (location == QPoint((square.x() / 80), (square.y() / 80))):
                self.inPlace += 1
                if (self.inPlace == 25):
                    self.puzzleCompleted.emit()
        else:
            self.highlightedRect = QRect()
            event.ignore()

    def findPiece(self, pieceRect):
        """Return the index of the piece occupying ``pieceRect``, or -1."""
        try:
            return self.pieceRects.index(pieceRect)
        except ValueError:
            return (- 1)

    def mousePressEvent(self, event):
        """Pick up the piece under the cursor and start a move drag.

        The piece is removed from the board for the duration of the drag
        and restored if the drag does not complete as a move.
        """
        square = self.targetSquare(event.pos())
        found = self.findPiece(square)
        if (found == (- 1)):
            return
        location = self.pieceLocations[found]
        pixmap = self.piecePixmaps[found]
        del self.pieceLocations[found]
        del self.piecePixmaps[found]
        del self.pieceRects[found]
        if (location == QPoint((square.x() / 80), (square.y() / 80))):
            self.inPlace -= 1
        self.update(square)
        itemData = QByteArray()
        dataStream = QDataStream(itemData, QIODevice.WriteOnly)
        ((dataStream << pixmap) << location)
        mimeData = QMimeData()
        mimeData.setData('image/x-puzzle-piece', itemData)
        drag = QDrag(self)
        drag.setMimeData(mimeData)
        drag.setHotSpot((event.pos() - square.topLeft()))
        drag.setPixmap(pixmap)
        # Drag cancelled or dropped elsewhere than as a move: put the piece back.
        if (drag.exec_(Qt.MoveAction) != Qt.MoveAction):
            self.pieceLocations.insert(found, location)
            self.piecePixmaps.insert(found, pixmap)
            self.pieceRects.insert(found, square)
            self.update(self.targetSquare(event.pos()))
            if (location == QPoint((square.x() / 80), (square.y() / 80))):
                self.inPlace += 1

    def paintEvent(self, event):
        """Paint the background, the drop highlight and all placed pieces."""
        painter = QPainter()
        painter.begin(self)
        painter.fillRect(event.rect(), Qt.white)
        if self.highlightedRect.isValid():
            painter.setBrush(QColor('#ffcccc'))
            painter.setPen(Qt.NoPen)
            painter.drawRect(self.highlightedRect.adjusted(0, 0, (- 1), (- 1)))
        for (rect, pixmap) in zip(self.pieceRects, self.piecePixmaps):
            painter.drawPixmap(rect, pixmap)
        painter.end()

    def targetSquare(self, position):
        """Return the 80x80 board square containing ``position``."""
        return QRect(((position.x() // 80) * 80), ((position.y() // 80) * 80), 80, 80)
class SigmoidFocalLoss(nn.Module):
    """Module wrapper around the ``sigmoid_focal_loss`` function, holding
    the focal-loss hyperparameters (gamma, alpha), an optional per-class
    weight tensor and the reduction mode.
    """

    def __init__(self, gamma, alpha, weight=None, reduction='mean'):
        super(SigmoidFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        # Registered as a buffer so it follows .to()/.cuda() and is stored
        # in the state dict without being a trainable parameter.
        self.register_buffer('weight', weight)
        self.reduction = reduction

    def forward(self, input, target):
        return sigmoid_focal_loss(input, target, self.gamma, self.alpha, self.weight, self.reduction)

    def __repr__(self):
        return (f'{self.__class__.__name__}(gamma={self.gamma}, '
                f'alpha={self.alpha}, reduction={self.reduction})')
def _start_kernel():
    """Start an in-process IPython kernel (at most once) and return the app.

    The kernel's ZMQ IOLoop is not run on a dedicated thread; instead
    ``poll_ioloop`` is scheduled on the host's timer and spins the loop
    briefly on each tick, keeping the host application responsive.
    """
    from IPython.zmq.ipkernel import IPKernelApp
    from zmq.eventloop import ioloop
    global _kernel_running, _ipython_app
    # Idempotent: return the already-running app if there is one.
    if _kernel_running:
        return _ipython_app
    if IPKernelApp.initialized():
        app = IPKernelApp.instance()
    else:
        app = IPKernelApp.instance()
        app.initialize()
        # Undo IPython's replacement of sys.modules['__main__'] so user
        # code sees the original main module again.
        main = app.kernel.shell._orig_sys_modules_main_mod
        if (main is not None):
            sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main
    # Run user code in this module's namespace with a fresh user namespace.
    app.kernel.user_module = sys.modules[__name__]
    app.kernel.user_ns = {}
    # Rebind the default set_completer_frame to the shell instance and call
    # it once so tab completion works in the embedding host.
    from IPython.core.interactiveshell import InteractiveShell
    old_set_completer_frame = InteractiveShell.set_completer_frame
    bound_scf = old_set_completer_frame.__get__(app.shell, InteractiveShell)
    app.shell.set_completer_frame = bound_scf
    app.shell.set_completer_frame()
    app.kernel.start()
    loop = ioloop.IOLoop.instance()
    def poll_ioloop(timer_id, time):
        """Timer callback: service the kernel's IOLoop without blocking."""
        global _kernel_running
        # Kernel asked to exit: run the loop to completion and stop polling.
        if app.kernel.shell.exit_now:
            _log.debug(('IPython kernel stopping (%s)' % app.connection_file))
            timer.kill_timer(timer_id)
            ioloop.IOLoop.instance().start()
            _kernel_running = False
            return
        # Queue an immediate stop so start() processes pending events once
        # and returns instead of blocking.
        loop.add_timeout(0, (lambda : loop.add_callback(loop.stop)))
        ioloop.IOLoop.instance().start()
    _log.debug(("IPython kernel starting. Use '--existing %s' to connect." % app.connection_file))
    # Poll the kernel's event loop every 100 ms on the host timer.
    timer.set_timer(100, poll_ioloop)
    _kernel_running = True
    _ipython_app = app
    return _ipython_app
def attack_Linf_PGD_bin(input_v, ones, dis, Ld, steps, epsilon):
    """L-infinity PGD attack against a binary discriminator.

    Runs ``steps`` optimization iterations on a copy of ``input_v`` to
    *maximize* the discriminator loss ``Ld`` against the labels ``ones``
    (by minimizing its negation), projecting back into the epsilon-ball
    around the input and into [-1, 1] after each step.  Returns the
    adversarial Variable.
    """
    # Disable dropout/batch-norm updates while attacking.
    dis.eval()
    adverse_v = input_v.data.clone()
    adverse_v = Variable(adverse_v, requires_grad=True)
    # Linf_SGD presumably applies sign-of-gradient steps — TODO confirm.
    optimizer = Linf_SGD([adverse_v], lr=0.0078)
    for _ in range(steps):
        optimizer.zero_grad()
        dis.zero_grad()
        d_bin = dis(adverse_v)
        # Negated loss: a descent step on this increases Ld.
        loss = (- Ld(d_bin, ones))
        loss.backward()
        optimizer.step()
        # Project back into the L-inf epsilon-ball around the clean input,
        # then clamp to the valid data range [-1, 1].
        diff = (adverse_v.data - input_v.data)
        diff.clamp_((- epsilon), epsilon)
        adverse_v.data.copy_((diff + input_v.data).clamp_((- 1), 1))
    # Restore training mode and clear gradients accumulated by the attack.
    dis.train()
    dis.zero_grad()
    return adverse_v
def print_progress(transferred_blocks, block_size, total_size):
    """Print a one-line transfer progress indicator, terminated with a
    carriage return so successive calls overwrite the same line.

    ``transferred_blocks * block_size`` and ``total_size`` are byte counts;
    both are shown in mebibytes together with the completed percentage.
    """
    bytes_per_mb = 1024 * 1024
    current_mb = (transferred_blocks * block_size) / bytes_per_mb
    total_mb = total_size / bytes_per_mb
    fraction = current_mb / total_mb
    line = 'Progress: {:5.1f}M / {:5.1f}M ({:6.1%})'.format(current_mb, total_mb, fraction)
    print(line, end='\r')
class TestGroverConstructor(QiskitAquaTestCase):
    """Tests for the different ways of constructing a Grover instance."""
    def setUp(self):
        super().setUp()
        # Reference: the Grover operator built from a plain CZ oracle.
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        self._expected_grover_op = GroverOperator(oracle=oracle)
    def test_oracle_quantumcircuit(self):
        """A QuantumCircuit oracle yields the expected Grover operator."""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        grover = Grover(oracle=oracle, good_state=['11'])
        grover_op = grover._grover_operator
        self.assertTrue(Operator(grover_op).equiv(Operator(self._expected_grover_op)))
    def test_oracle_statevector(self):
        """A Statevector oracle yields the expected Grover operator."""
        mark_state = Statevector.from_label('11')
        grover = Grover(oracle=mark_state, good_state=['11'])
        grover_op = grover._grover_operator
        self.assertTrue(Operator(grover_op).equiv(Operator(self._expected_grover_op)))
    def test_state_preparation_quantumcircuit(self):
        """A custom state-preparation circuit is used in the Grover operator."""
        state_preparation = QuantumCircuit(2)
        state_preparation.h(0)
        oracle = QuantumCircuit(3)
        oracle.cz(0, 1)
        grover = Grover(oracle=oracle, state_preparation=state_preparation, good_state=['011'])
        grover_op = grover._grover_operator
        expected_grover_op = GroverOperator(oracle, state_preparation=state_preparation)
        self.assertTrue(Operator(grover_op).equiv(Operator(expected_grover_op)))
    def test_state_preparation_type_error(self):
        """A non-circuit state preparation (legacy InitialState) raises TypeError."""
        init_state = Zero(2)
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        try:
            # Silence the deprecation warning from the legacy Zero class.
            warnings.filterwarnings(action='ignore', category=DeprecationWarning)
            with self.assertRaises(TypeError):
                Grover(oracle=oracle, state_preparation=init_state)
        finally:
            warnings.filterwarnings(action='always', category=DeprecationWarning)
    def test_is_good_state_list(self):
        """A list of bitstrings is stored unchanged as the good-state check."""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        is_good_state = ['11', '00']
        grover = Grover(oracle=oracle, good_state=is_good_state)
        self.assertListEqual(grover._is_good_state, ['11', '00'])
    def test_is_good_state_statevector(self):
        """A Statevector good-state spec is stored as an equivalent state."""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        is_good_state = Statevector.from_label('11')
        grover = Grover(oracle=oracle, good_state=is_good_state)
        self.assertTrue(grover._is_good_state.equiv(Statevector.from_label('11')))
    def test_grover_operator(self):
        """An explicitly supplied Grover operator is used as-is."""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        grover_op = GroverOperator(oracle)
        grover = Grover(oracle=grover_op.oracle, grover_operator=grover_op, good_state=['11'])
        grover_op = grover._grover_operator
        self.assertTrue(Operator(grover_op).equiv(Operator(self._expected_grover_op)))
def solid_angle(v0: wp.vec3, v1: wp.vec3, v2: wp.vec3, p: wp.vec3):
    """Angle term for the triangle (v0, v1, v2) as seen from point p.

    Computes atan2 of the Van Oosterom-Strackee ratio (triple product over
    the symmetric denominator); presumably accumulated by the caller into a
    winding number / solid-angle sum — note the factor of 2 in the full
    solid-angle formula is NOT applied here.
    """
    ra = (v0 - p)
    rb = (v1 - p)
    rc = (v2 - p)
    la = wp.length(ra)
    lb = wp.length(rb)
    lc = wp.length(rc)
    # Numerator: scalar triple product ra . (rb x rc).
    numerator = wp.dot(ra, wp.cross(rb, rc))
    # Denominator: |a||b||c| + (a.b)|c| + (b.c)|a| + (c.a)|b|.
    denominator = ((((la * lb) * lc) + (wp.dot(ra, rb) * lc)) + (wp.dot(rb, rc) * la)) + (wp.dot(rc, ra) * lb)
    return wp.atan2(numerator, denominator)
def target_df_rows_agg(spark_context, spark_session):
    """Fixture: expected dataframe for row-window rolling aggregations.

    Four events for id=1 with pre-computed averages of ``feature1`` over
    the last 2 and 3 events; the timestamp column is cast to the Spark
    timestamp type so it matches the pipeline output schema.
    """
    data = [{'id': 1, 'timestamp': '2016-04-11 11:31:11', 'feature1': 200, 'feature2': 200, 'feature1__avg_over_2_events_row_windows': 200, 'feature1__avg_over_3_events_row_windows': 200}, {'id': 1, 'timestamp': '2016-04-11 11:44:12', 'feature1': 300, 'feature2': 300, 'feature1__avg_over_2_events_row_windows': 250, 'feature1__avg_over_3_events_row_windows': 250}, {'id': 1, 'timestamp': '2016-04-11 11:46:24', 'feature1': 400, 'feature2': 400, 'feature1__avg_over_2_events_row_windows': 350, 'feature1__avg_over_3_events_row_windows': 300}, {'id': 1, 'timestamp': '2016-04-11 12:03:21', 'feature1': 500, 'feature2': 500, 'feature1__avg_over_2_events_row_windows': 450, 'feature1__avg_over_3_events_row_windows': 400}]
    # Single partition keeps row order deterministic for comparisons.
    df = spark_session.read.json(spark_context.parallelize(data, 1))
    df = df.withColumn(TIMESTAMP_COLUMN, df.timestamp.cast(DataType.TIMESTAMP.spark))
    return df
# NOTE(review): the two lines below look like pytest decorators whose
# '@pytest.mark' prefix was lost during extraction (a 'fast' mark and a
# parametrize over wavenumber units) — restore them from the original source.
.fast
.parametrize(('input_wavenumbers', 'expected_wavenumbers_cm1'), [[(((2000 * 1) / u.cm), ((230000 * 1) / u.m)), (2000, 2300)]])
def test_wavenumber_units_conversion(input_wavenumbers, expected_wavenumbers_cm1, verbose=True, *args, **kwargs):
    """Wavenumber bounds given in 1/cm and 1/m must both convert to cm-1."""
    setup_test_line_databases()
    (wmin, wmax) = input_wavenumbers
    (expected_wmin, expected_wmax) = expected_wavenumbers_cm1
    sf = SpectrumFactory(wavenum_min=wmin, wavenum_max=wmax, wstep=0.01, cutoff=1e-30, pressure=1, path_length=1, mole_fraction=1, isotope=[1], verbose=verbose)
    sf.load_databank('HITRAN-CO-TEST')
    s = sf.eq_spectrum(Tgas=300)
    # Both spectrum bounds should land on the expected cm-1 values.
    assert np.isclose(s.get_wavenumber().min(), expected_wmin)
    assert np.isclose(s.get_wavenumber().max(), expected_wmax)
class Routing():
    """Tiny URL router for a Kodi plugin.

    View functions register URL patterns via :meth:`route` /
    :meth:`add_route`; :meth:`dispatch` invokes the first function whose
    pattern matches a given path.
    """
    def __init__(self, plugin: 'Plugin') -> None:
        self._rules: Dict[(Callable, List[UrlRule])] = {}
        self.plugin = plugin
    def route_for(self, path: str) -> Optional[Callable]:
        """Return the view function registered for ``path``, or None."""
        prefix = self.plugin.PLUGIN_URL
        if path.startswith(prefix):
            path = path.split(prefix, 1)[1]
        for view_fun, rules in self._rules.items():
            if any(rule.match(path) is not None for rule in rules):
                return view_fun
        return None
    def build_url(self, func_name: str, *args, **kwargs) -> str:
        """Build a plugin:// URL from a function name, path args and query kwargs."""
        segments = [func_name]
        segments.extend(str(arg) for arg in args)
        return urlunsplit(('plugin', self.plugin.PLUGIN_ID, '/'.join(segments), urlencode(kwargs), ''))
    def add_kwargs_to_url(self, **kwargs) -> str:
        """Merge ``kwargs`` into the current query and rebuild the URL."""
        self.plugin.kwargs.update(kwargs)
        query_params = urlencode(self.plugin.kwargs)
        return urlunsplit(('plugin', self.plugin.PLUGIN_ID, self.plugin.path, query_params, ''))
    def route(self, pattern: str) -> Callable:
        """Decorator form of :meth:`add_route`."""
        def decorator(func: Callable) -> Callable:
            self.add_route(func, pattern)
            return func
        return decorator
    def add_route(self, func: Callable, pattern: str) -> None:
        """Register ``pattern`` as one of the URL rules for ``func``."""
        self._rules.setdefault(func, []).append(UrlRule(pattern))
    def redirect(self, path: str) -> None:
        """Ask Kodi to navigate the current container to ``path``."""
        xbmc.executebuiltin(f'Container.Update({path})')
    def dispatch(self, path: str) -> None:
        """Call the first registered view matching ``path``; raise if none."""
        for view_func, rules in self._rules.items():
            for rule in rules:
                kwargs = rule.match(path)
                if kwargs is None:
                    continue
                self.plugin.logger.debug(f"Dispatching to '{view_func.__name__}', args: {kwargs}")
                view_func(**kwargs)
                return
        raise RoutingException(f'No route to path "{path}"')
    def build_icon_path(self, name: str) -> str:
        """Absolute filesystem path of a bundled media PNG resource."""
        return xbmcvfs.translatePath(f'special://home/addons/{self.plugin.PLUGIN_ID}/resources/media/{name}.png')
class PositionWeightedModuleCollectionEmbeddingBagCollectionTest(unittest.TestCase):
    """Checks FeatureProcessedEmbeddingBagCollection with a position-weight
    processor, both eagerly and after symbolic trace + TorchScript."""
    def test_position_weighted_collection_module_ebc(self) -> None:
        # Two feature keys over a batch of 3; jagged lengths per sample are
        # encoded by the offsets tensor.
        features = KeyedJaggedTensor.from_offsets_sync(keys=['f1', 'f2'], values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]), offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]))
        # Two 8-dim tables (one per feature); is_weighted=True is required
        # for feature processors to inject per-position weights.
        ebc = EmbeddingBagCollection(tables=[EmbeddingBagConfig(name='t1', embedding_dim=8, num_embeddings=16, feature_names=['f1']), EmbeddingBagConfig(name='t2', embedding_dim=8, num_embeddings=16, feature_names=['f2'])], is_weighted=True)
        fp_ebc = FeatureProcessedEmbeddingBagCollection(ebc, PositionWeightedModuleCollection({'f1': 10, 'f2': 10}))
        pooled_embeddings = fp_ebc(features)
        # Eager output: 3 rows, two 8-dim tables concatenated -> 16 columns.
        self.assertEqual(pooled_embeddings.keys(), ['f1', 'f2'])
        self.assertEqual(pooled_embeddings.values().size(), (3, 16))
        self.assertEqual(pooled_embeddings.offset_per_key(), [0, 8, 16])
        # Symbolically trace then script the module; the scripted output
        # must match the eager output exactly.
        fp_ebc_gm_script = torch.jit.script(symbolic_trace(fp_ebc))
        pooled_embeddings_gm_script = fp_ebc_gm_script(features)
        torch.testing.assert_close(pooled_embeddings_gm_script.values(), pooled_embeddings.values())
        torch.testing.assert_close(pooled_embeddings_gm_script.offset_per_key(), pooled_embeddings.offset_per_key())
def _lambertw_v_from_i(current, photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth):
    """Solve the single-diode equation for voltage V(I) using the Lambert W
    function.

    All parameters broadcast together; returns a Python scalar when every
    input is scalar, otherwise an ndarray.
    """
    # Remember scalar-ness before broadcasting so the return type matches.
    output_is_scalar = all(map(np.isscalar, (current, photocurrent, saturation_current, resistance_series, resistance_shunt, nNsVth)))
    # Work in shunt conductance so Gsh == 0 (infinite Rsh) is representable.
    conductance_shunt = (1.0 / resistance_shunt)
    (I, IL, I0, Rs, Gsh, a) = np.broadcast_arrays(current, photocurrent, saturation_current, resistance_series, conductance_shunt, nNsVth)
    V = np.full_like(I, np.nan, dtype=np.float64)
    idx_p = (0.0 < Gsh)   # finite shunt resistance: Lambert W branch
    idx_z = (0.0 == Gsh)  # no shunt conductance: closed-form solution
    if np.any(idx_z):
        # V = a * ln(1 + (IL - I)/I0) - I*Rs  (no shunt term needed).
        V[idx_z] = ((a[idx_z] * np.log1p(((IL[idx_z] - I[idx_z]) / I0[idx_z]))) - (I[idx_z] * Rs[idx_z]))
    if np.any(idx_p):
        # The W argument can overflow to inf for large exponents; those
        # entries are recomputed in log space below.
        with np.errstate(over='ignore'):
            argW = ((I0[idx_p] / (Gsh[idx_p] * a[idx_p])) * np.exp(((((- I[idx_p]) + IL[idx_p]) + I0[idx_p]) / (Gsh[idx_p] * a[idx_p]))))
        lambertwterm = lambertw(argW).real
        idx_inf = np.logical_not(np.isfinite(lambertwterm))
        if np.any(idx_inf):
            # log of the overflowed argument, computed without exp().
            logargW = (((np.log(I0[idx_p]) - np.log(Gsh[idx_p])) - np.log(a[idx_p])) + ((((- I[idx_p]) + IL[idx_p]) + I0[idx_p]) / (Gsh[idx_p] * a[idx_p])))[idx_inf]
            # Three fixed-point refinement iterations toward W(arg) starting
            # from the asymptotic approximation w ~ ln(arg).
            w = logargW
            for _ in range(0, 3):
                w = ((w * ((1.0 - np.log(w)) + logargW)) / (1.0 + w))
            lambertwterm[idx_inf] = w
        # V = (IL + I0 - I)/Gsh - I*Rs - a*W(argW).
        V[idx_p] = (((((IL[idx_p] + I0[idx_p]) - I[idx_p]) / Gsh[idx_p]) - (I[idx_p] * Rs[idx_p])) - (a[idx_p] * lambertwterm))
    if output_is_scalar:
        return V.item()
    else:
        return V
class Msg15NativeTrailerRecord(object):
def get(self):
record = [('GP_PK_HEADER', GSDTRecords.gp_pk_header), ('GP_PK_SH1', GSDTRecords.gp_pk_sh1), ('15TRAILER', self.seviri_l15_trailer)]
return np.dtype(record).newbyteorder('>')
def seviri_l15_trailer(self):
record = [('15TrailerVersion', np.uint8), ('ImageProductionStats', self.image_production_stats), ('NavigationExtractionResults', self.navigation_extraction_results), ('RadiometricQuality', self.radiometric_quality), ('GeometricQuality', self.geometric_quality), ('TimelinessAndCompleteness', self.timeliness_and_completeness)]
return record
def image_production_stats(self):
gp_sc_id = GSDTRecords.gp_sc_id
actual_scanning_summary = [('NominalImageScanning', np.uint8), ('ReducedScan', np.uint8), ('ForwardScanStart', time_cds_short), ('ForwardScanEnd', time_cds_short)]
radiometric_behaviour = [('NominalBehaviour', np.uint8), ('RadScanIrregularity', np.uint8), ('RadStoppage', np.uint8), ('RepeatCycleNotCompleted', np.uint8), ('GainChangeTookPlace', np.uint8), ('DecontaminationTookPlace', np.uint8), ('NoBBCalibrationAchieved', np.uint8), ('IncorrectTemperature', np.uint8), ('InvalidBBData', np.uint8), ('InvalidAuxOrHKTMData', np.uint8), ('RefocusingMechanismActuated', np.uint8), ('MirrorBackToReferencePos', np.uint8)]
reception_summary_stats = [('PlannedNumberOfL10Lines', (np.uint32, 12)), ('NumberOfMissingL10Lines', (np.uint32, 12)), ('NumberOfCorruptedL10Lines', (np.uint32, 12)), ('NumberOfReplacedL10Lines', (np.uint32, 12))]
l15_image_validity = [('NominalImage', np.uint8), ('NonNominalBecauseIncomplete', np.uint8), ('NonNominalRadiometricQuality', np.uint8), ('NonNominalGeometricQuality', np.uint8), ('NonNominalTimeliness', np.uint8), ('IncompleteL15', np.uint8)]
actual_l15_coverage_vis_ir = [('SouthernLineActual', np.int32), ('NorthernLineActual', np.int32), ('EasternColumnActual', np.int32), ('WesternColumnActual', np.int32)]
actual_l15_coverage_hrv = [('LowerSouthLineActual', np.int32), ('LowerNorthLineActual', np.int32), ('LowerEastColumnActual', np.int32), ('LowerWestColumnActual', np.int32), ('UpperSouthLineActual', np.int32), ('UpperNorthLineActual', np.int32), ('UpperEastColumnActual', np.int32), ('UpperWestColumnActual', np.int32)]
record = [('SatelliteId', gp_sc_id), ('ActualScanningSummary', actual_scanning_summary), ('RadiometricBehaviour', radiometric_behaviour), ('ReceptionSummaryStats', reception_summary_stats), ('L15ImageValidity', (l15_image_validity, 12)), ('ActualL15CoverageVIS_IR', actual_l15_coverage_vis_ir), ('ActualL15CoverageHRV', actual_l15_coverage_hrv)]
return record
def navigation_extraction_results(self):
    """Get the navigation extraction results record definition.

    Horizon, star and landmark observations share the same trailing
    fields; only their identifying header differs.
    """
    # Fields common to all three observation kinds, in record order.
    observation_tail = [
        ('Alpha', np.float64),
        ('AlphaConfidence', np.float64),
        ('Beta', np.float64),
        ('BetaConfidence', np.float64),
        ('ObservationTime', time_cds),
        ('SpinRate', np.float64),
        ('AlphaDeviation', np.float64),
        ('BetaDeviation', np.float64),
    ]
    horizon_observation = [('HorizonId', np.uint8)] + observation_tail
    star_observation = [('StarId', np.uint16)] + observation_tail
    landmark_observation = [
        ('LandmarkId', np.uint16),
        ('LandmarkLongitude', np.float64),
        ('LandmarkLatitude', np.float64),
    ] + observation_tail
    return [
        ('ExtractedHorizons', (horizon_observation, 4)),
        ('ExtractedStars', (star_observation, 20)),
        ('ExtractedLandmarks', (landmark_observation, 50)),
    ]
def radiometric_quality(self):
    """Get the radiometric quality record definition.

    The per-corner statistics repeat for the SE/SW/NE/NW space corners,
    so they are generated in a loop instead of being written out.
    """
    corners = ('SE', 'SW', 'NE', 'NW')
    l10_rad_quality = [
        ('FullImageMinimumCount', np.uint16),
        ('FullImageMaximumCount', np.uint16),
        ('EarthDiskMinimumCount', np.uint16),
        ('EarthDiskMaximumCount', np.uint16),
        ('MoonMinimumCount', np.uint16),
        ('MoonMaximumCount', np.uint16),
        ('FullImageMeanCount', np.float32),
        ('FullImageStandardDeviation', np.float32),
        ('EarthDiskMeanCount', np.float32),
        ('EarthDiskStandardDeviation', np.float32),
        ('MoonMeanCount', np.float32),
        ('MoonStandardDeviation', np.float32),
        ('SpaceMeanCount', np.float32),
        ('SpaceStandardDeviation', np.float32),
    ]
    for corner in corners:
        l10_rad_quality.append((corner + 'SpaceCornerMeanCount', np.float32))
        l10_rad_quality.append((corner + 'SpaceCornerStandardDeviation', np.float32))
    l10_rad_quality += [
        ('4SpaceCornersMeanCount', np.float32),
        ('4SpaceCornersStandardDeviation', np.float32),
        ('FullImageHistogram', (np.uint32, 256)),
        ('EarthDiskHistogram', (np.uint32, 256)),
        ('ImageCentreSquareHistogram', (np.uint32, 256)),
    ]
    for corner in corners:
        l10_rad_quality.append((corner + 'SpaceCornerHistogram', (np.uint32, 128)))
    l10_rad_quality += [
        ('FullImageEntropy', (np.float32, 3)),
        ('EarthDiskEntropy', (np.float32, 3)),
        ('ImageCentreSquareEntropy', (np.float32, 3)),
    ]
    for corner in corners:
        l10_rad_quality.append((corner + 'SpaceCornerEntropy', (np.float32, 3)))
    l10_rad_quality += [
        ('4SpaceCornersEntropy', (np.float32, 3)),
        ('ImageCentreSquarePSD_EW', (np.float32, 128)),
        ('FullImagePSD_EW', (np.float32, 128)),
        ('ImageCentreSquarePSD_NS', (np.float32, 128)),
        ('FullImagePSD_NS', (np.float32, 128)),
    ]
    l15_rad_quality = [
        ('FullImageMinimumCount', np.uint16),
        ('FullImageMaximumCount', np.uint16),
        ('EarthDiskMinimumCount', np.uint16),
        ('EarthDiskMaximumCount', np.uint16),
        ('FullImageMeanCount', np.float32),
        ('FullImageStandardDeviation', np.float32),
        ('EarthDiskMeanCount', np.float32),
        ('EarthDiskStandardDeviation', np.float32),
        ('SpaceMeanCount', np.float32),
        ('SpaceStandardDeviation', np.float32),
        ('FullImageHistogram', (np.uint32, 256)),
        ('EarthDiskHistogram', (np.uint32, 256)),
        ('ImageCentreSquareHistogram', (np.uint32, 256)),
        ('FullImageEntropy', (np.float32, 3)),
        ('EarthDiskEntropy', (np.float32, 3)),
        ('ImageCentreSquareEntropy', (np.float32, 3)),
        ('ImageCentreSquarePSD_EW', (np.float32, 128)),
        ('FullImagePSD_EW', (np.float32, 128)),
        ('ImageCentreSquarePSD_NS', (np.float32, 128)),
        ('FullImagePSD_NS', (np.float32, 128)),
    ]
    for corner in corners:
        l15_rad_quality.append((corner + 'SpaceCornerL15_RMS', np.float32))
        l15_rad_quality.append((corner + 'SpaceCornerL15_Mean', np.float32))
    return [
        ('L10RadQuality', (l10_rad_quality, 42)),
        ('L15RadQuality', (l15_rad_quality, 12)),
    ]
def geometric_quality(self):
    """Get the geometric quality record definition.

    All four accuracy records (absolute, relative, 500-pixel and
    16-pixel relative) share one field layout.
    """
    accuracy = [
        ('QualityInfoValidity', np.uint8),
        ('EastWestAccuracyRMS', np.float32),
        ('NorthSouthAccuracyRMS', np.float32),
        ('MagnitudeRMS', np.float32),
        ('EastWestUncertaintyRMS', np.float32),
        ('NorthSouthUncertaintyRMS', np.float32),
        ('MagnitudeUncertaintyRMS', np.float32),
        ('EastWestMaxDeviation', np.float32),
        ('NorthSouthMaxDeviation', np.float32),
        ('MagnitudeMaxDeviation', np.float32),
        ('EastWestUncertaintyMax', np.float32),
        ('NorthSouthUncertaintyMax', np.float32),
        ('MagnitudeUncertaintyMax', np.float32),
    ]
    misregistration_residuals = [
        ('QualityInfoValidity', np.uint8),
        ('EastWestResidual', np.float32),
        ('NorthSouthResidual', np.float32),
        ('EastWestUncertainty', np.float32),
        ('NorthSouthUncertainty', np.float32),
        ('EastWestRMS', np.float32),
        ('NorthSouthRMS', np.float32),
        ('EastWestMagnitude', np.float32),
        ('NorthSouthMagnitude', np.float32),
        ('EastWestMagnitudeUncertainty', np.float32),
        ('NorthSouthMagnitudeUncertainty', np.float32),
    ]
    geometric_quality_status = [
        ('QualityNominal', np.uint8),
        ('NominalAbsolute', np.uint8),
        ('NominalRelativeToPreviousImage', np.uint8),
        ('NominalForREL500', np.uint8),
        ('NominalForREL16', np.uint8),
        ('NominalForResMisreg', np.uint8),
    ]
    return [
        ('AbsoluteAccuracy', (accuracy, 12)),
        ('RelativeAccuracy', (accuracy, 12)),
        ('500PixelsRelativeAccuracy', (accuracy, 12)),
        ('16PixelsRelativeAccuracy', (accuracy, 12)),
        ('MisregistrationResiduals', (misregistration_residuals, 12)),
        ('GeometricQualityStatus', (geometric_quality_status, 12)),
    ]
def timeliness_and_completeness(self):
    """Get the timeliness and completeness record definition."""
    timeliness = [
        ('MaxDelay', np.float32),
        ('MinDelay', np.float32),
        ('MeanDelay', np.float32),
    ]
    completeness = [
        ('PlannedL15ImageLines', np.uint16),
        ('GeneratedL15ImageLines', np.uint16),
        ('ValidL15ImageLines', np.uint16),
        ('DummyL15ImageLines', np.uint16),
        ('CorruptedL15ImageLines', np.uint16),
    ]
    return [
        ('Timeliness', timeliness),
        ('Completeness', (completeness, 12)),
    ]
def interpolate_bilinear(grid, query_points, indexing='ij', name=None):
    """Sample `grid` at continuous `query_points` using bilinear interpolation.

    Args:
        grid: 4-D tensor of shape `[batch, height, width, channels]`.
        query_points: 3-D tensor of shape `[batch, n_queries, 2]`; the last
            axis holds (row, col) when `indexing='ij'` or (col, row) when
            `indexing='xy'`.
        indexing: coordinate order of the query axis, `'ij'` or `'xy'`.
        name: optional name scope for the created ops.

    Returns:
        3-D tensor of shape `[batch, n_queries, channels]` with the
        interpolated values (dtype of `grid`).

    Raises:
        ValueError: if `indexing` is invalid, or if the statically known
            shapes of `grid`/`query_points` violate the contract above.
    """
    if ((indexing != 'ij') and (indexing != 'xy')):
        raise ValueError("Indexing mode must be 'ij' or 'xy'")
    with tf.name_scope((name or 'interpolate_bilinear')):
        grid = tf.convert_to_tensor(grid)
        query_points = tf.convert_to_tensor(query_points)
        grid_static_shape = grid.shape
        grid_shape = tf.shape(grid)
        # Validate the grid rank/size statically when the shape is known...
        if (grid_static_shape.dims is not None):
            if (len(grid_static_shape) != 4):
                raise ValueError('Grid must be 4D Tensor')
            if ((grid_static_shape[1] is not None) and (grid_static_shape[1] < 2)):
                raise ValueError('Grid height must be at least 2.')
            if ((grid_static_shape[2] is not None) and (grid_static_shape[2] < 2)):
                raise ValueError('Grid width must be at least 2.')
        else:
            # ...otherwise fall back to runtime assertions.  The int32 check
            # guards the linearized gather addresses computed further down.
            # NOTE(review): the asserts are created under control_dependencies
            # with an empty body; in eager mode they execute immediately, but
            # in graph mode nothing depends on them — confirm against upstream.
            with tf.control_dependencies([tf.debugging.assert_greater_equal(grid_shape[1], 2, message='Grid height must be at least 2.'), tf.debugging.assert_greater_equal(grid_shape[2], 2, message='Grid width must be at least 2.'), tf.debugging.assert_less_equal(tf.cast(((grid_shape[0] * grid_shape[1]) * grid_shape[2]), dtype=tf.dtypes.float32), (np.iinfo(np.int32).max / 8.0), message='The image size or batch size is sufficiently large that the linearized addresses used by tf.gather may exceed the int32 limit.')]):
                pass
        query_static_shape = query_points.shape
        query_shape = tf.shape(query_points)
        # Same static-then-runtime validation for the query tensor.
        if (query_static_shape.dims is not None):
            if (len(query_static_shape) != 3):
                raise ValueError('Query points must be 3 dimensional.')
            query_hw = query_static_shape[2]
            if ((query_hw is not None) and (query_hw != 2)):
                raise ValueError('Query points last dimension must be 2.')
        else:
            with tf.control_dependencies([tf.debugging.assert_equal(query_shape[2], 2, message='Query points last dimension must be 2.')]):
                pass
        (batch_size, height, width, channels) = (grid_shape[0], grid_shape[1], grid_shape[2], grid_shape[3])
        num_queries = query_shape[1]
        query_type = query_points.dtype
        grid_type = grid.dtype
        # Per-dimension interpolation state: fractional weights, and the
        # integer floor/ceil pixel indices that bracket each query.
        alphas = []
        floors = []
        ceils = []
        # 'xy' queries come as (col, row); process row first either way.
        index_order = ([0, 1] if (indexing == 'ij') else [1, 0])
        unstacked_query_points = tf.unstack(query_points, axis=2, num=2)
        for (i, dim) in enumerate(index_order):
            with tf.name_scope(('dim-' + str(dim))):
                queries = unstacked_query_points[dim]
                size_in_indexing_dimension = grid_shape[(i + 1)]
                # Clamp floor to [0, size-2] so floor+1 is always a valid index.
                max_floor = tf.cast((size_in_indexing_dimension - 2), query_type)
                min_floor = tf.constant(0.0, dtype=query_type)
                floor = tf.math.minimum(tf.math.maximum(min_floor, tf.math.floor(queries)), max_floor)
                int_floor = tf.cast(floor, tf.dtypes.int32)
                floors.append(int_floor)
                ceil = (int_floor + 1)
                ceils.append(ceil)
                # alpha is the fractional offset in [0, 1] used as the
                # interpolation weight; cast to grid dtype for the blend.
                alpha = tf.cast((queries - floor), grid_type)
                min_alpha = tf.constant(0.0, dtype=grid_type)
                max_alpha = tf.constant(1.0, dtype=grid_type)
                alpha = tf.math.minimum(tf.math.maximum(min_alpha, alpha), max_alpha)
                # Expand to [batch, n_queries, 1] to broadcast over channels.
                alpha = tf.expand_dims(alpha, 2)
                alphas.append(alpha)
        # Flatten the grid so the four corner values can be fetched with a
        # single gather per corner using linearized addresses.
        flattened_grid = tf.reshape(grid, [((batch_size * height) * width), channels])
        batch_offsets = tf.reshape(((tf.range(batch_size) * height) * width), [batch_size, 1])
        def gather(y_coords, x_coords, name):
            # Fetch grid values at integer (y, x) for every query in the batch.
            with tf.name_scope(('gather-' + name)):
                linear_coordinates = ((batch_offsets + (y_coords * width)) + x_coords)
                gathered_values = tf.gather(flattened_grid, linear_coordinates)
                return tf.reshape(gathered_values, [batch_size, num_queries, channels])
        # The four pixels surrounding each query point.
        top_left = gather(floors[0], floors[1], 'top_left')
        top_right = gather(floors[0], ceils[1], 'top_right')
        bottom_left = gather(ceils[0], floors[1], 'bottom_left')
        bottom_right = gather(ceils[0], ceils[1], 'bottom_right')
        # Standard bilinear blend: interpolate along x on both rows, then
        # along y between the two results.
        with tf.name_scope('interpolate'):
            interp_top = ((alphas[1] * (top_right - top_left)) + top_left)
            interp_bottom = ((alphas[1] * (bottom_right - bottom_left)) + bottom_left)
            interp = ((alphas[0] * (interp_bottom - interp_top)) + interp_top)
        return interp
def new_npair_loss(labels, embedding_anchor, embedding_positive, reg_lambda, equal_shape=True, half_batch_size=64):
    """Compute an N-pair metric-learning loss with L2 embedding regularization.

    NOTE(review): relies on TF1-era APIs (`math_ops.*`, `tf.to_float`,
    `keep_dims`, `tf.is_nan`, `tf.cond`); will not run unmodified on TF2
    eager mode — confirm the project pins TF1.x.

    Args:
        labels: label tensor; split in half into anchor and positive labels.
        embedding_anchor: anchor embeddings, one row per anchor.
        embedding_positive: positive embeddings.
        reg_lambda: weight of the L2 regularization term.
        equal_shape: when True, tile the positives so every anchor chunk is
            scored against the full positive set.
        half_batch_size: number of anchor/positive chunks (half the batch).

    Returns:
        Scalar loss tensor; 0.0 when the computed loss is NaN.
    """
    # L2 regularization on both embedding sets (mean squared norm).
    reg_anchor = math_ops.reduce_mean(math_ops.reduce_sum(math_ops.square(embedding_anchor), 1))
    reg_positive = math_ops.reduce_mean(math_ops.reduce_sum(math_ops.square(embedding_positive), 1))
    l2loss = math_ops.multiply((0.25 * reg_lambda), (reg_anchor + reg_positive), name='l2loss')
    xent_loss = []
    if equal_shape:
        pos_tile = tf.tile(embedding_positive, [half_batch_size, 1], name='pos_tile')
    else:
        pos_tile = embedding_positive
    # Split anchors/positives into per-example chunks.
    anc = tf.split(embedding_anchor, half_batch_size, axis=0)
    pos = tf.split(pos_tile, half_batch_size, axis=0)
    # First half of `labels` belongs to anchors, second half to positives.
    label2 = tf.split(labels, 2, axis=0)
    label_anc = tf.reshape(label2[0], [half_batch_size, 1])
    label_pos = tf.reshape(label2[1], [half_batch_size, 1])
    label_anc = tf.split(label_anc, half_batch_size, axis=0)
    for i in range(half_batch_size):
        # Similarity of this anchor chunk against its positive chunk.
        similarity_matrix = tf.matmul(anc[i], pos[i], transpose_a=False, transpose_b=True)
        anc_label = tf.reshape(label_anc[i], [1, 1])
        pos_label = tf.reshape(label_pos, [half_batch_size, 1])
        # Soft targets: 1 where the positive shares the anchor's label,
        # normalized so each row sums to 1.
        labels_remapped = tf.to_float(tf.equal(anc_label, tf.transpose(pos_label)))
        labels_remapped /= tf.reduce_sum(labels_remapped, 1, keep_dims=True)
        x_loss = tf.nn.softmax_cross_entropy_with_logits(logits=similarity_matrix, labels=labels_remapped)
        xent_loss.append(x_loss)
    xent_loss = tf.reduce_mean(xent_loss, name='xentrop')
    # Guard against NaN (e.g. an anchor with no matching positive makes the
    # normalization above divide by zero): return 0.0 in that case.
    r_loss = tf.cond(tf.is_nan((xent_loss + l2loss)), (lambda : tf.constant(0.0)), (lambda : (xent_loss + l2loss)))
    return r_loss
class TestMakeTimeCdsDictionary(unittest.TestCase):
    """Test conversion of time CDS dictionaries into datetimes."""

    def test_fun(self):
        """Convert CDS dicts at millisecond, microsecond and nanosecond resolution."""
        cases = [
            # Days + milliseconds only.
            ({'Days': np.array(1), 'Milliseconds': np.array(2)},
             datetime(1958, 1, 2, 0, 0, 0, 2000)),
            # Microseconds add to the datetime's microsecond field.
            ({'Days': np.array(1), 'Milliseconds': np.array(2),
              'Microseconds': np.array(3)},
             datetime(1958, 1, 2, 0, 0, 0, 2003)),
            # Nanoseconds are below datetime resolution: same result as above.
            ({'Days': np.array(1), 'Milliseconds': np.array(2),
              'Microseconds': np.array(3), 'Nanoseconds': np.array(4)},
             datetime(1958, 1, 2, 0, 0, 0, 2003)),
        ]
        for tcds, expected in cases:
            assert timecds2datetime(tcds) == expected
def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None:
    """Convert (and optionally weight-average) PyTorch-Lightning checkpoints
    into a Hugging Face model directory.

    Args:
        pl_ckpt_path: path to a single ``.ckpt`` file, or a directory
            containing one or more; multiple checkpoints are averaged.
        hf_src_model_dir: source HF model directory providing the
            architecture/config (and, if available, the tokenizer).
        save_path: destination directory for the converted model.

    Raises:
        AssertionError: if no checkpoints are found or if loading the
            averaged state dict leaves missing keys.
    """
    hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir)
    if os.path.isfile(pl_ckpt_path):
        ckpt_files = [pl_ckpt_path]
    else:
        assert os.path.isdir(pl_ckpt_path)
        ckpt_files = list(Path(pl_ckpt_path).glob('*.ckpt'))
        assert ckpt_files, f'could not find any ckpt files inside the {pl_ckpt_path} directory'
    if len(ckpt_files) > 1:
        logger.info(f'averaging the weights of {ckpt_files}')
    state_dicts = [sanitize(torch.load(x, map_location='cpu')['state_dict']) for x in ckpt_files]
    state_dict = average_state_dicts(state_dicts)
    # strict=False tolerates extra (unexpected) keys, but missing keys would
    # leave randomly initialized weights — refuse that.
    missing, unexpected = hf_model.load_state_dict(state_dict, strict=False)
    assert not missing, f'missing keys: {missing}'
    hf_model.save_pretrained(save_path)
    # Saving the tokenizer is best-effort (the source dir may not ship one),
    # but the failure should be visible rather than silently swallowed.
    try:
        tok = AutoTokenizer.from_pretrained(hf_src_model_dir)
        tok.save_pretrained(save_path)
    except Exception as exc:
        logger.warning('could not load/save tokenizer from %s: %s', hf_src_model_dir, exc)
# Bug fix: the decorator was written as a bare call `_cache(maxsize=200)`,
# which never applied the cache to the function (and its result was simply
# discarded).  Restore the `@` so the memoization actually takes effect.
@_cache(maxsize=200)
def _calcMissileFactor(atkEr, atkEv, atkDrf, tgtSpeed, tgtSigRadius):
    """Calculate the missile damage multiplier against a target.

    Args:
        atkEr: attacker explosion radius.
        atkEv: attacker explosion velocity.
        atkDrf: attacker damage reduction factor (exponent).
        tgtSpeed: target speed.
        tgtSigRadius: target signature radius.

    Returns:
        The damage multiplier, capped at 1 (full damage).
    """
    factors = [1]
    # An explosion radius larger than the target signature reduces damage.
    if atkEr > 0:
        factors.append(tgtSigRadius / atkEr)
    # A target moving fast relative to the explosion velocity reduces damage,
    # scaled by the damage reduction factor exponent.
    if tgtSpeed > 0:
        factors.append(((atkEv * tgtSigRadius) / (atkEr * tgtSpeed)) ** atkDrf)
    # The applied multiplier is the worst (smallest) of the factors.
    return min(factors)
class TestGoBack(BaseTestCase):
    """Tests for history navigation via goBack/goForward."""

    async def test_back(self):
        """goBack/goForward walk the visited-page history in order."""
        await self.page.goto(self.url + 'empty')
        await self.page.goto(self.url + 'static/textarea.html')

        response = await self.page.goBack()
        self.assertTrue(response.ok)
        self.assertIn('empty', response.url)

        response = await self.page.goForward()
        self.assertTrue(response.ok)
        self.assertIn('static/textarea.html', response.url)

        # Moving forward past the newest history entry yields no response.
        response = await self.page.goForward()
        self.assertIsNone(response)

    async def test_history_api(self):
        """Entries pushed via the History API are navigable too."""
        await self.page.goto(self.url + 'empty')
        await self.page.evaluate("() => {\n history.pushState({}, '', '/first.html');\n history.pushState({}, '', '/second.html');\n }")
        self.assertEqual(self.page.url, self.url + 'second.html')

        await self.page.goBack()
        self.assertEqual(self.page.url, self.url + 'first.html')
        await self.page.goBack()
        self.assertEqual(self.page.url, self.url + 'empty')

        await self.page.goForward()
        self.assertEqual(self.page.url, self.url + 'first.html')
def source(left, right, boundary=False):
    """Return the source term ``Mass(right) @ left`` in the current collector domain.

    Args:
        left: symbol (or plain number, which is broadcast) giving the source value.
        right: symbol providing the discretization/mass matrix.
        boundary: when True, use the boundary mass matrix instead.

    Raises:
        pybamm.DomainError: if either symbol is not in the
            'current collector' domain.
    """
    # Numbers are broadcast onto the current collector domain.
    if isinstance(left, numbers.Number):
        left = pybamm.PrimaryBroadcast(left, 'current collector')
    if (left.domain != ['current collector']) or (right.domain != ['current collector']):
        raise pybamm.DomainError(f''''source' only implemented in the 'current collector' domain,
but symbols have domains {left.domain} and {right.domain}''')
    # Bug fix: the matrix-multiplication operator `@` was missing, leaving
    # `pybamm.Mass(right) left`, which is a syntax error.
    if boundary:
        return pybamm.BoundaryMass(right) @ left
    return pybamm.Mass(right) @ left
class TestNetwork(ElectrumTestCase):
    """Exercise the interface's blockchain-sync state machine with a mocked interface."""

    # Bug fix: unittest invokes setUpClass/tearDownClass on the class itself,
    # so they must be classmethods; without the decorator the zero-argument
    # call raises TypeError before any test runs.
    @classmethod
    def setUpClass(cls):
        """Switch to regtest chain parameters for the whole class."""
        super().setUpClass()
        constants.set_regtest()

    @classmethod
    def tearDownClass(cls):
        """Restore mainnet parameters after the class finishes."""
        super().tearDownClass()
        constants.set_mainnet()

    def setUp(self):
        super().setUp()
        self.config = SimpleConfig({'electrum_path': self.electrum_path})
        self.interface = MockInterface(self.config)

    def test_fork_noconflict(self):
        """A fork is reported when no existing chain conflicts with it."""
        blockchain.blockchains = {}
        self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup': 1, 'check': (lambda x: False), 'connect': (lambda x: False)}})

        def mock_connect(height):
            return height == 6
        self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward': 1, 'check': (lambda x: False), 'connect': mock_connect, 'fork': self.mock_fork}})
        self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward': 1, 'check': (lambda x: True), 'connect': (lambda x: False)}})
        self.interface.q.put_nowait({'block_height': 4, 'mock': {'binary': 1, 'check': (lambda x: True), 'connect': (lambda x: True)}})
        self.interface.q.put_nowait({'block_height': 5, 'mock': {'binary': 1, 'check': (lambda x: True), 'connect': (lambda x: True)}})
        self.interface.q.put_nowait({'block_height': 6, 'mock': {'binary': 1, 'check': (lambda x: True), 'connect': (lambda x: True)}})
        ifa = self.interface
        self.assertEqual(('fork', 8), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=7)))
        self.assertEqual(self.interface.q.qsize(), 0)

    def test_fork_conflict(self):
        """A fork is reported even when another chain exists at the forkpoint."""
        blockchain.blockchains = {7: {'check': (lambda bad_header: False)}}
        self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup': 1, 'check': (lambda x: False), 'connect': (lambda x: False)}})

        def mock_connect(height):
            return height == 6
        self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward': 1, 'check': (lambda x: False), 'connect': mock_connect, 'fork': self.mock_fork}})
        self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward': 1, 'check': (lambda x: True), 'connect': (lambda x: False)}})
        self.interface.q.put_nowait({'block_height': 4, 'mock': {'binary': 1, 'check': (lambda x: True), 'connect': (lambda x: True)}})
        self.interface.q.put_nowait({'block_height': 5, 'mock': {'binary': 1, 'check': (lambda x: True), 'connect': (lambda x: True)}})
        self.interface.q.put_nowait({'block_height': 6, 'mock': {'binary': 1, 'check': (lambda x: True), 'connect': (lambda x: True)}})
        ifa = self.interface
        self.assertEqual(('fork', 8), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=7)))
        self.assertEqual(self.interface.q.qsize(), 0)

    def test_can_connect_during_backward(self):
        """A header that connects while walking backward triggers catchup."""
        blockchain.blockchains = {}
        self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup': 1, 'check': (lambda x: False), 'connect': (lambda x: False)}})

        def mock_connect(height):
            return height == 2
        self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward': 1, 'check': (lambda x: False), 'connect': mock_connect, 'fork': self.mock_fork}})
        self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward': 1, 'check': (lambda x: False), 'connect': mock_connect, 'fork': self.mock_fork}})
        self.interface.q.put_nowait({'block_height': 3, 'mock': {'catchup': 1, 'check': (lambda x: False), 'connect': (lambda x: True)}})
        self.interface.q.put_nowait({'block_height': 4, 'mock': {'catchup': 1, 'check': (lambda x: False), 'connect': (lambda x: True)}})
        ifa = self.interface
        self.assertEqual(('catchup', 5), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=4)))
        self.assertEqual(self.interface.q.qsize(), 0)

    def mock_fork(self, bad_header):
        """Create a fresh Blockchain forked at the bad header's height."""
        forkpoint = bad_header['block_height']
        b = blockchain.Blockchain(config=self.config, forkpoint=forkpoint, parent=None, forkpoint_hash=bh2u(sha256(str(forkpoint))), prev_hash=bh2u(sha256(str(forkpoint - 1))))
        return b

    def test_chain_false_during_binary(self):
        """A failed chain check during binary search still ends in catchup."""
        blockchain.blockchains = {}
        self.interface.q.put_nowait({'block_height': 8, 'mock': {'catchup': 1, 'check': (lambda x: False), 'connect': (lambda x: False)}})
        mock_connect = (lambda height: height == 3)
        self.interface.q.put_nowait({'block_height': 7, 'mock': {'backward': 1, 'check': (lambda x: False), 'connect': mock_connect}})
        self.interface.q.put_nowait({'block_height': 2, 'mock': {'backward': 1, 'check': (lambda x: True), 'connect': mock_connect}})
        self.interface.q.put_nowait({'block_height': 4, 'mock': {'binary': 1, 'check': (lambda x: False), 'fork': self.mock_fork, 'connect': mock_connect}})
        self.interface.q.put_nowait({'block_height': 3, 'mock': {'binary': 1, 'check': (lambda x: True), 'connect': (lambda x: True)}})
        self.interface.q.put_nowait({'block_height': 5, 'mock': {'catchup': 1, 'check': (lambda x: False), 'connect': (lambda x: True)}})
        self.interface.q.put_nowait({'block_height': 6, 'mock': {'catchup': 1, 'check': (lambda x: False), 'connect': (lambda x: True)}})
        ifa = self.interface
        self.assertEqual(('catchup', 7), asyncio.get_event_loop().run_until_complete(ifa.sync_until(8, next_height=6)))
        self.assertEqual(self.interface.q.qsize(), 0)
class CommandBase(object):
    """Base class for HTTP API commands.

    Subclasses override ``_method`` and ``process_response`` to customize
    the HTTP verb and the handling of the server's JSON reply.

    Bug fix: ``url`` was defined twice as plain methods (the second
    definition silently shadowed the first), and ``get_method`` returned the
    bound method object instead of the verb string.  The stripped
    ``@property`` decorators are restored so ``cmd.url`` reads the URL,
    ``cmd.url = ...`` sets it, and ``get_method()`` returns ``'put'``.
    """

    def __init__(self, url: str = '') -> None:
        self._url = url

    def __repr__(self) -> str:
        return f'{type(self).__name__}({self.__dict__})'

    def __eq__(self, other) -> bool:
        return (self is other) or (self.__dict__ == other.__dict__)

    @property
    def _method(self) -> str:
        """HTTP verb used by this command; subclasses may override."""
        return 'put'

    @property
    def url(self) -> str:
        """Target URL of the command."""
        return self._url

    @url.setter
    def url(self, new_url: str) -> None:
        self._url = new_url

    def get_url(self) -> str:
        return self._url

    def get_method(self) -> str:
        return self._method

    def process_response(self, json_obj: Dict[str, Any]) -> Any:
        """Handle the server's JSON reply; base implementation accepts it."""
        return True
class BuildingMenu(object):
    """A building menu: a tree of choices a caller navigates to edit an object.

    Subclasses override ``init`` to register choices.  Menu state (the key
    chain, parent menus, persistence) can be stored on the caller so the
    menu survives reloads.

    Bug fix: ``current_choice`` and ``relevant_choices`` are accessed as
    attributes throughout (``choice = self.current_choice`` in ``move``,
    ``for choice in self.relevant_choices`` in ``display``), which would
    yield/iterate bound methods and crash; the stripped ``@property``
    decorators are restored.  ``restore`` takes ``caller`` with no ``self``
    and is marked ``@staticmethod``.
    """

    keys_go_back = ['']   # input keys interpreted as "go back one level"
    sep_keys = '.'        # separator between nested menu keys
    joker_key = '*'       # choice key that matches any input at its position
    min_shortcut = 1      # minimum length of auto-generated shortcut keys

    def __init__(self, caller=None, obj=None, title='Building menu: {obj}', keys=None, parents=None, persistent=False):
        self.caller = caller
        self.obj = obj
        self.title = title
        self.keys = keys or []
        self.parents = parents or ()
        self.persistent = persistent
        self.choices = []
        self.cmds = {}
        self.can_quit = False
        if obj:
            self.init(obj)
            # A root menu always needs a way out: add a default quit choice
            # unless `init` already provided one.
            if not parents and not self.can_quit:
                self.add_choice_quit(key=None)
            self._add_keys_choice()

    @property
    def current_choice(self):
        """Return the choice matching the current key chain, or None."""
        menu_keys = self.keys
        if not menu_keys:
            return None
        for choice in self.choices:
            choice_keys = choice.keys
            if len(menu_keys) == len(choice_keys):
                common = True
                for menu_key, choice_key in zip(menu_keys, choice_keys):
                    # The joker key matches anything at its position.
                    if choice_key == self.joker_key:
                        continue
                    if not isinstance(menu_key, str) or menu_key != choice_key:
                        common = False
                        break
                if common:
                    return choice
        return None

    @property
    def relevant_choices(self):
        """Return the choices reachable one level below the current keys."""
        menu_keys = self.keys
        relevant = []
        for choice in self.choices:
            choice_keys = choice.keys
            if not menu_keys and len(choice_keys) == 1:
                relevant.append(choice)
            elif len(menu_keys) == len(choice_keys) - 1:
                common = True
                for menu_key, choice_key in zip(menu_keys, choice_keys):
                    if choice_key == self.joker_key:
                        continue
                    if not isinstance(menu_key, str) or menu_key != choice_key:
                        common = False
                        break
                if common:
                    relevant.append(choice)
        return relevant

    def _save(self):
        """Store the menu on the caller (ndb, plus db when persistent)."""
        self.caller.ndb._building_menu = self
        if self.persistent:
            self.caller.db._building_menu = {
                'class': type(self).__module__ + '.' + type(self).__name__,
                'obj': self.obj,
                'title': self.title,
                'keys': self.keys,
                'parents': self.parents,
                'persistent': self.persistent,
            }

    def _add_keys_choice(self):
        """Auto-generate shortcut keys for choices without an explicit key."""
        for choice in self.choices:
            if not choice.key:
                title = strip_ansi(choice.title.strip()).lower()
                # Try ever-longer substrings of the title until one is free.
                length = self.min_shortcut
                while length <= len(title):
                    i = 0
                    while i < len(title) - length + 1:
                        guess = title[i:i + length]
                        if guess not in self.cmds:
                            choice.key = guess
                            break
                        i += 1
                    if choice.key:
                        break
                    length += 1
                if choice.key:
                    self.cmds[choice.key] = choice
                else:
                    raise ValueError('Cannot guess the key for {}'.format(choice))

    def init(self, obj):
        """Hook for subclasses: register choices for `obj`."""
        pass

    def add_choice(self, title, key=None, aliases=None, attr=None, text=None, glance=None, on_enter=None, on_nomatch=None, on_leave=None):
        """Add a choice to the menu and return the created Choice.

        When `attr` is given, the choice edits that attribute: unmatched
        input sets the attribute, and default glance/text are generated.
        """
        key = key or ''
        key = key.lower()
        aliases = aliases or []
        aliases = [a.lower() for a in aliases]
        if attr and on_nomatch is None:
            on_nomatch = menu_setattr
        if key and key in self.cmds:
            raise ValueError('A conflict exists between {} and {}, both use key or alias {}'.format(self.cmds[key], title, repr(key)))
        if attr:
            if glance is None:
                glance = ('{obj.' + attr) + '}'
            if text is None:
                text = '\n \n {attr} for {{obj}}(#{{obj.id}})\n\n You can change this value simply by entering it.\n Use |y{back}|n to go back to the main menu.\n\n Current value: |c{{{obj_attr}}}|n\n '.format(attr=attr, obj_attr='obj.' + attr, back='|n or |y'.join(self.keys_go_back))
        choice = Choice(title, key=key, aliases=aliases, attr=attr, text=text, glance=glance, on_enter=on_enter, on_nomatch=on_nomatch, on_leave=on_leave, menu=self, caller=self.caller, obj=self.obj)
        self.choices.append(choice)
        if key:
            self.cmds[key] = choice
            for alias in aliases:
                self.cmds[alias] = choice
        return choice

    def add_choice_edit(self, title='description', key='d', aliases=None, attr='db.desc', glance='\n {obj.db.desc}', on_enter=None):
        """Add a choice that opens an editor on an attribute (desc by default)."""
        on_enter = on_enter or menu_edit
        return self.add_choice(title, key=key, aliases=aliases, attr=attr, glance=glance, on_enter=on_enter, text='')

    def add_choice_quit(self, title='quit the menu', key='q', aliases=None, on_enter=None):
        """Add a choice that closes the menu."""
        on_enter = on_enter or menu_quit
        self.can_quit = True
        return self.add_choice(title, key=key, aliases=aliases, on_enter=on_enter)

    def open(self):
        """Open the menu for the caller: save state, add the cmdset, display."""
        caller = self.caller
        self._save()
        if caller.cmdset.has(BuildingMenuCmdSet):
            caller.cmdset.remove(BuildingMenuCmdSet)
        self.caller.cmdset.add(BuildingMenuCmdSet, permanent=self.persistent)
        self.display()

    def open_parent_menu(self):
        """Re-open the parent menu, if this menu has one."""
        parents = list(self.parents)
        if parents:
            parent_class, parent_obj, parent_keys = parents[-1]
            del parents[-1]
            if self.caller.cmdset.has(BuildingMenuCmdSet):
                self.caller.cmdset.remove(BuildingMenuCmdSet)
            try:
                menu_class = class_from_module(parent_class)
            except Exception:
                log_trace('BuildingMenu: attempting to load class {} failed'.format(repr(parent_class)))
                return
            try:
                building_menu = menu_class(self.caller, parent_obj, keys=parent_keys, parents=tuple(parents))
            except Exception:
                log_trace('An error occurred while creating building menu {}'.format(repr(parent_class)))
                return
            else:
                return building_menu.open()

    def open_submenu(self, submenu_class, submenu_obj, parent_keys=None):
        """Open a child menu, recording this menu as its parent."""
        parent_keys = parent_keys or []
        parents = list(self.parents)
        parents.append((type(self).__module__ + '.' + type(self).__name__, self.obj, parent_keys))
        if self.caller.cmdset.has(BuildingMenuCmdSet):
            self.caller.cmdset.remove(BuildingMenuCmdSet)
        try:
            menu_class = class_from_module(submenu_class)
        except Exception:
            log_trace('BuildingMenu: attempting to load class {} failed'.format(repr(submenu_class)))
            return
        try:
            building_menu = menu_class(self.caller, submenu_obj, parents=parents)
        except Exception:
            log_trace('An error occurred while creating building menu {}'.format(repr(submenu_class)))
            return
        else:
            return building_menu.open()

    def move(self, key=None, back=False, quiet=False, string=''):
        """Move one level forward (with `key`) or backward in the menu tree."""
        choice = self.current_choice
        if choice:
            choice.leave('')
        if not back:
            if not key:
                raise ValueError('you are asking to move forward, you should specify a key.')
            self.keys.append(key)
        else:
            if not self.keys:
                raise ValueError('you already are at the top of the tree, you cannot move backward.')
            del self.keys[-1]
        self._save()
        choice = self.current_choice
        if choice:
            choice.enter(string)
        if not quiet:
            self.display()

    def close(self):
        """Close the menu, removing its cmdset and stored state."""
        if self.caller.cmdset.has(BuildingMenuCmdSet):
            self.caller.cmdset.delete(BuildingMenuCmdSet)
        if self.caller.attributes.has('_building_menu'):
            self.caller.attributes.remove('_building_menu')
        if self.caller.nattributes.has('_building_menu'):
            self.caller.nattributes.remove('_building_menu')

    def display_title(self):
        """Return the formatted menu title."""
        return _call_or_get(self.title, menu=self, obj=self.obj, caller=self.caller).format(obj=self.obj)

    def display_choice(self, choice):
        """Return the one-line display of a choice, highlighting its key."""
        title = _call_or_get(choice.title, menu=self, choice=choice, obj=self.obj, caller=self.caller)
        clear_title = title.lower()
        pos = clear_title.find(choice.key.lower())
        ret = ' '
        if pos >= 0:
            # Highlight the shortcut where it occurs inside the title.
            ret += title[:pos] + '[|y' + choice.key.title() + '|n]' + title[pos + len(choice.key):]
        else:
            ret += '[|y' + choice.key.title() + '|n] ' + title
        if choice.glance:
            glance = _call_or_get(choice.glance, menu=self, choice=choice, caller=self.caller, string='', obj=self.obj)
            glance = glance.format(obj=self.obj, caller=self.caller)
            ret += ': ' + glance
        return ret

    def display(self):
        """Send the current menu screen to the caller."""
        choice = self.current_choice
        if self.keys and choice:
            text = choice.format_text()
        else:
            text = self.display_title() + '\n'
            for choice in self.relevant_choices:
                text += '\n' + self.display_choice(choice)
        self.caller.msg(text)

    @staticmethod
    def restore(caller):
        """Restore a persistent building menu stored on `caller`, if any."""
        menu = caller.db._building_menu
        if menu:
            class_name = menu.get('class')
            if not class_name:
                log_err('BuildingMenu: on caller {}, a persistent attribute holds building menu data, but no class could be found to restore the menu'.format(caller))
                return
            try:
                menu_class = class_from_module(class_name)
            except Exception:
                log_trace('BuildingMenu: attempting to load class {} failed'.format(repr(class_name)))
                return
            obj = menu.get('obj')
            keys = menu.get('keys')
            title = menu.get('title', '')
            parents = menu.get('parents')
            persistent = menu.get('persistent', False)
            try:
                building_menu = menu_class(caller, obj, title=title, keys=keys, parents=parents, persistent=persistent)
            except Exception:
                log_trace('An error occurred while creating building menu {}'.format(repr(class_name)))
                return
            return building_menu
class GenerationConfig(FairseqDataclass):
    """Dataclass holding all sequence-generation (decoding) options.

    Field metadata ``help`` strings double as the CLI argument help text;
    ``argparse_const`` supplies the value used when the flag is given
    without an argument.
    """
    beam: int = field(default=5, metadata={'help': 'beam size'})
    nbest: int = field(default=1, metadata={'help': 'number of hypotheses to output'})
    max_len_a: float = field(default=0, metadata={'help': 'generate sequences of maximum length ax + b, where x is the source length'})
    max_len_b: int = field(default=200, metadata={'help': 'generate sequences of maximum length ax + b, where x is the source length'})
    min_len: int = field(default=1, metadata={'help': 'minimum generation length'})
    match_source_len: bool = field(default=False, metadata={'help': 'generations should match the source length'})
    unnormalized: bool = field(default=False, metadata={'help': 'compare unnormalized hypothesis scores'})
    no_early_stop: bool = field(default=False, metadata={'help': 'deprecated'})
    no_beamable_mm: bool = field(default=False, metadata={'help': "don't use BeamableMM in attention layers"})
    lenpen: float = field(default=1, metadata={'help': 'length penalty: <1.0 favors shorter, >1.0 favors longer sentences'})
    unkpen: float = field(default=0, metadata={'help': 'unknown word penalty: <0 produces more unks, >0 produces fewer'})
    replace_unk: Optional[str] = field(default=None, metadata={'help': 'perform unknown replacement (optionally with alignment dictionary)', 'argparse_const': ' '})
    sacrebleu: bool = field(default=False, metadata={'help': 'score with sacrebleu'})
    score_reference: bool = field(default=False, metadata={'help': 'just score the reference translation'})
    prefix_size: int = field(default=0, metadata={'help': 'initialize generation by target prefix of given length'})
    no_repeat_ngram_size: int = field(default=0, metadata={'help': 'ngram blocking such that this size ngram cannot be repeated in the generation'})
    sampling: bool = field(default=False, metadata={'help': 'sample hypotheses instead of using beam search'})
    sampling_topk: int = field(default=(- 1), metadata={'help': 'sample from top K likely next words instead of all words'})
    sampling_topp: float = field(default=(- 1.0), metadata={'help': 'sample from the smallest set whose cumulative probability mass exceeds p for next words'})
    constraints: Optional[GENERATION_CONSTRAINTS_CHOICES] = field(default=None, metadata={'help': 'enables lexically constrained decoding', 'argparse_const': 'ordered'})
    temperature: float = field(default=1.0, metadata={'help': 'temperature for generation'})
    diverse_beam_groups: int = field(default=(- 1), metadata={'help': 'number of groups for Diverse Beam Search'})
    diverse_beam_strength: float = field(default=0.5, metadata={'help': 'strength of diversity penalty for Diverse Beam Search'})
    diversity_rate: float = field(default=(- 1.0), metadata={'help': 'strength of diversity penalty for Diverse Siblings Search'})
    print_alignment: bool = field(default=False, metadata={'help': 'if set, uses attention feedback to compute and print alignment to source tokens'})
    print_step: bool = field(default=False, metadata={'help': 'print steps'})
    lm_path: Optional[str] = field(default=None, metadata={'help': 'path to lm checkpoint for lm fusion'})
    lm_weight: float = field(default=0.0, metadata={'help': 'weight for lm probs for lm fusion'})
    iter_decode_eos_penalty: float = field(default=0.0, metadata={'help': 'if > 0.0, it penalized early-stopping in decoding.'})
    iter_decode_max_iter: int = field(default=10, metadata={'help': 'maximum iterations for iterative refinement.'})
    iter_decode_force_max_iter: bool = field(default=False, metadata={'help': 'if set, run exact the maximum number of iterations without early stop'})
    iter_decode_with_beam: int = field(default=1, metadata={'help': 'if > 1, model will generate translations varying by the lengths.'})
    iter_decode_with_external_reranker: bool = field(default=False, metadata={'help': 'if set, the last checkpoint are assumed to be a reranker to rescore the translations'})
    retain_iter_history: bool = field(default=False, metadata={'help': 'if set, decoding returns the whole history of iterative refinement'})
    retain_dropout: bool = field(default=False, metadata={'help': 'Use dropout at inference time'})
    retain_dropout_modules: Any = field(default=None, metadata={'help': 'if set, only retain dropout for the specified modules; if not set, then dropout will be retained for all modules'})
    decoding_format: Optional[GENERATION_DECODING_FORMAT_CHOICES] = field(default=None, metadata={'help': 'special decoding format for advanced decoding.'})
    no_seed_provided: bool = field(default=False, metadata={'help': 'if set, dont use seed for initializing random generators'})
class SplunkLogsModel(SharedModel, ActionLogsDataInterface):
    """Action-log model that forwards audit events to Splunk.

    Splunk is treated as a write-only sink here: every lookup/aggregation
    API raises NotImplementedError.
    """

    def __init__(self, producer, splunk_config, should_skip_logging=None):
        """Initialize the Splunk log producer.

        producer: must be the string 'splunk'; anything else raises.
        splunk_config: keyword arguments passed straight to SplunkLogsProducer.
        should_skip_logging: optional predicate
            (kind_name, namespace_name, is_free_namespace) -> bool used to
            suppress logging for certain events.
        """
        self._should_skip_logging = should_skip_logging
        self._logs_producer = LogProducerProxy()
        if (producer == 'splunk'):
            self._logs_producer.initialize(SplunkLogsProducer(**splunk_config))
        else:
            raise Exception(('Invalid log producer: %s' % producer))

    def log_action(self, kind_name, namespace_name=None, performer=None, ip=None, metadata=None, repository=None, repository_name=None, timestamp=None, is_free_namespace=False):
        """Serialize one action-log entry as JSON and send it to the producer.

        Re-raises LogSendException on failure unless strict logging is
        disabled and the action kind is allowed without audit logging.
        """
        if (self._should_skip_logging and self._should_skip_logging(kind_name, namespace_name, is_free_namespace)):
            return
        if (repository_name is not None):
            # repository_name is only valid together with a namespace and
            # without an explicit repository object.
            if ((repository is not None) or (namespace_name is None)):
                raise ValueError('Incorrect argument provided when logging action logs, namespace name should not be empty')
            repository = model.repository.get_repository(namespace_name, repository_name)
        if (timestamp is None):
            timestamp = datetime.today()
        username = None
        performer_name = None
        repo_name = None
        if (namespace_name is not None):
            ns_user = model.user.get_namespace_user(namespace_name)
            if (ns_user is not None):
                username = ns_user.username
        if ((performer is not None) and performer.username):
            performer_name = performer.username
        if ((repository is not None) and repository.name):
            repo_name = repository.name
        metadata_json = (metadata or {})
        log_data = {'kind': kind_name, 'account': username, 'performer': performer_name, 'repository': repo_name, 'ip': ip, 'metadata_json': metadata_json, 'datetime': timestamp}
        try:
            self._logs_producer.send(json.dumps(log_data, sort_keys=True, default=str))
        except LogSendException as lse:
            strict_logging_disabled = config.app_config.get('ALLOW_PULLS_WITHOUT_STRICT_LOGGING')
            # BUG FIX: the previous code passed
            # extra={'exception': lse}.update(log_data) — dict.update()
            # returns None, so `extra` was always None and the log context
            # was silently dropped. Merge the dicts instead.
            logger.exception('log_action failed', extra={**log_data, 'exception': lse})
            if (not (strict_logging_disabled and (kind_name in ACTIONS_ALLOWED_WITHOUT_AUDIT_LOGGING))):
                raise

    def lookup_logs(self, start_datetime, end_datetime, performer_name=None, repository_name=None, namespace_name=None, filter_kinds=None, page_token=None, max_page_count=None):
        raise NotImplementedError('Method not implemented, Splunk does not support log lookups')

    def lookup_latest_logs(self, performer_name=None, repository_name=None, namespace_name=None, filter_kinds=None, size=20):
        raise NotImplementedError('Method not implemented, Splunk does not support log lookups')

    def get_aggregated_log_counts(self, start_datetime, end_datetime, performer_name=None, repository_name=None, namespace_name=None, filter_kinds=None):
        raise NotImplementedError('Method not implemented, Splunk does not support log lookups')

    def count_repository_actions(self, repository, day):
        raise NotImplementedError('Method not implemented, Splunk does not support log lookups')

    def yield_logs_for_export(self, start_datetime, end_datetime, repository_id=None, namespace_id=None, max_query_time=None):
        raise NotImplementedError('Method not implemented, Splunk does not support log lookups')

    def yield_log_rotation_context(self, cutoff_date, min_logs_per_rotation):
        raise NotImplementedError('Method not implemented, Splunk does not support log lookups')
def test_initialize_fresh(hatch, helpers, temp_dir):
    """`hatch new --init` inside an existing project dir recreates pyproject.toml."""
    name = 'My.App'
    desc = 'foo'
    with temp_dir.as_cwd():
        creation = hatch('new', name)
        assert creation.exit_code == 0, creation.output
    project_dir = temp_dir / 'my-app'
    pyproject = project_dir / 'pyproject.toml'
    # Delete the generated project file so --init has something to restore.
    pyproject.remove()
    assert not pyproject.is_file()
    with project_dir.as_cwd():
        result = hatch('new', '--init', input=f'{name}\n{desc}')
    expected_files = helpers.get_template_files('new.default', name, description=desc)
    helpers.assert_files(project_dir, expected_files)
    assert result.exit_code == 0, result.output
    assert remove_trailing_spaces(result.output) == helpers.dedent(
        f'''
        Project name: {name}
        Description []: {desc}
        Wrote: pyproject.toml
        '''
    )
def set_player_object_material_text(player_id: int, object_id: int, text: str, material_index: int=0, material_size: int=OBJECT_MATERIAL_SIZE_256x128, font_face: str='Arial', font_size: int=24, bold: bool=True, font_color: int=0xFFFFFFFF, back_color: int=0, text_alignment: int=0) -> bool:
    """Replace the texture of a per-player object's material slot with rendered text.

    Thin wrapper around the native ``SetPlayerObjectMaterialText``.

    BUG FIX: the original signature read ``font_color: int=,`` — a missing
    default value, which is a SyntaxError. ``0xFFFFFFFF`` (opaque white) is
    the documented SA-MP default for this native's ``fontcolor`` argument.
    """
    return SetPlayerObjectMaterialText(player_id, object_id, text, material_index, material_size, font_face, font_size, bold, font_color, back_color, text_alignment)
class CompDoc(object):
    """Parser for an OLE2 compound document (the container format of .xls files).

    Builds the Master Sector Allocation Table (MSAT), Sector Allocation
    Table (SAT), Short Sector Allocation Table (SSAT) and the directory
    tree from the raw file bytes, and exposes named streams by path.
    """

    def __init__(self, mem, logfile=sys.stdout, DEBUG=0, ignore_workbook_corruption=False):
        """Parse the compound-document header and allocation tables.

        mem: the entire file contents as a bytes-like object.
        logfile: destination for warnings/diagnostics.
        DEBUG: verbosity level (0 = quiet).
        ignore_workbook_corruption: tolerate already-seen sectors in
            _locate_stream instead of raising CompDocError.
        Raises CompDocError on structural corruption.
        """
        self.logfile = logfile
        self.ignore_workbook_corruption = ignore_workbook_corruption
        self.DEBUG = DEBUG
        # Header sanity checks: magic signature and byte-order marker.
        if (mem[0:8] != SIGNATURE):
            raise CompDocError('Not an OLE2 compound document')
        if (mem[28:30] != b'\xfe\xff'):
            raise CompDocError(('Expected "little-endian" marker, found %r' % mem[28:30]))
        (revision, version) = unpack('<HH', mem[24:28])
        if DEBUG:
            print(('\nCompDoc format: version=0x%04x revision=0x%04x' % (version, revision)), file=logfile)
        self.mem = mem
        # Sector sizes are stored as powers of two (ssz, sssz are exponents).
        (ssz, sssz) = unpack('<HH', mem[30:34])
        if (ssz > 20):
            print(('WARNING: sector size (2**%d) is preposterous; assuming 512 and continuing ...' % ssz), file=logfile)
            ssz = 9
        if (sssz > ssz):
            print(('WARNING: short stream sector size (2**%d) is preposterous; assuming 64 and continuing ...' % sssz), file=logfile)
            sssz = 6
        self.sec_size = sec_size = (1 << ssz)
        self.short_sec_size = (1 << sssz)
        if ((self.sec_size != 512) or (self.short_sec_size != 64)):
            print((' sec_size=%d short_sec_size=%d' % (self.sec_size, self.short_sec_size)), file=logfile)
        # Remaining header fields: SAT sector count, directory start sid,
        # standard-stream size threshold, SSAT location, MSAT extension location.
        (SAT_tot_secs, self.dir_first_sec_sid, _unused, self.min_size_std_stream, SSAT_first_sec_sid, SSAT_tot_secs, MSATX_first_sec_sid, MSATX_tot_secs) = unpack('<iiiiiiii', mem[44:76])
        mem_data_len = (len(mem) - 512)
        (mem_data_secs, left_over) = divmod(mem_data_len, sec_size)
        if left_over:
            mem_data_secs += 1
            print(('WARNING *** file size (%d) not 512 + multiple of sector size (%d)' % (len(mem), sec_size)), file=logfile)
        self.mem_data_secs = mem_data_secs
        self.mem_data_len = mem_data_len
        # Per-sector bookkeeping: records which structure claimed each
        # sector, so cross-linked (corrupt) files are detected.
        seen = self.seen = (array.array('B', [0]) * mem_data_secs)
        if DEBUG:
            print('sec sizes', ssz, sssz, sec_size, self.short_sec_size, file=logfile)
            print(('mem data: %d bytes == %d sectors' % (mem_data_len, mem_data_secs)), file=logfile)
            print(('SAT_tot_secs=%d, dir_first_sec_sid=%d, min_size_std_stream=%d' % (SAT_tot_secs, self.dir_first_sec_sid, self.min_size_std_stream)), file=logfile)
            print(('SSAT_first_sec_sid=%d, SSAT_tot_secs=%d' % (SSAT_first_sec_sid, SSAT_tot_secs)), file=logfile)
            print(('MSATX_first_sec_sid=%d, MSATX_tot_secs=%d' % (MSATX_first_sec_sid, MSATX_tot_secs)), file=logfile)
        nent = (sec_size // 4)
        fmt = ('<%di' % nent)
        trunc_warned = 0
        # The first 109 MSAT entries live in the header; the rest (if any)
        # come from a chain of MSAT extension sectors.
        MSAT = list(unpack('<109i', mem[76:512]))
        SAT_sectors_reqd = (((mem_data_secs + nent) - 1) // nent)
        expected_MSATX_sectors = max(0, ((((SAT_sectors_reqd - 109) + nent) - 2) // (nent - 1)))
        actual_MSATX_sectors = 0
        if ((MSATX_tot_secs == 0) and (MSATX_first_sec_sid in (EOCSID, FREESID, 0))):
            pass
        else:
            # Walk the MSAT extension chain; the last int of each extension
            # sector is the sid of the next extension sector.
            sid = MSATX_first_sec_sid
            while (sid not in (EOCSID, FREESID, MSATSID)):
                if (DEBUG > 1):
                    print(('MSATX: sid=%d (0x%08X)' % (sid, sid)), file=logfile)
                if (sid >= mem_data_secs):
                    msg = ('MSAT extension: accessing sector %d but only %d in file' % (sid, mem_data_secs))
                    if (DEBUG > 1):
                        print(msg, file=logfile)
                        break
                    raise CompDocError(msg)
                elif (sid < 0):
                    raise CompDocError(('MSAT extension: invalid sector id: %d' % sid))
                if seen[sid]:
                    raise CompDocError(('MSAT corruption: seen[%d] == %d' % (sid, seen[sid])))
                seen[sid] = 1
                actual_MSATX_sectors += 1
                if (DEBUG and (actual_MSATX_sectors > expected_MSATX_sectors)):
                    print('[1]===>>>', mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, file=logfile)
                offset = (512 + (sec_size * sid))
                MSAT.extend(unpack(fmt, mem[offset:(offset + sec_size)]))
                sid = MSAT.pop()
        if (DEBUG and (actual_MSATX_sectors != expected_MSATX_sectors)):
            print('[2]===>>>', mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, file=logfile)
        if DEBUG:
            print('MSAT: len =', len(MSAT), file=logfile)
            dump_list(MSAT, 10, logfile)
        # Build the SAT by reading every sector the MSAT points at.
        self.SAT = []
        actual_SAT_sectors = 0
        dump_again = 0
        for msidx in xrange(len(MSAT)):
            msid = MSAT[msidx]
            if (msid in (FREESID, EOCSID)):
                continue
            if (msid >= mem_data_secs):
                # Out-of-range sid: mark as EVILSID and keep going (file is
                # truncated or the MSAT is corrupt).
                if (not trunc_warned):
                    print('WARNING *** File is truncated, or OLE2 MSAT is corrupt!!', file=logfile)
                    print(('INFO: Trying to access sector %d but only %d available' % (msid, mem_data_secs)), file=logfile)
                    trunc_warned = 1
                MSAT[msidx] = EVILSID
                dump_again = 1
                continue
            elif (msid < (- 2)):
                raise CompDocError(('MSAT: invalid sector id: %d' % msid))
            if seen[msid]:
                raise CompDocError(('MSAT extension corruption: seen[%d] == %d' % (msid, seen[msid])))
            seen[msid] = 2
            actual_SAT_sectors += 1
            if (DEBUG and (actual_SAT_sectors > SAT_sectors_reqd)):
                print('[3]===>>>', mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, actual_SAT_sectors, msid, file=logfile)
            offset = (512 + (sec_size * msid))
            self.SAT.extend(unpack(fmt, mem[offset:(offset + sec_size)]))
        if DEBUG:
            print('SAT: len =', len(self.SAT), file=logfile)
            dump_list(self.SAT, 10, logfile)
            print(file=logfile)
        if (DEBUG and dump_again):
            print('MSAT: len =', len(MSAT), file=logfile)
            dump_list(MSAT, 10, logfile)
            for satx in xrange(mem_data_secs, len(self.SAT)):
                self.SAT[satx] = EVILSID
            print('SAT: len =', len(self.SAT), file=logfile)
            dump_list(self.SAT, 10, logfile)
        # Read the directory stream and build the directory node list.
        dbytes = self._get_stream(self.mem, 512, self.SAT, self.sec_size, self.dir_first_sec_sid, name='directory', seen_id=3)
        dirlist = []
        did = (- 1)
        for pos in xrange(0, len(dbytes), 128):
            did += 1
            dirlist.append(DirNode(did, dbytes[pos:(pos + 128)], 0, logfile))
        self.dirlist = dirlist
        _build_family_tree(dirlist, 0, dirlist[0].root_DID)
        if DEBUG:
            for d in dirlist:
                d.dump(DEBUG)
        # Root entry (etype 5) owns the short-stream container stream (SSCS).
        sscs_dir = self.dirlist[0]
        assert (sscs_dir.etype == 5)
        if ((sscs_dir.first_SID < 0) or (sscs_dir.tot_size == 0)):
            self.SSCS = ''
        else:
            self.SSCS = self._get_stream(self.mem, 512, self.SAT, sec_size, sscs_dir.first_SID, sscs_dir.tot_size, name='SSCS', seen_id=4)
        # Build the SSAT (allocation table for short streams).
        self.SSAT = []
        if ((SSAT_tot_secs > 0) and (sscs_dir.tot_size == 0)):
            print('WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero', file=logfile)
        if (sscs_dir.tot_size > 0):
            sid = SSAT_first_sec_sid
            nsecs = SSAT_tot_secs
            while ((sid >= 0) and (nsecs > 0)):
                if seen[sid]:
                    raise CompDocError(('SSAT corruption: seen[%d] == %d' % (sid, seen[sid])))
                seen[sid] = 5
                nsecs -= 1
                start_pos = (512 + (sid * sec_size))
                news = list(unpack(fmt, mem[start_pos:(start_pos + sec_size)]))
                self.SSAT.extend(news)
                sid = self.SAT[sid]
            if DEBUG:
                print(('SSAT last sid %d; remaining sectors %d' % (sid, nsecs)), file=logfile)
            assert ((nsecs == 0) and (sid == EOCSID))
        if DEBUG:
            print('SSAT', file=logfile)
            dump_list(self.SSAT, 10, logfile)
        if DEBUG:
            print('seen', file=logfile)
            dump_list(seen, 20, logfile)

    def _get_stream(self, mem, base, sat, sec_size, start_sid, size=None, name='', seen_id=None):
        """Concatenate a stream's sectors by following its SAT chain.

        size=None reads until the end-of-chain marker; otherwise exactly
        *size* bytes are collected (with a warning if the chain is short).
        seen_id, when not None, marks visited sectors in self.seen and
        raises CompDocError on a sector visited twice.
        """
        sectors = []
        s = start_sid
        if (size is None):
            # Unknown size: follow the chain to EOCSID, whole sectors only.
            while (s >= 0):
                if (seen_id is not None):
                    if self.seen[s]:
                        raise CompDocError(('%s corruption: seen[%d] == %d' % (name, s, self.seen[s])))
                    self.seen[s] = seen_id
                start_pos = (base + (s * sec_size))
                sectors.append(mem[start_pos:(start_pos + sec_size)])
                try:
                    s = sat[s]
                except IndexError:
                    raise CompDocError(('OLE2 stream %r: sector allocation table invalid entry (%d)' % (name, s)))
            assert (s == EOCSID)
        else:
            # Known size: trim the final sector to the remaining byte count.
            todo = size
            while (s >= 0):
                if (seen_id is not None):
                    if self.seen[s]:
                        raise CompDocError(('%s corruption: seen[%d] == %d' % (name, s, self.seen[s])))
                    self.seen[s] = seen_id
                start_pos = (base + (s * sec_size))
                grab = sec_size
                if (grab > todo):
                    grab = todo
                todo -= grab
                sectors.append(mem[start_pos:(start_pos + grab)])
                try:
                    s = sat[s]
                except IndexError:
                    raise CompDocError(('OLE2 stream %r: sector allocation table invalid entry (%d)' % (name, s)))
            assert (s == EOCSID)
            if (todo != 0):
                fprintf(self.logfile, 'WARNING *** OLE2 stream %r: expected size %d, actual size %d\n', name, size, (size - todo))
        return b''.join(sectors)

    def _dir_search(self, path, storage_DID=0):
        """Recursively resolve *path* (a list of name components) in the
        directory tree; returns the matching DirNode (etype 2 = user
        stream) or None if not found. Name comparison is case-insensitive.
        """
        head = path[0]
        tail = path[1:]
        dl = self.dirlist
        for child in dl[storage_DID].children:
            if (dl[child].name.lower() == head.lower()):
                et = dl[child].etype
                if (et == 2):
                    return dl[child]
                if (et == 1):
                    # Storage node: must have more path components to descend.
                    if (not tail):
                        raise CompDocError("Requested component is a 'storage'")
                    return self._dir_search(tail, child)
                dl[child].dump(1)
                raise CompDocError("Requested stream is not a 'user stream'")
        return None

    def get_named_stream(self, qname):
        """Return the contents of the stream at slash-separated path *qname*,
        or None if it does not exist. Streams smaller than
        min_size_std_stream are read from the SSCS via the SSAT.
        """
        d = self._dir_search(qname.split('/'))
        if (d is None):
            return None
        if (d.tot_size >= self.min_size_std_stream):
            return self._get_stream(self.mem, 512, self.SAT, self.sec_size, d.first_SID, d.tot_size, name=qname, seen_id=(d.DID + 6))
        else:
            return self._get_stream(self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID, d.tot_size, name=(qname + ' (from SSCS)'), seen_id=None)

    def locate_named_stream(self, qname):
        """Return (buffer, offset, length) for stream *qname* without copying
        when the stream is stored contiguously; (None, 0, 0) if absent.
        """
        d = self._dir_search(qname.split('/'))
        if (d is None):
            return (None, 0, 0)
        if (d.tot_size > self.mem_data_len):
            raise CompDocError(('%r stream length (%d bytes) > file data size (%d bytes)' % (qname, d.tot_size, self.mem_data_len)))
        if (d.tot_size >= self.min_size_std_stream):
            result = self._locate_stream(self.mem, 512, self.SAT, self.sec_size, d.first_SID, d.tot_size, qname, (d.DID + 6))
            if self.DEBUG:
                print('\nseen', file=self.logfile)
                dump_list(self.seen, 20, self.logfile)
            return result
        else:
            # Short streams are always materialized from the SSCS.
            return (self._get_stream(self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID, d.tot_size, (qname + ' (from SSCS)'), None), 0, d.tot_size)

    def _locate_stream(self, mem, base, sat, sec_size, start_sid, expected_stream_size, qname, seen_id):
        """Follow the SAT chain collecting (start, end) byte ranges.

        If the whole stream turns out to be one contiguous run, return
        (mem, offset, size) — zero copy; otherwise join the slices.
        """
        s = start_sid
        if (s < 0):
            raise CompDocError(('_locate_stream: start_sid (%d) is -ve' % start_sid))
        p = (- 99)
        start_pos = (- 9999)
        end_pos = (- 8888)
        slices = []
        tot_found = 0
        found_limit = (((expected_stream_size + sec_size) - 1) // sec_size)
        while (s >= 0):
            if self.seen[s]:
                if (not self.ignore_workbook_corruption):
                    print(('_locate_stream(%s): seen' % qname), file=self.logfile)
                    dump_list(self.seen, 20, self.logfile)
                    raise CompDocError(('%s corruption: seen[%d] == %d' % (qname, s, self.seen[s])))
            self.seen[s] = seen_id
            tot_found += 1
            if (tot_found > found_limit):
                raise CompDocError(('%s: size exceeds expected %d bytes; corrupt?' % (qname, (found_limit * sec_size))))
            if (s == (p + 1)):
                # Consecutive sector: extend the current contiguous run.
                end_pos += sec_size
            else:
                if (p >= 0):
                    slices.append((start_pos, end_pos))
                start_pos = (base + (s * sec_size))
                end_pos = (start_pos + sec_size)
            p = s
            s = sat[s]
        assert (s == EOCSID)
        assert (tot_found == found_limit)
        if (not slices):
            # Single contiguous run: hand back the original buffer + offset.
            return (mem, start_pos, expected_stream_size)
        slices.append((start_pos, end_pos))
        return (b''.join((mem[start_pos:end_pos] for (start_pos, end_pos) in slices)), 0, expected_stream_size)
def test_pool_no_package_from_specified_repository_raises_package_not_found() -> None:
    """Requesting a package from a named repo that doesn't hold it must raise,
    even when another repo in the pool does provide it."""
    wanted = get_package('foo', '1.0.0')
    empty_repo = Repository('repo1')
    stocked_repo = Repository('repo2', [wanted])
    pool = RepositoryPool([empty_repo, stocked_repo])
    with pytest.raises(PackageNotFound):
        pool.package('foo', Version.parse('1.0.0'), repository_name='repo1')
class Lock(object):
    """ZooKeeper-backed distributed lock (sequential ephemeral znodes).

    Each contender creates a ``<uuid>__lock__<seq>`` child under *path*;
    the lock is held by the contender whose sequence number has no smaller
    predecessor. extra_lock_patterns lets subclasses/other recipes share
    the same lock path.
    """
    # Suffix appended to this instance's znode name (before the sequence).
    _NODE_NAME = '__lock__'
    # Node-name patterns that count as lock contenders.
    _EXCLUDE_NAMES = ['__lock__']

    def __init__(self, client, path, identifier=None, extra_lock_patterns=()):
        """
        client: connected Kazoo client.
        path: lock root znode path.
        identifier: optional payload stored in the contender node
            (shown by contenders()).
        extra_lock_patterns: additional node-name patterns to treat as
            contenders.
        """
        self.client = client
        self.path = path
        self._exclude_names = set((self._EXCLUDE_NAMES + list(extra_lock_patterns)))
        # Matches a contender node name and captures its 10-digit sequence.
        self._contenders_re = re.compile('(?:{patterns})(-?\\d{{10}})$'.format(patterns='|'.join(self._exclude_names)))
        self.data = str((identifier or '')).encode('utf-8')
        self.node = None
        self.wake_event = client.handler.event_object()
        # Unique prefix so this instance can find its own node after a retry.
        self.prefix = (uuid.uuid4().hex + self._NODE_NAME)
        self.create_path = ((self.path + '/') + self.prefix)
        self.create_tried = False
        self.is_acquired = False
        self.assured_path = False
        self.cancelled = False
        self._retry = KazooRetry(max_tries=None, sleep_func=client.handler.sleep_func)
        # Serializes acquire() calls from threads sharing this Lock object.
        self._acquire_method_lock = client.handler.lock_object()

    def _ensure_path(self):
        """Create the lock root path if it doesn't exist (done lazily once)."""
        self.client.ensure_path(self.path)
        self.assured_path = True

    def cancel(self):
        """Abort a pending acquire(); the waiter raises CancelledError."""
        self.cancelled = True
        self.wake_event.set()

    def acquire(self, blocking=True, timeout=None, ephemeral=True):
        """Acquire the lock; returns True on success, False otherwise.

        blocking=False polls once; timeout bounds the total wait in
        seconds. On unexpected KazooException the contender node is
        cleaned up best-effort before re-raising.
        """
        retry = self._retry.copy()
        retry.deadline = timeout
        method_locked = self._acquire_method_lock.acquire(blocking=blocking, timeout=(timeout if (timeout is not None) else (- 1)))
        if (not method_locked):
            return False
        already_acquired = self.is_acquired
        try:
            gotten = False
            try:
                gotten = retry(self._inner_acquire, blocking=blocking, timeout=timeout, ephemeral=ephemeral)
            except RetryFailedError:
                pass
            except KazooException:
                # Don't remove our node if we already held the lock before.
                if (not already_acquired):
                    self._best_effort_cleanup()
                    self.cancelled = False
                raise
            if gotten:
                self.is_acquired = gotten
            if ((not gotten) and (not already_acquired)):
                self._best_effort_cleanup()
            return gotten
        finally:
            self._acquire_method_lock.release()

    def _watch_session(self, state):
        """Session listener: wake the waiter on any session state change."""
        self.wake_event.set()
        return True

    def _inner_acquire(self, blocking, timeout, ephemeral=True):
        """One attempt of the lock protocol (driven by the retry helper)."""
        # Another thread on this object holds the lock: force a retry
        # (blocking) or give up immediately (non-blocking).
        if self.is_acquired:
            if (not blocking):
                return False
            raise ForceRetryError()
        if (not self.assured_path):
            self._ensure_path()
        node = None
        if self.create_tried:
            # A previous attempt may have created our node before failing;
            # re-find it instead of creating a duplicate.
            node = self._find_node()
        else:
            self.create_tried = True
        if (not node):
            node = self.client.create(self.create_path, self.data, ephemeral=ephemeral, sequence=True)
            # Strip off the path prefix, keeping just the node name.
            node = node[(len(self.path) + 1):]
        self.node = node
        while True:
            self.wake_event.clear()
            if self.cancelled:
                raise CancelledError()
            predecessor = self._get_predecessor(node)
            if (predecessor is None):
                # No smaller contender: we hold the lock.
                return True
            if (not blocking):
                return False
            # Wait for the immediate predecessor node to change/disappear.
            predecessor = ((self.path + '/') + predecessor)
            self.client.add_listener(self._watch_session)
            try:
                self.client.get(predecessor, self._watch_predecessor)
            except NoNodeError:
                pass
            else:
                self.wake_event.wait(timeout)
                if (not self.wake_event.is_set()):
                    raise LockTimeout(('Failed to acquire lock on %s after %s seconds' % (self.path, timeout)))
            finally:
                self.client.remove_listener(self._watch_session)

    def _watch_predecessor(self, event):
        """Data watch on the predecessor node: wake the waiter."""
        self.wake_event.set()

    def _get_predecessor(self, node):
        """Return the contender node name immediately preceding *node*,
        or None if *node* has the smallest sequence. Raises ForceRetryError
        if our own node is no longer among the children.
        """
        node_sequence = node[len(self.prefix):]
        children = self.client.get_children(self.path)
        found_self = False
        contender_matches = []
        for child in children:
            match = self._contenders_re.search(child)
            if (match is not None):
                contender_sequence = match.group(1)
                # String comparison works: sequences are fixed-width (10 digits).
                if (contender_sequence < node_sequence):
                    contender_matches.append(match)
                if (child == node):
                    found_self = match
        if (found_self is False):
            # Our node vanished (e.g. session loss): restart the attempt.
            raise ForceRetryError()
        if (not contender_matches):
            return None
        # Predecessor = the largest sequence smaller than ours.
        sorted_matches = sorted(contender_matches, key=(lambda m: m.groups()))
        return sorted_matches[(- 1)].string

    def _find_node(self):
        """Return this instance's own contender node name, if it exists."""
        children = self.client.get_children(self.path)
        for child in children:
            if child.startswith(self.prefix):
                return child
        return None

    def _delete_node(self, node):
        self.client.delete(((self.path + '/') + node))

    def _best_effort_cleanup(self):
        """Delete our contender node, ignoring ZooKeeper errors."""
        try:
            node = (self.node or self._find_node())
            if node:
                self._delete_node(node)
        except KazooException:
            pass

    def release(self):
        """Release the lock; returns True if it was held."""
        return self.client.retry(self._inner_release)

    def _inner_release(self):
        if (not self.is_acquired):
            return False
        try:
            self._delete_node(self.node)
        except NoNodeError:
            pass
        self.is_acquired = False
        self.node = None
        return True

    def contenders(self):
        """Return the identifiers of all current contenders, in lock order.

        Unlike _contenders_re, this also matches this class's own
        _NODE_NAME pattern even when excluded by subclass configuration.
        """
        if (not self.assured_path):
            self._ensure_path()
        children = self.client.get_children(self.path)
        all_contenders_re = re.compile('(?:{patterns})(-?\\d{{10}})$'.format(patterns='|'.join((self._exclude_names | {self._NODE_NAME}))))
        contender_matches = []
        for child in children:
            match = all_contenders_re.search(child)
            if (match is not None):
                contender_matches.append(match)
        contender_nodes = [match.string for match in sorted(contender_matches, key=(lambda m: m.groups()))]
        contenders = []
        for node in contender_nodes:
            try:
                (data, stat) = self.client.get(((self.path + '/') + node))
                if (data is not None):
                    contenders.append(data.decode('utf-8'))
            except NoNodeError:
                # Node disappeared between listing and reading: skip it.
                pass
        return contenders

    def __enter__(self):
        # NOTE(review): does not return self, so `with lock as l:` binds None
        # — matches upstream kazoo behavior; confirm before changing.
        self.acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
def test_update_catalogs(db, settings):
    """Re-importing the catalogs XML must update — not create — all 148 elements."""
    catalogs_xml = Path(settings.BASE_DIR) / 'xml' / 'elements' / 'catalogs.xml'
    root = read_xml_file(catalogs_xml)
    xml_version = root.attrib.get('version')
    parsed = flat_xml_to_elements(root)
    parsed = convert_elements(parsed, xml_version)
    parsed = order_elements(parsed)
    parsed = parsed.values()
    import_elements(parsed)
    assert len(root) == len(parsed) == 148
    # Everything already exists in the db fixture, so imports are pure updates.
    assert all(element['created'] is False for element in parsed)
    assert all(element['updated'] is True for element in parsed)
class ElementProxy():
    """Thin wrapper around an oxml element, optionally tied to a parent part.

    Identity of the wrapped element — not value equality — defines proxy
    equality, so two proxies over the same element compare equal.
    """

    def __init__(self, element: BaseOxmlElement, parent: (t.ProvidesXmlPart | None)=None):
        self._element = element
        self._parent = parent

    def __eq__(self, other: object):
        """Equal iff *other* is a proxy wrapping the very same element object."""
        return isinstance(other, ElementProxy) and self._element is other._element

    def __ne__(self, other: object):
        if isinstance(other, ElementProxy):
            return self._element is not other._element
        return True

    def element(self):
        # NOTE(review): reads like a property accessor; upstream libraries
        # declare this with @property — confirm callers invoke it as a method.
        return self._element

    def part(self) -> XmlPart:
        """The package part this element belongs to.

        Raises ValueError when the proxy was built without a parent.
        """
        if self._parent is None:
            raise ValueError('part is not accessible from this element')
        return self._parent.part
def save_an_icom_batch(date_pattern, ip_directory, data_to_save):
    """Persist one raw iCOM stream batch as ``<counter>.txt`` in *ip_directory*.

    date_pattern: compiled (bytes) regex validating the timestamp field at
        bytes 8..25 of the payload.
    ip_directory: pathlib.Path-like destination directory.
    data_to_save: raw stream bytes; the numeric value of byte 26 becomes
        the zero-padded three-digit file-name counter.
    Raises ValueError when the timestamp field does not match.
    """
    # Refuse to write anything that doesn't look like an iCOM stream.
    if not date_pattern.match(data_to_save[8:26]):
        raise ValueError('Unexpected iCOM stream format')
    batch_counter = str(int(data_to_save[26])).zfill(3)
    target_path = ip_directory.joinpath(f'{batch_counter}.txt')
    with open(target_path, 'bw+') as stream_file:
        stream_file.write(data_to_save)
    logging.debug('Saved stream to %(filepath)s', {'filepath': target_path})
class BiF_Att(nn.Module):
    """Utterance-level emotion classifier with windowed multi-hop attention.

    Pipeline: word embeddings -> bidirectional GRU over words -> max-pool +
    linear to a 100-d utterance vector -> for each utterance after the
    first, attend over a window of preceding utterance vectors (refined by
    a contextual bi-GRU) for ``hops`` rounds -> linear classifier.
    """

    def __init__(self, emodict, worddict, embedding, args):
        """
        emodict: label dictionary; its n_words is the number of classes.
        worddict: word dictionary (unused here beyond the signature).
        embedding: pre-built word embedding module.
        args: namespace providing gpu, hops, wind1, d_word_vec, d_h1.
        """
        super(BiF_Att, self).__init__()
        self.num_classes = emodict.n_words
        self.embeddings = embedding
        self.gpu = args.gpu
        self.hops = args.hops
        # Size of the look-back window of preceding utterances.
        self.wind_1 = args.wind1
        self.utt_gru = GRUencoder(args.d_word_vec, args.d_h1, num_layers=1)
        # Bi-directional output => 2 * d_h1 features into the projection.
        self.d_lin_1 = (args.d_h1 * 2)
        self.lin_1 = nn.Linear(self.d_lin_1, 100)
        self.dropout_in = nn.Dropout(0.3)
        self.cont_gru = nn.GRU(100, 100, num_layers=1, bidirectional=True)
        self.dropout_mid = nn.Dropout(0.3)
        self.classifier = nn.Linear(100, self.num_classes)

    def init_hidden(self, num_directs, num_layers, batch_size, d_model):
        """Return a zero initial hidden state of shape
        (num_directs * num_layers, batch_size, d_model)."""
        return Variable(torch.zeros((num_directs * num_layers), batch_size, d_model))

    def forward(self, sents, lengths):
        """Classify each utterance in a dialogue.

        sents: utterance token ids; assumes dim 0 indexes utterances in
            dialogue order — TODO confirm against the caller.
        lengths: per-utterance token lengths for the word-level GRU.
        Returns (log-probabilities over classes, attention weights per hop).
        """
        if (len(sents.size()) < 2):
            sents = sents.unsqueeze(0)
        w_embed = self.embeddings(sents)
        w_gru = self.utt_gru(w_embed, lengths)
        # Max-pool over the word dimension, then project to 100-d.
        maxpl = torch.max(w_gru, dim=1)[0]
        s_utt = F.tanh(self.lin_1(maxpl))
        s_utt = self.dropout_in(s_utt)
        s_out = []
        cont_inp = s_utt.unsqueeze(1)
        # The first utterance has no history: pass its vector through as-is.
        s_out.append(cont_inp[:1])
        attn_weights = []
        if (sents.size()[0] > 1):
            batches = []
            masks = []
            # Build, for each utterance i >= 1, a fixed-size window of the
            # wind_1 preceding utterance vectors (zero-padded on the left).
            for i in range(1, sents.size()[0]):
                pad = max((self.wind_1 - i), 0)
                i_st = (0 if (i < (self.wind_1 + 1)) else (i - self.wind_1))
                m_pad = F.pad(cont_inp[i_st:i], (0, 0, 0, 0, pad, 0), mode='constant', value=0)
                batches.append(m_pad)
                # 0 marks padded slots so attention can ignore them.
                mask = (([0] * pad) + ([1] * (self.wind_1 - pad)))
                masks.append(mask)
            batches_tensor = torch.cat(batches, dim=1)
            masks_tensor = torch.tensor(masks).long().to(sents.device)
            query_mask = torch.ones(masks_tensor.size()[0], 1).long().to(sents.device)
            attn_mask = get_attn_pad_mask(query_mask, masks_tensor)
            # Contextual bi-GRU over the window; fuse both directions with
            # a residual connection to the raw window vectors.
            mem_out = self.cont_gru(batches_tensor)[0]
            (mem_fwd, mem_bwd) = mem_out.chunk(2, (- 1))
            mem_bank = ((batches_tensor + mem_fwd) + mem_bwd).transpose(0, 1).contiguous()
            mem_bank = self.dropout_mid(mem_bank)
            query = cont_inp[1:]
            eps_mem = query
            # Multi-hop attention: each hop refines the episodic memory by
            # adding the attended context (residual update).
            for hop in range(self.hops):
                (attn_out, attn_weight) = dotprod_attention(eps_mem, mem_bank, mem_bank, attn_mask)
                attn_weights.append(attn_weight.squeeze(1))
                eps_mem = (eps_mem + attn_out)
                eps_mem = self.dropout_mid(eps_mem)
            s_out.append(eps_mem)
        s_cont = torch.cat(s_out, dim=0).squeeze(1)
        s_output = self.classifier(s_cont)
        pred_s = F.log_softmax(s_output, dim=1)
        return (pred_s, attn_weights)
class F9_Raid(F7_Raid):
    """Fedora 9 version of the kickstart ``raid`` command.

    Extends F7_Raid by deprecating --bytes-per-inode and adding
    --fsprofile, --encrypted and --passphrase.
    """
    removedKeywords = F7_Raid.removedKeywords
    removedAttrs = F7_Raid.removedAttrs

    def _getParser(self):
        """Return the F7 parser extended with the F9-era options."""
        op = F7_Raid._getParser(self)
        op.add_argument('--bytes-per-inode', deprecated=F9)
        op.add_argument('--fsprofile', version=F9, help='\n Specifies a usage type to be passed to the program that\n makes a filesystem on this partition. A usage type\n defines a variety of tuning parameters to be used when\n making a filesystem. For this option to work, the\n filesystem must support the concept of usage types and\n there must be a configuration file that lists valid\n types. For ext2/3/4, this configuration file is\n ``/etc/mke2fs.conf``.')
        op.add_argument('--encrypted', action='store_true', version=F9, default=False, help='\n Specify that this RAID device should be encrypted.')
        op.add_argument('--passphrase', version=F9, help='\n Specify the passphrase to use when encrypting this RAID\n device. Without the above --encrypted option, this option\n does nothing. If no passphrase is specified, the default\n system-wide one is used, or the installer will stop and\n prompt if there is no default.')
        return op
def _prepare_connection_costs_per_link(n, costs, renewable_config, hvdc_as_lines, lines_length_factor):
if n.links.empty:
return {}
connection_costs_per_link = {}
if hvdc_as_lines:
dc_lengths = n.lines.length
unterwater_fractions = n.lines.underwater_fraction
else:
dc_lengths = n.links.length
unterwater_fractions = n.links.underwater_fraction
for tech in renewable_config:
if tech.startswith('offwind'):
connection_costs_per_link[tech] = ((dc_lengths * lines_length_factor) * ((unterwater_fractions * costs.at[((tech + '-connection-submarine'), 'capital_cost')]) + ((1.0 - unterwater_fractions) * costs.at[((tech + '-connection-underground'), 'capital_cost')])))
return connection_costs_per_link |
class FixScaleCrop(object):
    """Resize so the shorter side equals ``crop_size``, then center-crop a square.

    Operates on a sample dict with PIL images under 'image' and 'label';
    the label is resized with nearest-neighbour to keep class ids intact.
    """

    def __init__(self, crop_size):
        # Side length of the square output.
        self.crop_size = crop_size

    def __call__(self, sample):
        image = sample['image']
        target = sample['label']
        width, height = image.size
        # Scale so the SHORTER edge becomes crop_size, preserving aspect ratio.
        if width > height:
            new_h = self.crop_size
            new_w = int(1.0 * width * new_h / height)
        else:
            new_w = self.crop_size
            new_h = int(1.0 * height * new_w / width)
        image = image.resize((new_w, new_h), Image.BILINEAR)
        target = target.resize((new_w, new_h), Image.NEAREST)
        width, height = image.size
        left = int(round((width - self.crop_size) / 2.0))
        top = int(round((height - self.crop_size) / 2.0))
        box = (left, top, left + self.crop_size, top + self.crop_size)
        return {'image': image.crop(box), 'label': target.crop(box)}
class SshConfig(config_parser.Config):
    """Config reader that fetches Nagios configuration files over SSH/SFTP.

    Remote files are pulled with ``find | tar`` into an in-memory tarball
    which then backs the file-system-style accessors (open, stat, isfile,
    listdir, ...).
    """

    def __init__(self, host, username, password=None, cfg_file=None):
        """Connect to *host*, open SFTP, and prepare the in-memory tar cache."""
        import paramiko
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(host, username=username, password=password)
        self.ftp = self.ssh.open_sftp()
        # Python 2 ships the faster cStringIO; fall back to StringIO on
        # Python 3. (Was a bare `except:` — narrowed to ImportError.)
        try:
            import cStringIO
            c = cStringIO.StringIO()
        except ImportError:
            c = StringIO()
        self.tar = tarfile.open(mode='w', fileobj=c)
        self.cached_stats = {}
        super(SshConfig, self).__init__(cfg_file=cfg_file)

    def open(self, filename, *args, **kwargs):
        """Return a file-like object for *filename* from the local tar cache.

        NOTE: the original carried unreachable legacy code after this
        return (a _get_file/StringIO round-trip); it has been removed.
        """
        return self.tar.extractfile(filename)

    def add_to_tar(self, path):
        """Stream every regular file under remote *path* into the tar cache."""
        print('Taring ', path)
        command = "find '{path}' -type f | tar -c -T - --to-stdout --absolute-names"
        command = command.format(path=path)
        print(command)
        (stdin, stdout, stderr) = self.ssh.exec_command(command, bufsize=50000)
        tar = tarfile.open(fileobj=stdout, mode='r|')
        if (not self.tar):
            self.tar = tar
        else:
            # Merge the streamed members into the existing cache tar.
            for i in tar:
                self.tar.addfile(i)

    def is_cached(self, filename):
        """True when *filename* is already present in the tar cache."""
        if (not self.tar):
            return False
        return (filename in self.tar.getnames())

    def _get_file(self, filename):
        """Return the tar member for *filename*, fetching it on a cache miss."""
        if (filename not in self.tar.getnames()):
            self.add_to_tar(filename)
        return self.tar.getmember(filename)

    def get_cfg_files(self):
        """Resolve cfg_file/cfg_dir directives to concrete remote file paths,
        caching each referenced path locally on the way."""
        cfg_files = []
        for (config_object, config_value) in self.maincfg_values:
            if (config_object == 'cfg_file'):
                config_value = self.abspath(config_value)
                if self.isfile(config_value):
                    cfg_files.append(config_value)
            elif (config_object == 'cfg_dir'):
                # Expand a directory directive to every *.cfg file beneath it.
                absolut_path = self.abspath(config_value)
                command = ("find '%s' -type f -iname \\*cfg" % absolut_path)
                (stdin, stdout, stderr) = self.ssh.exec_command(command)
                raw_filelist = stdout.read().splitlines()
                cfg_files += raw_filelist
            else:
                continue
            if (not self.is_cached(config_value)):
                self.add_to_tar(config_value)
        return cfg_files

    def isfile(self, path):
        """True when *path* is a regular file (fetching it if needed)."""
        try:
            copy = self._get_file(path)
            return copy.isfile()
        except IOError:
            return False

    def isdir(self, path):
        """True when *path* is a directory in the tar cache."""
        try:
            file_stat = self.stat(path)
            return stat.S_ISDIR(file_stat.st_mode)
        except IOError:
            return False

    def islink(self, path):
        """True when *path* is a symlink in the tar cache."""
        try:
            file_stat = self.stat(path)
            return stat.S_ISLNK(file_stat.st_mode)
        except IOError:
            return False

    def readlink(self, path):
        """Resolve a remote symlink via SFTP."""
        return self.ftp.readlink(path)

    def stat(self, *args, **kwargs):
        """Return an os.stat-like object for a cached path.

        Raises IOError when the path cannot be fetched into the cache.
        """
        path = args[0]
        if (not self.is_cached(path)):
            self.add_to_tar(path)
        if (path not in self.tar.getnames()):
            raise IOError(('No such file or directory %s' % path))
        member = self.tar.getmember(path)
        # Mimic the os.stat_result attribute names callers expect.
        member.st_mode = member.mode
        member.st_mtime = member.mtime
        return member

    def access(self, *args, **kwargs):
        # NOTE(review): checks access on the LOCAL filesystem, not the remote
        # host — presumably intentional for cached paths; confirm.
        return os.access(*args, **kwargs)

    def exists(self, path):
        """True when *path* exists on the remote host (via SFTP stat)."""
        try:
            self.ftp.stat(path)
            return True
        except IOError:
            return False

    def listdir(self, *args, **kwargs):
        """List a remote directory via SFTP, caching each entry's stat."""
        stats = self.ftp.listdir_attr(*args, **kwargs)
        for i in stats:
            self.cached_stats[((args[0] + '/') + i.filename)] = i
        files = [x.filename for x in stats]
        return files
def get_class_splits(spanning_leaves, valid_test_roots=None, **kwargs):
    """Partition WordNet leaf classes into train/valid/test WN-id sets.

    spanning_leaves: mapping from a synset to the leaves it spans.
    valid_test_roots: optional {'valid': synset, 'test': synset}; proposed
        automatically when omitted.
    Returns ({'train': set, 'valid': set, 'test': set}, valid_test_roots).
    """
    if valid_test_roots is not None:
        if valid_test_roots['valid'] is None or valid_test_roots['test'] is None:
            raise ValueError('A root cannot be None.')
    if valid_test_roots is None:
        valid_test_roots = propose_valid_test_roots(spanning_leaves, **kwargs)
    valid_root = valid_test_roots['valid']
    test_root = valid_test_roots['test']
    valid_wn_ids = {s.wn_id for s in spanning_leaves[valid_root]}
    test_wn_ids = {s.wn_id for s in spanning_leaves[test_root]}
    overlap = [s for s in valid_wn_ids if (s in test_wn_ids)]
    logging.info('Size of overlap: %d leaves', len(overlap))
    # Leaves spanned by both roots are split evenly: alternately keep each
    # one in valid (removing from test) and vice versa.
    for position, wn_id in enumerate(overlap):
        if position % 2 == 0:
            test_wn_ids.remove(wn_id)
        else:
            valid_wn_ids.remove(wn_id)
    leaves = get_leaves(spanning_leaves.keys())
    # Everything not claimed by valid/test is training data.
    train_wn_ids = {s.wn_id for s in leaves if s.wn_id not in valid_wn_ids and s.wn_id not in test_wn_ids}
    split_classes = {'train': train_wn_ids, 'valid': valid_wn_ids, 'test': test_wn_ids}
    return (split_classes, valid_test_roots)
class TokenClassificationEvaluator(ClassificationEvaluator):
    def __init__(self, model_args: ModelArguments, data_args: DataTrainingArguments, training_args: TrainingArguments, processor: DataProcessor, model: torch.nn.Module, trainer: Optional[HugTrainer]=None, eval_dataset: Optional[Dataset]=None, test_dataset: Optional[Dataset]=None) -> None:
        """Token-classification evaluator; the paradigm is fixed to NO_GENERATE."""
        super().__init__(model_args, data_args, training_args, processor, model, trainer, eval_dataset, test_dataset)
        self.paradigm = NO_GENERATE
def default_compute_metrics(self, eval_predictions):
examples = self.eval_dataset
labels = examples['label']
golden = {}
(predictions, _) = self.get_best_and_topk(eval_predictions[0], examples, stage='dev')
for example in examples:
try:
idx = int(example['idx'])
except:
idx = int(example['idx'].split('-')[1])
golden[idx] = example['label']
all_metrics = {'eval_macro_f1': 0.0, 'eval_acc': 0.0, 'acc': 0.0}
metric = TokenClassificationMetric()
gold = {k: v for (k, v) in golden.items()}
pred = {k: v for (k, v) in predictions.items()}
score = metric.calc_metric(golden=gold, predictions=pred)
(acc, f1) = (score['acc'], score['f1'])
all_metrics['eval_macro_f1'] = f1
all_metrics['eval_acc'] = acc
all_metrics['acc'] = acc
return all_metrics
def evaluate(self):
if ((not hasattr(self.trainer, 'compute_metrics')) or (self.trainer.compute_metrics is None)):
self.trainer.compute_metrics = self.default_compute_metrics
metrics = self.trainer.evaluate()
max_eval_samples = (self.data_args.max_eval_samples if (self.data_args.max_eval_samples is not None) else len(self.eval_dataset))
metrics['eval_samples'] = min(max_eval_samples, len(self.eval_dataset))
self.trainer.log_metrics('eval', metrics)
self.trainer.save_metrics('eval', metrics)
def predict(self):
assert (self.paradigm == NO_GENERATE), 'classification only support no-generate model.'
if (not self.data_args.keep_predict_labels):
for l in ['labels', 'label']:
if (l in self.test_dataset.column_names):
self.test_dataset = self.test_dataset.remove_columns(l)
prediction = self.trainer.predict(self.test_dataset, metric_key_prefix='predict')
logits = prediction.predictions
if self.data_args.keep_predict_labels:
label_ids = prediction.label_ids
if hasattr(self.processor, 'save_result'):
assert (self.paradigm == NO_GENERATE), 'default processor only support no-generate model.'
if self.trainer.is_world_process_zero():
if (not self.data_args.keep_predict_labels):
self.processor.save_result(logits)
else:
self.processor.save_result(logits, label_ids)
else:
examples = self.test_dataset
(predicts, topk_predictions) = self.get_best_and_topk(logits, examples, stage='test')
label_list = self.processor.labels
id2label = {i: label for (i, label) in enumerate(label_list)}
answer = list()
for (k, tag_list) in predicts.items():
res_list = list()
for v in tag_list:
if (v not in id2label.keys()):
res = ''
print('unknown')
else:
res = id2label[v]
res_list.append(res)
answer.append({'id': k, 'label': res_list})
output_submit_file = os.path.join(self.training_args.output_dir, 'answer.json')
with open(output_submit_file, 'w') as writer:
for (i, pred) in enumerate(answer):
json_d = {}
json_d['id'] = i
json_d['label'] = pred['label']
writer.write((json.dumps(json_d) + '\n'))
topfile = os.path.join(self.training_args.output_dir, 'topk_predict.json')
with open(topfile, 'w', encoding='utf-8') as f2:
json.dump(topk_predictions, f2, ensure_ascii=False, indent=4)
def get_best_and_topk(self, logits, examples, topk=10, stage='dev'):
if (type(logits) == tuple):
logits = logits[0]
predictions = dict()
topk_result = dict()
preds = logits
preds = np.argmax(preds, axis=(- 1))
for (pred, example, logit) in zip(preds, examples, logits):
id_ = example['idx']
id_ = int(id_.split('-')[1])
predictions[id_] = pred.tolist()
proba = softmax(logit)
indices = np.argsort((- proba))
out = list()
for index in indices[:topk]:
prob = proba[index].tolist()
index = index.tolist()
out.append({'prob': prob, 'answer': index})
topk_result[id_] = out
return (predictions, topk_result) |
def DS_format_to_lines(context_mode, summ_mode, args):
    """Convert raw dialogue-summarization JSON into tokenized line format.

    For each of val/test/train, reads ``<raw_path>/<split>.json``, segments
    every utterance and summary sentence with jieba, builds extractive and
    BIO labels, and writes the result under
    ``<save_path>/<context_mode>/<summ_mode>/<split>.json``.
    """
    assert summ_mode in ['final', 'user', 'agent']
    assert context_mode in ['both', 'user', 'agent']
    corpora = {'train': [], 'val': [], 'test': []}
    read_root_path = Path(args.raw_path)
    save_root_path = Path(args.save_path) / f'{context_mode}' / f'{summ_mode}'
    save_root_path.mkdir(exist_ok=True, parents=True)
    # Which summary field of the sample to segment.
    summ_key = {'final': 'FinalSumm', 'user': 'UserSumm', 'agent': 'AgentSumm'}[summ_mode]
    for corpus_type in ['val', 'test', 'train']:
        read_path = read_root_path / f'{corpus_type}.json'
        save_path = save_root_path / f'{corpus_type}.json'
        with read_path.open('r', encoding='utf-8') as r_f:
            json_data = json.load(r_f)
        for sample in json_data:
            summ = [list(jieba.cut(sentence)) for sentence in sample[summ_key]]
            ext_label = []
            bio_indexs = []
            for qa in sample['QA']:
                if qa == []:
                    continue
                if summ_mode == 'final':
                    span = qa['QueSummUttIDs'] + qa['AnsSummLongUttIDs']
                    ext_label = ext_label + span
                    # BIO spans only exist for the combined (final) summary.
                    bio_indexs.append([min(span), max(span)])
                elif summ_mode == 'user':
                    ext_label = ext_label + qa['QueSummUttIDs']
                else:
                    ext_label = ext_label + qa['AnsSummLongUttIDs']
            context = []
            for turn in sample['Dialogue']:
                words = []
                if args.add_prefix:
                    # Prepend the speaker role ('' for the agent side).
                    prefix = sample['QRole'] if turn['speaker'] == 'Q' else ''
                    words += [prefix, ':']
                for word in turn['utterance'].split():
                    if len(word) > 2 and word[0] == '[' and word[-1] == ']':
                        # Split bracketed placeholders into three tokens.
                        words += ['[', word[1:-1], ']']
                    else:
                        words.append(word)
                keep = (context_mode == 'both'
                        or (context_mode == 'user' and turn['speaker'] == 'Q')
                        or (context_mode == 'agent' and turn['speaker'] == 'A'))
                if keep:
                    context.append(words)
            bio_label = convert_bio_label(bio_indexs, len(context))
            corpora[corpus_type].append({'src': context, 'tgt': summ, 'ext': ext_label, 'bio': bio_label})
        with save_path.open('w', encoding='utf-8') as w_f:
            w_f.write(json.dumps(corpora[corpus_type], indent=4, ensure_ascii=False))
def add_or_invite_to_team(inviter, team, user_obj=None, email=None, requires_invite=True):
    """Add a user to a team directly, or create a TeamMemberInvite.

    Robots and existing organization members skip the invite step; an
    email-only call always produces an invite.
    """
    if user_obj and requires_invite:
        orgname = team.organization.username
        if user_obj.robot:
            # Robots are added directly, but only the org's own robots.
            requires_invite = False
            if not user_obj.username.startswith(orgname + '+'):
                raise InvalidTeamMemberException(
                    'Cannot add the specified robot to this team, '
                    'as it is not a member of the organization')
        else:
            # Members of the organization do not need an invitation.
            membership = (TeamMember.select()
                          .where(TeamMember.user == user_obj)
                          .join(Team)
                          .join(User)
                          .where(User.username == orgname, User.organization == True))
            requires_invite = not any(membership)
    if user_obj and not requires_invite:
        add_user_to_team(user_obj, team)
        return None
    # Invitation path: record the email only when no user object is known.
    email_address = email if not user_obj else None
    return TeamMemberInvite.create(user=user_obj, email=email_address, team=team, inviter=inviter)
def get(identifier):
    """Resolve a metric identifier to a callable.

    Accepts either a string name (resolved through ``deserialize``) or an
    already-callable metric, mirroring the Keras ``get`` convention.

    Raises:
        ValueError: if the identifier is neither a string nor callable.
    """
    if isinstance(identifier, six.string_types):
        identifier = str(identifier)
        return deserialize(identifier)
    elif callable(identifier):
        return identifier
    else:
        # Fix: format the identifier into the message instead of passing it
        # as a second positional exception argument.
        raise ValueError(
            'Could not interpret metric function identifier: {}'.format(identifier))
class TestTypeAlias(TestNameCheckVisitorBase):
    """Checks that TypeAlias-annotated names resolve to the aliased type."""

    # NOTE(review): the bare call below looks like a decorator that lost its
    # '@' during extraction (likely ``@assert_passes()``) -- confirm against
    # the original source.
    _passes()
    def test_runtime(self):
        from typing_extensions import TypeAlias
        X: TypeAlias = int
        Y = X
        Z: 'TypeAlias' = int

        def capybara(x: X, y: Y, x_quoted: 'X', y_quoted: 'Y', z: Z) -> None:
            # Every parameter should be inferred as ``int`` regardless of how
            # the alias was declared (direct, re-assigned, or quoted).
            assert_is_value(x, TypedValue(int))
            assert_is_value(y, TypedValue(int))
            assert_is_value(x_quoted, TypedValue(int))
            assert_is_value(y_quoted, TypedValue(int))
            assert_is_value(z, TypedValue(int))
class MultiProjectRefactoring:
    """Wrap a refactoring class so it can span several projects at once."""

    def __init__(self, refactoring, projects, addpath=True):
        # Remember the wrapped refactoring and the projects it should cover.
        self.refactoring = refactoring
        self.projects = projects
        self.addpath = addpath

    def __call__(self, project, *args, **kwds):
        """Instantiate the multi-project refactoring for ``project``."""
        return _MultiRefactoring(self.refactoring, self.projects,
                                 self.addpath, project, *args, **kwds)
class DiceLoss(nn.Module):
    """Soft Dice loss over softmax class probabilities.

    ``forward(output, target)`` expects raw logits ``output`` of shape
    (N, C, ...) and integer labels ``target`` of shape (N, ...) -- TODO
    confirm shapes against callers -- and returns ``1 - Dice`` as a scalar.
    """

    def __init__(self, smooth=1.0, ignore_index=255):
        super(DiceLoss, self).__init__()
        # Label value to be remapped away before one-hot encoding.
        self.ignore_index = ignore_index
        # Additive smoothing that avoids division by zero for empty masks.
        self.smooth = smooth

    def forward(self, output, target):
        # NOTE(review): this mutates the caller's ``target`` tensor in place,
        # and the outer guard only remaps ignore_index when it lies outside
        # the half-open range [target.min(), target.max()) -- confirm this
        # asymmetry is intended.
        if (self.ignore_index not in range(target.min(), target.max())):
            if ((target == self.ignore_index).sum() > 0):
                target[(target == self.ignore_index)] = target.min()
        # One-hot encode targets to match the (N, C, ...) probability tensor.
        target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])
        output = F.softmax(output, dim=1)
        # Flatten both tensors so the Dice statistic is computed globally.
        output_flat = output.contiguous().view((- 1))
        target_flat = target.contiguous().view((- 1))
        intersection = (output_flat * target_flat).sum()
        # Dice = (2*|A.B| + s) / (|A| + |B| + s); the loss is its complement.
        loss = (1 - (((2.0 * intersection) + self.smooth) / ((output_flat.sum() + target_flat.sum()) + self.smooth)))
        return loss
class KnownValues(unittest.TestCase):
    """Regression tests for restricted ADC IP energies and pole strengths.

    Relies on module-level ``myadc`` / ``adc`` fixtures defined elsewhere in
    the file.  NOTE(review): the reference values below (0., 1., -0.) look
    like placeholders stripped from the original source -- confirm against
    upstream before trusting these assertions.
    """

    def test_ip_adc2(self):
        # Ground-state correlation energy at the default ADC(2) level.
        (e, t_amp1, t_amp2) = myadc.kernel_gs()
        self.assertAlmostEqual(e, (- 0.), 6)
        myadcip = adc.radc_ip.RADCIP(myadc)
        # First three ionization energies (e) and pole strengths (p).
        (e, v, p, x) = myadcip.kernel(nroots=3)
        self.assertAlmostEqual(e[0], 0., 6)
        self.assertAlmostEqual(e[1], 0., 6)
        self.assertAlmostEqual(e[2], 0., 6)
        self.assertAlmostEqual(p[0], 1., 6)
        self.assertAlmostEqual(p[1], 1., 6)
        self.assertAlmostEqual(p[2], 1., 6)

    def test_ip_adc2x(self):
        # Same check at the ADC(2)-x level.
        myadc.method = 'adc(2)-x'
        (e, t_amp1, t_amp2) = myadc.kernel_gs()
        self.assertAlmostEqual(e, (- 0.), 6)
        myadcip = adc.radc_ip.RADCIP(myadc)
        (e, v, p, x) = myadcip.kernel(nroots=3)
        self.assertAlmostEqual(e[0], 0., 6)
        self.assertAlmostEqual(e[1], 0., 6)
        self.assertAlmostEqual(e[2], 0., 6)
        self.assertAlmostEqual(p[0], 1., 6)
        self.assertAlmostEqual(p[1], 1., 6)
        self.assertAlmostEqual(p[2], 1., 6)

    def test_ip_adc3(self):
        # ADC(3): four roots, plus the analysis printout path.
        myadc.method = 'adc(3)'
        (e, t_amp1, t_amp2) = myadc.kernel_gs()
        self.assertAlmostEqual(e, (- 0.), 6)
        myadcip = adc.radc_ip.RADCIP(myadc)
        (e, v, p, x) = myadcip.kernel(nroots=4)
        myadcip.analyze()
        self.assertAlmostEqual(e[0], 0., 6)
        self.assertAlmostEqual(e[1], 0., 6)
        self.assertAlmostEqual(e[2], 0., 6)
        self.assertAlmostEqual(e[3], 1., 6)
        self.assertAlmostEqual(p[0], 1., 6)
        self.assertAlmostEqual(p[1], 1., 6)
        self.assertAlmostEqual(p[2], 1., 6)
        self.assertAlmostEqual(p[3], 0., 6)
class BitPackDecoder:
    """Sequentially decodes bit-packed values from an immutable byte buffer."""

    _data: bytes
    _offset: int

    def __init__(self, data: bytes):
        self._data = data
        self._offset = 0

    def decode(self, *args: int) -> tuple[int, ...]:
        """Decode one value per argument, advancing the internal offset."""
        fmt = _compile_format(*args)
        start = self._offset
        self._offset = start + fmt.calcsize()
        return fmt.unpack_from(self._data, start)

    def decode_single(self, value: int) -> int:
        """Decode exactly one value."""
        return self.decode(value)[0]

    def decode_element(self, array: list[T]) -> T:
        """Pick an element of ``array``; a 1-element list needs no bits."""
        if len(array) == 1:
            return array[0]
        return array[self.decode_single(len(array))]

    def peek(self, *args: int) -> tuple[int, ...]:
        """Like ``decode`` but without consuming any data."""
        return _compile_format(*args).unpack_from(self._data, self._offset)

    def ensure_data_end(self) -> None:
        """Raise ValueError if at least one whole byte remains unread."""
        try:
            self.peek(256)
        except bitstruct.Error:
            return
        raise ValueError('At least one entire byte of data is still unread.')
class DebugConnectorBuilder(ConnectorBuilder):
    """Connector builder that produces a DebugRemoteConnector for testing."""

    # Game this builder targets.
    target_game: RandovaniaGame
    # UUID of the layout to debug; defaults to the invalid sentinel UUID.
    layout_uuid: uuid.UUID

    def __init__(self, game: str, layout_uuid: str = str(INVALID_UUID)):
        super().__init__()
        self.target_game = RandovaniaGame(game)
        self.layout_uuid = uuid.UUID(layout_uuid)

    # NOTE(review): takes ``cls`` -- almost certainly a ``@classmethod`` whose
    # decorator was lost during extraction (decorators appear stripped
    # throughout this file); as written it breaks when called on the class.
    def create(cls, game: RandovaniaGame, layout_uuid: uuid.UUID) -> DebugConnectorBuilder:
        return cls(game.value, str(layout_uuid))

    # NOTE(review): likely a ``@property`` (and/or ``@override``) originally.
    def connector_builder_choice(self) -> ConnectorBuilderChoice:
        return ConnectorBuilderChoice.DEBUG

    def configuration_params(self) -> dict:
        # Serializable parameters sufficient to re-create this builder.
        return {'game': self.target_game.value, 'layout_uuid': str(self.layout_uuid)}

    async def build_connector(self) -> (RemoteConnector | None):
        return DebugRemoteConnector(self.target_game, self.layout_uuid)

    def get_status_message(self) -> (str | None):
        return f'{self.target_game.long_name}: {self.layout_uuid}'
def _SetInformationProcess(ql: Qiling, address: int, params):
    """Emulation hook for NtSetInformationProcess: log what the target sets.

    Dispatches on ProcessInformationClass, reads the value the caller is
    writing (when the supplied buffer is large enough) and logs it.
    Returns an NTSTATUS code.
    """
    process = params['ProcessHandle']
    flag = params['ProcessInformationClass']
    ibuf_ptr = params['ProcessInformation']
    ibuf_len = params['ProcessInformationLength']
    if (flag == ProcessDebugFlags):
        flag_name = 'ProcessDebugFlags'
        comment = ''
        read_len = 4
    elif (flag == ProcessDebugPort):
        flag_name = 'ProcessDebugPort'
        comment = ''
        read_len = 4
    elif (flag == ProcessDebugObjectHandle):
        # Pretend no debug object exists (common anti-debug probe).
        return STATUS_PORT_NOT_SET
    elif (flag == ProcessBreakOnTermination):
        flag_name = 'ProcessBreakOnTermination'
        comment = 'the critical flag of the process'
        read_len = 1
    elif (flag == ProcessExecuteFlags):
        flag_name = 'ProcessExecuteFlags'
        comment = 'DEP for the process'
        read_len = 1
    elif (flag == ProcessBasicInformation):
        flag_name = 'ProcessBasicInformation'
        comment = 'PEB debug flag for the process'
        # Size depends on the emulated architecture's pointer width.
        pbi_struct = structs.make_process_basic_info(ql.arch.bits)
        read_len = pbi_struct.sizeof()
    else:
        ql.log.info(f'SetInformationProcess: no implementation for info class {flag:#04x}')
        return STATUS_UNSUCCESSFUL
    if (ibuf_len >= read_len):
        # Integer-sized values are read as ints; anything else as raw bytes.
        data = (ql.mem.read_ptr if (read_len in (1, 2, 4, 8)) else ql.mem.read)(ibuf_ptr, read_len)
        ql.log.debug(f'SetInformationProcess: {flag_name} was set to {data}')
        if comment:
            ql.log.debug(f'The target may be attempting modify {comment}')
    return STATUS_SUCCESS
def AutoDancefer(source, target, output_path=None, synch_video_beat=0, synch_audio_beat=0, beat_offset=0, **kwargs):
    """Fetch both clips, run Dancefer on them, and report the output path."""
    source_video = PullVideo(source_location=source)
    target_video = PullVideo(source_location=target)
    result = Dancefer(
        source_video=source_video,
        target=target_video,
        output_path=output_path,
        force_recompute=True,
        synch_audio_beat=synch_audio_beat,
        synch_video_beat=synch_video_beat,
        beat_offset=beat_offset,
        **kwargs,
    )
    AINFORM('\n\n\nResult saved to {}\n\n\n'.format(result.getPath()))
    return result
def pad(array, transform, pad_width, mode=None, **kwargs):
    """Pad ``array`` and shift the affine ``transform`` accordingly.

    ``pad_width`` is a scalar number of pixels added on every side; the
    transform origin is moved up-left by that amount.  Returns
    ``(padded_array, padded_transform)``.
    """
    import numpy as np
    transform = guard_transform(transform)
    if mode is None:
        # Fix: np.pad rejects mode=None; fall back to numpy's default mode.
        mode = 'constant'
    padded_array = np.pad(array, pad_width, mode, **kwargs)
    padded_trans = list(transform)
    # Shift the origin (c, f terms) by pad_width pixels in each direction.
    padded_trans[2] -= pad_width * padded_trans[0]
    padded_trans[5] -= pad_width * padded_trans[4]
    return padded_array, Affine(*padded_trans[:6])
class CmdEvscapeRoom(Command):
    """Base command for the evscaperoom: parses '<obj1> [<prep> <obj2>]'.

    NOTE(review): the three ``focus`` definitions at the bottom shadow one
    another as written; they are almost certainly a ``@property`` /
    ``@focus.setter`` / ``@focus.deleter`` triple whose decorators were
    stripped during extraction -- confirm against the original source.
    """

    # Accept a /switch, whitespace, or end-of-input right after the command.
    arg_regex = '(/\\w+?(\\s|$))|\\s|$'
    help_category = 'Evscaperoom'
    # Per-command search flags: True=required, False=skip, None=optional.
    obj1_search = None
    obj2_search = None

    def _search(self, query, required):
        """Resolve ``query`` to a room object; return (match, leftover_arg)."""
        if (required is False):
            return (None, query)
        matches = self.caller.search(query, quiet=True)
        if ((not matches) or (len(matches) > 1)):
            if required:
                if (not query):
                    self.caller.msg('You must give an argument.')
                else:
                    _AT_SEARCH_RESULT(matches, self.caller, query=query)
                # Abort command execution on a failed required lookup.
                raise InterruptCommand
            else:
                return (None, query)
        else:
            return (matches[0], None)

    def parse(self):
        """Split args into obj1 [prep obj2] and resolve each part."""
        caller = self.caller
        self.args = self.args.strip()
        parts = [part.strip() for part in _RE_ARGSPLIT.split((' ' + self.args), 1)]
        nparts = len(parts)
        self.obj1 = None
        self.arg1 = None
        self.prep = None
        self.obj2 = None
        self.arg2 = None
        if (nparts == 1):
            (self.obj1, self.arg1) = self._search(parts[0], self.obj1_search)
        elif (nparts == 3):
            (obj1, self.prep, obj2) = parts
            (self.obj1, self.arg1) = self._search(obj1, self.obj1_search)
            (self.obj2, self.arg2) = self._search(obj2, self.obj2_search)
        self.room = caller.location
        self.roomstate = self.room.db.state

    # NOTE(review): stripped ``@property``? -- getter for the caller's focus.
    def focus(self):
        return self.caller.attributes.get('focus', category=self.room.db.tagcategory)

    # NOTE(review): stripped ``@focus.setter``?
    def focus(self, obj):
        self.caller.attributes.add('focus', obj, category=self.room.tagcategory)

    # NOTE(review): stripped ``@focus.deleter``?
    def focus(self):
        self.caller.attributes.remove('focus', category=self.room.tagcategory)
class ToolsWizardPage2(BasePyzoWizardPage):
    """Wizard page recommending the source-structure and file-browser tools."""

    _title = translate('wizard', 'Recommended tools')
    # Illustration shown alongside the page text.
    _image_filename = 'pyzo_tools2.png'
    # Paragraphs displayed on the page; asterisks mark emphasized tool names.
    _descriptions = [translate('wizard', 'We especially recommend the following tools:'), translate('wizard', 'The *Source structure tool* gives an outline of the source code.'), translate('wizard', 'The *File browser tool* helps keep an overview of all files\n in a directory. To manage your projects, click the star icon.')]
def change(file_name, file_out, dict_file, split_=' ', split_3=False):
    """Convert a token-per-line tagging file into CSV training data.

    Lines are '<char><split_><tag>'; malformed lines act as '###' sentence
    separators.  M-/E-/S- tag prefixes are normalized to I-.  With
    ``split_3`` the data is split into train/test/dev CSV files.
    """
    with open(file_name, 'r', encoding='utf-8') as f:
        data = [i.split(split_) if len(i.split(split_)) == 2 else ['###', '###']
                for i in f.readlines()]
    # Normalize BMES-style prefixes down to plain BIO.
    document_pair = [[i[0], i[1].strip().replace('M-', 'I-').replace('E-', 'I-').replace('S-', 'I-')]
                     for i in data]
    label_2_id = load_json(dict_file)
    sen_list, label_list = [], []
    current_chars, current_tags = [], []
    for char, label in document_pair:
        current_chars.append(char)
        current_tags.append(label)
        if char in ('###',):
            # Sentence boundary: keep everything before the sentinel token.
            sen_list.append(current_chars[:-1].copy())
            label_list.append(current_tags[:-1].copy())
            current_chars, current_tags = [], []
    raw_sen_list = [''.join(chars) for chars in sen_list]
    label2_list = [[label_2_id[tag] for tag in tags] for tags in label_list]
    # Pad label ids with 'O' on both ends (for [CLS]/[SEP]-style tokens).
    data_dict = [{'sen': chars,
                  'label_decode': tags,
                  'label': [label_2_id['O']] + ids + [label_2_id['O']],
                  'raw_sen': raw}
                 for chars, tags, ids, raw in zip(sen_list, label_list, label2_list, raw_sen_list)
                 if chars]
    df = pd.DataFrame(data_dict)
    df['length'] = df['sen'].apply(len)
    if split_3:
        train_df, test_df = train_test_split(df, test_size=0.2)
        test_df, dev_df = train_test_split(test_df, test_size=0.1)
        train_df.to_csv(file_out, index=False, encoding='utf-8')
        test_df.to_csv('test.csv', index=False, encoding='utf-8')
        dev_df.to_csv('dev.csv', index=False, encoding='utf-8')
    else:
        df.to_csv(file_out, index=False, encoding='utf-8')
class UEM(MutableMapping):
    """Mapping of file id -> merged, validated list of (onset, offset) pairs.

    Mapping storage is the instance ``__dict__`` itself; ``__setitem__``
    validates, float-converts and overlap-merges the intervals.
    """

    def __init__(self, *args, **kwargs):
        super(UEM, self).__init__()
        self.update(*args, **kwargs)

    def __setitem__(self, fid, score_regions):
        """Store validated, overlap-merged (onset, offset) pairs for ``fid``.

        Raises:
            TypeError: if ``score_regions`` is not a sequence of pairs.
            ValueError: if an interval is non-numeric, empty, or negative.
        """
        invalid_type_msg = ('Expected sequence of pairs. Received: %r (%s).' % (score_regions, type(score_regions)))
        try:
            score_regions = [tuple(region) for region in score_regions]
        except TypeError:
            raise TypeError(invalid_type_msg)
        for score_region in score_regions:
            if (len(score_region) != 2):
                raise TypeError(invalid_type_msg)

        def _convert_to_float(score_region):
            # Coerce to floats and enforce 0 <= onset < offset.
            (onset, offset) = score_region
            try:
                onset = float(onset)
                offset = float(offset)
            except ValueError:
                raise ValueError(('Could not convert interval onset/offset to float: %s' % repr(score_region)))
            if ((onset >= offset) or (onset < 0)):
                raise ValueError(('Invalid interval (%.3f, %.3f) for file "%s".' % (onset, offset, fid)))
            return (onset, offset)
        score_regions = [_convert_to_float(region) for region in score_regions]
        # Merge any overlapping regions via an interval tree.
        tree = IntervalTree.from_tuples(score_regions)
        tree.merge_overlaps()
        # NOTE(review): tree iteration order is not guaranteed to be sorted.
        score_regions = [(intrvl.begin, intrvl.end) for intrvl in tree]
        self.__dict__[fid] = score_regions

    def __getitem__(self, key):
        return self.__dict__[key]

    def __delitem__(self, key):
        del self.__dict__[key]

    def __iter__(self):
        return iter(self.__dict__)

    def __len__(self):
        return len(self.__dict__)

    def __str__(self):
        return str(self.__dict__)

    def __repr__(self):
        return '{}, UEM({})'.format(super(UEM, self).__repr__(), self.__dict__)
def generate_subset_of_filenames(subset=None, base_dir=''):
    """Expand a channel->segments mapping into concrete file names.

    With ``subset=None`` the full set of channels/segments is used.
    """
    if subset is None:
        subset = _create_full_set()
    pattern = os.path.join(base_dir, FILENAME)
    # One flat list across all channels, in mapping order.
    return [name
            for channel, segments in subset.items()
            for name in _generate_filenames(pattern, channel, segments)]
class MinMaxScaler(SKCMatrixAndWeightTransformerABC):
    """Scale matrix/weights into a target range via sklearn's MinMaxScaler.

    NOTE(review): decorators appear stripped throughout this file --
    ``clip`` and ``criteria_range`` read like ``@property`` accessors (they
    are used as values in ``_get_scaler``), and the bare ``_inherit(...)``
    calls below look like stripped ``@``-decorators.  Confirm upstream.
    """

    # Parameters exposed to the skcriteria parameter machinery.
    _skcriteria_parameters = ['target', 'clip', 'criteria_range']

    def __init__(self, target, *, clip=False, criteria_range=(0, 1)):
        super().__init__(target)
        self._clip = bool(clip)
        # Normalize the requested output range to floats.
        (self._cr_min, self._cr_max) = map(float, criteria_range)

    def clip(self):
        return self._clip

    def criteria_range(self):
        return (self._cr_min, self._cr_max)

    def _get_scaler(self):
        # Build a fresh sklearn scaler configured from this transformer.
        return _sklpreproc.MinMaxScaler(clip=self.clip, feature_range=self.criteria_range)

    _inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
    def _transform_weights(self, weights):
        scaler = self._get_scaler()
        return _run_sklearn_scaler(weights, scaler)

    _inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
    def _transform_matrix(self, matrix):
        scaler = self._get_scaler()
        return _run_sklearn_scaler(matrix, scaler)
class DszCommandError(list):
    """List of error strings produced by a failed dsz command run."""

    def __init__(self, timestamp, cmdid):
        self.timestamp = timestamp
        self.__cmdid = cmdid
        list.__init__(self)

    def __str__(self):
        """Render the command name plus any collected error details."""
        full_command = dsz.cmd.data.Get('commandmetadata::fullcommand', dsz.TYPE_STRING, cmdId=self.__cmdid)[0]
        text = 'Error running command %d: %s\n' % (self.__cmdid, full_command)
        if not len(self):
            return text + ' - No additional information available. Try viewing the logs.'
        for detail in self:
            text += ' - %s' % detail
        return text
def total_intersect_and_union(results, gt_seg_maps, num_classes, ignore_index,
                              label_map=None, reduce_zero_label=False):
    """Accumulate per-class intersection/union areas over a whole dataset.

    Args:
        results: iterable of predicted segmentation maps.
        gt_seg_maps: iterable of ground-truth maps, aligned with ``results``.
        num_classes: number of semantic classes.
        ignore_index: label to exclude from the statistics.
        label_map: optional dict remapping label ids (default: no remapping).
        reduce_zero_label: whether label 0 is treated as ignored.

    Returns:
        (total_intersect, total_union, total_pred, total_label) float64
        tensors of shape (num_classes,). NOTE: tensors live on CUDA.
    """
    # Fix: the old signature used the mutable default ``label_map=dict()``.
    if label_map is None:
        label_map = dict()
    total_area_intersect = torch.zeros((num_classes,), dtype=torch.float64).cuda()
    total_area_union = torch.zeros((num_classes,), dtype=torch.float64).cuda()
    total_area_pred_label = torch.zeros((num_classes,), dtype=torch.float64).cuda()
    total_area_label = torch.zeros((num_classes,), dtype=torch.float64).cuda()
    for result, gt_seg_map in zip(results, gt_seg_maps):
        (area_intersect, area_union, area_pred_label, area_label) = intersect_and_union(
            result, gt_seg_map, num_classes, ignore_index, label_map, reduce_zero_label)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return (total_area_intersect, total_area_union, total_area_pred_label, total_area_label)
# NOTE(review): the line below lost its decorator prefix during extraction;
# it should read ``@pytest.mark.parametrize(...)`` -- as written it is not
# valid top-level syntax.
.parametrize(['ops', 'error'], [pytest.param([qutip.basis(5, 0)], 'square', id='Not square'), pytest.param([qutip.qeye(5), qutip.qeye(3)], 'shape', id='shape mismatch'), pytest.param([qutip.destroy(5)], 'Hermitian', id='Non Hermitian'), pytest.param([qutip.sigmax(), qutip.sigmay()], 'commute', id='Not commuting')])
def test_simdiag_errors(ops, error):
    """simdiag must reject invalid operator lists with a descriptive TypeError."""
    with pytest.raises(TypeError) as err:
        qutip.simdiag(ops)
    # The message should mention the specific problem (square/shape/...).
    assert (error in str(err.value))
class RobustNorm(nn.BatchNorm2d):
    """BatchNorm2d variant normalizing by a power of the per-channel range.

    Instead of variance, the denominator is ``(max - min) ** power`` per
    channel; ``running_var`` is reused to track the running range.
    Eval mode can use either tracked or batch statistics, controlled by
    ``use_tracked_mean`` / ``use_tracked_range``.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, use_tracked_mean=True,
                 use_tracked_range=True, power=0.2):
        # Fix: call through super() instead of the explicit base class.
        super().__init__(num_features=num_features, eps=eps, momentum=momentum,
                         affine=affine, track_running_stats=track_running_stats)
        self.power = power
        self.use_tracked_mean = use_tracked_mean
        self.use_tracked_range = use_tracked_range

    def forward(self, x):
        self._check_input_dim(x)
        # Flatten to (channels, everything-else) for per-channel statistics.
        y = x.transpose(0, 1)
        return_shape = y.shape
        y = y.contiguous().view(x.size(1), -1)
        mu = y.mean(dim=1)
        # Fix: renamed locals that shadowed the builtins min/max/range.
        channel_min = y.min(dim=1)[0]
        channel_max = y.max(dim=1)[0]
        value_range = torch.sub(channel_max, channel_min)
        if self.training is not True:
            # Eval: optionally use the tracked statistics.
            if self.use_tracked_mean is True:
                y = y - self.running_mean.view(-1, 1)
            else:
                y = y - mu.view(-1, 1)
            if self.use_tracked_range is True:
                y = y / (self.running_var.view(-1, 1) ** self.power + self.eps)
            else:
                y = y / (value_range.view(-1, 1) ** self.power + self.eps)
        else:
            # Train: update running stats (running_var tracks the range),
            # then normalize with the batch statistics.
            with torch.no_grad():
                self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mu
                self.running_var = (1 - self.momentum) * self.running_var + self.momentum * value_range
            y = y - mu.view(-1, 1)
            y = y / (value_range.view(-1, 1) ** self.power + self.eps)
        # Affine transform, then restore the original layout.
        y = self.weight.view(-1, 1) * y + self.bias.view(-1, 1)
        return y.view(return_shape).transpose(0, 1)
def get_dataset(data_cfg):
    """Instantiate dataset(s) described by ``data_cfg``.

    ``ann_file`` / ``img_prefix`` may be single values or aligned lists; a
    single prefix is broadcast over all annotation files.
    """
    if isinstance(data_cfg['ann_file'], (list, tuple)):
        ann_files = data_cfg['ann_file']
        num_dset = len(ann_files)
    else:
        ann_files = [data_cfg['ann_file']]
        num_dset = 1
    if isinstance(data_cfg['img_prefix'], (list, tuple)):
        img_prefixes = data_cfg['img_prefix']
    else:
        # Reuse the single prefix for every annotation file.
        img_prefixes = ([data_cfg['img_prefix']] * num_dset)
    assert (len(img_prefixes) == num_dset)
    dsets = []
    for i in range(num_dset):
        data_info = copy.deepcopy(data_cfg)
        data_info['ann_file'] = ann_files[i]
        data_info['img_prefix'] = img_prefixes[i]
        dset = obj_from_dict(data_info, datasets)
        dsets.append(dset)
    dset = dsets[0]
    # NOTE(review): every dataset is constructed but only the FIRST is
    # returned -- the rest are discarded.  The originals were probably
    # concatenated here; confirm against upstream before relying on this.
    return dset
def parse_key_part(src: str, pos: Pos) -> tuple[(Pos, str)]:
    """Parse one TOML key part (bare, literal-quoted, or basic-quoted).

    Returns the position after the part and its unescaped text.
    Raises a suffixed error on an invalid initial character.
    """
    # Peek at the character at pos; None signals end of input.
    char: (str | None) = src[pos] if pos < len(src) else None
    if char in BARE_KEY_CHARS:
        begin = pos
        pos = skip_chars(src, pos, BARE_KEY_CHARS)
        return pos, src[begin:pos]
    if char == "'":
        return parse_literal_str(src, pos)
    if char == '"':
        return parse_one_line_basic_str(src, pos)
    raise suffixed_err(src, pos, 'Invalid initial character for a key part')
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    """LSGAN discriminator loss summed over all discriminator outputs.

    Real outputs are pushed toward 1, generated outputs toward 0.
    Returns (total loss, per-discriminator real losses, fake losses).
    """
    total = 0
    real_losses, fake_losses = [], []
    for real_out, fake_out in zip(disc_real_outputs, disc_generated_outputs):
        real_term = torch.mean((1 - real_out) ** 2)
        fake_term = torch.mean(fake_out ** 2)
        total = total + (real_term + fake_term)
        # Track scalar values for logging.
        real_losses.append(real_term.item())
        fake_losses.append(fake_term.item())
    return total, real_losses, fake_losses
class CWERMetric(tf.keras.metrics.Metric):
    """Keras metric tracking Character and Word Error Rate for CTC models."""

    def __init__(self, padding_token, name='CWER', **kwargs):
        super(CWERMetric, self).__init__(name=name, **kwargs)
        # Running sums of per-sample edit distances / wrong-sample counts.
        self.cer_accumulator = tf.Variable(0.0, name='cer_accumulator', dtype=tf.float32)
        self.wer_accumulator = tf.Variable(0.0, name='wer_accumulator', dtype=tf.float32)
        self.batch_counter = tf.Variable(0, name='batch_counter', dtype=tf.int32)
        # Label id used for padding in y_true; excluded from the distance.
        self.padding_token = padding_token

    def update_state(self, y_true, y_pred, sample_weight=None):
        input_shape = tf.keras.backend.shape(y_pred)
        # Every sample uses the full time dimension for CTC decoding.
        input_length = (tf.ones(shape=input_shape[0], dtype='int32') * tf.cast(input_shape[1], 'int32'))
        (decode_predicted, log) = tf.keras.backend.ctc_decode(y_pred, input_length, greedy=True)
        predicted_labels_sparse = tf.keras.backend.ctc_label_dense_to_sparse(decode_predicted[0], input_length)
        true_labels_sparse = tf.cast(tf.keras.backend.ctc_label_dense_to_sparse(y_true, input_length), 'int64')
        # Drop CTC blanks (-1) from predictions and padding from the truth.
        predicted_labels_sparse = tf.sparse.retain(predicted_labels_sparse, tf.not_equal(predicted_labels_sparse.values, (- 1)))
        true_labels_sparse = tf.sparse.retain(true_labels_sparse, tf.not_equal(true_labels_sparse.values, self.padding_token))
        # Normalized edit distance per sample == character error rate.
        distance = tf.edit_distance(predicted_labels_sparse, true_labels_sparse, normalize=True)
        self.cer_accumulator.assign_add(tf.reduce_sum(distance))
        self.batch_counter.assign_add(len(y_true))
        # A sample counts as a word error whenever its distance is non-zero.
        self.wer_accumulator.assign_add(tf.reduce_sum(tf.cast(tf.not_equal(distance, 0), tf.float32)))

    def result(self):
        """Return averaged CER/WER (0 when no samples have been seen)."""
        return {'CER': tf.math.divide_no_nan(self.cer_accumulator, tf.cast(self.batch_counter, tf.float32)), 'WER': tf.math.divide_no_nan(self.wer_accumulator, tf.cast(self.batch_counter, tf.float32))}
class ArithmeticCoder():
    """Streaming arithmetic (range) coder emitting bits through a BitPacker.

    NOTE(review): ``delta`` is used as a plain value in ``push``
    (``self.delta < ...``, ``self.delta / ...``) yet is defined as a method
    here -- it was almost certainly a ``@property`` whose decorator was
    stripped during extraction; as written ``push`` would raise TypeError.
    """

    def __init__(self, fo: tp.IO[bytes], total_range_bits: int=24):
        assert (total_range_bits <= 30)
        self.total_range_bits = total_range_bits
        self.packer = BitPacker(bits=1, fo=fo)  # emits one bit at a time
        # Current coding interval [low, high], tracked as big integers.
        self.low: int = 0
        self.high: int = 0
        # Highest significant bit position of the interval (-1 = empty).
        self.max_bit: int = (- 1)
        self._dbg: tp.List[tp.Any] = []
        self._dbg2: tp.List[tp.Any] = []

    def delta(self) -> int:
        """Width of the current coding interval."""
        return ((self.high - self.low) + 1)

    def _flush_common_prefix(self):
        # Emit leading bits shared by low and high; they can no longer change.
        assert (self.high >= self.low), (self.low, self.high)
        assert (self.high < (2 ** (self.max_bit + 1)))
        while (self.max_bit >= 0):
            b1 = (self.low >> self.max_bit)
            b2 = (self.high >> self.max_bit)
            if (b1 == b2):
                self.low -= (b1 << self.max_bit)
                self.high -= (b1 << self.max_bit)
                assert (self.high >= self.low), (self.high, self.low, self.max_bit)
                assert (self.low >= 0)
                self.max_bit -= 1
                self.packer.push(b1)
            else:
                break

    def push(self, symbol: int, quantized_cdf: torch.Tensor):
        """Encode one symbol given its quantized CDF tensor."""
        # Grow the interval until it can resolve the full quantized range.
        while (self.delta < (2 ** self.total_range_bits)):
            self.low *= 2
            self.high = ((self.high * 2) + 1)
            self.max_bit += 1
        range_low = (0 if (symbol == 0) else quantized_cdf[(symbol - 1)].item())
        range_high = (quantized_cdf[symbol].item() - 1)
        # Map the symbol's CDF slice onto the current interval.
        effective_low = int(math.ceil((range_low * (self.delta / (2 ** self.total_range_bits)))))
        effective_high = int(math.floor((range_high * (self.delta / (2 ** self.total_range_bits)))))
        assert (self.low <= self.high)
        self.high = (self.low + effective_high)
        self.low = (self.low + effective_low)
        assert (self.low <= self.high), (effective_low, effective_high, range_low, range_high)
        self._dbg.append((self.low, self.high))
        self._dbg2.append((self.low, self.high))
        outs = self._flush_common_prefix()
        assert (self.low <= self.high)
        assert (self.max_bit >= (- 1))
        assert (self.max_bit <= 61), self.max_bit
        return outs

    def flush(self):
        """Flush the remaining interval bits and the underlying packer."""
        while (self.max_bit >= 0):
            b1 = ((self.low >> self.max_bit) & 1)
            self.packer.push(b1)
            self.max_bit -= 1
        self.packer.flush()
def test_help_subcommand_completion_multiple(sc_app):
    """Tab-completing 'help base ' should offer every subcommand."""
    text = ''
    line = f'help base {text}'
    endidx = len(line)
    begidx = endidx - len(text)
    first_match = complete_tester(text, line, begidx, endidx, sc_app)
    assert first_match is not None
    assert sc_app.completion_matches == ['bar', 'foo', 'sport']
class EngineGenerator:
    """Factory producing VLLM engine / scaling configurations for tests."""

    def __init__(self, max_input_length: int = 1024, max_total_tokens: int = 2048):
        self.max_input_length = max_input_length
        self.max_total_tokens = max_total_tokens

    def create_engine_config(self):
        """Build the VLLM engine config with a pass-through prompt format."""
        prompt_format = PromptFormat(system='{instruction}',
                                     assistant='{instruction}',
                                     trailing_assistant='',
                                     user='{instruction}')
        return VLLMEngineConfig(type='VLLMEngine', model_id=MODEL_ID,
                                generation=GenerationConfig(prompt_format=prompt_format))

    def create_scaling_config(self):
        """Single CPU-only worker, strictly packed."""
        return ScalingConfig(num_workers=1, num_gpus_per_worker=0,
                             num_cpus_per_worker=1, placement_strategy='STRICT_PACK')

    def create_engine(self):
        """Assemble the engine from the two configs above."""
        app = VLLMApp(engine_config=self.create_engine_config(),
                      scaling_config=self.create_scaling_config())
        return VLLMEngine(app)
class FileToMiscIter(IterWrappingFile):
    """Iterate over a MiscIter stream of typed, length-prefixed records.

    Record layout: 1 type byte + 7 length bytes + payload.  Types seen here:
    b'r' rorp tuple, b'o' pickled object, b'f' virtual-file data, b'e'
    pickled exception, b'h' pickled (passed through), b'z' end of stream.
    """

    def __init__(self, file):
        IterWrappingFile.__init__(self, file)
        # Unconsumed bytes already read from the underlying file.
        self.buf = b''

    def __iter__(self):
        return self

    def __next__(self):
        # Close any virtual file from the previous record before advancing.
        if self.currently_in_file:
            self.currently_in_file.close()
        type = None
        while (not type):
            (type, data) = self._get()
        if (type == b'z'):
            raise StopIteration
        elif (type == b'r'):
            return self._get_rorp(data)
        elif (type == b'o'):
            return data
        else:
            raise IterFileException(('Bad file type %s' % (type,)))

    def _get_rorp(self, pickled_tuple):
        """Rebuild a RORPath; attach its file stream when one follows."""
        (index, data_dict, num_files) = pickled_tuple
        rorp = rpath.RORPath(index, data_dict)
        if num_files:
            assert (num_files == 1), 'Only one file accepted right now'
            rorp.setfile(self._get_file())
        return rorp

    def _get_file(self):
        """Return a file-like object for the next b'f' / b'e' record."""
        (file_type, file_data) = self._get()
        if (file_type == b'f'):
            return IterVirtualFile(self, file_data)
        elif ((file_type == b'e') and isinstance(file_data, Exception)):
            return ErrorFile(file_data)
        else:
            raise ValueError("File type is '{ftype}', should be one of [fe], or data isn't an _e_xception but a {dtype}.".format(ftype=file_type, dtype=type(file_data)))

    def _get(self):
        """Read one record; returns (type, payload) or (None, None) at EOF."""
        if (not self.buf):
            self.buf += self.file.read()
            if (not self.buf):
                return (None, None)
        assert (len(self.buf) >= 8), 'Unexpected end of MiscIter file'
        # Header: 1 type byte, 7 bytes of big-endian payload length.
        (type, length) = (self.buf[0:1], self._b2i(self.buf[1:8]))
        data = self.buf[8:(8 + length)]
        self.buf = self.buf[(8 + length):]
        # Object-like records are pickled; the rest are raw bytes.
        if (type in b'oerh'):
            return (type, pickle.loads(data))
        else:
            return (type, data)
class TPLPushHandler(BaseHandler):
    """Handles pushing a template to another user's template (push request).

    NOTE(review): the bare ``.authenticated`` lines below lost their
    decorator prefix during extraction (likely ``@tornado.web.authenticated``),
    and the empty ``<span>`` alert strings appear to have had their
    (non-ASCII) message text stripped -- confirm against the original.
    """

    .authenticated
    async def get(self, tplid):
        # Render the push form, listing candidate target templates.
        user = self.current_user
        tpl = (await self.db.tpl.get(tplid, fields=('id', 'userid', 'sitename')))
        if (not self.permission(tpl, 'w')):
            # No write permission: count it as suspicious and bail out.
            self.evil((+ 5))
            (await self.finish(u'<span class="alert alert-danger"></span>'))
            return
        tpls = (await self.db.tpl.list(userid=None, limit=None, fields=('id', 'sitename', 'public')))
        for i in range(len(tpls)):
            if (tpls[i]['public'] == 2):
                # Mark already-published templates in the listing.
                tpls[i]['sitename'] += u' []'
        (await self.render('tpl_push.html', tpl=tpl, tpls=tpls))

    .authenticated
    async def post(self, tplid):
        # Create the push request and lock the source template.
        user = self.current_user
        tplid = int(tplid)
        async with self.db.transaction() as sql_session:
            tpl = (await self.db.tpl.get(tplid, fields=('id', 'userid'), sql_session=sql_session))
            if (not self.permission(tpl, 'w')):
                self.evil((+ 5))
                (await self.finish(u'<span class="alert alert-danger"></span>'))
                return
            to_tplid = int(self.get_argument('totpl'))
            msg = self.get_argument('msg')
            if (to_tplid == 0):
                # 0 means "push as a new template" -- no target owner.
                to_tplid = None
                to_userid = None
            else:
                totpl = (await self.db.tpl.get(to_tplid, fields=('id', 'userid'), sql_session=sql_session))
                if (not totpl):
                    self.evil((+ 1))
                    (await self.finish(u'<span class="alert alert-danger"></span>'))
                    return
                to_userid = totpl['userid']
            (await self.db.push_request.add(from_tplid=tpl['id'], from_userid=user['id'], to_tplid=to_tplid, to_userid=to_userid, msg=msg, sql_session=sql_session))
            # Lock the source template until the push is resolved.
            (await self.db.tpl.mod(tpl['id'], lock=True, sql_session=sql_session))
        self.redirect('/pushs')
def test_standstillcondition():
    """StandStillCondition equality, round-trip parsing, version validation."""
    cond = OSC.StandStillCondition(1)
    prettyprint(cond.get_element())
    same_duration = OSC.StandStillCondition(1)
    other_duration = OSC.StandStillCondition(3)
    assert cond == same_duration
    assert cond != other_duration
    # Serializing and re-parsing must yield an equal condition.
    reparsed = OSC.StandStillCondition.parse(cond.get_element())
    assert cond == reparsed
    # Valid in every supported OpenSCENARIO minor version.
    for minor in (0, 1, 2):
        assert version_validation('EntityCondition', cond, minor) == ValidationResponse.OK
class TextFrame():
    """Draws a padded, bordered frame around a text layout.

    NOTE(review): ``height`` and ``width`` are used as plain values inside
    ``draw`` (``self.height - self.border_width``) but defined as methods at
    the bottom -- almost certainly ``@property`` accessors whose decorators
    were stripped during extraction; confirm against the original source.
    """

    def __init__(self, layout, border_width, border_color, pad_x, pad_y, highlight_color=None):
        self.layout = layout
        self.border_width = border_width
        self.border_color = border_color
        self.drawer = self.layout.drawer
        self.highlight_color = highlight_color
        # pad_x / pad_y may be a single number or a (leading, trailing) pair.
        if isinstance(pad_x, collections.abc.Iterable):
            self.pad_left = pad_x[0]
            self.pad_right = pad_x[1]
        else:
            self.pad_left = self.pad_right = pad_x
        if isinstance(pad_y, collections.abc.Iterable):
            self.pad_top = pad_y[0]
            self.pad_bottom = pad_y[1]
        else:
            self.pad_top = self.pad_bottom = pad_y

    def draw(self, x, y, rounded=True, fill=False, line=False, highlight=False):
        """Draw the frame at (x, y): underline, filled box, or outline."""
        self.drawer.set_source_rgb(self.border_color)
        # [x, y, width, height, border_width] for the drawer primitives.
        opts = [x, y, ((self.layout.width + self.pad_left) + self.pad_right), ((self.layout.height + self.pad_top) + self.pad_bottom), self.border_width]
        if line:
            if highlight:
                # Fill the whole cell with the highlight color first.
                self.drawer.set_source_rgb(self.highlight_color)
                self.drawer.fillrect(*opts)
                self.drawer.set_source_rgb(self.border_color)
            # Then draw only a bottom border line.
            opts[1] = (self.height - self.border_width)
            opts[3] = self.border_width
            self.drawer.fillrect(*opts)
        elif fill:
            if rounded:
                self.drawer.rounded_fillrect(*opts)
            else:
                self.drawer.fillrect(*opts)
        elif rounded:
            self.drawer.rounded_rectangle(*opts)
        else:
            self.drawer.rectangle(*opts)
        self.drawer.ctx.stroke()
        # Finally draw the text inside the padding.
        self.layout.draw((x + self.pad_left), (y + self.pad_top))

    def draw_fill(self, x, y, rounded=True):
        self.draw(x, y, rounded=rounded, fill=True)

    def draw_line(self, x, y, highlighted):
        self.draw(x, y, line=True, highlight=highlighted)

    def height(self):
        # Total frame height: layout plus vertical padding.
        return ((self.layout.height + self.pad_top) + self.pad_bottom)

    def width(self):
        # Total frame width: layout plus horizontal padding.
        return ((self.layout.width + self.pad_left) + self.pad_right)
def _build_plain_hierarchy(hierarchy, is_root=False):
    """Recursively flatten a label hierarchy into ancestor/descendant tables.

    Returns ``(keyed_parent, keyed_child, children)`` where ``keyed_parent``
    maps a label id to the set of its descendants, ``keyed_child`` maps a
    label id to the set of its ancestors, and ``children`` is the set of all
    label ids in this subtree (the root node itself is excluded).
    """
    children = set()
    keyed_parent = {}
    keyed_child = {}

    # Fold every subcategory's tables into ours.
    for node in hierarchy.get('Subcategory', []):
        sub_parent, sub_child, sub_children = _build_plain_hierarchy(node)
        _update_dict(keyed_parent, sub_parent)
        _update_dict(keyed_child, sub_child)
        children.update(sub_children)

    if not is_root:
        label = freebase2id[hierarchy['LabelName']]
        # This node is an ancestor of everything gathered below it
        # (snapshot taken before the node adds itself to `children`).
        keyed_parent[label] = copy.deepcopy(children)
        children.add(label)
        # Conversely, every gathered descendant gains it as an ancestor.
        for descendant in keyed_child:
            keyed_child[descendant].add(label)
        keyed_child[label] = set()

    return (keyed_parent, keyed_child, children)
def merge_pos_pairs_into_dialog_file(data_folder, dialog_file):
    """Concatenate the positive pairs from the train and valid splits into
    *dialog_file*.

    Each input line is tab-separated with a leading 0/1 label; lines labeled
    '0' (negatives) are skipped, everything else is copied verbatim.

    Note: the split name is appended to *data_folder* by plain string
    concatenation, so *data_folder* is expected to end with a path separator.

    Fix: the output file was opened without a context manager and never
    closed, leaking the handle (and potentially buffered data) if reading an
    input failed; both files are now managed with ``with``.
    """
    with open(dialog_file, 'w', encoding='utf-8') as fout:
        for partition in ('train', 'valid'):
            with open(data_folder + partition + '.txt', encoding='utf-8') as fin:
                for l in tqdm(fin):
                    tokens = l.split('\t')
                    if tokens[0] == '0':
                        continue
                    fout.write(l)
def test_state_wait_secretrequest_valid():
    """A valid ReceiveSecretRequest emits exactly one SendSecretReveal; the
    same request delivered a second time produces no further events."""
    setup = setup_initiator_tests()

    def make_request():
        # Fresh state-change instance mirroring the initiator's lock.
        return ReceiveSecretRequest(
            payment_identifier=UNIT_TRANSFER_IDENTIFIER,
            amount=setup.lock.amount,
            expiration=setup.lock.expiration,
            secrethash=setup.lock.secrethash,
            sender=UNIT_TRANSFER_TARGET,
        )

    def transition(payment_state, state_change):
        return initiator_manager.state_transition(
            payment_state=payment_state,
            state_change=state_change,
            channelidentifiers_to_channels=setup.channel_map,
            addresses_to_channel=setup.channels.addresses_to_channel(),
            pseudo_random_generator=setup.prng,
            block_number=setup.block_number,
        )

    first = transition(setup.current_state, make_request())
    assert len(first.events) == 1
    assert isinstance(first.events[0], SendSecretReveal)
    assert get_transfer_at_index(first.new_state, 0).received_secret_request is True

    # Duplicate request: must be a no-op.
    second = transition(first.new_state, make_request())
    assert not second.events
def admin_session(user, session_id):
    """Render an HTML table of every world/user association in a session,
    including each association's decoded inventory (or an error note)."""
    session: MultiplayerSession = MultiplayerSession.get_by_id(session_id)
    associations: list[WorldUserAssociation] = list(
        WorldUserAssociation.select().join(World).where((World.session == session))
    )

    rows = []
    for assoc in associations:
        if assoc.inventory is None:
            items = ['Missing']
        else:
            decoded = remote_inventory.decode_remote_inventory(assoc.inventory)
            if isinstance(decoded, construct.ConstructError):
                items = [f'Error parsing: {decoded}']
            else:
                # Resolve item names via the game's resource database.
                game = VersionedPreset.from_str(assoc.world.preset).game
                db = default_database.resource_database_for(game)
                items = [
                    f'{db.get_item(item_name).long_name} x{item}'
                    for (item_name, item) in decoded.items()
                    if item > 0
                ]
        rows.append([
            assoc.user.name,
            assoc.world.name,
            assoc.connection_state.pretty_text,
            ', '.join(items),
        ])

    header = ['User', 'World', 'Connection State', 'Inventory']
    head_html = ''.join((f'<th>{h}</th>' for h in header))
    body_html = ''.join(
        ('<tr>{}</tr>'.format(''.join((f'<td>{h}</td>' for h in r))) for r in rows)
    )
    return "<table border='1'><tr>{}</tr>{}</table>".format(head_html, body_html)
def mosek_solve_qp(P: Union[(np.ndarray, spa.csc_matrix)], q: np.ndarray, G: Optional[Union[(np.ndarray, spa.csc_matrix)]]=None, h: Optional[np.ndarray]=None, A: Optional[Union[(np.ndarray, spa.csc_matrix)]]=None, b: Optional[np.ndarray]=None, lb: Optional[np.ndarray]=None, ub: Optional[np.ndarray]=None, initvals: Optional[np.ndarray]=None, verbose: bool=False, **kwargs) -> Optional[np.ndarray]:
    """Solve the quadratic program defined by (P, q, G, h, A, b, lb, ub)
    with MOSEK and return the primal solution, or ``None`` when no solution
    was found."""
    qp = Problem(P, q, G, h, A, b, lb, ub)
    solution = mosek_solve_problem(qp, initvals, verbose, **kwargs)
    if solution.found:
        return solution.x
    return None
class DevDataset(Dataset):
    """Dev split for the SQL-to-text task.

    Each raw example is copied and extended with the model I/O fields
    (``struct_in``/``text_in``/``seq_out``); the processed list is cached on
    disk when ``args.dataset.use_cache`` is enabled.
    """

    def __init__(self, args, raw_datasets, cache_root):
        self.args = args
        self.raw_datasets = raw_datasets
        cache_path = os.path.join(cache_root, 'sql2text_dev.cache')
        use_cache = args.dataset.use_cache
        if use_cache and os.path.exists(cache_path):
            # Reuse previously processed examples.
            self.data = torch.load(cache_path)
        else:
            self.data = []
            for example in tqdm(self.raw_datasets):
                item = deepcopy(example)
                item.update({
                    'struct_in': '',
                    'text_in': item['query'],
                    'seq_out': item['question'].strip(),
                })
                self.data.append(item)
            if use_cache:
                torch.save(self.data, cache_path)

    def __getitem__(self, index) -> T_co:
        return self.data[index]

    def __len__(self):
        return len(self.data)
def test_biorbd_model_import():
    """MultiBiorbdModel accepts file paths or loaded biorbd.Model instances,
    and rejects anything else with a ValueError."""
    from bioptim.examples.torque_driven_ocp import example_multi_biorbd_model as ocp_module

    folder = os.path.dirname(ocp_module.__file__)
    pendulum_path = f'{folder}/models/triple_pendulum.bioMod'
    modified_path = f'{folder}/models/triple_pendulum_modified_inertia.bioMod'

    # Construction from paths.
    MultiBiorbdModel((pendulum_path, modified_path))
    # Construction from already-loaded models.
    MultiBiorbdModel((biorbd.Model(pendulum_path), biorbd.Model(modified_path)))

    # Unsupported element types must raise.
    with pytest.raises(ValueError, match="The models must be a 'str', 'biorbd.Model', 'bioptim.BiorbdModel' or a tuple of those"):
        MultiBiorbdModel([1])
class Delta(D.Distribution):
    """Degenerate (Dirac delta) distribution concentrated at ``param``.

    ``log_prob`` is ``+inf`` where ``value`` equals ``param`` within the
    atol/rtol tolerance and ``-inf`` elsewhere; sampling always returns
    ``param`` expanded to the requested shape.

    Fixes relative to the decompiled rendering of this file (which strips
    the ``@`` off decorators):
      * the bare ``_grad()`` statement before ``sample`` is restored as a
        decorator — NOTE(review): presumably a no-grad-style wrapper;
        confirm against the upstream source;
      * ``mode`` and ``mean`` are restored as properties, matching the
        ``torch.distributions.Distribution`` base-class contract, which
        defines both as properties.
    """

    arg_constraints: dict = {}

    def __init__(self, param: torch.Tensor, atol: float=1e-06, rtol: float=1e-06, batch_shape: ((torch.Size | Sequence[int]) | None)=None, event_shape: ((torch.Size | Sequence[int]) | None)=None) -> None:
        if (batch_shape is None):
            batch_shape = torch.Size([])
        if (event_shape is None):
            event_shape = torch.Size([])
        self.update(param)
        self.atol = atol
        self.rtol = rtol
        # When neither shape is given, treat the last dim as the event dim.
        if ((not len(batch_shape)) and (not len(event_shape))):
            batch_shape = param.shape[:(- 1)]
            event_shape = param.shape[(- 1):]
        super().__init__(batch_shape=batch_shape, event_shape=event_shape)

    def update(self, param: torch.Tensor) -> None:
        """Replace the point of concentration in place."""
        self.param = param

    def _is_equal(self, value: torch.Tensor) -> torch.Tensor:
        """Element-wise closeness to ``param``, reduced over event dims."""
        param = self.param.expand_as(value)
        is_equal = (abs((value - param)) < (self.atol + (self.rtol * abs(param))))
        # Collapse every event dimension (rightmost first).
        for i in range((- 1), ((- len(self.event_shape)) - 1), (- 1)):
            is_equal = is_equal.all(i)
        return is_equal

    def log_prob(self, value: torch.Tensor) -> torch.Tensor:
        """+inf at the atom, -inf everywhere else."""
        is_equal = self._is_equal(value)
        out = torch.zeros_like(is_equal, dtype=value.dtype)
        out.masked_fill_(is_equal, np.inf)
        out.masked_fill_((~ is_equal), (- np.inf))
        return out

    @_grad()
    def sample(self, sample_shape: ((torch.Size | Sequence[int]) | None)=None) -> torch.Tensor:
        if (sample_shape is None):
            sample_shape = torch.Size([])
        return self.param.expand((*sample_shape, *self.param.shape))

    def rsample(self, sample_shape: ((torch.Size | Sequence[int]) | None)=None) -> torch.Tensor:
        if (sample_shape is None):
            sample_shape = torch.Size([])
        return self.param.expand((*sample_shape, *self.param.shape))

    @property
    def mode(self) -> torch.Tensor:
        return self.param

    @property
    def mean(self) -> torch.Tensor:
        return self.param
from dataclasses import dataclass  # no-op if already imported at the top of the file


@dataclass(frozen=True)
class BoundMethodSignature():
    """A signature together with the ``self`` value it is bound to.

    Fix: line 1 of this block read ``(frozen=True)`` — the ``@dataclass``
    decorator lost its ``@`` and name in this rendering of the file.  The
    class clearly is a frozen dataclass: it declares bare field annotations
    with no ``__init__`` and is constructed positionally in
    ``substitute_typevars``.
    """

    # The underlying (unbound) signature.
    signature: ConcreteSignature
    # The composite representing the bound `self` argument.
    self_composite: Composite
    # Optional return type overriding the signature's own (used only when
    # the signature declares no return value).
    return_override: Optional[Value] = None

    def check_call(self, args: Iterable[Argument], visitor: 'NameCheckVisitor', node: Optional[ast.AST]) -> Value:
        """Check a call, prepending the bound self argument."""
        ret = self.signature.check_call([(self.self_composite, None), *args], visitor, node)
        # Only apply the override when the signature itself is unannotated.
        if ((self.return_override is not None) and (not self.signature.has_return_value())):
            if isinstance(ret, AnnotatedValue):
                # Preserve metadata attached by the signature check.
                return annotate_value(self.return_override, ret.metadata)
            return self.return_override
        return ret

    def get_signature(self, *, preserve_impl: bool=False, ctx: CanAssignContext, self_annotation_value: Optional[Value]=None) -> Optional[ConcreteSignature]:
        """Return the signature with ``self`` bound in, or None on failure."""
        if (self_annotation_value is None):
            self_annotation_value = self.self_composite.value
        return self.signature.bind_self(preserve_impl=preserve_impl, self_value=self.self_composite.value, ctx=ctx, self_annotation_value=self_annotation_value)

    def has_return_value(self) -> bool:
        if (self.return_override is not None):
            return True
        return self.signature.has_return_value()

    def return_value(self) -> Value:
        """Declared return value, the override, or Any (in that order)."""
        if (isinstance(self.signature, Signature) and self.signature.has_return_value()):
            return self.signature.return_value
        if (self.return_override is not None):
            return self.return_override
        return AnyValue(AnySource.unannotated)

    def substitute_typevars(self, typevars: TypeVarMap) -> 'BoundMethodSignature':
        """Return a copy with ``typevars`` substituted throughout."""
        return BoundMethodSignature(self.signature.substitute_typevars(typevars), self.self_composite.substitute_typevars(typevars), (self.return_override.substitute_typevars(typevars) if (self.return_override is not None) else None))

    def __str__(self) -> str:
        return f'{self.signature} bound to {self.self_composite.value}'
class EditInlineCaption():
    async def edit_inline_caption(
        self: 'pyrogram.Client',
        inline_message_id: str,
        caption: str,
        parse_mode: Optional['enums.ParseMode'] = None,
        reply_markup: 'types.InlineKeyboardMarkup' = None,
    ) -> bool:
        """Edit the caption of an inline media message.

        Captions are plain text under the hood, so this delegates to
        ``edit_inline_text`` with the caption as the message text.
        """
        result = await self.edit_inline_text(
            inline_message_id=inline_message_id,
            text=caption,
            parse_mode=parse_mode,
            reply_markup=reply_markup,
        )
        return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.