code stringlengths 281 23.7M |
|---|
def ql_syscall_sys_cpupage_get(ql: "Qiling", index, *args, **kw):
    """Implementation of the QNX ``sys_cpupage_get`` syscall.

    Returns the requested cpupage field of the emulated process:
      * index 0 -> base address of the cpupage area
      * index 1 -> pointer stored at cpupage+4 (read as a 4-byte pointer)
      * index 2 -> base address of the syspage area

    Any other index is logged as unimplemented and ``None`` is returned
    implicitly.
    """
    # BUG FIX: the first comparison was missing its literal (`index == `),
    # which is a syntax error; index 0 selects the cpupage base address.
    if index == 0:
        return ql.os.cpupage_addr
    elif index == 1:
        return ql.mem.read_ptr(ql.os.cpupage_addr + 4, 4)
    elif index == 2:
        return ql.os.syspage_addr

    ql.log.warning(f'ql_syscall_sys_cpupage_get (index {index:d}) not implemented')
def memory_stream_pump(memory_send_stream: MemorySendStream, memory_receive_stream: MemoryReceiveStream, *, max_bytes: (int | None)=None) -> bool:
    """Move any immediately-available data from the send stream to the receive stream.

    Returns False when no data was waiting; True once the pending data (or an
    EOF marker, when the sender delivered empty data) has been forwarded.
    Raises BrokenResourceError if the receive stream was already closed.
    """
    try:
        pending = memory_send_stream.get_data_nowait(max_bytes)
    except _core.WouldBlock:
        # Nothing buffered on the send side right now.
        return False
    try:
        if pending:
            memory_receive_stream.put_data(pending)
        else:
            # An empty payload is the sentinel for end-of-stream.
            memory_receive_stream.put_eof()
    except _core.ClosedResourceError:
        raise _core.BrokenResourceError('MemoryReceiveStream was closed') from None
    return True
class ProgressMeter(object):
    """Formats and prints a tab-separated progress line for a fixed batch count."""

    def __init__(self, num_batches, meters, prefix=''):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        # One line: "<prefix>[ batch/total]\t<meter>\t..."
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def display_summary(self):
        # End-of-run summary line, prefixed with " *".
        parts = [' *'] + [meter.summary() for meter in self.meters]
        print(' '.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Pad the current batch number to the width of the total count.
        num_digits = len(str(num_batches // 1))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def test_wrapper_bug():
    """safer.writer must commit data whether given a path or an open file object."""
    # Path form: data is visible after the context exits.
    with safer.writer(FILENAME) as stream:
        stream.write('hello, world')
    assert FILENAME.read_text() == 'hello, world'
    # File-object form: close_on_exit must close the wrapped handle for us.
    stream = open(FILENAME, 'w')
    with safer.writer(stream, close_on_exit=True):
        stream.write('hello, world')
    assert FILENAME.read_text() == 'hello, world'
class FixtureFactory(object):
    """Creates fixtures from registered context managers, with optional caching."""

    def __init__(self):
        self._setup_stack = SetupStack()
        self._context_managers = {}
        self._fixtures = {}

    def register_context_manager(self, name, context_manager):
        """Make *context_manager* available under *name*."""
        self._context_managers[name] = context_manager

    def get_fixture(self, name, add_teardown):
        """Enter the named context manager; schedule its exit via *add_teardown*."""
        manager = self._context_managers[name]
        fixture = manager.__enter__()
        add_teardown(lambda: manager.__exit__(None, None, None))
        return fixture

    def get_cached_fixture(self, name):
        """Return the named fixture, creating and memoizing it on first use."""
        cached = self._fixtures.get(name)
        if cached is not None:
            return cached
        created = self.get_fixture(name, self._setup_stack.add_teardown)
        self._fixtures[name] = created
        return created

    def tear_down(self):
        """Run every teardown collected on the setup stack."""
        self._setup_stack.tear_down()
# NOTE(review): the bare call below looks like a mangled decorator (probably
# "@pytensor.config.change_flags(floatX='float64')") — confirm before running.
_flags(floatX='float64')
def test_debugprint_mitmot():
    """Check debugprint output for a gradient-of-scan (MIT-MOT) graph.

    Builds a cumulative-product scan, differentiates it (which produces a
    MIT-MOT scan in the gradient graph), then compares the debugprint text
    line-by-line against a golden transcript, ignoring per-line whitespace.
    """
    k = iscalar('k')
    A = dvector('A')
    (result, updates) = pytensor.scan(fn=(lambda prior_result, A: (prior_result * A)), outputs_info=pt.ones_like(A), non_sequences=A, n_steps=k)
    # Differentiating through the scan creates the grad_of_scan_fn inner graph.
    final_result = pytensor.grad(result[(- 1)].sum(), A)
    output_str = debugprint(final_result, file='str', print_op_info=True)
    lines = output_str.split('\n')
    # Golden transcript of the expected debugprint output.
    expected_output = "Subtensor{i} [id A]\n Scan{grad_of_scan_fn, while_loop=False, inplace=none}.1 [id B] (outer_out_sit_sot-0)\n Sub [id C] (n_steps)\n Subtensor{i} [id D]\n Shape [id E]\n Scan{scan_fn, while_loop=False, inplace=none} [id F] (outer_out_sit_sot-0)\n k [id G] (n_steps)\n SetSubtensor{:stop} [id H] (outer_in_sit_sot-0)\n AllocEmpty{dtype='float64'} [id I]\n Add [id J]\n k [id G]\n Subtensor{i} [id K]\n Shape [id L]\n Unbroadcast{0} [id M]\n ExpandDims{axis=0} [id N]\n Second [id O]\n A [id P]\n ExpandDims{axis=0} [id Q]\n 1.0 [id R]\n 0 [id S]\n Subtensor{i} [id T]\n Shape [id U]\n Unbroadcast{0} [id M]\n \n 1 [id V]\n Unbroadcast{0} [id M]\n \n ScalarFromTensor [id W]\n Subtensor{i} [id K]\n \n A [id P] (outer_in_non_seqs-0)\n 0 [id X]\n 1 [id Y]\n Subtensor{:stop} [id Z] (outer_in_seqs-0)\n Subtensor{::step} [id BA]\n Subtensor{:stop} [id BB]\n Scan{scan_fn, while_loop=False, inplace=none} [id F] (outer_out_sit_sot-0)\n \n -1 [id BC]\n -1 [id BD]\n ScalarFromTensor [id BE]\n Sub [id C]\n \n Subtensor{:stop} [id BF] (outer_in_seqs-1)\n Subtensor{:stop} [id BG]\n Subtensor{::step} [id BH]\n Scan{scan_fn, while_loop=False, inplace=none} [id F] (outer_out_sit_sot-0)\n \n -1 [id BI]\n -1 [id BJ]\n ScalarFromTensor [id BK]\n Sub [id C]\n \n Subtensor{::step} [id BL] (outer_in_mit_mot-0)\n IncSubtensor{start:} [id BM]\n Second [id BN]\n Scan{scan_fn, while_loop=False, inplace=none} [id F] (outer_out_sit_sot-0)\n \n ExpandDims{axes=[0, 1]} [id BO]\n 0.0 [id BP]\n IncSubtensor{i} [id BQ]\n Second [id BR]\n Subtensor{start:} [id BS]\n Scan{scan_fn, while_loop=False, inplace=none} [id F] (outer_out_sit_sot-0)\n \n 1 [id BT]\n ExpandDims{axes=[0, 1]} [id BU]\n 0.0 [id BV]\n Second [id BW]\n Subtensor{i} [id BX]\n Subtensor{start:} [id BS]\n \n -1 [id BY]\n ExpandDims{axis=0} [id BZ]\n Second [id CA]\n Sum{axes=None} [id CB]\n Subtensor{i} [id BX]\n \n 1.0 [id CC]\n -1 [id BY]\n 1 [id BT]\n -1 [id CD]\n Alloc [id CE] (outer_in_sit_sot-0)\n 0.0 [id CF]\n Add [id CG]\n Sub [id C]\n \n 1 [id CH]\n Subtensor{i} [id CI]\n Shape [id CJ]\n A [id P]\n 0 [id CK]\n A [id P] (outer_in_non_seqs-0)\n -1 [id CL]\n\n Inner graphs:\n\n Scan{grad_of_scan_fn, while_loop=False, inplace=none} [id B]\n Add [id CM] (inner_out_mit_mot-0-0)\n Mul [id CN]\n *2-<Vector(float64, shape=(?,))> [id CO] -> [id BL] (inner_in_mit_mot-0-0)\n *5-<Vector(float64, shape=(?,))> [id CP] -> [id P] (inner_in_non_seqs-0)\n *3-<Vector(float64, shape=(?,))> [id CQ] -> [id BL] (inner_in_mit_mot-0-1)\n Add [id CR] (inner_out_sit_sot-0)\n Mul [id CS]\n *2-<Vector(float64, shape=(?,))> [id CO] -> [id BL] (inner_in_mit_mot-0-0)\n *0-<Vector(float64, shape=(?,))> [id CT] -> [id Z] (inner_in_seqs-0)\n *4-<Vector(float64, shape=(?,))> [id CU] -> [id CE] (inner_in_sit_sot-0)\n\n Scan{scan_fn, while_loop=False, inplace=none} [id F]\n Mul [id CV] (inner_out_sit_sot-0)\n *0-<Vector(float64, shape=(?,))> [id CT] -> [id H] (inner_in_sit_sot-0)\n *1-<Vector(float64, shape=(?,))> [id CW] -> [id P] (inner_in_non_seqs-0)"
    # Compare line by line, ignoring leading/trailing whitespace on each line.
    for (truth, out) in zip(expected_output.split('\n'), lines):
        assert (truth.strip() == out.strip())
def test_admin_session_download_layout_description_no_spoiler(clean_database, mock_emit_session_update, flask_app, mocker):
    """Downloading the layout description of a non-spoiler session must be rejected.

    The action should raise InvalidActionError without emitting a session
    update and without ever reading the layout_description property.
    """
    # Patch the property so we can assert it was never read.
    mock_layout_description: PropertyMock = mocker.patch('randovania.server.database.MultiplayerSession.layout_description', new_callable=PropertyMock)
    user1 = database.User.create(id=1234, name='The Name')
    # spoiler=False is the condition under test.
    session = database.MultiplayerSession.create(id=1, name='Debug', state=MultiplayerSessionVisibility.VISIBLE, creator=user1, layout_description_json='layout_description_json', game_details_json=json.dumps(GameDetails(spoiler=False, word_hash='fun', seed_hash='fun').as_json))
    database.MultiplayerMembership.create(user=user1, session=session, admin=False)
    sa = MagicMock(spec=ServerApp)
    sa.get_current_user.return_value = user1
    with pytest.raises(error.InvalidActionError), flask_app.test_request_context():
        session_admin.admin_session(sa, 1, SessionAdminGlobalAction.DOWNLOAD_LAYOUT_DESCRIPTION.value)
    mock_emit_session_update.assert_not_called()
    mock_layout_description.assert_not_called()
# NOTE(review): the bare call below looks like a mangled decorator (likely
# "@with_fixtures(WebFixture, ExampleFixture.layout)") — restore before use.
_fixtures(WebFixture, ExampleFixture.layout)
def test_layout(web_fixture, layout_scenario):
    """Typing an invalid email address makes the example's error message visible."""
    fixture = layout_scenario
    fixture.start_example_app()
    web_fixture.driver_browser.open('/')
    # 'johndoe' is not a valid email address, so the error should appear.
    web_fixture.driver_browser.type(XPath.input_labelled('Email address'), 'johndoe')
    assert web_fixture.driver_browser.wait_for(fixture.error_is_visible)
    web_fixture.driver_browser.capture_cropped_screenshot(fixture.new_screenshot_path('layout.png'))
class ProxyEnv(Env):
    """Environment wrapper that forwards every call to an underlying env.

    NOTE(review): the zero-argument accessors (wrapped_env, action_space,
    observation_space, horizon) are plain methods here although they read like
    they were @property upstream — decorators may have been stripped; confirm.
    """

    def __init__(self, wrapped_env):
        self._wrapped_env = wrapped_env

    def wrapped_env(self):
        # Expose the underlying environment.
        return self._wrapped_env

    def reset(self):
        return self._wrapped_env.reset()

    def step(self, action):
        return self._wrapped_env.step(action)

    def render(self, *args, **kwargs):
        return self._wrapped_env.render(*args, **kwargs)

    def action_space(self):
        return self._wrapped_env.action_space

    def observation_space(self):
        return self._wrapped_env.observation_space

    def horizon(self):
        return self._wrapped_env.horizon

    def log_diagnostics(self, paths):
        self._wrapped_env.log_diagnostics(paths)

    def terminate(self):
        self._wrapped_env.terminate()

    def get_param_values(self):
        return self._wrapped_env.get_param_values()

    def set_param_values(self, params):
        self._wrapped_env.set_param_values(params)
def digit_version(version_str):
    """Convert a version string into a list of ints usable for comparison.

    Examples:
      '1.2.3'  -> [1, 2, 3]
      '1.0rc2' -> [1, -1, 2]   # an rc of X sorts below the X release

    Components that are neither plain digits nor contain 'rc' are ignored,
    matching the original behaviour.
    """
    digit_version = []
    for component in version_str.split('.'):
        if component.isdigit():
            digit_version.append(int(component))
        elif 'rc' in component:
            # "<patch>rc<n>": encode as (patch - 1, n) so release candidates
            # compare lower than the final release of the same patch.
            patch_version, _, rc_number = component.partition('rc')
            digit_version.append(int(patch_version) - 1)
            digit_version.append(int(rc_number))
    return digit_version
def on_text(text):
    """Keyboard text handler: typing digit N opens tablet N (1-based) on the window."""
    try:
        index = (int(text) - 1)  # keys are 1-based, the tablets list is 0-based
    except ValueError:
        # Non-numeric key: ignore.
        return
    if (not (0 <= index < len(tablets))):
        return
    name = tablets[index].name
    try:
        canvas = tablets[index].open(window)
    except pyglet.input.DeviceException:
        print(f'Failed to open tablet {index} on window')
        return
    print(f'Opened {name}')

    # NOTE(review): the nested handlers below are defined but never attached to
    # `canvas`; upstream they carry @canvas.event decorators that appear to have
    # been stripped from this copy — confirm before relying on them firing.
    def on_enter(cursor):
        print(f'{name}: on_enter({cursor!r})')

    def on_leave(cursor):
        print(f'{name}: on_leave({cursor!r})')

    def on_motion(cursor, x, y, pressure, tilt_x, tilt_y, buttons):
        print(f'{name}: on_motion({cursor!r}, {x!r}, {y!r}, {pressure!r}, {tilt_x!r}, {tilt_y!r}, {buttons!r})')

    # Express keys only exist on some tablets/platforms.
    if ('on_express_key_press' in canvas.event_types):
        def on_express_key_press(control_id, location_id):
            # NOTE(review): the printed text is missing a closing ')' — kept
            # byte-identical because it is a runtime string.
            print(f'on_express_key_press(control_id={control_id}, location_id={location_id}')

        def on_express_key_release(control_id, location_id):
            print(f'on_express_key_release(control_id={control_id}, location_id={location_id}')
class TermGraph(object):
    """Directed acyclic graph of computation terms.

    Edges run from dependency to dependent, so a topological sort yields a
    valid execution order. The graph is built once in __init__ and then frozen.

    NOTE(review): several zero-argument methods (outputs, ordered,
    loadable_terms, jpeg/png/svg, screen_name) read like they were decorated
    as properties upstream — `_repr_png_` accesses `self.png.data` and
    `initial_refcounts` calls `self.outputs.values()`, which only work if those
    names are property values, not bound methods. Decorators appear to have
    been stripped; confirm against the original source.
    """

    def __init__(self, terms):
        self.graph = nx.DiGraph()
        self._frozen = False
        parents = set()
        for term in itervalues(terms):
            self._add_to_graph(term, parents)
            # The DFS working set must be empty again after each root insert.
            assert (not parents)
        self._outputs = terms
        # Freeze: no further mutation after construction.
        self._frozen = True

    def __contains__(self, term):
        return (term in self.graph)

    def _add_to_graph(self, term, parents):
        """DFS-insert *term* and its dependencies.

        *parents* is the set of terms on the current DFS path, used to detect
        cyclic dependencies.
        """
        if self._frozen:
            raise ValueError(("Can't mutate %s after construction." % type(self).__name__))
        if (term in parents):
            raise CyclicDependency(term)
        parents.add(term)
        self.graph.add_node(term)
        for dependency in term.dependencies:
            self._add_to_graph(dependency, parents)
            # Edge direction: dependency -> dependent.
            self.graph.add_edge(dependency, term)
        parents.remove(term)

    def outputs(self):
        # The terms this graph was built to compute.
        return self._outputs

    def screen_name(self):
        # NOTE(review): returns a module-level SCREEN_NAME constant not visible
        # in this chunk — confirm it belongs on this class.
        return SCREEN_NAME

    def execution_order(self, workspace, refcounts):
        """Topologically-sorted list of still-needed terms: positive refcount
        and not already present in *workspace*."""
        return list(nx.topological_sort(self.graph.subgraph({term for (term, refcount) in refcounts.items() if ((refcount > 0) and (term not in workspace))})))

    def ordered(self):
        # Iterator over all terms in dependency order.
        return iter(nx.topological_sort(self.graph))

    def loadable_terms(self):
        return {term for term in self.graph if isinstance(term, LoadableTerm)}

    def jpeg(self):
        return display_graph(self, 'jpeg')

    def png(self):
        return display_graph(self, 'png')

    def svg(self):
        return display_graph(self, 'svg')

    def _repr_png_(self):
        # IPython rich-display hook.
        return self.png.data

    def initial_refcounts(self, initial_terms):
        """Refcount per term: its out-degree, plus one per output occurrence,
        minus the contributions already satisfied by *initial_terms*."""
        refcounts = self.graph.out_degree()
        for t in self.outputs.values():
            refcounts[t] += 1
        for t in initial_terms:
            self._decref_dependencies_recursive(t, refcounts, set())
        return refcounts

    def _decref_dependencies_recursive(self, term, refcounts, garbage):
        """Decrement refcounts of *term*'s dependencies, recursing into any that
        drop to zero; zeroed terms are accumulated into *garbage*."""
        for (parent, _) in self.graph.in_edges([term]):
            refcounts[parent] -= 1
            if (refcounts[parent] == 0):
                garbage.add(parent)
                self._decref_dependencies_recursive(parent, refcounts, garbage)

    def decref_dependencies(self, term, refcounts):
        """Non-recursive variant: decref *term*'s direct dependencies and return
        the set of terms whose refcount reached zero."""
        garbage = set()
        for (parent, _) in self.graph.in_edges([term]):
            refcounts[parent] -= 1
            if (refcounts[parent] == 0):
                garbage.add(parent)
        return garbage

    def __len__(self):
        return len(self.graph)
class ErnieMConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of an ERNIE-M model.

    Every constructor argument is stored verbatim as an attribute; remaining
    keyword arguments are forwarded to PretrainedConfig.
    """

    model_type = 'ernie_m'
    attribute_map: Dict[(str, str)] = {'dropout': 'classifier_dropout', 'num_classes': 'num_labels'}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = 'gelu',
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Store all hyper-parameters as plain attributes.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
# NOTE(review): the bare parenthesized expression below is a mangled decorator —
# almost certainly "@pytest.fixture(scope='module')"; as written it is a syntax
# error and must be restored before this module can run.
(scope='module')
def reply_keyboard_markup():
    """Build a ReplyKeyboardMarkup from the constants on TestReplyKeyboardMarkupBase."""
    return ReplyKeyboardMarkup(TestReplyKeyboardMarkupBase.keyboard, resize_keyboard=TestReplyKeyboardMarkupBase.resize_keyboard, one_time_keyboard=TestReplyKeyboardMarkupBase.one_time_keyboard, selective=TestReplyKeyboardMarkupBase.selective, is_persistent=TestReplyKeyboardMarkupBase.is_persistent)
# NOTE(review): the bare name below looks like a mangled decorator or marker
# (perhaps "@requires_ephem") — confirm against the original source.
_ephem
def test_pyephem_physical_dst(expected_solpos, golden):
    """pyephem solar position during DST should match the golden fixture to 2 d.p."""
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30), periods=1, freq='D', tz=golden.tz)
    ephem_data = solarposition.pyephem(times, golden.latitude, golden.longitude, pressure=82000, temperature=11)
    expected_solpos.index = times
    # Compare only the expected columns, rounded to suppress float noise.
    assert_frame_equal(expected_solpos.round(2), ephem_data[expected_solpos.columns].round(2))
class RELATIONSHIP_TYPE():
    """OPC (Open Packaging Conventions) relationship-type constants.

    NOTE(review): every value below is a truncated, unterminated string
    literal — the relationship URIs (normally of the form
    "http://schemas.openxmlformats.org/.../relationships/<name>") have been
    lost in this copy, leaving syntax errors on every line. The original
    values must be restored before this module can even be imported.
    """
    AUDIO = '
    A_F_CHUNK = '
    CALC_CHAIN = '
    CERTIFICATE = '
    CHART = '
    CHARTSHEET = '
    CHART_USER_SHAPES = '
    COMMENTS = '
    COMMENT_AUTHORS = '
    CONNECTIONS = '
    CONTROL = '
    CORE_PROPERTIES = '
    CUSTOM_PROPERTIES = '
    CUSTOM_PROPERTY = '
    CUSTOM_XML = '
    CUSTOM_XML_PROPS = '
    DIAGRAM_COLORS = '
    DIAGRAM_DATA = '
    DIAGRAM_LAYOUT = '
    DIAGRAM_QUICK_STYLE = '
    DIALOGSHEET = '
    DRAWING = '
    ENDNOTES = '
    EXTENDED_PROPERTIES = '
    EXTERNAL_LINK = '
    FONT = '
    FONT_TABLE = '
    FOOTER = '
    FOOTNOTES = '
    GLOSSARY_DOCUMENT = '
    HANDOUT_MASTER = '
    HEADER = '
    HYPERLINK = '
    IMAGE = '
    NOTES_MASTER = '
    NOTES_SLIDE = '
    NUMBERING = '
    OFFICE_DOCUMENT = '
    OLE_OBJECT = '
    ORIGIN = '
    PACKAGE = '
    PIVOT_CACHE_DEFINITION = '
    PIVOT_CACHE_RECORDS = '
    PIVOT_TABLE = '
    PRES_PROPS = '
    PRINTER_SETTINGS = '
    QUERY_TABLE = '
    REVISION_HEADERS = '
    REVISION_LOG = '
    SETTINGS = '
    SHARED_STRINGS = '
    SHEET_METADATA = '
    SIGNATURE = '
    SLIDE = '
    SLIDE_LAYOUT = '
    SLIDE_MASTER = '
    SLIDE_UPDATE_INFO = '
    STYLES = '
    TABLE = '
    TABLE_SINGLE_CELLS = '
    TABLE_STYLES = '
    TAGS = '
    THEME = '
    THEME_OVERRIDE = '
    THUMBNAIL = '
    USERNAMES = '
    VIDEO = '
    VIEW_PROPS = '
    VML_DRAWING = '
    VOLATILE_DEPENDENCIES = '
    WEB_SETTINGS = '
    WORKSHEET_SOURCE = '
    XML_MAPS = '
class EpisodicCPUDataset():
    """CPU-side dataset serving few-shot episodes.

    Each episode draws `args.n_ways` random classes and, per class,
    `episode_size // args.n_ways` random sample indices. The index arithmetic
    assumes *data* is ordered so each class occupies a contiguous block of
    `length // num_classes` samples — TODO confirm against the data pipeline.
    """

    # NOTE(review): the mutable default `transforms=[]` and the
    # `episode_size=args.batch_size` default (evaluated at import time from a
    # module-level `args`) are kept as-is; confirm before reusing elsewhere.
    def __init__(self, data, num_classes, transforms=[], episode_size=args.batch_size, use_hd=False):
        self.data = data
        if torch.is_tensor(data):
            self.length = data.shape[0]
        else:
            self.length = len(self.data)
        # Round the episode size down to a multiple of n_ways.
        self.episode_size = ((episode_size // args.n_ways) * args.n_ways)
        self.transforms = transforms
        self.use_hd = use_hd
        self.num_classes = num_classes
        self.targets = []
        self.indices = []
        self.corrected_length = (args.episodes_per_epoch * self.episode_size)
        episodes = args.episodes_per_epoch
        # Pre-generate indices/targets for every episode of the first epoch.
        for i in range(episodes):
            classes = np.random.permutation(np.arange(self.num_classes))[:args.n_ways]
            for c in range(args.n_ways):
                # Per-class sample offsets, shifted into that class's block.
                class_indices = np.random.permutation(np.arange((self.length // self.num_classes)))[:(self.episode_size // args.n_ways)]
                self.indices += list((class_indices + (classes[c] * (self.length // self.num_classes))))
                self.targets += ([c] * (self.episode_size // args.n_ways))
        self.indices = np.array(self.indices)
        self.targets = np.array(self.targets)

    def generate_next_episode(self, idx):
        """Re-randomize the stored indices of episode *idx* (targets stay fixed)."""
        if (idx >= args.episodes_per_epoch):
            idx = 0
        classes = np.random.permutation(np.arange(self.num_classes))[:args.n_ways]
        n_samples = (self.episode_size // args.n_ways)
        for c in range(args.n_ways):
            class_indices = np.random.permutation(np.arange((self.length // self.num_classes)))[:(self.episode_size // args.n_ways)]
            self.indices[((idx * self.episode_size) + (c * n_samples)):((idx * self.episode_size) + ((c + 1) * n_samples))] = (class_indices + (classes[c] * (self.length // self.num_classes)))

    def __getitem__(self, idx):
        # At each episode boundary, refresh the *next* episode's indices so the
        # episode currently being read stays stable.
        # NOTE(review): with the +1 offset, episode 0 is only re-randomized
        # after a wrap-around — confirm this pre-fetch behaviour is intended.
        if ((idx % self.episode_size) == 0):
            self.generate_next_episode(((idx // self.episode_size) + 1))
        if self.use_hd:
            # In high-definition mode the data holds file paths; load lazily.
            elt = transforms.ToTensor()(np.array(Image.open(self.data[self.indices[idx]]).convert('RGB')))
        else:
            elt = self.data[self.indices[idx]]
        return (self.transforms(elt), self.targets[idx])

    def __len__(self):
        return self.corrected_length
class TransLog(object):
    """Records layers and blobs while translating a network graph to Caffe.

    Layer/blob names are made unique by appending a per-base-name counter,
    and blobs are keyed by their Python object id so the same tensor can be
    looked up again later.
    """

    def __init__(self):
        self.layers = {}          # unique layer name -> unique layer name
        self.detail_layers = {}   # base layer name -> count used so far
        self.detail_blobs = {}    # base blob name -> count used so far
        self._blobs = Blob_LOG()  # blob id -> generated blob name
        self._blobs_data = []     # keeps blobs alive so their ids stay unique
        self.cnet = caffe_net.Caffemodel('')
        self.debug = True

    def init(self, inputs):
        """Register the network inputs as blobs."""
        self.add_blobs(inputs)

    def add_layer(self, name='layer'):
        """Return a unique layer name derived from *name*, creating one if needed."""
        if name in self.layers:
            return self.layers[name]
        if name not in self.detail_layers.keys():
            self.detail_layers[name] = 0
        self.detail_layers[name] += 1
        name = '{}{}'.format(name, self.detail_layers[name])
        self.layers[name] = name
        if self.debug:
            print('{} was added to layers'.format(self.layers[name]))
        return self.layers[name]

    def add_blobs(self, blobs, name='blob', with_num=True):
        """Register *blobs*; return the generated names (numbered when *with_num*)."""
        rst = []
        for blob in blobs:
            self._blobs_data.append(blob)
            blob_id = int(id(blob))
            if name not in self.detail_blobs.keys():
                self.detail_blobs[name] = 0
            self.detail_blobs[name] += 1
            if with_num:
                rst.append('{}{}'.format(name, self.detail_blobs[name]))
            else:
                rst.append('{}'.format(name))
            if self.debug:
                print('{}:{} was added to blobs'.format(blob_id, rst[(- 1)]))
                print('Add blob {} : {}'.format(rst[(- 1)].center(21), blob.size()))
            self._blobs[blob_id] = rst[(- 1)]
        return rst

    def blobs(self, var):
        """Look up the generated name for *var*; returns None (with a warning)
        when the blob was never registered."""
        var = id(var)
        try:
            # BUG FIX: the lookup used to happen in a debug print *before* the
            # try block, so a missing blob crashed with KeyError instead of
            # reaching the warning path when debug was enabled. Also narrowed
            # the bare `except:` (which swallowed KeyboardInterrupt/SystemExit)
            # to the only expected failure, KeyError.
            name = self._blobs[var]
        except KeyError:
            print('WARNING: CANNOT FOUND blob {}'.format(var))
            return None
        if self.debug:
            print('{}:{} getting'.format(var, name))
        return name
def expected_flattened(prefix: str) -> Dict[(str, Any)]:
    """Build the expected flattened-dict fixture with every key nested under *prefix*."""
    base = {'foo': 0, 'bar': 1, 'baz/0': 2, 'baz/1': 3, 'baz/2/qux': 4, 'baz/2/quxx/0': 5, 'baz/2/quxx/1/quuz': 6, 'baz/2/quxx/1/corge/0': 7, 'baz/2/quxx/1/corge/1': 8, 'baz/2/quxx/1/corge/2': 9, 'x%2Fy/%25a%2Fb': 10, 'dict_with_colliding_keys': {'0': {'1': 'foo', 1: 'bar'}, 0: 'baz'}, 'dict_with_mixed_type_keys/0/0': 'foo', 'dict_with_mixed_type_keys/0/1': 'bar', 'dict_with_mixed_type_keys/1': 'baz'}
    # A long list flattens to one indexed key per element.
    base.update({f'long_list/{i}': i for i in range(100)})
    # Prefix (URL-encoded) every key.
    return {f'{_encode(prefix)}/{k}': v for (k, v) in base.items()}
class ImageExporter(Exporter):
    """Exports a graphics item or scene to a raster image (PNG, TIF, JPG, ...)."""

    Name = 'Image File (PNG, TIF, JPG, ...)'
    allowCopy = True

    def __init__(self, item):
        Exporter.__init__(self, item)
        tr = self.getTargetRect()
        # Resolve the scene whether we were handed an item or a scene.
        if isinstance(item, QtWidgets.QGraphicsItem):
            scene = item.scene()
        else:
            scene = item
        bgbrush = scene.views()[0].backgroundBrush()
        bg = bgbrush.color()
        if bgbrush.style() == QtCore.Qt.BrushStyle.NoBrush:
            bg.setAlpha(0)
        self.params = Parameter(name='params', type='group', children=[
            {'name': 'width', 'title': translate('Exporter', 'width'), 'type': 'int', 'value': int(tr.width()), 'limits': (0, None)},
            {'name': 'height', 'title': translate('Exporter', 'height'), 'type': 'int', 'value': int(tr.height()), 'limits': (0, None)},
            {'name': 'antialias', 'title': translate('Exporter', 'antialias'), 'type': 'bool', 'value': True},
            {'name': 'background', 'title': translate('Exporter', 'background'), 'type': 'color', 'value': bg},
            {'name': 'invertValue', 'title': translate('Exporter', 'invertValue'), 'type': 'bool', 'value': False},
        ])
        # Keep width and height locked to the source aspect ratio.
        self.params.param('width').sigValueChanged.connect(self.widthChanged)
        self.params.param('height').sigValueChanged.connect(self.heightChanged)

    def widthChanged(self):
        sr = self.getSourceRect()
        ar = float(sr.height()) / sr.width()
        # blockSignal avoids ping-ponging between the two handlers.
        self.params.param('height').setValue(int(self.params['width'] * ar), blockSignal=self.heightChanged)

    def heightChanged(self):
        sr = self.getSourceRect()
        ar = float(sr.width()) / sr.height()
        self.params.param('width').setValue(int(self.params['height'] * ar), blockSignal=self.widthChanged)

    def parameters(self):
        return self.params

    @staticmethod
    def getSupportedImageFormats():
        # BUG FIX: restored @staticmethod — the function has no `self`
        # parameter, so the `self.getSupportedImageFormats()` call in export()
        # raised TypeError without it.
        filter = [('*.' + f.data().decode('utf-8')) for f in QtGui.QImageWriter.supportedImageFormats()]
        preferred = ['*.png', '*.tif', '*.jpg']
        # Move the preferred extensions to the front, preserving their order.
        for p in preferred[::(- 1)]:
            if p in filter:
                filter.remove(p)
                filter.insert(0, p)
        return filter

    def export(self, fileName=None, toBytes=False, copy=False):
        """Render the scene: to *fileName*, to a QImage (toBytes=True), or to
        the clipboard (copy=True). With no arguments, open a save dialog."""
        if (fileName is None) and (not toBytes) and (not copy):
            filter = self.getSupportedImageFormats()
            self.fileSaveDialog(filter=filter)
            return
        w = int(self.params['width'])
        h = int(self.params['height'])
        if (w == 0) or (h == 0):
            raise Exception('Cannot export image with size=0 (requested export size is %dx%d)' % (w, h))
        targetRect = QtCore.QRect(0, 0, w, h)
        sourceRect = self.getSourceRect()
        self.png = QtGui.QImage(w, h, QtGui.QImage.Format.Format_ARGB32)
        self.png.fill(self.params['background'])
        origTargetRect = self.getTargetRect()
        resolutionScale = targetRect.width() / origTargetRect.width()
        painter = QtGui.QPainter(self.png)
        try:
            self.setExportMode(True, {'antialias': self.params['antialias'], 'background': self.params['background'], 'painter': painter, 'resolutionScale': resolutionScale})
            painter.setRenderHint(QtGui.QPainter.RenderHint.Antialiasing, self.params['antialias'])
            self.getScene().render(painter, QtCore.QRectF(targetRect), QtCore.QRectF(sourceRect))
        finally:
            # Always restore export mode, even if rendering throws.
            self.setExportMode(False)
        painter.end()
        if self.params['invertValue']:
            # Invert the value channel in place; byte order decides which
            # slice of the ARGB32 pixel holds the colour channels.
            bg = fn.ndarray_from_qimage(self.png)
            if sys.byteorder == 'little':
                cv = slice(0, 3)
            else:
                cv = slice(1, 4)
            mn = bg[(..., cv)].min(axis=2)
            mx = bg[(..., cv)].max(axis=2)
            d = (255 - mx) - mn
            bg[(..., cv)] += d[(..., np.newaxis)]
        if copy:
            QtWidgets.QApplication.clipboard().setImage(self.png)
        elif toBytes:
            return self.png
        else:
            return self.png.save(fileName)
class DepthwiseSeparableASPPModule(ASPPModule):
    """ASPP variant whose dilated branches use depthwise-separable convolutions."""

    def __init__(self, **kwargs):
        super(DepthwiseSeparableASPPModule, self).__init__(**kwargs)
        # Swap every dilated branch (dilation > 1) for a depthwise-separable
        # conv; the dilation-1 branch built by the parent class is kept.
        for branch_idx, dilation in enumerate(self.dilations):
            if dilation > 1:
                self[branch_idx] = DepthwiseSeparableConvModule(self.in_channels, self.channels, 3, dilation=dilation, padding=dilation, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
def test_speedcondition():
    """SpeedCondition equality, XML round-trip, and per-version validation."""
    cond = OSC.SpeedCondition(1, OSC.Rule.lessThan, OSC.DirectionalDimension.lateral)
    prettyprint(cond.get_element())
    duplicate = OSC.SpeedCondition(1, OSC.Rule.lessThan, OSC.DirectionalDimension.lateral)
    different = OSC.SpeedCondition(2, OSC.Rule.lessThan)
    assert cond == duplicate
    assert cond != different
    # Serializing and re-parsing must yield an equal condition.
    reparsed = OSC.SpeedCondition.parse(cond.get_element())
    assert cond == reparsed
    # The directional-dimension attribute only validates from OSC v1.2 on.
    for version, expected in ((0, ValidationResponse.OSC_VERSION), (1, ValidationResponse.OSC_VERSION), (2, ValidationResponse.OK)):
        assert version_validation('EntityCondition', cond, version) == expected
    # Without the directional dimension, every version validates.
    for version in (0, 1, 2):
        assert version_validation('EntityCondition', different, version) == ValidationResponse.OK
def _build_module(cfg, registry, default_args):
    """Instantiate the object described by *cfg* from *registry*.

    *cfg* must be a dict with a 'type' key naming a registered module (or the
    class itself); *default_args* supplies fallbacks for missing constructor
    arguments.
    """
    assert isinstance(cfg, dict) and ('type' in cfg)
    assert isinstance(default_args, dict) or (default_args is None)
    kwargs = cfg.copy()
    obj_type = kwargs.pop('type')
    if mmcv.is_str(obj_type):
        # Resolve a registry name to the registered class.
        if obj_type not in registry.module_dict:
            raise KeyError('{} is not in the {} registry'.format(obj_type, registry.name))
        obj_type = registry.module_dict[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError('type must be a str or valid type, but got {}'.format(type(obj_type)))
    if default_args is not None:
        # Defaults only fill in keys that cfg did not provide.
        kwargs = {**default_args, **kwargs}
    return obj_type(**kwargs)
def test_editable_wheel_namespace_package(copy_sample):
    """An editable wheel for a namespace package ships a .pth file pointing at the source tree."""
    td = copy_sample('ns1-pkg')
    make_wheel_in((td / 'pyproject.toml'), td, editable=True)
    whl_file = (td / 'ns1_pkg-0.1-py2.py3-none-any.whl')
    assert_isfile(whl_file)
    with unpack(whl_file) as unpacked:
        # The .pth file must contain the project directory so the package
        # becomes importable without copying code into site-packages.
        pth_path = Path(unpacked, 'ns1.pkg.pth')
        assert_isfile(pth_path)
        assert (pth_path.read_text() == str(td))
        assert_isdir(Path(unpacked, 'ns1_pkg-0.1.dist-info'))
def _form_master_re(relist, reflags, ldict):
if (not relist):
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, (re.VERBOSE | reflags))
lexindexfunc = ([None] * (max(lexre.groupindex.values()) + 1))
for (f, i) in lexre.groupindex.items():
handle = ldict.get(f, None)
if (type(handle) in (types.FunctionType, types.MethodType)):
lexindexfunc[i] = (handle, handle.__name__[2:])
elif (handle is not None):
if (f.find('ignore_') > 0):
lexindexfunc[i] = (None, None)
print('IGNORE', f)
else:
lexindexfunc[i] = (None, f[2:])
return ([(lexre, lexindexfunc)], [regex])
except Exception as e:
m = int((len(relist) / 2))
if (m == 0):
m = 1
(llist, lre) = _form_master_re(relist[:m], reflags, ldict)
(rlist, rre) = _form_master_re(relist[m:], reflags, ldict)
return ((llist + rlist), (lre + rre)) |
class Unwrapper(OracleDatabase):
    """Unwraps (de-obfuscates) wrapped Oracle PL/SQL source code.

    Wrapped source is a base64 blob: after decoding, the first 20 bytes (a
    hash header) are dropped, every remaining byte is mapped through
    CHAR_MAP_SUBSTITUTION, and the result is zlib-decompressed into the
    clear-text source. Works either against a live database ("remotely") or
    on a local file ("locally"/offline).
    """

    # Byte-substitution table used by Oracle's wrap obfuscation.
    CHAR_MAP_SUBSTITUTION = [61, 101, 133, 179, 24, 219, 226, 135, 241, 82, 171, 99, 75, 181, 160, 95, 125, 104, 123, 155, 36, 194, 40, 103, 138, 222, 164, 38, 30, 3, 235, 23, 111, 52, 62, 122, 63, 210, 169, 106, 15, 233, 53, 86, 31, 177, 77, 16, 120, 217, 117, 246, 188, 65, 4, 129, 97, 6, 249, 173, 214, 213, 41, 126, 134, 158, 121, 229, 5, 186, 132, 204, 110, 39, 142, 176, 93, 168, 243, 159, 208, 162, 113, 184, 88, 221, 44, 56, 153, 76, 72, 7, 85, 228, 83, 140, 70, 182, 45, 165, 175, 50, 34, 64, 220, 80, 195, 161, 37, 139, 156, 22, 96, 92, 207, 253, 12, 152, 28, 212, 55, 109, 60, 58, 48, 232, 108, 49, 71, 245, 51, 218, 67, 200, 227, 94, 25, 148, 236, 230, 163, 149, 20, 224, 157, 100, 250, 89, 21, 197, 47, 202, 187, 11, 223, 242, 151, 191, 10, 118, 180, 73, 68, 90, 29, 240, 0, 150, 33, 128, 127, 26, 130, 57, 79, 193, 167, 215, 13, 209, 216, 255, 19, 147, 112, 238, 91, 239, 190, 9, 185, 119, 114, 231, 178, 84, 183, 42, 199, 115, 144, 102, 32, 14, 81, 237, 248, 124, 143, 46, 244, 18, 198, 43, 131, 205, 172, 203, 59, 196, 78, 192, 105, 54, 98, 2, 174, 136, 252, 170, 66, 8, 166, 69, 87, 211, 154, 189, 225, 35, 141, 146, 74, 17, 137, 116, 107, 145, 251, 254, 201, 1, 234, 27, 247, 206]
    REQ_GET_SOURCE_CODE = "SELECT text, owner FROM all_source WHERE name LIKE '{0}' ORDER BY line"
    REQ_GET_SOURCE_CODE_WITH_TYPE = "SELECT text, owner FROM all_source WHERE name LIKE '{0}' and type='{1}' ORDER BY line"

    def __init__(self, args, offline):
        logging.debug('Unwrapper object created')
        self.offline = offline
        # BUG FIX: the two debug messages below were swapped — offline == False
        # means offline mode is *disabled* and a DB connection must be set up.
        if (offline == False):
            logging.debug('Offline mode of Unwrapper module disabled')
            OracleDatabase.__init__(self, args)
        else:
            logging.debug('Offline mode of Unwrapper module enabled.')

    def __getSourceCode__(self, objectName, objectType):
        """Fetch the wrapped source of *objectName* (optionally filtered by
        *objectType*) from ALL_SOURCE. Returns {'owner', 'sourceCode'} or None."""
        sourceCode = ''
        logging.info('Geeting the source code of the object named {0} (type={1})'.format(objectName, objectType))
        if (objectType == None):
            request = self.REQ_GET_SOURCE_CODE.format(objectName)
        else:
            request = self.REQ_GET_SOURCE_CODE_WITH_TYPE.format(objectName, objectType)
        logging.debug('Sending this request: {0}'.format(request))
        results = self.__execQuery__(query=request, ld=['text', 'owner'])
        if (results == []):
            logging.error('Empty response: No source code for the object named {0}. Perhaps a mistake in your object name'.format(objectName))
            return None
        else:
            # Concatenate the per-line TEXT rows into one source string.
            for aResult in results:
                sourceCode += aResult['text']
            return {'owner': results[0]['owner'], 'sourceCode': sourceCode}

    def __unwrap__(self, wrappedCode):
        """Locate the base64 payload inside wrapped source and decode it.

        The payload starts after a "<hex> <hex-length>" marker line; the hex
        length gives the number of base64 characters to collect from the
        following lines. Returns the clear text, or None on parse error.
        """
        logging.info("Unwrapping the following PL/SQL source code: '{0}'".format(wrappedCode))
        lines = wrappedCode['sourceCode'].split('\n')[:(- 1)]
        try:
            for i in range(0, len(lines)):
                matches = re.compile('^[0-9a-f]+ ([0-9a-f]+)$').match(lines[i])
                if matches:
                    (b64str, j) = ('', 0)
                    b64len = int(matches.groups()[0], 16)
                    logging.debug('Length of base 64 string equal to {0}'.format(b64len))
                    # Accumulate subsequent lines until the declared length is reached.
                    while (len(b64str) < b64len):
                        j += 1
                        b64len -= 1
                        b64str += lines[(i + j)]
                    return self.__decodeBase64Package__(b64str)
        except Exception as e:
            logging.error("Impossible to parse the correctly the PL/SQL source code: '{0}'".format(e))
            return None

    def unwrapRemotely(self, objectName, objectType):
        """Fetch wrapped source from the database and unwrap it."""
        sourceCode = self.__getSourceCode__(objectName, objectType)
        if (sourceCode == None):
            return None
        code = self.__unwrap__(sourceCode)
        return code

    def unwrapLocally(self, filename):
        """Unwrap a wrapped PL/SQL file stored on disk."""
        # BUG FIX: use a context manager so the file handle is always closed.
        with open(filename) as f:
            lines = f.read()
        return self.__unwrap__({'owner': 'unknown', 'sourceCode': lines})

    def __decodeBase64Package__(self, b64str):
        """Decode a wrapped-source payload: base64-decode, drop the 20-byte
        hash header, undo the substitution cipher, zlib-decompress.
        Returns the clear-text bytes, or None on error."""
        try:
            # BUG FIX: base64.decodestring() was removed in Python 3.9 and the
            # original code mixed str and bytes (chr/ord on a decoded buffer,
            # zlib.decompress on str); operate on bytes throughout instead.
            b64dec = base64.b64decode(b64str)[20:]
            decoded = bytes(self.CHAR_MAP_SUBSTITUTION[byte] for byte in b64dec)
            datadec = zlib.decompress(decoded)
        except Exception as e:
            logging.error("Impossible to decompress data: '{0}'".format(e))
            return None
        return datadec

    def testAll(self):
        """Self-test hook: nothing to verify for this module, always OK."""
        self.args['print'].subtitle('Unwrap PL/SQL source code remotely?')
        logging.info('Nothing to do, return True')
        self.args['print'].goodNews('OK')
        return True
class TemplateTagTests(BaseTestCase):
    """Tests for the {% box %} template tag."""

    def render(self, tmpl, **context):
        """Render *tmpl* through the Django template engine with *context*."""
        return template.Template(tmpl).render(template.Context(context))

    def test_tag(self):
        # A known label renders the box's stored content.
        rendered = self.render('{% load boxes %}{% box "test" %}')
        self.assertEqual(rendered, self.box.content.rendered)

    def test_tag_invalid_label(self):
        # An unknown label renders as the empty string rather than erroring.
        rendered = self.render('{% load boxes %}{% box "missing" %}')
        self.assertEqual(rendered, '')
class _DeepLabHead(nn.Module):
    """Segmentation head: ASPP followed by a small conv classifier block."""

    def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
        super(_DeepLabHead, self).__init__()
        self.aspp = _ASPP(in_channels, [12, 24, 36], norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
        out_channels = 128
        norm_args = {} if (norm_kwargs is None) else norm_kwargs
        self.block = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),
            norm_layer(out_channels, **norm_args),
            nn.ReLU(True),
            nn.Dropout(0.1),
            nn.Conv2d(out_channels, nclass, 1),
        )

    def forward(self, x):
        """Return (class logits, features after the conv/norm/relu/dropout stage)."""
        x = self.aspp(x)
        features = self.block[0:4](x)
        logits = self.block[4](features)
        return (logits, features)
class TokenEmbedding(nn.Module):
    """Learned token embedding whose output is scaled by sqrt(embed_dim)."""

    def __init__(self, charset_size: int, embed_dim: int):
        super().__init__()
        self.embedding = nn.Embedding(charset_size, embed_dim)
        self.embed_dim = embed_dim

    def forward(self, tokens: torch.Tensor):
        # Scale factor sqrt(embed_dim), a common transformer convention.
        scale = math.sqrt(self.embed_dim)
        return self.embedding(tokens) * scale
# NOTE(review): the two bare expressions below look like mangled decorators —
# almost certainly "@patch('pypyr.moduleloader.get_module')" and
# "@patch.object(Step, 'invoke_step')" — restore them before running.
('pypyr.moduleloader.get_module')
(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_skip_str_formatting_false(mock_invoke_step, mock_get_module):
    """A skip expression that formats to truthy must prevent the step from running."""
    step = Step({'name': 'step1', 'skip': '{key6}'})
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    mock_logger_info.assert_any_call('step1 not running because skip is True.')
    mock_invoke_step.assert_not_called()
    # A skipped step must not mutate the context.
    assert (len(context) == original_len)
def load_data_and_labels_train(path_train, path_test, categories):
    """Load tab-separated "<label><TAB><document>" train and test files.

    Returns [train_tokens, test_tokens, train_onehot, test_onehot] where the
    token lists are documents split on single spaces and the label lists are
    one-hot vectors ordered like *categories*.
    """
    (y_train, clean_train_documents) = _read_labeled_file(path_train)
    (y_test, clean_test_documents) = _read_labeled_file(path_test)
    x_text_train = [doc.split(' ') for doc in clean_train_documents]
    x_text_test = [doc.split(' ') for doc in clean_test_documents]
    labels_train = [_one_hot(label, categories) for label in y_train]
    labels_test = [_one_hot(label, categories) for label in y_test]
    return [x_text_train, x_text_test, labels_train, labels_test]

def _read_labeled_file(path):
    """Read "<label><TAB><text>" lines from *path*; return (labels, documents)."""
    # BUG FIX: use a context manager so the handle is closed even on error
    # (the original closed manually and duplicated this loop for each file).
    with codecs.open(path, 'r') as f:
        rows = [line.strip('\n') for line in f.readlines()]
    labels = []
    documents = []
    for row in rows:
        parts = row.split('\t')
        labels.append(parts[0])
        documents.append(parts[1])
    return (labels, documents)

def _one_hot(label, categories):
    """One-hot encode *label* against the ordered *categories* list."""
    vec = [0] * len(categories)
    vec[categories.index(label)] = 1
    return vec
# NOTE(review): the bare parenthesized expression below is a mangled decorator —
# almost certainly "@pytest.fixture(scope='module')"; as written it is a syntax
# error and must be restored before this module can run.
(scope='module')
def root_dir(tmp_path_factory):
    """Fixture: initialise the ProxifyHostFile spill-to-disk machinery once and
    return its root directory (as the parent of the spill dir)."""
    tmpdir = tmp_path_factory.mktemp('jit-unspill')
    # _spill_to_disk is class-level state, so only the first call constructs it;
    # presumably the tiny 1024-byte limits are meant to force spilling — confirm.
    if (ProxifyHostFile._spill_to_disk is None):
        ProxifyHostFile(worker_local_directory=tmpdir.name, device_memory_limit=1024, memory_limit=1024)
    assert (ProxifyHostFile._spill_to_disk is not None)
    return str((ProxifyHostFile._spill_to_disk.root_dir / '..'))
class Effect11401(BaseEffect):
    """Passive hull effect: boosts 'maxRange' of modules requiring the
    'Small Projectile Turret' skill by the ship's
    'shipBonusNavyDestroyerMinmatar4' attribute."""

    type = 'passive'

    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        # NOTE(review): @staticmethod reconstructed — handler takes no self,
        # matching the plain-function signature used by sibling effect classes.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Small Projectile Turret')), 'maxRange', ship.getModifiedItemAttr('shipBonusNavyDestroyerMinmatar4'), **kwargs)
class PickleNode():
def __init__(self, name: str='', path: (Path | None)=None) -> None:
self.name = name
self.path = path
def signature(self) -> str:
raw_key = str(hash_value(self.path))
return hashlib.sha256(raw_key.encode()).hexdigest()
def from_path(cls, path: Path) -> 'PickleNode':
if (not path.is_absolute()):
msg = 'Node must be instantiated from absolute path.'
raise ValueError(msg)
return cls(name=path.as_posix(), path=path)
def state(self) -> (str | None):
if self.path.exists():
return str(self.path.stat().st_mtime)
return None
def load(self, is_product: bool) -> Path:
if is_product:
return self
return pickle.loads(self.path.read_bytes())
def save(self, value: Any) -> None:
self.path.write_bytes(pickle.dumps(value)) |
def main():
    """End-to-end train/eval entry point for a BART-based radiology report
    generation model (IU X-Ray / MIMIC-CXR).

    Parses HF arguments, builds the report tokenizer and the
    observation/keyword/token node vocabulary, constructs the model and
    datasets, then runs training and/or test-set generation.
    """
    # ---- argument parsing: single JSON config file or command-line flags ----
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    training_args.group_by_length = True
    # ---- logging: mirror the chosen verbosity into datasets/transformers ----
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # NOTE(review): the two f-strings below concatenate without a separator, so
    # the line reads '...n_gpu: Ndistributed training...' — cosmetic only.
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    logger.info(f'Training/evaluation parameters {training_args}')
    # ---- resume-from-checkpoint detection in the output directory ----
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)
    # Use the generation-metrics trainer when evaluating with generation.
    if data_args.eval_on_gen:
        Seq2SeqTrainer = Seq2SeqTrainerGenMetrics
    else:
        Seq2SeqTrainer = transformers.Seq2SeqTrainer
    # ---- tokenizers and image transforms ----
    tokenizer_kwargs = {'cache_dir': model_args.cache_dir, 'use_fast': model_args.use_fast_tokenizer, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    from tokenizer import Tokenizer
    # Dataset name and token-frequency threshold inferred from the annotation path.
    data_args.dataset = ('iu_xray' if ('iu_xray' in data_args.annotation_file) else 'mimic_cxr')
    data_args.threshold = (3 if ('iu_xray' in data_args.annotation_file) else 10)
    # The same ImageNet-normalised transform is used for train and eval images.
    train_image_tokenizer = image_tokenizer = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    text_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', **tokenizer_kwargs)
    tokenizer = Tokenizer(data_args)
    logger.info('')
    logger.info('')
    logger.info(data_args)
    logger.info('')
    logger.info('')
    logger.info('')
    logger.info('')
    logger.info(model_args)
    logger.info('')
    logger.info('')
    # ---- annotations, tag positions and observation->keyword mapping ----
    with open(data_args.annotation_file, 'r', encoding='utf-8') as f:
        annotation = json.load(f)
    (id2tags, headers) = Tokenizer.load_tag2ids(data_args.tag_path, None, True)
    tokenizer.headers = headers
    id2tagpos = json.load(open(('./data/%s_id2tagpos.json' % data_args.dataset), 'r', encoding='utf-8'))
    obs2key = json.load(open(data_args.node_file, 'r', encoding='utf-8'))
    # ---- BART config sized to the report vocabulary ----
    config = BartConfig(vocab_size=len(tokenizer.idx2token), max_position_embeddings=data_args.max_tgt_length, encoder_layers=model_args.num_layers, encoder_ffn_dim=model_args.ffn_dim, encoder_attention_heads=model_args.num_heads, decoder_layers=model_args.num_layers, decoder_ffn_dim=model_args.ffn_dim, decoder_attention_heads=model_args.num_heads, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function='relu', d_model=model_args.d_model, dropout=model_args.dropout, attention_dropout=model_args.dropout, activation_dropout=model_args.dropout, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=tokenizer.pad_token_id, bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.eos_token_id, is_encoder_decoder=True, decoder_start_token_id=tokenizer.bos_token_id, forced_eos_token_id=tokenizer.eos_token_id, beta=model_args.beta)
    config.output_hidden_states = True
    config.token_num = config.vocab_size
    from src.models.modeling_bart_custom import BartForConditionalGeneration
    visual_backbone = {'resnet101': ('resnet101', 2048)}
    (config.visual_extractor, config.d_visual) = visual_backbone['resnet101']
    config.visual_extractor_pretrained = True
    config.chexpert_model_name_or_path = model_args.chexpert_model_name_or_path
    config.obs_num = 14
    config.region_num = 49
    # IU X-Ray pairs two views per study, doubling the visual region count.
    if ('iu_xray' in data_args.annotation_file):
        config.region_num *= 2
    config.instance_topk = data_args.topk
    config.dataset = data_args.dataset
    config.max_tgt_length = data_args.max_tgt_length
    config.topk_ngram = data_args.topk
    config.outline_level = model_args.outline_level
    config.rgcn_layers = model_args.rgcn_layers
    from nltk.corpus import stopwords
    # NOTE(review): this rebinding shadows the imported nltk `stopwords` module.
    stopwords = stopwords.words('english')
    # ---- build node vocabulary: observations, keyword n-grams and tokens ----
    key_vocab = set()
    tok_vocab = set()
    node2tok = {}
    key2tok = {}
    for obs in obs2key:
        # Drop stopword n-grams; suffix single-word keys so they cannot
        # collide with plain token nodes.
        obs2key[obs] = [ngram for ngram in obs2key[obs] if (ngram not in stopwords)]
        obs2key[obs] = [(ngram if (len(ngram.split()) > 1) else (ngram + '_NODE')) for ngram in obs2key[obs]]
        key_vocab.update(obs2key[obs][:config.topk_ngram])
    for key in key_vocab:
        toks = key.replace('_NODE', '').split()
        key2tok[key] = []
        for tok in toks:
            # Keep only in-vocabulary, non-stopword constituent tokens.
            if ((tok not in tokenizer.token2idx) or (tok in stopwords)):
                continue
            tok_vocab.add(tok)
            key2tok[key].append(tok)
    # Assign contiguous node ids: observation nodes (per status), then keyword
    # n-grams, then individual tokens.
    offset = 0
    node2id = {}
    id2node = {}
    for status in ['_True', '_False']:
        for header in headers:
            token = (header + status)
            if (token not in obs2key):
                continue
            node2id[token] = offset
            id2node[node2id[token]] = token
            offset += 1
    for key in sorted(key_vocab):
        node2id[key] = offset
        id2node[node2id[key]] = key
        offset += 1
    for tok in sorted(tok_vocab):
        assert (tok not in node2id)
        node2id[tok] = offset
        id2node[node2id[tok]] = tok
        node2tok[node2id[tok]] = tokenizer.token2idx[tok]
        offset += 1
    tokenizer.id2node = id2node
    config.node_size = ((len(key_vocab) + len(tok_vocab)) + len(obs2key))
    config.tag_size = len(obs2key)
    config.mention_size = len(key_vocab)
    assert (config.node_size == len(node2id)), 'Node Index Error.'
    # ---- model ----
    model = BartForConditionalGeneration(config=config, tokenizer=tokenizer)
    logger.info('')
    logger.info('***** Model Structure *****')
    logger.info(model)
    logger.info('***** Model Config *******')
    logger.info(config)
    logger.info('')
    logger.info('')
    logger.info('')
    # ---- datasets (optionally truncated to 16 samples in debug mode) ----
    train_dataset = eval_dataset = test_dataset = None
    if data_args.debug_model:
        for key in annotation:
            annotation[key] = annotation[key][:16]
    ids = set()
    for sample in annotation['train']:
        ids.add(sample['id'])
    if training_args.do_train:
        train_dataset = DatasetCustom(data_args=data_args, config=config, annotation=annotation, split='train', image_tokenizer=train_image_tokenizer, text_tokenizer=tokenizer, id2tags=(id2tagpos, id2tags, headers, obs2key), nodes=(key_vocab, tok_vocab, node2id, id2node, key2tok, node2tok))
        eval_dataset = DatasetCustom(data_args=data_args, config=config, annotation=annotation, split='valid', image_tokenizer=image_tokenizer, text_tokenizer=tokenizer, id2tags=(id2tagpos, id2tags, headers, obs2key), nodes=(key_vocab, tok_vocab, node2id, id2node, key2tok, node2tok))
    if training_args.do_predict:
        test_dataset = DatasetCustom(data_args=data_args, config=config, annotation=annotation, split='test', image_tokenizer=image_tokenizer, text_tokenizer=tokenizer, id2tags=(id2tagpos, id2tags, headers, obs2key), nodes=(key_vocab, tok_vocab, node2id, id2node, key2tok, node2tok))
    # Collator pads to multiples of 8 (tensor-core friendly) up to max_context_length.
    data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, text_tokenizer=text_tokenizer, train_image_tokenizer=train_image_tokenizer, eval_image_tokenizer=image_tokenizer, model=model, padding=True, max_length=data_args.max_context_length, pad_to_multiple_of=8)
    training_args.max_tgt_length = data_args.max_tgt_length
    training_args.num_beams = model_args.num_beams
    training_args.fast_lr = model_args.fast_lr
    data_args.max_steps = training_args.max_steps
    from transformers import EarlyStoppingCallback
    trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, callbacks=[EarlyStoppingCallback(early_stopping_patience=(3 if (data_args.dataset == 'mimic_cxr') else 5))])
    trainer.data_args = data_args
    if training_args.do_train:
        logger.info('*** Train ***')
        train(training_args, data_args, last_checkpoint, trainer, train_dataset)
    if training_args.do_predict:
        logger.info('*** Test ***')
        # Optionally restore a specific checkpoint before test-set generation.
        if (model_args.test_model_name_or_path is not None):
            logger.info(('*** Test: Loading %s ***' % model_args.test_model_name_or_path))
            state_dict = torch.load(os.path.join(model_args.test_model_name_or_path, WEIGHTS_NAME), map_location='cpu')
            model.load_state_dict(state_dict, strict=False)
        model = model.cuda()
        from train_eval_ende_full import eval_text
        print(model_args.num_beams)
        eval_text(max_tgt_length=data_args.max_tgt_length, model=model, tokenizer=tokenizer, test_dataset=trainer.get_test_dataloader(test_dataset), num_beams=model_args.num_beams)
class BacktestTradingSessionBuilder():
    """Fluent builder that configures and assembles a BacktestTradingSession.

    NOTE(review): two details were reconstructed from a garbled source and
    should be confirmed against the original module:
    - the default initial cash value was lost (`self._initial_cash =` had no
      right-hand side); restored to 10000000 — confirm,
    - the bare `_config` marker lines preceding most setters were restored as
      `@_config` method decorators (`set_benchmark_tms` carried no marker).
    """

    def __init__(self, settings: Settings, pdf_exporter: PDFExporter, excel_exporter: ExcelExporter):
        self._logger = qf_logger.getChild(self.__class__.__name__)
        self._backtest_name = 'Backtest Results'
        self._initial_cash = 10000000
        self._initial_risk = None
        self._benchmark_tms = None
        self._monitor_settings = None
        self._contract_ticker_mapper = SimulatedContractTickerMapper()
        self._commission_model_type = FixedCommissionModel
        self._commission_model_kwargs = {'commission': 0.0}
        self._slippage_model_type = PriceBasedSlippage
        self._slippage_model_kwargs = {'slippage_rate': 0.0, 'max_volume_share_limit': None}
        self._position_sizer_type = SimplePositionSizer
        self._position_sizer_kwargs = dict()
        self._orders_filter_types_params = []
        self._signals_register = None
        self._data_provider = None
        self._settings = settings
        self._pdf_exporter = pdf_exporter
        self._excel_exporter = excel_exporter
        self._frequency = None
        self._scheduling_time_delay = RelativeDelta(minutes=1)
        # Default UTC market open/close (13:30 / 20:00) used for daily frequency.
        self._default_daily_market_open_time = {'hour': 13, 'minute': 30, 'second': 0, 'microsecond': 0}
        self._default_daily_market_close_time = {'hour': 20, 'minute': 0, 'second': 0, 'microsecond': 0}
        OrderRounder.switch_off_rounding_for_backtest()

    @_config
    def set_backtest_name(self, name: str):
        # The name ends up in output file paths, so forbid hostile characters.
        assert (not any(((char in name) for char in '/\\?%*:|"<>')))
        self._backtest_name = name

    @_config
    def set_frequency(self, frequency: Frequency):
        """Set backtest frequency (only DAILY and MIN_1 are supported)."""
        if (frequency not in [Frequency.MIN_1, Frequency.DAILY]):
            raise ValueError('Invalid frequency value. The only frequencies supported frequencies are: Frequency.DAILY and Frequency.MIN_1')
        self._frequency = frequency
        if (self._frequency == Frequency.DAILY):
            MarketOpenEvent.set_trigger_time(self._default_daily_market_open_time)
            MarketCloseEvent.set_trigger_time(self._default_daily_market_close_time)
            self._logger.warning('MarketOpenEvent was set by default to 13:30 and MarketCloseEvent to 20:00. If you want to change the default market open or close time use the set_market_open_and_close_time function of the BacktestTradingSessionBuilder.')

    @_config
    def set_market_open_and_close_time(self, market_open_time: Dict[(str, int)], market_close_time: Dict[(str, int)]):
        """Override market open/close trigger times; dicts need 'hour' and 'minute'."""
        try:
            (market_open_hour, market_open_minutes) = (market_open_time['hour'], market_open_time['minute'])
            (market_close_hour, market_close_minutes) = (market_close_time['hour'], market_close_time['minute'])
            MarketOpenEvent.set_trigger_time({'hour': market_open_hour, 'minute': market_open_minutes, 'second': 0, 'microsecond': 0})
            MarketCloseEvent.set_trigger_time({'hour': market_close_hour, 'minute': market_close_minutes, 'second': 0, 'microsecond': 0})
        except KeyError:
            raise ValueError("In order to set market open and close time you need to pass dictionaries, which contain the 'hour' abd 'minute' keys. Any other parameter will be disregarded.") from None

    @_config
    def set_scheduling_time_delay(self, time_delay: RelativeDelta):
        self._scheduling_time_delay = time_delay

    @_config
    def set_initial_cash(self, initial_cash: int):
        assert ((type(initial_cash) is int) and (initial_cash > 0))
        self._initial_cash = initial_cash

    @_config
    def set_initial_risk(self, initial_risk: float):
        self._initial_risk = initial_risk

    @_config
    def set_data_provider(self, data_provider: DataProvider):
        self._data_provider = data_provider

    @_config
    def set_signals_register(self, signals_register: SignalsRegister):
        self._signals_register = signals_register

    @_config
    def set_monitor_settings(self, monitor_settings: BacktestMonitorSettings):
        # Reject subclasses/duck-typed objects: exact type match required.
        if (not (type(monitor_settings) is BacktestMonitorSettings)):
            self._logger.error('Monitor settings of different type than BacktestMonitorSettings: {}'.format(monitor_settings))
        else:
            self._monitor_settings = monitor_settings

    def set_benchmark_tms(self, benchmark_tms: QFSeries):
        self._benchmark_tms = benchmark_tms

    @_config
    def set_commission_model(self, commission_model_type: Type[CommissionModel], **kwargs):
        """Set commission model type; kwargs are validated against its signature."""
        try:
            commission_model_params = dict(inspect.signature(CommissionModel).parameters)
            commission_model_params.update(kwargs)
            inspect.signature(commission_model_type).bind(**commission_model_params)
            self._commission_model_type = commission_model_type
            self._commission_model_kwargs = kwargs
        except TypeError as e:
            self._logger.error('The Commission Model could not be set correctly - {}'.format(e))

    @_config
    def set_slippage_model(self, slippage_model_type: Type[Slippage], **kwargs):
        """Set slippage model type; kwargs are validated against its signature."""
        try:
            slippage_model_params = dict(inspect.signature(Slippage).parameters)
            slippage_model_params.update(kwargs)
            inspect.signature(slippage_model_type).bind(**slippage_model_params)
            self._slippage_model_type = slippage_model_type
            self._slippage_model_kwargs = kwargs
        except TypeError as e:
            self._logger.error('The Slippage Model could not be set correctly - {}'.format(e))

    @_config
    def set_position_sizer(self, position_sizer_type: Type[PositionSizer], **kwargs):
        """Set position sizer type; kwargs are validated against its signature."""
        try:
            position_sizer_params = dict(inspect.signature(PositionSizer).parameters)
            position_sizer_params.update(kwargs)
            inspect.signature(position_sizer_type).bind(**position_sizer_params)
            self._position_sizer_type = position_sizer_type
            self._position_sizer_kwargs = kwargs
        except TypeError as e:
            self._logger.error('The Position Sizer could not be set correctly - {}'.format(e))

    @_config
    def add_orders_filter(self, orders_filter_type: Type[OrdersFilter], **kwargs):
        """Append an orders filter to the pipeline after validating its kwargs."""
        try:
            orders_filter_params = dict(inspect.signature(OrdersFilter).parameters)
            orders_filter_params.update(kwargs)
            inspect.signature(orders_filter_type).bind(**orders_filter_params)
            self._orders_filter_types_params.append((orders_filter_type, kwargs))
        except TypeError as e:
            self._logger.error('The Orders Filter could not be added to the pipeline - {}'.format(e))

    @staticmethod
    def _create_event_manager(timer, notifiers: Notifiers):
        # @staticmethod restored: build() invokes this with exactly two
        # positional arguments (self._timer, self._notifiers).
        event_manager = EventManager(timer)
        event_manager.register_notifiers([notifiers.all_event_notifier, notifiers.empty_queue_event_notifier, notifiers.end_trading_event_notifier, notifiers.scheduler])
        return event_manager

    def _create_data_handler(self, data_provider, timer):
        assert (data_provider is not None), 'Data provider is None. Set data_provider using set_data_provider() method before building BacktestTradingSession'
        if (self._frequency == Frequency.MIN_1):
            data_handler = IntradayDataHandler(data_provider, timer)
        elif (self._frequency == Frequency.DAILY):
            data_handler = DailyDataHandler(data_provider, timer)
        else:
            raise ValueError("Invalid frequency parameter. The only frequencies supported by the DataHandler are Frequency.DAILY and Frequency.MIN_1. \nMake sure you set the frequency in the session builder for example: \n\t-> 'session_builder.set_frequency(Frequency.DAILY)'")
        return data_handler

    def build(self, start_date: datetime, end_date: datetime) -> BacktestTradingSession:
        """Assemble all configured components into a BacktestTradingSession."""
        self._timer = SettableTimer(start_date)
        self._notifiers = Notifiers(self._timer)
        self._events_manager = self._create_event_manager(self._timer, self._notifiers)
        self._data_handler = self._create_data_handler(self._data_provider, self._timer)
        signals_register = (self._signals_register if self._signals_register else BacktestSignalsRegister())
        self._portfolio = Portfolio(self._data_handler, self._initial_cash, self._timer)
        self._backtest_result = BacktestResult(self._portfolio, signals_register, self._backtest_name, start_date, end_date, self._initial_risk)
        self._monitor = self._monitor_setup()
        self._slippage_model = self._slippage_model_setup()
        self._commission_model = self._commission_model_setup()
        self._execution_handler = SimulatedExecutionHandler(self._data_handler, self._timer, self._notifiers.scheduler, self._monitor, self._commission_model, self._portfolio, self._slippage_model, scheduling_time_delay=self._scheduling_time_delay, frequency=self._frequency)
        self._time_flow_controller = BacktestTimeFlowController(self._notifiers.scheduler, self._events_manager, self._timer, self._notifiers.empty_queue_event_notifier, end_date)
        self._broker = BacktestBroker(self._contract_ticker_mapper, self._portfolio, self._execution_handler)
        self._order_factory = OrderFactory(self._broker, self._data_handler)
        self._position_sizer = self._position_sizer_setup(signals_register)
        self._orders_filters = self._orders_filter_setup()
        self._logger.info('\n'.join(['Creating Backtest Trading Session.', '\tBacktest Name: {}'.format(self._backtest_name), '\tData Provider: {}'.format(self._data_provider.__class__.__name__), '\tStart Date: {}'.format(start_date), '\tEnd Date: {}'.format(end_date), '\tTrading frequency:{}'.format(self._frequency), '\tInitial Cash: {:.2f}'.format(self._initial_cash)]))
        self._logger.info('\n'.join(['Configuration of components:', '\tPosition sizer: {:s}'.format(self._position_sizer.__class__.__name__), '\tTimer: {:s}'.format(self._timer.__class__.__name__), '\tData Handler: {:s}'.format(self._data_handler.__class__.__name__), '\tBacktest Result: {:s}'.format(self._backtest_result.__class__.__name__), '\tMonitor: {:s}'.format(self._monitor.__class__.__name__), '\tExecution Handler: {:s}'.format(self._execution_handler.__class__.__name__), '\tSlippage Model: {:s}'.format(self._slippage_model.__class__.__name__), '\tCommission Model: {:s}'.format(self._commission_model.__class__.__name__), '\tBroker: {:s}'.format(self._broker.__class__.__name__)]))
        # Fail fast if trigger times were never configured.
        try:
            MarketOpenEvent.trigger_time()
            MarketCloseEvent.trigger_time()
        except ValueError as ex:
            self._logger.error('MarketOpenEvent and MarketCloseEvent trigger time has to be set for intraday trading. Call set_market_open_and_close_time(...) before building the session')
            raise ex
        ts = BacktestTradingSession(contract_ticker_mapper=self._contract_ticker_mapper, start_date=start_date, end_date=end_date, position_sizer=self._position_sizer, orders_filters=self._orders_filters, data_handler=self._data_handler, timer=self._timer, notifiers=self._notifiers, portfolio=self._portfolio, events_manager=self._events_manager, monitor=self._monitor, broker=self._broker, order_factory=self._order_factory, frequency=self._frequency, backtest_result=self._backtest_result)
        return ts

    def _monitor_setup(self) -> BacktestMonitor:
        monitor = BacktestMonitor(self._backtest_result, self._settings, self._pdf_exporter, self._excel_exporter, self._monitor_settings, self._benchmark_tms)
        return monitor

    def _position_sizer_setup(self, signals_register: SignalsRegister):
        return self._position_sizer_type(self._broker, self._data_handler, self._order_factory, signals_register, **self._position_sizer_kwargs)

    def _orders_filter_setup(self):
        orders_filters = []
        for (orders_filter_type, kwargs) in self._orders_filter_types_params:
            orders_filter = orders_filter_type(self._data_handler, **kwargs)
            orders_filters.append(orders_filter)
        return orders_filters

    def _slippage_model_setup(self):
        return self._slippage_model_type(data_provider=self._data_provider, **self._slippage_model_kwargs)

    def _commission_model_setup(self):
        return self._commission_model_type(**self._commission_model_kwargs)
class TestResourcesApp(unittest.TestCase):
    """Smoke test for the resources_app example: build the root widget and
    check that it renders valid HTML."""

    @classmethod
    def setUpClass(cls):
        # Bug fix: setUpClass takes `cls` and is invoked by unittest on the
        # class, so it must be a classmethod. Import lazily so the example
        # module is only required when this test actually runs.
        import resources_app
        cls.AppClass = resources_app.MyApp

    def setUp(self):
        _set_timings_fast()
        # Silence per-request logging during the test.
        self.AppClass.log_request = (lambda x, y: None)
        self.previouse_dir = os.getcwd()
        os.chdir(examples_dir)

    def tearDown(self):
        del self.AppClass.log_request
        self.app.on_close()
        os.chdir(self.previouse_dir)

    def test_main(self):
        self.app = self.AppClass(MockRequest(), ('0.0.0.0', 8888), MockServer())
        root_widget = self.app.main()
        html = root_widget.repr()
        assertValidHTML(html)
def model_processing(model, src_dir, dest_dir, timeseq_len):
    """Run *model* over every clip under ``src_dir/{train,test}`` and mirror
    the class-directory layout under *dest_dir* with the predicted features.

    Each clip is a .npy array; predictions use ``batch_size=timeseq_len``.
    """
    def _ensure_split_dir(path):
        # Create the split directory if needed, reporting what happened.
        if os.path.exists(path):
            print(path, 'already exists')
        else:
            os.mkdir(path)
            print(path, 'created')

    src_train = os.path.join(src_dir, 'train')
    src_test = os.path.join(src_dir, 'test')
    out_train = os.path.join(dest_dir, 'train')
    _ensure_split_dir(out_train)
    out_test = os.path.join(dest_dir, 'test')
    _ensure_split_dir(out_test)
    for src_split, out_split in ((src_train, out_train), (src_test, out_test)):
        print('Processing data in {}'.format(src_split))
        for index, class_name in enumerate(os.listdir(src_split)):
            class_src = os.path.join(src_split, class_name)
            class_out = os.path.join(out_split, class_name)
            if not os.path.exists(class_out):
                os.mkdir(class_out)
                print(class_out, 'created')
            for filename in os.listdir(class_src):
                clip_data = np.load(os.path.join(class_src, filename))
                processed = model.predict(clip_data, batch_size=timeseq_len)
                np.save(os.path.join(class_out, filename), processed)
            print('No.{} class {} finished, data saved in {}'.format(index, class_name, class_out))
class TestClassPath():
    """Tests for find_r5_classpath: download, local file/URI, remote URI and
    error paths, plus JVM signal-chaining environment checks.

    NOTE(review): the two `@pytest.mark` prefixes below were reconstructed
    from garbled `.skipif(...)` lines; confirm against the original module.
    """

    @pytest.mark.skipif((sys.platform == 'win32'), reason='Windows cannot delete jar while JVM runs')
    def test_download_classpath_with_verbose(self, r5_jar_url, r5_jar_sha256, r5_jar_cached, r5_jar_cached_invalid):
        sys.argv.extend(['--verbose', '--r5-classpath', r5_jar_cached_invalid])
        # Force a fresh download by removing the cached jar, if present.
        try:
            pathlib.Path(r5_jar_cached).unlink()
        except FileNotFoundError:
            pass
        with pytest.warns(RuntimeWarning, match='Could not find R5 jar'):
            r5_classpath = find_r5_classpath(Config().arguments)
        with open(r5_classpath, 'rb') as r5_jar:
            digest = hashlib.sha256(r5_jar.read()).hexdigest()
        assert (digest == r5_jar_sha256)
        sys.argv = sys.argv[:(- 3)]

    def test_use_classpath_from_local_file(self, r5_jar_url, r5_jar_sha256, r5_jar_cached):
        find_r5_classpath(Config().arguments)
        sys.argv.extend(['--r5-classpath', r5_jar_cached])
        r5_classpath = find_r5_classpath(Config().arguments)
        with open(r5_classpath, 'rb') as r5_jar:
            digest = hashlib.sha256(r5_jar.read()).hexdigest()
        assert (digest == r5_jar_sha256)
        assert (r5_classpath == r5_jar_cached)
        sys.argv = sys.argv[:(- 2)]

    def test_use_classpath_from_local_uri(self, r5_jar_url, r5_jar_sha256, r5_jar_cached):
        find_r5_classpath(Config().arguments)
        sys.argv.extend(['--r5-classpath', f'file://{r5_jar_cached}'])
        r5_classpath = find_r5_classpath(Config().arguments)
        with open(r5_classpath, 'rb') as r5_jar:
            digest = hashlib.sha256(r5_jar.read()).hexdigest()
        assert (digest == r5_jar_sha256)
        assert (r5_classpath == r5_jar_cached)
        sys.argv = sys.argv[:(- 2)]

    def test_use_classpath_from_remote_uri(self, r5_jar_url, r5_jar_sha256, r5_jar_cached):
        sys.argv.extend(['--r5-classpath', r5_jar_url])
        r5_classpath = find_r5_classpath(Config().arguments)
        with open(r5_classpath, 'rb') as r5_jar:
            digest = hashlib.sha256(r5_jar.read()).hexdigest()
        assert (digest == r5_jar_sha256)
        assert (r5_classpath == r5_jar_cached)
        sys.argv = sys.argv[:(- 2)]

    def test_use_classpath_from_invalid_uri(self):
        sys.argv.extend(['--r5-classpath', 'invalid://schema/and/path'])
        with pytest.raises(UnexpectedClasspathSchema):
            _ = find_r5_classpath(Config().arguments)
        sys.argv = sys.argv[:(- 2)]

    def test_find_classpath_download(self, r5_jar_url, r5_jar_sha256, r5_jar_cached_invalid):
        sys.argv.extend(['--r5-classpath', r5_jar_cached_invalid])
        r5_classpath = find_r5_classpath(Config().arguments)
        with open(r5_classpath, 'rb') as r5_jar:
            digest = hashlib.sha256(r5_jar.read()).hexdigest()
        assert (digest == r5_jar_sha256)
        sys.argv = sys.argv[:(- 2)]

    @pytest.mark.skipif((sys.platform == 'win32'), reason='No signal chaining library for Windows')
    def test_signal_chaining(self):
        # The JVM preload library must be configured and exist on disk.
        if (sys.platform == 'linux'):
            assert ('LD_PRELOAD' in os.environ)
            assert pathlib.Path(os.environ['LD_PRELOAD']).exists()
        elif (sys.platform == 'darwin'):
            assert ('DYLD_INSERT_LIBRARIES' in os.environ)
            assert pathlib.Path(os.environ['DYLD_INSERT_LIBRARIES']).exists()
class MultiResolutionDataset(Dataset):
    """LMDB-backed image dataset keyed by '<resolution>-<zero-padded index>'."""

    def __init__(self, path, transform=_transform, resolution=256, return_indices=False):
        self.env = lmdb.open(path, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
        if not self.env:
            raise IOError('Cannot open lmdb dataset', path)
        # The total sample count is stored under the 'length' key.
        with self.env.begin(write=False) as txn:
            self.length = int(txn.get('length'.encode('utf-8')).decode('utf-8'))
        self.resolution = resolution
        # Fall back to the identity transform when none is supplied.
        self.transform = transform if transform is not None else (lambda x: x)
        self.return_indices = return_indices

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        key = f'{self.resolution}-{str(index).zfill(5)}'.encode('utf-8')
        with self.env.begin(write=False) as txn:
            img_bytes = txn.get(key)
        img = self.transform(Image.open(BytesIO(img_bytes)))
        return (img, index) if self.return_indices else img
class PartialCompareOutcome():
    """Truthy wrapper around an optional comparison error message."""

    def __init__(self, error=None):
        self.error = error  # None means the comparison succeeded

    def __bool__(self):
        # Success iff no error message was recorded.
        return self.error is None

    def __repr__(self):
        return 'PartialCompareOutcome(error={!r})'.format(self.error)

    def __str__(self):
        if self.error is None:
            return 'true'
        return 'false'
def _read_logo(content):
def _read_logo(pat):
pattern = (pat + ':\\s+\\S+')
data_str = re.compile(pattern).search(content).group()
return data_str.split(':')[1].strip()
info = {}
for pat in ['Version', 'Website']:
info[pat] = _read_logo(pat)
return info |
class SemVerWithVPrefix(Version):
    """A semver Version whose canonical string form carries a 'v' prefix."""

    @classmethod
    def parse(cls, version: str) -> 'SemVerWithVPrefix':
        """Parse 'v1.2.3'-style tags; the leading 'v'/'V' is mandatory.

        Bug fix: the method takes ``cls`` and calls ``super().parse`` but was
        missing the ``@classmethod`` decorator, so ``SemVerWithVPrefix.parse(s)``
        bound the tag string to ``cls``.
        """
        if version[0] not in ('v', 'V'):
            raise ValueError(f"{version!r}: not a valid semantic version tag. Must start with 'v' or 'V'")
        return super().parse(version[1:], optional_minor_and_patch=True)

    def __str__(self) -> str:
        # Re-attach the prefix stripped during parsing.
        return 'v' + super().__str__()
@register_transform('VisslAutoAugment')
class AutoAugment(ClassyTransform):
    """Apply a timm AutoAugment policy: each call picks one random sub-policy
    and applies its ops to the image in sequence.

    NOTE(review): the class decorator was reconstructed as
    ``@register_transform('VisslAutoAugment')`` from the garbled
    ``_transform('VisslAutoAugment')`` line; confirm against the original file.
    """

    def __init__(self, policy_name='v0', magnitude_std=0, **kwargs):
        # NOTE(review): _HPARAMS_DEFAULT is merged *over* caller kwargs here,
        # so defaults override user-supplied values — confirm this is intended.
        hparams = kwargs
        hparams.update(_HPARAMS_DEFAULT)
        hparams['magnitude_std'] = magnitude_std
        self.policy = auto_augment_policy(policy_name, hparams=hparams)

    def __call__(self, img):
        # One random sub-policy per image; ops are applied in order.
        sub_policy = random.choice(self.policy)
        for op in sub_policy:
            img = op(img)
        return img
class Pad(object):
    """Pad an image on all sides with a configurable fill value and mode.

    ``padding`` may be a number or a tuple of 2 (left/right, top/bottom) or
    4 (left, top, right, bottom) values.
    """

    def __init__(self, padding, fill=0, padding_mode='constant'):
        assert isinstance(padding, (numbers.Number, tuple))
        assert isinstance(fill, (numbers.Number, str, tuple))
        assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
        # Sequences must describe either two or four borders.
        if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
            message = 'Padding must be an int or a 2, or 4 element tuple, not a ' + '{} element tuple'.format(len(padding))
            raise ValueError(message)
        self.padding = padding
        self.fill = fill
        self.padding_mode = padding_mode

    def __call__(self, img):
        # Delegate the actual padding to the functional backend.
        return F.pad(img, self.padding, self.fill, self.padding_mode)

    def __repr__(self):
        details = '(padding={0}, fill={1}, padding_mode={2})'.format(self.padding, self.fill, self.padding_mode)
        return self.__class__.__name__ + details
class Migration(migrations.Migration):
    """Widen the URL CharFields on QueuedSong and CurrentSong to max_length=2000."""

    dependencies = [('core', '0012_currentsong_last_paused')]
    # AlterField only — no data migration is required.
    operations = [migrations.AlterField(model_name='queuedsong', name='external_url', field=models.CharField(max_length=2000)), migrations.AlterField(model_name='queuedsong', name='internal_url', field=models.CharField(blank=True, max_length=2000, null=True)), migrations.AlterField(model_name='queuedsong', name='stream_url', field=models.CharField(blank=True, max_length=2000, null=True)), migrations.AlterField(model_name='currentsong', name='external_url', field=models.CharField(max_length=2000)), migrations.AlterField(model_name='currentsong', name='stream_url', field=models.CharField(blank=True, max_length=2000, null=True))]
def propose_interpreters(spec, cache_dir, env):
    """Yield discovered interpreters (best first) that satisfy *spec*.

    Candidates come from the Windows registry scan; higher version numbers
    win, with PythonCore entries preferred on ties.
    """
    def _priority(entry):
        # entry: (name, major, minor, arch, exe, ...); None sorts lowest.
        numeric = tuple(-1 if part is None else part for part in entry[1:4])
        return (*numeric, 1 if entry[0] == 'PythonCore' else 0)

    candidates = sorted(discover_pythons(), key=_priority, reverse=True)
    for name, major, minor, arch, exe, _ in candidates:
        implementation = _IMPLEMENTATION_BY_ORG.get(name, name)
        # Non-CPython registry entries skip the cheap pre-filter and are
        # always probed directly.
        skip_pre_filter = implementation.lower() != 'cpython'
        registry_spec = PythonSpec(None, implementation, major, minor, None, arch, exe)
        if skip_pre_filter or registry_spec.satisfies(spec):
            interpreter = Pep514PythonInfo.from_exe(exe, cache_dir, env=env, raise_on_error=False)
            if interpreter is not None and interpreter.satisfies(spec, impl_must_match=True):
                yield interpreter
class Effect11400(BaseEffect):
    """Passive hull effect: boosts 'signatureRadiusBonus' of modules requiring
    the 'High Speed Maneuvering' skill by the ship's
    'shipBonusNavyDestroyerMinmatar3' attribute (per 'Minmatar Destroyer'
    skill level)."""

    type = 'passive'

    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        # NOTE(review): @staticmethod reconstructed — handler takes no self,
        # matching the plain-function signature used by sibling effect classes.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('High Speed Maneuvering')), 'signatureRadiusBonus', ship.getModifiedItemAttr('shipBonusNavyDestroyerMinmatar3'), skill='Minmatar Destroyer', **kwargs)
class MolGraph():
    """Graph representation of a single molecule for message passing.

    Builds atom features (``f_atoms``), directed-bond features (``f_bonds``)
    and the index maps ``a2b`` (atom -> incoming bond ids), ``b2a`` (bond ->
    source atom) and ``b2revb`` (bond -> reverse-direction bond id). Every
    chemical bond is stored twice, once per direction, so ``n_bonds``
    advances by 2 per bond.
    """

    def __init__(self, mol: Union[(str, Chem.Mol)], atom_descriptors: np.ndarray=None):
        # Accept either a SMILES string or a pre-built RDKit Mol.
        if (type(mol) == str):
            mol = Chem.MolFromSmiles(mol)
        self.n_atoms = 0
        self.n_bonds = 0
        self.f_atoms = []
        self.f_bonds = []
        self.a2b = []
        self.b2a = []
        self.b2revb = []
        self.f_atoms = [atom_features(atom) for atom in mol.GetAtoms()]
        # Optionally append extra per-atom descriptors to each feature vector.
        if (atom_descriptors is not None):
            self.f_atoms = [(f_atoms + descs.tolist()) for (f_atoms, descs) in zip(self.f_atoms, atom_descriptors)]
        self.n_atoms = len(self.f_atoms)
        for _ in range(self.n_atoms):
            self.a2b.append([])
        # Enumerate each unordered atom pair once; record both bond directions.
        for a1 in range(self.n_atoms):
            for a2 in range((a1 + 1), self.n_atoms):
                bond = mol.GetBondBetweenAtoms(a1, a2)
                if (bond is None):
                    continue
                f_bond = bond_features(bond)
                # A directed bond's features are its source atom's features
                # concatenated with the bond features.
                self.f_bonds.append((self.f_atoms[a1] + f_bond))
                self.f_bonds.append((self.f_atoms[a2] + f_bond))
                b1 = self.n_bonds
                b2 = (b1 + 1)
                # b1 is a1->a2 (incoming at a2); b2 is a2->a1 (incoming at a1).
                self.a2b[a2].append(b1)
                self.b2a.append(a1)
                self.a2b[a1].append(b2)
                self.b2a.append(a2)
                self.b2revb.append(b2)
                self.b2revb.append(b1)
                self.n_bonds += 2
@admin.register(Nomination)
class NominationAdmin(admin.ModelAdmin):
    """Django admin for Nomination, ordered by election then nominee surname.

    NOTE(review): the ``@admin.register`` prefix was reconstructed from the
    garbled ``(Nomination)`` line; confirm against the original module.
    """

    raw_id_fields = ('nominee', 'nominator')
    list_display = ('__str__', 'election', 'accepted', 'approved', 'nominee')
    list_filter = ('election', 'accepted', 'approved')

    def get_ordering(self, request):
        # Case-insensitive ordering on the nominee's last name.
        return ['election', Lower('nominee__user__last_name')]
def get_edge_candidates(o: object) -> Iterator[tuple[(object, object)]]:
    """Yield (key, value) edges for every object reachable from *o* in one step.

    Attribute edges come first; then mapping items, or (index, element) pairs
    for non-string iterables. Objects whose type overrides __getattribute__
    are not inspected at all (attribute access could run arbitrary code).
    """
    if '__getattribute__' in getattr(type(o), '__dict__'):
        return
    if type(o) not in COLLECTION_TYPE_BLACKLIST:
        for name in dir(o):
            if name in ATTR_BLACKLIST:
                continue
            try:
                if not hasattr(o, name) or isproperty(o, name):
                    continue
                value = getattr(o, name)
            except AssertionError:
                # Some attribute accesses assert internally; skip those edges.
                continue
            if type(value) not in ATOMIC_TYPE_BLACKLIST:
                yield (name, value)
    if isinstance(o, Mapping):
        yield from o.items()
    elif isinstance(o, Iterable) and not isinstance(o, str):
        for index, element in enumerate(o):
            yield (index, element)
def load_model(args, do_print=True):
    """Build a ColBERT model from *args*, load its checkpoint, and return the
    model (in eval mode) together with the loaded checkpoint dict."""
    model = ColBERT.from_pretrained(
        'bert-base-uncased',
        query_maxlen=args.query_maxlen,
        doc_maxlen=args.doc_maxlen,
        dim=args.dim,
        similarity_metric=args.similarity,
        mask_punctuation=args.mask_punctuation,
    )
    model = model.to(DEVICE)

    print_message('#> Loading model checkpoint.', condition=do_print)
    checkpoint = load_checkpoint(args.checkpoint, model, do_print=do_print)

    # Inference-only: disable dropout etc.
    model.eval()
    return (model, checkpoint)
# NOTE(review): the next line appears to be a truncated decorator — most
# likely "@pytest.mark.parametrize(...)"; confirm against the original file.
.parametrize('times, duration, expected_message', [[0, 0, 'times must be between 1 and 9'], [(- 1), 0, 'times must be between 1 and 9'], [10, 0, 'times must be between 1 and 9'], [11, 0, 'times must be between 1 and 9'], [3, 0, 'duration must be between 1 and 9'], [3, (- 1), 'duration must be between 1 and 9'], [3, 10, 'duration must be between 1 and 9'], [3, 11, 'duration must be between 1 and 9']])
def test_buzzer_fuction_with_outrange_values(times: int, duration: int, expected_message: str) -> None:
    """buzzer() must raise ValueError with a descriptive message when either
    argument falls outside the accepted 1..9 range (times is checked first)."""
    instance = printer.Dummy()
    with pytest.raises(ValueError) as e:
        instance.buzzer(times, duration)
    assert (str(e.value) == expected_message)
class ButtonTestCases(unittest.TestCase):
    """GUI tests for button-like controls (Button, RadioButton) hosted by the
    MFC CmnCtrl1.exe sample application."""

    def setUp(self):
        """Start the sample app and select the tab hosting the test controls."""
        _set_timings_fast()
        self.app = Application()
        self.app = self.app.start(os.path.join(mfc_samples_folder, u'CmnCtrl1.exe'))
        self.app.Common_Controls_Sample.TabControl.select('CDateTimeCtrl')
        self.ctrl = self.app.Common_Controls_Sample

    def tearDown(self):
        """Close the sample application."""
        self.app.kill()

    def testGetProperties(self):
        """Every get_properties() entry must match its accessor method."""
        props = self.ctrl.Button2.get_properties()
        self.assertEqual('Button', props['friendly_class_name'])
        self.assertEqual(self.ctrl.Button2.texts(), props['texts'])
        for prop_name in props:
            # Each property name doubles as a zero-arg accessor on the wrapper.
            self.assertEqual(getattr(self.ctrl.Button2, prop_name)(), props[prop_name])

    def test_NeedsImageProp(self):
        """Buttons that need an image property must expose 'image' in properties."""
        self.assertEqual(self.ctrl.OKButton._needs_image_prop, True)
        self.assertEqual(('image' in self.ctrl.OKButton.get_properties()), True)

    def testFriendlyClass(self):
        """Friendly class names distinguish plain buttons from radio buttons."""
        self.assertEqual(self.ctrl.Button2.friendly_class_name(), 'Button')
        self.assertEqual(self.ctrl.RadioButton2.friendly_class_name(), 'RadioButton')

    def testCheckUncheck(self):
        """check()/uncheck() must toggle the reported check state (1/0)."""
        self.ctrl.RadioButton2.check()
        self.assertEqual(self.ctrl.RadioButton2.get_check_state(), 1)
        self.ctrl.RadioButton2.uncheck()
        self.assertEqual(self.ctrl.RadioButton2.get_check_state(), 0)

    def testGetCheckState_unchecked(self):
        """A never-touched radio button reports state 0."""
        self.assertEqual(self.ctrl.RadioButton.get_check_state(), 0)

    def testGetCheckState_checked(self):
        """check() results in state 1."""
        self.ctrl.RadioButton2.check()
        self.assertEqual(self.ctrl.RadioButton2.get_check_state(), 1)

    def testClick(self):
        """Clicking radio buttons in sequence leaves only the last one checked."""
        self.ctrl.RadioButton2.click()
        self.ctrl.RadioButton.click()
        self.ctrl.RadioButton3.click()
        self.assertEqual(self.ctrl.RadioButton3.get_check_state(), 1)

    def testIsSelected(self):
        """A click selects the radio button (state transitions 0 -> 1)."""
        self.assertEqual(self.ctrl.RadioButton.get_check_state(), 0)
        self.ctrl.RadioButton.click()
        self.assertEqual(self.ctrl.RadioButton.get_check_state(), 1)
class AlwaysOnTopWindow(QtWidgets.QMainWindow):
    """Frameless, non-activating companion window hosting a toolbar and a
    button that toggles the associated map's always-on-top state."""

    def __init__(self, *args, m=None, **kwargs):
        """
        :param m: the map/controller object providing _get/_set_always_on_top().
        """
        super().__init__(*args, **kwargs)
        self.out_alpha = 0.25  # NOTE(review): not read in this class — presumably consumed elsewhere; confirm.
        self.m = m
        # NOTE(review): constructing QApplication([]) just to call .instance()
        # looks wrong — creating a second QApplication raises if one already
        # exists; the usual idiom is QtWidgets.QApplication.instance(). Confirm.
        self.app = QtWidgets.QApplication([]).instance()
        # Show without stealing keyboard focus; frameless dialog-style window.
        self.setAttribute(Qt.WA_ShowWithoutActivating)
        self.setWindowFlags((Qt.FramelessWindowHint | Qt.Dialog))
        self.setFocusPolicy(Qt.ClickFocus)
        self.on_top = AlwaysOnTopToolButton()
        self.toolbar = ToolBar(m=self.m, left_widget=self.on_top, layers='text', add_buttons=True)
        self.on_top.clicked.connect(self.toggle_always_on_top)
        self.addToolBar(self.toolbar)
        ()  # NOTE(review): stray no-op expression — likely residue of removed code.

    def toggle_always_on_top(self, *args, **kwargs):
        """Flip the map's always-on-top flag and sync the button's checked state."""
        q = self.m._get_always_on_top()
        if q:
            self.m._set_always_on_top(False)
            self.on_top.setChecked(False)
        else:
            self.m._set_always_on_top(True)
            self.on_top.setChecked(True)

    def closeEvent(*args, **kwargs):
        # NOTE: `self` arrives via *args — the explicit parameter was omitted.
        global _windows_to_close
        # Best-effort cascade: close every registered companion window.
        for w in _windows_to_close:
            try:
                w.close()
            except Exception:
                _log.debug(f'There was a problem while trying to close the window {w}')
# NOTE(review): the next two lines appear to be truncated decorators — most
# likely "@pytest.mark.parametrize(...)"; confirm against the original file.
.parametrize('client_sends', [True, False])
.parametrize('code, reason', [(CloseReason.NORMAL_CLOSURE, 'bye'), (CloseReason.GOING_AWAY, '')])
def test_closure(client_sends: bool, code: CloseReason, reason: str) -> None:
    """Full close handshake between a websocket client and server connection.

    Whichever side sends CloseConnection first is "local"; the peer must
    surface an equivalent CloseConnection event, both sides must pass through
    REMOTE_CLOSING/LOCAL_CLOSING into CLOSED, and data received after closure
    must raise LocalProtocolError.
    """
    client = Connection(CLIENT)
    server = Connection(SERVER)
    if client_sends:
        local = client
        remote = server
    else:
        local = server
        remote = client
    # Local initiates the close; remote must observe a matching event.
    remote.receive_data(local.send(CloseConnection(code=code, reason=reason)))
    event = next(remote.events())
    assert isinstance(event, CloseConnection)
    assert (event.code is code)
    assert (event.reason == reason)
    assert (remote.state is ConnectionState.REMOTE_CLOSING)
    assert (local.state is ConnectionState.LOCAL_CLOSING)
    # Remote echoes the close response; both ends finish in CLOSED.
    local.receive_data(remote.send(event.response()))
    event = next(local.events())
    assert isinstance(event, CloseConnection)
    assert (event.code is code)
    assert (event.reason == reason)
    assert (remote.state is ConnectionState.CLOSED)
    assert (local.state is ConnectionState.CLOSED)
    # Further inbound bytes on a closed connection are a protocol error.
    with pytest.raises(LocalProtocolError):
        local.receive_data(b'foobar')
def create_repo():
    """Fixture factory: returns a callable that creates a repository for the
    given org/name/user and returns its registry reference."""

    def _create_repo(orgname, reponame, user):
        created = create_repository(orgname, reponame, user)
        assert created is not None

        # Look the repository back up through the registry data model.
        repo_ref = registry_model.lookup_repository(orgname, reponame)
        assert repo_ref is not None
        return repo_ref

    return _create_repo
class Term(with_metaclass(ABCMeta, object)):
    """
    Base class for terms in a Pipeline-style computation graph.

    Terms are interned: ``__new__`` resolves defaults, computes a static
    identity from all constructor inputs, and returns the already-live
    instance with that identity from ``_term_cache`` when one exists.
    Because of this, per-instance state is set up in ``_init`` (called once
    per distinct identity) rather than in ``__init__``.
    """
    # Class-level defaults, overridable by subclasses; NotSpecified markers
    # are resolved against these in __new__.
    dtype = NotSpecified
    missing_value = NotSpecified
    params = ()
    domain = GENERIC
    window_safe = False
    ndim = 2
    # Weak-valued memo cache so interned terms can still be garbage-collected.
    _term_cache = WeakValueDictionary()

    def __new__(cls, domain=NotSpecified, dtype=NotSpecified, missing_value=NotSpecified, window_safe=NotSpecified, ndim=NotSpecified, *args, **kwargs):
        """Memoized constructor: resolve defaults, validate dtype/missing_value,
        and return the cached instance for this identity when one exists."""
        if (domain is NotSpecified):
            domain = cls.domain
        if (dtype is NotSpecified):
            dtype = cls.dtype
        if (missing_value is NotSpecified):
            missing_value = cls.missing_value
        if (ndim is NotSpecified):
            ndim = cls.ndim
        if (window_safe is NotSpecified):
            window_safe = cls.window_safe
        (dtype, missing_value) = validate_dtype(cls.__name__, dtype, missing_value)
        params = cls._pop_params(kwargs)
        identity = cls._static_identity(*args, domain=domain, dtype=dtype, missing_value=missing_value, window_safe=window_safe, ndim=ndim, params=params, **kwargs)
        try:
            return cls._term_cache[identity]
        except KeyError:
            # First construction for this identity: build, initialize via
            # _init, and intern in the cache.
            new_instance = cls._term_cache[identity] = super(Term, cls).__new__(cls)._init(*args, domain=domain, dtype=dtype, missing_value=missing_value, window_safe=window_safe, ndim=ndim, params=params, **kwargs)
            return new_instance

    # NOTE(review): takes ``cls`` and is invoked as ``cls._pop_params(kwargs)``
    # above — presumably decorated @classmethod in the original file; the
    # decorator is not visible in this excerpt.
    def _pop_params(cls, kwargs):
        """Extract this class's declared params from **kwargs as a hashable
        tuple of (name, value) pairs; raise TypeError for missing or
        unhashable values."""
        params = cls.params
        if (not isinstance(params, Mapping)):
            # A bare sequence of names means "required, no default".
            params = {k: NotSpecified for k in params}
        param_values = []
        for (key, default_value) in params.items():
            try:
                value = kwargs.pop(key, default_value)
                if (value is NotSpecified):
                    raise KeyError(key)
                # Param values are part of the term's identity, so they must
                # be hashable.
                hash(value)
            except KeyError:
                raise TypeError('{typename} expected a keyword parameter {name!r}.'.format(typename=cls.__name__, name=key))
            except TypeError:
                raise TypeError('{typename} expected a hashable value for parameter {name!r}, but got {value!r} instead.'.format(typename=cls.__name__, name=key, value=value))
            param_values.append((key, value))
        return tuple(param_values)

    def __init__(self, *args, **kwargs):
        # Intentionally empty: initialization happens in _init (via __new__)
        # so cached/interned instances are not re-initialized.
        pass

    # NOTE(review): truncated decorator — likely "@expect_types(key=Asset)";
    # confirm against the original file.
    _types(key=Asset)
    def __getitem__(self, key):
        """term[asset] -> a slice view of this term for a single asset."""
        if isinstance(self, LoadableTerm):
            raise NonSliceableTerm(term=self)
        from .mixins import SliceMixin
        slice_type = type(self)._with_mixin(SliceMixin)
        return slice_type(self, key)

    # NOTE(review): takes ``cls`` and is invoked as ``cls._static_identity(...)``
    # in __new__ — presumably a @classmethod in the original file.
    def _static_identity(cls, domain, dtype, missing_value, window_safe, ndim, params):
        """The hashable key under which instances are interned in _term_cache."""
        return (cls, domain, dtype, missing_value, window_safe, ndim, params)

    def _init(self, domain, dtype, missing_value, window_safe, ndim, params):
        """One-time instance initialization; returns self for chaining in __new__."""
        self.domain = domain
        self.dtype = dtype
        self.missing_value = missing_value
        self.window_safe = window_safe
        self.ndim = ndim
        for (name, value) in params:
            # Params must not collide with existing attributes.
            if hasattr(self, name):
                raise TypeError('Parameter {name!r} conflicts with already-present attribute with value {value!r}.'.format(name=name, value=getattr(self, name)))
        self.params = dict(params)
        # Guard that subclasses overriding _validate() call super()._validate().
        self._subclass_called_super_validate = False
        self._validate()
        assert self._subclass_called_super_validate, 'Term._validate() was not called.\nThis probably means that you overrode _validate without calling super().'
        del self._subclass_called_super_validate
        return self

    def _validate(self):
        """Validation hook; subclass overrides must call super()._validate()."""
        self._subclass_called_super_validate = True

    def compute_extra_rows(self, all_dates, start_date, end_date, min_extra_rows):
        """Extra rows of history this term needs; defaults to the minimum requested."""
        return min_extra_rows

    # The following are hooks subclasses must provide.
    # NOTE(review): likely decorated (e.g. @lazyval) in the original file;
    # the decorators are not visible in this excerpt.
    def inputs(self):
        raise NotImplementedError('inputs')

    def windowed(self):
        raise NotImplementedError('windowed')

    def mask(self):
        raise NotImplementedError('mask')

    def dependencies(self):
        raise NotImplementedError('dependencies')

    def graph_repr(self):
        """Short label used when rendering the term graph."""
        return type(self).__name__

    def recursive_repr(self):
        """Label used when this term appears inside another term's repr."""
        return type(self).__name__
def make_batches(lines, cfg, task, max_positions, encode_fn):
    """Yield Batch objects for interactive generation over *lines*.

    When constrained generation is enabled, each input line may carry
    tab-separated constraint phrases after the source text; these are encoded
    with the target dictionary and packed into a constraints tensor.
    """
    def encode_fn_target(x):
        # Constraints are target-side text; currently encoded like the source.
        return encode_fn(x)
    if cfg.generation.constraints:
        batch_constraints = [list() for _ in lines]
        for (i, line) in enumerate(lines):
            if ('\t' in line):
                # First field is the source sentence; the rest are constraints.
                (lines[i], *batch_constraints[i]) = line.split('\t')
        for (i, constraint_list) in enumerate(batch_constraints):
            batch_constraints[i] = [task.target_dictionary.encode_line(encode_fn_target(constraint), append_eos=False, add_if_not_exist=False) for constraint in constraint_list]
    if cfg.generation.constraints:
        constraints_tensor = pack_constraints(batch_constraints)
    else:
        constraints_tensor = None
    (tokens, lengths) = task.get_interactive_tokens_and_lengths(lines, encode_fn)
    # Single-epoch, order-preserving iterator over the inference dataset.
    itr = task.get_batch_iterator(dataset=task.build_dataset_for_inference(tokens, lengths, constraints=constraints_tensor), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=max_positions, ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test).next_epoch_itr(shuffle=False)
    for batch in itr:
        ids = batch['id']
        src_tokens = batch['net_input']['src_tokens']
        src_lengths = batch['net_input']['src_lengths']
        constraints = batch.get('constraints', None)
        (yield Batch(ids=ids, src_tokens=src_tokens, src_lengths=src_lengths, constraints=constraints))
def test_create_legacy_tasks(db, settings):
    """Importing the legacy tasks XML must create exactly two Task elements,
    all flagged as newly created and none as updated."""
    Task.objects.all().delete()  # start from an empty table so counts are exact
    xml_file = ((((Path(settings.BASE_DIR) / 'xml') / 'elements') / 'legacy') / 'tasks.xml')
    root = read_xml_file(xml_file)
    version = root.attrib.get('version')
    # XML -> flat elements -> converted to current version -> import order.
    elements = flat_xml_to_elements(root)
    elements = convert_elements(elements, version)
    elements = order_elements(elements)
    elements = elements.values()
    import_elements(elements)
    assert (len(root) == len(elements) == Task.objects.count() == 2)
    assert all(((element['created'] is True) for element in elements))
    assert all(((element['updated'] is False) for element in elements))
# NOTE(review): the next two lines appear to be truncated decorators — most
# likely "@register_canonicalize" and "@node_rewriter([true_div, int_div])";
# confirm against the original file.
_canonicalize
_rewriter([true_div, int_div])
def local_div_switch_sink(fgraph, node):
    """Sink a division below a switch that has a constant-zero branch.

    Rewrites ``div(switch(cond, 0, x), y)`` to ``switch(cond, 0, div(x, y))``
    and ``div(switch(cond, x, 0), y)`` to ``switch(cond, div(x, y), 0)``, so
    the division is not evaluated on the branch whose result is the constant
    0 (avoiding spurious 0/0 NaNs there).

    Returns the replacement list, or False when the pattern does not match.
    """
    if ((node.op != true_div) and (node.op != int_div)):
        return False
    op = node.op
    if (node.inputs[0].owner and (node.inputs[0].owner.op == switch)):
        switch_node = node.inputs[0].owner
        # Case 1: the "true" branch of the switch is the constant 0.
        try:
            if (get_underlying_scalar_constant_value(switch_node.inputs[1], only_process_constants=True) == 0.0):
                fdiv = op(switch_node.inputs[2], node.inputs[1])
                copy_stack_trace(node.outputs, fdiv)
                fct = [switch(switch_node.inputs[0], 0, fdiv)]
                # The rewritten graph may differ from the original where it
                # produced NaN; compare outputs ignoring NaNs.
                fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
                copy_stack_trace((node.outputs + switch_node.outputs), fct)
                return fct
        except NotScalarConstantError:
            pass
        # Case 2: the "false" branch of the switch is the constant 0.
        try:
            if (get_underlying_scalar_constant_value(switch_node.inputs[2], only_process_constants=True) == 0.0):
                fdiv = op(switch_node.inputs[1], node.inputs[1])
                copy_stack_trace(node.outputs, fdiv)
                fct = [switch(switch_node.inputs[0], fdiv, 0)]
                fct[0].tag.values_eq_approx = values_eq_approx_remove_nan
                copy_stack_trace((node.outputs + switch_node.outputs), fct)
                return fct
        except NotScalarConstantError:
            pass
    return False
def get_dataset_videoswin(args, split='train', dataset_type=None):
    """Construct the Video Swin Transformer dataset for the given split.

    :param args: experiment configuration namespace (paths, sampling options, ...).
    :param split: 'train' or 'val' ('train' is not implemented yet).
    :param dataset_type: for the 'val' split, either 'tta' (test-time
        augmentation views may be sampled, per args.if_sample_tta_aug_views)
        or 'eval' (no TTA views).
    :raises NotImplementedError: for the 'train' split.
    :raises ValueError: for an unknown split or dataset_type. (Previously an
        unknown split silently returned None and an unknown dataset_type left
        ``if_sample_tta_aug_views`` unbound, crashing with NameError below.)
    """
    from models.videoswintransformer_models.video_dataset import Video_SwinDataset
    if (split == 'train'):
        raise NotImplementedError('Training dataset processing for Video Swin Transformer to be added!')
    if (split != 'val'):
        raise ValueError('Unknown split: {!r}'.format(split))
    if (dataset_type == 'tta'):
        if_sample_tta_aug_views = args.if_sample_tta_aug_views
    elif (dataset_type == 'eval'):
        if_sample_tta_aug_views = False
    else:
        raise ValueError('Unknown dataset_type: {!r}'.format(dataset_type))
    # Only pass the TTA view-sampling styles when TTA views are requested.
    tta_view_sample_style_list = (args.tta_view_sample_style_list if if_sample_tta_aug_views else None)
    return Video_SwinDataset(args.val_vid_list, num_segments=args.clip_length, frame_interval=args.frame_interval, num_clips=args.num_clips, frame_uniform=args.frame_uniform, test_mode=True, flip_ratio=args.flip_ratio, scale_size=args.scale_size, input_size=args.input_size, img_norm_cfg=args.img_norm_cfg, vid_format=args.vid_format, video_data_dir=args.video_data_dir, remove_missing=False, debug=args.debug, if_sample_tta_aug_views=if_sample_tta_aug_views, tta_view_sample_style_list=tta_view_sample_style_list, n_augmented_views=args.n_augmented_views)
def make_client(namespace: str, endpoint: config.EndpointConfiguration, log_if_unconfigured: bool, swallow_network_errors: bool=False) -> Client:
    """Create a Client for *namespace*.

    A configured endpoint gets a real RawTransport; otherwise a NullTransport
    is used (optionally logging that the client is unconfigured).
    """
    if not endpoint:
        transport: Transport = NullTransport(log_if_unconfigured)
    else:
        transport = RawTransport(endpoint, swallow_network_errors=swallow_network_errors)
    return Client(transport, namespace)
def summary(model, input_size, batch_size=(- 1), device='cuda'):
    """Print a Keras-style layer-by-layer summary of a PyTorch model.

    Runs one probe forward pass (batch of 2 random inputs) with forward hooks
    attached to every non-container submodule, then prints each layer's
    output shape and parameter count plus rough memory estimates.

    :param model: the nn.Module to summarize.
    :param input_size: one input shape tuple (without the batch dimension) or
        a list of such tuples for multi-input models.
    :param batch_size: batch size shown in the printed shapes (-1 = dynamic).
    :param device: 'cuda' or 'cpu'; where the probe tensors are allocated.
    """
    def register_hook(module):
        def hook(module, input, output):
            class_name = str(module.__class__).split('.')[(- 1)].split("'")[0]
            module_idx = len(summary)
            m_key = ('%s-%i' % (class_name, (module_idx + 1)))
            summary[m_key] = OrderedDict()
            summary[m_key]['input_shape'] = list(input[0].size())
            summary[m_key]['input_shape'][0] = batch_size
            if isinstance(output, (list, tuple)):
                summary[m_key]['output_shape'] = [([(- 1)] + list(o.size())[1:]) for o in output]
            else:
                summary[m_key]['output_shape'] = list(output.size())
                summary[m_key]['output_shape'][0] = batch_size
            # Count parameters as plain ints so the totals below stay ints;
            # the old tensor accumulation crashed on `.numpy()` whenever the
            # model had no parameters at all.
            params = 0
            if (hasattr(module, 'weight') and hasattr(module.weight, 'size')):
                params += int(torch.prod(torch.LongTensor(list(module.weight.size()))))
                summary[m_key]['trainable'] = module.weight.requires_grad
            if (hasattr(module, 'bias') and hasattr(module.bias, 'size')):
                params += int(torch.prod(torch.LongTensor(list(module.bias.size()))))
            summary[m_key]['nb_params'] = params
        # Hook only real layers: skip containers and the root model itself.
        if ((not isinstance(module, nn.Sequential)) and (not isinstance(module, nn.ModuleList)) and (not (module == model))):
            hooks.append(module.register_forward_hook(hook))

    device = device.lower()
    assert (device in ['cuda', 'cpu']), "Input device is not valid, please specify 'cuda' or 'cpu'"
    if ((device == 'cuda') and torch.cuda.is_available()):
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor
    if isinstance(input_size, tuple):
        input_size = [input_size]
    # Batch of 2 keeps batchnorm layers happy during the probe pass.
    x = [torch.rand(2, *in_size).type(dtype) for in_size in input_size]
    summary = OrderedDict()
    hooks = []
    model.apply(register_hook)
    model(*x)
    # Hooks are only needed for the probe pass; detach them immediately.
    for h in hooks:
        h.remove()
    print('')
    line_new = '{:>20} {:>25} {:>15}'.format('Layer (type)', 'Output Shape', 'Param #')
    print(line_new)
    print('')
    total_params = 0
    total_output = 0
    trainable_params = 0
    for layer in summary:
        line_new = '{:>20} {:>25} {:>15}'.format(layer, str(summary[layer]['output_shape']), '{0:,}'.format(summary[layer]['nb_params']))
        total_params += summary[layer]['nb_params']
        total_output += np.prod(summary[layer]['output_shape'])
        if ('trainable' in summary[layer]):
            if summary[layer]['trainable']:
                trainable_params += summary[layer]['nb_params']
        print(line_new)
    # Rough float32 memory estimates in MB (4 bytes/element); abs() guards
    # against the negative placeholder batch_size.
    total_input_size = abs((((np.prod(input_size) * batch_size) * 4.0) / (1024 ** 2.0)))
    total_output_size = abs((((2.0 * total_output) * 4.0) / (1024 ** 2.0)))
    total_params_size = abs(((total_params * 4.0) / (1024 ** 2.0)))
    total_size = ((total_params_size + total_output_size) + total_input_size)
    print('')
    print('Total params: {0:,}'.format(total_params))
    print('Trainable params: {0:,}'.format(trainable_params))
    print('Non-trainable params: {0:,}'.format((total_params - trainable_params)))
    print('')
    print(('Input size (MB): %0.2f' % total_input_size))
    print(('Forward/backward pass size (MB): %0.2f' % total_output_size))
    print(('Params size (MB): %0.2f' % total_params_size))
    print(('Estimated Total Size (MB): %0.2f' % total_size))
    print('')
class OTTQA(datasets.GeneratorBasedBuilder):
    """Datasets builder for OTT-QA (question answering over tables and text)."""

    def _info(self):
        """Declare the feature schema: question, its table, supporting passage
        text, a flattened table context string, and the answer text."""
        return datasets.DatasetInfo(description=_DESCRIPTION, features=datasets.Features({'id': datasets.Value('string'), 'question': datasets.Value('string'), 'table_id': datasets.Value('string'), 'table': {'header': datasets.features.Sequence(datasets.Value('string')), 'rows': datasets.features.Sequence(datasets.features.Sequence(datasets.Value('string')))}, 'passage': datasets.Value('string'), 'context': datasets.Value('string'), 'answer_text': datasets.Value('string')}), supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION)

    def _split_generators(self, dl_manager):
        """Download train/dev question files plus the shared tables and
        passages, and wire them into the train/validation splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files['train'], 'tablepath': downloaded_files['tables'], 'passagepath': downloaded_files['passages']}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': downloaded_files['dev'], 'tablepath': downloaded_files['tables'], 'passagepath': downloaded_files['passages']})]

    def _generate_examples(self, filepath, tablepath, passagepath):
        """Yield one example per question, joining its table with the passages
        referenced by the question's answer nodes."""
        with open(tablepath, encoding='utf-8') as f:
            tables = json.load(f)
        with open(passagepath, encoding='utf-8') as f:
            passages = json.load(f)
        with open(filepath, encoding='utf-8') as f:
            data = json.load(f)
        for (idx, example) in enumerate(data):
            table = tables[example['table_id']]
            answer_node = example['answer-node']
            answer = example['answer-text']
            # NOTE(review): this assignment rebinds `data` to the table rows,
            # shadowing the JSON list loaded above. Iteration is unaffected
            # (the enumerate iterator was created earlier), but the reuse is
            # confusing — consider renaming.
            (header, data, passage_context_str) = self.construct_expanded_table(table, passages, answer, answer_node)
            (yield (idx, {'id': example['question_id'], 'question': example['question'], 'table_id': example['table_id'], 'table': {'header': header, 'rows': data}, 'passage': passage_context_str, 'context': ((((((table['title'] + ' | ') + table['section_title']) + ' | ') + table['section_text']) + ' | ') + table['intro']), 'answer_text': example['answer-text']}))

    def construct_expanded_table(self, table, passages, answer, answer_nodes):
        """Return (header, rows, passage-context string): for each passage-type
        answer node, keep a window of sentences around the sentence containing
        the answer and concatenate them into a single context string."""
        def process_link(link):
            # 'some/path/Some_Title' -> 'Some Title'
            return link.split('/')[(- 1)].replace('_', ' ')
        selected_passage = {}
        for answer_node in answer_nodes:
            link = answer_node[2]
            type_ = answer_node[3]
            if (type_ == 'passage'):
                passage_text = passages[link]
                sents = nltk.sent_tokenize(passage_text)
                has_answer_sent_idx = (- 1)
                for (idx, sent) in enumerate(sents):
                    # Space-padding so only whole-word matches count.
                    if (((' ' + answer.lower()) + ' ') in ((' ' + sent.lower()) + ' ')):
                        has_answer_sent_idx = idx
                # NOTE(review): the upper bound min(len(sents) - 1, ...) makes
                # the slice always exclude the final sentence — looks like an
                # off-by-one; confirm the intended window behavior.
                selected_sents = sents[max(0, (has_answer_sent_idx - ((WINDOW_SIZE - 1) // 2))):min((len(sents) - 1), (has_answer_sent_idx + ((WINDOW_SIZE - 1) // 2)))]
                selected_passage[process_link(link)] = ' '.join(selected_sents)
            else:
                # Non-passage answer nodes (e.g. table cells) need no passage text.
                pass
        passage_context_str = 'passages: '
        for key in selected_passage:
            passage_context_str += '{}: {} | '.format(key, selected_passage[key])
        return (table['header'], table['data'], passage_context_str)
def load_rl_model(discrete_act, pretrained_dir=None):
    """Build the tsdf-camrest Model configured for RL 'interact' mode.

    :param discrete_act: whether the model uses a discrete action space.
    :param pretrained_dir: if None, only GloVe embeddings are loaded;
        otherwise saved model weights are loaded.
        NOTE(review): the directory value itself is never passed on —
        m.load_model() presumably reads its path from cfg; confirm.
    :return: the initialized Model.
    """
    arg_model = 'tsdf-camrest'
    arg_mode = 'interact'
    cfg.init_handler(arg_model)
    cfg.dataset = arg_model.split('-')[(- 1)]  # dataset name, i.e. 'camrest'
    logging.info(str(cfg))
    if cfg.cuda:
        torch.cuda.set_device(cfg.cuda_device)
        logging.info('Device: {}'.format(torch.cuda.current_device()))
    cfg.mode = arg_mode
    # Seed every RNG source for reproducibility.
    torch.manual_seed(cfg.seed)
    torch.cuda.manual_seed(cfg.seed)
    random.seed(cfg.seed)
    np.random.seed(cfg.seed)
    m = Model(arg_model.split('-')[(- 1)], discrete_act=discrete_act)
    m.count_params()
    if (pretrained_dir is None):
        m.load_glove_embedding()
    else:
        m.load_model()
    return m
def rmse(depth1, depth2):
    """Root-mean-squared error between two depth maps.

    Both inputs must be elementwise finite and non-negative (asserted).
    Returns NaN for empty inputs.
    """
    assert np.all(np.isfinite(depth1) & np.isfinite(depth2) & (depth1 >= 0) & (depth2 >= 0))
    residual = depth1 - depth2
    n = float(residual.size)
    if n == 0:
        return np.nan
    return np.sqrt(np.sum(np.square(residual)) / n)
class DDPTest(ComponentTestCase):
    """Tests for the torchx dist.ddp component and its nnodes parser."""

    def test_ddp(self) -> None:
        """The ddp component definition must pass component validation."""
        import torchx.components.dist as dist
        self.validate(dist, 'ddp')

    def test_ddp_mounts(self) -> None:
        """A four-part bind mount spec parses into exactly one mount."""
        app = ddp(script='foo.py', mounts=['type=bind', 'src=/dst', 'dst=/dst', 'readonly'])
        self.assertEqual(len(app.roles[0].mounts), 1)

    def test_ddp_parse_j(self) -> None:
        """parse_nnodes handles 'procs', 'nodesxprocs' and 'min:maxxprocs' forms."""
        self.assertEqual(parse_nnodes('2'), (1, 1, 2, '1'))
        self.assertEqual(parse_nnodes('1x2'), (1, 1, 2, '1'))
        self.assertEqual(parse_nnodes('1:2x3'), (1, 2, 3, '1:2'))

    def test_ddp_parse_j_exception(self) -> None:
        """Malformed -j strings must raise ValueError."""
        j_exception = ['1x', 'x2', ':3', ':2x1', '1x2:3']
        for j in j_exception:
            with self.assertRaises(ValueError):
                parse_nnodes(j)

    def test_ddp_debug(self) -> None:
        """debug=True must inject every torch debug flag into the role env."""
        app = ddp(script='foo.py', debug=True)
        env = app.roles[0].env
        for (k, v) in _TORCH_DEBUG_FLAGS.items():
            self.assertEqual(env[k], v)

    def test_ddp_rdzv_backend_static(self) -> None:
        """The static rendezvous backend adds --rdzv_backend and --node_rank."""
        app = ddp(script='foo.py', rdzv_backend='static')
        cmd = app.roles[0].args[1]
        self.assertTrue(('--rdzv_backend static' in cmd))
        self.assertTrue(('--node_rank' in cmd))
def run_legate(args):
    """Benchmark legate/cunumeric: read two zarr arrays and combine them.

    Generator yielding a single zero-arg callable that runs the benchmark
    once and returns the elapsed wall time in seconds. NOTE(review): the
    yield-a-function shape presumably matches this harness's other run_*
    benchmarks (consumed via ``next(run_legate(args))()``); confirm at the caller.
    """
    import cunumeric as num
    from legate.core import get_legate_runtime
    from legate_kvikio.zarr import read_array

    def f():
        # Fence so previously queued legate work doesn't leak into the timing.
        get_legate_runtime().issue_execution_fence(block=True)
        t0 = clock()
        a = read_array((args.dir / 'A'))
        b = read_array((args.dir / 'B'))
        c = args.op(num, a, b)
        # Force materialization of the lazy result before stopping the clock.
        int(c.sum())
        t1 = clock()
        return (t1 - t0)
    (yield f)
def main(args):
    """Export a pretrained RRDBNet (x4 upscaling) checkpoint to ONNX.

    :param args: needs ``input`` (checkpoint path), ``output`` (ONNX path) and
        ``params`` (truthy = load the plain 'params' weights, otherwise the
        EMA weights stored under 'params_ema').
    """
    model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
    if args.params:
        keyname = 'params'
    else:
        keyname = 'params_ema'
    model.load_state_dict(torch.load(args.input)[keyname])
    model.train(False)
    model.cpu().eval()
    # Dummy 64x64 RGB input sets the exported graph's example shape.
    x = torch.rand(1, 3, 64, 64)
    with torch.no_grad():
        # NOTE(review): torch.onnx._export is a private API; the public
        # torch.onnx.export is the supported entry point — confirm before
        # upgrading torch.
        torch_out = torch.onnx._export(model, x, args.output, opset_version=11, export_params=True)
    print(torch_out.shape)
class UsmmCscDense(_NoPythonCOp):
    """C-implemented op computing ``alpha * x @ y + z`` where ``x`` is a
    sparse matrix given in CSC pieces (x_val/x_ind/x_ptr plus its row count
    x_nrows), ``y`` and ``z`` are dense matrices and ``alpha`` is a (1, 1)
    tensor. The C kernel accumulates one BLAS *axpy per stored nonzero.

    With ``inplace=True`` the result is written directly into ``z``
    (input 6 is destroyed; see ``destroy_map``).
    """
    __props__ = ('inplace',)

    def __init__(self, inplace):
        self.inplace = inplace
        if inplace:
            # Output 0 reuses/destroys input 6 (z).
            self.destroy_map = {0: [6]}

    def __str__(self):
        if self.inplace:
            return 'UsmmCscDense{inplace}'
        else:
            return 'UsmmCscDense{no_inplace}'

    def make_node(self, alpha, x_val, x_ind, x_ptr, x_nrows, y, z):
        """Build the Apply node, upcasting all float operands to a common
        dtype (float32/float64 only; CSC index inputs must be int32)."""
        alpha = as_tensor_variable(alpha)
        x_val = as_tensor_variable(x_val)
        x_ind = as_tensor_variable(x_ind)
        x_ptr = as_tensor_variable(x_ptr)
        x_nrows = as_tensor_variable(x_nrows)
        y = as_tensor_variable(y)
        z = as_tensor_variable(z)
        assert (x_ind.dtype == 'int32')
        assert (x_ptr.dtype == 'int32')
        assert (x_nrows.dtype == 'int32')
        assert ((alpha.ndim == 2) and (alpha.type.shape == (1, 1)))
        assert (x_val.ndim == 1)
        assert (y.ndim == 2)
        assert (z.ndim == 2)
        dtype_out = ps.upcast(alpha.type.dtype, x_val.type.dtype, y.type.dtype, z.type.dtype)
        if (dtype_out not in ('float32', 'float64')):
            raise NotImplementedError('only float types are supported in operands')
        if self.inplace:
            # In-place accumulation requires z to already have the output dtype.
            assert (z.type.dtype == dtype_out)
        if (dtype_out != alpha.type.dtype):
            alpha = cast(alpha, dtype_out)
        if (dtype_out != x_val.type.dtype):
            x_val = cast(x_val, dtype_out)
        if (dtype_out != y.type.dtype):
            y = cast(y, dtype_out)
        if (dtype_out != z.type.dtype):
            z = cast(z, dtype_out)
        # Output column count is only statically known when y has one column.
        r = Apply(self, [alpha, x_val, x_ind, x_ptr, x_nrows, y, z], [tensor(dtype=dtype_out, shape=(None, (1 if (y.type.shape[1] == 1) else None)))])
        return r

    def c_support_code(self, **kwargs):
        # BLAS prototypes needed by the generated kernel.
        return blas.blas_header_text()

    def c_libraries(self, **kwargs):
        return blas.ldflags()

    def c_compile_args(self, **kwargs):
        return blas.ldflags(libs=False, flags=True)

    def c_lib_dirs(self, **kwargs):
        return blas.ldflags(libs=False, libs_dir=True)

    def c_header_dirs(self, **kwargs):
        return blas.ldflags(libs=False, include_dir=True)

    def c_code(self, node, name, inputs, outputs, sub):
        """Emit the C kernel: validate dtypes/shapes, (re)allocate or alias the
        output, then accumulate alpha * x@y into it with one {s,d}axpy call
        per stored nonzero of the CSC matrix."""
        (alpha, x_val, x_ind, x_ptr, x_nrows, y, z) = inputs
        zn = outputs[0]
        if (node.inputs[1].type.dtype in ('complex64', 'complex128')):
            raise NotImplementedError('Complex types are not supported for x_val')
        if (node.inputs[5].type.dtype in ('complex64', 'complex128')):
            raise NotImplementedError('Complex types are not supported for y')
        if (node.inputs[6].type.dtype != node.outputs[0].type.dtype):
            raise NotImplementedError('z and output must have same type')
        # Pick the BLAS axpy flavor matching the operand dtype.
        if (node.inputs[1].type.dtype == 'float32'):
            conv_type = 'float'
            axpy = 'saxpy_'
        else:
            conv_type = 'double'
            axpy = 'daxpy_'
        typenum_alpha = node.inputs[0].type.dtype_specs()[2]
        typenum_x_val = node.inputs[1].type.dtype_specs()[2]
        typenum_y = node.inputs[5].type.dtype_specs()[2]
        typenum_z = node.inputs[6].type.dtype_specs()[2]
        typenum_zn = node.outputs[0].type.dtype_specs()[2]
        inplace = int(self.inplace)
        rval = ('\n\n if (PyArray_NDIM(%(x_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(x_val) != 1"); %(fail)s;}\n if (PyArray_NDIM(%(x_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(x_ind) != 1"); %(fail)s;}\n if (PyArray_NDIM(%(x_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(x_ptr) != 1"); %(fail)s;}\n if (PyArray_NDIM(%(x_nrows)s) != 0) {PyErr_SetString(PyExc_NotImplementedError, "rank(nrows) != 0"); %(fail)s;}\n if (PyArray_NDIM(%(y)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(y) != 2"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_val)s) != %(typenum_x_val)s) {\n PyErr_SetString(PyExc_NotImplementedError, "Invalid type for x_val"); %(fail)s;}\n\n if (PyArray_TYPE(%(y)s) != %(typenum_y)s) {\n PyErr_SetString(PyExc_NotImplementedError, "Invalid type for y"); %(fail)s;}\n\n if (PyArray_TYPE(%(z)s) != %(typenum_z)s) {\n PyErr_SetString(PyExc_NotImplementedError, "Invalid type for z"); %(fail)s;}\n\n if (PyArray_TYPE(%(alpha)s) != %(typenum_alpha)s) {\n PyErr_SetString(PyExc_NotImplementedError, "Invalid type for alpha"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, "x_ind dtype not INT32"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, "x_ptr dtype not INT32"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_nrows)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, "x_nrows dtype not INT32"); %(fail)s;}\n\n if (PyArray_DIMS(%(x_val)s)[0] != PyArray_DIMS(%(x_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, "x_val and x_ind have different lengths"); %(fail)s;}\n\n if (PyArray_DIMS(%(x_ptr)s)[0] != PyArray_DIMS(%(y)s)[0]+1)\n {PyErr_SetString(PyExc_NotImplementedError, "x\'s number of columns doesn\'t match y\'s rows"); %(fail)s;}\n\n if (PyArray_DIMS(%(z)s)[0] != ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0] || PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(y)s)[1])\n {PyErr_SetString(PyExc_NotImplementedError, "The dimension of the allocated output doesn\'t match the correct output size."); %(fail)s;}\n\n if (PyArray_SIZE(%(alpha)s) != 1)\n {PyErr_SetString(PyExc_NotImplementedError, "The number of element in alpha must be 1"); %(fail)s;}\n\n if (PyArray_NDIM(%(alpha)s) != 2)\n {PyErr_SetString(PyExc_NotImplementedError, "The number dimension of alpha must be 2"); %(fail)s;}\n\n if (PyArray_NDIM(%(x_val)s) != 1)\n {PyErr_SetString(PyExc_NotImplementedError, "The number dimension of x_val must be 1"); %(fail)s;}\n\n if (PyArray_NDIM(%(y)s) != 2)\n {PyErr_SetString(PyExc_NotImplementedError, "The number dimension of y must be 2"); %(fail)s;}\n\n if (PyArray_NDIM(%(z)s) != 2)\n {PyErr_SetString(PyExc_NotImplementedError, "The number dimension of z must be 2"); %(fail)s;}\n\n if (%(inplace)s)\n {\n if (%(typenum_zn)s != %(typenum_z)s) {\n PyErr_SetString(PyExc_NotImplementedError, "When inplace the output dtype must be the same as the input"); %(fail)s;}\n\n Py_XDECREF(%(zn)s);\n %(zn)s = %(z)s;\n Py_INCREF(%(zn)s);\n }\n else if (!%(zn)s\n || (PyArray_DIMS(%(zn)s)[0] != ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0])\n || (PyArray_DIMS(%(zn)s)[1] != PyArray_DIMS(%(y)s)[1])\n )\n {\n {Py_XDECREF(%(zn)s);}\n npy_intp dims[] = {0, 0};\n dims[0] = ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0];\n dims[1] = PyArray_DIMS(%(y)s)[1];\n %(zn)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_zn)s);\n }\n\n {\n // sparse array has size MxK, dense KxN, output MxN\n npy_intp M = PyArray_DIMS(%(zn)s)[0];\n npy_intp N = PyArray_DIMS(%(zn)s)[1];\n npy_intp K = PyArray_DIMS(%(y)s)[0];\n\n // pointers to access actual data in the arrays passed as params.\n const dtype_%(x_val)s* __restrict__ Dval = (dtype_%(x_val)s*)PyArray_DATA(%(x_val)s);\n const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(x_ind)s);\n const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(x_ptr)s);\n const dtype_%(alpha)s alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];\n\n npy_intp Sz = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Szn = PyArray_STRIDES(%(zn)s)[1] / PyArray_DESCR(%(zn)s)->elsize;\n npy_intp Sval = PyArray_STRIDES(%(x_val)s)[0] / PyArray_DESCR(%(x_val)s)->elsize;\n npy_intp Sind = PyArray_STRIDES(%(x_ind)s)[0] / PyArray_DESCR(%(x_ind)s)->elsize;\n npy_intp Sptr = PyArray_STRIDES(%(x_ptr)s)[0] / PyArray_DESCR(%(x_ptr)s)->elsize;\n npy_intp Sy = PyArray_STRIDES(%(y)s)[1] / PyArray_DESCR(%(y)s)->elsize;\n\n // blas expects ints; convert here (rather than just making N etc ints) to avoid potential overflow in the negative-stride correction\n if ((N > 0x7fffffffL)||(Sy > 0x7fffffffL)||(Szn > 0x7fffffffL)||(Sy < -0x7fffffffL)||(Szn < -0x7fffffffL))\n {PyErr_SetString(PyExc_NotImplementedError, "array too big for BLAS (overflows int32 index)"); %(fail)s;}\n int N32 = N;\n int Sy32 = Sy;\n int Szn32 = Szn;\n\n if (!(%(inplace)s))\n {\n if (PyArray_CopyInto(%(zn)s, %(z)s))\n {\n Py_XDECREF(%(zn)s);\n %(fail)s;\n }\n }\n\n for (npy_intp k = 0; k < K; ++k)\n {\n for (npy_int32 m_idx = Dptr[k * Sptr]; m_idx < Dptr[(k+1)*Sptr]; ++m_idx)\n {\n const npy_int32 m = Dind[m_idx * Sind]; // row index of non-null value for column K\n\n const dtype_%(x_val)s Amk = alpha * Dval[m_idx * Sval]; // actual value at that location\n\n dtype_%(y)s* y_row = (dtype_%(y)s*)(PyArray_BYTES(%(y)s) + PyArray_STRIDES(%(y)s)[0] * k);\n // axpy expects pointer to the beginning of memory arrays,\n // so when the stride is negative, we need to get the\n // last element\n if (Sy < 0)\n y_row += (K - 1) * Sy;\n\n dtype_%(zn)s* z_row = (dtype_%(zn)s*)(PyArray_BYTES(%(zn)s) + PyArray_STRIDES(%(zn)s)[0] * m);\n if (Szn < 0)\n z_row += (N - 1) * Szn;\n\n %(axpy)s(&N32, (%(conv_type)s*)&Amk, (%(conv_type)s*)y_row, &Sy32, (%(conv_type)s*)z_row, &Szn32);\n }\n }\n }\n ' % dict(locals(), **sub))
        return rval

    def c_code_cache_version(self):
        # Bump the first element whenever the generated C code changes.
        return (3, blas.blas_header_version())
def compute_mask_indices(shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str='static', mask_other: float=0.0, min_masks: int=0, no_overlap: bool=False, min_space: int=0) -> np.ndarray:
    """Compute random span-mask indices for wav2vec-style masking.

    :param shape: (batch size, sequence length) of the mask to build.
    :param padding_mask: optional (bsz, seq) tensor marking padded positions,
        which are never masked.
    :param mask_prob: probability of each token being the start of a span
        (scaled by span length; rounded probabilistically).
    :param mask_length: base span length.
    :param mask_type: how span lengths are drawn: 'static', 'uniform',
        'normal' or 'poisson' ('mask_other' is the second distribution param).
    :param min_masks: minimum number of spans per sample.
    :param no_overlap: if True, place spans so they never overlap.
    :param min_space: with no_overlap, minimum gap kept between spans.
    :return: boolean np.ndarray of the given shape; every row has the same
        number of True entries.
    """
    (bsz, all_sz) = shape
    mask = np.full((bsz, all_sz), False)
    # Expected number of spans; adding rand() rounds probabilistically.
    all_num_mask = int((((mask_prob * all_sz) / float(mask_length)) + np.random.rand()))
    all_num_mask = max(min_masks, all_num_mask)
    mask_idcs = []
    for i in range(bsz):
        if (padding_mask is not None):
            # Only the unpadded prefix of this row is maskable.
            sz = (all_sz - padding_mask[i].long().sum().item())
            num_mask = int((((mask_prob * sz) / float(mask_length)) + np.random.rand()))
            num_mask = max(min_masks, num_mask)
        else:
            sz = all_sz
            num_mask = all_num_mask
        # Draw span lengths according to the requested distribution.
        if (mask_type == 'static'):
            lengths = np.full(num_mask, mask_length)
        elif (mask_type == 'uniform'):
            lengths = np.random.randint(mask_other, ((mask_length * 2) + 1), size=num_mask)
        elif (mask_type == 'normal'):
            lengths = np.random.normal(mask_length, mask_other, size=num_mask)
            lengths = [max(1, int(round(x))) for x in lengths]
        elif (mask_type == 'poisson'):
            lengths = np.random.poisson(mask_length, size=num_mask)
            lengths = [int(round(x)) for x in lengths]
        else:
            raise Exception(('unknown mask selection ' + mask_type))
        if (sum(lengths) == 0):
            # Degenerate draw: force at least one non-empty span.
            lengths[0] = min(mask_length, (sz - 1))
        if no_overlap:
            mask_idc = []
            def arrange(s, e, length, keep_length):
                # Place one span inside [s, e) and return the remaining free
                # sub-intervals that can still host further spans.
                span_start = np.random.randint(s, (e - length))
                mask_idc.extend(((span_start + i) for i in range(length)))
                new_parts = []
                if (((span_start - s) - min_space) >= keep_length):
                    new_parts.append((s, ((span_start - min_space) + 1)))
                if ((((e - span_start) - keep_length) - min_space) > keep_length):
                    new_parts.append((((span_start + length) + min_space), e))
                return new_parts
            parts = [(0, sz)]
            min_length = min(lengths)
            for length in sorted(lengths, reverse=True):
                # Weight each free interval by its usable size. Plain `int`
                # here: the deprecated `np.int` alias was removed in
                # NumPy 1.24 and crashed this branch.
                lens = np.fromiter((((e - s) if ((e - s) >= (length + min_space)) else 0) for (s, e) in parts), int)
                l_sum = np.sum(lens)
                if (l_sum == 0):
                    break
                probs = (lens / np.sum(lens))
                c = np.random.choice(len(parts), p=probs)
                (s, e) = parts.pop(c)
                parts.extend(arrange(s, e, length, min_length))
            mask_idc = np.asarray(mask_idc)
        else:
            min_len = min(lengths)
            if ((sz - min_len) <= num_mask):
                min_len = ((sz - num_mask) - 1)
            # Draw span starts, then expand each start into its span indices.
            mask_idc = np.random.choice((sz - min_len), num_mask, replace=False)
            mask_idc = np.asarray([(mask_idc[j] + offset) for j in range(len(mask_idc)) for offset in range(lengths[j])])
        mask_idcs.append(np.unique(mask_idc[(mask_idc < sz)]))
    # Trim every row to the same number of masked positions.
    min_len = min([len(m) for m in mask_idcs])
    for (i, mask_idc) in enumerate(mask_idcs):
        if (len(mask_idc) > min_len):
            mask_idc = np.random.choice(mask_idc, min_len, replace=False)
        mask[(i, mask_idc)] = True
    return mask
def get_decode_dir_name(ckpt_name):
    """Build the directory name used to store decoding results.

    The name encodes the dataset split (inferred from FLAGS.data_path) and
    the decoding hyperparameters; *ckpt_name*, when given, is appended.

    Raises:
        ValueError: if FLAGS.data_path names none of train/val/test.
    """
    for candidate in ('train', 'val', 'test'):
        if candidate in FLAGS.data_path:
            dataset = candidate
            break
    else:
        raise ValueError('FLAGS.data_path %s should contain one of train, val or test' % FLAGS.data_path)
    dirname = 'decode_%s_%s_%imaxenc_%ibeam_%imindec_%imaxdec' % (dataset, FLAGS.decode_from, FLAGS.max_enc_steps, FLAGS.beam_size, FLAGS.min_dec_steps, FLAGS.max_dec_steps)
    if ckpt_name is not None:
        dirname += '_%s' % ckpt_name
    return dirname
def generate_output_file_seg():
    """Segment dialogue contexts and write them to ``output_file_seg``.

    Reads every ``interval``-th non-empty line of ``input_file`` (these are
    module-level globals — TODO confirm), keeping the tab-separated fields
    between the first and last as the context's utterances.
    ``cut_list_file`` provides, per context, a JSON list of utterance
    indices at which to split; the utterances between cut points are joined
    with spaces into one segment, and segments are written tab-separated,
    one context per line.
    """
    contexts = []
    num = 0
    with open(input_file, 'r', encoding='utf-8') as rf:
        for line in rf:
            line = line.strip()
            # Sample every `interval`-th non-empty line; drop the first and
            # last tab-separated fields (presumably id/label — verify).
            if (line and ((num % interval) == 0)):
                contexts.append(line.split('\t')[1:(- 1)])
            num += 1
    print(num)
    with open(cut_list_file, 'r', encoding='utf-8') as rf:
        cutlist = json.loads(rf.read())
    print(len(contexts))
    print(len(cutlist))
    with open(output_file_seg, 'w', encoding='utf-8') as wf:
        for (context, cutl) in zip(contexts, cutlist):
            seg_list = []
            seg = ''
            index_1 = 0
            index_2 = 0
            for (index, utt) in enumerate(context):
                # Accumulate utterances into the current segment.
                if seg:
                    seg = ((seg + ' ') + utt)
                else:
                    seg = utt
                # Close the segment whenever the running utterance counter
                # reaches the next cut point.
                if (index_1 == cutl[index_2]):
                    index_2 += 1
                    seg_list.append(seg)
                    seg = ''
                index_1 += 1
            wf.write(('\t'.join(seg_list) + '\n'))
class CloseMatch(Token):
    """Parser token that matches *match_string* allowing up to
    *maxMismatches* single-character substitutions (insertions and
    deletions are not handled).

    The parse result contains the matched text plus two named results:
    'original' (the target string) and 'mismatches' (the list of differing
    character positions).
    """

    def __init__(self, match_string, maxMismatches=1):
        super().__init__()
        self.name = match_string
        self.match_string = match_string
        self.maxMismatches = maxMismatches
        self.errmsg = ('Expected %r (with up to %d mismatches)' % (self.match_string, self.maxMismatches))
        self.mayIndexError = False
        self.mayReturnEmpty = False

    def parseImpl(self, instring, loc, doActions=True):
        start = loc
        instrlen = len(instring)
        maxloc = (start + len(self.match_string))
        # Only attempt the match if enough input remains.
        if (maxloc <= instrlen):
            match_string = self.match_string
            match_stringloc = 0
            mismatches = []
            maxMismatches = self.maxMismatches
            # Compare character by character; abort (break) as soon as the
            # mismatch budget is exceeded.
            for (match_stringloc, s_m) in enumerate(zip(instring[loc:maxloc], match_string)):
                (src, mat) = s_m
                if (src != mat):
                    mismatches.append(match_stringloc)
                    if (len(mismatches) > maxMismatches):
                        break
            else:
                # for/else: reached only when the loop did NOT break, i.e.
                # the whole string matched within the allowed mismatches.
                loc = ((start + match_stringloc) + 1)
                results = ParseResults([instring[start:loc]])
                results['original'] = match_string
                results['mismatches'] = mismatches
                return (loc, results)
        raise ParseException(instring, loc, self.errmsg, self)
class Effect2794(BaseEffect):
    """Passive effect that raises the 'accessDifficultyBonus' attribute of
    fitted modules whose item requires the Salvaging skill."""
    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        # Only modules requiring the Salvaging skill are affected.
        def requires_salvaging(mod):
            return mod.item.requiresSkill('Salvaging')
        bonus = container.getModifiedItemAttr('accessDifficultyBonus')
        fit.modules.filteredItemIncrease(requires_salvaging, 'accessDifficultyBonus', bonus, position='post', **kwargs)
class F18_PartData(F17_PartData):
    """F18 partition data: extends F17 with the --hibernation and --cipher
    options."""
    removedKeywords = F17_PartData.removedKeywords
    removedAttrs = F17_PartData.removedAttrs

    def __init__(self, *args, **kwargs):
        F17_PartData.__init__(self, *args, **kwargs)
        # Whether the partition should be sized for hibernation.
        self.hibernation = kwargs.get('hibernation', False)
        # Encryption cipher name; only emitted when encryption is on.
        self.cipher = kwargs.get('cipher', '')

    def _getArgsAsStr(self):
        """Serialize this object back into kickstart option syntax."""
        pieces = [F17_PartData._getArgsAsStr(self)]
        if self.hibernation:
            pieces.append(' --hibernation')
        if self.encrypted and self.cipher:
            pieces.append(' --cipher="%s"' % self.cipher)
        return ''.join(pieces)
def compute_global_div_n(caps, n=1):
    """Global n-gram diversity over all captions.

    Walks every caption in *caps* (a dict mapping keys to caption lists),
    accumulating the set of distinct n-grams seen so far.  For n == 1 the
    running value is the raw distinct count; otherwise it is normalized by
    the running token total.  Returns the first running value and the full
    series repeated len(caps) times.
    """
    running = []
    seen_ngrams = set()
    token_total = 0.0
    for caption_list in caps.values():
        for caption in caption_list:
            tokens = caption.split()
            token_total += len(tokens)
            seen_ngrams.update(find_ngrams(tokens, n))
            if n == 1:
                running.append(float(len(seen_ngrams)))
            else:
                # Small epsilon guards against division by zero.
                running.append(float(len(seen_ngrams)) / (1e-06 + float(token_total)))
    return (running[0], np.repeat(np.array(running), len(caps)))
def update_config(config):
    """Override entries of *config* in place from the command line.

    For every key in *config* a ``--<key>`` option is registered; list- or
    None-valued defaults accept multiple values (``nargs='+'``).  Supplied
    values are coerced to the type of the configured default (bool values
    must be the literal strings 'True'/'False'; ints are parsed with
    ``int``); everything else is kept as parsed.  Unsupplied options leave
    the configured default untouched.

    Returns:
        The mutated *config* dict.

    Raises:
        Exception: if a bool-typed option receives anything other than
            'True' or 'False'.
    """
    parser = argparse.ArgumentParser()
    for setting, default in config.items():
        if isinstance(default, list) or default is None:
            parser.add_argument('--' + setting, nargs='+')
        else:
            parser.add_argument('--' + setting)
    args = vars(parser.parse_args())
    for setting, raw in args.items():
        if raw is None:
            continue  # option not given on the command line
        default = config[setting]
        # bool must be tested before int: bool is a subclass of int.
        if isinstance(default, bool):
            if raw == 'True':
                value = True
            elif raw == 'False':
                value = False
            else:
                # BUG FIX: the original message was missing the space
                # before "must" ("...parameter Xmust be True or False").
                raise Exception('Command line parameter ' + setting + ' must be True or False')
        elif isinstance(default, int):
            value = int(raw)
        else:
            value = raw
        config[setting] = value
    return config
# NOTE(review): the three leading lines look like pytest markers whose
# '@pytest.mark' prefix was lost during extraction (presumably
# @pytest.mark.skip / @pytest.mark.slow / @pytest.mark.parametrize).
# As written this is not valid Python — confirm against the original file.
.skip
.slow
.parametrize('alg', learn_args.keys())
def test_mnist(alg):
    """Smoke test: the given learning algorithm should reach a score of at
    least 0.6 on the toy MnistEnv environment via simple_test."""
    learn_kwargs = learn_args[alg]
    learn_kwargs.update(common_kwargs)
    learn = get_learn_function(alg)
    learn_fn = (lambda e: learn(env=e, **learn_kwargs))
    env_fn = (lambda : MnistEnv(seed=0, episode_len=100))
    simple_test(env_fn, learn_fn, 0.6)
class Hook():
    """A hook descriptor: a callback, optional user data, and an effective
    address range [begin, end].

    An inverted range (end < begin, the default 1/0) means the hook matches
    every address.
    """

    def __init__(self, callback: Callable, user_data: Any=None, begin: int=1, end: int=0):
        self.callback = callback
        self.user_data = user_data
        self.begin = begin
        self.end = end

    def bound_check(self, pc: int, size: int=1) -> bool:
        """Return True when the range [pc, pc + size) touches the hook's
        address range, or when the hook is unbounded."""
        if self.end < self.begin:
            return True  # inverted range: hook applies everywhere
        last = (pc + size) - 1
        return (self.begin <= pc <= self.end) or (self.begin <= last <= self.end)

    def check(self, *args) -> bool:
        """Base hooks accept every event; subclasses may filter."""
        return True

    def call(self, ql, *args):
        """Invoke the callback, appending user_data when one was given."""
        if self.user_data is None:
            return self.callback(ql, *args)
        return self.callback(ql, *args, self.user_data)
def profile_tf_runningmeanstd():
    """Micro-benchmark comparing the numpy RunningMeanStd with the
    TensorFlow-backed TfRunningMeanStd.

    Times `update` and mean-read cost over n_trials iterations, prints the
    wall-clock results, and asserts both implementations agree on the mean.
    """
    import time
    from baselines.common import tf_util
    # Single-threaded session so the two timings are comparable.
    tf_util.get_session(config=tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1, allow_soft_placement=True))
    x = np.random.random((376,))
    n_trials = 10000
    rms = RunningMeanStd()
    tfrms = TfRunningMeanStd()
    tic1 = time.time()
    for _ in range(n_trials):
        rms.update(x)
    tic2 = time.time()
    for _ in range(n_trials):
        tfrms.update(x)
    tic3 = time.time()
    print('rms update time ({} trials): {} s'.format(n_trials, (tic2 - tic1)))
    print('tfrms update time ({} trials): {} s'.format(n_trials, (tic3 - tic2)))
    tic1 = time.time()
    for _ in range(n_trials):
        z1 = rms.mean
    tic2 = time.time()
    for _ in range(n_trials):
        z2 = tfrms.mean
    # Sanity check: both implementations must report the same mean.
    assert (z1 == z2)
    tic3 = time.time()
    print('rms get mean time ({} trials): {} s'.format(n_trials, (tic2 - tic1)))
    print('tfrms get mean time ({} trials): {} s'.format(n_trials, (tic3 - tic2)))
def add_summarizer_args(parser):
    """Register all summarizer-related command line options on *parser* and
    return it: model choice, checkpoint directories, decoding
    hyperparameters, and GPT-3 / Alpa endpoint settings."""
    add = parser.add_argument
    # Model selection and checkpoint locations.
    add('--summarizer', type=str, default='gpt3_summarizer', choices=SUMMARIZER_CHOICES, help='model architecture')
    add('--summarizer-save-dir', type=str, default=None, help='directory to save summarizer')
    add('--summarizer-load-dir', type=str, default=None, help='directory to load summarizer')
    add('--expander', action='store_true', help='swap source and target to learn expanding a summary')
    # Decoding hyperparameters.
    add('--summarizer-temperature', type=float, default=0.8, help='temperature for summarizer')
    add('--opt-summarizer-temperature', type=float, default=0.8, help='temperature for OPT summarizer during main story generation')
    add('--summarizer-top-p', type=float, default=1.0, help='top p for summarizer')
    add('--summarizer-frequency-penalty', type=float, default=0.5, help='frequency penalty for summarizer')
    add('--summarizer-prompt-penalty', type=float, default=0.5, help='OPT control penalty for prompt tokens for summarizer, excluding stopwords/punc/names')
    add('--summarizer-frequency-penalty-decay', type=float, default=0.98, help='frequency penalty decay for OPT summarizer')
    add('--summarizer-presence-penalty', type=float, default=0, help='presence penalty for summarizer')
    add('--generation-max-length', type=int, default=256, help='max length for generation, not including prompt')
    add('--summarizer-beam-size', type=int, default=1, help='beam size for summarizer')
    # GPT-3 / context settings.
    add('--gpt3-model', type=str, default='text-davinci-002', help='gpt3 model or finetuned ckpt for GPT3Summarizer')
    add('--max-context-length', type=int, default=1024, help='max length for context to facilitate toy version')
    # Alpa API endpoint.
    add('--alpa-url', type=str, default=None, help='url for alpa API')
    add('--alpa-port', type=str, default=None, help='port for alpa API, if alpa-url is a filename to read server location from. convenient for slurm')
    add('--alpa-key', type=str, default='', help='key for alpa API, if using the public API')
    return parser
def calculate_class_properties(graph: Graph, scc: list[str], errors: Errors) -> None:
    """Post-semantic-analysis pass over one SCC of modules.

    For every class (TypeInfo) defined locally in the SCC, compute its
    abstract status, protocol status, class variables and type promotions.
    """
    builtins = graph['builtins'].tree
    assert builtins
    for module in scc:
        state = graph[module]
        tree = state.tree
        assert tree  # semantic analysis must already have parsed the module
        for (_, node, _) in tree.local_definitions():
            if isinstance(node.node, TypeInfo):
                # Run the checks inside the module's file context so any
                # diagnostics are attributed to the right file/options.
                with state.manager.semantic_analyzer.file_context(tree, state.options, node.node):
                    calculate_class_abstract_status(node.node, tree.is_stub, errors)
                    check_protocol_status(node.node, errors)
                    calculate_class_vars(node.node)
                    add_type_promotion(node.node, tree.names, graph[module].options, builtins.names)
# NOTE(review): '.xfail' looks like a pytest marker that lost its
# '@pytest.mark' prefix during extraction (presumably
# @pytest.mark.xfail(...)); as written this line is not valid Python.
.xfail(reason='new_column_names is deprecated.')
def test_new_column_names(process_test_df):
    """process_text with new_column_names should equal a manual
    .str.slice assignment (expected to xfail: parameter deprecated)."""
    result = process_test_df.process_text(column_name='text', new_column_names='new_text', string_function='slice', start=2)
    expected = process_test_df.assign(new_text=process_test_df['text'].str.slice(start=2))
    assert_frame_equal(result, expected)
def parse_coredumpctl_line(line):
    """Parse one fixed-width `coredumpctl list` output line into a Line.

    Each field is described by a (start, end, converter) column slice;
    'exe' runs from column 54 to the end of the line.
    """
    fields = {'time': (0, 28, str), 'pid': (29, 35, int), 'uid': (36, 41, int), 'gid': (42, 47, int), 'sig': (48, 51, int), 'present': (52, 53, _convert_present), 'exe': (54, None, str)}
    data = {name: convert(line[start:end]) for name, (start, end, convert) in fields.items()}
    return Line(**data)
# Builder command for viewing, adding and deleting lock definitions.
# NOTE(review): in Evennia the class docstring doubles as in-game help
# text, so documentation is kept in comments to avoid changing that output.
class CmdLock(ObjManipCommand):
    key = 'lock'
    aliases = ['locks']
    # Access lock governing who may use this command itself.
    locks = 'cmd: perm(locks) or perm(Builder)'
    help_category = 'Building'

    def func(self):
        # Three forms: "lock obj/access_type" (view or /del one lock),
        # "lock obj = lockstring" (assign), "lock obj" (list all locks).
        caller = self.caller
        if (not self.args):
            string = 'Usage: lock <object>[ = <lockstring>] or lock[/switch] <object>/<access_type>'
            caller.msg(string)
            return
        if ('/' in self.lhs):
            # Form: lock obj/access_type -- inspect or delete one lock.
            (objname, access_type) = [p.strip() for p in self.lhs.split('/', 1)]
            obj = None
            if objname.startswith('*'):
                # Leading '*' targets an account rather than an object.
                obj = caller.search_account(objname.lstrip('*'))
            if (not obj):
                obj = caller.search(objname)
            if (not obj):
                return
            has_control_access = obj.access(caller, 'control')
            if ((access_type == 'control') and (not has_control_access)):
                caller.msg("You need 'control' access to change this type of lock.")
                return
            if (not (has_control_access or obj.access(caller, 'edit'))):
                caller.msg('You are not allowed to do that.')
                return
            lockdef = obj.locks.get(access_type)
            if lockdef:
                if ('del' in self.switches):
                    obj.locks.delete(access_type)
                    string = ('deleted lock %s' % lockdef)
                else:
                    string = lockdef
            else:
                string = ("%s has no lock of access type '%s'." % (obj, access_type))
            caller.msg(string)
            return
        if self.rhs:
            # Form: lock obj = lockstring -- assign a new lock definition.
            if self.switches:
                swi = ', '.join(self.switches)
                caller.msg(('Switch(es) |w%s|n can not be used with a lock assignment. Use e.g. |wlock/del objname/locktype|n instead.' % swi))
                return
            (objname, lockdef) = (self.lhs, self.rhs)
            obj = None
            if objname.startswith('*'):
                obj = caller.search_account(objname.lstrip('*'))
            if (not obj):
                obj = caller.search(objname)
            if (not obj):
                return
            if (not (obj.access(caller, 'control') or obj.access(caller, 'edit'))):
                caller.msg('You are not allowed to do that.')
                return
            ok = False
            # Strip quotes from the lockstring; presumably they would break
            # the lock parser -- TODO confirm.
            lockdef = re.sub('\\\'|\\"', '', lockdef)
            try:
                ok = obj.locks.add(lockdef)
            except LockException as e:
                caller.msg(str(e))
            if (('cmd' in lockdef.lower()) and inherits_from(obj, 'evennia.objects.objects.DefaultExit')):
                # Re-initialize exits after cmd-lock changes; presumably to
                # refresh cached command sets -- TODO confirm.
                obj.at_init()
            if ok:
                caller.msg(("Added lock '%s' to %s." % (lockdef, obj)))
            return
        # Form: lock obj -- display all locks set on the object.
        obj = None
        if self.lhs.startswith('*'):
            obj = caller.search_account(self.lhs.lstrip('*'))
        if (not obj):
            obj = caller.search(self.lhs)
        if (not obj):
            return
        if (not (obj.access(caller, 'control') or obj.access(caller, 'edit'))):
            caller.msg('You are not allowed to do that.')
            return
        caller.msg('\n'.join(obj.locks.all()))
def get_ansible_host(config: configparser.ConfigParser, inventory: Inventory, host: str, ssh_config: Optional[str]=None, ssh_identity_file: Optional[str]=None) -> Optional[testinfra.host.Host]:
    """Translate one Ansible inventory host into a testinfra Host.

    Resolves connection backend, user, password, port and ssh options from
    ansible.cfg, inventory hostvars and environment variables, then builds
    a testinfra connection spec such as ``ssh://user:pass@host:port``.
    Returns None when the host uses a connection plugin testinfra cannot
    handle.
    """
    if is_empty_inventory(inventory):
        # Without an inventory only localhost can be addressed.
        if (host == 'localhost'):
            return testinfra.get_host('local://')
        return None
    hostvars = inventory['_meta'].get('hostvars', {}).get(host, {})
    connection = hostvars.get('ansible_connection', 'ssh')
    if (connection not in ('smart', 'ssh', 'paramiko_ssh', 'local', 'docker', 'community.docker.docker', 'lxc', 'lxd')):
        return None
    # Map Ansible connection plugin names onto testinfra backend names.
    connection = {'community.docker.docker': 'docker', 'lxd': 'lxc', 'paramiko_ssh': 'paramiko', 'smart': 'ssh'}.get(connection, connection)
    # For each variable: where it lives in ansible.cfg and which environment
    # variable may override it.
    options: dict[(str, Any)] = {'ansible_become': {'ini': {'section': 'privilege_escalation', 'key': 'become'}, 'environment': 'ANSIBLE_BECOME'}, 'ansible_become_user': {'ini': {'section': 'privilege_escalation', 'key': 'become_user'}, 'environment': 'ANSIBLE_BECOME_USER'}, 'ansible_port': {'ini': {'section': 'defaults', 'key': 'remote_port'}, 'environment': 'ANSIBLE_REMOTE_PORT'}, 'ansible_ssh_common_args': {'ini': {'section': 'ssh_connection', 'key': 'ssh_common_args'}, 'environment': 'ANSIBLE_SSH_COMMON_ARGS'}, 'ansible_ssh_extra_args': {'ini': {'section': 'ssh_connection', 'key': 'ssh_extra_args'}, 'environment': 'ANSIBLE_SSH_EXTRA_ARGS'}, 'ansible_user': {'ini': {'section': 'defaults', 'key': 'remote_user'}, 'environment': 'ANSIBLE_REMOTE_USER'}}
    def get_config(name: str, default: Union[(None, bool, str)]=None) -> Union[(None, bool, str)]:
        """Resolve *name* with precedence ini < hostvars < environment."""
        value = default
        option = options.get(name, {})
        ini = option.get('ini')
        if ini:
            value = config.get(ini['section'], ini['key'], fallback=default)
        if (name in hostvars):
            value = hostvars[name]
        var = option.get('environment')
        if (var and (var in os.environ)):
            value = os.environ[var]
        return value
    testinfra_host = get_config('ansible_host', host)
    assert isinstance(testinfra_host, str), testinfra_host
    user = get_config('ansible_user')
    password = get_config('ansible_ssh_pass')
    port = get_config('ansible_port')
    kwargs: dict[(str, Union[(None, str, bool)])] = {}
    if get_config('ansible_become', False):
        kwargs['sudo'] = True
        kwargs['sudo_user'] = get_config('ansible_become_user')
    if (ssh_config is not None):
        kwargs['ssh_config'] = ssh_config
    if (ssh_identity_file is not None):
        kwargs['ssh_identity_file'] = ssh_identity_file
    # Hostvars-provided private keys take precedence over the argument.
    if ('ansible_ssh_private_key_file' in hostvars):
        kwargs['ssh_identity_file'] = hostvars['ansible_ssh_private_key_file']
    elif ('ansible_private_key_file' in hostvars):
        kwargs['ssh_identity_file'] = hostvars['ansible_private_key_file']
    kwargs['ssh_extra_args'] = ' '.join([config.get('ssh_connection', 'ssh_args', fallback=''), get_config('ansible_ssh_common_args', ''), get_config('ansible_ssh_extra_args', '')]).strip()
    control_path = config.get('ssh_connection', 'control_path', fallback='', raw=True)
    if control_path:
        directory = config.get('persistent_connection', 'control_path_dir', fallback='~/.ansible/cp')
        control_path = (control_path % {'directory': directory})
        # Escape remaining '%' characters so later %-token expansion does
        # not misread them.
        control_path = control_path.replace('%', '%%')
        kwargs['controlpath'] = control_path
    spec = '{}://'.format(connection)
    # Password auth is only embedded in the spec when no identity file won.
    if (user and password and (not kwargs.get('ssh_identity_file'))):
        spec += '{}:{}'.format(user, password)
    elif user:
        spec += '{}'.format(user)
    try:
        version = ipaddress.ip_address(testinfra_host).version
    except ValueError:
        version = None
    if (version == 6):
        # IPv6 literals must be bracketed in the host spec.
        spec += (('[' + testinfra_host) + ']')
    else:
        spec += testinfra_host
    if port:
        spec += ':{}'.format(port)
    return testinfra.get_host(spec, **kwargs)
def test_basetransformerlayer():
    """BaseTransformerLayer wires feedforward channels and propagates the
    batch_first flag to its attention modules."""
    ffn_channels = 2048
    dropout = 0.1
    order = ('self_attn', 'norm', 'ffn', 'norm')

    # Default construction: batch_first must be off.
    layer = BaseTransformerLayer(attn_cfgs=(dict(type='MultiheadAttention', embed_dims=256, num_heads=8),), feedforward_channels=ffn_channels, ffn_dropout=dropout, operation_order=order)
    assert (layer.batch_first is False)
    assert (layer.ffns[0].feedforward_channels == ffn_channels)

    # batch_first=True must reach the attention modules.
    layer = BaseTransformerLayer(attn_cfgs=(dict(type='MultiheadAttention', num_heads=8, embed_dims=256),), feedforward_channels=ffn_channels, ffn_dropout=dropout, operation_order=order, batch_first=True)
    assert layer.attentions[0].batch_first
    # Smoke-test a forward pass with a (batch, seq, dim) input.
    layer(torch.rand(2, 10, 256))
def spatial_svd_auto_mode():
    """Example: compress VGG16 with AIMET spatial SVD in auto mode.

    Builds a randomly initialized VGG16 graph, excludes the first conv
    layer from compression, greedily picks per-layer compression ratios
    targeting an overall 0.8 MAC ratio, and prints the statistics.
    """
    sess = tf.compat.v1.Session()
    with sess.graph.as_default():
        # weights=None: random init; only the graph structure matters here.
        _ = VGG16(weights=None, input_shape=(224, 224, 3))
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
    # The first conv layer is excluded from compression.
    conv2d = sess.graph.get_operation_by_name('block1_conv1/Conv2D')
    modules_to_ignore = [conv2d]
    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(0.8), num_comp_ratio_candidates=10, use_monotonic_fit=True, saved_eval_scores_dict=None)
    auto_params = SpatialSvdParameters.AutoModeParams(greedy_select_params=greedy_params, modules_to_ignore=modules_to_ignore)
    params = SpatialSvdParameters(input_op_names=['input_1'], output_op_names=['predictions/Softmax'], mode=SpatialSvdParameters.Mode.auto, params=auto_params, multiplicity=8)
    input_shape = (1, 3, 224, 224)
    (compr_model_sess, stats) = ModelCompressor.compress_model(sess=sess, working_dir=str('./'), eval_callback=evaluate_model, eval_iterations=10, input_shape=input_shape, compress_scheme=CompressionScheme.spatial_svd, cost_metric=CostMetric.mac, parameters=params, trainer=None)
    print(stats)
class FakeInspector(inspector.AbstractWebInspector):
    """Test double for AbstractWebInspector that records the inspected page
    and exposes a settable recreate flag."""

    def __init__(self, inspector_widget: QWidget, splitter: miscwidgets.InspectorSplitter, win_id: int, parent: QWidget=None) -> None:
        super().__init__(splitter, win_id, parent)
        self._set_widget(inspector_widget)
        # Last page passed to inspect(); None until inspect() is called.
        self._inspected_page = None
        # Value returned by _needs_recreate(); tests can flip this.
        self.needs_recreate = False

    def inspect(self, page):
        """Record *page* as the currently inspected page."""
        self._inspected_page = page

    def _needs_recreate(self):
        """Report the value configured via self.needs_recreate."""
        return self.needs_recreate
def SolcoreMaterialToStr(material_input):
    """Serialize a Solcore material object into a plain dict.

    Parses the material's string form (``<'Name' ... El=frac ...>`` —
    assumed format, derived from the parsing below; verify against Solcore)
    into ``{'material': name}`` plus, for alloys, ``'element'`` and
    ``'fraction'`` keys.

    Args:
        material_input: object with a ``composition`` mapping and a string
            representation as described above.

    Returns:
        dict describing the material.
    """
    # Idiom fixes vs the original: str() instead of .__str__(), and a plain
    # boolean expression instead of `True if ... else False`.
    tokens = str(material_input).strip('<>').split(' ')
    material_name = tokens[0].strip("'")
    composition = {'material': material_name}
    # A non-empty composition mapping marks an alloy.
    if len(material_input.composition) > 0:
        fragments = tokens[2].split('=')
        for i, fragment in enumerate(fragments):
            # The fragment naming an element appears inside the alloy name;
            # the following fragment is its molar fraction.
            if fragment in material_name:
                composition['element'] = fragments[i]
                composition['fraction'] = float(fragments[(i + 1)])
    return composition
class HierarchicalMultiHeadAttention(nn.Module):
    """Two-stage multi-head attention over a stack of encoder layers.

    Stage 1 attends from the query over all (layer, position) keys; stage 2
    attends, per query position, over the per-layer stage-1 outputs to fuse
    them into one vector.  forward() returns (output, coverage) with
    coverage always None.
    """

    def __init__(self, h, d_model, attn_p=0.1):
        super(HierarchicalMultiHeadAttention, self).__init__()
        self.h = h
        self.d = d_model
        assert ((d_model % h) == 0)
        self.d_head = (d_model // h)
        # Stage-1 projections plus the stage-2 query projection.
        self.fc_query = Bottle(Linear(d_model, (h * self.d_head), bias=False))
        self.fc_key = Bottle(Linear(d_model, (h * self.d_head), bias=False))
        self.fc_value = Bottle(Linear(d_model, (h * self.d_head), bias=False))
        self.fc_query_2 = Bottle(Linear(d_model, (h * self.d_head), bias=False))
        self.fc_concat = Bottle(Linear((h * self.d_head), d_model, bias=False))
        self.fc_concat_2 = Bottle(Linear(d_model, d_model, bias=False))
        self.sm = nn.Softmax(dim=(- 1))
        self.sm_2 = nn.Softmax(dim=(- 1))
        self.attn_dropout = StaticDropout(attn_p)
        self.attn_dropout_2 = StaticDropout(attn_p)

    def _prepare_proj(self, x):
        # (b, l, d) -> (b*h, l, d_head): split heads and fold into batch.
        (b, l, d) = x.size()
        return contiguous(x.view(b, l, self.h, self.d_head).transpose(1, 2)).view((b * self.h), l, self.d_head)

    def shape(self, x):
        # (b, l, d) -> (b, h, l, d_head).
        (b, l, d) = x.size()
        return x.view(b, l, self.h, self.d_head).transpose(1, 2)

    def forward(self, query, key, mask=None, query_mask=None, value_mask=None):
        # key appears to be (n_layer, b, len_key, d) -- one slice per
        # encoder layer -- and query (b, len_query, d); TODO confirm.
        (n_layer, b, len_key) = (key.size(0), key.size(1), key.size(2))
        if (value_mask is not None):
            # Broadcast the per-position mask across all layers.
            value_mask = value_mask.unsqueeze(0).repeat(n_layer, 1, 1)
        key_mask = value_mask
        (b, len_query) = (query.size(0), query.size(1))
        value = key
        # --- Stage 1: attend over all (layer, position) pairs. ---
        proj_query = self.fc_query(query, mask=query_mask)
        proj_key = self.fc_key(key, mask=key_mask).transpose(0, 1).contiguous().view(b, (- 1), (self.h * self.d_head))
        proj_value = self.fc_value(value, mask=value_mask).transpose(0, 1).contiguous().view(b, (- 1), (self.h * self.d_head))
        proj_query = self.shape(proj_query)
        proj_key = self.shape(proj_key)
        proj_value = self.shape(proj_value)
        # Scale the queries (equivalent to dividing the scores by
        # sqrt(d_head)).
        proj_query = (proj_query * (self.d_head ** (- 0.5)))
        scores = torch.matmul(proj_query, proj_key.transpose(2, 3))
        scores = scores.view(b, self.h, len_query, n_layer, len_key)
        mask_ = Variable(mask.unsqueeze(1).unsqueeze((- 2)))
        scores = scores.masked_fill_(mask_, (- float('inf')))
        attns = F.softmax(scores, dim=(- 1))
        attns = self.attn_dropout(attns)
        proj_value = proj_value.view(b, self.h, n_layer, len_key, self.d_head)
        attns = attns.transpose(2, 3)
        out = torch.matmul(attns, proj_value)
        out = out.transpose(1, 3).contiguous().view(b, len_query, n_layer, (self.h * self.d_head))
        out = self.fc_concat(out, query_mask.unsqueeze((- 1)).repeat(1, 1, n_layer))
        # --- Stage 2: fuse the n_layer per-layer outputs per position. ---
        new_query = self.fc_query_2(query, mask=query_mask)
        new_query = new_query.view((- 1), new_query.size((- 1))).unsqueeze(1)
        proj_query = self.shape(new_query)
        new_key = out.view((- 1), n_layer, (self.h * self.d_head))
        proj_key = self.shape(new_key)
        if (query_mask is not None):
            # Drop padded query positions before the second attention.
            flattened_mask = query_mask.view((- 1))
            non_pad_indices = torch.nonzero(flattened_mask).squeeze(1)
            proj_query = proj_query.index_select(0, non_pad_indices)
            proj_key = proj_key.index_select(0, non_pad_indices)
        proj_value = proj_key
        scores_2 = torch.matmul(proj_query, proj_key.transpose(2, 3))
        attns_2 = F.softmax(scores_2, dim=(- 1))
        out = torch.matmul(attns_2, proj_value)
        b_ = out.size(0)
        out = out.unsqueeze(2).view((- 1), (self.h * self.d_head))
        out = self.fc_concat_2(out)
        if (query_mask is not None):
            # Scatter the non-padded results back into the padded layout.
            final_out = Variable(out.data.new((b * len_query), (self.h * self.d_head)).zero_())
            final_out.index_copy_(0, non_pad_indices, out)
        else:
            final_out = out
        out = final_out.view(b, len_query, (self.h * self.d_head))
        coverage = None
        return (out, coverage)
class PatchSampler(object):
    """Base class for patch samplers.

    Subclasses implement __call__ to produce (patch_coord, scale) sampling
    grids; image2patch then resamples the images with those grids.
    """

    def __init__(self):
        # When truthy, image2patch skips resampling and returns the input
        # images untouched.
        self.full_indices = None

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def image2patch(self, imgs, wh, device):
        """Sample patches from *imgs* at coordinates produced by self()."""
        batch_size = imgs.shape[0]
        patch_coord, scale = self(batch_size, wh, device)
        if not self.full_indices:
            imgs = F.grid_sample(imgs, patch_coord, mode='bilinear', align_corners=True)
        return imgs, patch_coord, scale
class QlFsMappedObject():
    """Abstract interface for objects mapped into the emulated file system.

    Every operation raises NotImplementedError; concrete mapped objects
    override the subset of file-like operations they support.
    """

    def __init__(self):
        pass

    @staticmethod
    def _unsupported(what, kind='method'):
        # Single place that builds the per-member error; the message text
        # matches the historical wording exactly.
        raise NotImplementedError(f'QlFsMappedObject {kind} not implemented: {what}')

    def read(self, expected_len):
        self._unsupported('read')

    def write(self, buffer):
        self._unsupported('write')

    def fileno(self):
        self._unsupported('fileno')

    def lseek(self, lseek_offset, lseek_origin):
        self._unsupported('lseek')

    def close(self):
        self._unsupported('close')

    def fstat(self):
        self._unsupported('fstat')

    def ioctl(self, ioctl_cmd, ioctl_arg):
        self._unsupported('ioctl')

    def tell(self):
        self._unsupported('tell')

    def dup(self):
        self._unsupported('dup')

    def readline(self, end=b'\n'):
        self._unsupported('readline')

    def name(self):
        self._unsupported('name', kind='property')
# NOTE(review): '.skipif' looks like a pytest marker that lost its
# '@pytest.mark' prefix during extraction; as written it is not valid
# Python — confirm against the original file.
.skipif((not HAVE_DEPS_FOR_RESOURCE_ESTIMATES), reason='pyscf and/or jax not installed.')
def test_lambda_calc():
    """Check the lambda (Hamiltonian 1-norm) computation for the
    double-factorized k-point Hamiltonian: the total lambda matches a
    reference value, and the two-body lambda from the A/B matrices agrees
    with the eigenvalue-based computation."""
    mf = make_diamond_113_szv()
    mymp = mp.KMP2(mf)
    Luv = cholesky_from_df_ints(mymp)
    helper = DFABKpointIntegrals(cholesky_factor=Luv, kmf=mf)
    helper.double_factorize(thresh=1e-13)
    hcore_ao = mf.get_hcore()
    # Rotate the core Hamiltonian into the MO basis, one k-point at a time.
    hcore_mo = np.asarray([reduce(np.dot, (mo.T.conj(), hcore_ao[k], mo)) for (k, mo) in enumerate(mf.mo_coeff)])
    lambda_data = compute_lambda(hcore_mo, helper)
    assert np.isclose(lambda_data.lambda_total, 179.)
    lambda_two_body = 0
    lambda_two_body_v2 = 0
    nkpts = len(mf.kpts)
    for qidx in range(nkpts):
        aval_to_square = np.zeros(helper.naux, dtype=np.complex128)
        bval_to_square = np.zeros(helper.naux, dtype=np.complex128)
        aval_to_square_v2 = np.zeros(helper.naux, dtype=np.complex128)
        bval_to_square_v2 = np.zeros(helper.naux, dtype=np.complex128)
        for kidx in range(nkpts):
            (Amats, Bmats) = helper.build_A_B_n_q_k_from_chol(qidx, kidx)
            Amats /= np.sqrt(nkpts)
            Bmats /= np.sqrt(nkpts)
            (wa, _) = np.linalg.eigh(Amats)
            (wb, _) = np.linalg.eigh(Bmats)
            # Squared Frobenius norm accumulated two ways: elementwise and
            # via eigenvalues; they must agree for Hermitian matrices.
            aval_to_square += np.einsum('npq->n', (np.abs(Amats) ** 2))
            bval_to_square += np.einsum('npq->n', (np.abs(Bmats) ** 2))
            aval_to_square_v2 += np.sum((np.abs(wa) ** 2), axis=(- 1))
            bval_to_square_v2 += np.sum((np.abs(wb) ** 2), axis=(- 1))
            assert np.allclose(np.sum((np.abs(wa) ** 2), axis=(- 1)), np.einsum('npq->n', (np.abs(Amats) ** 2)))
        lambda_two_body += np.sum(aval_to_square)
        lambda_two_body += np.sum(bval_to_square)
        lambda_two_body_v2 += np.sum(aval_to_square_v2)
        lambda_two_body_v2 += np.sum(bval_to_square_v2)
    assert np.isclose(lambda_two_body, lambda_two_body_v2)
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern, error):
    """cpplint helper: flag a C-style cast of kind *cast_type* on *line*.

    Returns True when the construct matched (an error may have been
    reported), False when *pattern* does not apply so other checks can run.
    """
    match = Search(pattern, line)
    if (not match):
        return False
    # sizeof(type) is not a cast.
    sizeof_match = Match('.*sizeof\\s*$', line[0:(match.start(1) - 1)])
    if sizeof_match:
        return False
    # operator++(int)/operator--(int) dummy parameters are not casts.
    if (line[0:(match.start(1) - 1)].endswith(' operator++') or line[0:(match.start(1) - 1)].endswith(' operator--')):
        return False
    remainder = line[match.end(0):]
    # "(type);"-like remainders suggest a function declaration with an
    # unnamed parameter rather than a cast.
    if Match('^\\s*(?:;|const\\b|throw\\b|=|>|\\{|\\))', remainder):
        if Match('^\\s*>', remainder):
            return False  # likely a template argument, not a cast
        matched_zero = Match('^\\s=\\s*(\\S+)\\s*;', remainder)
        if (matched_zero and (matched_zero.group(1) != '0')):
            return False  # "= value;" other than "= 0" (pure virtual)
        if Match('.*\\)\\s*$', line[0:match.start(0)]):
            return False  # preceding ')' implies a call, not a declaration
        if ('/*' in raw_line):
            return False  # parameter may be named inside a comment
        error(filename, linenum, 'readability/function', 3, 'All parameters should be named in a function')
        return True
    error(filename, linenum, 'readability/casting', 4, ('Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))))
    return True
class Laser():
    """Multilingual sentence embedder: tokenize, BPE-encode, then embed
    with the LASER BiLSTM encoder.  Model files default to the bundled
    data directory and must have been downloaded beforehand."""

    DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
    DEFAULT_BPE_CODES_FILE = os.path.join(DATA_DIR, '93langs.fcodes')
    DEFAULT_BPE_VOCAB_FILE = os.path.join(DATA_DIR, '93langs.fvocab')
    DEFAULT_ENCODER_FILE = os.path.join(DATA_DIR, 'bilstm.93langs.2018-12-26.pt')

    @staticmethod
    def _default_model_file(path: str, display_name: str) -> str:
        """Return *path*, raising FileNotFoundError if it was never
        downloaded."""
        if not os.path.isfile(path):
            raise FileNotFoundError(f'{display_name} is missing, run "python -m laserembeddings download-models" to fix that')
        return path

    def __init__(self, bpe_codes: Optional[Union[(str, TextIOBase)]]=None, bpe_vocab: Optional[Union[(str, TextIOBase)]]=None, encoder: Optional[Union[(str, BufferedIOBase)]]=None, tokenizer_options: Optional[Dict[(str, Any)]]=None, embedding_options: Optional[Dict[(str, Any)]]=None):
        if tokenizer_options is None:
            tokenizer_options = {}
        if embedding_options is None:
            embedding_options = {}
        # Fall back to the bundled model files when none were supplied.
        if bpe_codes is None:
            bpe_codes = self._default_model_file(self.DEFAULT_BPE_CODES_FILE, '93langs.fcodes')
        if bpe_vocab is None:
            bpe_vocab = self._default_model_file(self.DEFAULT_BPE_VOCAB_FILE, '93langs.fvocab')
        if encoder is None:
            encoder = self._default_model_file(self.DEFAULT_ENCODER_FILE, 'bilstm.93langs.2018-12-26.pt')
        self.tokenizer_options = tokenizer_options
        # Per-language tokenizer cache, filled lazily.
        self.tokenizers: Dict[(str, Tokenizer)] = {}
        self.bpe = BPE(bpe_codes, bpe_vocab)
        self.bpeSentenceEmbedding = BPESentenceEmbedding(encoder, **embedding_options)

    def _get_tokenizer(self, lang: str) -> Tokenizer:
        """Return the tokenizer for *lang*, creating and caching it on
        first use."""
        try:
            return self.tokenizers[lang]
        except KeyError:
            tokenizer = Tokenizer(lang, **self.tokenizer_options)
            self.tokenizers[lang] = tokenizer
            return tokenizer

    def embed_sentences(self, sentences: Union[(List[str], str)], lang: Union[(str, List[str])]) -> np.ndarray:
        """Embed *sentences*; *lang* is one language code or one code per
        sentence.  Returns an array with one embedding row per sentence."""
        if isinstance(sentences, str):
            sentences = [sentences]
        if isinstance(lang, str):
            lang = [lang] * len(sentences)
        if len(sentences) != len(lang):
            raise ValueError('lang: invalid length: the number of language codes does not match the number of sentences')
        with sre_performance_patch():
            tokenized = [self._get_tokenizer(code).tokenize(text) for (text, code) in zip(sentences, lang)]
            encoded = [self.bpe.encode_tokens(tokens) for tokens in tokenized]
            return self.bpeSentenceEmbedding.embed_bpe_sentences(encoded)
def run_dijkstra_algorithm(start_node, nodes) -> None:
    """Single-source shortest paths (Dijkstra) from *start_node*.

    Mutates each reachable node in place: ``node.distance`` becomes the
    shortest distance from *start_node* and ``node.previous`` the
    predecessor on that path.  Nodes are assumed to expose an
    ``adjacency_list`` mapping neighbour -> edge weight and to start with
    ``distance`` unset (None).  *nodes* is kept for interface compatibility
    but is not needed.

    Fixes vs the original:
    - distance was tested for truthiness, so a legitimate distance of 0
      (zero-weight edge) was treated as "unset" and re-relaxed;
    - the visited list never grew past the start node, so settled nodes
      kept being relaxed via stale queue entries.
    """
    queue = PriorityQueue()
    start_node.distance = 0
    # List (not set) so nodes only need __eq__, not __hash__.
    visited = []
    queue.put(PriorityItem(0, start_node))
    while not queue.empty():
        current = queue.get().item
        if current in visited:
            continue  # stale entry: node already settled with a better distance
        visited.append(current)
        for neighbour, weight in current.adjacency_list.items():
            if neighbour in visited:
                continue
            candidate = current.distance + weight
            if neighbour.distance is not None and candidate >= neighbour.distance:
                continue  # existing path is at least as good
            neighbour.distance = candidate
            neighbour.previous = current
            queue.put(PriorityItem(neighbour.distance, neighbour))
class InlineQueryResultCachedAudio(InlineQueryResult):
    """Inline query result referring to an audio file already stored on
    Telegram servers, identified by its file id.

    Attributes mirror the constructor arguments; the result type is fixed
    to InlineQueryResultType.AUDIO.
    """
    # Fixed attribute set keeps instances lightweight.
    __slots__ = ('reply_markup', 'caption_entities', 'caption', 'parse_mode', 'audio_file_id', 'input_message_content')

    def __init__(self, id: str, audio_file_id: str, caption: Optional[str]=None, reply_markup: Optional[InlineKeyboardMarkup]=None, input_message_content: Optional['InputMessageContent']=None, parse_mode: ODVInput[str]=DEFAULT_NONE, caption_entities: Optional[Sequence[MessageEntity]]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(InlineQueryResultType.AUDIO, id, api_kwargs=api_kwargs)
        # Instances are frozen after construction; assignments must happen
        # inside the _unfrozen() context.
        with self._unfrozen():
            self.audio_file_id: str = audio_file_id
            self.caption: Optional[str] = caption
            self.parse_mode: ODVInput[str] = parse_mode
            self.caption_entities: Tuple[(MessageEntity, ...)] = parse_sequence_arg(caption_entities)
            self.reply_markup: Optional[InlineKeyboardMarkup] = reply_markup
            self.input_message_content: Optional[InputMessageContent] = input_message_content
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.