code stringlengths 281 23.7M |
|---|
def get_cd_loss(pred, gt, radius, alpha):
    """Weighted Chamfer-distance loss between two point clouds.

    Blends the two directed nearest-neighbour distances with `alpha`
    (forward: gt -> pred) and `1 - alpha` (backward: pred -> gt),
    averages over points, normalises by `radius`, and averages over
    the batch. Returns `(loss, None)` to match the other loss helpers.
    """
    forward, _, backward, _ = tf_nndistance.nn_distance(gt, pred)
    blended = alpha * forward + (1 - alpha) * backward
    per_cloud = tf.reduce_mean(blended, axis=1)
    cd_loss = tf.reduce_mean(per_cloud / radius)
    return cd_loss, None
def test_is_valid_balanceproof_signature():
    """Signature validation must reject both a wrong signer and a malformed signature."""
    # A correctly signed proof checked against an unrelated address must fail.
    proof = factories.create(factories.BalanceProofSignedStateProperties())
    result = is_valid_balanceproof_signature(proof, factories.make_address())
    assert not result, 'Address does not match.'

    # An all-zero 65-byte signature is structurally invalid and must fail too.
    proof = factories.create(
        factories.BalanceProofSignedStateProperties(signature=b'\x00' * 65)
    )
    result = is_valid_balanceproof_signature(proof, factories.make_address())
    assert not result, f'Invalid signature check: {result.as_error_message}'
class LookupColor(rq.ReplyRequest):
    # X11 core-protocol LookupColor request (opcode 92): resolve a textual
    # color name against a colormap. The reply carries both the exact RGB
    # definition from the server's color database ("exact_*") and the
    # closest values the screen hardware can actually display ("screen_*").
    _request = rq.Struct(rq.Opcode(92), rq.Pad(1), rq.RequestLength(), rq.Colormap('cmap'), rq.LengthOf('name', 2), rq.Pad(2), rq.String8('name'))
    _reply = rq.Struct(rq.ReplyCode(), rq.Pad(1), rq.Card16('sequence_number'), rq.ReplyLength(), rq.Card16('exact_red'), rq.Card16('exact_green'), rq.Card16('exact_blue'), rq.Card16('screen_red'), rq.Card16('screen_green'), rq.Card16('screen_blue'), rq.Pad(12))
def test_error_raising_with_one_class():
    """Misuse of `_provision_action` must be reported at class-creation time.

    The bare `_provision_action` lines in the original were stripped
    decorators (`@_provision_action`); without the `@` they were no-op
    expression statements and none of the classes would have raised.
    """
    # Wrong request annotation on the decorated method -> TypeError.
    with pytest.raises(TypeError):
        class BadDecoratorArg(StaticProvider):
            @_provision_action
            def _provide(self, mediator: Mediator, request: int):
                pass

    # Decorating the same method twice -> ValueError.
    with pytest.raises(ValueError):
        class DoubleDecoration(StaticProvider):
            @_provision_action
            @_provision_action
            def _provide(self, mediator: Mediator, request: Request):
                pass

    # Two distinct decorated methods in one provider class -> TypeError.
    with pytest.raises(TypeError):
        class SeveralSPA(StaticProvider):
            @_provision_action
            def _provide_one(self, mediator: Mediator, request: Request):
                pass

            @_provision_action
            def _provide_two(self, mediator: Mediator, request: Request):
                pass
@pytest.mark.skip
def test_interleave_speed():
    """Throughput benchmark for the interleave codepath.

    Skipped by default: this is a speed measurement, not a correctness
    test. (The original `.skip` line was a stripped skip decorator;
    restored here as `@pytest.mark.skip` — confirm against the project's
    test framework.)
    """
    n_samples = 100000
    a = np.arange(0, n_samples)
    b = np.arange(1, (n_samples + 1))
    c = np.arange(2, (n_samples + 2))
    assert (a.shape[0] == b.shape[0] == c.shape[0])
    n = a.shape[0]
    # One fixed-width byte buffer per column (INT32_BUF_SIZE bytes/value).
    a_buf = np.empty((n * INT32_BUF_SIZE), dtype=np.uint8)
    b_buf = np.empty((n * INT32_BUF_SIZE), dtype=np.uint8)
    c_buf = np.empty((n * INT32_BUF_SIZE), dtype=np.uint8)
    # Per-column value offsets; n+1 entries so the end of the last value is known.
    indexes = np.empty((3, (n + 1)), dtype=np.int32)
    buf_size = interleave_buf_size(indexes, a_buf, b_buf, c_buf)
    buf = np.empty(buf_size, dtype=np.uint8)
    mask = np.array([False, False, False])
    import time
    start = time.time()
    reps = 200
    bytes_written = 0
    for _ in range(reps):
        print('.', end='')
        vcf_ints_to_byte_buf(a_buf, 0, a, indexes[0])
        vcf_ints_to_byte_buf(b_buf, 0, b, indexes[1])
        vcf_ints_to_byte_buf(c_buf, 0, c, indexes[2])
        p = interleave(buf, 0, indexes, mask, ord(':'), ord(' '), a_buf, b_buf, c_buf)
        bytes_written += len(byte_buf_to_str(buf[:p]))
    end = time.time()
    print(f'bytes written: {bytes_written}')
    print(f'duration: {(end - start)}')
    print(f'speed: {(bytes_written / (1000000 * (end - start)))} MB/s')
class DescribeUnmarshaller:
    """Unit tests for `Unmarshaller`.

    Fixes applied: the fixture methods lost their `@pytest.fixture`
    decorators (pytest could not inject them as plain methods), and the
    `reltype` string literal at the top of
    `it_can_unmarshal_relationships` was truncated — restored with a
    placeholder URI (any consistent value works for this test; confirm
    the original literal against upstream history).
    """

    def it_can_unmarshal_from_a_pkg_reader(self, pkg_reader_, pkg_, part_factory_, _unmarshal_parts_, _unmarshal_relationships_, parts_dict_):
        _unmarshal_parts_.return_value = parts_dict_
        Unmarshaller.unmarshal(pkg_reader_, pkg_, part_factory_)
        _unmarshal_parts_.assert_called_once_with(pkg_reader_, pkg_, part_factory_)
        _unmarshal_relationships_.assert_called_once_with(pkg_reader_, pkg_, parts_dict_)
        # Each part, then the package itself, gets an after-unmarshal callback.
        for part in parts_dict_.values():
            part.after_unmarshal.assert_called_once_with()
        pkg_.after_unmarshal.assert_called_once_with()

    def it_can_unmarshal_parts(self, pkg_reader_, pkg_, part_factory_, parts_dict_, partnames_, content_types_, reltypes_, blobs_):
        (partname_, partname_2_) = partnames_
        (content_type_, content_type_2_) = content_types_
        (reltype_, reltype_2_) = reltypes_
        (blob_, blob_2_) = blobs_
        parts = Unmarshaller._unmarshal_parts(pkg_reader_, pkg_, part_factory_)
        assert (part_factory_.call_args_list == [call(partname_, content_type_, reltype_, blob_, pkg_), call(partname_2_, content_type_2_, reltype_2_, blob_2_, pkg_)])
        assert (parts == parts_dict_)

    def it_can_unmarshal_relationships(self):
        reltype = 'http://reltype'
        pkg_reader = Mock(name='pkg_reader')
        pkg_reader.iter_srels.return_value = (('/', Mock(name='srel1', rId='rId1', reltype=reltype, target_partname='partname1', is_external=False)), ('/', Mock(name='srel2', rId='rId2', reltype=reltype, target_ref='target_ref_1', is_external=True)), ('partname1', Mock(name='srel3', rId='rId3', reltype=reltype, target_partname='partname2', is_external=False)), ('partname2', Mock(name='srel4', rId='rId4', reltype=reltype, target_ref='target_ref_2', is_external=True)))
        pkg = Mock(name='pkg')
        parts = {}
        for num in range(1, 3):
            name = ('part%d' % num)
            part = Mock(name=name)
            parts[('partname%d' % num)] = part
            pkg.attach_mock(part, name)
        Unmarshaller._unmarshal_relationships(pkg_reader, pkg, parts)
        # Internal rels resolve to part objects; external rels keep the raw ref.
        expected_pkg_calls = [call.load_rel(reltype, parts['partname1'], 'rId1', False), call.load_rel(reltype, 'target_ref_1', 'rId2', True), call.part1.load_rel(reltype, parts['partname2'], 'rId3', False), call.part2.load_rel(reltype, 'target_ref_2', 'rId4', True)]
        assert (pkg.mock_calls == expected_pkg_calls)

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def blobs_(self, request):
        blob_ = loose_mock(request, spec=str, name='blob_')
        blob_2_ = loose_mock(request, spec=str, name='blob_2_')
        return (blob_, blob_2_)

    @pytest.fixture
    def content_types_(self, request):
        content_type_ = loose_mock(request, spec=str, name='content_type_')
        content_type_2_ = loose_mock(request, spec=str, name='content_type_2_')
        return (content_type_, content_type_2_)

    @pytest.fixture
    def part_factory_(self, request, parts_):
        part_factory_ = loose_mock(request, spec=Part)
        part_factory_.side_effect = parts_
        return part_factory_

    @pytest.fixture
    def partnames_(self, request):
        partname_ = loose_mock(request, spec=str, name='partname_')
        partname_2_ = loose_mock(request, spec=str, name='partname_2_')
        return (partname_, partname_2_)

    @pytest.fixture
    def parts_(self, request):
        part_ = instance_mock(request, Part, name='part_')
        part_2_ = instance_mock(request, Part, name='part_2')
        return (part_, part_2_)

    @pytest.fixture
    def parts_dict_(self, request, partnames_, parts_):
        (partname_, partname_2_) = partnames_
        (part_, part_2_) = parts_
        return {partname_: part_, partname_2_: part_2_}

    @pytest.fixture
    def pkg_(self, request):
        return instance_mock(request, OpcPackage)

    @pytest.fixture
    def pkg_reader_(self, request, partnames_, content_types_, reltypes_, blobs_):
        (partname_, partname_2_) = partnames_
        (content_type_, content_type_2_) = content_types_
        (reltype_, reltype_2_) = reltypes_
        (blob_, blob_2_) = blobs_
        iter_spart_items = ((partname_, content_type_, reltype_, blob_), (partname_2_, content_type_2_, reltype_2_, blob_2_))
        pkg_reader_ = instance_mock(request, PackageReader)
        pkg_reader_.iter_sparts.return_value = iter_spart_items
        return pkg_reader_

    @pytest.fixture
    def reltypes_(self, request):
        reltype_ = instance_mock(request, str, name='reltype_')
        reltype_2_ = instance_mock(request, str, name='reltype_2')
        return (reltype_, reltype_2_)

    @pytest.fixture
    def _unmarshal_parts_(self, request):
        return method_mock(request, Unmarshaller, '_unmarshal_parts', autospec=False)

    @pytest.fixture
    def _unmarshal_relationships_(self, request):
        return method_mock(request, Unmarshaller, '_unmarshal_relationships', autospec=False)
def _get_user_repo_permissions(user, limit_to_repository_obj=None, limit_namespace=None, limit_repo_name=None):
    # Return the repository permissions held by `user`, whether granted
    # directly or through team membership, as the UNION of two queries
    # built from one shared base.
    #
    # Alias User so the team branch's join does not clash with the direct
    # branch's join against the same table.
    UserThroughTeam = User.alias()
    base_query = RepositoryPermission.select(RepositoryPermission, Role, Repository, Namespace).join(Role).switch(RepositoryPermission).join(Repository).join(Namespace, on=(Repository.namespace_user == Namespace.id)).switch(RepositoryPermission)
    # Optional narrowing: an exact repository object takes precedence over
    # a namespace/name pair (the pair only applies when both are given).
    if (limit_to_repository_obj is not None):
        base_query = base_query.where((RepositoryPermission.repository == limit_to_repository_obj))
    elif (limit_namespace and limit_repo_name):
        base_query = base_query.where((Repository.name == limit_repo_name), (Namespace.username == limit_namespace))
    # Direct grants: permission rows joined straight to the user.
    direct = base_query.clone().join(User).where((User.id == user))
    # Team grants: permission -> team -> membership -> (aliased) user.
    team = base_query.clone().join(Team).join(TeamMember).join(UserThroughTeam, on=(UserThroughTeam.id == TeamMember.user)).where((UserThroughTeam.id == user))
    # peewee's `|` on queries compiles to a SQL UNION of the two branches.
    return (direct | team)
class Effect5825(BaseEffect):
    """Passive hull bonus: boosts 'kineticDamage' on charges of skill-fitted
    missile launchers by the ship's `shipBonusGC2` attribute, scaled by the
    'Gallente Cruiser' skill.

    `handler` is declared `@staticmethod` (restored — the effect framework
    invokes handlers through the class, not an instance, and the original
    decorator appears to have been stripped; confirm against sibling
    Effect classes).
    """

    type = 'passive'

    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')), 'kineticDamage', ship.getModifiedItemAttr('shipBonusGC2'), skill='Gallente Cruiser', **kwargs)
class TestTwoBitOp(Bloq):
    """Atomic two-qubit test bloq: applies X on `target` controlled on `ctrl`
    (a CNOT-like operation expressed as a tensor).

    The bare `_property` line in the original was a stripped decorator on
    `signature`; restored as `@property` so `bloq.signature` resolves to a
    `Signature` rather than a bound method.
    """

    @property
    def signature(self) -> Signature:
        # One control and one target wire, each a single qubit.
        return Signature.build(ctrl=1, target=1)

    def decompose_bloq(self) -> 'CompositeBloq':
        # Atomic by design: decomposition is explicitly unsupported.
        raise DecomposeTypeError(f'{self} is atomic')

    def add_my_tensors(self, tn: 'qtn.TensorNetwork', tag: Any, *, incoming: Dict[(str, 'SoquetT')], outgoing: Dict[(str, 'SoquetT')]):
        import quimb.tensor as qtn
        _I = [[1, 0], [0, 1]]
        _NULL = [[0, 0], [0, 0]]
        _X = [[0, 1], [1, 0]]
        # Block-diagonal [[I, 0], [0, X]]: identity when ctrl=0, X when ctrl=1.
        tn.add(qtn.Tensor(data=np.array([[_I, _NULL], [_NULL, _X]]), inds=(outgoing['ctrl'], incoming['ctrl'], outgoing['target'], incoming['target']), tags=[self.short_name(), tag]))

    def short_name(self) -> str:
        return 'op'
def outputids2words(id_list, vocab, article_oovs):
    """Map model output IDs to word strings, resolving copied article OOVs.

    Args:
        id_list: list of integer word IDs produced by the model.
        vocab: vocabulary object; `id2word(i)` raises ValueError for IDs
            outside the fixed vocabulary, and `size()` gives the vocab size.
        article_oovs: list of this article's out-of-vocabulary words
            (pointer-generator mode), or None in baseline mode.

    Returns:
        List of word strings, one per ID.

    Raises:
        ValueError: if an OOV ID appears with `article_oovs=None`, or if an
            OOV index is out of range for this article's OOV list.
    """
    words = []
    for i in id_list:
        try:
            w = vocab.id2word(i)
        except ValueError:
            # ID is past the fixed vocab: it must index into the article OOVs.
            assert (article_oovs is not None), "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
            article_oov_idx = (i - vocab.size())
            try:
                w = article_oovs[article_oov_idx]
            except IndexError:
                # BUG FIX: list indexing raises IndexError, not ValueError —
                # the original `except ValueError` could never fire, so an
                # out-of-range OOV index escaped as a bare IndexError instead
                # of this diagnostic message.
                raise ValueError(('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs))))
        words.append(w)
    return words
def get_download_model_command(file_id, file_name):
    """Build the shell command that downloads a Google Drive file into MODEL_DIR.

    The command uses the classic cookie/confirm-token wget dance required
    for large Google Drive files. The original string literal here was
    truncated mid-URL; restored from the well-known pattern (the sed step
    extracts the `confirm=` token from the first response).

    Args:
        file_id: Google Drive file ID.
        file_name: name to save the download as (inside MODEL_DIR).

    Returns:
        The full shell command string.
    """
    save_path = MODEL_DIR
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    url = r"""wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={FILE_ID}' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id={FILE_ID}" -O {SAVE_PATH}/{FILE_NAME} && rm -rf /tmp/cookies.txt""".format(FILE_ID=file_id, FILE_NAME=file_name, SAVE_PATH=save_path)
    return url
class FomostoTestCase(unittest.TestCase):
    # Smoke tests for the `fomosto` Green's-function store CLI: each test
    # drives the tool end-to-end in a temporary directory and relies on
    # `common.call_assert_usage` / `fomosto` raising on failure.

    def test_fomosto_usage(self):
        # Bare invocation must print usage (and not crash).
        common.call_assert_usage('fomosto')

    def test_fomosto_ahfull(self):
        # Exercise the full subcommand surface against a freshly
        # initialised 'ahfullgreen' store.
        with common.run_in_temp():
            fomosto('init', 'ahfullgreen', 'my_gfs')
            with common.chdir('my_gfs'):
                common.call_assert_usage('fomosto', '--help')
                common.call_assert_usage('fomosto', 'help')
                common.call_assert_usage('fomosto', 'init')
                fomosto('ttt')
                fomosto('build', '--nworkers=2')
                fomosto('stats')
                fomosto('check')
                fomosto('tttview', 'anyP')
                fomosto('extract', ',')
                fomosto('tttextract', 'anyP', ',')
                fomosto('tttextract', 'anyP', ',', '--output=anyP.phase')
                fomosto('modelview')
                fomosto('qc')
                fomosto('decimate', '2')
                fomosto('decimate', '4')
                fomosto('upgrade')
            fomosto('init', 'redeploy', 'my_gfs', 'my_gfs2')
            fomosto('redeploy', 'my_gfs', 'my_gfs2')
            fomosto('ttt', 'my_gfs2')
            from pyrocko import trace
            # Temporarily stub out the interactive snuffler viewer so
            # `fomosto view` does not block the test run, then restore it.
            (snuffle, trace.snuffle) = (trace.snuffle, dummy)
            fomosto('view', '--show-phases=all', '--extract=,', 'my_gfs', 'my_gfs2')
            trace.snuffle = snuffle
            fomosto('modelview', '--parameters=vp/vs,rho', 'my_gfs', 'my_gfs2')

    def test_fomosto_ahfull_refs(self):
        # `fomosto addref` needs pybtex; skip cleanly when it is absent.
        with common.run_in_temp():
            fomosto('init', 'ahfullgreen', 'my_gfs3')
            with common.chdir('my_gfs3'):
                try:
                    from pybtex.database.input import bibtex
                    with open('refs.bib', 'w') as f:
                        f.write(refs)
                    fomosto('addref', 'refs.bib')
                except ImportError as e:
                    raise unittest.SkipTest(str(e))
def boolean_mask(boxlist, indicator, fields=None, scope=None):
    """Select the rows of `boxlist` where `indicator` is True.

    Args:
        boxlist: a BoxList holding N boxes.
        indicator: rank-1 boolean tensor of length N.
        fields: extra fields to carry over; all of them when None.
        scope: optional TF name scope.

    Returns:
        A new BoxList containing only the selected boxes and fields.

    Raises:
        ValueError: if `indicator` is not a rank-1 bool tensor, or a
            requested field is missing from `boxlist`.
    """
    with tf.name_scope(scope, 'BooleanMask'):
        if indicator.shape.ndims != 1:
            raise ValueError('indicator should have rank 1')
        if indicator.dtype != tf.bool:
            raise ValueError('indicator should be a boolean tensor')
        masked = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
        selected_fields = boxlist.get_extra_fields() if fields is None else fields
        for name in selected_fields:
            if not boxlist.has_field(name):
                raise ValueError('boxlist must contain all specified fields')
            masked.add_field(name, tf.boolean_mask(boxlist.get_field(name), indicator))
        return masked
class JSONBasedEditor(qltk.UniqueWindow):
    """Editor window for a collection of JSON-backed objects.

    Shows the `proto_cls` instances from `values` in a list on the left
    and an auto-generated form (one widget per field of an empty
    prototype) on the right; the collection is written back to
    `filename` when the window is destroyed.

    Fixes applied versus the original:
      * `get_field_name` is now a `@staticmethod` — it was declared
        without `self` but called as `self.get_field_name(field, key)`,
        which passed three arguments to a two-parameter function and
        raised TypeError the first time the input frame was built.
      * `__select` previously tested the *builtin* `iter` (always
        truthy) instead of the tree iter of the current selection, so
        the Remove button never got disabled and `_populate_fields`
        could run with a stale `self.current`.
      * `isinstance(val, (int | bool))` mixed PEP-604 union syntax into
        a tuple; written plainly as `(int, bool)`.
    """

    _WIDTH = 800
    _HEIGHT = 400

    def __init__(self, proto_cls, values, filename, title):
        # UniqueWindow: bail out if an editor window is already open.
        if self.is_not_unique():
            return
        super().__init__()
        self.proto_cls = proto_cls
        self.current = None  # object currently being edited in the form
        self.filename = filename
        self.name = (proto_cls.NAME or proto_cls.__name__)
        self.input_entries = {}
        self.set_border_width(12)
        self.set_title(title)
        self.set_default_size(self._WIDTH, self._HEIGHT)
        self.add(Gtk.HBox(spacing=6))
        self.get_child().set_homogeneous(True)
        self.accels = Gtk.AccelGroup()

        # Left pane: list of existing objects.
        self.model = Gtk.ListStore(object)
        self._fill_values(values)
        self.view = view = RCMHintedTreeView(model=self.model)
        view.set_headers_visible(False)
        view.set_reorderable(True)
        view.set_rules_hint(True)
        render = Gtk.CellRendererText()
        render.set_padding(3, 6)
        render.props.ellipsize = Pango.EllipsizeMode.END
        column = Gtk.TreeViewColumn('', render)
        column.set_cell_data_func(render, self.__cdf)
        view.append_column(column)
        sw = Gtk.ScrolledWindow()
        sw.set_shadow_type(Gtk.ShadowType.IN)
        sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        sw.add(view)
        self.get_child().pack_start(sw, True, True, 0)

        # Right pane: per-field input form plus action buttons.
        vbox = Gtk.VBox(spacing=6)
        frame = self.__build_input_frame()
        vbox.pack_start(frame, False, True, 0)

        # Context menu with a Delete-key accelerator for removal.
        menu = Gtk.Menu()
        rem = MenuItem(_('_Remove'), Icons.LIST_REMOVE)
        (keyval, mod) = Gtk.accelerator_parse('Delete')
        rem.add_accelerator('activate', self.accels, keyval, mod, Gtk.AccelFlags.VISIBLE)
        connect_obj(rem, 'activate', self.__remove, view)
        menu.append(rem)
        menu.show_all()
        view.connect('popup-menu', self.__popup, menu)
        view.connect('key-press-event', self.__view_key_press)
        connect_obj(self, 'destroy', Gtk.Menu.destroy, menu)

        bbox = Gtk.HButtonBox()
        self.remove_but = Button(_('_Remove'), Icons.LIST_REMOVE)
        self.remove_but.set_sensitive(False)
        self.new_but = Button(_('_New'), Icons.DOCUMENT_NEW)
        self.new_but.connect('clicked', self._new_item)
        bbox.pack_start(self.new_but, True, True, 0)
        close = Button(_('_Close'), Icons.WINDOW_CLOSE)
        connect_obj(close, 'clicked', qltk.Window.destroy, self)
        bbox.pack_start(close, True, True, 0)
        vbox.pack_end(bbox, False, True, 0)
        self.get_child().pack_start(vbox, True, True, 0)

        self.selection = view.get_selection()
        self.selection.connect('changed', self.__select)
        self.connect('destroy', self.__finish)
        self.get_child().show_all()

    def _find(self, name):
        """Return the stored object with the given name, or None."""
        for row in self.model:
            if (row[0].name == name):
                return row[0]

    def _new_item(self, button):
        # Pick a unique default name: "New X", "New X (2)", ...
        current_name = name = (_('New %s') % self.name)
        n = 2
        while self._find(current_name):
            current_name = ('%s (%d)' % (name, n))
            n += 1
        self.model.append(row=(self.proto_cls(name=current_name),))

    def _new_widget(self, key, val):
        """Build the input widget matching the field's value type."""
        callback = signal = None
        if isinstance(val, bool):
            entry = Gtk.CheckButton()
            callback = self.__toggled_widget
            signal = 'toggled'
        elif isinstance(val, int):
            adj = Gtk.Adjustment.new(0, 0, 9999, 1, 10, 0)
            entry = Gtk.SpinButton(adjustment=adj)
            entry.set_numeric(True)
            callback = self.__changed_numeric_widget
        elif ('pattern' in key):
            entry = ValidatingEntry(validator=Query.validator)
        else:
            entry = UndoEntry()
        entry.connect((signal or 'changed'), (callback or self.__changed_widget), key)
        return entry

    def __refresh_view(self):
        # Force the list cell for the selected row to re-render.
        (model, iter) = self.selection.get_selected()
        self.model.emit('row-changed', model[iter].path, iter)

    def __changed_widget(self, entry, key):
        if self.current:
            setattr(self.current, key, str(entry.get_text()))
            self.__refresh_view()

    def __changed_numeric_widget(self, entry, key):
        if self.current:
            setattr(self.current, key, int((entry.get_text() or 0)))
            self.__refresh_view()

    def __toggled_widget(self, entry, key):
        if self.current:
            setattr(self.current, key, bool(entry.get_active()))
            self.__refresh_view()

    def _populate_fields(self, obj):
        """Load `obj`'s field values into the form widgets."""
        for (fn, val) in obj.data:
            widget = self.input_entries[fn]
            widget.set_sensitive(True)
            if isinstance(val, bool):
                widget.set_active(val)
            elif isinstance(val, int):
                widget.set_value(int(val))
            elif isinstance(val, str):
                widget.set_text((val or ''))

    def __build_input_frame(self):
        t = Gtk.Table(n_rows=2, n_columns=3)
        t.set_row_spacings(6)
        t.set_col_spacing(0, 3)
        t.set_col_spacing(1, 12)
        # An empty prototype instance supplies the field list and types.
        empty = self.proto_cls('empty')
        for (i, (key, val)) in enumerate(empty.data):
            field = empty.field(key)
            field_name = self.get_field_name(field, key)
            l = Gtk.Label(label=(field_name + ':'))
            entry = self._new_widget(key, val)
            entry.set_sensitive(False)
            if field.doc:
                entry.set_tooltip_text(field.doc)
            self.input_entries[key] = entry
            l.set_mnemonic_widget(entry)
            l.set_use_underline(True)
            l.set_alignment(0.0, 0.5)
            if isinstance(val, (int, bool)):
                # Don't let compact widgets stretch across the column.
                align = Align(entry, halign=Gtk.Align.START)
                t.attach(align, 1, 2, i, (i + 1))
            else:
                t.attach(entry, 1, 2, i, (i + 1))
            t.attach(l, 0, 1, i, (i + 1), xoptions=Gtk.AttachOptions.FILL)
        frame = qltk.Frame(label=self.name, child=t)
        self.input_entries['name'].grab_focus()
        return frame

    @staticmethod
    def get_field_name(field, key):
        """Human-readable label for a field, falling back to the key."""
        field_name = (field.human_name or (key and key.replace('_', ' ')))
        return ((field_name and util.capitalize(field_name)) or _('(unknown)'))

    def _fill_values(self, data):
        if (not data):
            return
        for (_name, obj) in data.items():
            self.model.append(row=[obj])

    def _update_current(self, new_selection=None):
        if new_selection:
            self.selection = new_selection
        (model, it) = self.selection.get_selected()
        if it:
            self.current = model[it][0]

    def __select(self, selection):
        self._update_current(selection)
        # Use the selection's tree iter, not the builtin `iter` (which is
        # always truthy and previously kept the Remove button enabled).
        (model, it) = selection.get_selected()
        self.remove_but.set_sensitive(bool(it))
        if (it is not None):
            self._populate_fields(self.current)

    def __remove(self, view):
        view.remove_selection()

    def __popup(self, view, menu):
        return view.popup_menu(menu, 0, Gtk.get_current_event_time())

    def __view_key_press(self, view, event):
        if (event.keyval == Gtk.accelerator_parse('Delete')[0]):
            self.__remove(view)

    def __cdf(self, column, cell, model, iter, data):
        # Render each row as bold name plus description markup.
        row = model[iter]
        obj = row[0]
        obj_name = util.escape(obj.name)
        obj_description = util.escape(str(obj))
        markup = f'''{util.bold(obj_name)}
{obj_description}'''
        cell.markup = markup
        cell.set_property('markup', markup)

    def __finish(self, widget):
        # Persist everything (rows with an empty name are dropped).
        all = JSONObjectDict.from_list([row[0] for row in self.model if row[0].name])
        all.save(filename=self.filename)
def main():
    """Train a causal language model without the HF Trainer, using Accelerate.

    Pipeline: parse args -> set up accelerator/logging -> load or create the
    dataset, config, tokenizer and model -> tokenize and chunk the text into
    `block_size` sequences -> train with gradient accumulation, periodic
    checkpointing and optional checkpoint resume -> evaluate perplexity per
    epoch -> save (and optionally push) the final model.

    Fix applied: the checkpoint-resume condition used `or`
    (`is not None or != ''`), which is always true inside
    `if args.resume_from_checkpoint:` and made the "auto-detect the most
    recent checkpoint folder" branch unreachable; changed to `and`.
    """
    args = parse_args()
    send_example_telemetry('run_clm_no_trainer', args)
    # Tracking kwargs only matter when experiment tracking is enabled.
    accelerator_log_kwargs = {}
    if args.with_tracking:
        accelerator_log_kwargs['log_with'] = args.report_to
        accelerator_log_kwargs['logging_dir'] = args.output_dir
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state, main_process_only=False)
    # Only the local main process logs at full verbosity.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    if (args.seed is not None):
        set_seed(args.seed)
    # Hub repo / output dir creation happens on the main process only.
    if accelerator.is_main_process:
        if args.push_to_hub:
            if (args.hub_model_id is None):
                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
            else:
                repo_name = args.hub_model_id
            create_repo(repo_name, exist_ok=True, token=args.hub_token)
            repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
            with open(os.path.join(args.output_dir, '.gitignore'), 'w+') as gitignore:
                if ('step_*' not in gitignore):
                    gitignore.write('step_*\n')
                if ('epoch_*' not in gitignore):
                    gitignore.write('epoch_*\n')
        elif (args.output_dir is not None):
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    # Dataset: either a Hub dataset name, or local train/validation files.
    if (args.dataset_name is not None):
        raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
        if ('validation' not in raw_datasets.keys()):
            # Carve a validation split out of the train split.
            raw_datasets['validation'] = load_dataset(args.dataset_name, args.dataset_config_name, split=f'train[:{args.validation_split_percentage}%]')
            raw_datasets['train'] = load_dataset(args.dataset_name, args.dataset_config_name, split=f'train[{args.validation_split_percentage}%:]')
    else:
        data_files = {}
        dataset_args = {}
        if (args.train_file is not None):
            data_files['train'] = args.train_file
        if (args.validation_file is not None):
            data_files['validation'] = args.validation_file
        extension = args.train_file.split('.')[(- 1)]
        if (extension == 'txt'):
            extension = 'text'
            dataset_args['keep_linebreaks'] = (not args.no_keep_linebreaks)
        raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
        if ('validation' not in raw_datasets.keys()):
            raw_datasets['validation'] = load_dataset(extension, data_files=data_files, split=f'train[:{args.validation_split_percentage}%]', **dataset_args)
            raw_datasets['train'] = load_dataset(extension, data_files=data_files, split=f'train[{args.validation_split_percentage}%:]', **dataset_args)
    # Model config / tokenizer / model, in that order of preference.
    if args.config_name:
        config = AutoConfig.from_pretrained(args.config_name)
    elif args.model_name_or_path:
        config = AutoConfig.from_pretrained(args.model_name_or_path)
    else:
        config = CONFIG_MAPPING[args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=(not args.use_slow_tokenizer))
    elif args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=(not args.use_slow_tokenizer))
    else:
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
    if args.model_name_or_path:
        model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, low_cpu_mem_usage=args.low_cpu_mem_usage)
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForCausalLM.from_config(config)
    # Grow the embedding matrix if the tokenizer vocab outgrew it.
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if (len(tokenizer) > embedding_size):
        model.resize_token_embeddings(len(tokenizer))
    column_names = raw_datasets['train'].column_names
    text_column_name = ('text' if ('text' in column_names) else column_names[0])

    def tokenize_function(examples):
        return tokenizer(examples[text_column_name])

    with accelerator.main_process_first():
        tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not args.overwrite_cache), desc='Running tokenizer on dataset')
    # Resolve block_size, capped at the tokenizer's model_max_length.
    if (args.block_size is None):
        block_size = tokenizer.model_max_length
        if (block_size > 1024):
            logger.warning('The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can override this default with `--block_size xxx`.')
            block_size = 1024
    else:
        if (args.block_size > tokenizer.model_max_length):
            logger.warning(f'The block_size passed ({args.block_size}) is larger than the maximum length for the model({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}.')
        block_size = min(args.block_size, tokenizer.model_max_length)

    def group_texts(examples):
        # Concatenate all texts, then split into contiguous block_size chunks,
        # dropping the trailing remainder shorter than block_size.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        if (total_length >= block_size):
            total_length = ((total_length // block_size) * block_size)
        result = {k: [t[i:(i + block_size)] for i in range(0, total_length, block_size)] for (k, t) in concatenated_examples.items()}
        # Causal LM: labels are the inputs (shift happens inside the model).
        result['labels'] = result['input_ids'].copy()
        return result

    with accelerator.main_process_first():
        lm_datasets = tokenized_datasets.map(group_texts, batched=True, num_proc=args.preprocessing_num_workers, load_from_cache_file=(not args.overwrite_cache), desc=f'Grouping texts in chunks of {block_size}')
    train_dataset = lm_datasets['train']
    eval_dataset = lm_datasets['validation']
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
    train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size)
    eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size)
    # No weight decay for biases and LayerNorm weights.
    no_decay = ['bias', 'layer_norm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if (args.max_train_steps is None):
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
        overrode_max_train_steps = True
    lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=(args.num_warmup_steps * args.gradient_accumulation_steps), num_training_steps=(args.max_train_steps * args.gradient_accumulation_steps))
    (model, optimizer, train_dataloader, eval_dataloader, lr_scheduler) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    if (accelerator.distributed_type == DistributedType.TPU):
        model.tie_weights()
    # prepare() may have resharded the dataloader: recompute steps/epochs.
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if overrode_max_train_steps:
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    checkpointing_steps = args.checkpointing_steps
    if ((checkpointing_steps is not None) and checkpointing_steps.isdigit()):
        checkpointing_steps = int(checkpointing_steps)
    if args.with_tracking:
        experiment_config = vars(args)
        experiment_config['lr_scheduler_type'] = experiment_config['lr_scheduler_type'].value
        accelerator.init_trackers('clm_no_trainer', experiment_config)
    total_batch_size = ((args.per_device_train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
    logger.info('***** Running training *****')
    logger.info(f'  Num examples = {len(train_dataset)}')
    logger.info(f'  Num Epochs = {args.num_train_epochs}')
    logger.info(f'  Instantaneous batch size per device = {args.per_device_train_batch_size}')
    logger.info(f'  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f'  Gradient Accumulation steps = {args.gradient_accumulation_steps}')
    logger.info(f'  Total optimization steps = {args.max_train_steps}')
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    completed_steps = 0
    starting_epoch = 0
    if args.resume_from_checkpoint:
        # BUG FIX: was `or`, which is always true here and made the
        # auto-detect branch below dead code.
        if ((args.resume_from_checkpoint is not None) and (args.resume_from_checkpoint != '')):
            accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}')
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # No explicit path: resume from the most recently created
            # checkpoint directory in the working directory.
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[(- 1)]
        # Checkpoint dirs are named either 'epoch_<E>' or 'step_<S>'.
        training_difference = os.path.splitext(path)[0]
        if ('epoch' in training_difference):
            starting_epoch = (int(training_difference.replace('epoch_', '')) + 1)
            resume_step = None
        else:
            resume_step = (int(training_difference.replace('step_', '')) * args.gradient_accumulation_steps)
            starting_epoch = (resume_step // len(train_dataloader))
            resume_step -= (starting_epoch * len(train_dataloader))
        progress_bar.update((starting_epoch * num_update_steps_per_epoch))
        completed_steps = (starting_epoch * num_update_steps_per_epoch)
    for epoch in range(starting_epoch, args.num_train_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        for (step, batch) in enumerate(train_dataloader):
            # Skip already-trained steps when resuming mid-epoch.
            if (args.resume_from_checkpoint and (epoch == starting_epoch)):
                if ((resume_step is not None) and (step < resume_step)):
                    if ((step % args.gradient_accumulation_steps) == 0):
                        progress_bar.update(1)
                        completed_steps += 1
                    continue
            with accelerator.accumulate(model):
                outputs = model(**batch)
                loss = outputs.loss
                if args.with_tracking:
                    total_loss += loss.detach().float()
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            # sync_gradients is True once per effective (accumulated) step.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                completed_steps += 1
            if isinstance(checkpointing_steps, int):
                if ((completed_steps % checkpointing_steps) == 0):
                    output_dir = f'step_{completed_steps}'
                    if (args.output_dir is not None):
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
            if (completed_steps >= args.max_train_steps):
                break
        # Per-epoch evaluation: gather per-batch losses, report perplexity.
        model.eval()
        losses = []
        for (step, batch) in enumerate(eval_dataloader):
            with torch.no_grad():
                outputs = model(**batch)
            loss = outputs.loss
            losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))
        losses = torch.cat(losses)
        try:
            eval_loss = torch.mean(losses)
            perplexity = math.exp(eval_loss)
        except OverflowError:
            perplexity = float('inf')
        logger.info(f'epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}')
        if args.with_tracking:
            accelerator.log({'perplexity': perplexity, 'eval_loss': eval_loss, 'train_loss': (total_loss.item() / len(train_dataloader)), 'epoch': epoch, 'step': completed_steps}, step=completed_steps)
        if (args.push_to_hub and (epoch < (args.num_train_epochs - 1))):
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save)
            if accelerator.is_main_process:
                tokenizer.save_pretrained(args.output_dir)
                repo.push_to_hub(commit_message=f'Training in progress epoch {epoch}', blocking=False, auto_lfs_prune=True)
        if (args.checkpointing_steps == 'epoch'):
            output_dir = f'epoch_{epoch}'
            if (args.output_dir is not None):
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
    # Final save (and optional Hub push) of model, tokenizer and metrics.
    if (args.output_dir is not None):
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save)
        if accelerator.is_main_process:
            tokenizer.save_pretrained(args.output_dir)
            if args.push_to_hub:
                repo.push_to_hub(commit_message='End of training', auto_lfs_prune=True)
            with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
                json.dump({'perplexity': perplexity}, f)
def _make_circle_one_point(points, p):
    """Grow the smallest enclosing circle incrementally, with `p` fixed on it.

    Welzl-style helper: starts from the degenerate circle centred at `p`
    (radius 0) and, whenever a scanned point falls outside the current
    circle, recomputes the circle so that it also passes through that
    point. Circles are `(x, y, r)` tuples.
    """
    circle = (p[0], p[1], 0)
    for idx, candidate in enumerate(points):
        if _is_in_circle(circle, candidate):
            continue
        if circle[2] == 0:
            circle = _make_diameter(p, candidate)
        else:
            circle = _make_circle_two_points(points[:idx + 1], p, candidate)
    return circle
class ProjectUpdateView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
    """Edit an existing project; requires object-level change permission."""

    model = Project
    queryset = Project.objects.all()
    form_class = ProjectForm
    permission_required = 'projects.change_project_object'

    def get_form_kwargs(self):
        # Hand the form the catalogs and projects visible to this user
        # (querysets are lazy, so nothing is executed here).
        kwargs = super().get_form_kwargs()
        user = self.request.user
        kwargs.update({
            'catalogs': (Catalog.objects.filter_current_site()
                         .filter_group(user)
                         .filter_availability(user)),
            'projects': Project.objects.filter_user(user),
        })
        return kwargs
class Tree(nn.Module):
    # Recursive aggregation node of a Deep Layer Aggregation (DLA) network:
    # at `levels == 1` it holds two blocks joined by a Root; at deeper
    # levels each child is itself a Tree, and aggregation happens at the
    # leaves via the accumulated `children` list.
    def __init__(self, levels, block, in_channels, out_channels, stride=1, level_root=False, root_dim=0, root_kernel_size=1, dilation=1, root_residual=False):
        super(Tree, self).__init__()
        # Default root input width: the two child outputs concatenated.
        if (root_dim == 0):
            root_dim = (2 * out_channels)
        # A level root additionally aggregates the (downsampled) input.
        if level_root:
            root_dim += in_channels
        if (levels == 1):
            # Leaf: two plain blocks; tree1 applies the stride.
            self.tree1 = block(in_channels, out_channels, stride, dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1, dilation=dilation)
        else:
            # Recurse: tree2's root also receives tree1's output
            # (root_dim + out_channels).
            self.tree1 = Tree((levels - 1), block, in_channels, out_channels, stride, root_dim=0, root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree((levels - 1), block, out_channels, out_channels, root_dim=(root_dim + out_channels), root_kernel_size=root_kernel_size, dilation=dilation, root_residual=root_residual)
        # Only leaves own a Root; inner nodes delegate aggregation downward.
        if (levels == 1):
            self.root = Root(root_dim, out_channels, root_kernel_size, root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        # Match spatial size / channel count of the residual path when needed.
        if (stride > 1):
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if (in_channels != out_channels):
            self.project = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), BatchNorm(out_channels))
    def forward(self, x, residual=None, children=None):
        # NOTE(review): the `residual` parameter is overwritten below and
        # never read — it appears kept only for call-signature compatibility.
        children = ([] if (children is None) else children)
        bottom = (self.downsample(x) if self.downsample else x)
        residual = (self.project(bottom) if self.project else bottom)
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if (self.levels == 1):
            # Leaf: aggregate both block outputs plus any passed-down children.
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            # Inner node: push tree1's output down for tree2's root to use.
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
def conv(x, c):
    """SAME-padded 2-D convolution configured by the dict `c`.

    Expects keys 'ksize', 'stride' and 'conv_filters_out' in `c`; creates a
    weight-decayed 'weights' variable with Xavier initialization via the
    module-level _get_variable helper.
    """
    kernel = c['ksize']
    step = c['stride']
    n_out = c['conv_filters_out']
    n_in = x.get_shape()[-1]
    weights = _get_variable(
        'weights',
        shape=[kernel, kernel, n_in, n_out],
        dtype='float',
        initializer=tf.contrib.layers.xavier_initializer(),
        weight_decay=CONV_WEIGHT_DECAY,
    )
    return tf.nn.conv2d(x, weights, [1, step, step, 1], padding='SAME')
def parsefile(file):
    """Decrypt one STRANGELAND collection file with SlDecoder.exe.

    Writes '<filename>.xml' under the decrypted GetFiles directory and
    returns the dsz command's success flag; echoes an error on failure.
    """
    (path, filename) = dsz.path.Split(file)
    cmdline = ('local run -command "%s\\Tools\\i386-winnt\\SlDecoder.exe %s %s\\GetFiles\\STRANGELAND_Decrypted\\%s.xml"' % (STLA_PATH, file, logdir, filename))
    # Suppress console echo while the external decoder runs.
    dsz.control.echo.Off()
    ok = dsz.cmd.Run(cmdline, dsz.RUN_FLAG_RECORD)
    dsz.control.echo.On()
    if not ok:
        dsz.ui.Echo('There was an error parsing the collection', dsz.ERROR)
    return ok
def init(disp, info):
    """Register the Xinerama extension's methods on the display/window objects."""
    display_methods = (
        ('xinerama_query_version', query_version),
        ('xinerama_is_active', is_active),
        ('xinerama_query_screens', query_screens),
        ('xinerama_get_info', get_info),
    )
    window_methods = (
        ('xinerama_get_state', get_state),
        ('xinerama_get_screen_count', get_screen_count),
        ('xinerama_get_screen_size', get_screen_size),
    )
    for name, func in display_methods:
        disp.extension_add_method('display', name, func)
    for name, func in window_methods:
        disp.extension_add_method('window', name, func)
_if_asan_class
class ShardedEmbeddingCollectionParallelTest(MultiProcessTestBase):
    # NOTE(review): `_if_asan_class` above and the three bare expressions
    # below look like decorators whose leading '@' was stripped from this
    # copy (skip-if-ASAN, unittest.skipIf, hypothesis @settings and @given);
    # as written the keyword-argument lines are not valid Python — restore
    # the decorators from the original torchrec test file.
    ((torch.cuda.device_count() <= 1), 'Not enough GPUs, this test requires at least two GPUs')
    (verbosity=Verbosity.verbose, max_examples=10, deadline=None)
    (use_apply_optimizer_in_backward=st.booleans(), use_index_dedup=st.booleans())
    def test_sharding_ebc(self, use_apply_optimizer_in_backward: bool, use_index_dedup: bool) -> None:
        """Shard three embedding tables across two NCCL ranks and run the
        shared _test_sharding routine with per-rank KJT inputs."""
        WORLD_SIZE = 2
        embedding_config = [EmbeddingConfig(name='table_0', feature_names=['feature_0'], embedding_dim=8, num_embeddings=4), EmbeddingConfig(name='table_1', feature_names=['feature_0', 'feature_1'], embedding_dim=8, num_embeddings=4), EmbeddingConfig(name='table_2', feature_names=['feature_0', 'feature_1'], embedding_dim=8, num_embeddings=4)]
        kjt_input_per_rank = [KeyedJaggedTensor.from_lengths_sync(keys=['feature_0', 'feature_1'], values=torch.LongTensor([0, 1, 2, 0, 1, 2]), lengths=torch.LongTensor([2, 0, 1, 2, 0, 1])), KeyedJaggedTensor.from_lengths_sync(keys=['feature_0', 'feature_1'], values=torch.LongTensor([3, 2, 1, 2, 0, 1, 2, 3, 2, 3, 2]), lengths=torch.LongTensor([2, 2, 4, 2, 0, 1]))]
        self._run_multi_process_test(callable=_test_sharding, world_size=WORLD_SIZE, tables=embedding_config, kjt_input_per_rank=kjt_input_per_rank, backend='nccl', use_apply_optimizer_in_backward=use_apply_optimizer_in_backward, use_index_dedup=use_index_dedup)
def get_fold(dataset, fold=None, cross_validation_ratio=0.2, num_valid_per_point=4, seed=0, shuffle=True):
    """Build cross-validation splits of `dataset`.

    `fold` is a string of the form '<name>_<sweep index>_<fold index>'.
    When given, only that (sweep, fold) pair is materialized and returned as
    a (train_data, val_data) pair of DRODatasets.  When `fold` is None, the
    full nested structure all_folds[sweep][fold] of (train_split, valid_split)
    Subset pairs is returned instead.
    """
    if (fold is not None):
        # Parse "<name>_<sweep>_<fold>".
        indices = fold.split('_')[1:]
        sweep_ind = int(indices[0])
        fold_ind = int(indices[1])
        # Both are ints at this point, so the `is None` arms never fire.
        assert ((sweep_ind is None) or (sweep_ind < num_valid_per_point))
        assert ((fold_ind is None) or (fold_ind < int((1 / cross_validation_ratio))))
    valid_size = int(np.ceil((len(dataset) * cross_validation_ratio)))
    num_valid_sets = int(np.ceil((len(dataset) / valid_size)))
    # One seeded RNG drives all sweeps so every (sweep, fold) is reproducible;
    # the shuffle order therefore depends on iterating sweeps in order.
    random = np.random.RandomState(seed)
    all_folds = []
    for sweep_counter in range(num_valid_per_point):
        folds = []
        indices = list(range(len(dataset)))
        if shuffle:
            random.shuffle(indices)
        else:
            print(('\n' * 10), 'WARNING, NOT SHUFFLING', ('\n' * 10))
        for i in range(num_valid_sets):
            # Validation block i; everything outside it is training data.
            train_indices = (indices[:(i * valid_size)] + indices[((i + 1) * valid_size):])
            print('len(train_indices)', len(train_indices))
            train_split = Subset(dataset, train_indices)
            valid_indices = indices[(i * valid_size):((i + 1) * valid_size)]
            print('len(valid_indices)', len(valid_indices))
            valid_split = Subset(dataset, valid_indices)
            if ((sweep_counter == 0) and (i == 0)):
                print('train_split', train_split, 'valid_split', valid_split)
            folds.append((train_split, valid_split))
        all_folds.append(folds)
    if (fold is not None):
        # Wrap the requested split in DRODatasets carrying group metadata.
        (train_data_subset, val_data_subset) = all_folds[sweep_ind][fold_ind]
        train_data = dro_dataset.DRODataset(train_data_subset, process_item_fn=None, n_groups=dataset.n_groups, n_classes=dataset.n_classes, group_str_fn=dataset.group_str)
        val_data = dro_dataset.DRODataset(val_data_subset, process_item_fn=None, n_groups=dataset.n_groups, n_classes=dataset.n_classes, group_str_fn=dataset.group_str)
        return (train_data, val_data)
    else:
        return all_folds
class Experiment(object):
    """Base class for a Keras training experiment.

    Bundles output-directory creation, parameter snapshots, model
    save/load/summary helpers and file+stdout logging.  Subclasses are
    expected to set ``self.model_name`` and ``self.models`` (a list of
    Keras models) before the save/load helpers are used.
    """

    def get_model_name(self, model_name):
        """Sanitize `model_name` so it is safe to use as a directory name."""
        model_name = model_name.replace(' ', '').replace(')', '').replace('(', '').replace('[', '').replace(']', '').replace(',', '-').replace("'", '')
        return model_name

    def __init__(self, data_params, arch_params, loaded_from_dir=None, exp_root='C:\\experiments', prompt_delete_existing=True, prompt_update_name=False, do_logging=True):
        self.do_logging = do_logging
        self.arch_params = arch_params
        self.data_params = data_params
        # NOTE(review): get_model_name() requires a model_name argument and
        # does not set self.model_name, yet self.model_name is read on the
        # next line — this only works if a subclass overrides get_model_name
        # to take no argument and assign self.model_name; confirm.
        self.get_model_name()
        (self.model_name, self.exp_dir, self.figures_dir, self.logs_dir, self.models_dir) = utils.make_output_dirs(self.model_name, exp_root=exp_root, prompt_delete_existing=prompt_delete_existing, prompt_update_name=prompt_update_name, existing_exp_dir=loaded_from_dir)
        self.validation_losses_buffer = []
        self._init_logger()

    def set_debug_mode(self, do_debug=True):
        """Toggle the debug flag (read by subclasses)."""
        self.debug = do_debug

    def get_dirs(self):
        """Return (exp_dir, figures_dir, logs_dir, models_dir)."""
        return (self.exp_dir, self.figures_dir, self.logs_dir, self.models_dir)

    def load_data(self):
        # NOTE(review): despite its name, this only snapshots data_params to
        # disk (duplicating half of _save_params); subclasses presumably
        # override it to actually load data.
        with open(os.path.join(self.exp_dir, 'data_params.json'), 'w') as f:
            json.dump(self.data_params, f)

    def _save_params(self):
        """Snapshot the arch/data parameter dicts into the experiment dir."""
        with open(os.path.join(self.exp_dir, 'arch_params.json'), 'w') as f:
            json.dump(self.arch_params, f)
        with open(os.path.join(self.exp_dir, 'data_params.json'), 'w') as f:
            json.dump(self.data_params, f)

    def create_models(self):
        """Print model summaries; subclasses build self.models beforehand."""
        self._print_models()
        return None

    def load_models(self, load_epoch=None, stop_on_missing=True, init_layers=False):
        """Load saved weights for every model in self.models.

        `load_epoch` may be an int, the string 'latest', or None.  Returns
        the epoch to resume training from (load_epoch + 1), or 0 when no
        epoch was requested/found.
        """
        if (load_epoch == 'latest'):
            load_epoch = utils.get_latest_epoch_in_dir(self.models_dir)
            self.logger.debug('Found latest epoch {} in dir {}'.format(load_epoch, self.models_dir))
        if ((load_epoch is not None) and (int(load_epoch) > 0)):
            self.logger.debug('Looking for models in {}'.format(self.models_dir))
            found_a_model = False
            model_files = os.listdir(self.models_dir)
            for m in self.models:
                # Match files named "<model name>_epoch<load_epoch>...".
                model_filename = [os.path.join(self.models_dir, mf) for mf in model_files if ((mf.split('_epoch')[0] == m.name) and ('epoch{}'.format(load_epoch) in mf))]
                if (len(model_filename) == 0):
                    self.logger.debug('Could not find any model files with name {} and epoch {}!'.format(m.name, load_epoch))
                    model_filename = None
                    if stop_on_missing:
                        sys.exit()
                    continue
                else:
                    model_filename = model_filename[0]
                if os.path.isfile(model_filename):
                    self.logger.debug('Loading model {} from {}'.format(m.name, model_filename))
                    try:
                        m.load_weights(model_filename)
                    except ValueError:
                        # Architecture mismatch; only tolerated when the
                        # caller intends to (re-)initialize layers.
                        self.logger.debug('FAILED TO LOAD WEIGHTS DIRECTLY')
                        if (not init_layers):
                            sys.exit()
                    except IndexError:
                        self.logger.debug('FAILED TO LOAD WEIGHTS DIRECTLY')
                        if (not init_layers):
                            sys.exit()
                    found_a_model = True
                elif (not os.path.isfile(model_filename)):
                    self.logger.debug('Could not find model file {}!'.format(model_filename))
                    if stop_on_missing:
                        sys.exit()
            if (not found_a_model):
                self.logger.debug('Did not find any models with epoch {} in dir {}!'.format(load_epoch, self.models_dir))
                load_epoch = 0
            self.latest_epoch = (int(load_epoch) + 1)
            return (int(load_epoch) + 1)
        else:
            return 0

    def create_generators(self, batch_size):
        """Subclass hook: build data generators for training/validation."""
        print('create_generators not implemented')

    def compile_models(self):
        # Only snapshots arch_params; subclasses compile their models here.
        with open(os.path.join(self.exp_dir, 'arch_params.json'), 'w') as f:
            json.dump(self.arch_params, f)

    def _print(self, msg):
        """Log `msg` via the logger when available, else plain print."""
        if (self.logger is not None):
            self.logger.debug(msg)
        else:
            print(msg)

    def _print_models(self, save_figs=True, figs_dir=None, models_to_print=None):
        """Print model summaries; optionally save diagram + summary files."""
        if (figs_dir is None):
            figs_dir = self.exp_dir
        if ((models_to_print is None) and hasattr(self, 'models_to_print')):
            models_to_print = (self.models_to_print + self.models)
        elif (models_to_print is None):
            models_to_print = self.models
        for m in models_to_print:
            print(m.name)
            m.summary(line_length=120)
            if save_figs:
                plot_model(m, to_file=os.path.join(figs_dir, (m.name + '.jpg')), show_shapes=True)
                # Also capture the text summary for later inspection.
                with open(os.path.join(figs_dir, (m.name + '.txt')), 'w') as fh:
                    m.summary(print_fn=(lambda x: fh.write((x + '\n'))), line_length=120)

    def save_models(self, epoch, iter_count=None):
        """Save every model as '<name>_epoch<epoch>[_iter<iter>].h5'."""
        for m in self.models:
            self.logger.debug('Saving model {} epoch {}'.format(m.name, epoch))
            if (iter_count is not None):
                m.save(os.path.join(self.models_dir, '{}_epoch{}_iter{}.h5'.format(m.name, epoch, iter_count)))
            else:
                m.save(os.path.join(self.models_dir, '{}_epoch{}.h5'.format(m.name, epoch)))
        return 0

    def save_exp_info(self, exp_dir, figures_dir, models_dir, logs_dir):
        """Overwrite the experiment's directory attributes."""
        self.exp_dir = exp_dir
        self.figures_dir = figures_dir
        self.logs_dir = logs_dir
        self.models_dir = models_dir

    def _init_logger(self):
        """Create a DEBUG logger writing to stdout and (optionally) a file."""
        if (not hasattr(self, 'logger')):
            self.logger = None
        if (self.logger is None):
            formatter = logging.Formatter('[%(asctime)s] %(message)s', '%Y-%m-%d %H:%M:%S')
            if self.do_logging:
                lfh = logging.FileHandler(filename=os.path.join(self.exp_dir, 'experiment.log'))
                lfh.setFormatter(formatter)
                lfh.setLevel(logging.DEBUG)
            lsh = logging.StreamHandler(sys.stdout)
            lsh.setFormatter(formatter)
            lsh.setLevel(logging.DEBUG)
            self.logger = logging.getLogger(self.__class__.__name__)
            self.logger.setLevel(logging.DEBUG)
            # Replace any handlers left over from a previous instance.
            self.logger.handlers = []
            if self.do_logging:
                self.logger.addHandler(lfh)
            self.logger.addHandler(lsh)

    def _reopen_log_file(self):
        """Re-open the file handler (e.g. after the log file was moved)."""
        if (self.logger is not None):
            self.logger.handlers[0].close()
            lfh = logging.FileHandler(filename=os.path.join(self.exp_dir, 'experiment.log'))
            self.logger.handlers[0] = lfh

    def train_discriminator(self):
        """Subclass hook for GAN-style training; returns (losses, names)."""
        return ([], [])
class Migration(migrations.Migration):
    """Auto-generated migration: adds the FAQ model (i18n question/answer
    text tied to a Conference), unique per (question, conference)."""

    dependencies = [('conferences', '0011_auto__2340'), ('cms', '0004_auto__0814')]
    operations = [migrations.CreateModel(name='FAQ', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('question', i18n.fields.I18nTextField(verbose_name='question')), ('answer', i18n.fields.I18nTextField(verbose_name='answer')), ('conference', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='faqs', to='conferences.Conference', verbose_name='conference'))], options={'verbose_name': 'FAQ', 'verbose_name_plural': 'FAQs', 'unique_together': {('question', 'conference')}})]
def binary_CIFAR100(cls1, cls2, train=False, batch_size=None, augm_flag=False, val_size=None):
    """Data loader(s) for binary classification between two CIFAR-100 classes.

    Keeps only the samples whose label is `cls1` (remapped to 0.0) or `cls2`
    (remapped to 1.0).

    Returns a single DataLoader when `train` is True or `val_size` is None;
    otherwise splits the test set and returns (val_loader, test_loader).

    NOTE(review): the sequential remapping below assumes cls2 != 0 —
    if cls2 == 0, samples already mapped to 0.0 would be re-mapped to 1.0
    by the second np.where; confirm callers never pass cls2 == 0.
    """
    if batch_size is None:  # fixed: identity comparison, not `== None`
        # Fall back to the module-level defaults.
        batch_size = train_batch_size if train else test_batch_size
    transform_base = [transforms.ToTensor()]
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, padding=4, padding_mode='reflect'),
    ] + transform_base)
    transform_test = transforms.Compose(transform_base)
    # Per-sample random choice between the augmented and plain pipeline.
    transform_train = transforms.RandomChoice([transform_train, transform_test])
    transform = transform_train if (augm_flag and train) else transform_test
    dataset = datasets.CIFAR100(path, train=train, transform=transform)
    # Keep only the two requested classes.
    targets = np.array(dataset.targets)
    masks = np.logical_or(targets == cls1, targets == cls2)
    idxs = np.nonzero(masks)[0]  # fixed: `masks == True` anti-idiom
    dataset.data = dataset.data[idxs]
    dataset.targets = targets[idxs]
    # Remap labels: cls1 -> 0.0, cls2 -> 1.0 (floats for BCE-style losses).
    dataset.targets = np.where(dataset.targets == cls1, 0.0, dataset.targets)
    dataset.targets = np.where(dataset.targets == cls2, 1.0, dataset.targets)
    if train or (val_size is None):
        return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=train, num_workers=4)
    # Split the (test) set into validation and test portions.
    test_size = len(dataset) - val_size
    (dataset_val, dataset_test) = data_utils.random_split(dataset, (val_size, test_size))
    val_loader = torch.utils.data.DataLoader(dataset_val, batch_size=batch_size, shuffle=train, num_workers=4)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size, shuffle=train, num_workers=4)
    return (val_loader, test_loader)
class _Webhooks(EnvConfig, env_prefix='webhooks_'):
    """Discord webhook configuration, loaded from WEBHOOKS_*-prefixed
    environment variables.

    NOTE(review): every `id=` value has been stripped from this copy —
    `Webhook(id=, ...)` is not valid Python; restore the numeric webhook IDs.
    """

    big_brother: Webhook = Webhook(id=, channel=Channels.big_brother)
    dev_log: Webhook = Webhook(id=, channel=Channels.dev_log)
    duck_pond: Webhook = Webhook(id=, channel=Channels.duck_pond)
    incidents: Webhook = Webhook(id=, channel=Channels.incidents)
    incidents_archive: Webhook = Webhook(id=, channel=Channels.incidents_archive)
    python_news: Webhook = Webhook(id=, channel=Channels.python_news)
class ReactionDrivenODE(BaseModel):
    """Porosity submodel where porosity evolves via a reaction-driven ODE.

    The porosity change rate in each electrode is proportional to the
    volumetric interfacial current density (scaled by DeltaVsurf / F);
    the separator porosity is held constant.

    Parameters
    ----------
    param :
        Model parameter object (provides F and per-domain parameters).
    options : dict
        Model options (supplies ``whole_cell_domains``).
    x_average : bool
        If True, use one x-averaged porosity variable per domain and
        broadcast it; otherwise use a full spatially-varying variable.
    """

    def __init__(self, param, options, x_average):
        super().__init__(param, options)
        self.x_average = x_average

    def get_fundamental_variables(self):
        """Create the porosity state variables (x-averaged or full)."""
        eps_dict = {}
        for domain in self.options.whole_cell_domains:
            Domain = domain.capitalize()
            if (self.x_average is True):
                # One scalar porosity per domain, broadcast over x.
                eps_k_av = pybamm.Variable(f'X-averaged {domain} porosity', domain='current collector', bounds=(0, 1))
                eps_k = pybamm.PrimaryBroadcast(eps_k_av, domain)
            else:
                eps_k = pybamm.Variable(f'{Domain} porosity', domain=domain, auxiliary_domains={'secondary': 'current collector'}, bounds=(0, 1))
            eps_dict[domain] = eps_k
        variables = self._get_standard_porosity_variables(eps_dict)
        return variables

    def get_coupled_variables(self, variables):
        """Compute porosity change rates from interfacial current densities."""
        param = self.param
        depsdt_dict = {}
        for domain in self.options.whole_cell_domains:
            domain_param = self.param.domain_params[domain.split()[0]]
            if (domain == 'separator'):
                # No reactions in the separator: porosity is constant there.
                depsdt_k = pybamm.FullBroadcast(0, domain, 'current collector')
            elif (self.x_average is True):
                a_j_k_av = variables[f'X-averaged {domain} volumetric interfacial current density [A.m-3]']
                # deps/dt = DeltaVsurf * a*j / F
                depsdt_k_av = ((domain_param.DeltaVsurf * a_j_k_av) / param.F)
                depsdt_k = pybamm.PrimaryBroadcast(depsdt_k_av, domain)
            else:
                Domain = domain.capitalize()
                a_j_k = variables[f'{Domain} volumetric interfacial current density [A.m-3]']
                depsdt_k = ((domain_param.DeltaVsurf * a_j_k) / param.F)
            depsdt_dict[domain] = depsdt_k
        variables.update(self._get_standard_porosity_change_variables(depsdt_dict))
        return variables

    def set_rhs(self, variables):
        """d(eps)/dt equals the porosity change computed above."""
        if (self.x_average is True):
            for domain in self.options.whole_cell_domains:
                eps_av = variables[f'X-averaged {domain} porosity']
                deps_dt_av = variables[f'X-averaged {domain} porosity change [s-1]']
                self.rhs.update({eps_av: deps_dt_av})
        else:
            eps = variables['Porosity']
            deps_dt = variables['Porosity change']
            self.rhs = {eps: deps_dt}

    def set_initial_conditions(self, variables):
        """Initial porosity comes from the (per-domain) parameter values."""
        if (self.x_average is True):
            for domain in self.options.whole_cell_domains:
                eps_k_av = variables[f'X-averaged {domain} porosity']
                domain_param = self.param.domain_params[domain.split()[0]]
                self.initial_conditions[eps_k_av] = domain_param.epsilon_init
        else:
            eps = variables['Porosity']
            self.initial_conditions = {eps: self.param.epsilon_init}

    def set_events(self, variables):
        """Terminate the simulation if any electrode porosity leaves (0, 1)."""
        for domain in self.options.whole_cell_domains:
            if (domain == 'separator'):
                continue
            Domain = domain.capitalize()
            eps_k = variables[f'{Domain} porosity']
            self.events.append(pybamm.Event(f'Zero {domain} porosity cut-off', pybamm.min(eps_k), pybamm.EventType.TERMINATION))
            self.events.append(pybamm.Event(f'Max {domain} porosity cut-off', (1 - pybamm.max(eps_k)), pybamm.EventType.TERMINATION))
class PDControlWithRate():
    """Proportional-derivative controller fed a directly measured rate.

    Command: u = kp * (y_ref - y) - kd * ydot, saturated to [-limit, limit].
    """

    def __init__(self, kp=0.0, kd=0.0, limit=1.0):
        self.kp = kp
        self.kd = kd
        self.limit = limit

    def update(self, y_ref, y, ydot):
        """Return the saturated control command for the current state."""
        raw = self.kp * (y_ref - y) - self.kd * ydot
        return self._saturate(raw)

    def _saturate(self, u):
        # Clamp the command into [-limit, limit].
        if u >= self.limit:
            return self.limit
        if u <= -self.limit:
            return -self.limit
        return u
def test_record_property(pytester: Pytester, run_and_parse: RunAndParse) -> None:
    """record_property entries (from a fixture and from the test body) must
    appear as <property> children of the testcase's <properties> node.

    NOTE(review): inside the generated module `other` is used as a fixture
    by `test_record`; its decorator appears stripped in this copy (the lone
    ' ' line in the string was presumably '@pytest.fixture') — confirm
    against the original pytest junitxml test.
    """
    pytester.makepyfile('\n    import pytest\n\n    \n    def other(record_property):\n        record_property("bar", 1)\n    def test_record(record_property, other):\n        record_property("foo", "<1");\n    ')
    (result, dom) = run_and_parse()
    node = dom.find_first_by_tag('testsuite')
    tnode = node.find_first_by_tag('testcase')
    psnode = tnode.find_first_by_tag('properties')
    pnodes = psnode.find_by_tag('property')
    # Values are serialized as strings, in registration order.
    pnodes[0].assert_attr(name='bar', value='1')
    pnodes[1].assert_attr(name='foo', value='<1')
    result.stdout.fnmatch_lines(['*= 1 passed in *'])
class FC6_DmRaidData(BaseData):
    """Kickstart data object for one FC6 'dmraid' command: a named BIOS
    RAID set and its member devices."""

    removedKeywords = BaseData.removedKeywords
    removedAttrs = BaseData.removedAttrs

    def __init__(self, *args, **kwargs):
        BaseData.__init__(self, *args, **kwargs)
        self.name = kwargs.get('name', '')
        self.devices = kwargs.get('devices', [])
        self.dmset = kwargs.get('dmset', None)

    def __eq__(self, y):
        # Any falsy comparand (None in practice) compares unequal.
        if not y:
            return False
        return self.name == y.name and self.devices == y.devices

    def __ne__(self, y):
        return not (self == y)

    def __str__(self):
        parts = [BaseData.__str__(self), 'dmraid --name=%s' % self.name]
        parts.extend(' --dev="%s"' % dev for dev in self.devices)
        return ''.join(parts) + '\n'
def demo_model_parallel(rank, world_size):
    """One training step of a two-GPU model-parallel ToyMpModel under DDP.

    Each rank owns the GPU pair (2*rank, 2*rank + 1); DDP is constructed
    without device_ids because the wrapped module itself spans devices.
    """
    print(f'Running DDP with model parallel example on rank {rank}.')
    setup(rank, world_size)
    # Two consecutive devices per process.
    dev0, dev1 = rank * 2, rank * 2 + 1
    ddp_mp_model = DDP(ToyMpModel(dev0, dev1))
    loss_fn = nn.MSELoss()
    optimizer = optim.SGD(ddp_mp_model.parameters(), lr=0.001)
    optimizer.zero_grad()
    outputs = ddp_mp_model(torch.randn(20, 10))
    # The model's output lives on dev1, so the targets must too.
    labels = torch.randn(20, 5).to(dev1)
    loss_fn(outputs, labels).backward()
    optimizer.step()
    cleanup()
class Basic():
    """Caches a WeChat access token and refreshes it shortly before expiry."""

    def __init__(self):
        # __accessToken: last token fetched; __leftTime: seconds until expiry.
        self.__accessToken = ''
        self.__leftTime = 0

    def __real_get_access_token(self):
        """Fetch a fresh access token from the WeChat token endpoint."""
        appId = 'xxxxxxxxxxxxx'
        appSecret = 'xxxxxxxxxxxxxxxxxxxxx'
        # NOTE(review): the endpoint URL format string was evidently stripped
        # from this copy — as written the next line is not valid Python; it
        # should be the cgi-bin/token?grant_type=client_credential&appid=%s&
        # secret=%s URL interpolated with (appId, appSecret).
        postUrl = (' % (appId, appSecret))
        urlResp = urllib.urlopen(postUrl)
        urlResp = json.loads(urlResp.read())
        self.__accessToken = urlResp['access_token']
        self.__leftTime = urlResp['expires_in']

    def get_access_token(self):
        """Return a valid token, refreshing when fewer than 10s remain."""
        if (self.__leftTime < 10):
            self.__real_get_access_token()
        return self.__accessToken

    def run(self):
        """Background loop: count down the token lifetime, refresh near expiry."""
        while True:
            if (self.__leftTime > 10):
                time.sleep(2)
                self.__leftTime -= 2
            else:
                self.__real_get_access_token()
class ModelVarClass(VariableClass, metaclass=RegisteringChoiceType):
    """autovar variable 'model': builds a TorchModel from an encoded name.

    The model-name pattern is '<dataaug->?<loss>-tor-<arch><-hyper>?'.
    """

    var_name = 'model'

    # NOTE(review): this bare call looks like a registration decorator whose
    # '@' prefix was stripped in this copy — confirm against autovar usage.
    _var(argument='(?P<dataaug>[a-zA-Z0-9]+-)?(?P<loss>[a-zA-Z0-9\\.]+)-tor-(?P<arch>[a-zA-Z0-9_]+)(?P<hyper>-[a-zA-Z0-9\\.]+)?')
    def torch_model(auto_var, inter_var, dataaug, loss, arch, hyper, trnX, trny, n_channels, multigpu=False, trn_log_callbacks=None):
        """Instantiate a TorchModel configured from the parsed name parts."""
        from .torch_model import TorchModel
        # Drop the trailing '-' captured by the dataaug regex group.
        dataaug = (dataaug[:(- 1)] if dataaug else None)
        n_features = trnX.shape[1:]
        n_classes = len(np.unique(trny))
        dataset_name = auto_var.get_variable_name('dataset')
        params: dict = get_hyper(hyper, loss, arch, dataset_name)
        params['eps'] = auto_var.get_var('eps')
        params['norm'] = auto_var.get_var('norm')
        params['loss_name'] = loss
        params['n_features'] = n_features
        params['n_classes'] = n_classes
        params['train_type'] = None
        params['architecture'] = arch
        params['multigpu'] = multigpu
        params['n_channels'] = n_channels
        params['dataaug'] = dataaug
        model = TorchModel(lbl_enc=inter_var['lbl_enc'], **params)
        return model
# NOTE(review): the leading '.end_to_end()' looks like a stripped
# '@pytest.mark.end_to_end()' decorator (and the '.try_last'/'.try_first'
# inside the generated source were '@pytask.mark.*'); as written this line
# is not valid Python — restore the decorators.
.end_to_end()
def test_scheduling_w_mixed_priorities(runner, tmp_path):
    """A task marked both try_first and try_last must fail collection."""
    source = '\n    import pytask\n\n    .try_last\n    .try_first\n    def task_mixed(): pass\n    '
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.COLLECTION_FAILED)
    assert ('Could not collect' in result.output)
    assert ('The task cannot have' in result.output)
class CCZ2TFactory(MagicStateFactory):
    """Magic state factory cost model: two levels of 15-to-1 T distillation
    feeding T-to-CCZ conversion (constants follow the Gidney/Fowler-style
    CCZ factory analysis — confirm against the cited paper).

    Attributes:
        distillation_l1_d: code distance used for level-1 distillation.
        distillation_l2_d: code distance used for level-2 distillation.
    """

    distillation_l1_d: int = 15
    distillation_l2_d: int = 31
    qec_scheme: qec.QuantumErrorCorrectionSchemeSummary = qec.FowlerSuperconductingQubits

    # ---- level 0: raw injected T states ----
    def l0_state_injection_error(self, phys_err: float) -> float:
        """Error rate of direct T-state injection (taken equal to phys_err)."""
        return phys_err

    def l0_topo_error_t_gate(self, phys_err: float) -> float:
        """Topological error of a level-0 T gate: ~100 error-prone unit
        cells at half the level-1 code distance."""
        topo_error_per_unit_cell = self.qec_scheme.logical_error_rate(physical_error_rate=phys_err, code_distance=(self.distillation_l1_d // 2))
        return (100 * topo_error_per_unit_cell)

    def l0_error(self, phys_err: float) -> float:
        """Total level-0 T-state error: injection plus topological."""
        return (self.l0_state_injection_error(phys_err) + self.l0_topo_error_t_gate(phys_err))

    # ---- level 1: first 15-to-1 distillation round ----
    def l1_topo_error_factory(self, phys_err: float) -> float:
        """Topological error of one L1 factory (~1000 unit cells)."""
        return (1000 * self.qec_scheme.logical_error_rate(physical_error_rate=phys_err, code_distance=self.distillation_l1_d))

    def l1_topo_error_t_gate(self, phys_err: float) -> float:
        """Topological error of a T gate at the L1 code distance."""
        return (100 * self.qec_scheme.logical_error_rate(physical_error_rate=phys_err, code_distance=self.distillation_l1_d))

    def l1_distillation_error(self, phys_err: float) -> float:
        """Suppressed error after 15-to-1 distillation: 35 * p^3."""
        return (35 * (self.l0_error(phys_err) ** 3))

    def l1_error(self, phys_err: float) -> float:
        """Total error of one L1-distilled T state."""
        return ((self.l1_topo_error_factory(phys_err) + self.l1_topo_error_t_gate(phys_err)) + self.l1_distillation_error(phys_err))

    # ---- level 2: CCZ production ----
    def l2_error(self, phys_err: float) -> float:
        """Total error of one CCZ state from the level-2 factory
        (topological + 28 * p^2 distillation suppression)."""
        l2_topo_error_factory = (1000 * self.qec_scheme.logical_error_rate(physical_error_rate=phys_err, code_distance=self.distillation_l2_d))
        l2_distillation_error = (28 * (self.l1_error(phys_err) ** 2))
        return (l2_topo_error_factory + l2_distillation_error)

    def footprint(self) -> int:
        """Physical-qubit footprint: six L1 factories plus one L2 factory,
        each costing 4*8*2*d^2 qubits."""
        l1 = (((4 * 8) * 2) * (self.distillation_l1_d ** 2))
        l2 = (((4 * 8) * 2) * (self.distillation_l2_d ** 2))
        return ((6 * l1) + l2)

    def distillation_error(self, n_magic: AlgorithmSummary, phys_err: float) -> float:
        """Total distillation error over the whole algorithm: per-CCZ error
        times the number of CCZ states (T gates consumed two per CCZ)."""
        n_ccz_states = (n_magic.toffoli_gates + math.ceil((n_magic.t_gates / 2)))
        return (self.l2_error(phys_err) * n_ccz_states)

    def n_cycles(self, n_magic: AlgorithmSummary) -> int:
        """Surface-code cycles to produce all magic states for the algorithm."""
        distillation_d = max(((2 * self.distillation_l1_d) + 1), self.distillation_l2_d)
        n_ccz_states = (n_magic.toffoli_gates + math.ceil((n_magic.t_gates / 2)))
        # Pairs of T gates are produced by catalyzed conversion.
        catalyzations = math.ceil((n_magic.t_gates / 2))
        # Each CCZ state takes ~5.5 rounds of the factory pipeline.
        ccz_depth = 5.5
        return math.ceil((((n_ccz_states * ccz_depth) + catalyzations) * distillation_d))
class FPNFFConv(nn.Module):
    """Residual bottleneck refinement block (1x1 -> 3x3 -> 1x1 convs with
    BN/ReLU) whose output width equals its input width; the refined
    features are added back to the input before a final ReLU."""

    def __init__(self, in_channels):
        super(FPNFFConv, self).__init__()
        mid = in_channels // 4  # bottleneck width
        self.relu = nn.ReLU(inplace=True)
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels, mid, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, mid, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, in_channels, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(in_channels),
        )

    def forward(self, x):
        # Residual connection around the bottleneck, then a final ReLU.
        refined = self.bottleneck(x)
        return self.relu(refined + x)
def test_put_automatic_versioning(registry_storage):
    """Storing a second type under an existing name must bump the version by one."""
    name = 'test'
    first_type = StructType(fields=[IntType(bits=32)])
    second_type = StructType(fields=[IntType(bits=24)])
    first_version = registry_storage.put(name, first_type)
    second_version = registry_storage.put(name, second_type)
    assert second_version == first_version + 1
def _process_message(message: Dict[(str, Any)], ws: WebSocketT) -> None:
    """Dispatch one decoded websocket message.

    A 'call' message invokes a locally exposed function and sends back a
    'return' payload; a 'return' message resolves a pending outgoing call
    (via registered callbacks or by stashing the value); anything else is
    logged as invalid.
    """
    if ('call' in message):
        status, return_val, error_info = 'ok', None, {}
        try:
            return_val = _exposed_functions[message['name']](*message['args'])
        except Exception as e:
            traceback.print_exc()
            status = 'error'
            error_info = {
                'errorText': repr(e),
                'errorTraceback': traceback.format_exc(),
            }
        _repeated_send(ws, _safe_json({
            'return': message['call'],
            'status': status,
            'value': return_val,
            'error': error_info,
        }))
    elif ('return' in message):
        call_id = message['return']
        if (call_id not in _call_return_callbacks):
            # Nobody registered a callback; stash the value for polling.
            _call_return_values[call_id] = message['value']
            return
        (callback, error_callback) = _call_return_callbacks.pop(call_id)
        if (message['status'] == 'ok'):
            callback(message['value'])
        elif ((message['status'] == 'error') and (error_callback is not None)):
            # The remote side reports its traceback under 'stack'.
            error_callback(message['error'], message['stack'])
    else:
        print('Invalid message received: ', message)
def DBindex(cl_data_file):
    """Davies-Bouldin index of a class-keyed feature dict.

    `cl_data_file` maps class label -> (n_samples, dim) array of features.
    Lower values mean more compact, better-separated classes.
    """
    classes = list(cl_data_file.keys())
    n_classes = len(classes)
    # Per-class centroids and RMS distance of samples to their centroid.
    means = np.array([np.mean(cl_data_file[cl], axis=0) for cl in classes])
    spreads = [
        np.sqrt(np.mean(np.sum(np.square(cl_data_file[cl] - means[k]), axis=1)))
        for k, cl in enumerate(classes)
    ]
    # Pairwise centroid distances.
    diffs = means[np.newaxis, :, :] - means[:, np.newaxis, :]
    centroid_dists = np.sqrt(np.sum(np.square(diffs), axis=2))
    # For each class, the worst (largest) similarity ratio to another class.
    per_class = [
        np.max([(spreads[i] + spreads[j]) / centroid_dists[i, j]
                for j in range(n_classes) if j != i])
        for i in range(n_classes)
    ]
    return np.mean(per_class)
def pytest_configure(config):
    """Register the 'flaky' marker and choose a status DB for this process.

    Under pytest-xdist (with crash-item support available) the controller
    hosts a server DB and workers connect as clients; otherwise a plain
    in-memory DB is used.
    """
    config.addinivalue_line('markers', "flaky(reruns=1, reruns_delay=0): mark test to re-run up to 'reruns' times. Add a delay of 'reruns_delay' seconds between re-runs.")
    if not (config.pluginmanager.hasplugin('xdist') and HAS_PYTEST_HANDLECRASHITEM):
        # Single-process run: plain in-memory status tracking.
        config.failures_db = StatusDB()
        return
    config.pluginmanager.register(XDistHooks())
    if is_master(config):
        config.failures_db = ServerStatusDB()
    else:
        config.failures_db = ClientStatusDB(config.workerinput['sock_port'])
class HashAlgorithm(DataElementGroup):
    """FinTS data element group describing a hash algorithm.

    Field declaration order matters: it defines the wire format.
    """

    # Code describing how the hash is used.
    usage_hash = DataElementField(type='code', max_length=3)
    # Code identifying the hash algorithm itself.
    hash_algorithm = DataElementField(type='code', max_length=3)
    # Code naming the algorithm parameter carried below.
    algorithm_parameter_name = DataElementField(type='code', max_length=3)
    # Optional binary parameter value (up to 512 bytes).
    algorithm_parameter_value = DataElementField(type='bin', max_length=512, required=False)
class TestCRUD(TestCase):
    """CRUD tests for the Style and StyleType models."""

    def setUp(self):
        """Create temp icon/xml files, marker & line style types, a staff
        user, and a baseline style used by the update test."""
        self.image_temp = tempfile.NamedTemporaryFile(suffix='.png').name
        self.xml_temp = tempfile.NamedTemporaryFile(suffix='.xml').name
        self.marker_type = StyleType.objects.create(symbol_type='marker', name='Marker', description='a marker for testing purpose', icon=self.image_temp)
        self.line_type = StyleType.objects.create(symbol_type='line', name='Line', description='a line for testing purpose', icon=self.image_temp)
        self.user_staff = User.objects.create(username='usertest_staff', first_name='first_name_staff', last_name='last_name_staff', email='', password='passwordtest', is_active=True, is_staff=True, is_superuser=False)
        self.style_zero = Style.objects.create(name='style_zero', description='a style for testing purpose', creator=self.user_staff, thumbnail_image=self.image_temp, file=self.xml_temp, style_type=self.marker_type)

    def test_create_style_type(self):
        """A StyleType's string representation is its name."""
        fill_type = StyleType.objects.create(symbol_type='fill', name='Fill', description='a fill for testing purpose', icon=self.image_temp)
        self.assertEqual(fill_type.__str__(), 'Fill')

    def test_create_style(self):
        """Creating a Style persists its name and related objects."""
        style_one = Style.objects.create(name='style_one', description='a style for testing purpose', creator=self.user_staff, thumbnail_image=self.image_temp, file=self.xml_temp, style_type=self.line_type)
        self.assertEqual(style_one.name, 'style_one')
        self.assertEqual(style_one.creator.first_name, 'first_name_staff')
        self.assertEqual(style_one.style_type.name, 'Line')

    def test_update_style(self):
        # NOTE(review): the reassignment is never persisted with .save();
        # this only exercises the in-memory attribute change — confirm that
        # is the intent.
        self.assertEqual(self.style_zero.style_type.name, 'Marker')
        self.style_zero.style_type = self.line_type
        self.assertEqual(self.style_zero.style_type.name, 'Line')
class CmdFind(COMMAND_DEFAULT_CLASS):
    """
    search the database for objects

    Usage:
      find[/switches] <name or dbref or *account> [= low [- high]]
      locate - this is a shorthand for using the /loc switch.

    Switches:
      room       - only look for rooms
      exit       - only look for exits
      char       - only look for characters
      exact      - only exact matches are returned
      loc        - display the location of a single match
      startswith - match names starting with the string rather than containing

    Searches the database for an object of a particular name, dbref or
    account, optionally restricted to a #dbref interval.
    """

    key = 'find'
    aliases = 'search, locate'
    switch_options = ('room', 'exit', 'char', 'exact', 'loc', 'startswith')
    locks = 'cmd:perm(find) or perm(Builder)'
    help_category = 'Building'

    def func(self):
        """Search functionality."""
        caller = self.caller
        switches = self.switches
        if (not self.args):
            caller.msg('Usage: find <string> [= low [-high]]')
            return
        # Invoking via the 'locate' alias implies the /loc switch.
        if ('locate' in self.cmdstring):
            switches.append('loc')
        searchstring = self.lhs
        # Default dbref bounds: 1 .. highest id currently in the database.
        (low, high) = (1, ObjectDB.objects.all().order_by('-id')[0].id)
        if self.rhs:
            # Parse an optional "low - high" (or "low high") dbref interval.
            if ('-' in self.rhs):
                limlist = [part.lstrip('#').strip() for part in self.rhs.split('-', 1)]
            else:
                limlist = [part.lstrip('#') for part in self.rhs.split(None, 1)]
            if (limlist and limlist[0].isdigit()):
                low = max(low, int(limlist[0]))
            if ((len(limlist) > 1) and limlist[1].isdigit()):
                high = min(high, int(limlist[1]))
        # Normalize so that low <= high.
        low = min(low, high)
        high = max(low, high)
        is_dbref = utils.dbref(searchstring)
        is_account = searchstring.startswith('*')
        restrictions = ''
        if self.switches:
            restrictions = (', %s' % ', '.join(self.switches))
        if (is_dbref or is_account):
            # Direct dbref / account lookup: at most one match possible.
            if is_dbref:
                result = caller.search(searchstring, global_search=True, quiet=True)
                string = ('|wExact dbref match|n(#%i-#%i%s):' % (low, high, restrictions))
            else:
                searchstring = searchstring.lstrip('*')
                result = caller.search_account(searchstring, quiet=True)
                string = ('|wMatch|n(#%i-#%i%s):' % (low, high, restrictions))
            # Apply the typeclass filters to the single candidate.
            if ('room' in switches):
                result = (result if inherits_from(result, ROOM_TYPECLASS) else None)
            if ('exit' in switches):
                result = (result if inherits_from(result, EXIT_TYPECLASS) else None)
            if ('char' in switches):
                result = (result if inherits_from(result, CHAR_TYPECLASS) else None)
            if (not result):
                string += '\n   |RNo match found.|n'
            elif (not (low <= int(result[0].id) <= high)):
                string += ("\n   |RNo match found for '%s' in #dbref interval.|n" % searchstring)
            else:
                result = result[0]
                string += ('\n|g   %s - %s|n' % (result.get_display_name(caller), result.path))
                if (('loc' in self.switches) and (not is_account) and result.location):
                    string += ' (|wlocation|n: |g{}|n)'.format(result.location.get_display_name(caller))
        else:
            # Name search: build key and alias queries per matching mode.
            if ('exact' in switches):
                keyquery = Q(db_key__iexact=searchstring, id__gte=low, id__lte=high)
                aliasquery = Q(db_tags__db_key__iexact=searchstring, db_tags__db_tagtype__iexact='alias', id__gte=low, id__lte=high)
            elif ('startswith' in switches):
                keyquery = Q(db_key__istartswith=searchstring, id__gte=low, id__lte=high)
                aliasquery = Q(db_tags__db_key__istartswith=searchstring, db_tags__db_tagtype__iexact='alias', id__gte=low, id__lte=high)
            else:
                keyquery = Q(db_key__icontains=searchstring, id__gte=low, id__lte=high)
                aliasquery = Q(db_tags__db_key__icontains=searchstring, db_tags__db_tagtype__iexact='alias', id__gte=low, id__lte=high)
            results = ObjectDB.objects.filter((keyquery | aliasquery)).distinct()
            nresults = results.count()
            if nresults:
                # Materialize and post-filter by typeclass switches.
                results = [result for result in results]
                if ('room' in switches):
                    results = [obj for obj in results if inherits_from(obj, ROOM_TYPECLASS)]
                if ('exit' in switches):
                    results = [obj for obj in results if inherits_from(obj, EXIT_TYPECLASS)]
                if ('char' in switches):
                    results = [obj for obj in results if inherits_from(obj, CHAR_TYPECLASS)]
                nresults = len(results)
            if nresults:
                if (nresults > 1):
                    string = ('|w%i Matches|n(#%i-#%i%s):' % (nresults, low, high, restrictions))
                    for res in results:
                        string += ('\n   |g%s - %s|n' % (res.get_display_name(caller), res.path))
                else:
                    string = ('|wOne Match|n(#%i-#%i%s):' % (low, high, restrictions))
                    string += ('\n   |g%s - %s|n' % (results[0].get_display_name(caller), results[0].path))
                    if (('loc' in self.switches) and (nresults == 1) and results[0].location):
                        string += ' (|wlocation|n: |g{}|n)'.format(results[0].location.get_display_name(caller))
            else:
                string = ('|wMatch|n(#%i-#%i%s):' % (low, high, restrictions))
                string += ("\n   |RNo matches found for '%s'|n" % searchstring)
        caller.msg(string.strip())
def _unpack_sequence_value(value: SequenceValue, target_length: int, post_starred_length: Optional[int]) -> Union[(Sequence[Value], CanAssignError)]:
    # Unpack ``value``'s members to fit an unpacking assignment target.
    # Members are (is_many, value) pairs; is_many marks a variadic
    # (star-unpacked) member whose concrete length is unknown.
    # post_starred_length is None for ``a, b, c = seq`` (exactly
    # target_length names); otherwise it models ``a, *mid, b = seq`` with
    # target_length names before the star and post_starred_length after it.
    # Returns one Value per target name (the starred name receives a
    # list-typed value), or CanAssignError when the member count cannot
    # satisfy the target.
    head = []
    tail = []
    # Take definite (non-variadic) members from the front for the leading
    # targets; stop early if a variadic member is hit.
    while (len(head) < target_length):
        if (len(head) >= len(value.members)):
            return CanAssignError(f'{value} must have at least {target_length} elements')
        (is_many, val) = value.members[len(head)]
        if is_many:
            break
        head.append(val)
    remaining_target_length = (target_length - len(head))
    if (post_starred_length is None):
        if (remaining_target_length == 0):
            # All targets satisfied by definite members; any leftover
            # members must all be variadic (possibly empty) for the lengths
            # to still be able to match exactly.
            if all((is_many for (is_many, _) in value.members[target_length:])):
                return head
            return CanAssignError(f'{value} must have exactly {target_length} elements')
        # Fill the remaining targets with definite members from the end.
        tail = []
        while (len(tail) < remaining_target_length):
            if ((len(tail) + len(head)) >= len(value.members)):
                return CanAssignError(f'{value} must have at least {target_length} elements')
            (is_many, val) = value.members[((- len(tail)) - 1)]
            if is_many:
                break
            tail.append(val)
        if tail:
            remaining_members = value.members[len(head):(- len(tail))]
        else:
            remaining_members = value.members[len(head):]
        if (not remaining_members):
            return CanAssignError(f'{value} must have exactly {target_length} elements')
        # Targets covered by variadic members get the union of the possible
        # element types.
        middle_length = (remaining_target_length - len(tail))
        fallback_value = unite_values(*[val for (_, val) in remaining_members])
        return [*head, *[fallback_value for _ in range(middle_length)], *reversed(tail)]
    else:
        # Starred target list: collect definite members from the end for the
        # names after the star.
        while (len(tail) < post_starred_length):
            if (len(tail) >= (len(value.members) - len(head))):
                return CanAssignError(f'{value} must have at least {(target_length + post_starred_length)} elements')
            (is_many, val) = value.members[((- len(tail)) - 1)]
            if is_many:
                break
            tail.append(val)
        remaining_post_starred_length = (post_starred_length - len(tail))
        if tail:
            remaining_members = value.members[len(head):(- len(tail))]
        else:
            remaining_members = value.members[len(head):]
        if ((remaining_target_length != 0) or (remaining_post_starred_length != 0)):
            if (not remaining_members):
                return CanAssignError(f'{value} must have at least {(target_length + post_starred_length)} elements')
            else:
                # Some fixed targets overlap variadic members: give each the
                # union type, and the starred name a list of it.
                fallback_value = unite_values(*[val for (_, val) in remaining_members])
                return [*head, *[fallback_value for _ in range(remaining_target_length)], GenericValue(list, [fallback_value]), *[fallback_value for _ in range(remaining_post_starred_length)], *reversed(tail)]
        else:
            # Clean split: the starred name takes exactly the middle members.
            return [*head, SequenceValue(list, remaining_members), *reversed(tail)]
def deselect_by_mark(items: 'List[Item]', config: Config) -> None:
    """Filter *items* in place according to the ``-m`` mark expression.

    Items whose marks do not satisfy the expression are reported through the
    ``pytest_deselected`` hook and removed from ``items``.
    """
    markexpr = config.option.markexpr
    if not markexpr:
        return
    mark_expression = _parse_expression(markexpr, "Wrong expression passed to '-m'")
    kept: List[Item] = []
    dropped: List[Item] = []
    for candidate in items:
        bucket = kept if mark_expression.evaluate(MarkMatcher.from_item(candidate)) else dropped
        bucket.append(candidate)
    if dropped:
        config.hook.pytest_deselected(items=dropped)
    items[:] = kept
def parse_args():
    """Parse command-line options for initializing the MS COCO dataset."""
    parser = argparse.ArgumentParser(
        description='Initialize MS COCO dataset.',
        epilog='Example: python mscoco.py --download-dir ~/mscoco',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Where to place (or look for) the dataset on disk.
    parser.add_argument('--download-dir', type=str, default=None,
                        help='dataset directory on disk')
    parser.add_argument('--no-download', action='store_true',
                        help='disable automatic download if set')
    parser.add_argument('--overwrite', action='store_true',
                        help='overwrite downloaded files if set, in case they are corrupted')
    return parser.parse_args()
class one_conv(nn.Module):
    """3x3 convolution (padding 1) -> optional BatchNorm -> ReLU."""

    def __init__(self, in_ch, out_ch, normaliz=False):
        super(one_conv, self).__init__()
        layers = [nn.Conv2d(in_ch, out_ch, 3, padding=1)]
        if normaliz:
            layers.append(nn.BatchNorm2d(out_ch))
        layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class BoundarySelector(_Selector):
    """Selection dialog for picking faces, edges, or vertexes in the 3D view."""

    def __init__(self):
        super(BoundarySelector, self).__init__()
        self.setWindowTitle(self.tr('Select Faces/Edges/Vertexes'))
        self.setHelpText(self.tr('To add references: select them in the 3D view and click "Add".'))

    def getSelection(self):
        # One (object, sub-element names) pair per selected object that has
        # sub-elements (faces/edges/vertexes) picked.
        return [(sel.Object, tuple(sel.SubElementNames))
                for sel in Gui.Selection.getSelectionEx()
                if sel.HasSubObjects]
class SpatialSvdModuleSplitter():
    """Splits a keras Conv2D layer into two rank-reduced Conv2Ds via
    spatial SVD of its weight kernel."""
    def split_module(model: tf.keras.Model, layer: Layer, rank: int) -> (tf.keras.layers.Layer, tf.keras.layers.Layer):
        """Replace ``layer`` in ``model`` with conv_a (the "v" factor, no
        bias) followed by conv_b (the "h" factor, carrying the original
        bias if any), and return (conv_a, conv_b).

        NOTE(review): declared without self/cls; presumably invoked through
        the class — confirm caller convention.
        """
        (h, v) = SpatialSvdModuleSplitter.get_svd_matrices(layer, rank)
        # Distribute the original conv's stride across the two new convs.
        (conv_a_stride, conv_b_stride) = get_strides_for_split_conv_ops(layer=layer.module)
        data_format_channels = layer.module.data_format
        padding = layer.module.padding
        conv_a = tf.keras.layers.Conv2D(filters=v.shape[3], kernel_size=(v.shape[0], v.shape[1]), strides=conv_a_stride, data_format=data_format_channels, activation=None, padding=padding, name=(layer.module.name + '_a'), use_bias=False)
        # A second weight tensor means the original conv had a bias term;
        # it is carried over to the second conv only.
        use_bias = False
        if (len(layer.module.get_weights()) > 1):
            use_bias = True
        conv_b = tf.keras.layers.Conv2D(filters=h.shape[3], kernel_size=(h.shape[0], h.shape[1]), strides=conv_b_stride, name=(layer.module.name + '_b'), data_format=data_format_channels, padding=padding, use_bias=use_bias)
        replace_layer_in_functional_model(model, layer.module, [conv_a, conv_b])
        # Sanity-check shapes before installing the factored weights.
        assert (conv_a.get_weights()[0].shape == v.shape)
        assert (conv_b.get_weights()[0].shape == h.shape)
        conv_a.set_weights([v])
        conv_b_weight_tensor = [h]
        if use_bias:
            bias_tensor = layer.module.get_weights()[1]
            conv_b_weight_tensor.append(bias_tensor)
        conv_b.set_weights(conv_b_weight_tensor)
        return (conv_a, conv_b)
    def get_svd_matrices(layer: Layer, rank: int) -> (np.array, np.array):
        """Compute the (h, v) spatial-SVD factors of ``layer``'s kernel,
        returned in TF weight layout.

        NOTE(review): also declared without self/cls — same convention as
        split_module.
        """
        weight_tensor = layer.module.get_weights()[0]
        # The libpymo SVD routine expects its own weight ordering; convert
        # there and back around the factorization.
        weight_tensor = WeightTensorUtils.transpose_from_tf_to_libpymo_format(weight_tensor, layer.module)
        (out_channels, in_channels, height, width) = weight_tensor.shape
        (h, v) = SpatialSvdPruner.lingalg_spatial_svd(weight_tensor, rank, in_channels, out_channels, height, width)
        h = WeightTensorUtils.transpose_from_libpymo_to_tf_format(h, layer.module)
        v = WeightTensorUtils.transpose_from_libpymo_to_tf_format(v, layer.module)
        return (h, v)
class PluginPipelines(PluginActions):
    """Registry of the pipelines a plugin exposes, keyed by pipeline id."""

    def register_function(self, function, inputs, parameters, outputs, name, description, input_descriptions=None, parameter_descriptions=None, output_descriptions=None, citations=None, deprecated=False, examples=None):
        """Wrap *function* as a qiime2 Pipeline and store it under its id."""
        citations = () if citations is None else tuple(citations)
        examples = {} if examples is None else examples
        pipeline = qiime2.sdk.Pipeline._init(
            function, inputs, parameters, outputs, self._plugin_id, name,
            description, input_descriptions, parameter_descriptions,
            output_descriptions, citations, deprecated, examples)
        self[pipeline.id] = pipeline
class AttrVI_ATTR_MEM_SPACE(EnumAttribute):
    """VI_ATTR_MEM_SPACE: VXI address space used by the resource."""
    # Only applies to VXI INSTR resources.
    resources = [(constants.InterfaceType.vxi, 'INSTR')]
    # No Python-level alias is exposed for this attribute.
    py_name = ''
    visa_name = 'VI_ATTR_MEM_SPACE'
    visa_type = 'ViUInt16'
    default = constants.VI_A16_SPACE
    # Read-only, not writable, not local-only.
    (read, write, local) = (True, False, False)
    enum_type = constants.AddressSpace
def keytext_to_keyinfo_and_event(keytext):
    """Translate a key description string into a (keyinfo, Event) pair.

    A quoted single character such as ``"a"`` produces an Event for that
    literal character; any other description uses the char field of the
    parsed key info.
    """
    keyinfo = keysyms.common.make_KeyPress_from_keydescr(keytext)
    quoted_single_char = (len(keytext) == 3
                          and keytext[0] == '"'
                          and keytext[2] == '"')
    if quoted_single_char:
        event = Event(keytext[1])
    else:
        event = Event(keyinfo.tuple()[3])
    return keyinfo, event
class Connection():
    """Trivial stand-in connection object; always truthy."""

    def __repr__(self):
        return str(self)

    def __str__(self):
        return 'Simple Connection'

    def __bool__(self):
        return True
def _eval(funcstring):
funclist = funcstring.split('.')
firstelem = funclist.pop(0)
if isinstance(__builtins__, dict):
if (firstelem in __builtins__):
return __builtins__[firstelem]
elif hasattr(__builtins__, firstelem):
return getattr(__builtins__, firstelem)
try:
func = globals()[firstelem]
for elem in funclist:
func = getattr(func, elem)
except (KeyError, AttributeError):
raise NameError("name '{fs}' is not defined".format(fs=funcstring))
return func |
def hkdf_derive_test(backend, algorithm, params):
    """Check HKDF derivation against a hex-encoded test vector in *params*."""
    # Empty salt/info strings decode to b'' which is mapped to None.
    salt = binascii.unhexlify(params['salt']) or None
    info = binascii.unhexlify(params['info']) or None
    hkdf = HKDF(algorithm, int(params['l']), salt=salt, info=info, backend=backend)
    derived = hkdf.derive(binascii.unhexlify(params['ikm']))
    assert derived == binascii.unhexlify(params['okm'])
@pytest.mark.parametrize('name', all_regression_models)
@pytest.mark.parametrize('N', [100, 6000])
def test_models_regression(name, N):
    """Smoke-test fit/predict/sample output shapes for every regression model.

    The decorator lines had lost their ``@pytest.mark`` prefix (the same
    mangling as the ``.skip`` residue elsewhere in this file), which made
    the module a syntax error; restored here.
    """
    (S, Ns, D) = (5, 3, 2)
    model = get_regression_model(name)(is_test=True)
    model.fit(np.random.randn(N, D), np.random.randn(N, 1))
    # Fit a second time to verify refitting an already-fitted model works.
    model.fit(np.random.randn(N, D), np.random.randn(N, 1))
    (m, v) = model.predict(np.random.randn(Ns, D))
    assert (m.shape == (Ns, 1))
    assert (v.shape == (Ns, 1))
    samples = model.sample(np.random.randn(Ns, D), S)
    assert (samples.shape == (S, Ns, 1))
def get_densenet(blocks, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create a DenseNet model of the requested depth.

    Parameters
    ----------
    blocks : int
        Total layer count; one of 121, 161, 169, 201.
    model_name : str or None
        Model name used to locate pretrained weights.
    pretrained : bool
        Whether to download and load pretrained weights.
    root : str
        Local directory for the pretrained model store.

    Returns
    -------
    DenseNet

    Raises
    ------
    ValueError
        For unsupported depths, or if ``pretrained`` is set without a
        usable ``model_name``.
    """
    # Per-depth configuration: (stem channels, growth rate, units per stage).
    configs = {
        121: (64, 32, [6, 12, 24, 16]),
        161: (96, 48, [6, 12, 36, 24]),
        169: (64, 32, [6, 12, 32, 32]),
        201: (64, 32, [6, 12, 48, 32]),
    }
    if blocks not in configs:
        raise ValueError('Unsupported DenseNet version with number of layers {}'.format(blocks))
    (init_block_channels, growth_rate, layers) = configs[blocks]
    # Each stage starts from half of the previous stage's final width and
    # grows by `growth_rate` per unit; the first stage starts from twice the
    # stem width (so its first unit sees init_block_channels + growth_rate).
    channels = []
    prev_width = init_block_channels * 2
    for num_units in layers:
        width = prev_width // 2
        stage = []
        for _ in range(num_units):
            width += growth_rate
            stage.append(width)
        channels.append(stage)
        prev_width = stage[-1]
    net = DenseNet(channels=channels, init_block_channels=init_block_channels, **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def RADC(mf, frozen=None, mo_coeff=None, mo_occ=None):
    """Build a restricted ADC solver from a mean-field object.

    The mean-field object is stripped of second-order SCF wrappers and
    converted to plain RHF when necessary. Frozen orbitals are not
    supported.
    """
    __doc__ = radc.RADC.__doc__
    # Only "no frozen orbitals" (None or 0) is accepted.
    if frozen is not None and frozen != 0:
        raise NotImplementedError
    mf = mf.remove_soscf()
    if not mf.istype('RHF'):
        mf = mf.to_rhf()
    return radc.RADC(mf, frozen, mo_coeff, mo_occ)
def test_tar_archive_one_pass_with_interpolation():
    """Archive step should interpolate {key} placeholders in in/out paths."""
    context = Context({
        'key1': 'value1',
        'key2': 'value2',
        'key3': 'value3',
        'tar': {'archive': [{'in': '{key2}/to/dir', 'out': './blah.tar.{key1}'}]},
    })
    with patch('tarfile.open') as mock_tarfile:
        pypyr.steps.tar.run_step(context)
    # Output path interpolated and opened with xz compression; input path
    # interpolated and added at the archive root.
    mock_tarfile.assert_called_once_with('./blah.tar.value1', 'w:xz')
    mock_tarfile.return_value.__enter__().add.assert_called_once_with('value2/to/dir', arcname='.')
class Benchmark(object):
    """Decorator factory that times wrapped calls and pretty-prints results.

    Each call of a decorated function appends a ``(name, seconds)`` tuple to
    ``results``, where the name is ``prefix + func.__name__``.
    """

    def __init__(self, prefix=None):
        self.prefix = prefix if prefix else ''
        self.results = []

    def __call__(self, func):
        def stopwatch(*args):
            # Record wall-clock duration of this single invocation.
            started = time.time()
            label = self.prefix + func.__name__
            value = func(*args)
            elapsed = time.time() - started
            self.results.append((label, elapsed))
            return value
        return stopwatch

    def __str__(self):
        lines = ['Benchmark results']
        if self.prefix != '':
            lines[-1] += ' - %s' % self.prefix
        # Column width is driven by the longest recorded name.
        widest = max((len(name) for (name, _) in self.results), default=0)
        lines.append('=' * (widest + 17))
        # Mirror the separator above the title line as well.
        lines.insert(0, lines[-1])
        for entry in self.results:
            lines.append('{0:<{indent}}{1:.8f} s'.format(*entry, indent=widest + 5))
        if not self.results:
            lines.append('None ran!')
        return '\n'.join(lines)
class CoverSearch():
    """Runs several album-art search engines in parallel threads and feeds
    their results back to ``callback`` from the GLib main loop."""
    def __init__(self, callback):
        # (engine class, query_replace spec) pairs registered via add_engine.
        self.engine_list = []
        self._stop = False
        # Wrap the callback so results arriving after stop() are dropped.
        def wrap(*args, **kwargs):
            if (not self._stop):
                return callback(*args, **kwargs)
        self.callback = wrap
        # Number of engines that have completed (successfully or not).
        self.finished = 0
    def add_engine(self, engine, query_replace):
        # query_replace: replacement spec later applied by cleanup_query().
        self.engine_list.append((engine, query_replace))
    def stop(self):
        # Only suppresses future callbacks; worker threads are not killed.
        self._stop = True
    def start(self, query, raw, limit):
        # One daemon thread per engine; if none are registered, report an
        # empty result with progress 1 from the main loop.
        for (engine, replace) in self.engine_list:
            thr = threading.Thread(target=self.__search_thread, args=(engine, query, replace, raw, limit))
            thr.daemon = True
            thr.start()
        if (not len(self.engine_list)):
            GLib.idle_add(self.callback, [], 1)
    def __search_thread(self, engine, query, replace, raw, limit):
        # raw=True bypasses query cleanup.
        search = (query if raw else cleanup_query(query, replace))
        print_d(f'[AlbumArt] running search {search!r} on engine {engine.__name__}')
        result = []
        try:
            result = engine().start(search, limit)
        except Exception as e:
            # Best-effort: a failing engine just reports an empty result.
            print_w(f'[AlbumArt] {engine.__name__}: {query!r} ({e})')
            print_exc()
        # NOTE(review): this increment is not synchronized across threads;
        # presumably relies on the GIL keeping it effectively safe — verify.
        self.finished += 1
        # Progress fraction in (0, 1]; callback is always invoked on the
        # GLib main loop.
        progress = (float(self.finished) / len(self.engine_list))
        GLib.idle_add(self.callback, result, progress)
def attr(accessing_obj, accessed_obj, *args, **kwargs):
    """Lock function checking an attribute on the accessing object.

    With one argument the lock passes when the attribute (or stored
    Attribute) exists and is truthy; with two, when it compares equal (or
    per the ``compare`` kwarg) to the given value. Plain Python attributes
    are checked before the object's Attribute store. Returns False on any
    failure.
    """
    if not args:
        return False
    attrname = args[0].strip()
    wanted = args[1].strip() if len(args) > 1 else None
    comparison = kwargs.get('compare', 'eq') if kwargs else 'eq'

    def _compare(lhs, rhs, mode='eq'):
        # Comparison errors (incompatible types etc.) simply deny access.
        try:
            return CF_MAPPING.get(mode, CF_MAPPING['default'])(lhs, rhs)
        except Exception:
            return False

    # Commands and similar wrappers expose the real object on .obj.
    if hasattr(accessing_obj, 'obj'):
        accessing_obj = accessing_obj.obj
    if hasattr(accessing_obj, attrname):
        if wanted:
            return _compare(str(getattr(accessing_obj, attrname)), wanted, comparison)
        return bool(getattr(accessing_obj, attrname))
    if hasattr(accessing_obj, 'attributes') and accessing_obj.attributes.has(attrname):
        if wanted:
            return (hasattr(accessing_obj, 'attributes')
                    and _compare(accessing_obj.attributes.get(attrname), wanted, comparison))
        return bool(accessing_obj.attributes.get(attrname))
    return False
def senstivity_check():
    # Monte Carlo sensitivity analysis: how much could an unmeasured
    # confounder shift an observed RR of ~0.733?  Draws 10k samples from
    # trapezoidal priors and plots the corrected RR distribution.
    # (Function name typo "senstivity" is kept — it is the public name.)
    np.random.seed(101)
    mcrr = MonteCarloRR(observed_RR=0.73322, sample=10000)
    # Prior on the confounder-outcome risk ratio.
    mcrr.confounder_RR_distribution(trapezoidal(mini=0.9, mode1=1.1, mode2=1.7, maxi=1.8, size=10000))
    # Priors on confounder prevalence among exposed / unexposed.
    mcrr.prop_confounder_exposed(trapezoidal(mini=0.25, mode1=0.28, mode2=0.32, maxi=0.35, size=10000))
    mcrr.prop_confounder_unexposed(trapezoidal(mini=0.55, mode1=0.58, mode2=0.62, maxi=0.65, size=10000))
    mcrr.fit()
    mcrr.plot()
    plt.show()
def train(train_iter, dev_iter, mixed_test_iter, model, args, text_field, aspect_field, sm_field, predict_iter):
    """Train an aspect-sentiment model with Adagrad.

    Evaluates on the dev set (and the mixed test set, when provided) only
    after the final epoch.  Returns ((dev_acc, mixed_acc), time_stamps),
    where time_stamps collects (dev_acc, elapsed_seconds) entries.

    NOTE(review): text_field/aspect_field/sm_field/predict_iter are unused
    here — presumably kept for interface parity with callers; confirm.
    """
    time_stamps = []
    optimizer = torch.optim.Adagrad(model.parameters(), lr=args.lr, weight_decay=args.l2, lr_decay=args.lr_decay)
    steps = 0
    model.train()
    start_time = time.time()
    (dev_acc, mixed_acc) = (0, 0)
    for epoch in range(1, (args.epochs + 1)):
        for batch in train_iter:
            (feature, aspect, target) = (batch.text, batch.aspect, batch.sentiment)
            # Transpose in place to batch-first layout.
            feature.data.t_()
            # Skip degenerate batches.
            if (len(feature) < 2):
                continue
            if (not args.aspect_phrase):
                # Single-token aspects need an explicit sequence dimension.
                aspect.data.unsqueeze_(0)
                aspect.data.t_()
            # Shift labels from 1-based to 0-based for cross_entropy.
            target.data.sub_(1)
            if args.cuda:
                (feature, aspect, target) = (feature.cuda(), aspect.cuda(), target.cuda())
            optimizer.zero_grad()
            (logit, _, _) = model(feature, aspect)
            loss = F.cross_entropy(logit, target)
            loss.backward()
            optimizer.step()
            steps += 1
            if ((steps % args.log_interval) == 0):
                corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()
                accuracy = ((100.0 * corrects) / batch.batch_size)
                if (args.verbose == 1):
                    # NOTE(review): loss.data[0] is pre-0.4 PyTorch style;
                    # on modern torch this would need loss.item().
                    sys.stdout.write('\rBatch[{}] - loss: {:.6f} acc: {:.4f}%({}/{})'.format(steps, loss.data[0], accuracy, corrects, batch.batch_size))
            if ((steps % args.save_interval) == 0):
                # Periodically snapshot the whole model object.
                if (not os.path.isdir(args.save_dir)):
                    os.makedirs(args.save_dir)
                save_prefix = os.path.join(args.save_dir, 'snapshot')
                save_path = '{}_steps{}.pt'.format(save_prefix, steps)
                torch.save(model, save_path)
        if (epoch == args.epochs):
            # Final-epoch evaluation only.
            (dev_acc, _, _) = eval(dev_iter, model, args)
            if mixed_test_iter:
                (mixed_acc, _, _) = eval(mixed_test_iter, model, args)
            else:
                mixed_acc = 0.0
            if (args.verbose == 1):
                delta_time = (time.time() - start_time)
                print('\n{:.4f} - {:.4f} - {:.4f}'.format(dev_acc, mixed_acc, delta_time))
                time_stamps.append((dev_acc, delta_time))
                print()
    return ((dev_acc, mixed_acc), time_stamps)
class AnActor():
    """Ray actor that builds a per-rank shard of the breast-cancer data.

    NOTE(review): the class itself is presumably decorated with
    ``@ray.remote`` just above this view — confirm.
    """

    # The original line read bare ``(num_returns=2)`` — the ``@ray.method``
    # prefix was evidently stripped (invalid syntax as written); restored.
    # The method returns a 2-tuple, matching num_returns=2.
    @ray.method(num_returns=2)
    def genData(self, rank, nranks, nrows):
        """Return (shard, node_ip): rows [sz*rank, sz*(rank+1)) of the
        training matrix (features with label appended as last column) and
        the IP of the node this actor runs on."""
        (data, labels) = datasets.load_breast_cancer(return_X_y=True)
        (train_x, _, train_y, _) = train_test_split(data, labels, test_size=0.25)
        train_y = train_y.reshape((train_y.shape[0], 1))
        train = np.hstack([train_x, train_y])
        assert (nrows <= train.shape[0])
        # `nc` (expected column count) is defined elsewhere in this module.
        assert (nc == train.shape[1])
        sz = (nrows // nranks)
        return (train[(sz * rank):(sz * (rank + 1))], ray.util.get_node_ip_address())
class ContentManageableAdmin():
    """Admin mixin that wires creator / last_modified_by bookkeeping into
    the change form, read-only fields, list display, and filters."""

    def save_model(self, request, obj, form, change):
        """Stamp the acting user: creator on first save, editor afterwards."""
        if change:
            obj.last_modified_by = request.user
        else:
            obj.creator = request.user
        return super().save_model(request, obj, form, change)

    def get_readonly_fields(self, request, obj=None):
        return [*super().get_readonly_fields(request, obj),
                'created', 'updated', 'creator', 'last_modified_by']

    def get_list_filter(self, request):
        return [*super().get_list_filter(request), 'created', 'updated']

    def get_list_display(self, request):
        return [*super().get_list_display(request), 'created', 'updated']

    def get_fieldsets(self, request, obj=None):
        """Move the CMS metadata fields into their own collapsed section."""
        fieldsets = super().get_fieldsets(request, obj)
        # Strip the metadata fields from wherever the parent placed them.
        for (_, fieldset) in fieldsets:
            for meta_field in ('created', 'updated', 'creator', 'last_modified_by'):
                if meta_field in fieldset['fields']:
                    fieldset['fields'].remove(meta_field)
        return fieldsets + [('CMS metadata', {
            'fields': [('creator', 'created'), ('last_modified_by', 'updated')],
            'classes': ('collapse',),
        })]
def set_backend(name: str) -> _ContextManager:
    """Return a context manager switching the global backend to *name*.

    On exit the previously active backend is restored.  Raises RuntimeError
    for unsupported backend names.
    """
    if name not in _SUPPORTED_BACKENDS:
        supported_backend_names = ', '.join(_SUPPORTED_BACKENDS.keys())
        raise RuntimeError(f"Backend '{name}' is not supported. Please choose one of: {supported_backend_names}")
    previous = _CURRENT_BACKEND

    def _activate():
        set_global_backend(name)

    def _restore():
        set_global_backend(previous)

    return _ContextManager(action=_activate, cleanup=_restore)
def gen_value(t):
    """Return a random value of X11 protocol type *t* exercising the part
    of the type's range not covered by the next-smaller type.

    Signed types yield negative values; CARD/BYTE types yield values above
    the next-smaller unsigned type's range.  Raises RuntimeError for
    unknown type names.
    """
    if t == 'INT8':
        val = randint(-128, -1)
    elif t == 'INT16':
        val = randint(-32768, -256)
    elif t == 'INT32':
        # The lower bound was garbled in the source (`randint((- ), ...)`);
        # restored to the INT32 minimum, matching the other signed types.
        val = randint(-2147483648, -65536)
    elif t in ('CARD8', 'BYTE'):
        val = randint(128, 255)
    elif t == 'CARD16':
        val = randint(256, 65535)
    elif t == 'CARD32':
        # The upper bound was garbled in the source (`randint(65536, )`);
        # restored to the CARD32 maximum (2**32 - 1).
        val = randint(65536, 4294967295)
    elif t == 'BOOL':
        val = randint(0, 1)
    else:
        raise RuntimeError('unknown type: %s' % t)
    return val
class Migration(migrations.Migration):
    """Add ``Reminder.mentions``: an array of role/user IDs to ping."""
    dependencies = [('api', '0054_user_invalidate_unknown_role')]
    # Non-negative BigInteger IDs; defaults to an empty list.
    operations = [migrations.AddField(model_name='reminder', name='mentions', field=django.contrib.postgres.fields.ArrayField(base_field=models.BigIntegerField(validators=[django.core.validators.MinValueValidator(limit_value=0, message='Mention IDs cannot be negative.')]), blank=True, default=list, help_text='IDs of roles or users to ping with the reminder.', size=None))]
def code_assist(project, source_code, offset, resource=None, templates=None, maxfixes=1, later_locals=True):
    """Return code-completion proposals at *offset* in *source_code*.

    ``templates`` is ignored and kept only for backward compatibility; a
    DeprecationWarning is emitted when it is passed.
    """
    if templates is not None:
        warnings.warn('Codeassist no longer supports templates',
                      DeprecationWarning, stacklevel=2)
    return _PythonCodeAssist(project, source_code, offset, resource=resource,
                             maxfixes=maxfixes, later_locals=later_locals)()
class VOC12AffinityDataset(VOC12SegmentationDataset):
    """Segmentation dataset that additionally yields pixel-affinity labels
    computed from a 4x-downscaled label map."""

    def __init__(self, img_name_list_path, label_dir, crop_size, voc12_root, indices_from, indices_to, rescale=None, img_normal=TorchvisionNormalize(), hor_flip=False, crop_method=None):
        super().__init__(img_name_list_path, label_dir, crop_size, voc12_root, rescale, img_normal, hor_flip, crop_method=crop_method)
        self.extract_aff_lab_func = GetAffinityLabelFromIndices(indices_from, indices_to)

    def __len__(self):
        return len(self.img_name_list)

    def __getitem__(self, idx):
        sample = super().__getitem__(idx)
        # Affinities are derived from a quarter-resolution label map.
        small_label = imutils.pil_rescale(sample['label'], 0.25, 0)
        (sample['aff_bg_pos_label'],
         sample['aff_fg_pos_label'],
         sample['aff_neg_label']) = self.extract_aff_lab_func(small_label)
        return sample
class TestUpgradeToFloat():
    """Check that integer inputs to float-producing ops give results that
    match the float32 graph, over input domains where each op is finite.

    ``_test_unary``/``_test_binary`` take no ``self`` but are invoked via
    ``self.`` — consistent with this file's decorator stripping, the
    ``@staticmethod`` decorators were evidently lost; restored here.
    """

    # (op, int8 input values that keep the op finite and in-range).
    unary_ops_vals = [(reciprocal, (list(range((- 127), 0)) + list(range(1, 127)))), (sqrt, list(range(0, 128))), (log, list(range(1, 128))), (log2, list(range(1, 128))), (log10, list(range(1, 128))), (log1p, list(range(0, 128))), (exp, list(range((- 127), 89))), (exp2, list(range((- 127), 89))), (expm1, list(range((- 127), 89))), (deg2rad, list(range((- 127), 128))), (rad2deg, list(range((- 127), 128))), (cos, list(range((- 127), 128))), (arccos, list(range((- 1), 2))), (cosh, list(range((- 89), 90))), (arccosh, list(range(1, 128))), (sin, list(range((- 127), 128))), (arcsin, list(range((- 1), 2))), (sinh, list(range((- 89), 90))), (arcsinh, list(range((- 127), 128))), (tan, list(range((- 3), 4))), (arctan, list(range((- 127), 128))), (tanh, list(range((- 127), 128))), (arctanh, [0])]
    binary_ops_vals = [(arctan2, list(range((- 127), 128)), list(range((- 127), 128)))]

    @staticmethod
    def _test_unary(unary_op, x_range):
        """Compare int8 vs float32 graphs of unary_op over x_range."""
        xi = int8('xi')
        xf = float32('xf')
        ei = unary_op(xi)
        fi = pytensor.function([xi], ei)
        ef = unary_op(xf)
        ff = pytensor.function([xf], ef)
        for x_val in x_range:
            outi = fi(x_val)
            outf = ff(x_val)
            assert (outi.dtype == outf.dtype), 'incorrect dtype'
            assert np.allclose(outi, outf), 'insufficient precision'

    @staticmethod
    def _test_binary(binary_op, x_range, y_range):
        """Compare int8 vs float32 graphs of binary_op over the grid."""
        xi = int8('xi')
        yi = int8('yi')
        xf = float32('xf')
        yf = float32('yf')
        ei = binary_op(xi, yi)
        fi = pytensor.function([xi, yi], ei)
        ef = binary_op(xf, yf)
        ff = pytensor.function([xf, yf], ef)
        for x_val in x_range:
            for y_val in y_range:
                outi = fi(x_val, y_val)
                outf = ff(x_val, y_val)
                assert (outi.dtype == outf.dtype), 'incorrect dtype'
                assert np.allclose(outi, outf), 'insufficient precision'

    def test_true_div(self):
        """true_div of int8 operands must match the floatX graph."""
        x_range = list(range((- 127), 128))
        y_range = (list(range((- 127), 0)) + list(range(1, 127)))
        xi = int8('xi')
        yi = int8('yi')
        xf = ScalarType(pytensor.config.floatX)('xf')
        yf = ScalarType(pytensor.config.floatX)('yf')
        ei = true_div(xi, yi)
        fi = pytensor.function([xi, yi], ei)
        ef = true_div(xf, yf)
        ff = pytensor.function([xf, yf], ef)
        for x_val in x_range:
            for y_val in y_range:
                outi = fi(x_val, y_val)
                outf = ff(x_val, y_val)
                assert (outi.dtype == outf.dtype), 'incorrect dtype'
                assert np.allclose(outi, outf), 'insufficient precision'

    def test_unary(self):
        for (unary_op, x_range) in self.unary_ops_vals:
            self._test_unary(unary_op, x_range)

    def test_binary(self):
        for (binary_op, x_range, y_range) in self.binary_ops_vals:
            self._test_binary(binary_op, x_range, y_range)
def load(args, base_model, logits_model, base_optimizer, logits_optimizer):
    """Restore training state from the checkpoint at ``args.resume``.

    Handles both full training checkpoints (a dict carrying model and
    optimizer states plus epoch and mAP) and bare weight files.  Returns
    the stored mAP for full checkpoints, 0 otherwise (and implicitly None
    when ``args.resume`` is falsy, preserving the original behavior).
    """
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            chkpoint = torch.load(args.resume)
            if (isinstance(chkpoint, dict) and ('base_state_dict' in chkpoint)):
                args.start_epoch = chkpoint['epoch']
                mAP = chkpoint['mAP']
                ordered_load_state(base_model, chkpoint['base_state_dict'])
                ordered_load_state(logits_model, chkpoint['logits_state_dict'])
                base_optimizer.load_state_dict(chkpoint['base_optimizer'])
                logits_optimizer.load_state_dict(chkpoint['logits_optimizer'])
                print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, chkpoint['epoch']))
                return mAP
            else:
                # Bug fix: this branch referenced undefined name `model`,
                # raising NameError for any weights-only checkpoint.
                # NOTE(review): base_model appears to be the intended target
                # for bare state dicts — confirm against callers.
                ordered_load_state(base_model, chkpoint)
                print("=> loaded checkpoint '{}' (just weights)".format(args.resume))
                return 0
        else:
            print("=> no checkpoint found, starting from scratch: '{}'".format(args.resume))
            return 0
def parser_train():
    """Build the argument parser for standard/adversarial training runs."""
    parser = argparse.ArgumentParser(description='Standard + Adversarial Training.')
    # --- Data and augmentation ---
    parser.add_argument('--augment', type=str, default='base', choices=['none', 'base', 'cutout', 'autoaugment', 'randaugment', 'idbh'], help='Augment training set.')
    parser.add_argument('--batch-size', type=int, default=1024, help='Batch size for training.')
    parser.add_argument('--batch-size-validation', type=int, default=512, help='Batch size for testing.')
    parser.add_argument('--data-dir', type=str, default='/cluster/home/rarade/data/')
    parser.add_argument('--log-dir', type=str, default='/cluster/scratch/rarade/test/')
    parser.add_argument('-d', '--data', type=str, default='cifar10s', choices=DATASETS, help='Data to use.')
    parser.add_argument('--desc', type=str, required=True, help='Description of experiment. It will be used to name directories.')
    # --- Model ---
    parser.add_argument('-m', '--model', choices=MODELS, default='wrn-28-10-swish', help='Model architecture to be used.')
    parser.add_argument('--normalize', type=str2bool, default=False, help='Normalize input.')
    parser.add_argument('--pretrained-file', type=str, default=None, help='Pretrained weights file name.')
    # --- Training schedule ---
    parser.add_argument('-na', '--num-adv-epochs', type=int, default=400, help='Number of adversarial training epochs.')
    parser.add_argument('--adv-eval-freq', type=int, default=25, help='Adversarial evaluation frequency (in epochs).')
    parser.add_argument('--beta', default=None, type=float, help='Stability regularization, i.e., 1/lambda in TRADES.')
    # --- Optimizer ---
    parser.add_argument('--lr', type=float, default=0.4, help='Learning rate for optimizer (SGD).')
    parser.add_argument('--weight-decay', type=float, default=0.0005, help='Optimizer (SGD) weight decay.')
    parser.add_argument('--scheduler', choices=SCHEDULERS, default='cosinew', help='Type of scheduler.')
    parser.add_argument('--nesterov', type=str2bool, default=True, help='Use Nesterov momentum.')
    parser.add_argument('--clip-grad', type=float, default=None, help='Gradient norm clipping.')
    # --- Attack configuration ---
    parser.add_argument('-a', '--attack', type=str, choices=ATTACKS, default='linf-pgd', help='Type of attack.')
    parser.add_argument('--attack-eps', type=str2float, default=(8 / 255), help='Epsilon for the attack.')
    parser.add_argument('--attack-step', type=str2float, default=(2 / 255), help='Step size for PGD attack.')
    parser.add_argument('--attack-iter', type=int, default=10, help='Max. number of iterations (if any) for the attack.')
    parser.add_argument('--keep-clean', type=str2bool, default=False, help='Use clean samples during adversarial training.')
    # --- Misc / method variants ---
    parser.add_argument('--debug', action='store_true', default=False, help='Debug code. Run 1 epoch of training and evaluation.')
    parser.add_argument('--mart', action='store_true', default=False, help='MART training.')
    parser.add_argument('--unsup-fraction', type=float, default=0.7, help='Ratio of unlabelled data to labelled data.')
    parser.add_argument('--aux-data-filename', type=str, help='Path to additional Tiny Images data.', default='/cluster/scratch/rarade/cifar10s/ti_500K_pseudo_labeled.pickle')
    parser.add_argument('--seed', type=int, default=1, help='Random seed.')
    parser.add_argument('--consistency', action='store_true', default=False, help='use Consistency.')
    parser.add_argument('--cons_lambda', type=float, default=1.0, help='lambda for Consistency.')
    parser.add_argument('--cons_tem', type=float, default=0.5, help='temperature for Consistency.')
    parser.add_argument('--resume_path', default='', type=str)
    parser.add_argument('--LSE', action='store_true', default=False, help='LSE training.')
    parser.add_argument('--ls', type=float, default=0.1, help='label smoothing.')
    parser.add_argument('--clip_value', default=0, type=float)
    parser.add_argument('--CutMix', action='store_true', default=False, help='use CutMix.')
    return parser
class Lorenz96(DynSys):
    """Lorenz '96 chain: dx_i/dt = (x_{i+1} - x_{i-2}) * x_{i-1} - x_i + f,
    with cyclic indexing."""

    def rhs(self, X, t):
        # Express the cyclic neighbor pattern with rolls instead of writing
        # the wrap-around entries by hand: roll(X, -1)[i] == X[i+1],
        # roll(X, 2)[i] == X[i-2], roll(X, 1)[i] == X[i-1].
        return (np.roll(X, -1) - np.roll(X, 2)) * np.roll(X, 1) - X + self.f
# NOTE(review): `_module()` appears to be the residue of a stripped
# registry decorator (presumably `@DETECTORS.register_module()` in
# mmdetection style) — confirm against the original file.
_module()
class FasterRCNN(TwoStageDetector):
    """Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_."""
    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None):
        # Pure pass-through to the two-stage detector base class.
        super(FasterRCNN, self).__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained)
class UploaderTestCase(unittest.TestCase):
    """Integration tests for qiniu upload APIs (put_data / put_file).

    These run against the live service using module-level credentials, so
    assertions focus on returned keys, hashes, and HTTP status codes.
    """
    mime_type = 'text/plain'
    # Custom upload vars use the 'x:' prefix; user metadata the 'x-qn-meta-'
    # prefix (echoed back by stat without the prefix).
    params = {'x:a': 'a'}
    metadata = {'x-qn-meta-name': 'qiniu', 'x-qn-meta-age': '18'}
    q = Auth(access_key, secret_key)
    bucket = BucketManager(q)
    def test_put(self):
        # Keys may contain backslashes and quotes.
        key = 'a\\b\\c"hello'
        data = 'hello bubby!'
        token = self.q.upload_token(bucket_name)
        (ret, info) = put_data(token, key, data)
        print(info)
        assert (ret['key'] == key)
    def test_put_crc(self):
        # Empty key, with client-side crc32 verification enabled.
        key = ''
        data = 'hello bubby!'
        token = self.q.upload_token(bucket_name, key)
        (ret, info) = put_data(token, key, data, check_crc=True)
        print(info)
        assert (ret['key'] == key)
    def test_putfile(self):
        # Upload this very file and verify the server-side hash matches etag.
        localfile = __file__
        key = 'test_file'
        token = self.q.upload_token(bucket_name, key)
        (ret, info) = put_file(token, key, localfile, mime_type=self.mime_type, check_crc=True)
        print(info)
        assert (ret['key'] == key)
        assert (ret['hash'] == etag(localfile))
    def test_putInvalidCrc(self):
        # A bogus crc32 must be rejected with HTTP 400.
        key = 'test_invalid'
        data = 'hello bubby!'
        crc32 = 'wrong crc32'
        token = self.q.upload_token(bucket_name)
        (ret, info) = _form_put(token, key, data, None, None, crc=crc32)
        print(info)
        assert (ret is None)
        assert (info.status_code == 400)
    def test_putWithoutKey(self):
        # Keyless upload: the content hash becomes the key.
        key = None
        data = 'hello bubby!'
        token = self.q.upload_token(bucket_name)
        (ret, info) = put_data(token, key, data)
        print(info)
        assert (ret['hash'] == ret['key'])
        # A token scoped to a specific key must reject keyless uploads (403).
        data = 'hello bubby!'
        token = self.q.upload_token(bucket_name, 'nokey2')
        (ret, info) = put_data(token, None, data)
        print(info)
        assert (ret is None)
        assert (info.status_code == 403)
    def test_withoutRead_withoutSeek_retry(self):
        try:
            key = 'retry'
            data = 'hello retry!'
            up_host_backup = get_valid_up_host()
            # NOTE(review): this line is garbled in the source — the Zone()
            # argument string is unterminated; presumably the primary
            # up-host URL was lost during extraction.
            set_default(default_zone=Zone(' up_host_backup))
            token = self.q.upload_token(bucket_name)
            (ret, info) = put_data(token, key, data)
            print(info)
            assert (ret['key'] == key)
            assert (ret['hash'] == 'FlYu0iBR1WpvYi4whKXiBuQpyLLk')
        finally:
            # Restore the default zone so later tests are unaffected.
            set_default(default_zone=Zone())
            qiniu.config._is_customized_default['default_zone'] = False
    def test_putData_without_fname(self):
        # Streamed 30 MB upload with no filename hint.
        if is_travis():
            return
        localfile = create_temp_file(((30 * 1024) * 1024))
        key = 'test_putData_without_fname'
        with open(localfile, 'rb') as input_stream:
            token = self.q.upload_token(bucket_name)
            (ret, info) = put_data(token, key, input_stream)
            print(info)
            assert (ret is not None)
    def test_putData_without_fname1(self):
        # Same as above, but with an explicit empty fname argument.
        if is_travis():
            return
        localfile = create_temp_file(((30 * 1024) * 1024))
        key = 'test_putData_without_fname1'
        with open(localfile, 'rb') as input_stream:
            token = self.q.upload_token(bucket_name)
            (ret, info) = put_data(token, key, input_stream, self.params, self.mime_type, False, None, '')
            print(info)
            assert (ret is not None)
    def test_putData_without_fname2(self):
        # Same again with a whitespace-only fname.
        if is_travis():
            return
        localfile = create_temp_file(((30 * 1024) * 1024))
        key = 'test_putData_without_fname2'
        with open(localfile, 'rb') as input_stream:
            token = self.q.upload_token(bucket_name)
            (ret, info) = put_data(token, key, input_stream, self.params, self.mime_type, False, None, ' ')
            print(info)
            assert (ret is not None)
    def test_put_file_with_metadata(self):
        # Metadata sent at upload must be echoed back by stat().
        localfile = __file__
        key = 'test_file_with_metadata'
        token = self.q.upload_token(bucket_name, key)
        (ret, info) = put_file(token, key, localfile, metadata=self.metadata)
        assert (ret['key'] == key)
        assert (ret['hash'] == etag(localfile))
        (ret, info) = self.bucket.stat(bucket_name, key)
        assert ('x-qn-meta' in ret)
        assert (ret['x-qn-meta']['name'] == 'qiniu')
        assert (ret['x-qn-meta']['age'] == '18')
    def test_put_data_with_metadata(self):
        # Same metadata round-trip, via put_data.
        key = 'put_data_with_metadata'
        data = 'hello metadata!'
        token = self.q.upload_token(bucket_name, key)
        (ret, info) = put_data(token, key, data, metadata=self.metadata)
        assert (ret['key'] == key)
        (ret, info) = self.bucket.stat(bucket_name, key)
        assert ('x-qn-meta' in ret)
        assert (ret['x-qn-meta']['name'] == 'qiniu')
        assert (ret['x-qn-meta']['age'] == '18')
@patch('pyproj.sync.urlretrieve', autospec=True)
@pytest.mark.parametrize('verbose', [True, False])
def test_download_resource_file(urlretrieve_mock, verbose, tmp_path, capsys):
    """_download_resource_file should fetch via urlretrieve into a .part
    file, rename it into place, verify the sha256, and print progress only
    when verbose.

    The decorator lines had lost their ``@patch(`` / ``@pytest.mark``
    prefixes (same mangling as elsewhere in this file); restored here.
    """
    def dummy_urlretrieve(url, local_path):
        # Stand-in download: write known content whose sha256 matches below.
        with open(local_path, 'w') as testf:
            testf.write('TEST')
    urlretrieve_mock.side_effect = dummy_urlretrieve
    _download_resource_file(file_url='test_url', short_name='test_file.txt', directory=tmp_path, verbose=verbose, sha256='94ee059335e587e501cc4bf90613e0814f00a7b08bc7c648fd865a2af6a22cc2')
    # The download goes to a temporary ".part" path first.
    urlretrieve_mock.assert_called_with('test_url', (tmp_path / 'test_file.txt.part'))
    captured = capsys.readouterr()
    if (not verbose):
        assert (captured.out == '')
    else:
        assert (captured.out == 'Downloading: test_url\n')
    expected_file = (tmp_path / 'test_file.txt')
    assert expected_file.exists()
    assert (_sha256sum(expected_file) == '94ee059335e587e501cc4bf90613e0814f00a7b08bc7c648fd865a2af6a22cc2')
class SemiDataset(Dataset):
    """Semi-supervised segmentation dataset with three modes.

    mode:
      'train_l' -- labeled training ids from `id_path`, optionally
                   oversampled/truncated to exactly `nsample` ids
      'train_u' -- unlabeled training ids from `id_path`
      anything else (e.g. 'val') -- ids from partitions/<name>/val.txt
    """

    def __init__(self, name, root, mode, size=None, id_path=None, nsample=None):
        self.name = name
        self.root = root
        self.mode = mode
        self.size = size  # crop size used for training-time augmentation
        if ((mode == 'train_l') or (mode == 'train_u')):
            with open(id_path, 'r') as f:
                self.ids = f.read().splitlines()
            if ((mode == 'train_l') and (nsample is not None)):
                # Oversample so at least `nsample` ids exist, then shuffle
                # and truncate to exactly `nsample`.
                self.ids *= math.ceil((nsample / len(self.ids)))
                random.shuffle(self.ids)
                self.ids = self.ids[:nsample]
        else:
            with open(('partitions/%s/val.txt' % name), 'r') as f:
                self.ids = f.read().splitlines()

    def __getitem__(self, item):
        id = self.ids[item]
        if ((self.mode == 'train_l') or (self.mode == 'train_u')):
            # Training ids are bare stems under a VOC-style layout.
            img_path = (((self.root + '/JPEGImages/') + id) + '.jpg')
            mask_path = (((self.root + '/SegmentationClass/') + id) + '.png')
            img = Image.open(os.path.join(img_path)).convert('RGB')
            mask = Image.fromarray(np.array(Image.open(mask_path)))
        else:
            # Val ids are "<img_relpath> <mask_relpath>" pairs.
            img = Image.open(os.path.join(self.root, id.split(' ')[0])).convert('RGB')
            mask = Image.fromarray(np.array(Image.open(os.path.join(self.root, id.split(' ')[1]))))
        if (self.mode == 'val'):
            (img, mask) = normalize(img, mask)
            return (img, mask, id)
        # Shared geometric augmentation for both training modes.
        (img, mask) = resize(img, mask, (0.5, 2.0))
        # Unlabeled crops are padded with 254 so padding can later be told
        # apart from the usual 255 ignore label.
        ignore_value = (254 if (self.mode == 'train_u') else 255)
        (img, mask) = crop(img, mask, self.size, ignore_value)
        (img, mask) = hflip(img, mask, p=0.5)
        if (self.mode == 'train_l'):
            return normalize(img, mask)
        # Unlabeled: build a weak view (img_w) and a strong view (img_s:
        # color jitter, grayscale, blur) plus a CutMix box and ignore mask.
        (img_w, img_s) = (deepcopy(img), deepcopy(img))
        img_w = normalize(img_w)
        if (random.random() < 0.8):
            img_s = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img_s)
        img_s = transforms.RandomGrayscale(p=0.2)(img_s)
        img_s = blur(img_s, p=0.5)
        cutmix_box = obtain_cutmix_box(img_s.size[0], p=1.0)
        ignore_mask = Image.fromarray(np.zeros((mask.size[1], mask.size[0])))
        (img_s, ignore_mask) = normalize(img_s, ignore_mask)
        mask = torch.from_numpy(np.array(mask)).long()
        # Map crop-padding pixels (254) to the standard 255 ignore value.
        ignore_mask[(mask == 254)] = 255
        return (img_w, img_s, ignore_mask, cutmix_box)

    def __len__(self):
        return len(self.ids)
def get_env_group_title(env):
    """Return the benchmark-group name for a gym-style environment.

    The group is inferred from the registered entry point: known
    third-party suites map to their package name, plain ``gym``
    environments map to their submodule (e.g. ``classic_control``),
    and anything else yields ``None``.
    """
    entry_point = env.unwrapped.spec.entry_point
    # Third-party suites are matched first, before the generic 'gym' check.
    for suite in ('gym_ple', 'gym_pygame', 'gym_minatar', 'gym_exploration', 'pybullet'):
        if suite in entry_point:
            return suite
    if 'gym' in entry_point:
        # e.g. 'gym.envs.classic_control:CartPoleEnv' -> 'classic_control'
        return entry_point.split('.')[2].split(':')[0]
    return None
class _GitlabProject():
    """Lightweight fake of a python-gitlab ``Project`` used in tests.

    Exposes just enough surface (``commits``, ``tags``, ``releases``)
    for the code under test, with CI job fixtures keyed off the
    requested overall `status`.
    """

    def __init__(self, status):
        # A single known commit (REF) whose CI statuses reflect `status`.
        self.commits = {REF: self._Commit(status)}
        self.tags = self._Tags()
        self.releases = self._Releases()

    class _Commit():
        def __init__(self, status):
            self.statuses = self._Statuses(status)

        class _Statuses():
            def __init__(self, status):
                # Fresh fixture lists per instance; an unrecognised status
                # deliberately leaves `jobs` unset (AttributeError on list()),
                # matching the original mock's behaviour.
                fixtures = {
                    'pending': [{'name': 'good_job', 'status': 'passed', 'allow_failure': False}, {'name': 'slow_job', 'status': 'pending', 'allow_failure': False}],
                    'failure': [{'name': 'good_job', 'status': 'passed', 'allow_failure': False}, {'name': 'bad_job', 'status': 'failed', 'allow_failure': False}],
                    'allow_failure': [{'name': 'notsobad_job', 'status': 'failed', 'allow_failure': True}, {'name': 'good_job2', 'status': 'passed', 'allow_failure': False}],
                    'success': [{'name': 'good_job1', 'status': 'passed', 'allow_failure': True}, {'name': 'good_job2', 'status': 'passed', 'allow_failure': False}],
                }
                if status in fixtures:
                    self.jobs = fixtures[status]

            def list(self):
                return self.jobs

    class _Tags():
        def get(self, tag):
            # Known tags resolve; the locked tag resolves as locked;
            # anything else behaves like a 404.
            if tag in (A_GOOD_TAG, AN_EXISTING_TAG):
                return self._Tag()
            if tag == A_LOCKED_TAG:
                return self._Tag(locked=True)
            raise gitlab.exceptions.GitlabGetError()

        class _Tag():
            def __init__(self, locked=False):
                self.locked = locked

            def set_release_description(self, _):
                # Locked tags reject release-description updates.
                if self.locked:
                    raise gitlab.exceptions.GitlabUpdateError()

    class _Releases():
        def create(self, input_):
            # A release needs a name and one of the creatable tags.
            if input_['name'] and input_['tag_name'] and (input_['tag_name'] in (A_GOOD_TAG, A_LOCKED_TAG)):
                return self._Release()
            raise gitlab.exceptions.GitlabCreateError()

        def update(self, tag, _):
            # Updating a missing tag fails; everything else succeeds.
            if tag == A_MISSING_TAG:
                raise gitlab.exceptions.GitlabUpdateError()
            return self._Release()

        class _Release():
            def __init__(self, locked=False):
                pass
class TranslationEvaluator(SentenceEvaluator):
    """Evaluate a bilingual embedding model by translation matching.

    Given aligned lists where ``target_sentences[i]`` translates
    ``source_sentences[i]``, both sides are embedded and, for each
    sentence, we check whether its translation is the nearest
    neighbour by cosine similarity. Accuracy is measured in both
    directions and the returned score is their mean.
    """

    def __init__(self, source_sentences: List[str], target_sentences: List[str], show_progress_bar: bool=False, batch_size: int=16, name: str='', print_wrong_matches: bool=False, write_csv: bool=True):
        """
        :param source_sentences: Sentences in the source language.
        :param target_sentences: Aligned translations of the sources.
        :param show_progress_bar: Show a progress bar while encoding.
        :param batch_size: Encoding batch size.
        :param name: Optional dataset name used in logs and the CSV filename.
        :param print_wrong_matches: Print the top candidates for each miss.
        :param write_csv: Append results to a CSV file in ``output_path``.
        """
        self.source_sentences = source_sentences
        self.target_sentences = target_sentences
        self.name = name
        self.batch_size = batch_size
        self.show_progress_bar = show_progress_bar
        self.print_wrong_matches = print_wrong_matches
        assert (len(self.source_sentences) == len(self.target_sentences))
        if name:
            name = ('_' + name)
        self.csv_file = (('translation_evaluation' + name) + '_results.csv')
        self.csv_headers = ['epoch', 'steps', 'src2trg', 'trg2src']
        self.write_csv = write_csv

    def __call__(self, model, output_path: str=None, epoch: int=(- 1), steps: int=(- 1)) -> float:
        """Run the evaluation and return the mean of src->trg and trg->src accuracy."""
        if (epoch != (- 1)):
            if (steps == (- 1)):
                out_txt = ' after epoch {}:'.format(epoch)
            else:
                out_txt = ' in epoch {} after {} steps:'.format(epoch, steps)
        else:
            out_txt = ':'
        logger.info(((('Evaluating translation matching Accuracy on ' + self.name) + ' dataset') + out_txt))
        # Embed both sides and build the full pairwise cosine-similarity matrix.
        embeddings1 = torch.stack(model.encode(self.source_sentences, show_progress_bar=self.show_progress_bar, batch_size=self.batch_size, convert_to_numpy=False))
        embeddings2 = torch.stack(model.encode(self.target_sentences, show_progress_bar=self.show_progress_bar, batch_size=self.batch_size, convert_to_numpy=False))
        cos_sims = pytorch_cos_sim(embeddings1, embeddings2).detach().cpu().numpy()
        correct_src2trg = 0
        correct_trg2src = 0
        # Source -> target direction: row i should peak at column i.
        for i in range(len(cos_sims)):
            max_idx = np.argmax(cos_sims[i])
            if (i == max_idx):
                correct_src2trg += 1
            elif self.print_wrong_matches:
                print('i:', i, 'j:', max_idx, ('INCORRECT' if (i != max_idx) else 'CORRECT'))
                print('Src:', self.source_sentences[i])
                print('Trg:', self.target_sentences[max_idx])
                print('Argmax score:', cos_sims[i][max_idx], 'vs. correct score:', cos_sims[i][i])
                # Show the five closest candidates for this miss.
                results = zip(range(len(cos_sims[i])), cos_sims[i])
                results = sorted(results, key=(lambda x: x[1]), reverse=True)
                for (idx, score) in results[0:5]:
                    print('\t', idx, ('(Score: %.4f)' % score), self.target_sentences[idx])
        # Target -> source direction: transpose the matrix and repeat.
        cos_sims = cos_sims.T
        for i in range(len(cos_sims)):
            max_idx = np.argmax(cos_sims[i])
            if (i == max_idx):
                correct_trg2src += 1
        acc_src2trg = (correct_src2trg / len(cos_sims))
        acc_trg2src = (correct_trg2src / len(cos_sims))
        logger.info('Accuracy src2trg: {:.2f}'.format((acc_src2trg * 100)))
        logger.info('Accuracy trg2src: {:.2f}'.format((acc_trg2src * 100)))
        if ((output_path is not None) and self.write_csv):
            # Append a result row to the CSV, writing headers on first create.
            csv_path = os.path.join(output_path, self.csv_file)
            output_file_exists = os.path.isfile(csv_path)
            with open(csv_path, mode=('a' if output_file_exists else 'w'), encoding='utf-8') as f:
                writer = csv.writer(f)
                if (not output_file_exists):
                    writer.writerow(self.csv_headers)
                writer.writerow([epoch, steps, acc_src2trg, acc_trg2src])
        return ((acc_src2trg + acc_trg2src) / 2)
def test_rexx_can_guess_from_text():
    """The Rexx lexer's analyse_text should score Rexx-looking snippets highly."""
    lexer = get_lexer_by_name('rexx')
    # A bare comment alone is only a weak hint.
    assert lexer.analyse_text('/* */') == pytest.approx(0.01)
    # An explicit "Rexx" marker plus a say statement is a certain match.
    assert lexer.analyse_text('/* Rexx */\n say "hello world"') == pytest.approx(1.0)
    # Procedure labels, do/end blocks and parse statements each raise the score.
    assert lexer.analyse_text('/* */\nhello:pRoceduRe\n say "hello world"') > 0.5
    assert lexer.analyse_text('/* */\n if 1 > 0 then do\n say "ok"\n end\n else do\n say "huh?"\n end') > 0.2
    assert lexer.analyse_text('/* */\n greeting = "hello world!"\n parse value greeting "hello" name "!"\n say name') > 0.2
def tensorclass(cls: T) -> T:
    """Class decorator turning a dataclass-style class into a tensorclass.

    Converts `cls` into a dataclass, then grafts on TensorDict-like
    behaviour: construction to/from a backing TensorDict, item access,
    comparison operators, state-dict/memmap support, and dispatch of a
    whitelisted set of torch functions via ``__torch_function__``.
    """

    def __torch_function__(cls, func: Callable, types: tuple[(type, ...)], args: tuple[(Any, ...)]=(), kwargs: (dict[(str, Any)] | None)=None) -> Callable:
        # Only whitelisted torch functions are handled, and only when every
        # involved type is a Tensor or this tensorclass.
        if ((func not in _TD_PASS_THROUGH) or (not all((issubclass(t, (Tensor, cls)) for t in types)))):
            return NotImplemented
        if (kwargs is None):
            kwargs = {}
        # Pick a tensorclass instance to serve as the template for rebuilding
        # results: first positional arg, else the 'input'/'tensors' kwarg.
        if (len(args) > 0):
            tensorclass_instance = args[0]
        else:
            tensorclass_instance = kwargs.get('input', kwargs['tensors'])
        if isinstance(tensorclass_instance, (tuple, list)):
            tensorclass_instance = tensorclass_instance[0]
        # Lower all args to TensorDicts, run the TensorDict implementation,
        # then wrap the result(s) back into the tensorclass.
        args = tuple((_arg_to_tensordict(arg) for arg in args))
        kwargs = {key: _arg_to_tensordict(value) for (key, value) in kwargs.items()}
        result = TD_HANDLED_FUNCTIONS[func](*args, **kwargs)
        if isinstance(result, (list, tuple)):
            return result.__class__((_from_tensordict_with_copy(tensorclass_instance, tensordict_result) for tensordict_result in result))
        return _from_tensordict_with_copy(tensorclass_instance, result)

    cls = dataclass(cls)
    expected_keys = set(cls.__dataclass_fields__)
    # Field names must not shadow TensorDict attributes.
    for attr in cls.__dataclass_fields__:
        if (attr in dir(TensorDict)):
            raise AttributeError(f"Attribute name {attr} can't be used with ")
    # Wrap construction so instances are backed by a TensorDict.
    cls.__init__ = _init_wrapper(cls.__init__)
    cls._from_tensordict = classmethod(_from_tensordict_wrapper(expected_keys))
    cls.from_tensordict = cls._from_tensordict
    # Install the dispatcher only if the class doesn't define its own.
    if (not hasattr(cls, '__torch_function__')):
        cls.__torch_function__ = classmethod(__torch_function__)
    # Pickling, attribute and item access all route through the backing
    # TensorDict via these wrappers.
    cls.__getstate__ = _getstate
    cls.__setstate__ = _setstate
    cls.__getattribute__ = _getattribute_wrapper(cls.__getattribute__)
    cls.__setattr__ = _setattr_wrapper(cls.__setattr__, expected_keys)
    cls.__getattr__ = _getattr
    cls.__getitem__ = _getitem
    cls.__getitems__ = _getitem
    cls.__setitem__ = _setitem
    cls.__repr__ = _repr
    cls.__len__ = _len
    cls.__eq__ = _eq
    cls.__ne__ = _ne
    cls.__or__ = _or
    cls.__xor__ = _xor
    # TensorDict-style accessors and persistence helpers.
    cls.set = _set
    cls.set_at_ = _set_at_
    cls.del_ = _del_
    cls.get = _get
    cls.get_at = _get_at
    cls.unbind = _unbind
    cls.state_dict = _state_dict
    cls.load_state_dict = _load_state_dict
    cls._memmap_ = _memmap_
    cls.memmap_like = TensorDictBase.memmap_like
    cls.memmap_ = TensorDictBase.memmap_
    cls.memmap = TensorDictBase.memmap
    cls.load_memmap = TensorDictBase.load_memmap
    cls._load_memmap = classmethod(_load_memmap)
    # Forward remaining TensorDict classmethods that `cls` doesn't override.
    for attr in TensorDict.__dict__.keys():
        func = getattr(TensorDict, attr)
        if (inspect.ismethod(func) and (attr not in cls.__dict__)):
            tdcls = func.__self__
            if issubclass(tdcls, TensorDictBase):
                setattr(cls, attr, _wrap_classmethod(tdcls, cls, func))
    cls.to_tensordict = _to_tensordict
    cls.device = property(_device, _device_setter)
    cls.batch_size = property(_batch_size, _batch_size_setter)
    cls.__doc__ = f'{cls.__name__}{inspect.signature(cls)}'
    _register_tensor_class(cls)
    return cls
class Capture(object):
    """Context manager that redirects stdout into an in-memory buffer.

    While active, everything written to stdout is collected in a
    StringIO (echoed to the real stdout as well when ``tee=True``),
    and ``sys.exit`` is replaced so exiting code raises PyrockoExit
    instead of terminating the process.
    """

    def __init__(self, tee=False):
        self.file = StringIO()
        self.tee = tee

    def __enter__(self):
        # Remember the real stdout/exit so __exit__ can restore them.
        self.orig_stdout = sys.stdout
        self.orig_exit = sys.exit
        sys.stdout = self

        def _raising_exit(res):
            raise PyrockoExit(res)

        sys.exit = _raising_exit

    def __exit__(self, *args):
        sys.stdout = self.orig_stdout
        sys.exit = self.orig_exit

    def write(self, data):
        text = new_str(data)
        self.file.write(text)
        if self.tee:
            self.orig_stdout.write(text)

    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)

    def flush(self):
        self.file.flush()

    def isatty(self):
        # Never a terminal, even when teeing to one.
        return False

    def getvalue(self):
        """Return everything captured so far."""
        return self.file.getvalue()
# NOTE(review): `_model('lightconv_lm')` below looks like a stripped
# decorator — presumably @register_model('lightconv_lm'). Likewise,
# add_args is conventionally a @staticmethod and build_model a
# @classmethod (it takes `cls`); confirm against the upstream source.
_model('lightconv_lm')
class LightConvLanguageModel(FairseqLanguageModel):
    """Decoder-only language model built on LightConv/DynamicConv layers."""

    def __init__(self, decoder):
        super().__init__(decoder)

    def add_args(parser):
        """Add model-specific arguments to the argument parser."""
        parser.add_argument('--dropout', default=0.1, type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', default=0.0, type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--relu-dropout', default=0.0, type=float, metavar='D', help='dropout probability after ReLU in FFN')
        parser.add_argument('--input-dropout', type=float, metavar='D', help='dropout probability of the inputs')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-output-dim', type=int, metavar='N', help='decoder output dimension')
        parser.add_argument('--decoder-input-dim', type=int, metavar='N', help='decoder input dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads or LightConv/DynamicConv heads')
        parser.add_argument('--decoder-normalize-before', default=False, action='store_true', help='apply layernorm before each decoder block')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N', help='adaptive input factor')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings')
        parser.add_argument('--character-embeddings', default=False, action='store_true', help='if set, uses character embedding convolutions to produce token embeddings')
        parser.add_argument('--character-filters', type=str, metavar='LIST', default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', help='size of character embeddings')
        parser.add_argument('--character-embedding-dim', type=int, metavar='N', default=4, help='size of character embeddings')
        parser.add_argument('--char-embedder-highway-layers', type=int, metavar='N', default=2, help='number of highway layers for character token embeddder')
        parser.add_argument('--adaptive-input', default=False, action='store_true', help='if set, uses adaptive input')
        parser.add_argument('--adaptive-input-factor', type=float, metavar='N', help='adaptive input factor')
        parser.add_argument('--adaptive-input-cutoff', metavar='EXPR', help='comma separated list of adaptive input cutoff points.')
        parser.add_argument('--tie-adaptive-weights', action='store_true', help='if set, ties the weights of adaptive softmax and adaptive input')
        parser.add_argument('--tie-adaptive-proj', action='store_true', help='if set, ties the projection weights of adaptive softmax and adaptive input')
        parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-kernel-size-list', type=(lambda x: utils.eval_str_list(x, int)), help='list of kernel size (default: "[3,7,15,31,31,31]")')
        parser.add_argument('--decoder-glu', type=utils.eval_bool, help='glu after in proj')
        parser.add_argument('--decoder-conv-type', default='dynamic', type=str, choices=['dynamic', 'lightweight'], help='type of convolution')
        parser.add_argument('--weight-softmax', default=True, type=utils.eval_bool)
        parser.add_argument('--weight-dropout', type=float, metavar='D', help='dropout probability for conv weights')

    def build_model(cls, args, task):
        """Build a new model instance from `args` and the task's dictionaries."""
        base_lm_architecture(args)
        if (getattr(args, 'max_source_positions', None) is None):
            args.max_source_positions = args.tokens_per_sample
        if (getattr(args, 'max_target_positions', None) is None):
            args.max_target_positions = args.tokens_per_sample
        # Choose the token embedding scheme: character CNN, adaptive input,
        # or a plain embedding table.
        if args.character_embeddings:
            embed_tokens = CharacterTokenEmbedder(task.dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers)
        elif args.adaptive_input:
            embed_tokens = AdaptiveInput(len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, utils.eval_str_list(args.adaptive_input_cutoff, type=int))
        else:
            embed_tokens = Embedding(len(task.dictionary), args.decoder_input_dim, task.dictionary.pad())
        if args.tie_adaptive_weights:
            # Tying adaptive softmax to adaptive input requires matching
            # factors, cutoffs and dimensions.
            assert args.adaptive_input
            assert (args.adaptive_input_factor == args.adaptive_softmax_factor)
            assert (args.adaptive_softmax_cutoff == args.adaptive_input_cutoff), '{} != {}'.format(args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
            assert (args.decoder_input_dim == args.decoder_output_dim)
        decoder = LightConvDecoder(args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False)
        return LightConvLanguageModel(decoder)
class Solution():
    """Binary search in a sorted (ascending) integer array (LeetCode 704)."""

    def search(self, nums: List[int], target: int) -> int:
        """Return the index of `target` in sorted `nums`, or -1 if absent.

        Runs in O(log n) time and O(1) extra space.
        """
        # Guard against empty input: the original indexed nums[0]/nums[-1]
        # unconditionally and raised IndexError on an empty list.
        if not nums:
            return (- 1)
        # Fast reject when target lies outside the array's value range.
        if ((target < nums[0]) or (target > nums[(- 1)])):
            return (- 1)
        low = 0
        high = (len(nums) - 1)
        while (low <= high):
            mid = ((low + high) // 2)
            if (nums[mid] == target):
                return mid
            elif (nums[mid] < target):
                low = (mid + 1)
            else:
                high = (mid - 1)
        return (- 1)
# NOTE(review): the two lines below look like stripped decorators —
# presumably @pytest.mark.parametrize(...) for each history length;
# confirm against the upstream source.
.parametrize('history_num_frames_ego', [0, 1, 2, 3, 4])
.parametrize('history_num_frames_agents', [0, 1, 2, 3, 4])
def test_vector_ego_agents(zarr_dataset: ChunkedDataset, dmg: LocalDataManager, cfg: dict, history_num_frames_ego: int, history_num_frames_agents: int) -> None:
    """Smoke-test EgoAgentDatasetVectorized across several history lengths."""
    cfg['model_params']['history_num_frames_ego'] = history_num_frames_ego
    cfg['model_params']['history_num_frames_agents'] = history_num_frames_agents
    vect = build_vectorizer(cfg, dmg)
    dataset = EgoAgentDatasetVectorized(cfg, zarr_dataset, vect)
    # Probe a few indices (including the last) to exercise __getitem__.
    indexes = [0, 1, 10, (- 1)]
    for idx in indexes:
        dataset[idx]
    check_torch_loading(dataset)
# NOTE(review): the bare `_auth` below looks like a stripped decorator —
# presumably an authentication decorator such as @check_auth; confirm
# against the original file.
_auth
def delete_user_role(request, pk):
    """Delete the UserRole with primary key `pk` (DELETE requests only).

    Returns JSON with code 200 on success, 500 on any failure.
    NOTE(review): the 'msg' strings appear garbled — presumably non-ASCII
    message text was lost in extraction; left byte-identical here.
    """
    if (request.method == 'DELETE'):
        try:
            UserRole.objects.get(id=pk).delete()
            return JsonResponse({'code': 200, 'data': None, 'msg': '!'})
        except Exception as e:
            return JsonResponse({'code': 500, 'data': None, 'msg': ',:{}'.format(e)})
class CmdQuit(COMMAND_DEFAULT_CLASS):
    """Gracefully disconnect from the game.

    Usage:
      quit [/all]

    Disconnects the calling session; with the /all switch, every
    session attached to the account is disconnected.
    """

    key = 'quit'
    switch_options = ('all',)
    locks = 'cmd:all()'
    account_caller = True

    def func(self):
        """Hook method: perform the disconnect."""
        account = self.account
        if ('all' in self.switches):
            # Drop every session attached to this account.
            account.msg('|RQuitting|n all sessions. Hope to see you soon again.', session=self.session)
            for sess in account.sessions.all():
                account.disconnect_session_from_account(sess, 'quit/all')
            return
        # Only this session goes away; tailor the farewell to how many remain.
        remaining = len(account.sessions.all()) - 1
        if remaining == 1:
            account.msg('|RQuitting|n. One session is still connected.', session=self.session)
        elif remaining > 1:
            account.msg(('|RQuitting|n. %i sessions are still connected.' % remaining), session=self.session)
        else:
            account.msg('|RQuitting|n. Hope to see you again, soon.', session=self.session)
        account.disconnect_session_from_account(self.session, 'quit')
class NoCapture(CaptureBase[str]):
    """No-op capture backend: satisfies the CaptureBase interface while
    leaving the file descriptor untouched and capturing nothing."""

    EMPTY_BUFFER = ''

    def __init__(self, fd: int) -> None:
        # The fd is accepted for interface compatibility but never used.
        pass

    def start(self) -> None:
        pass

    def done(self) -> None:
        pass

    def suspend(self) -> None:
        pass

    def resume(self) -> None:
        pass

    def snap(self) -> str:
        # Nothing is ever captured, so a snapshot is always empty.
        return ''

    def writeorg(self, data: str) -> None:
        pass
def version_raises_exception(monkeypatch, pyscaffold):
    """Temporarily make importlib.metadata.version raise PackageNotFoundError.

    Reloads `pyscaffold` so it observes the patched lookup, and restores
    the original behaviour (undo + reload) when the caller is done.
    """
    def _raise_not_found(name):
        raise metadata.PackageNotFoundError('No version mock')

    monkeypatch.setattr(metadata, 'version', _raise_not_found)
    reload(pyscaffold)
    try:
        yield
    finally:
        # Undo the patch first so the reload sees the real version lookup.
        monkeypatch.undo()
        reload(pyscaffold)
def write_reward_csv(rewards, split):
    """Build a LaTeX-ready results table for one data split.

    Parameters
    ----------
    rewards : dict
        Nested mapping: training -> split -> model -> (metric ->) series,
        where each series dict holds 'scores', 'smis' and 'avg' lists of
        (mean, std) pairs; only the final entry of each list is reported.
        A model literally named 'random' is a metric-less baseline.
    split : str
        Which split (e.g. 'online', 'retrain') of `rewards` to report.

    Returns
    -------
    pd.DataFrame
        Indexed by (Training, Model, Metric) with percentage-formatted
        "mean (std)" columns for scores, SMILES and their average.
    """
    rows = []
    for training in rewards:
        # Reset per training: the original kept the previous training's
        # baseline row alive (via try/except UnboundLocalError) and appended
        # a stale copy when a later training had no 'random' model.
        random_row = None
        for model in rewards[training][split]:
            if (model == 'random'):
                scores = rewards[training][split][model]['scores'][(- 1)]
                smis = rewards[training][split][model]['smis'][(- 1)]
                avg = rewards[training][split][model]['avg'][(- 1)]
                random_row = {'Training': training, 'Model': 'random', 'Metric': 'random', 'Scores ($\\pm$ s.d.)': f'{(100 * scores[0]):0.2f} ({(100 * scores[1]):0.2f})', 'SMILES ($\\pm$ s.d.)': f'{(100 * smis[0]):0.2f} ({(100 * smis[1]):0.2f})', 'Average ($\\pm$ s.d.)': f'{(100 * avg[0]):0.2f} ({(100 * avg[1]):0.2f})'}
                continue
            for metric in rewards[training][split][model]:
                # Pretty-print the acquisition-metric name.
                if (metric == 'greedy'):
                    metric_ = metric.capitalize()
                elif (metric == 'thompson'):
                    metric_ = 'TS'
                else:
                    metric_ = metric.upper()
                scores = rewards[training][split][model][metric]['scores'][(- 1)]
                smis = rewards[training][split][model][metric]['smis'][(- 1)]
                avg = rewards[training][split][model][metric]['avg'][(- 1)]
                rows.append({'Training': training, 'Model': model.upper(), 'Metric': metric_, 'Scores ($\\pm$ s.d.)': f'{(100 * scores[0]):0.1f} ({(100 * scores[1]):0.1f})', 'SMILES ($\\pm$ s.d.)': f'{(100 * smis[0]):0.1f} ({(100 * smis[1]):0.1f})', 'Average ($\\pm$ s.d.)': f'{(100 * avg[0]):0.2f} ({(100 * avg[1]):0.2f})'})
        # The baseline row (if this training has one) follows its model rows.
        if random_row is not None:
            rows.append(random_row)
    df = pd.DataFrame(rows).set_index(['Training', 'Model', 'Metric'])
    return df
def string_to_bool(v):
    """Parse a truthy/falsy string, passing booleans straight through.

    Accepts yes/no, true/false, t/f, y/n, 1/0 in any case; raises
    ArgumentTypeError for anything else (intended as an argparse `type`).
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in {'yes', 'true', 't', 'y', '1'}:
        return True
    if lowered in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise ArgumentTypeError(f'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).')
class Effect93(BaseEffect):
    """Passive effect: stacking-penalized damage-multiplier bonus applied to
    all fitted modules in the 'Hybrid Weapon' group, scaled by this module's
    own (modified) damageMultiplier attribute."""

    type = 'passive'

    # NOTE(review): defined without `self` — presumably invoked unbound by
    # the effect framework; confirm the calling convention.
    def handler(fit, module, context, projectionRange, **kwargs):
        fit.modules.filteredItemMultiply((lambda mod: (mod.item.group.name == 'Hybrid Weapon')), 'damageMultiplier', module.getModifiedItemAttr('damageMultiplier'), stackingPenalties=True, **kwargs)
class ContextRenderCORTEX_M(ContextRenderARM, ArchCORTEX_M):
    """Register-context renderer specialized for Cortex-M targets."""

    def __init__(self, ql, predictor):
        super().__init__(ql, predictor)
        # Second base initialized explicitly in addition to super();
        # presumably ArchCORTEX_M.__init__ is not reached via the normal
        # cooperative chain — confirm against the base classes.
        ArchCORTEX_M.__init__(self)
        self.regs_a_row = 3  # registers rendered per output row

    # NOTE(review): the line below looks like a stripped decorator —
    # presumably something like @context_printer('[ REGISTERS ]'); confirm.
    _printer('[ REGISTERS ]')
    def context_reg(self, saved_reg_dump):
        """Render the current register dump, marking registers that differ
        from `saved_reg_dump`."""
        cur_regs = self.dump_regs()
        cur_regs = self.swap_reg_name(cur_regs)
        # Cortex-M special registers shown alongside the core register set.
        extra_dict = {'xpsr': 'xpsr', 'control': 'control', 'primask': 'primask', 'faultmask': 'faultmask', 'basepri': 'basepri'}
        cur_regs = self.swap_reg_name(cur_regs, extra_dict=extra_dict)
        diff_reg = self.reg_diff(cur_regs, saved_reg_dump)
        self.render_regs_dump(cur_regs, diff_reg=diff_reg)
        self.print_mode_info(self.ql.arch.regs.cpsr)
class ResNet_ImageNet(nn.Module):
    """ResNet backbone for 224x224 ImageNet-style inputs.

    forward() returns the pooled, flattened feature vector (optionally
    L2-normalized when ``norm=True``). The ``pretrained``/``Embed`` flags
    and the ``embed`` linear head are kept for interface compatibility
    but are not used by forward().
    """

    def __init__(self, block, num_blocks, pretrained=False, norm=False, Embed=True, feat_dim=512, embed_dim=512):
        super(ResNet_ImageNet, self).__init__()
        self.in_planes = 64
        # Stem: 7x7/2 conv + BN + 3x3/2 max-pool (standard ImageNet stem).
        self.layer0_conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.layer0_bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages with widths 64/128/256/512; stages 2-4 downsample.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.norm = norm
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.embed = nn.Linear(feat_dim, embed_dim)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        h = self.maxpool(F.relu(self.layer0_bn1(self.layer0_conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = self.avgpool(h)
        feats = h.view(h.size(0), -1)
        return F.normalize(feats) if self.norm else feats
class TestPipe():
    """Tests for the `pipe` converter-composition helper."""

    def test_success(self):
        # Composed converters apply left-to-right.
        c = pipe(str, to_bool, bool)
        assert (True is c('True') is c(True))

    def test_fail(self):
        # A value no stage can interpret raises ValueError.
        c = pipe(str, to_bool)
        with pytest.raises(ValueError):
            c(33)
        with pytest.raises(ValueError):
            c('33')

    def test_sugar(self):
        # converter=[...] is syntactic sugar for converter=pipe(...).
        # NOTE(review): class C presumably carried an @attr.s decorator that
        # was lost in extraction — confirm against the original file.
        class C():
            a1 = attrib(default='True', converter=pipe(str, to_bool, bool))
            a2 = attrib(default=True, converter=[str, to_bool, bool])
        c = C()
        assert (True is c.a1 is c.a2)

    def test_empty(self):
        # An empty pipe is the identity function.
        o = object()
        assert (o is pipe()(o))
# NOTE(review): the line below looks like a stripped decorator — presumably
# @_partition_types.register('GENERAL_BIDIRECTIONAL'); confirm upstream.
_partition_types.register('GENERAL_BIDIRECTIONAL')
def general_bidirectional(node_indices, node_labels=None):
    """Yield all general bidirectional K-cuts over the given nodes.

    The complete cut is yielded first, followed by one GeneralKCut per
    symmetric cut matrix over the node set.
    """
    (yield CompleteGeneralKCut(node_indices, node_labels=node_labels))
    for cut_matrix in _cut_matrices(len(node_indices), symmetric=True):
        (yield GeneralKCut(node_indices, cut_matrix, node_labels=node_labels))
class PayloadTest(object):
    """Tests for flask-restx request payload validation.

    NOTE(review): extraction appears to have stripped the decorators from
    this class — the bare ``('/validation/')`` expressions were presumably
    ``@api.route('/validation/')``, and the bare ``(fields)`` /
    ``(fields, validate=...)`` lines presumably ``@api.expect(...)``
    (several of the latter are not even valid Python as-is). Restore the
    decorators against the original source before relying on these tests.
    """

    def assert_errors(self, client, url, data, *errors):
        # POST `data`, expect HTTP 400, and check each named field is reported.
        out = client.post_json(url, data, status=400)
        assert ('message' in out)
        assert ('errors' in out)
        for error in errors:
            assert (error in out['errors'])

    def test_validation_false_on_constructor(self, app, client):
        api = restx.Api(app, validate=False)
        fields = api.model('Person', {'name': restx.fields.String(required=True), 'age': restx.fields.Integer, 'birthdate': restx.fields.DateTime})
        ('/validation/')
        class ValidationOff(restx.Resource):
            (fields)
            def post(self):
                return {}
        # validate=False: an invalid payload is accepted.
        data = client.post_json('/validation/', {})
        assert (data == {})

    def test_validation_false_on_constructor_with_override(self, app, client):
        api = restx.Api(app, validate=False)
        fields = api.model('Person', {'name': restx.fields.String(required=True), 'age': restx.fields.Integer, 'birthdate': restx.fields.DateTime})
        ('/validation/')
        class ValidationOn(restx.Resource):
            (fields, validate=True)
            def post(self):
                return {}
        # Per-expect validate=True overrides the API-level default.
        self.assert_errors(client, '/validation/', {}, 'name')

    def test_validation_true_on_constructor(self, app, client):
        api = restx.Api(app, validate=True)
        fields = api.model('Person', {'name': restx.fields.String(required=True), 'age': restx.fields.Integer, 'birthdate': restx.fields.DateTime})
        ('/validation/')
        class ValidationOff(restx.Resource):
            (fields)
            def post(self):
                return {}
        self.assert_errors(client, '/validation/', {}, 'name')

    def test_validation_true_on_constructor_with_override(self, app, client):
        api = restx.Api(app, validate=True)
        fields = api.model('Person', {'name': restx.fields.String(required=True), 'age': restx.fields.Integer, 'birthdate': restx.fields.DateTime})
        ('/validation/')
        class ValidationOff(restx.Resource):
            (fields, validate=False)
            def post(self):
                return {}
        # Per-expect validate=False overrides the API-level default.
        data = client.post_json('/validation/', {})
        assert (data == {})

    def _setup_api_format_checker_tests(self, app, format_checker=None):
        # Shared setup: a custom field advertising the 'ipv4' string format.
        class IPAddress(restx.fields.Raw):
            __schema_type__ = 'string'
            __schema_format__ = 'ipv4'
        api = restx.Api(app, format_checker=format_checker)
        model = api.model('MyModel', {'ip': IPAddress(required=True)})
        ('/format_checker/')
        class TestResource(restx.Resource):
            (model, validate=True)
            def post(self):
                return {}

    def test_format_checker_none_on_constructor(self, app, client):
        # Without a FormatChecker, a malformed IP passes validation.
        self._setup_api_format_checker_tests(app)
        out = client.post_json('/format_checker/', {'ip': '192.168.1'})
        assert (out == {})

    def test_format_checker_object_on_constructor(self, app, client):
        # With a FormatChecker, the malformed IP is rejected.
        from jsonschema import FormatChecker
        self._setup_api_format_checker_tests(app, format_checker=FormatChecker())
        out = client.post_json('/format_checker/', {'ip': '192.168.1'}, status=400)
        assert ('ipv4' in out['errors']['ip'])

    def test_validation_false_in_config(self, app, client):
        app.config['RESTX_VALIDATE'] = False
        api = restx.Api(app)
        fields = api.model('Person', {'name': restx.fields.String(required=True), 'age': restx.fields.Integer, 'birthdate': restx.fields.DateTime})
        ('/validation/')
        class ValidationOff(restx.Resource):
            (fields)
            def post(self):
                return {}
        out = client.post_json('/validation/', {})
        assert (out == {})

    def test_validation_in_config(self, app, client):
        app.config['RESTX_VALIDATE'] = True
        api = restx.Api(app)
        fields = api.model('Person', {'name': restx.fields.String(required=True), 'age': restx.fields.Integer, 'birthdate': restx.fields.DateTime})
        ('/validation/')
        class ValidationOn(restx.Resource):
            (fields)
            def post(self):
                return {}
        self.assert_errors(client, '/validation/', {}, 'name')

    def test_api_payload(self, app, client):
        # api.payload exposes the parsed JSON body inside a resource.
        api = restx.Api(app, validate=True)
        fields = api.model('Person', {'name': restx.fields.String(required=True), 'age': restx.fields.Integer, 'birthdate': restx.fields.DateTime})
        ('/validation/')
        class Payload(restx.Resource):
            payload = None
            (fields)
            def post(self):
                Payload.payload = api.payload
                return {}
        data = {'name': 'John Doe', 'age': 15}
        client.post_json('/validation/', data)
        assert (Payload.payload == data)

    def test_validation_with_inheritance(self, app, client):
        # Inherited models validate both parent and child fields.
        api = restx.Api(app, validate=True)
        fields = api.model('Parent', {'name': restx.fields.String(required=True)})
        child_fields = api.inherit('Child', fields, {'age': restx.fields.Integer})
        ('/validation/')
        class Inheritance(restx.Resource):
            (child_fields)
            def post(self):
                return {}
        client.post_json('/validation/', {'name': 'John Doe', 'age': 15})
        self.assert_errors(client, '/validation/', {'age': '15'}, 'name', 'age')

    def test_validation_on_list(self, app, client):
        # Nested-list errors are reported with indexed paths.
        api = restx.Api(app, validate=True)
        person = api.model('Person', {'name': restx.fields.String(required=True), 'age': restx.fields.Integer(required=True)})
        family = api.model('Family', {'name': restx.fields.String(required=True), 'members': restx.fields.List(restx.fields.Nested(person))})
        ('/validation/')
        class List(restx.Resource):
            (family)
            def post(self):
                return {}
        self.assert_errors(client, '/validation/', {'name': 'Doe', 'members': [{'name': 'Jonn'}, {'age': 42}]}, 'members.0.age', 'members.1.name')

    def _setup_expect_validation_single_resource_tests(self, app):
        # Shared setup: resource expecting a single User object.
        api = restx.Api(app, validate=True)
        user = api.model('User', {'username': restx.fields.String()})
        ('/validation/')
        class Users(restx.Resource):
            (user)
            def post(self):
                return {}

    def _setup_expect_validation_collection_resource_tests(self, app):
        # Shared setup: resource expecting a list of User objects.
        api = restx.Api(app, validate=True)
        user = api.model('User', {'username': restx.fields.String()})
        ('/validation/')
        class Users(restx.Resource):
            ([user])
            def post(self):
                return {}

    def test_expect_validation_single_resource_success(self, app, client):
        self._setup_expect_validation_single_resource_tests(app)
        out = client.post_json('/validation/', {'username': 'alice'})
        assert ({} == out)

    def test_expect_validation_single_resource_error(self, app, client):
        self._setup_expect_validation_single_resource_tests(app)
        self.assert_errors(client, '/validation/', {'username': 123}, 'username')
        self.assert_errors(client, '/validation/', [{'username': 123}], '')

    def test_expect_validation_collection_resource_success(self, app, client):
        # A collection expectation accepts both one object and a list of them.
        self._setup_expect_validation_collection_resource_tests(app)
        out = client.post_json('/validation/', {'username': 'alice'})
        assert ({} == out)
        out = client.post_json('/validation/', [{'username': 'alice'}, {'username': 'bob'}])
        assert ({} == out)

    def test_expect_validation_collection_resource_error(self, app, client):
        self._setup_expect_validation_collection_resource_tests(app)
        self.assert_errors(client, '/validation/', {'username': 123}, 'username')
        self.assert_errors(client, '/validation/', [{'username': 'alice'}, {'username': 123}], 'username')

    def test_validation_with_propagate(self, app, client):
        # Validation errors still return 400 when PROPAGATE_EXCEPTIONS is set.
        app.config['PROPAGATE_EXCEPTIONS'] = True
        api = restx.Api(app, validate=True)
        fields = api.model('Person', {'name': restx.fields.String(required=True), 'age': restx.fields.Integer, 'birthdate': restx.fields.DateTime})
        ('/validation/')
        class ValidationOff(restx.Resource):
            (fields)
            def post(self):
                return {}
        self.assert_errors(client, '/validation/', {}, 'name')

    def test_empty_payload(self, app, client):
        # An empty body with a JSON content type must not crash validation.
        api = restx.Api(app, validate=True)
        ('/empty/')
        class Payload(restx.Resource):
            def post(self):
                return {}
        response = client.post('/empty/', data='', headers={'content-type': 'application/json'})
        assert (response.status_code == 200)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.