code stringlengths 281 23.7M |
|---|
def to_py_obj(obj):
    """Recursively convert tensors, arrays and containers to plain Python objects.

    Dicts and lists/tuples are walked recursively; TF/PyTorch/JAX tensors and
    NumPy arrays are converted via ``tolist()``; anything else passes through.
    """
    if isinstance(obj, (dict, UserDict)):
        return {key: to_py_obj(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [to_py_obj(element) for element in obj]
    # Framework checks are guarded so missing optional backends are never imported.
    if is_tf_available() and _is_tensorflow(obj):
        return obj.numpy().tolist()
    if is_torch_available() and _is_torch(obj):
        # detach() drops the autograd graph; cpu() moves off any accelerator.
        return obj.detach().cpu().tolist()
    if is_flax_available() and _is_jax(obj):
        return np.asarray(obj).tolist()
    if isinstance(obj, (np.ndarray, np.number)):
        return obj.tolist()
    return obj
# NOTE(review): this bare call looks like a mangled
# `@register_criterion('label_smoothed_cross_entropy')` decorator (the `@` and
# prefix appear stripped in this dump) -- confirm against the original source.
_criterion('label_smoothed_cross_entropy')
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
    """Label-smoothed cross-entropy training criterion (fairseq style)."""

    def __init__(self, args, task):
        super().__init__(args, task)
        # Probability mass moved off the gold label and spread across the vocab.
        self.eps = args.label_smoothing

    # NOTE(review): no `self` parameter -- presumably a stripped @staticmethod.
    def add_args(parser):
        """Register criterion-specific command-line arguments."""
        parser.add_argument('--label-smoothing', default=0.0, type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for `sample`.

        Returns (loss, sample_size, logging_output); sample_size is used to
        normalize gradients, either per sentence or per token.
        """
        net_output = model(**sample['net_input'])
        (loss, nll_loss) = self.compute_loss(model, net_output, sample, reduce=reduce)
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(nll_loss.data) if reduce else nll_loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    def compute_loss(self, model, net_output, sample, reduce=True):
        """Return (smoothed loss, raw NLL loss), both over flattened tokens."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        # Flatten to (batch * time, vocab) / (batch * time, 1) for the loss helper.
        lprobs = lprobs.view((- 1), lprobs.size((- 1)))
        target = model.get_targets(sample, net_output).view((- 1), 1)
        (loss, nll_loss) = label_smoothed_nll_loss(lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=reduce)
        return (loss, nll_loss)

    # NOTE(review): no `self` parameter -- presumably a stripped @staticmethod.
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate per-worker logging outputs; losses are reported in bits (/ log 2)."""
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        return {'loss': (((sum((log.get('loss', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'nll_loss': (((sum((log.get('nll_loss', 0) for log in logging_outputs)) / ntokens) / math.log(2)) if (ntokens > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
def test_matchnodes_two_collections_same_file(pytester: Pytester) -> None:
    """Two plugins each collect the same .abc file into their own File node.

    Verifies that both collected items run, and that an item from the
    second collector can be selected by node id.
    """
    pytester.makeconftest('\n import pytest\n def pytest_configure(config):\n config.pluginmanager.register(Plugin2())\n\n class Plugin2(object):\n def pytest_collect_file(self, file_path, parent):\n if file_path.suffix == ".abc":\n return MyFile2.from_parent(path=file_path, parent=parent)\n\n def pytest_collect_file(file_path, parent):\n if file_path.suffix == ".abc":\n return MyFile1.from_parent(path=file_path, parent=parent)\n\n class MyFile1(pytest.File):\n def collect(self):\n yield Item1.from_parent(name="item1", parent=self)\n\n class MyFile2(pytest.File):\n def collect(self):\n yield Item2.from_parent(name="item2", parent=self)\n\n class Item1(pytest.Item):\n def runtest(self):\n pass\n\n class Item2(pytest.Item):\n def runtest(self):\n pass\n ')
    p = pytester.makefile('.abc', '')
    result = pytester.runpytest()
    assert (result.ret == 0)
    # Both collectors contributed one item each.
    result.stdout.fnmatch_lines(['*2 passed*'])
    # Selecting by node id must match the item from the second collection too.
    res = pytester.runpytest(('%s::item2' % p.name))
    res.stdout.fnmatch_lines(['*1 passed*'])
class _ROIAlignRotated(Function):
    """autograd.Function wrapper around detectron2's rotated ROIAlign C++ ops.

    NOTE(review): `forward`/`backward` lack @staticmethod and `backward` is
    preceded by a bare `_differentiable` token -- presumably stripped
    `@staticmethod` / `@once_differentiable` decorators; confirm upstream.
    """

    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
        # Only `roi` is needed for backward; the rest is stashed on ctx.
        ctx.save_for_backward(roi)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.input_shape = input.size()
        output = torch.ops.detectron2.roi_align_rotated_forward(input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio)
        return output

    _differentiable
    def backward(ctx, grad_output):
        (rois,) = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        sampling_ratio = ctx.sampling_ratio
        (bs, ch, h, w) = ctx.input_shape
        grad_input = torch.ops.detectron2.roi_align_rotated_backward(grad_output, rois, spatial_scale, output_size[0], output_size[1], bs, ch, h, w, sampling_ratio)
        # Gradients only flow to `input`; the remaining forward args are
        # non-differentiable, hence the Nones.
        return (grad_input, None, None, None, None, None)
class DecisionTransformerModelTester():
    """Test helper that builds a small DecisionTransformer config/inputs and
    checks the model's output shapes."""

    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Create random (config, states, actions, rewards, returns, timesteps, mask)."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (config, states, actions, rewards, returns_to_go, timesteps, attention_mask)

    def get_config(self):
        """Build a DecisionTransformerConfig mirroring this tester's dimensions."""
        return DecisionTransformerConfig(batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length)

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        """Run a forward pass in eval mode and assert prediction shapes."""
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        # Hidden states interleave (return, state, action) tokens -> seq_length * 3.
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, (self.seq_length * 3), self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) protocol
        expected by the shared model-test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, states, actions, rewards, returns_to_go, timesteps, attention_mask) = config_and_inputs
        inputs_dict = {'states': states, 'actions': actions, 'rewards': rewards, 'returns_to_go': returns_to_go, 'timesteps': timesteps, 'attention_mask': attention_mask}
        return (config, inputs_dict)
def get_zip_manifest(zip_root, zip_filename):
    """Map each zip member's utterance id to its "filename:offset:size" locator.

    The locator allows reading a member's raw bytes directly from the archive
    without the zipfile module (members are assumed stored, not compressed).
    """
    zip_path = op.join(zip_root, zip_filename)
    with zipfile.ZipFile(zip_path, mode='r') as f:
        info = f.infolist()
    manifest = {}
    for i in tqdm(info):
        utt_id = op.splitext(i.filename)[0]
        # header_offset + 30-byte fixed local header + filename length gives the
        # data start. NOTE(review): assumes no extra field in the local header
        # and no compression -- TODO confirm for the archives used here.
        (offset, file_size) = (((i.header_offset + 30) + len(i.filename)), i.file_size)
        manifest[utt_id] = f'{zip_filename}:{offset}:{file_size}'
        # Sanity-check each locator by reading the bytes back and verifying the
        # npy magic. (Indentation reconstructed: presumed inside the loop.)
        with open(zip_path, 'rb') as f:
            f.seek(offset)
            data = f.read(file_size)
            assert ((len(data) > 1) and is_npy_data(data))
    return manifest
def test(args, device_id, pt, step):
    """Evaluate a summarization checkpoint on the test set and return its ROUGE.

    `pt` overrides `args.test_from` as the checkpoint path when non-empty;
    `step` labels the evaluation run for the predictor.
    """
    device = ('cpu' if (args.visible_gpus == '-1') else 'cuda')
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info(('Loading checkpoint from %s' % test_from))
    # map_location keeps tensors on CPU regardless of where they were saved.
    checkpoint = torch.load(test_from, map_location=(lambda storage, loc: storage))
    opt = vars(checkpoint['opt'])
    # Restore only whitelisted model hyper-parameters from the checkpoint.
    for k in opt.keys():
        if (k in model_flags):
            setattr(args, k, opt[k])
    print(args)
    tokenizer = BertTokenizer.from_pretrained(args.bert_dir)
    model = Summarizer(args, device, tokenizer.vocab, checkpoint)
    model.eval()
    test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False), args.test_batch_size, args.test_batch_ex_size, device, shuffle=False, is_test=True)
    predictor = build_predictor(args, tokenizer, model, logger)
    rouge = predictor.validate(test_iter, step)
    return rouge
def test_get_pipeline_path_absolute_path_ignore_parent():
    """An absolute pipeline path resolves as-is (plus .yaml); `parent` is ignored."""
    abs_path = Path('tests/testpipelinewd.yaml').resolve()
    # Pass the path without its .yaml suffix; the loader should add it back.
    str_abs_sans_yaml = str(abs_path.with_suffix(''))
    path_found = fileloader.get_pipeline_path(str_abs_sans_yaml, 'parent')
    expected_path = cwd_tests.joinpath('testpipelinewd.yaml')
    assert (path_found == expected_path)
def parse_search_arg(search):
    """Expand a hyperparameter-search spec into a grid of CLI flag combinations.

    `search` looks like "lr=0.1:0.01 bs=16:32": space-separated entries of the
    form ``name=v1:v2:...``. Returns ``(matrix, entry_names)`` where `matrix`
    is the full cartesian product, each row a list of ``"--name value"``
    strings, and `entry_names` preserves the entry order.
    """
    groups = search.split()
    # split('=', 1): only the first '=' separates name from values, so values
    # themselves may contain '=' (the old bare split('=') raised on those).
    entries = dict(g.split('=', 1) for g in groups)
    entry_names = list(entries.keys())
    # One candidate-flag list per entry; values within an entry are ':'-separated.
    sets = [[f'--{k} {v}' for v in vs.split(':')] for k, vs in entries.items()]
    matrix = [list(combo) for combo in itertools.product(*sets)]
    return (matrix, entry_names)
def test_change_mychar(skip_qtbot: pytestqt.qtbot.QtBot) -> None:
    """Clicking the left mychar button cycles the cosmetic patch to MyChar.CUSTOM."""
    cosmetic_patches = CSCosmeticPatches()
    dialog = CSCosmeticPatchesDialog(None, cosmetic_patches)
    skip_qtbot.addWidget(dialog)
    skip_qtbot.mouseClick(dialog.mychar_left_button, QtCore.Qt.MouseButton.LeftButton)
    # One left-click from the default should land on CUSTOM.
    assert (dialog.cosmetic_patches == CSCosmeticPatches(mychar=MyChar.CUSTOM))
def _partial_compare_list(val1, val2, *, indent):
    """Partially compare two lists element-wise.

    Fails fast when the expectation (`val2`) is longer than the actual value
    (`val1`); extra trailing elements in `val1` are allowed. Each pair is
    delegated to partial_compare, and the first failing outcome is returned.
    """
    if len(val2) > len(val1):
        failure = PartialCompareOutcome('Second list is longer than first list')
        print_i(failure.error, indent, error=True)
        return failure
    for actual, expected in zip(val1, val2):
        result = partial_compare(actual, expected, indent=indent + 1)
        if not result:
            return result
    return PartialCompareOutcome()
class GreedyPerfPartitioner(Partitioner):
    """Greedily places shards on devices, balancing perf via a min-heap.

    NOTE(review): the helpers below (`_establish_minheap`, `_device_partition`,
    `_cohost_partition`, `_get_host_level_devices`, `_uniform_partition`) take
    no `self`/`cls` and are invoked through the class name -- presumably
    stripped @staticmethod/@classmethod decorators; confirm upstream.
    """

    def __init__(self, sort_by: SortBy=SortBy.STORAGE, balance_modules: bool=False) -> None:
        self._sort_by = sort_by
        self._balance_modules = balance_modules

    def partition(self, proposal: List[ShardingOption], storage_constraint: Topology) -> List[ShardingOption]:
        """Assign a rank to every shard in `proposal`, mutating it in place.

        Works on a deep copy of `storage_constraint` so the caller's topology
        is untouched; the mutated copy is kept on `self._topology`.
        """
        _topology: Topology = copy.deepcopy(storage_constraint)
        # Lazily (re)built heap of devices ordered by perf; invalidated whenever
        # a cohost partition mutates device state behind its back.
        minheap_devices: Optional[List[OrderedDeviceHardware]] = None
        _host_level_devices = GreedyPerfPartitioner._get_host_level_devices(_topology)
        # Uniform (data-parallel-style) options are spread across all devices first.
        uniform_sharding_options = _get_uniform_sharding_options(proposal)
        GreedyPerfPartitioner._uniform_partition(uniform_sharding_options, _topology.devices)
        sharding_option_groups = _group_and_sort_non_uniform_sharding_options(proposal, sort_by=self._sort_by, balance_modules=self._balance_modules)
        for sharding_option_group in sharding_option_groups:
            if (sharding_option_group.sharding_options[0].partition_by == PartitionByType.HOST.value):
                GreedyPerfPartitioner._cohost_partition(sharding_option_group, _host_level_devices)
                # _cohost_partition changed device storage/perf; heap is stale.
                minheap_devices = None
            elif (sharding_option_group.sharding_options[0].partition_by == PartitionByType.DEVICE.value):
                if (minheap_devices is None):
                    minheap_devices = GreedyPerfPartitioner._establish_minheap(_topology.devices, _topology.local_world_size)
                assert (len(sharding_option_group.sharding_options) == 1), f'Unexpected length for sharding options: {len(sharding_option_group.sharding_options)}'
                GreedyPerfPartitioner._device_partition(sharding_option_group.sharding_options[0], minheap_devices)
            else:
                raise RuntimeError(f'Unexpected sharding option group {sharding_option_group}')
        self._topology: Topology = _topology
        return proposal

    def _establish_minheap(devices: List[DeviceHardware], local_world_size: int) -> List[OrderedDeviceHardware]:
        """Wrap devices in OrderedDeviceHardware and heapify by their ordering."""
        minheap_devices = [OrderedDeviceHardware(device, local_world_size) for device in devices]
        heapq.heapify(minheap_devices)
        return minheap_devices

    def _device_partition(sharding_option: ShardingOption, minheap_devices: List[OrderedDeviceHardware], bulk_heapify_threshold: float=0.25) -> None:
        """Place each shard on the least-loaded device that has room.

        Devices that cannot fit a shard are popped to `tmp_heap` and restored
        afterwards -- pushed back one-by-one when few, or bulk-heapified when
        more than `bulk_heapify_threshold` of the heap was displaced.
        """
        pushlimit = (len(minheap_devices) * bulk_heapify_threshold)
        for shard in sharding_option.shards:
            tmp_heap = []
            while minheap_devices:
                ordered_device = minheap_devices[0]
                device = ordered_device.device
                storage = cast(Storage, shard.storage)
                if storage.fits_in(device.storage):
                    shard.rank = device.rank
                    device.storage -= cast(Storage, shard.storage)
                    device.perf += cast(Perf, shard.perf)
                    # Re-sift the mutated device instead of pop+push.
                    heapq.heapreplace(minheap_devices, ordered_device)
                    break
                else:
                    heapq.heappop(minheap_devices)
                    tmp_heap.append(ordered_device)
            else:
                # while-else: heap exhausted without a break -> nothing fit.
                raise PlannerError(error_type=PlannerErrorType.PARTITION, message=f"Device partition failed. Couldn't find a rank for shard {shard} of table {sharding_option.name}, largest device storage: {max((ordered_device.device.storage for ordered_device in tmp_heap))}")
            if tmp_heap:
                if (len(tmp_heap) <= pushlimit):
                    for ordered_device in tmp_heap:
                        heapq.heappush(minheap_devices, ordered_device)
                else:
                    minheap_devices.extend(tmp_heap)
                    heapq.heapify(minheap_devices)

    def _cohost_partition(sharding_option_group: ShardingOptionGroup, _host_level_devices: List[List[DeviceHardware]]) -> None:
        """Place a whole option group on a single host, trying hosts best-first.

        Each candidate host is tried on a deep copy of its devices so a failed
        attempt leaves real device state untouched; only on success are the
        copies' storage/perf written back.
        """
        sorted_host_level_devices = _sort_devices_by_perf(_host_level_devices)
        for devices in sorted_host_level_devices:
            host_devices = copy.deepcopy(devices)
            host_storage = Storage(hbm=0, ddr=0)
            for device in host_devices:
                host_storage += device.storage
            # Quick reject: the whole group must fit in the host's total storage.
            if (not sharding_option_group.storage_sum.fits_in(host_storage)):
                continue
            success = True
            minheap_devices: Optional[List[OrderedDeviceHardware]] = None
            for sharding_option in sharding_option_group.sharding_options:
                try:
                    if (sharding_option.sharding_type == ShardingType.TABLE_ROW_WISE.value):
                        GreedyPerfPartitioner._uniform_partition([sharding_option], host_devices)
                        # Uniform partition mutated devices outside the heap.
                        minheap_devices = None
                    elif (sharding_option.sharding_type == ShardingType.TABLE_COLUMN_WISE.value):
                        if (minheap_devices is None):
                            minheap_devices = GreedyPerfPartitioner._establish_minheap(host_devices, len(host_devices))
                        GreedyPerfPartitioner._device_partition(sharding_option, minheap_devices)
                    else:
                        raise RuntimeError(f'unexpected cohost sharding type: {sharding_option.sharding_type}')
                except PlannerError:
                    success = False
                    break
            if success:
                # Commit: copy the scratch devices' state back to the real ones
                # (matched by rank order).
                host_devices.sort(key=(lambda device: device.rank))
                for (device, device_copy) in zip(devices, host_devices):
                    device.storage = device_copy.storage
                    device.perf = device_copy.perf
                return
        raise PlannerError(error_type=PlannerErrorType.PARTITION, message=f"can't find a host for sharding option group {sharding_option_group}")

    def _get_host_level_devices(_topology: Topology) -> List[List[DeviceHardware]]:
        """Group the flat device list into per-host sublists of local_world_size."""
        num_hosts: int = (_topology.world_size // _topology.local_world_size)
        host_level_devices: List[List[DeviceHardware]] = []
        for i in range(num_hosts):
            devices_in_host = _topology.devices[(i * _topology.local_world_size):((i + 1) * _topology.local_world_size)]
            host_level_devices.append(devices_in_host)
        return host_level_devices

    def _uniform_partition(sharding_options: List[ShardingOption], devices: List[DeviceHardware]) -> None:
        """Place shard i of every option on device i (requires a 1:1 match)."""
        for sharding_option in sharding_options:
            if (sharding_option.num_shards != len(devices)):
                raise RuntimeError(f'For a uniform partition, the number of shards ({sharding_option.num_shards}) must equal the number of devices ({len(devices)})')
            for i in range(len(devices)):
                storage_needed = cast(Storage, sharding_option.shards[i].storage)
                if (not storage_needed.fits_in(devices[i].storage)):
                    raise PlannerError(error_type=PlannerErrorType.PARTITION, message=f'Shard of size {storage_needed} bytes does not fit on any rank. Device memory cap: {devices[i].storage}.')
                else:
                    sharding_option.shards[i].rank = devices[i].rank
                    devices[i].storage -= storage_needed
                    devices[i].perf += cast(Perf, sharding_option.shards[i].perf)
def test_single_output(mode):
    """ScalarLoop with one state: x accumulates `const` for n_steps iterations."""
    n_steps = int64('n_steps')
    x0 = float64('x0')
    const = float64('const')
    x = (x0 + const)
    op = ScalarLoop(init=[x0], constant=[const], update=[x])
    x = op(n_steps, x0, const)
    fn = function([n_steps, x0, const], x, mode=mode)
    # Result should equal x0 + n_steps * const.
    np.testing.assert_allclose(fn(5, 0, 1), 5)
    np.testing.assert_allclose(fn(5, 0, 2), 10)
    np.testing.assert_allclose(fn(4, 3, (- 1)), (- 1))
def test_mice_ordering():
    """MICE objects order by phi; comparisons across directions are exercised too."""
    phi1 = mice()
    # phi bumped by >EPSILON so the two compare strictly unequal.
    phi2 = mice(phi=(1.0 + (EPSILON * 2)), partition=())
    assert (phi1 < phi2)
    assert (phi2 > phi1)
    assert (phi1 <= phi2)
    assert (phi2 >= phi1)
    different_direction = mice(direction='different')
    assert (phi2 > different_direction)
    assert (different_direction < phi2)
    assert (phi2 >= different_direction)
    assert (different_direction <= phi2)
def closeness_centrality(graph, name='closeness', weight='mm_len', radius=None, distance=None, verbose=True, **kwargs):
    """Annotate a copy of `graph` with (optionally local) closeness centrality.

    With `radius`, centrality is computed per node on its ego subgraph
    (local closeness); otherwise networkx's global closeness is used.
    The attribute is stored under `name`; the input graph is not modified.
    """
    netx = graph.copy()
    if radius:
        # Normalize local values by the size of the full graph, not the ego graph.
        lengraph = len(netx)
        for n in tqdm(netx, total=len(netx), disable=(not verbose)):
            sub = nx.ego_graph(netx, n, radius=radius, distance=distance)
            netx.nodes[n][name] = _closeness_centrality(sub, n, length=weight, len_graph=lengraph)
    else:
        vals = nx.closeness_centrality(netx, distance=weight, **kwargs)
        nx.set_node_attributes(netx, vals, name)
    return netx
class FCTDecoderLayer(nn.Module):
    """Transformer decoder layer with a growing memory bank of past queries.

    Each sublayer (self-attn, src-attn, feed-forward) is wrapped in
    PrePostProcessing: layer-norm before, dropout+residual after.
    """

    def __init__(self, h, d_model, p, d_ff, attn_p=0.1):
        super(FCTDecoderLayer, self).__init__()
        self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
        self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
        self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
        self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=True)
        self.multihead_tgt = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
        self.multihead_src = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
        # NOTE(review): if onmt.constants.activation_layer is neither
        # 'linear_relu_linear' nor 'maxout', `feedforward` is unbound and the
        # Bottle() call below raises NameError -- confirm valid configs upstream.
        if (onmt.constants.activation_layer == 'linear_relu_linear'):
            ff_p = p
            feedforward = FeedForward(d_model, d_ff, ff_p)
        elif (onmt.constants.activation_layer == 'maxout'):
            k = int(math.ceil((d_ff / d_model)))
            feedforward = MaxOut(d_model, d_model, k)
        self.feedforward = Bottle(feedforward)

    def forward(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None):
        """Training-time forward; appends this layer's queries to memory_bank.

        Returns (output, updated memory_bank, src-attention coverage).
        """
        query = self.preprocess_attn(input, mask=pad_mask_tgt)
        # Memory bank stacks each step's normalized queries along dim 0.
        if (memory_bank is None):
            memory_bank = query.unsqueeze(0)
        else:
            memory_bank = torch.cat([memory_bank, query.unsqueeze(0)], dim=0)
        (out, _) = self.multihead_tgt(query, memory_bank, mask_tgt, query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
        input = self.postprocess_attn(out, input)
        query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
        (out, coverage) = self.multihead_src(query, context, mask_src, query_mask=pad_mask_tgt, value_mask=pad_mask_src)
        input = self.postprocess_src_attn(out, input)
        out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt), mask=pad_mask_tgt)
        input = self.postprocess_ffn(out, input)
        return (input, memory_bank, coverage)

    def step(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None, buffer=None):
        """Incremental decoding step; `buffer` accumulates queries across steps.

        Same flow as forward(), but the memory bank is rebuilt from the
        running buffer each call. Returns (output, memory_bank, coverage, buffer).
        """
        query = self.preprocess_attn(input, mask=pad_mask_tgt)
        if (buffer is not None):
            buffer = torch.cat([buffer, query], dim=1)
        else:
            buffer = query
        if (memory_bank is None):
            memory_bank = buffer.unsqueeze(0)
        else:
            memory_bank = torch.cat([memory_bank, buffer.unsqueeze(0)], dim=0)
        (out, _) = self.multihead_tgt(query, memory_bank, mask_tgt, query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
        input = self.postprocess_attn(out, input)
        query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
        (out, coverage) = self.multihead_src(query, context, mask_src, query_mask=pad_mask_tgt, value_mask=pad_mask_src)
        input = self.postprocess_src_attn(out, input)
        out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt), mask=pad_mask_tgt)
        input = self.postprocess_ffn(out, input)
        return (input, memory_bank, coverage, buffer)
class PageSerializer(ThroughModelSerializerMixin, TranslationSerializerMixin, ElementModelSerializerMixin, ElementWarningSerializerMixin, ReadOnlyObjectPermissionSerializerMixin, serializers.ModelSerializer):
    """DRF serializer for Page, including its through-model relations
    (questionsets/questions), translations, warnings and permissions."""

    model = serializers.SerializerMethodField()
    uri_path = serializers.CharField(required=True)
    sections = serializers.PrimaryKeyRelatedField(queryset=Section.objects.all(), required=False, many=True)
    # Through-model serializers expose ordering metadata, not just PKs.
    questionsets = PageQuestionSetSerializer(source='page_questionsets', read_only=False, required=False, many=True)
    questions = PageQuestionSerializer(source='page_questions', read_only=False, required=False, many=True)
    warning = serializers.SerializerMethodField()
    read_only = serializers.SerializerMethodField()
    attribute_uri = serializers.CharField(source='attribute.uri', read_only=True)
    condition_uris = serializers.SerializerMethodField()

    class Meta():
        model = Page
        fields = ('id', 'model', 'uri', 'uri_prefix', 'uri_path', 'comment', 'locked', 'attribute', 'is_collection', 'title', 'help', 'verbose_name', 'sections', 'questionsets', 'questions', 'conditions', 'editors', 'warning', 'read_only', 'attribute_uri', 'condition_uris')
        # Fields handled by TranslationSerializerMixin.
        trans_fields = ('title', 'help', 'verbose_name')
        # (field, parent attr, child attr, through accessor) for parent relations.
        parent_fields = (('sections', 'section', 'page', 'section_pages'),)
        through_fields = (('questionsets', 'page', 'questionset', 'page_questionsets'), ('questions', 'page', 'question', 'page_questions'))
        validators = (PageUniqueURIValidator(), PageLockedValidator())
        warning_fields = ('title',)

    def get_condition_uris(self, obj):
        """Return the URIs of all conditions attached to this page."""
        return [condition.uri for condition in obj.conditions.all()]
class TestKernprof(unittest.TestCase):
    """Tests for ContextualProfile's enable counting, decorator and generator support."""

    def test_enable_disable(self):
        """enable/disable counts nest, floor at zero, and unwind through context
        managers -- including when the body raises."""
        profile = ContextualProfile()
        self.assertEqual(profile.enable_count, 0)
        profile.enable_by_count()
        self.assertEqual(profile.enable_count, 1)
        profile.enable_by_count()
        self.assertEqual(profile.enable_count, 2)
        profile.disable_by_count()
        self.assertEqual(profile.enable_count, 1)
        profile.disable_by_count()
        self.assertEqual(profile.enable_count, 0)
        # Extra disable must not go negative.
        profile.disable_by_count()
        self.assertEqual(profile.enable_count, 0)
        # Nested `with` blocks increment/decrement symmetrically.
        with profile:
            self.assertEqual(profile.enable_count, 1)
            with profile:
                self.assertEqual(profile.enable_count, 2)
            self.assertEqual(profile.enable_count, 1)
        self.assertEqual(profile.enable_count, 0)
        # __exit__ must still decrement when the body raises.
        with self.assertRaises(RuntimeError):
            self.assertEqual(profile.enable_count, 0)
            with profile:
                self.assertEqual(profile.enable_count, 1)
                raise RuntimeError()
        self.assertEqual(profile.enable_count, 0)

    def test_function_decorator(self):
        """Wrapping a function preserves metadata and result, and the profile
        is disabled again once the call returns."""
        profile = ContextualProfile()
        f_wrapped = profile(f)
        self.assertEqual(f_wrapped.__name__, f.__name__)
        self.assertEqual(f_wrapped.__doc__, f.__doc__)
        self.assertEqual(profile.enable_count, 0)
        value = f_wrapped(10)
        self.assertEqual(profile.enable_count, 0)
        self.assertEqual(value, f(10))

    def test_gen_decorator(self):
        """Wrapping a generator preserves metadata and next/send behavior; the
        profile is disabled between resumptions."""
        profile = ContextualProfile()
        g_wrapped = profile(g)
        self.assertEqual(g_wrapped.__name__, g.__name__)
        self.assertEqual(g_wrapped.__doc__, g.__doc__)
        self.assertEqual(profile.enable_count, 0)
        i = g_wrapped(10)
        self.assertEqual(profile.enable_count, 0)
        self.assertEqual(next(i), 20)
        self.assertEqual(profile.enable_count, 0)
        self.assertEqual(i.send(30), 50)
        self.assertEqual(profile.enable_count, 0)
        # Exhaustion may surface as StopIteration or RuntimeError (PEP 479 wrapping).
        with self.assertRaises((StopIteration, RuntimeError)):
            next(i)
        self.assertEqual(profile.enable_count, 0)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Materialize a LlamaTokenizer directory: config JSONs plus the copied
    sentencepiece model file."""
    print(f'Fetching the tokenizer from {input_tokenizer_path}.')
    os.makedirs(tokenizer_path, exist_ok=True)
    tokenizer_config = {'bos_token': '', 'eos_token': '', 'model_max_length': int(1e+30), 'tokenizer_class': 'LlamaTokenizer', 'unk_token': ''}
    write_json({}, os.path.join(tokenizer_path, 'special_tokens_map.json'))
    write_json(tokenizer_config, os.path.join(tokenizer_path, 'tokenizer_config.json'))
    shutil.copyfile(input_tokenizer_path, os.path.join(tokenizer_path, 'tokenizer.model'))
def test_logxml_changingdir(pytester: Pytester) -> None:
    """The junit XML path is resolved before tests run, so a test that
    chdir()s cannot redirect where the report is written."""
    pytester.makepyfile('\n def test_func():\n import os\n os.chdir("a")\n ')
    pytester.mkdir('a')
    result = pytester.runpytest('--junitxml=a/x.xml')
    assert (result.ret == 0)
    # Report lands at the originally-resolved location.
    assert pytester.path.joinpath('a/x.xml').exists()
def resp_create_group_access_token():
    """Responses fixture that mocks the POST group access-token endpoint.

    NOTE(review): the original had a mangled, unterminated ``url='`` string
    literal (a syntax error) and what looks like a stripped ``@pytest.fixture``
    decorator. The URL below is a plausible reconstruction -- confirm it
    against the real test suite before relying on it.
    """
    content = {'user_id': 141, 'scopes': ['api'], 'name': 'token', 'expires_at': '2021-01-31', 'id': 42, 'active': True, 'created_at': '2021-01-20T22:11:48.151Z', 'revoked': False}
    with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
        rsps.add(
            method=responses.POST,
            # TODO(review): reconstructed URL -- verify the group id/host.
            url='http://localhost/api/v4/groups/1/access_tokens',
            json=content,
            content_type='application/json',
            status=200,
        )
        yield rsps
def print_scatter(model, p=0.0, interface=None):
    """Print scattering efficiency tables at model discontinuities.

    For each discontinuity (or only `interface` when given) and each incident
    direction/mode that is sub-critical at ray parameter `p`, prints a small
    table of outgoing direction/mode efficiencies.
    """
    if (interface is not None):
        discontinuities = [model.discontinuity(interface)]
    else:
        discontinuities = model.discontinuities()
    for discontinuity in discontinuities:
        print(('%s (%g km)' % (discontinuity, (discontinuity.z / cake.km))))
        print()
        cols = []
        for in_direction in (cake.DOWN, cake.UP):
            for in_mode in (cake.P, cake.S):
                # Index by bool: critical_ps returns (down, up) critical slownesses.
                p_critical = discontinuity.critical_ps(in_mode)[(in_direction == cake.UP)]
                if ((p_critical is None) or (p >= p_critical)):
                    continue
                vals = []
                for out_direction in (cake.UP, cake.DOWN):
                    for out_mode in (cake.S, cake.P):
                        vals.append((out_direction, out_mode, discontinuity.efficiency(in_direction, out_direction, in_mode, out_mode, p)))
                # Skip incidence combinations with all-zero efficiencies.
                if all(((x[(- 1)] == 0.0) for x in vals)):
                    continue
                sout = [scatter_out_fmt(d, m, v) for (d, m, v) in vals]
                sin1 = scatter_in_fmt(in_direction, in_mode, cake.DOWN)
                sin2 = scatter_in_fmt(in_direction, in_mode, cake.UP)
                # Five text rows per table: values above, labels, divider,
                # labels, values below.
                line1 = ('%s %5s %5s' % ((' ' * len(sin1)), sout[0][1], sout[1][1]))
                line2 = ('%s %-5s %-5s' % (sin1, sout[0][0], sout[1][0]))
                line4 = ('%s %-5s %-5s' % (sin2, sout[2][0], sout[3][0]))
                line5 = ('%s %5s %5s' % ((' ' * len(sin2)), sout[2][1], sout[3][1]))
                line3 = ('-' * len(line1))
                cols.append((line1, line2, line3, line4, line5))
        # NOTE(review): loop variable shadows the `cols` list (safe here since
        # the list is rebuilt per discontinuity, but worth renaming upstream).
        for cols in zip(*cols):
            print((' ' + ' '.join(cols)))
        print()
    print()
class NotRequiredTests(BaseTestCase):
    """Tests for the NotRequired special form (subscripting, repr, and the
    operations it must reject)."""

    def test_basics(self):
        # On < 3.11 the backport validates its argument eagerly.
        if (not TYPING_3_11_0):
            with self.assertRaises(TypeError):
                NotRequired[1]
            with self.assertRaises(TypeError):
                NotRequired[(int, str)]
            with self.assertRaises(TypeError):
                NotRequired[int][str]

    def test_repr(self):
        # repr must reflect whichever module actually provides NotRequired.
        if hasattr(typing, 'NotRequired'):
            mod_name = 'typing'
        else:
            mod_name = 'typing_extensions'
        self.assertEqual(repr(NotRequired), (mod_name + '.NotRequired'))
        cv = NotRequired[int]
        self.assertEqual(repr(cv), (mod_name + '.NotRequired[int]'))
        cv = NotRequired[Employee]
        self.assertEqual(repr(cv), (mod_name + ('.NotRequired[%s.Employee]' % __name__)))

    def test_cannot_subclass(self):
        """Neither the form nor its subscriptions may be used as a base class."""
        with self.assertRaises(TypeError):
            class C(type(NotRequired)):
                pass
        with self.assertRaises(TypeError):
            class C(type(NotRequired[int])):
                pass

    def test_cannot_init(self):
        """NotRequired and its types are not instantiable."""
        with self.assertRaises(TypeError):
            NotRequired()
        with self.assertRaises(TypeError):
            type(NotRequired)()
        with self.assertRaises(TypeError):
            type(NotRequired[Optional[int]])()

    def test_no_isinstance(self):
        """isinstance/issubclass checks against NotRequired are rejected."""
        with self.assertRaises(TypeError):
            isinstance(1, NotRequired[int])
        with self.assertRaises(TypeError):
            issubclass(int, NotRequired)
class ConfigDict(Dict):
    """addict-style Dict that raises instead of auto-creating missing keys.

    Plain addict.Dict silently creates empty children on missing attribute
    access; this subclass converts missing keys to KeyError/AttributeError.
    """

    def __missing__(self, name):
        # Disable addict's auto-vivification of missing keys.
        raise KeyError(name)

    def __getattr__(self, name):
        try:
            value = super(ConfigDict, self).__getattr__(name)
        except KeyError:
            ex = AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, name))
        except Exception as e:
            ex = e
        else:
            return value
        # Raised outside the except blocks on purpose: avoids chaining the
        # original KeyError as __context__ in the traceback.
        raise ex
def test_any_value() -> None:
    """AnyValue is assignable from anything, is no specific type, and its str
    form reflects its source."""
    # Note: `any` shadows the builtin -- fine in this short test scope.
    any = AnyValue(AnySource.unannotated)
    assert (not any.is_type(int))
    assert_can_assign(any, KnownValue(1))
    assert_can_assign(any, TypedValue(int))
    assert_can_assign(any, MultiValuedValue([KnownValue(1), TypedValue(int)]))
    assert (str(any) == 'Any[unannotated]')
    # The default source prints as bare 'Any'.
    assert (str(AnyValue(AnySource.default)) == 'Any')
def prune_model(args):
    """Load a SparseMask checkpoint, threshold its mask, and save the result.

    The pruned mask is written next to the checkpoint as
    ``mask_thres_<thres>.npy``.
    """
    device = torch.device(('cuda:{}'.format(args.gpu) if ((args.gpu >= 0) and torch.cuda.is_available()) else 'cpu'))
    if ((args.gpu >= 0) and torch.cuda.is_available()):
        cudnn.benchmark = True
    # Select the compute dtype from the CLI flag.
    if (args.type == 'float64'):
        dtype = torch.float64
    elif (args.type == 'float32'):
        dtype = torch.float32
    elif (args.type == 'float16'):
        dtype = torch.float16
    else:
        raise ValueError('Wrong type!')
    model = SparseMask(backbone_name=args.backbone_name, depth=args.depth, in_channels=3, num_classes=args.n_class)
    if (args.gpu >= 0):
        # DataParallel wraps the model, so weights live under model.module.
        model = torch.nn.DataParallel(model, [args.gpu])
    model.to(device=device, dtype=dtype)
    checkpoint = torch.load(args.checkpoint, map_location=device)
    model.load_state_dict(checkpoint['state_dict'], strict=True)
    # Unwrap DataParallel before pruning so `prune` sees the raw module.
    mask = prune((model.module if (args.gpu >= 0) else model), args.thres)
    np.save(os.path.join(os.path.dirname(args.checkpoint), 'mask_thres_{}'.format(args.thres)), mask)
class BaseObject(nn.Module):
    """nn.Module base that exposes a readable name via ``__name__``.

    NOTE(review): upstream this is likely a stripped ``@property``; kept as a
    plain method to match the source as shown.
    """

    def __init__(self, name=None):
        super().__init__()
        self._name = name

    def __name__(self):
        # Explicit name wins; otherwise derive snake_case from the class name.
        if self._name is not None:
            return self._name
        camel = self.__class__.__name__
        spaced = re.sub('(.)([A-Z][a-z]+)', '\\1_\\2', camel)
        return re.sub('([a-z0-9])([A-Z])', '\\1_\\2', spaced).lower()
# NOTE(review): bare `_module` token -- presumably a stripped registry
# decorator (e.g. `@DATASETS.register_module`); confirm against upstream mmdet.
_module
class WIDERFaceDataset(XMLDataset):
    """WIDER FACE detection dataset in Pascal-VOC XML layout (single 'face' class)."""

    CLASSES = ('face',)

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Read image ids from `ann_file` and build per-image info dicts.

        Width/height/folder come from each image's VOC XML annotation; the
        stored filename is ``<folder>/<img_id>.jpg``.
        """
        img_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = '{}.jpg'.format(img_id)
            xml_path = osp.join(self.img_prefix, 'Annotations', '{}.xml'.format(img_id))
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            folder = root.find('folder').text
            img_infos.append(dict(id=img_id, filename=osp.join(folder, filename), width=width, height=height))
        return img_infos
def createRelationalTables():
    """Create and populate the demo employee/city/country tables."""
    statements = (
        'create table employee(id int, name varchar(20), city int, country int)',
        "insert into employee values(1, 'Espen', 5000, 47)",
        "insert into employee values(2, 'Harald', 80000, 49)",
        "insert into employee values(3, 'Sam', 100, 41)",
        'create table city(id int, name varchar(20))',
        "insert into city values(100, 'San Jose')",
        "insert into city values(5000, 'Oslo')",
        "insert into city values(80000, 'Munich')",
        'create table country(id int, name varchar(20))',
        "insert into country values(41, 'USA')",
        "insert into country values(47, 'Norway')",
        "insert into country values(49, 'Germany')",
    )
    query = QSqlQuery()
    # Execute sequentially on the default database connection.
    for statement in statements:
        query.exec_(statement)
def create_inline(project, resource, offset):
    """Choose the appropriate inline refactoring for the name at `offset`.

    Imported names are first resolved to their definition; assigned names,
    parameters and functions map to InlineVariable, InlineParameter and
    InlineMethod respectively. Anything else is a refactoring error.
    """
    pyname = _get_pyname(project, resource, offset)
    message = 'Inline refactoring should be performed on a method, local variable or parameter.'
    if pyname is None:
        raise exceptions.RefactoringError(message)
    if isinstance(pyname, pynames.ImportedName):
        # Follow the import to the pyname it actually refers to.
        pyname = pyname._get_imported_pyname()
    dispatch = (
        (pynames.AssignedName, InlineVariable),
        (pynames.ParameterName, InlineParameter),
    )
    for name_cls, refactoring_cls in dispatch:
        if isinstance(pyname, name_cls):
            return refactoring_cls(project, resource, offset)
    if isinstance(pyname.get_object(), pyobjects.PyFunction):
        return InlineMethod(project, resource, offset)
    raise exceptions.RefactoringError(message)
class PrettifyBaseEntryTestCase(unittest.TestCase):
    """Tests for prettify_entry's multiline formatting of a base entry."""

    def setUp(self):
        # Shared fixture entry; per-test copies mutate it as needed.
        self.element = dict(name='soccer shoes', value=(- 123.45), date='04-01', category='sport equipment')

    def test_prettify(self):
        """Names and categories are title-cased; fields align one per line."""
        self.assertEqual(prettify_entry(self.element, default_category=CategoryEntry.DEFAULT_NAME), 'Name : Soccer Shoes\nValue : -123.45\nDate : 04-01\nCategory: Sport Equipment')

    def test_prettify_default_category(self):
        """A None category falls back to the capitalized default name."""
        element = self.element.copy()
        element['category'] = None
        self.assertEqual(prettify_entry(element, default_category=CategoryEntry.DEFAULT_NAME), f'''Name : Soccer Shoes
Value : -123.45
Date : 04-01
Category: {CategoryEntry.DEFAULT_NAME.capitalize()}''')
def bench_pickle_list(loops, pickle, options):
    """Benchmark pickling LIST `loops` times; returns elapsed seconds.

    The dumps call is unrolled 10x per iteration on purpose, so the loop
    overhead is amortized relative to the measured work (standard
    pyperformance style). `dumps` is bound to a local for the same reason.
    """
    range_it = range(loops)
    dumps = pickle.dumps
    obj = LIST
    protocol = options.protocol
    t0 = pyperf.perf_counter()
    for _ in range_it:
        dumps(obj, protocol)
        dumps(obj, protocol)
        dumps(obj, protocol)
        dumps(obj, protocol)
        dumps(obj, protocol)
        dumps(obj, protocol)
        dumps(obj, protocol)
        dumps(obj, protocol)
        dumps(obj, protocol)
        dumps(obj, protocol)
    return (pyperf.perf_counter() - t0)
def sample_initial_conditions(model, points_to_sample, traj_length=1000, pts_per_period=30):
    """Draw `points_to_sample` distinct points from a reference trajectory.

    A trajectory of `traj_length` points is generated from `model` (resampled
    to `pts_per_period` points per period, no postprocessing) and row indices
    are sampled without replacement.
    """
    reference_traj = model.make_trajectory(traj_length, resample=True, pts_per_period=pts_per_period, postprocess=False)
    chosen = np.random.choice(np.arange(reference_traj.shape[0]), points_to_sample, replace=False)
    return reference_traj[chosen]
def pairwise(accuracy_balanced, method_names, out_results_dir, num_repetitions):
    """Build the upper-triangular pairwise-significance matrix between methods.

    Methods are ranked by the median of `accuracy_balanced` over repetitions
    (axis 0); each pair (m1, m2), m1 < m2, is compared against the critical
    rank distance. Cells below the diagonal stay NaN.

    Note: `out_results_dir` and `num_repetitions` are kept for interface
    compatibility; they are unused here.
    """
    # np.rank never ranked values (it was an alias of ndim, removed from
    # NumPy); scipy.stats.rankdata gives the intended value ranks.
    from scipy.stats import rankdata

    num_datasets = len(method_names)
    median_bal_acc = np.nanmedian(accuracy_balanced, axis=0)
    ranks = rankdata(median_bal_acc)
    critical_dist = compute_critical_dist(ranks)
    signif_matrix = np.full([num_datasets, num_datasets], np.nan)
    for m1 in range(num_datasets):
        # Fixed off-by-one: the original ran m2 up to num_datasets inclusive,
        # indexing one past the end of signif_matrix.
        for m2 in range(m1 + 1, num_datasets):
            signif_matrix[m1, m2] = check_if_better(ranks[m1], ranks[m2], critical_dist)
    return signif_matrix
def test_translate():
    """Smoke-test TranslationModel across target languages and batched input."""
    model = dlt.TranslationModel()
    english = 'Hello everyone, how are you?'
    # Single-sentence translation to Spanish has a known expected output.
    assert model.translate(english, source='English', target='Spanish') == 'Hola a todos, como estas?'
    french_single = model.translate(english, source='English', target='French')
    chinese = model.translate(english, source='English', target='Chinese')
    # Batched input: the first element must match the single-sentence result.
    french_batch = model.translate([english, english + english], source='English', target='French')
    assert french_single == french_batch[0]
    # Different target languages must not produce identical output.
    assert chinese != french_single
class DialogueManager():
    """Coordinates one recommendation dialogue between an agent and a user simulator.

    The agent repeatedly requests facet values (categories, state, city,
    price, stars) until it issues the terminal 'recommend' action; a belief
    tracker converts the accumulated user utterances into a dialogue-state
    vector fed back to the agent.
    """

    def __init__(self, rec, agent, user, bftracker):
        # Indices of the belief trackers queried for the state vector.
        self.tracker_idx_list = [0, 1, 2, 3, 4]
        # Maps the facet requested by the agent to a discrete action id;
        # 'recommend' (5) terminates the dialogue.
        self.facet_action_dict = {'categories': 0, 'state': 1, 'city': 2, 'price': 3, 'stars': 4, 'recommend': 5}
        self.rec = rec
        self.agent = agent
        self.user = user
        self.bftracker = bftracker
        # Per-episode state; populated by initialize_episode().
        self.turn_count = None
        self.user_name = None
        self.business_name = None
        self.user_utt_list = None
        self.dialogue_state = None

    def initialize_episode(self, user_name, business_name):
        """Reset all per-episode state for a new (user, business) pair."""
        self.user_name = user_name
        self.business_name = business_name
        self.turn_count = 0
        self.agent.init_episode()
        self.user.init_episode(user_name, business_name)
        self.user_utt_list = []
        self.dialogue_state = None

    def agent_turn(self):
        """Ask the agent for its next requested facet and NL utterance."""
        (request_facet, agent_nl) = self.agent.next_turn(self.dialogue_state)
        return (request_facet, agent_nl)

    def user_turn(self, request_facet):
        """Get the user's reply to the requested facet and record it."""
        user_nl = self.user.next_turn(request_facet)
        self.user_utt_list.append(user_nl)
        return user_nl

    def get_dialogue_state(self):
        """Re-derive the dialogue-state vector from all user utterances so far."""
        self.dialogue_state = self.bftracker.use_tracker_from_nl(self.user_utt_list, self.tracker_idx_list)

    def next_turn(self):
        """Run one agent/user exchange.

        Returns:
            (done, state, action): ``done`` is True once the agent recommends;
            ``state`` is the dialogue-state vector from *before* this user
            turn (None on the very first turn, in which case ``action`` is
            also None); ``action`` is the discrete agent action id.
        """
        (request_facet, agent_nl) = self.agent_turn()
        agent_action = self.facet_action_dict[request_facet]
        if agent_action == 5:
            # Terminal 'recommend' action: episode ends with the current state.
            return (True, self.dialogue_state.tolist(), agent_action)
        user_nl = self.user_turn(request_facet)
        previous_state = self.dialogue_state
        self.get_dialogue_state()
        # FIX (idiom): compare against None with `is not None` rather than
        # the `type(x) != type(None)` anti-pattern.
        if previous_state is not None:
            return (False, previous_state.tolist(), agent_action)
        else:
            return (False, None, None)
# FIX: the decorator line had lost its `@pytest.mark` prefix (bare
# `.parametrize(...)` is a syntax error); restore the full decorator.
@pytest.mark.parametrize(
    'molecule, atom_index, expected',
    [
        pytest.param('water', 0, 'O', id='O'),
        pytest.param('water', 1, 'X', id='Polar H'),
        pytest.param('acetone', 4, 'H', id='Normal H'),
    ],
)
def test_get_param_code(molecule, atom_index, expected, request):
    """_get_parameter_code returns the Rfree code for an atom.

    Polar hydrogens map to the special 'X' code; other atoms map to their
    element symbol. The molecule fixtures are resolved by name via the
    pytest `request` fixture.
    """
    molecule = request.getfixturevalue(molecule)
    rfree_code = _get_parameter_code(molecule=molecule, atom_index=atom_index)
    assert rfree_code == expected
class Effect5793(BaseEffect):
    """Boosts weapon-disruption module range/falloff from the pilot's EW scan skill.

    NOTE(review): ``handler`` is declared without ``self`` -- the effect
    framework appears to invoke it as a plain function; confirm against
    BaseEffect's dispatch before changing the signature.
    """
    type = 'passive'
    def handler(fit, container, context, projectionRange, **kwargs):
        # Skill-based sources scale the bonus by trained level; other sources
        # (e.g. implants/boosters) apply it once.
        level = (container.level if ('skill' in context) else 1)
        # Apply the same per-level bonus to both range attributes of every
        # module that requires the Weapon Disruption skill.
        for attr in ('maxRangeBonus', 'falloffBonus'):
            fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Weapon Disruption')), attr, (container.getModifiedItemAttr('scanSkillEwStrengthBonus') * level), **kwargs)
class RandomValuePicker():
    """Produces randomized shared-buffer values for Pool, TcBind and PortPool objects."""

    def __init__(self, pools):
        # Private copy of the pool descriptors (dict-like records).
        self._pools = list(pools)

    def _cell_size(self):
        # All pools share one cell size; read it from the first descriptor.
        return self._pools[0]['cell_size']

    def _get_static_size(self, th):
        """Convert a threshold into a static byte size (th * 8000 cells)."""
        return th * 8000 * self._cell_size()

    def _get_size(self):
        return self._get_static_size(16)

    def _get_thtype(self):
        return 'static'

    def _get_th(self, pool):
        """Random threshold for ``pool``: raw for dynamic, bytes for static."""
        th = random.randint(3, 16)
        if pool['thtype'] == 'dynamic':
            return th
        return self._get_static_size(th)

    def _get_pool(self, direction):
        """Pick a random pool matching the given traffic direction."""
        ingress = [p for p in self._pools if p['type'] == 'ingress']
        egress = [p for p in self._pools if p['type'] != 'ingress']
        candidates = ingress if direction == 'ingress' else egress
        return candidates[random.randint(0, len(candidates) - 1)]

    def get_value(self, objid):
        """Return a randomized value tuple appropriate for ``objid``'s type."""
        if isinstance(objid, Pool):
            if objid['pool'] in [4, 8, 9, 10]:
                raise SkipTest()
            return (self._get_size(), self._get_thtype())
        if isinstance(objid, TcBind):
            if objid['tc'] >= 8:
                raise SkipTest()
            pool = self._get_pool(objid['type'])
            return (pool['pool'], self._get_th(pool))
        if isinstance(objid, PortPool):
            pool_n = objid['pool']
            pool = self._pools[pool_n]
            assert pool['pool'] == pool_n
            return (self._get_th(pool),)
def apply_along_last_axis(func, *args, **kwargs):
    """Apply a 1-D function along the last axis of its array argument.

    ``func``'s first parameter is taken as the input array; it may be passed
    either positionally or by keyword. 1-D inputs are forwarded to ``func``
    directly; higher-dimensional inputs go through np.apply_along_axis over
    the last axis, with any remaining args/kwargs forwarded to ``func``.
    """
    first_arg_name = getfullargspec(func)[0][0]
    has_positional_arg = len(args) > 0
    input_arg = args[0] if has_positional_arg else kwargs[first_arg_name]
    if input_arg.ndim == 1:
        return func(*args, **kwargs)
    if not has_positional_arg:
        # BUG FIX: previously `args = kwargs.pop(first_arg_name)` bound args
        # to the bare ndarray, so `*args` below unpacked its rows as separate
        # positional arguments to apply_along_axis. Wrap it in a 1-tuple.
        args = (kwargs.pop(first_arg_name),)
    return np.apply_along_axis(func, -1, *args, **kwargs)
def test_region_locked_error():
    """VideoRegionBlocked is a VideoUnavailable and carries the video id."""
    video_id = 'hZpzr8TbF08'
    # Raising the subclass must be catchable as the base exception.
    with pytest.raises(exceptions.VideoUnavailable):
        raise exceptions.VideoRegionBlocked(video_id)
    # The exception exposes the id and a human-readable message.
    try:
        raise exceptions.VideoRegionBlocked(video_id)
    except exceptions.VideoRegionBlocked as err:
        assert err.video_id == video_id
        assert str(err) == 'hZpzr8TbF08 is not available in your region'
def dropout(x, drop_prob, shared_axes=(), training=False):
    """Apply inverted dropout to ``x``, optionally sharing the mask across axes.

    Args:
        x: input tensor.
        drop_prob: probability of zeroing an element; 0 or None disables dropout.
        shared_axes: axes whose mask dimension is collapsed to 1, so the same
            drop pattern is broadcast along them (e.g. word-level dropout).
            BUG FIX: the default was a mutable list literal; use an immutable
            empty tuple instead (iteration-only use, so callers are unaffected).
        training: when False (inference), the input is returned unchanged.

    Returns:
        The masked-and-rescaled tensor, or ``x`` itself when dropout is disabled.
    """
    # FIX (idiom): compare with `is None` rather than `== None`.
    if drop_prob == 0 or drop_prob is None or not training:
        return x
    mask_shape = list(x.size())
    # Collapse shared axes so the mask broadcasts identically along them.
    for axis in shared_axes:
        mask_shape[axis] = 1
    keep_prob = 1.0 - drop_prob
    # Inverted dropout: scale kept activations by 1/keep_prob at train time.
    mask = x.new(*mask_shape).bernoulli_(keep_prob).div_(keep_prob)
    mask = mask.expand_as(x)
    return x * mask
def _identify_slide(group):
    """Try to recognize ``group``'s argument lists as a Slide pattern.

    A "slide" means the per-call argument tuples are consecutive
    fixed-width windows over one ordered variable scope, possibly
    wrapping around (circular) and possibly stepping by an offset > 1.

    Returns:
        (scope, offset, is_circular) when a slide is recognized, where
        ``scope`` is the deduplicated variable order, ``offset`` the window
        step, and ``is_circular`` whether the windows wrap; otherwise None.
    """
    def _possible_offset(all_args):
        # Offset candidate: position of the second call's first argument
        # within the first call's argument tuple (None if absent).
        for arg1 in all_args[1]:
            for (i, arg0) in enumerate(all_args[0]):
                if (arg0 == arg1):
                    return i
        return None
    # Only abstractions with exactly one '%'-style hole qualify.
    if (isinstance(group.abstraction, dict) and (len([v for v in group.abstraction.values() if ('%' in str(v))]) != 1)):
        return None
    # Prefer the pre-rewrite argument lists when the group kept them.
    all_args = (group.original_all_args if hasattr(group, 'original_all_args') else group.all_args)
    t = [x for args in all_args for x in args]
    # Every argument must be a Variable for a slide to make sense.
    if any(((not isinstance(x, Variable)) for x in t)):
        return None
    # Deduplicate while preserving first-occurrence order -> the scope.
    scope = [x for (i, x) in enumerate(t) if (x not in t[0:i])]
    arity = len(all_args[0])
    # Case 1: plain slide with offset 1 (consecutive windows over scope).
    sliding_scope = [[x for x in scope[i:(i + arity)]] for i in range(0, ((len(scope) - arity) + 1), 1)]
    if (sliding_scope == all_args):
        return (scope, 1, False)
    ' trying to recognize a circular Slide '
    # Case 2: circular slide with offset 1 (windows wrap around the scope).
    sliding_scope = [[x for x in [scope[((i + k) % len(scope))] for k in range(arity)]] for i in range(len(scope))]
    if (sliding_scope == all_args):
        return (scope, 1, True)
    ' trying to recognize a slide with an offset not equal to 1'
    offset = _possible_offset(all_args)
    if ((offset is None) or (offset <= 1)):
        return None
    # Case 3: strided slide (windows advance by `offset`).
    sliding_scope = [[x for x in scope[i:(i + arity)]] for i in range(0, ((len(scope) - arity) + 1), offset)]
    if (sliding_scope == all_args):
        return (scope, offset, False)
    # Case 4: strided slide whose final window wraps to scope[0].
    if ((sliding_scope + [([scope[i] for i in range(((len(scope) - arity) + 1), len(scope))] + [scope[0]])]) == all_args):
        return (scope, offset, True)
    return None
class RepoMirrorAPI(object):
    """Facade that routes repo-mirror calls to a real or no-op backend.

    The real backend is used only when the feature flag is on and the
    configuration validates (or validation is explicitly skipped).
    """

    def __init__(self, config, server_hostname=None, skip_validation=False, instance_keys=None):
        feature_enabled = config.get('FEATURE_REPO_MIRROR', False)
        config_ok = skip_validation
        if feature_enabled and not skip_validation:
            config_ok = RepoMirrorConfigValidator(feature_enabled).valid()
        # Default to the no-op backend; upgrade when fully enabled and valid.
        self.state = NoopRepoMirrorAPI()
        if feature_enabled and config_ok:
            self.state = ImplementedRepoMirrorAPI(config, server_hostname, instance_keys=instance_keys)

    def __getattr__(self, name):
        # Delegate unknown attribute access to the active backend; attributes
        # missing on the backend resolve to None rather than raising.
        return getattr(self.state, name, None)
def test_error_stream(testcase: DataDrivenTestCase) -> None:
    """Check that error messages are flushed incrementally during a build."""
    options = Options()
    options.show_traceback = True
    options.hide_error_codes = True
    logged_messages: list[str] = []

    def flush_errors(filename: (str | None), msgs: list[str], serious: bool) -> None:
        # Record each non-empty flush with a separator so the test output
        # shows the flush boundaries.
        if not msgs:
            return
        logged_messages.append('==== Errors flushed ====')
        logged_messages.extend(msgs)

    sources = [BuildSource('main', '__main__', '\n'.join(testcase.input))]
    try:
        build.build(sources=sources, options=options, flush_errors=flush_errors)
    except CompileError as e:
        # All messages must already have been flushed via the callback.
        assert e.messages == []
    assert_string_arrays_equal(
        testcase.output,
        logged_messages,
        f'Invalid output ({testcase.file}, line {testcase.line})',
    )
class SeedScheduler():
    """Abstract interface for seed-scheduling strategies.

    Concrete schedulers must implement the core queue operations; the
    post-execution/post-exploration hooks are optional no-ops by default.
    """

    def has_seed_remaining(self) -> bool:
        """Return whether at least one seed is still queued."""
        raise NotImplementedError()

    def add(self, seed: Seed) -> None:
        """Insert a seed into the worklist."""
        raise NotImplementedError()

    def update_worklist(self, coverage: GlobalCoverage) -> None:
        """Re-prioritize queued seeds against the current global coverage."""
        raise NotImplementedError()

    def can_solve_models(self) -> bool:
        """Return whether the scheduler currently allows model solving."""
        raise NotImplementedError()

    def pick(self) -> Optional[Seed]:
        """Pop and return the next seed to run, or None when empty."""
        raise NotImplementedError()

    def post_execution(self) -> None:
        """Hook called after each execution; default is a no-op."""
        pass

    def post_exploration(self, workspace: Workspace) -> None:
        """Hook called when exploration finishes; default is a no-op."""
        pass
def main(gpu, ngpus_per_node, cfg, args):
    """Distributed semi-supervised segmentation training entry point (per process).

    Trains a two-branch DeepLabV3+ model with (a) supervised CE loss on
    labeled batches, (b) cross-branch consistency losses on unlabeled batches
    under several pseudo-label selection modes, and (c) a cosine feature
    discrepancy loss keeping the branches diverse; evaluates every epoch and
    checkpoints the best models per branch and for the branch average.

    Args:
        gpu: local GPU index for this process (stored into args.local_rank).
        ngpus_per_node: unused in the body; kept for the mp.spawn signature.
        cfg: configuration dict (dataset, data_root, criterion kwargs, ...).
        args: command-line options (paths, lrs, loss weights, DDP flags, ...).
    """
    args.local_rank = gpu
    # Rank 0 (or single-process run) owns the save dir, logger and tensorboard.
    if (args.local_rank <= 0):
        os.makedirs(args.save_path, exist_ok=True)
        logger = init_log_save(args.save_path, 'global', logging.INFO)
        logger.propagate = 0
    if (args.local_rank <= 0):
        tb_dir = args.save_path
        tb = SummaryWriter(log_dir=tb_dir)
    # Distributed setup (NCCL); non-DDP runs skip process-group creation.
    if args.ddp:
        dist.init_process_group(backend='nccl', rank=args.local_rank, world_size=args.world_size)
    if (args.local_rank <= 0):
        logger.info('{}\n'.format(pprint.pformat(cfg)))
    # Seed all RNGs for reproducibility; cudnn.benchmark trades determinism
    # for speed on fixed-size inputs.
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.benchmark = True
    model = Discrepancy_DeepLabV3Plus(args, cfg)
    if (args.local_rank <= 0):
        logger.info('Total params: {:.1f}M\n'.format(count_params(model)))
    # Three parameter groups: each branch backbone at a reduced lr, and all
    # non-backbone (segmentation head) params at the network lr.
    optimizer = SGD([{'params': model.branch1.backbone.parameters(), 'lr': (args.base_lr * args.lr_backbone)}, {'params': model.branch2.backbone.parameters(), 'lr': (args.base_lr * args.lr_backbone)}, {'params': [param for (name, param) in model.named_parameters() if ('backbone' not in name)], 'lr': (args.base_lr * args.lr_network)}], lr=args.base_lr, momentum=0.9, weight_decay=0.0001)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda(args.local_rank)
    # NOTE(review): DDP wrapping happens unconditionally even when args.ddp
    # is False -- confirm single-GPU runs still init a process group upstream.
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, broadcast_buffers=False, find_unused_parameters=False)
    # Labeled criterion: plain CE or OHEM CE; unlabeled criterion is
    # unreduced CE so per-pixel masks can weight it.
    if (args.mode_criterion == 'CE'):
        criterion_l = nn.CrossEntropyLoss(reduction='mean', ignore_index=255).cuda(args.local_rank)
    else:
        criterion_l = ProbOhemCrossEntropy2d(**cfg['criterion']['kwargs']).cuda(args.local_rank)
    criterion_u = nn.CrossEntropyLoss(reduction='none').cuda(args.local_rank)
    # Datasets: labeled set is resampled to the unlabeled set's length so the
    # zipped loaders stay in step.
    trainset_u = SemiDataset(cfg['dataset'], cfg['data_root'], 'train_u', args.crop_size, args.unlabeled_id_path)
    trainset_l = SemiDataset(cfg['dataset'], cfg['data_root'], 'train_l', args.crop_size, args.labeled_id_path, nsample=len(trainset_u.ids))
    valset = SemiDataset(cfg['dataset'], cfg['data_root'], 'val')
    if args.ddp:
        trainsampler_l = torch.utils.data.distributed.DistributedSampler(trainset_l)
    else:
        trainsampler_l = None
    trainloader_l = DataLoader(trainset_l, batch_size=args.batch_size, pin_memory=True, num_workers=args.num_workers, drop_last=True, sampler=trainsampler_l)
    if args.ddp:
        trainsampler_u = torch.utils.data.distributed.DistributedSampler(trainset_u)
    else:
        trainsampler_u = None
    trainloader_u = DataLoader(trainset_u, batch_size=args.batch_size, pin_memory=True, num_workers=args.num_workers, drop_last=True, sampler=trainsampler_u)
    valloader = DataLoader(valset, batch_size=1, pin_memory=True, num_workers=args.num_workers, drop_last=False)
    total_iters = (len(trainloader_u) * args.epochs)
    # Best-mIoU trackers for checkpoint rotation (ave, branch1, branch2).
    previous_best = 0.0
    previous_best1 = 0.0
    previous_best2 = 0.0
    conf_threshold = args.conf_threshold
    for epoch in range(args.epochs):
        if (args.local_rank <= 0):
            logger.info('> Epoch: {:}, backbone1 LR: {:.4f}, backbone2 LR: {:.4f}, segmentation LR: {:.4f}'.format(epoch, optimizer.param_groups[0]['lr'], optimizer.param_groups[1]['lr'], optimizer.param_groups[(- 1)]['lr']))
            logger.info('> Epoch: {:}, Previous best of ave: {:.2f}, Previous best of branch1: {:.2f}, Previous best of branch2: {:.2f}'.format(epoch, previous_best, previous_best1, previous_best2))
        (total_loss, total_loss_CE, total_loss_con, total_loss_dis) = (0.0, 0.0, 0.0, 0.0)
        total_mask_ratio = 0.0
        # NOTE(review): set_epoch assumes a DistributedSampler; with
        # args.ddp False the sampler is None and this would raise -- confirm
        # non-DDP runs are not expected here.
        trainloader_l.sampler.set_epoch(epoch)
        trainloader_u.sampler.set_epoch(epoch)
        loader = zip(trainloader_l, trainloader_u)
        total_labeled = 0
        total_unlabeled = 0
        for (i, ((labeled_img, labeled_img_mask), (unlabeled_img, aug_unlabeled_img, ignore_img_mask, unlabeled_cutmix_box))) in enumerate(loader):
            (labeled_img, labeled_img_mask) = (labeled_img.cuda(args.local_rank), labeled_img_mask.cuda(args.local_rank))
            (unlabeled_img, aug_unlabeled_img, ignore_img_mask, unlabeled_cutmix_box) = (unlabeled_img.cuda(args.local_rank), aug_unlabeled_img.cuda(args.local_rank), ignore_img_mask.cuda(args.local_rank), unlabeled_cutmix_box.cuda(args.local_rank))
            optimizer.zero_grad()
            dist.barrier()
            (num_lb, num_ulb) = (labeled_img.shape[0], unlabeled_img.shape[0])
            total_labeled += num_lb
            total_unlabeled += num_ulb
            # Supervised forward on the labeled batch.
            model.train()
            labeled_logits = model(labeled_img)
            # Pseudo-label generation: eval-mode, no-grad forward on the raw
            # unlabeled batch; both branches produce hard labels + logits.
            with torch.no_grad():
                model.eval()
                unlabeled_logits = model(unlabeled_img)
                unlabeled_pseudo_label1 = unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long()
                unlabeled_pseudo_label2 = unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long()
                unlabeled_pred1 = unlabeled_logits['pred1'].detach()
                unlabeled_pred2 = unlabeled_logits['pred2'].detach()
            # CutMix: paste the flipped batch into the box region of images,
            # ignore masks, pseudo-labels and logits alike so supervision
            # stays aligned with the mixed input.
            aug_unlabeled_img_for_mix = aug_unlabeled_img.clone()
            aug_unlabeled_ignore_img_mask_for_mix = ignore_img_mask.clone()
            aug_unlabeled_pseudo_label1_for_mix = unlabeled_pseudo_label1.clone()
            aug_unlabeled_pseudo_label2_for_mix = unlabeled_pseudo_label2.clone()
            aug_unlabeled_pred1_for_mix = unlabeled_pred1.clone()
            aug_unlabeled_pred2_for_mix = unlabeled_pred2.clone()
            aug_unlabeled_img_for_mix[(unlabeled_cutmix_box.unsqueeze(1).expand(aug_unlabeled_img_for_mix.shape) == 1)] = aug_unlabeled_img_for_mix.flip(0)[(unlabeled_cutmix_box.unsqueeze(1).expand(aug_unlabeled_img_for_mix.shape) == 1)]
            aug_unlabeled_ignore_img_mask_for_mix[(unlabeled_cutmix_box == 1)] = aug_unlabeled_ignore_img_mask_for_mix.flip(0)[(unlabeled_cutmix_box == 1)]
            aug_unlabeled_pseudo_label1_for_mix[(unlabeled_cutmix_box == 1)] = aug_unlabeled_pseudo_label1_for_mix.flip(0)[(unlabeled_cutmix_box == 1)]
            aug_unlabeled_pseudo_label2_for_mix[(unlabeled_cutmix_box == 1)] = aug_unlabeled_pseudo_label2_for_mix.flip(0)[(unlabeled_cutmix_box == 1)]
            aug_unlabeled_pred1_for_mix[(unlabeled_cutmix_box.unsqueeze(1).expand(aug_unlabeled_pred1_for_mix.shape) == 1)] = aug_unlabeled_pred1_for_mix.flip(0)[(unlabeled_cutmix_box.unsqueeze(1).expand(aug_unlabeled_pred1_for_mix.shape) == 1)]
            aug_unlabeled_pred2_for_mix[(unlabeled_cutmix_box.unsqueeze(1).expand(aug_unlabeled_pred2_for_mix.shape) == 1)] = aug_unlabeled_pred2_for_mix.flip(0)[(unlabeled_cutmix_box.unsqueeze(1).expand(aug_unlabeled_pred2_for_mix.shape) == 1)]
            # Student forwards (train mode): on the cutmixed augmented batch
            # and on the raw unlabeled batch.
            model.train()
            cutmixed_aug_unlabeled_logits = model(aug_unlabeled_img_for_mix)
            raw_unlabeled_logits = model(unlabeled_img)
            unlabeled_pred_confidence1 = raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[0]
            unlabeled_pred_confidence2 = raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[0]
            # Supervised CE loss, averaged over the two branches.
            labeled_pred1 = labeled_logits['pred1']
            labeled_pred2 = labeled_logits['pred2']
            loss_CE1 = criterion_l(labeled_pred1, labeled_img_mask)
            loss_CE2 = criterion_l(labeled_pred2, labeled_img_mask)
            loss_CE = ((loss_CE1 + loss_CE2) / 2)
            loss_CE = (loss_CE * args.w_CE)
            raw_unlabeled_pred1 = raw_unlabeled_logits['pred1']
            raw_unlabeled_pred2 = raw_unlabeled_logits['pred2']
            cutmixed_aug_unlabeled_pred1 = cutmixed_aug_unlabeled_logits['pred1']
            cutmixed_aug_unlabeled_pred2 = cutmixed_aug_unlabeled_logits['pred2']
            # Cross-branch consistency losses. Each mode weights per-pixel CE
            # between one branch's prediction and the other branch's pseudo
            # label, on both the cutmixed-augmented view and the raw view;
            # ignore-mask pixels (255) are always excluded.
            if (args.mode_confident == 'normal'):
                # 'normal': hard confidence threshold on the teacher branch.
                loss_con1 = (criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix) * ((aug_unlabeled_pred1_for_mix.softmax(dim=1).max(dim=1)[0] > conf_threshold) & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1 = (torch.sum(loss_con1) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                loss_con2 = (criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix) * ((aug_unlabeled_pred2_for_mix.softmax(dim=1).max(dim=1)[0] > conf_threshold) & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2 = (torch.sum(loss_con2) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                loss_raw_con1 = (criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long()) * ((raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[0] > conf_threshold) & (ignore_img_mask != 255)))
                loss_raw_con1 = (torch.sum(loss_raw_con1) / torch.sum((ignore_img_mask != 255)).item())
                loss_raw_con2 = (criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long()) * ((raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[0] > conf_threshold) & (ignore_img_mask != 255)))
                loss_raw_con2 = (torch.sum(loss_raw_con2) / torch.sum((ignore_img_mask != 255)).item())
            elif (args.mode_confident == 'soft'):
                # 'soft': confident pixels weighted 1.0, unconfident 0.5.
                (confident_pred1, confident_pred2, unconfident_pred1, unconfident_pred2) = soft_label_selection(aug_unlabeled_pred1_for_mix, aug_unlabeled_pred2_for_mix, conf_threshold)
                loss_con1_confident = (criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix) * (confident_pred1 & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2_confident = (criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix) * (confident_pred2 & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1_unconfident = ((0.5 * criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix)) * (unconfident_pred1 & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2_unconfident = ((0.5 * criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix)) * (unconfident_pred2 & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1 = ((torch.sum(loss_con1_confident) + torch.sum(loss_con1_unconfident)) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                loss_con2 = ((torch.sum(loss_con2_confident) + torch.sum(loss_con2_unconfident)) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                (raw_confident_pred1, raw_confident_pred2, raw_unconfident_pred1, raw_unconfident_pred2) = soft_label_selection(raw_unlabeled_pred1, raw_unlabeled_pred2, conf_threshold)
                loss_raw_con1_confident = (criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long()) * (raw_confident_pred1 & (ignore_img_mask != 255)))
                loss_raw_con2_confident = (criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long()) * (raw_confident_pred2 & (ignore_img_mask != 255)))
                loss_raw_con1_unconfident = ((0.5 * criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_unconfident_pred1 & (ignore_img_mask != 255)))
                loss_raw_con2_unconfident = ((0.5 * criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_unconfident_pred2 & (ignore_img_mask != 255)))
                loss_raw_con1 = ((torch.sum(loss_raw_con1_confident) + torch.sum(loss_raw_con1_unconfident)) / torch.sum((ignore_img_mask != 255)).item())
                loss_raw_con2 = ((torch.sum(loss_raw_con2_confident) + torch.sum(loss_raw_con2_unconfident)) / torch.sum((ignore_img_mask != 255)).item())
            elif (args.mode_confident == 'vote'):
                # 'vote': branch-agreement pixels weighted 1.0, disagreement 1.5.
                (same_pred, different_pred) = vote_label_selection(unlabeled_pred1, unlabeled_pred2)
                loss_con1_same = (criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix) * (same_pred & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2_same = (criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix) * (same_pred & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1_different = ((1.5 * criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix)) * (different_pred & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2_different = ((1.5 * criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix)) * (different_pred & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1 = ((torch.sum(loss_con1_same) + torch.sum(loss_con1_different)) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                loss_con2 = ((torch.sum(loss_con2_same) + torch.sum(loss_con2_different)) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                (raw_same_pred, raw_different_pred) = vote_label_selection(raw_unlabeled_pred1, raw_unlabeled_pred2)
                loss_raw_con1_same = (criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long()) * (raw_same_pred & (ignore_img_mask != 255)))
                loss_raw_con2_same = (criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long()) * (raw_same_pred & (ignore_img_mask != 255)))
                loss_raw_con1_different = ((1.5 * criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_different_pred & (ignore_img_mask != 255)))
                loss_raw_con2_different = ((1.5 * criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_different_pred & (ignore_img_mask != 255)))
                loss_raw_con1 = ((torch.sum(loss_raw_con1_same) + torch.sum(loss_raw_con1_different)) / torch.sum((ignore_img_mask != 255)).item())
                loss_raw_con2 = ((torch.sum(loss_raw_con2_same) + torch.sum(loss_raw_con2_different)) / torch.sum((ignore_img_mask != 255)).item())
            elif (args.mode_confident == 'vote_threshold'):
                # 'vote_threshold': disagreeing-but-confident pixels get the
                # args.w_confident weight; everything else weight 1.0.
                (different1_confident, different1_else, different2_confident, different2_else) = vote_threshold_label_selection(unlabeled_pred1, unlabeled_pred2, conf_threshold)
                loss_con1_else = (criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix) * (different1_else & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2_else = (criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix) * (different2_else & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1_cc = ((args.w_confident * criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix)) * (different1_confident & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2_cc = ((args.w_confident * criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix)) * (different2_confident & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1 = ((torch.sum(loss_con1_else) + torch.sum(loss_con1_cc)) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                loss_con2 = ((torch.sum(loss_con2_else) + torch.sum(loss_con2_cc)) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                (raw_different1_confident, raw_different1_else, raw_different2_confident, raw_different2_else) = vote_threshold_label_selection(raw_unlabeled_pred1, raw_unlabeled_pred2, conf_threshold)
                loss_raw_con1_else = (criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long()) * (raw_different1_else & (ignore_img_mask != 255)))
                loss_raw_con2_else = (criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long()) * (raw_different2_else & (ignore_img_mask != 255)))
                loss_raw_con1_cc = ((args.w_confident * criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_different1_confident & (ignore_img_mask != 255)))
                loss_raw_con2_cc = ((args.w_confident * criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_different2_confident & (ignore_img_mask != 255)))
                loss_raw_con1 = ((torch.sum(loss_raw_con1_else) + torch.sum(loss_raw_con1_cc)) / torch.sum((ignore_img_mask != 255)).item())
                loss_raw_con2 = ((torch.sum(loss_raw_con2_else) + torch.sum(loss_raw_con2_cc)) / torch.sum((ignore_img_mask != 255)).item())
            elif (args.mode_confident == 'vote_soft'):
                # 'vote_soft': agreement 1.0, confident disagreement 1.5,
                # unconfident disagreement 0.5.
                # NOTE(review): the raw-view selection below reuses
                # unlabeled_pred1/2 (eval-mode logits) instead of
                # raw_unlabeled_pred1/2, unlike the other modes -- confirm
                # whether this asymmetry is intentional.
                (same_pred, different_confident_pred1, different_confident_pred2, different_unconfident_pred1, different_unconfident_pred2) = vote_soft_label_selection(unlabeled_pred1, unlabeled_pred2, conf_threshold)
                loss_con1_same = (criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix) * (same_pred & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2_same = (criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix) * (same_pred & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1_different_confident = ((1.5 * criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix)) * (different_confident_pred1 & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2_different_confident = ((1.5 * criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix)) * (different_confident_pred2 & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1_different_unconfident = ((0.5 * criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix)) * (different_unconfident_pred1 & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con2_different_unconfident = ((0.5 * criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix)) * (different_unconfident_pred2 & (aug_unlabeled_ignore_img_mask_for_mix != 255)))
                loss_con1 = (((torch.sum(loss_con1_same) + torch.sum(loss_con1_different_confident)) + torch.sum(loss_con1_different_unconfident)) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                loss_con2 = (((torch.sum(loss_con2_same) + torch.sum(loss_con2_different_confident)) + torch.sum(loss_con2_different_unconfident)) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                (raw_same_pred, raw_different_confident_pred1, raw_different_confident_pred2, raw_different_unconfident_pred1, raw_different_unconfident_pred2) = vote_soft_label_selection(unlabeled_pred1, unlabeled_pred2, conf_threshold)
                loss_raw_con1_same = (criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long()) * (raw_same_pred & (ignore_img_mask != 255)))
                loss_raw_con2_same = (criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long()) * (raw_same_pred & (ignore_img_mask != 255)))
                loss_raw_con1_different_confident = ((1.5 * criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_different_confident_pred1 & (ignore_img_mask != 255)))
                loss_raw_con2_different_confident = ((1.5 * criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_different_confident_pred2 & (ignore_img_mask != 255)))
                loss_raw_con1_different_unconfident = ((0.5 * criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_different_unconfident_pred1 & (ignore_img_mask != 255)))
                loss_raw_con2_different_unconfident = ((0.5 * criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long())) * (raw_different_unconfident_pred2 & (ignore_img_mask != 255)))
                loss_raw_con1 = (((torch.sum(loss_raw_con1_same) + torch.sum(loss_raw_con1_different_confident)) + torch.sum(loss_raw_con1_different_unconfident)) / torch.sum((ignore_img_mask != 255)).item())
                loss_raw_con2 = (((torch.sum(loss_raw_con2_same) + torch.sum(loss_raw_con2_different_confident)) + torch.sum(loss_raw_con2_different_unconfident)) / torch.sum((ignore_img_mask != 255)).item())
            else:
                # Default: no confidence weighting, plain cross-pseudo supervision.
                loss_con1 = (criterion_u(cutmixed_aug_unlabeled_pred2, aug_unlabeled_pseudo_label1_for_mix) * (aug_unlabeled_ignore_img_mask_for_mix != 255))
                loss_con1 = (torch.sum(loss_con1) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                loss_con2 = (criterion_u(cutmixed_aug_unlabeled_pred1, aug_unlabeled_pseudo_label2_for_mix) * (aug_unlabeled_ignore_img_mask_for_mix != 255))
                loss_con2 = (torch.sum(loss_con2) / torch.sum((aug_unlabeled_ignore_img_mask_for_mix != 255)).item())
                loss_raw_con1 = (criterion_u(raw_unlabeled_pred2, raw_unlabeled_logits['pred1'].softmax(dim=1).max(dim=1)[1].detach().long()) * (ignore_img_mask != 255))
                loss_raw_con1 = (torch.sum(loss_raw_con1) / torch.sum((ignore_img_mask != 255)).item())
                loss_raw_con2 = (criterion_u(raw_unlabeled_pred1, raw_unlabeled_logits['pred2'].softmax(dim=1).max(dim=1)[1].detach().long()) * (ignore_img_mask != 255))
                loss_raw_con2 = (torch.sum(loss_raw_con2) / torch.sum((ignore_img_mask != 255)).item())
            loss_con = ((((loss_con1 + loss_con2) + loss_raw_con1) + loss_raw_con2) / 4)
            loss_con = (loss_con * args.w_con)
            # Discrepancy loss: push branch features apart via (1 + cosine),
            # detaching one side at a time so each branch is repelled from a
            # frozen copy of the other; averaged over labeled/cutmixed/raw views.
            cos_dis = nn.CosineSimilarity(dim=1, eps=1e-06)
            labeled_feature1 = labeled_logits['feature1']
            labeled_feature2 = labeled_logits['feature2']
            loss_dis_labeled1 = (1 + cos_dis(labeled_feature1.detach(), labeled_feature2).mean())
            loss_dis_labeled2 = (1 + cos_dis(labeled_feature2.detach(), labeled_feature1).mean())
            loss_dis_labeled = ((loss_dis_labeled1 + loss_dis_labeled2) / 2)
            cutmixed_aug_unlabeled_feature1 = cutmixed_aug_unlabeled_logits['feature1']
            cutmixed_aug_unlabeled_feature2 = cutmixed_aug_unlabeled_logits['feature2']
            loss_dis_cutmixed_aug_unlabeled1 = (1 + cos_dis(cutmixed_aug_unlabeled_feature1.detach(), cutmixed_aug_unlabeled_feature2).mean())
            loss_dis_cutmixed_aug_unlabeled2 = (1 + cos_dis(cutmixed_aug_unlabeled_feature2.detach(), cutmixed_aug_unlabeled_feature1).mean())
            loss_dis_cutmixed_aug_unlabeled = ((loss_dis_cutmixed_aug_unlabeled1 + loss_dis_cutmixed_aug_unlabeled2) / 2)
            raw_unlabeled_feature1 = raw_unlabeled_logits['feature1']
            raw_unlabeled_feature2 = raw_unlabeled_logits['feature2']
            loss_dis_raw_unlabeled1 = (1 + cos_dis(raw_unlabeled_feature1.detach(), raw_unlabeled_feature2).mean())
            loss_dis_raw_unlabeled2 = (1 + cos_dis(raw_unlabeled_feature2.detach(), raw_unlabeled_feature1).mean())
            loss_dis_raw_unlabeled = ((loss_dis_raw_unlabeled1 + loss_dis_raw_unlabeled2) / 2)
            loss_dis_unlabeled = ((loss_dis_cutmixed_aug_unlabeled + loss_dis_raw_unlabeled) / 2)
            loss_dis = ((loss_dis_labeled + loss_dis_unlabeled) / 2)
            loss_dis = (loss_dis * args.w_dis)
            loss = ((loss_CE + loss_con) + loss_dis)
            dist.barrier()
            loss.backward()
            optimizer.step()
            # Accumulate per-epoch running statistics for logging.
            total_loss += loss.item()
            total_loss_CE += loss_CE.item()
            total_loss_con += loss_con.item()
            total_loss_dis += loss_dis.item()
            # Ratio of high-confidence (>= 0.95) valid pixels, averaged over branches.
            total_confident = ((((unlabeled_pred_confidence1 >= 0.95) & (ignore_img_mask != 255)).sum().item() + ((unlabeled_pred_confidence2 >= 0.95) & (ignore_img_mask != 255)).sum().item()) / 2)
            total_mask_ratio += (total_confident / (ignore_img_mask != 255).sum().item())
            # Poly learning-rate schedule, applied per iteration: backbones in
            # groups 0-1, segmentation heads in the remaining groups.
            iters = ((epoch * len(trainloader_u)) + i)
            backbone_lr = (args.base_lr * ((1 - (iters / total_iters)) ** args.mul_scheduler))
            backbone_lr = (backbone_lr * args.lr_backbone)
            seg_lr = (args.base_lr * ((1 - (iters / total_iters)) ** args.mul_scheduler))
            seg_lr = (seg_lr * args.lr_network)
            optimizer.param_groups[0]['lr'] = backbone_lr
            optimizer.param_groups[1]['lr'] = backbone_lr
            for ii in range(2, len(optimizer.param_groups)):
                optimizer.param_groups[ii]['lr'] = seg_lr
            # Log running means to tensorboard/console 8 times per epoch (rank 0).
            if (((i % (len(trainloader_u) // 8)) == 0) and (args.local_rank <= 0)):
                tb.add_scalar('train_loss_total', (total_loss / (i + 1)), iters)
                tb.add_scalar('train_loss_CE', (total_loss_CE / (i + 1)), iters)
                tb.add_scalar('train_loss_con', (total_loss_con / (i + 1)), iters)
                tb.add_scalar('train_loss_dis', (total_loss_dis / (i + 1)), iters)
            if (((i % (len(trainloader_u) // 8)) == 0) and (args.local_rank <= 0)):
                logger.info('Iters: {:}, Total loss: {:.3f}, Loss CE: {:.3f}, Loss consistency: {:.3f}, Loss discrepancy: {:.3f}, Mask: {:.3f}'.format(i, (total_loss / (i + 1)), (total_loss_CE / (i + 1)), (total_loss_con / (i + 1)), (total_loss_dis / (i + 1)), (total_mask_ratio / (i + 1))))
        # Self-paced learning: raise the confidence threshold each epoch, capped at 0.95.
        if args.use_SPL:
            conf_threshold += 0.01
            if (conf_threshold >= 0.95):
                conf_threshold = 0.95
        # Cheap center-crop eval for most epochs on Cityscapes; sliding-window
        # for the final 20 epochs; original resolution elsewhere.
        if (cfg['dataset'] == 'cityscapes'):
            eval_mode = ('center_crop' if (epoch < (args.epochs - 20)) else 'sliding_window')
        else:
            eval_mode = 'original'
        dist.barrier()
        # Rank 0 evaluates (saving predictions at milestone epochs 5/10/20/40)
        # and rotates best checkpoints per branch and for the branch average.
        if (args.local_rank <= 0):
            if (epoch == 4):
                evaluate_result = evaluate_save(cfg['dataset'], args.save_path, args.local_rank, model, valloader, eval_mode, args, cfg, idx_epoch=5)
            elif (epoch == 9):
                evaluate_result = evaluate_save(cfg['dataset'], args.save_path, args.local_rank, model, valloader, eval_mode, args, cfg, idx_epoch=10)
            elif (epoch == 19):
                evaluate_result = evaluate_save(cfg['dataset'], args.save_path, args.local_rank, model, valloader, eval_mode, args, cfg, idx_epoch=20)
            elif (epoch == 39):
                evaluate_result = evaluate_save(cfg['dataset'], args.save_path, args.local_rank, model, valloader, eval_mode, args, cfg, idx_epoch=40)
            else:
                evaluate_result = evaluate(args.local_rank, model, valloader, eval_mode, args, cfg)
            mIOU1 = evaluate_result['IOU1']
            mIOU2 = evaluate_result['IOU2']
            mIOU_ave = evaluate_result['IOU_ave']
            tb.add_scalar('meanIOU_branch1', mIOU1, epoch)
            tb.add_scalar('meanIOU_branch2', mIOU2, epoch)
            tb.add_scalar('meanIOU_ave', mIOU_ave, epoch)
            logger.info('***** Evaluation with branch 1 {} ***** >>>> meanIOU: {:.2f}\n'.format(eval_mode, mIOU1))
            logger.info('***** Evaluation with branch 2 {} ***** >>>> meanIOU: {:.2f}\n'.format(eval_mode, mIOU2))
            logger.info('***** Evaluation with two branches {} ***** >>>> meanIOU: {:.2f}\n'.format(eval_mode, mIOU_ave))
            # Keep only the single best checkpoint per category: delete the
            # previous best file before saving the new one.
            if (mIOU1 > previous_best1):
                if (previous_best1 != 0):
                    os.remove(os.path.join(args.save_path, ('branch1_%s_%.2f.pth' % (args.backbone, previous_best1))))
                previous_best1 = mIOU1
                torch.save(model.module.state_dict(), os.path.join(args.save_path, ('branch1_%s_%.2f.pth' % (args.backbone, mIOU1))))
            if (mIOU2 > previous_best2):
                if (previous_best2 != 0):
                    os.remove(os.path.join(args.save_path, ('branch2_%s_%.2f.pth' % (args.backbone, previous_best2))))
                previous_best2 = mIOU2
                torch.save(model.module.state_dict(), os.path.join(args.save_path, ('branch2_%s_%.2f.pth' % (args.backbone, mIOU2))))
            if (mIOU_ave > previous_best):
                if (previous_best != 0):
                    os.remove(os.path.join(args.save_path, ('ave_%s_%.2f.pth' % (args.backbone, previous_best))))
                previous_best = mIOU_ave
                torch.save(model.module.state_dict(), os.path.join(args.save_path, ('ave_%s_%.2f.pth' % (args.backbone, mIOU_ave))))
def set_partitions(in_dict):
    """Build a frozen pytree of partition specs for every leaf of *in_dict*.

    Every flattened parameter key is matched against the partition rules;
    an AssertionError is raised if any key is left unmatched.
    """
    matcher = _replacement_rules(_get_partition_rules())
    flat_keys = flatten_dict(in_dict)
    specs = {key: matcher(key, _unmatched) for key in flat_keys}
    assert _unmatched not in specs.values(), 'Incomplete partition spec.'
    return freeze(unflatten_dict(specs))
def generate_pairs_reverse(numCat, numRep, pre_graph):
    """For each category, collect the indices of the pre_graph entries
    that contain it, and emit that index list as a 1-D tensor numRep
    times.

    Returns a list of numCat * numRep tensors.
    """
    result = []
    for cat in range(numCat):
        members = [idx for idx, group in enumerate(pre_graph) if cat in group]
        result.extend(torch.tensor(members) for _ in range(numRep))
    return result
class MemoryFileObject():
    """Adapt a seekable Python file object to the AudioFile C callback
    interface (read / get-size) via ctypes function pointers.

    NOTE(review): AudioFile_GetSizeProc / AudioFile_ReadProc are
    presumably ctypes CFUNCTYPE factories declared elsewhere — confirm.
    """
    def __init__(self, file):
        self.file = file
        # Both seek() and tell() are required: size is computed by
        # seeking to the end, and reads are served at arbitrary offsets.
        if ((not getattr(self.file, 'seek', None)) or (not getattr(self.file, 'tell', None))):
            raise Exception('File object does not support seeking.')
        # Measure total size by seeking to the end, then rewind.
        self.file.seek(0, 2)
        self.file_size = self.file.tell()
        self.file.seek(0)
        self.data = []
        def read_data_cb(ref, offset, requested_length, buffer, actual_count):
            # C-side read callback: copy up to requested_length bytes
            # starting at offset into the caller-provided buffer and
            # report how many bytes were actually produced.
            self.file.seek(offset)
            data = self.file.read(requested_length)
            data_size = len(data)
            memmove(buffer, data, data_size)
            actual_count.contents.value = data_size
            return 0  # 0 signals success to the C caller
        def getsize_cb(ref):
            # C-side size callback: total size computed in __init__.
            return self.file_size
        # Keep references to the ctypes wrappers on self so they are not
        # garbage-collected while C code may still invoke them.
        self.getsize_func = AudioFile_GetSizeProc(getsize_cb)
        self.read_func = AudioFile_ReadProc(read_data_cb)
class LowLevelIRBuilder():
    def __init__(self, current_module: str, errors: Errors, mapper: Mapper, options: CompilerOptions) -> None:
        """Initialize low-level IR builder state.

        Args:
            current_module: fully qualified name of the module being compiled.
            errors: error sink used by self.error(...).
            mapper: shared Mapper instance.
            options: compiler configuration (e.g. capi_version).
        """
        self.current_module = current_module
        self.errors = errors
        self.mapper = mapper
        self.options = options
        # Argument registers of the function currently being built.
        self.args: list[Register] = []
        # Basic blocks built so far; the last one is the "active" block.
        self.blocks: list[BasicBlock] = []
        # Stack of error handler blocks (None = default propagation).
        self.error_handlers: list[(BasicBlock | None)] = [None]
        # Values pending a KeepAlive (borrowed references still in use).
        self.keep_alives: list[Value] = []
def set_module(self, module_name: str, module_path: str) -> None:
self.module_name = module_name
self.module_path = module_path
def add(self, op: Op) -> Value:
assert (not self.blocks[(- 1)].terminated), "Can't add to finished block"
self.blocks[(- 1)].ops.append(op)
return op
def goto(self, target: BasicBlock) -> None:
if (not self.blocks[(- 1)].terminated):
self.add(Goto(target))
    def activate_block(self, block: BasicBlock) -> None:
        """Make *block* the active block; it inherits the innermost
        error handler. The previously active block (if any) must already
        be terminated."""
        if self.blocks:
            assert self.blocks[(- 1)].terminated
        block.error_handler = self.error_handlers[(- 1)]
        self.blocks.append(block)
    def goto_and_activate(self, block: BasicBlock) -> None:
        """Jump to *block* and make it the active block."""
        self.goto(block)
        self.activate_block(block)
    def keep_alive(self, values: list[Value], *, steal: bool=False) -> None:
        """Emit a KeepAlive op so *values* stay live at this point."""
        self.add(KeepAlive(values, steal=steal))
    def push_error_handler(self, handler: (BasicBlock | None)) -> None:
        """Push a new innermost error handler (None = default propagation)."""
        self.error_handlers.append(handler)
    def pop_error_handler(self) -> (BasicBlock | None):
        """Pop and return the innermost error handler."""
        return self.error_handlers.pop()
    def self(self) -> Register:
        """Return the register holding the 'self' argument (arg 0)."""
        return self.args[0]
def flush_keep_alives(self) -> None:
if self.keep_alives:
self.add(KeepAlive(self.keep_alives.copy()))
self.keep_alives = []
def box(self, src: Value) -> Value:
if src.type.is_unboxed:
if (isinstance(src, Integer) and is_tagged(src.type)):
return self.add(LoadLiteral((src.value >> 1), rtype=object_rprimitive))
return self.add(Box(src))
else:
return src
def unbox_or_cast(self, src: Value, target_type: RType, line: int, *, can_borrow: bool=False) -> Value:
if target_type.is_unboxed:
return self.add(Unbox(src, target_type, line))
else:
if can_borrow:
self.keep_alives.append(src)
return self.add(Cast(src, target_type, line, borrow=can_borrow))
    def coerce(self, src: Value, target_type: RType, line: int, force: bool=False, *, can_borrow: bool=False) -> Value:
        """Coerce *src* to *target_type*, emitting whatever box/unbox,
        extend/truncate, shift or cast ops are required.

        If *force* is true, a fresh register is produced even when no
        conversion is needed. If *can_borrow* is true, a boxed cast may
        borrow the source (caller must keep it alive).
        """
        src_type = src.type
        if (src_type.is_unboxed and (not target_type.is_unboxed)):
            return self.box(src)
        if ((src_type.is_unboxed and target_type.is_unboxed) and (not is_runtime_subtype(src_type, target_type))):
            # Both unboxed but incompatible: handle native int / bool /
            # float / tuple conversions explicitly.
            if (isinstance(src, Integer) and is_short_int_rprimitive(src_type) and is_fixed_width_rtype(target_type)):
                value = src.numeric_value()
                if (not check_native_int_range(target_type, value)):
                    self.error(f'Value {value} is out of range for "{target_type}"', line)
                # Tagged representation is the value shifted left one bit.
                return Integer((src.value >> 1), target_type)
            elif (is_int_rprimitive(src_type) and is_fixed_width_rtype(target_type)):
                return self.coerce_int_to_fixed_width(src, target_type, line)
            elif (is_fixed_width_rtype(src_type) and is_int_rprimitive(target_type)):
                return self.coerce_fixed_width_to_int(src, line)
            elif (is_short_int_rprimitive(src_type) and is_fixed_width_rtype(target_type)):
                return self.coerce_short_int_to_fixed_width(src, target_type, line)
            elif (isinstance(src_type, RPrimitive) and isinstance(target_type, RPrimitive) and src_type.is_native_int and target_type.is_native_int and (src_type.size == target_type.size) and (src_type.is_signed == target_type.is_signed)):
                # Same size and signedness: representations are identical.
                return src
            elif ((is_bool_rprimitive(src_type) or is_bit_rprimitive(src_type)) and is_tagged(target_type)):
                # bool/bit -> tagged int: shift left to add the tag bit.
                shifted = self.int_op(bool_rprimitive, src, Integer(1, bool_rprimitive), IntOp.LEFT_SHIFT)
                return self.add(Extend(shifted, target_type, signed=False))
            elif ((is_bool_rprimitive(src_type) or is_bit_rprimitive(src_type)) and is_fixed_width_rtype(target_type)):
                return self.add(Extend(src, target_type, signed=False))
            elif (isinstance(src, Integer) and is_float_rprimitive(target_type)):
                if is_tagged(src_type):
                    return Float(float((src.value // 2)))
                return Float(float(src.value))
            elif (is_tagged(src_type) and is_float_rprimitive(target_type)):
                return self.int_to_float(src, line)
            elif (isinstance(src_type, RTuple) and isinstance(target_type, RTuple) and (len(src_type.types) == len(target_type.types))):
                # Coerce element-wise; reuse TupleSet operands when
                # possible to avoid redundant TupleGet ops.
                values = []
                for i in range(len(src_type.types)):
                    v = None
                    if isinstance(src, TupleSet):
                        item = src.items[i]
                        if (not isinstance(item, Register)):
                            v = item
                    if (v is None):
                        v = TupleGet(src, i)
                        self.add(v)
                    values.append(v)
                return self.add(TupleSet([self.coerce(v, t, line) for (v, t) in zip(values, target_type.types)], line))
            # Fallback: box, then unbox to the target representation.
            tmp = self.box(src)
            return self.unbox_or_cast(tmp, target_type, line)
        if (((not src_type.is_unboxed) and target_type.is_unboxed) or (not is_subtype(src_type, target_type))):
            return self.unbox_or_cast(src, target_type, line, can_borrow=can_borrow)
        elif force:
            tmp = Register(target_type)
            self.add(Assign(tmp, src))
            return tmp
        return src
    def coerce_int_to_fixed_width(self, src: Value, target_type: RType, line: int) -> Value:
        """Convert an arbitrary-precision tagged int to a fixed-width
        native int.

        Fast path: the runtime value is a short (unboxed) tagged int
        that fits the target, so it can be untagged (and possibly
        truncated) inline. Slow path: convert via a C helper when the
        target is wide enough, otherwise emit an overflow-error helper
        call (values on the slow path cannot fit a narrower target).
        """
        assert is_fixed_width_rtype(target_type), target_type
        assert isinstance(target_type, RPrimitive)
        res = Register(target_type)
        (fast, slow, end) = (BasicBlock(), BasicBlock(), BasicBlock())
        check = self.check_tagged_short_int(src, line)
        self.add(Branch(check, fast, slow, Branch.BOOL))
        self.activate_block(fast)
        size = target_type.size
        if (size < int_rprimitive.size):
            # Narrowing: range-check the tagged value before truncating.
            (fast2, fast3) = (BasicBlock(), BasicBlock())
            upper_bound = (1 << ((size * 8) - 1))
            if (not target_type.is_signed):
                upper_bound *= 2
            check2 = self.add(ComparisonOp(src, Integer(upper_bound, src.type), ComparisonOp.SLT))
            self.add(Branch(check2, fast2, slow, Branch.BOOL))
            self.activate_block(fast2)
            if target_type.is_signed:
                lower_bound = (- upper_bound)
            else:
                lower_bound = 0
            check3 = self.add(ComparisonOp(src, Integer(lower_bound, src.type), ComparisonOp.SGE))
            self.add(Branch(check3, fast3, slow, Branch.BOOL))
            self.activate_block(fast3)
            # Drop the tag bit, then truncate to the target width.
            tmp = self.int_op(c_pyssize_t_rprimitive, src, Integer(1, c_pyssize_t_rprimitive), IntOp.RIGHT_SHIFT, line)
            tmp = self.add(Truncate(tmp, target_type))
        else:
            if (size > int_rprimitive.size):
                # Widening: sign-extend before dropping the tag bit.
                tmp = self.add(Extend(src, target_type, signed=True))
            else:
                tmp = src
            tmp = self.int_op(target_type, tmp, Integer(1, target_type), IntOp.RIGHT_SHIFT, line)
        self.add(Assign(res, tmp))
        self.goto(end)
        self.activate_block(slow)
        if (is_int64_rprimitive(target_type) or (is_int32_rprimitive(target_type) and (size == int_rprimitive.size))):
            # Boxed int: clear the tag bit (XOR 1) to recover the object
            # pointer, then convert through the C helper.
            ptr = self.int_op(pointer_rprimitive, src, Integer(1, pointer_rprimitive), IntOp.XOR, line)
            ptr2 = Register(c_pointer_rprimitive)
            self.add(Assign(ptr2, ptr))
            if is_int64_rprimitive(target_type):
                conv_op = int_to_int64_op
            else:
                conv_op = int_to_int32_op
            tmp = self.call_c(conv_op, [ptr2], line)
            self.add(Assign(res, tmp))
            self.add(KeepAlive([src]))
            self.goto(end)
        elif is_int32_rprimitive(target_type):
            # A value that failed the fast-path range check cannot fit.
            self.call_c(int32_overflow, [], line)
            self.add(Unreachable())
        elif is_int16_rprimitive(target_type):
            self.call_c(int16_overflow, [], line)
            self.add(Unreachable())
        elif is_uint8_rprimitive(target_type):
            self.call_c(uint8_overflow, [], line)
            self.add(Unreachable())
        else:
            assert False, target_type
        self.activate_block(end)
        return res
def coerce_short_int_to_fixed_width(self, src: Value, target_type: RType, line: int) -> Value:
if is_int64_rprimitive(target_type):
return self.int_op(target_type, src, Integer(1, target_type), IntOp.RIGHT_SHIFT, line)
assert False, (src.type, target_type)
    def coerce_fixed_width_to_int(self, src: Value, line: int) -> Value:
        """Convert a fixed-width native int to an arbitrary-precision
        tagged int.

        Types strictly narrower than the tagged representation can be
        extended and tagged unconditionally; otherwise a range check
        decides between tagging inline (fast) and boxing through a C
        helper (slow).
        """
        if ((is_int32_rprimitive(src.type) and (PLATFORM_SIZE == 8)) or is_int16_rprimitive(src.type) or is_uint8_rprimitive(src.type)):
            # Always fits with room for the tag bit: extend, then shift.
            extended = self.add(Extend(src, c_pyssize_t_rprimitive, signed=src.type.is_signed))
            return self.int_op(int_rprimitive, extended, Integer(1, c_pyssize_t_rprimitive), IntOp.LEFT_SHIFT, line)
        assert is_fixed_width_rtype(src.type)
        assert isinstance(src.type, RPrimitive)
        src_type = src.type
        res = Register(int_rprimitive)
        (fast, fast2, slow, end) = (BasicBlock(), BasicBlock(), BasicBlock(), BasicBlock())
        c1 = self.add(ComparisonOp(src, Integer(MAX_SHORT_INT, src_type), ComparisonOp.SLE))
        self.add(Branch(c1, fast, slow, Branch.BOOL))
        self.activate_block(fast)
        c2 = self.add(ComparisonOp(src, Integer(MIN_SHORT_INT, src_type), ComparisonOp.SGE))
        self.add(Branch(c2, fast2, slow, Branch.BOOL))
        self.activate_block(slow)
        # Out of short-int range: box through a C conversion helper.
        if is_int64_rprimitive(src_type):
            conv_op = int64_to_int_op
        elif is_int32_rprimitive(src_type):
            assert (PLATFORM_SIZE == 4)
            conv_op = ssize_t_to_int_op
        else:
            assert False, src_type
        x = self.call_c(conv_op, [src], line)
        self.add(Assign(res, x))
        self.goto(end)
        self.activate_block(fast2)
        if (int_rprimitive.size < src_type.size):
            tmp = self.add(Truncate(src, c_pyssize_t_rprimitive))
        else:
            tmp = src
        # Tag by shifting left one bit.
        s = self.int_op(int_rprimitive, tmp, Integer(1, tmp.type), IntOp.LEFT_SHIFT, line)
        self.add(Assign(res, s))
        self.goto(end)
        self.activate_block(end)
        return res
def coerce_nullable(self, src: Value, target_type: RType, line: int) -> Value:
if ((src.type.is_unboxed == target_type.is_unboxed) and ((target_type.is_unboxed and is_runtime_subtype(src.type, target_type)) or ((not target_type.is_unboxed) and is_subtype(src.type, target_type)))):
return src
target = Register(target_type)
(valid, invalid, out) = (BasicBlock(), BasicBlock(), BasicBlock())
self.add(Branch(src, invalid, valid, Branch.IS_ERROR))
self.activate_block(valid)
coerced = self.coerce(src, target_type, line)
self.add(Assign(target, coerced, line))
self.goto(out)
self.activate_block(invalid)
error = self.add(LoadErrorValue(target_type))
self.add(Assign(target, error, line))
self.goto_and_activate(out)
return target
def get_attr(self, obj: Value, attr: str, result_type: RType, line: int, *, borrow: bool=False) -> Value:
if (isinstance(obj.type, RInstance) and obj.type.class_ir.is_ext_class and obj.type.class_ir.has_attr(attr)):
op = GetAttr(obj, attr, line, borrow=borrow)
if op.is_borrowed:
self.keep_alives.append(obj)
return self.add(op)
elif isinstance(obj.type, RUnion):
return self.union_get_attr(obj, obj.type, attr, result_type, line)
else:
return self.py_get_attr(obj, attr, line)
def union_get_attr(self, obj: Value, rtype: RUnion, attr: str, result_type: RType, line: int) -> Value:
def get_item_attr(value: Value) -> Value:
return self.get_attr(value, attr, result_type, line)
return self.decompose_union_helper(obj, rtype, result_type, get_item_attr, line)
def py_get_attr(self, obj: Value, attr: str, line: int) -> Value:
key = self.load_str(attr)
return self.call_c(py_getattr_op, [obj, key], line)
    def isinstance_helper(self, obj: Value, class_irs: list[ClassIR], line: int) -> Value:
        """Check whether *obj* is an instance of any class in
        *class_irs*, chaining per-class checks with short-circuit 'or'."""
        if (not class_irs):
            return self.false()
        ret = self.isinstance_native(obj, class_irs[0], line)
        for class_ir in class_irs[1:]:
            # NOTE(review): `other` and the lambda close over loop-mutated
            # names (class_ir, ret); this is safe only if
            # shortcircuit_helper invokes them before the next iteration
            # — confirm.
            def other() -> Value:
                return self.isinstance_native(obj, class_ir, line)
            ret = self.shortcircuit_helper('or', bool_rprimitive, (lambda : ret), other, line)
        return ret
def get_type_of_obj(self, obj: Value, line: int) -> Value:
ob_type_address = self.add(GetElementPtr(obj, PyObject, 'ob_type', line))
ob_type = self.add(LoadMem(object_rprimitive, ob_type_address))
self.add(KeepAlive([obj]))
return ob_type
def type_is_op(self, obj: Value, type_obj: Value, line: int) -> Value:
typ = self.get_type_of_obj(obj, line)
return self.add(ComparisonOp(typ, type_obj, ComparisonOp.EQ, line))
    def isinstance_native(self, obj: Value, class_ir: ClassIR, line: int) -> Value:
        """isinstance() check against a native (extension) class.

        When the set of concrete subclasses is small and fully known,
        compare type pointers directly; otherwise fall back to the
        generic C helper.
        """
        concrete = all_concrete_classes(class_ir)
        if ((concrete is None) or (len(concrete) > (FAST_ISINSTANCE_MAX_SUBCLASSES + 1))):
            return self.call_c(fast_isinstance_op, [obj, self.get_native_type(class_ir)], line)
        if (not concrete):
            # No concrete subclasses: nothing can be an instance.
            return self.false()
        type_obj = self.get_native_type(concrete[0])
        ret = self.type_is_op(obj, type_obj, line)
        for c in concrete[1:]:
            # NOTE(review): `other` and the lambda close over loop-mutated
            # names (c, ret); safe only if shortcircuit_helper calls them
            # before the next iteration — confirm.
            def other() -> Value:
                return self.type_is_op(obj, self.get_native_type(c), line)
            ret = self.shortcircuit_helper('or', bool_rprimitive, (lambda : ret), other, line)
        return ret
    def _construct_varargs(self, args: Sequence[tuple[(Value, ArgKind, (str | None))]], line: int, *, has_star: bool, has_star2: bool) -> tuple[((Value | None), (Value | None))]:
        """Build the *args tuple and/or **kwargs dict for a call from a
        flat sequence of (value, kind, name) actuals.

        Positional values are accumulated in a list (converted to a
        tuple at the end); named values go into a dict. Optional
        (nullable) actuals require runtime branches: an undefined value
        is skipped, and once an optional positional has been skipped,
        later optional values must be passed by name (tracked by
        seen_empty_reg). Returns (star_result, star2_result); each is
        None when the corresponding has_star/has_star2 flag is false.
        """
        star_result: (Value | None) = None
        star2_result: (Value | None) = None
        star_values: list[Value] = []
        star2_keys: list[Value] = []
        star2_values: list[Value] = []
        # Runtime flag: set once an optional positional arg was absent.
        seen_empty_reg: (Register | None) = None
        for (value, kind, name) in args:
            if (kind == ARG_STAR):
                if (star_result is None):
                    star_result = self.new_list_op(star_values, line)
                self.call_c(list_extend_op, [star_result, value], line)
            elif (kind == ARG_STAR2):
                if (star2_result is None):
                    star2_result = self._create_dict(star2_keys, star2_values, line)
                self.call_c(dict_update_in_display_op, [star2_result, value], line=line)
            else:
                nullable = kind.is_optional()
                maybe_pos = (kind.is_positional() and has_star)
                maybe_named = (kind.is_named() or (kind.is_optional() and name and has_star2))
                if nullable:
                    # Nullable args force materialization of the
                    # containers, since appending happens conditionally.
                    if (maybe_pos and (star_result is None)):
                        star_result = self.new_list_op(star_values, line)
                    if (maybe_named and (star2_result is None)):
                        star2_result = self._create_dict(star2_keys, star2_values, line)
                if (maybe_pos and (star_result is None)):
                    # Containers not materialized yet: accumulate statically.
                    star_values.append(value)
                    continue
                if (maybe_named and (star2_result is None)):
                    assert (name is not None)
                    key = self.load_str(name)
                    star2_keys.append(key)
                    star2_values.append(value)
                    continue
                new_seen_empty_reg = seen_empty_reg
                out = BasicBlock()
                if nullable:
                    if (maybe_pos and (not seen_empty_reg)):
                        new_seen_empty_reg = Register(bool_rprimitive)
                        self.add(Assign(new_seen_empty_reg, self.false(), line))
                    # Undefined value: skip (and record the skip if positional).
                    skip = (BasicBlock() if maybe_pos else out)
                    keep = BasicBlock()
                    self.add(Branch(value, skip, keep, Branch.IS_ERROR))
                    self.activate_block(keep)
                if (maybe_pos and maybe_named and seen_empty_reg):
                    # Choose at runtime between positional and named form.
                    (pos_block, named_block) = (BasicBlock(), BasicBlock())
                    self.add(Branch(seen_empty_reg, named_block, pos_block, Branch.BOOL))
                else:
                    pos_block = named_block = BasicBlock()
                    self.goto(pos_block)
                if maybe_pos:
                    self.activate_block(pos_block)
                    assert star_result
                    self.translate_special_method_call(star_result, 'append', [value], result_type=None, line=line)
                    self.goto(out)
                if (maybe_named and ((not maybe_pos) or seen_empty_reg)):
                    self.activate_block(named_block)
                    assert (name is not None)
                    key = self.load_str(name)
                    assert star2_result
                    self.translate_special_method_call(star2_result, '__setitem__', [key, value], result_type=None, line=line)
                    self.goto(out)
                if (nullable and maybe_pos and new_seen_empty_reg):
                    assert (skip is not out)
                    self.activate_block(skip)
                    self.add(Assign(new_seen_empty_reg, self.true(), line))
                    self.goto(out)
                self.activate_block(out)
                seen_empty_reg = new_seen_empty_reg
        assert ((not (star_result or star_values)) or has_star)
        assert ((not (star2_result or star2_values)) or has_star2)
        if has_star:
            # The caller expects a tuple, not a list.
            if (star_result is None):
                star_result = self.new_tuple(star_values, line)
            else:
                star_result = self.call_c(list_tuple_op, [star_result], line)
        if (has_star2 and (star2_result is None)):
            star2_result = self._create_dict(star2_keys, star2_values, line)
        return (star_result, star2_result)
def py_call(self, function: Value, arg_values: list[Value], line: int, arg_kinds: (list[ArgKind] | None)=None, arg_names: (Sequence[(str | None)] | None)=None) -> Value:
if use_vectorcall(self.options.capi_version):
result = self._py_vector_call(function, arg_values, line, arg_kinds, arg_names)
if (result is not None):
return result
if ((arg_kinds is None) or all(((kind == ARG_POS) for kind in arg_kinds))):
return self.call_c(py_call_op, ([function] + arg_values), line)
assert (arg_names is not None)
(pos_args_tuple, kw_args_dict) = self._construct_varargs(list(zip(arg_values, arg_kinds, arg_names)), line, has_star=True, has_star2=True)
assert (pos_args_tuple and kw_args_dict)
return self.call_c(py_call_with_kwargs_op, [function, pos_args_tuple, kw_args_dict], line)
def _py_vector_call(self, function: Value, arg_values: list[Value], line: int, arg_kinds: (list[ArgKind] | None)=None, arg_names: (Sequence[(str | None)] | None)=None) -> (Value | None):
if ((arg_kinds is None) or all((((not kind.is_star()) and (not kind.is_optional())) for kind in arg_kinds))):
if arg_values:
coerced_args = [self.coerce(arg, object_rprimitive, line) for arg in arg_values]
arg_ptr = self.setup_rarray(object_rprimitive, coerced_args, object_ptr=True)
else:
arg_ptr = Integer(0, object_pointer_rprimitive)
num_pos = num_positional_args(arg_values, arg_kinds)
keywords = self._vectorcall_keywords(arg_names)
value = self.call_c(py_vectorcall_op, [function, arg_ptr, Integer(num_pos, c_size_t_rprimitive), keywords], line)
if arg_values:
self.add(KeepAlive(coerced_args))
return value
return None
def _vectorcall_keywords(self, arg_names: (Sequence[(str | None)] | None)) -> Value:
if arg_names:
kw_list = [name for name in arg_names if (name is not None)]
if kw_list:
return self.add(LoadLiteral(tuple(kw_list), object_rprimitive))
return Integer(0, object_rprimitive)
def py_method_call(self, obj: Value, method_name: str, arg_values: list[Value], line: int, arg_kinds: (list[ArgKind] | None), arg_names: (Sequence[(str | None)] | None)) -> Value:
if use_method_vectorcall(self.options.capi_version):
result = self._py_vector_method_call(obj, method_name, arg_values, line, arg_kinds, arg_names)
if (result is not None):
return result
if ((arg_kinds is None) or all(((kind == ARG_POS) for kind in arg_kinds))):
method_name_reg = self.load_str(method_name)
return self.call_c(py_method_call_op, ([obj, method_name_reg] + arg_values), line)
else:
method = self.py_get_attr(obj, method_name, line)
return self.py_call(method, arg_values, line, arg_kinds=arg_kinds, arg_names=arg_names)
def _py_vector_method_call(self, obj: Value, method_name: str, arg_values: list[Value], line: int, arg_kinds: (list[ArgKind] | None), arg_names: (Sequence[(str | None)] | None)) -> (Value | None):
if ((arg_kinds is None) or all((((not kind.is_star()) and (not kind.is_optional())) for kind in arg_kinds))):
method_name_reg = self.load_str(method_name)
coerced_args = [self.coerce(arg, object_rprimitive, line) for arg in ([obj] + arg_values)]
arg_ptr = self.setup_rarray(object_rprimitive, coerced_args, object_ptr=True)
num_pos = num_positional_args(arg_values, arg_kinds)
keywords = self._vectorcall_keywords(arg_names)
value = self.call_c(py_vectorcall_method_op, [method_name_reg, arg_ptr, Integer(((num_pos + 1) | PY_VECTORCALL_ARGUMENTS_OFFSET), c_size_t_rprimitive), keywords], line)
self.add(KeepAlive(coerced_args))
return value
return None
    def call(self, decl: FuncDecl, args: Sequence[Value], arg_kinds: list[ArgKind], arg_names: Sequence[(str | None)], line: int, *, bitmap_args: (list[Register] | None)=None) -> Value:
        """Call a native function after normalizing the actual arguments
        to the callee's positional calling convention."""
        args = self.native_args_to_positional(args, arg_kinds, arg_names, decl.sig, line, bitmap_args=bitmap_args)
        return self.add(Call(decl, args, line))
    def native_args_to_positional(self, args: Sequence[Value], arg_kinds: list[ArgKind], arg_names: Sequence[(str | None)], sig: FuncSignature, line: int, *, bitmap_args: (list[Register] | None)=None) -> list[Value]:
        """Map actual arguments to the positional convention of a native
        function.

        Star args are packed via _construct_varargs, omitted optional
        args are filled with error/placeholder values, and trailing
        argument-presence bitmap(s) are appended when the signature
        requires them (bitmap_args, if given, supply already-computed
        bitmap registers).
        """
        sig_args = sig.args
        n = sig.num_bitmap_args
        if n:
            # Bitmap args are synthetic trailing parameters.
            sig_args = sig_args[:(- n)]
        sig_arg_kinds = [arg.kind for arg in sig_args]
        sig_arg_names = [arg.name for arg in sig_args]
        concrete_kinds = [concrete_arg_kind(arg_kind) for arg_kind in arg_kinds]
        formal_to_actual = map_actuals_to_formals(concrete_kinds, arg_names, sig_arg_kinds, sig_arg_names, (lambda n: AnyType(TypeOfAny.special_form)))
        has_star = has_star2 = False
        star_arg_entries = []
        for (lst, arg) in zip(formal_to_actual, sig_args):
            if arg.kind.is_star():
                star_arg_entries.extend([(args[i], arg_kinds[i], arg_names[i]) for i in lst])
            has_star = (has_star or (arg.kind == ARG_STAR))
            has_star2 = (has_star2 or (arg.kind == ARG_STAR2))
        (star_arg, star2_arg) = self._construct_varargs(star_arg_entries, line, has_star=has_star, has_star2=has_star2)
        output_args: list[Value] = []
        for (lst, arg) in zip(formal_to_actual, sig_args):
            if (arg.kind == ARG_STAR):
                assert star_arg
                output_arg = star_arg
            elif (arg.kind == ARG_STAR2):
                assert star2_arg
                output_arg = star2_arg
            elif (not lst):
                # Formal arg with no actual: pass a placeholder/error value.
                if is_fixed_width_rtype(arg.type):
                    output_arg = Integer(0, arg.type)
                elif is_float_rprimitive(arg.type):
                    output_arg = Float(0.0)
                else:
                    output_arg = self.add(LoadErrorValue(arg.type, is_borrowed=True))
            else:
                base_arg = args[lst[0]]
                if arg_kinds[lst[0]].is_optional():
                    output_arg = self.coerce_nullable(base_arg, arg.type, line)
                else:
                    output_arg = self.coerce(base_arg, arg.type, line)
            output_args.append(output_arg)
        for i in reversed(range(n)):
            if (bitmap_args and (i < len(bitmap_args))):
                output_args.append(bitmap_args[i])
                continue
            # Compute the i-th presence bitmap: one bit per optional
            # formal whose unboxed type overlaps its error value.
            bitmap = 0
            c = 0
            for (lst, arg) in zip(formal_to_actual, sig_args):
                if (arg.kind.is_optional() and arg.type.error_overlap):
                    if ((i * BITMAP_BITS) <= c < ((i + 1) * BITMAP_BITS)):
                        if lst:
                            bitmap |= (1 << (c & (BITMAP_BITS - 1)))
                    c += 1
            output_args.append(Integer(bitmap, bitmap_rprimitive))
        return output_args
    def gen_method_call(self, base: Value, name: str, arg_values: list[Value], result_type: (RType | None), line: int, arg_kinds: (list[ArgKind] | None)=None, arg_names: (list[(str | None)] | None)=None, can_borrow: bool=False) -> Value:
        """Generate a method call, choosing the fastest applicable
        strategy: native MethodCall, callable stored in an attribute,
        union dispatch, a specialized primitive op, or a generic Python
        method call.
        """
        if ((arg_kinds is not None) and any((kind.is_star() for kind in arg_kinds))):
            # Star args cannot be mapped to a native call directly.
            # NOTE(review): this passes base.line rather than the `line`
            # parameter — presumably intentional; confirm.
            return self.py_method_call(base, name, arg_values, base.line, arg_kinds, arg_names)
        if (isinstance(base.type, RInstance) and base.type.class_ir.is_ext_class and (not base.type.class_ir.builtin_base)):
            if base.type.class_ir.has_method(name):
                decl = base.type.class_ir.method_decl(name)
                if (arg_kinds is None):
                    assert (arg_names is None), 'arg_kinds not present but arg_names is'
                    arg_kinds = [ARG_POS for _ in arg_values]
                    arg_names = [None for _ in arg_values]
                else:
                    assert (arg_names is not None), 'arg_kinds present but arg_names is not'
                assert decl.bound_sig
                arg_values = self.native_args_to_positional(arg_values, arg_kinds, arg_names, decl.bound_sig, line)
                return self.add(MethodCall(base, name, arg_values, line))
            elif base.type.class_ir.has_attr(name):
                # The "method" is a callable stored in an attribute.
                function = self.add(GetAttr(base, name, line))
                return self.py_call(function, arg_values, line, arg_kinds=arg_kinds, arg_names=arg_names)
        elif isinstance(base.type, RUnion):
            return self.union_method_call(base, base.type, name, arg_values, result_type, line, arg_kinds, arg_names)
        if ((not arg_kinds) or (arg_kinds == ([ARG_POS] * len(arg_values)))):
            # Try a specialized primitive op (e.g. list.append).
            target = self.translate_special_method_call(base, name, arg_values, result_type, line, can_borrow=can_borrow)
            if target:
                return target
        return self.py_method_call(base, name, arg_values, line, arg_kinds, arg_names)
def union_method_call(self, base: Value, obj_type: RUnion, name: str, arg_values: list[Value], return_rtype: (RType | None), line: int, arg_kinds: (list[ArgKind] | None), arg_names: (list[(str | None)] | None)) -> Value:
return_rtype = (return_rtype or object_rprimitive)
def call_union_item(value: Value) -> Value:
return self.gen_method_call(value, name, arg_values, return_rtype, line, arg_kinds, arg_names)
return self.decompose_union_helper(base, obj_type, return_rtype, call_union_item, line)
    def none(self) -> Value:
        """Load None (unboxed none_rprimitive representation is 1)."""
        return Integer(1, none_rprimitive)
    def true(self) -> Value:
        """Load an unboxed True value (1)."""
        return Integer(1, bool_rprimitive)
    def false(self) -> Value:
        """Load an unboxed False value (0)."""
        return Integer(0, bool_rprimitive)
    def none_object(self) -> Value:
        """Load the boxed None object (Py_None)."""
        return self.add(LoadAddress(none_object_op.type, none_object_op.src, line=(- 1)))
def load_int(self, value: int) -> Value:
if ((value > MAX_LITERAL_SHORT_INT) or (value < MIN_LITERAL_SHORT_INT)):
return self.add(LoadLiteral(value, int_rprimitive))
else:
return Integer(value)
    def load_float(self, value: float) -> Value:
        """Load a float literal as an immediate value."""
        return Float(value)
    def load_str(self, value: str) -> Value:
        """Load a str literal from the literals table."""
        return self.add(LoadLiteral(value, str_rprimitive))
    def load_bytes(self, value: bytes) -> Value:
        """Load a bytes literal from the literals table."""
        return self.add(LoadLiteral(value, bytes_rprimitive))
    def load_complex(self, value: complex) -> Value:
        """Load a complex literal (as a boxed object) from the literals table."""
        return self.add(LoadLiteral(value, object_rprimitive))
def load_static_checked(self, typ: RType, identifier: str, module_name: (str | None)=None, namespace: str=NAMESPACE_STATIC, line: int=(- 1), error_msg: (str | None)=None) -> Value:
if (error_msg is None):
error_msg = f'name "{identifier}" is not defined'
(ok_block, error_block) = (BasicBlock(), BasicBlock())
value = self.add(LoadStatic(typ, identifier, module_name, namespace, line=line))
self.add(Branch(value, error_block, ok_block, Branch.IS_ERROR, rare=True))
self.activate_block(error_block)
self.add(RaiseStandardError(RaiseStandardError.NAME_ERROR, error_msg, line))
self.add(Unreachable())
self.activate_block(ok_block)
return value
    def load_module(self, name: str) -> Value:
        """Load a module object from the module statics namespace."""
        return self.add(LoadStatic(object_rprimitive, name, namespace=NAMESPACE_MODULE))
def get_native_type(self, cls: ClassIR) -> Value:
fullname = f'{cls.module_name}.{cls.name}'
return self.load_native_type_object(fullname)
def load_native_type_object(self, fullname: str) -> Value:
(module, name) = fullname.rsplit('.', 1)
return self.add(LoadStatic(object_rprimitive, name, module, NAMESPACE_TYPE))
def binary_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value:
ltype = lreg.type
rtype = rreg.type
if (isinstance(ltype, RTuple) and isinstance(rtype, RTuple) and (op in ('==', '!='))):
return self.compare_tuples(lreg, rreg, op, line)
if (op in ('==', '!=')):
value = self.translate_eq_cmp(lreg, rreg, op, line)
if (value is not None):
return value
if (op in ('is', 'is not')):
return self.translate_is_op(lreg, rreg, op, line)
if (is_str_rprimitive(ltype) and is_str_rprimitive(rtype) and (op in ('==', '!='))):
return self.compare_strings(lreg, rreg, op, line)
if (is_bytes_rprimitive(ltype) and is_bytes_rprimitive(rtype) and (op in ('==', '!='))):
return self.compare_bytes(lreg, rreg, op, line)
if (is_tagged(ltype) and is_tagged(rtype) and (op in int_comparison_op_mapping)):
return self.compare_tagged(lreg, rreg, op, line)
if (is_bool_rprimitive(ltype) and is_bool_rprimitive(rtype) and (op in BOOL_BINARY_OPS)):
if (op in ComparisonOp.signed_ops):
return self.bool_comparison_op(lreg, rreg, op, line)
else:
return self.bool_bitwise_op(lreg, rreg, op[0], line)
if (isinstance(rtype, RInstance) and (op in ('in', 'not in'))):
return self.translate_instance_contains(rreg, lreg, op, line)
if is_fixed_width_rtype(ltype):
if (op in FIXED_WIDTH_INT_BINARY_OPS):
if op.endswith('='):
op = op[:(- 1)]
if (op != '//'):
op_id = int_op_to_id[op]
else:
op_id = IntOp.DIV
if (is_bool_rprimitive(rtype) or is_bit_rprimitive(rtype)):
rreg = self.coerce(rreg, ltype, line)
rtype = ltype
if (is_fixed_width_rtype(rtype) or is_tagged(rtype)):
return self.fixed_width_int_op(ltype, lreg, rreg, op_id, line)
if isinstance(rreg, Integer):
return self.fixed_width_int_op(ltype, lreg, self.coerce(rreg, ltype, line), op_id, line)
elif (op in ComparisonOp.signed_ops):
if is_int_rprimitive(rtype):
rreg = self.coerce_int_to_fixed_width(rreg, ltype, line)
elif (is_bool_rprimitive(rtype) or is_bit_rprimitive(rtype)):
rreg = self.coerce(rreg, ltype, line)
op_id = ComparisonOp.signed_ops[op]
if is_fixed_width_rtype(rreg.type):
return self.comparison_op(lreg, rreg, op_id, line)
if isinstance(rreg, Integer):
return self.comparison_op(lreg, self.coerce(rreg, ltype, line), op_id, line)
elif is_fixed_width_rtype(rtype):
if (op in FIXED_WIDTH_INT_BINARY_OPS):
if op.endswith('='):
op = op[:(- 1)]
if (op != '//'):
op_id = int_op_to_id[op]
else:
op_id = IntOp.DIV
if isinstance(lreg, Integer):
return self.fixed_width_int_op(rtype, self.coerce(lreg, rtype, line), rreg, op_id, line)
if is_tagged(ltype):
return self.fixed_width_int_op(rtype, lreg, rreg, op_id, line)
if (is_bool_rprimitive(ltype) or is_bit_rprimitive(ltype)):
lreg = self.coerce(lreg, rtype, line)
return self.fixed_width_int_op(rtype, lreg, rreg, op_id, line)
elif (op in ComparisonOp.signed_ops):
if is_int_rprimitive(ltype):
lreg = self.coerce_int_to_fixed_width(lreg, rtype, line)
elif (is_bool_rprimitive(ltype) or is_bit_rprimitive(ltype)):
lreg = self.coerce(lreg, rtype, line)
op_id = ComparisonOp.signed_ops[op]
if isinstance(lreg, Integer):
return self.comparison_op(self.coerce(lreg, rtype, line), rreg, op_id, line)
if is_fixed_width_rtype(lreg.type):
return self.comparison_op(lreg, rreg, op_id, line)
if (op in ('==', '!=')):
op_id = ComparisonOp.signed_ops[op]
if (is_tagged(ltype) and is_subtype(rtype, ltype)):
rreg = self.coerce(rreg, int_rprimitive, line)
return self.comparison_op(lreg, rreg, op_id, line)
if (is_tagged(rtype) and is_subtype(ltype, rtype)):
lreg = self.coerce(lreg, int_rprimitive, line)
return self.comparison_op(lreg, rreg, op_id, line)
elif (op in op in int_comparison_op_mapping):
if (is_tagged(ltype) and is_subtype(rtype, ltype)):
rreg = self.coerce(rreg, short_int_rprimitive, line)
return self.compare_tagged(lreg, rreg, op, line)
if (is_tagged(rtype) and is_subtype(ltype, rtype)):
lreg = self.coerce(lreg, short_int_rprimitive, line)
return self.compare_tagged(lreg, rreg, op, line)
if (is_float_rprimitive(ltype) or is_float_rprimitive(rtype)):
if isinstance(lreg, Integer):
lreg = Float(float(lreg.numeric_value()))
elif isinstance(rreg, Integer):
rreg = Float(float(rreg.numeric_value()))
elif is_int_rprimitive(lreg.type):
lreg = self.int_to_float(lreg, line)
elif is_int_rprimitive(rreg.type):
rreg = self.int_to_float(rreg, line)
if (is_float_rprimitive(lreg.type) and is_float_rprimitive(rreg.type)):
if (op in float_comparison_op_to_id):
return self.compare_floats(lreg, rreg, float_comparison_op_to_id[op], line)
if op.endswith('='):
base_op = op[:(- 1)]
else:
base_op = op
if (base_op in float_op_to_id):
return self.float_op(lreg, rreg, base_op, line)
call_c_ops_candidates = binary_ops.get(op, [])
target = self.matching_call_c(call_c_ops_candidates, [lreg, rreg], line)
assert target, ('Unsupported binary operation: %s' % op)
return target
def check_tagged_short_int(self, val: Value, line: int, negated: bool=False) -> Value:
int_tag = Integer(1, c_pyssize_t_rprimitive, line)
bitwise_and = self.int_op(c_pyssize_t_rprimitive, val, int_tag, IntOp.AND, line)
zero = Integer(0, c_pyssize_t_rprimitive, line)
op = (ComparisonOp.NEQ if negated else ComparisonOp.EQ)
check = self.comparison_op(bitwise_and, zero, op, line)
return check
def compare_tagged(self, lhs: Value, rhs: Value, op: str, line: int) -> Value:
    """Compare two tagged integers, producing a bool value.

    Emits a fast path for operands that are short (unboxed) ints at
    runtime and falls back to a C helper call for boxed ints.
    """
    if (is_short_int_rprimitive(lhs.type) and is_short_int_rprimitive(rhs.type)):
        # Both statically known short: one machine comparison suffices.
        return self.comparison_op(lhs, rhs, int_comparison_op_mapping[op][0], line)
    (op_type, c_func_desc, negate_result, swap_op) = int_comparison_op_mapping[op]
    result = Register(bool_rprimitive)
    (short_int_block, int_block, out) = (BasicBlock(), BasicBlock(), BasicBlock())
    check_lhs = self.check_tagged_short_int(lhs, line)
    if (op in ('==', '!=')):
        # For (in)equality only the lhs tag bit is checked.
        check = check_lhs
    else:
        # Order comparisons need both operands short for the fast path.
        check_rhs = self.check_tagged_short_int(rhs, line)
        check = self.int_op(bit_rprimitive, check_lhs, check_rhs, IntOp.AND, line)
    self.add(Branch(check, short_int_block, int_block, Branch.BOOL))
    self.activate_block(short_int_block)
    eq = self.comparison_op(lhs, rhs, op_type, line)
    self.add(Assign(result, eq, line))
    self.goto(out)
    self.activate_block(int_block)
    # Slow path: call the C helper (some ops are implemented as the
    # swapped/negated form of another).
    if swap_op:
        args = [rhs, lhs]
    else:
        args = [lhs, rhs]
    call = self.call_c(c_func_desc, args, line)
    if negate_result:
        call_result = self.unary_op(call, 'not', line)
    else:
        call_result = call
    self.add(Assign(result, call_result, line))
    self.goto_and_activate(out)
    return result
def compare_tagged_condition(self, lhs: Value, rhs: Value, op: str, true: BasicBlock, false: BasicBlock, line: int) -> None:
    """Like compare_tagged, but branches directly to true/false blocks
    instead of materializing a bool (for comparisons used as conditions)."""
    is_eq = (op in ('==', '!='))
    if ((is_short_int_rprimitive(lhs.type) and is_short_int_rprimitive(rhs.type)) or (is_eq and (is_short_int_rprimitive(lhs.type) or is_short_int_rprimitive(rhs.type)))):
        # Fast path: a single machine comparison decides the branch.
        check = self.comparison_op(lhs, rhs, int_comparison_op_mapping[op][0], line)
        self.flush_keep_alives()
        self.add(Branch(check, true, false, Branch.BOOL))
        return
    (op_type, c_func_desc, negate_result, swap_op) = int_comparison_op_mapping[op]
    (int_block, short_int_block) = (BasicBlock(), BasicBlock())
    # negated=True: the check is true when lhs is a boxed (long) int.
    check_lhs = self.check_tagged_short_int(lhs, line, negated=True)
    if (is_eq or is_short_int_rprimitive(rhs.type)):
        # Only the lhs tag bit needs to be inspected.
        self.flush_keep_alives()
        self.add(Branch(check_lhs, int_block, short_int_block, Branch.BOOL))
    else:
        # Check both operands; either boxed operand takes the slow path.
        rhs_block = BasicBlock()
        self.add(Branch(check_lhs, int_block, rhs_block, Branch.BOOL))
        self.activate_block(rhs_block)
        check_rhs = self.check_tagged_short_int(rhs, line, negated=True)
        self.flush_keep_alives()
        self.add(Branch(check_rhs, int_block, short_int_block, Branch.BOOL))
    self.activate_block(int_block)
    if swap_op:
        args = [rhs, lhs]
    else:
        args = [lhs, rhs]
    call = self.call_c(c_func_desc, args, line)
    if negate_result:
        # The helper computes the inverse comparison: swap branch targets.
        self.add(Branch(call, false, true, Branch.BOOL))
    else:
        self.flush_keep_alives()
        self.add(Branch(call, true, false, Branch.BOOL))
    self.activate_block(short_int_block)
    eq = self.comparison_op(lhs, rhs, op_type, line)
    self.add(Branch(eq, true, false, Branch.BOOL))
def compare_strings(self, lhs: Value, rhs: Value, op: str, line: int) -> Value:
    """Compare two strings; only '==' is special-cased, any other op is
    lowered as '!='.

    A -1 from the C compare may be either a genuine "less than" or an
    error, so an exception-pending check disambiguates before the final
    comparison against 0.
    """
    compare_result = self.call_c(unicode_compare, [lhs, rhs], line)
    error_constant = Integer((- 1), c_int_rprimitive, line)
    compare_error_check = self.add(ComparisonOp(compare_result, error_constant, ComparisonOp.EQ, line))
    (exception_check, propagate, final_compare) = (BasicBlock(), BasicBlock(), BasicBlock())
    branch = Branch(compare_error_check, exception_check, final_compare, Branch.BOOL)
    branch.negated = False
    self.add(branch)
    self.activate_block(exception_check)
    # Result was -1: only a real error if a Python exception is set.
    check_error_result = self.call_c(err_occurred_op, [], line)
    null = Integer(0, pointer_rprimitive, line)
    compare_error_check = self.add(ComparisonOp(check_error_result, null, ComparisonOp.NEQ, line))
    branch = Branch(compare_error_check, propagate, final_compare, Branch.BOOL)
    branch.negated = False
    self.add(branch)
    self.activate_block(propagate)
    self.call_c(keep_propagating_op, [], line)
    self.goto(final_compare)
    self.activate_block(final_compare)
    op_type = (ComparisonOp.EQ if (op == '==') else ComparisonOp.NEQ)
    return self.add(ComparisonOp(compare_result, Integer(0, c_int_rprimitive), op_type, line))
def compare_bytes(self, lhs: Value, rhs: Value, op: str, line: int) -> Value:
    """Emit an equality comparison of two bytes values ('==' or '!=')."""
    outcome = self.call_c(bytes_compare, [lhs, rhs], line)
    if op == '==':
        kind = ComparisonOp.EQ
    else:
        kind = ComparisonOp.NEQ
    # The C helper's result is tested against 1 (its "equal" value).
    return self.add(ComparisonOp(outcome, Integer(1, c_int_rprimitive), kind, line))
def compare_tuples(self, lhs: Value, rhs: Value, op: str, line: int=(- 1)) -> Value:
    """Elementwise '==' / '!=' of two fixed-length (RTuple) values,
    short-circuiting on the first element that decides the outcome."""
    assert (isinstance(lhs.type, RTuple) and isinstance(rhs.type, RTuple))
    equal = (True if (op == '==') else False)
    result = Register(bool_rprimitive)
    # Two empty tuples always compare equal.
    if ((len(lhs.type.types) == 0) and (len(rhs.type.types) == 0)):
        self.add(Assign(result, (self.true() if equal else self.false()), line))
        return result
    length = len(lhs.type.types)
    (false_assign, true_assign, out) = (BasicBlock(), BasicBlock(), BasicBlock())
    check_blocks = [BasicBlock() for _ in range(length)]
    lhs_items = [self.add(TupleGet(lhs, i, line)) for i in range(length)]
    rhs_items = [self.add(TupleGet(rhs, i, line)) for i in range(length)]
    if equal:
        # '==': the first mismatching element decides False.
        (early_stop, final) = (false_assign, true_assign)
    else:
        # '!=': the first mismatching element decides True.
        (early_stop, final) = (true_assign, false_assign)
    for i in range(len(lhs.type.types)):
        if (i != 0):
            self.activate_block(check_blocks[i])
        lhs_item = lhs_items[i]
        rhs_item = rhs_items[i]
        compare = self.binary_op(lhs_item, rhs_item, op, line)
        # Normalize non-bool comparison results (e.g. from a custom __eq__).
        if (not is_bool_rprimitive(compare.type)):
            compare = self.call_c(bool_op, [compare], line)
        if (i < (len(lhs.type.types) - 1)):
            branch = Branch(compare, early_stop, check_blocks[(i + 1)], Branch.BOOL)
        else:
            branch = Branch(compare, early_stop, final, Branch.BOOL)
        branch.negated = equal
        self.add(branch)
    self.activate_block(false_assign)
    self.add(Assign(result, self.false(), line))
    self.goto(out)
    self.activate_block(true_assign)
    self.add(Assign(result, self.true(), line))
    self.goto_and_activate(out)
    return result
def translate_instance_contains(self, inst: Value, item: Value, op: str, line: int) -> Value:
    """Lower ``item in inst`` / ``item not in inst`` via __contains__."""
    contained = self.gen_method_call(inst, '__contains__', [item], None, line)
    if not is_bool_rprimitive(contained.type):
        contained = self.call_c(bool_op, [contained], line)
    if op != 'not in':
        return contained
    # 'not in': invert the result by XOR-ing with 1.
    return self.bool_bitwise_op(contained, Integer(1, rtype=bool_rprimitive), '^', line)
def bool_bitwise_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value:
    """Emit a bitwise op ('&', '|' or '^') between two bool values."""
    opcode_by_symbol = {'&': IntOp.AND, '|': IntOp.OR, '^': IntOp.XOR}
    code = opcode_by_symbol.get(op)
    assert code is not None, op
    return self.add(IntOp(bool_rprimitive, lreg, rreg, code, line))
def bool_comparison_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value:
    """Emit a comparison of two bools using the signed comparison kind."""
    return self.comparison_op(lreg, rreg, ComparisonOp.signed_ops[op], line)
def unary_not(self, value: Value, line: int) -> Value:
    """Emit logical negation of a bool/bit by XOR-ing with 1."""
    one = Integer(1, value.type, line)
    return self.int_op(value.type, value, one, IntOp.XOR, line)
def unary_op(self, value: Value, expr_op: str, line: int) -> Value:
    """Lower a unary operation ('-', '+', '~', 'not'), trying progressively
    more general strategies before falling back to a registered C op."""
    typ = value.type
    if (is_bool_rprimitive(typ) or is_bit_rprimitive(typ)):
        if (expr_op == 'not'):
            return self.unary_not(value, line)
        if (expr_op == '+'):
            return value
    if is_fixed_width_rtype(typ):
        if (expr_op == '-'):
            # Negate as 0 - x.
            return self.int_op(typ, Integer(0, typ), value, IntOp.SUB, line)
        elif (expr_op == '~'):
            if typ.is_signed:
                # Signed bitwise not: x ^ -1.
                return self.int_op(typ, value, Integer((- 1), typ), IntOp.XOR, line)
            else:
                # Unsigned: XOR against an all-ones mask of the type's width.
                mask = ((1 << (typ.size * 8)) - 1)
                return self.int_op(typ, value, Integer(mask, typ), IntOp.XOR, line)
        elif (expr_op == '+'):
            return value
    if is_float_rprimitive(typ):
        if (expr_op == '-'):
            return self.add(FloatNeg(value, line))
        elif (expr_op == '+'):
            return value
    if isinstance(value, Integer):
        # Constant-fold integer literals.
        # NOTE(review): this folds to negation regardless of expr_op;
        # presumably only '-' reaches here with a literal — confirm.
        num = value.value
        if is_short_int_rprimitive(typ):
            # Short int literals carry the tag bit; shift it out first.
            num >>= 1
        return Integer((- num), typ, value.line)
    if (is_tagged(typ) and (expr_op == '+')):
        return value
    if isinstance(value, Float):
        return Float((- value.value), value.line)
    if isinstance(typ, RInstance):
        # Dispatch to the matching dunder on native classes, if defined.
        if (expr_op == '-'):
            method = '__neg__'
        elif (expr_op == '+'):
            method = '__pos__'
        elif (expr_op == '~'):
            method = '__invert__'
        else:
            method = ''
        if (method and typ.class_ir.has_method(method)):
            return self.gen_method_call(value, method, [], None, line)
    # Generic fallback: a registered C primitive for the operator.
    call_c_ops_candidates = unary_ops.get(expr_op, [])
    target = self.matching_call_c(call_c_ops_candidates, [value], line)
    assert target, ('Unsupported unary operation: %s' % expr_op)
    return target
def make_dict(self, key_value_pairs: Sequence[DictEntry], line: int) -> Value:
    """Build a dict from display entries.

    A None key denotes a '**'-style entry whose value is merged via
    dict_update_in_display_op; literal entries before the first merge are
    batched into a single dict-building call.
    """
    result: (Value | None) = None
    keys: list[Value] = []
    values: list[Value] = []
    for (key, value) in key_value_pairs:
        if (key is not None):
            if (result is None):
                # Still batching the leading literal entries.
                keys.append(key)
                values.append(value)
                continue
            # Dict already materialized: add via __setitem__.
            self.translate_special_method_call(result, '__setitem__', [key, value], result_type=None, line=line)
        else:
            if (result is None):
                # First '**' entry: materialize everything batched so far.
                result = self._create_dict(keys, values, line)
            self.call_c(dict_update_in_display_op, [result, value], line=line)
    if (result is None):
        result = self._create_dict(keys, values, line)
    return result
def new_list_op_with_length(self, length: Value, line: int) -> Value:
    """Emit a call that creates a new list of the given length."""
    return self.call_c(new_list_op, [length], line)
def new_list_op(self, values: list[Value], line: int) -> Value:
    """Create a list containing the given values.

    Long lists use a single C builder call; short ones are filled by
    writing boxed items directly into the fresh list's item array.
    """
    length: list[Value] = [Integer(len(values), c_pyssize_t_rprimitive, line)]
    if (len(values) >= LIST_BUILDING_EXPANSION_THRESHOLD):
        return self.call_c(list_build_op, (length + values), line)
    # 'new_list_op' below is the module-level C op, not this method.
    result_list = self.call_c(new_list_op, length, line)
    if (not values):
        return result_list
    args = [self.coerce(item, object_rprimitive, line) for item in values]
    # Load the base address of the list's ob_item array once.
    ob_item_ptr = self.add(GetElementPtr(result_list, PyListObject, 'ob_item', line))
    ob_item_base = self.add(LoadMem(pointer_rprimitive, ob_item_ptr, line))
    for i in range(len(values)):
        if (i == 0):
            item_address = ob_item_base
        else:
            # Address of slot i: base + i * pointer size.
            offset = Integer((PLATFORM_SIZE * i), c_pyssize_t_rprimitive, line)
            item_address = self.add(IntOp(pointer_rprimitive, ob_item_base, offset, IntOp.ADD, line))
        self.add(SetMem(object_rprimitive, item_address, args[i], line))
    # Keep the list alive across the raw memory writes.
    self.add(KeepAlive([result_list]))
    return result_list
def new_set_op(self, values: list[Value], line: int) -> Value:
    """Create a new set containing the given values.

    (The ``new_set_op`` referenced in the body resolves to the
    module-level C op description, not to this method.)
    """
    return self.call_c(new_set_op, values, line)
def setup_rarray(self, item_type: RType, values: Sequence[Value], *, object_ptr: bool=False) -> Value:
    """Allocate a fixed-size array of the given values and return its address."""
    backing = Register(RArray(item_type, len(values)))
    self.add(AssignMulti(backing, list(values)))
    if object_ptr:
        pointer_type = object_pointer_rprimitive
    else:
        pointer_type = c_pointer_rprimitive
    return self.add(LoadAddress(pointer_type, backing))
def shortcircuit_helper(self, op: str, expr_type: RType, left: Callable[([], Value)], right: Callable[([], Value)], line: int) -> Value:
    """Lower a short-circuiting 'and'/'or' expression.

    left/right are thunks so that the right operand is only evaluated on
    the branch that actually needs it.
    """
    target = Register(expr_type)
    (left_body, right_body, next_block) = (BasicBlock(), BasicBlock(), BasicBlock())
    # 'and' evaluates the right side when the left is truthy; 'or' when falsy.
    (true_body, false_body) = ((right_body, left_body) if (op == 'and') else (left_body, right_body))
    left_value = left()
    self.add_bool_branch(left_value, true_body, false_body)
    self.activate_block(left_body)
    left_coerced = self.coerce(left_value, expr_type, line)
    self.add(Assign(target, left_coerced))
    self.goto(next_block)
    self.activate_block(right_body)
    right_value = right()
    right_coerced = self.coerce(right_value, expr_type, line)
    self.add(Assign(target, right_coerced))
    self.goto(next_block)
    self.activate_block(next_block)
    return target
def bool_value(self, value: Value) -> Value:
    """Lower the truthiness of an arbitrary value to a bool/bit.

    Specializes common primitive types; Optional values branch on None and
    recurse on the unwrapped value; anything else uses the generic C op.
    """
    if (is_bool_rprimitive(value.type) or is_bit_rprimitive(value.type)):
        result = value
    elif is_runtime_subtype(value.type, int_rprimitive):
        # Tagged int: any nonzero representation is truthy.
        zero = Integer(0, short_int_rprimitive)
        result = self.comparison_op(value, zero, ComparisonOp.NEQ, value.line)
    elif is_fixed_width_rtype(value.type):
        zero = Integer(0, value.type)
        result = self.add(ComparisonOp(value, zero, ComparisonOp.NEQ))
    elif is_same_type(value.type, str_rprimitive):
        result = self.call_c(str_check_if_true, [value], value.line)
    elif (is_same_type(value.type, list_rprimitive) or is_same_type(value.type, dict_rprimitive)):
        # Containers are truthy iff non-empty.
        length = self.builtin_len(value, value.line)
        zero = Integer(0)
        result = self.binary_op(length, zero, '!=', value.line)
    elif (isinstance(value.type, RInstance) and value.type.class_ir.is_ext_class and value.type.class_ir.has_method('__bool__')):
        result = self.gen_method_call(value, '__bool__', [], bool_rprimitive, value.line)
    elif is_float_rprimitive(value.type):
        result = self.compare_floats(value, Float(0.0), FloatComparisonOp.NEQ, value.line)
    else:
        value_type = optional_value_type(value.type)
        if (value_type is not None):
            # Optional[T]: None is falsy; otherwise unwrap and recurse.
            not_none = self.translate_is_op(value, self.none_object(), 'is not', value.line)
            always_truthy = False
            if isinstance(value_type, RInstance):
                # No __bool__ and no way to add one later: instance is
                # always truthy, so the None check alone decides.
                if ((not value_type.class_ir.has_method('__bool__')) and value_type.class_ir.is_method_final('__bool__')):
                    always_truthy = True
            if always_truthy:
                result = not_none
            else:
                result = Register(bit_rprimitive)
                (true, false, end) = (BasicBlock(), BasicBlock(), BasicBlock())
                branch = Branch(not_none, true, false, Branch.BOOL)
                self.add(branch)
                self.activate_block(true)
                remaining = self.unbox_or_cast(value, value_type, value.line)
                as_bool = self.bool_value(remaining)
                self.add(Assign(result, as_bool))
                self.goto(end)
                self.activate_block(false)
                self.add(Assign(result, Integer(0, bit_rprimitive)))
                self.goto(end)
                self.activate_block(end)
        else:
            # Generic fallback via the C bool conversion.
            result = self.call_c(bool_op, [value], value.line)
    return result
def add_bool_branch(self, value: Value, true: BasicBlock, false: BasicBlock) -> None:
    """Branch on the truthiness of an arbitrary value."""
    opt_value_type = optional_value_type(value.type)
    if (opt_value_type is None):
        bool_value = self.bool_value(value)
        self.add(Branch(bool_value, true, false, Branch.BOOL))
    else:
        # Optional[T]: test for None first.
        # (Despite the name, is_none holds the result of 'is not None'.)
        is_none = self.translate_is_op(value, self.none_object(), 'is not', value.line)
        branch = Branch(is_none, true, false, Branch.BOOL)
        self.add(branch)
        always_truthy = False
        if isinstance(opt_value_type, RInstance):
            # No __bool__ and none can be added: non-None implies truthy.
            if ((not opt_value_type.class_ir.has_method('__bool__')) and opt_value_type.class_ir.is_method_final('__bool__')):
                always_truthy = True
        if (not always_truthy):
            # Not None but possibly still falsy: retarget the true edge to a
            # new block that tests the unwrapped value.
            branch.true = BasicBlock()
            self.activate_block(branch.true)
            remaining = self.unbox_or_cast(value, opt_value_type, value.line)
            self.add_bool_branch(remaining, true, false)
def call_c(self, desc: CFunctionDescription, args: list[Value], line: int, result_type: (RType | None)=None) -> Value:
    """Emit a call to the C function described by desc.

    Handles argument coercion/reordering, variable args, extra constant
    args, error checking, result truncation and result coercion.
    """
    coerced = []
    # Coerce the fixed (formal) arguments.
    for i in range(min(len(args), len(desc.arg_types))):
        formal_type = desc.arg_types[i]
        arg = args[i]
        arg = self.coerce(arg, formal_type, line)
        coerced.append(arg)
    if (desc.ordering is not None):
        # Reorder per the description (incompatible with varargs).
        assert (desc.var_arg_type is None)
        coerced = [coerced[i] for i in desc.ordering]
    var_arg_idx = (- 1)
    if (desc.var_arg_type is not None):
        # Remaining actuals become variable arguments.
        var_arg_idx = len(desc.arg_types)
        for i in range(len(desc.arg_types), len(args)):
            arg = args[i]
            arg = self.coerce(arg, desc.var_arg_type, line)
            coerced.append(arg)
    for item in desc.extra_int_constants:
        (val, typ) = item
        extra_int_constant = Integer(val, typ, line)
        coerced.append(extra_int_constant)
    error_kind = desc.error_kind
    if (error_kind == ERR_NEG_INT):
        # Negative-int errors are checked explicitly below, not by CallC.
        error_kind = ERR_NEVER
    target = self.add(CallC(desc.c_function_name, coerced, desc.return_type, desc.steals, desc.is_borrowed, error_kind, line, var_arg_idx))
    if desc.is_borrowed:
        # Keep non-constant args alive while the borrowed result is in use.
        for arg in coerced:
            if (not isinstance(arg, (Integer, LoadLiteral))):
                self.keep_alives.append(arg)
    if (desc.error_kind == ERR_NEG_INT):
        # A negative return value signals an error.
        comp = ComparisonOp(target, Integer(0, desc.return_type, line), ComparisonOp.SGE, line)
        comp.error_kind = ERR_FALSE
        self.add(comp)
    if (desc.truncated_type is None):
        result = target
    else:
        truncate = self.add(Truncate(target, desc.truncated_type))
        result = truncate
    if (result_type and (not is_runtime_subtype(result.type, result_type))):
        if is_none_rprimitive(result_type):
            result = self.none()
        else:
            # Note: coerces the raw (untruncated) call result.
            result = self.coerce(target, result_type, line, can_borrow=desc.is_borrowed)
    return result
def matching_call_c(self, candidates: list[CFunctionDescription], args: list[Value], line: int, result_type: (RType | None)=None, can_borrow: bool=False) -> (Value | None):
    """Pick the best-matching C op for the actual argument types and emit a
    call to it; return None when nothing matches."""
    matching: (CFunctionDescription | None) = None
    for desc in candidates:
        if (len(desc.arg_types) != len(args)):
            continue
        if (all((is_subtype(actual.type, formal) for (actual, formal) in zip(args, desc.arg_types))) and ((not desc.is_borrowed) or can_borrow)):
            if matching:
                # Multiple matches are resolved by explicit priorities;
                # equal priorities would make the choice ambiguous.
                assert (matching.priority != desc.priority), 'Ambiguous:\n1) {}\n2) {}'.format(matching, desc)
                if (desc.priority > matching.priority):
                    matching = desc
            else:
                matching = desc
    if matching:
        target = self.call_c(matching, args, line, result_type)
        return target
    return None
def int_op(self, type: RType, lhs: Value, rhs: Value, op: int, line: int=(- 1)) -> Value:
    """Emit a low-level integer binary op of the given IntOp kind."""
    return self.add(IntOp(type, lhs, rhs, op, line))
def float_op(self, lhs: Value, rhs: Value, op: str, line: int) -> Value:
    """Emit a float arithmetic op, guarding '/' and '%' against a zero
    divisor at runtime (skipped for a provably non-zero literal)."""
    op_id = float_op_to_id[op]
    if (op_id in (FloatOp.DIV, FloatOp.MOD)):
        if (not (isinstance(rhs, Float) and (rhs.value != 0.0))):
            c = self.compare_floats(rhs, Float(0.0), FloatComparisonOp.EQ, line)
            (err, ok) = (BasicBlock(), BasicBlock())
            self.add(Branch(c, err, ok, Branch.BOOL, rare=True))
            self.activate_block(err)
            if (op_id == FloatOp.DIV):
                msg = 'float division by zero'
            else:
                msg = 'float modulo'
            self.add(RaiseStandardError(RaiseStandardError.ZERO_DIVISION_ERROR, msg, line))
            self.add(Unreachable())
            self.activate_block(ok)
    if (op_id == FloatOp.MOD):
        # '%' needs extra fixups to match Python's sign semantics.
        return self.float_mod(lhs, rhs, line)
    else:
        return self.add(FloatOp(lhs, rhs, op_id, line))
def float_mod(self, lhs: Value, rhs: Value, line: int) -> Value:
    """Emit a float '%' with Python semantics: a nonzero result whose sign
    differs from the divisor is shifted by the divisor, and a zero result
    takes the divisor's sign."""
    mod = self.add(FloatOp(lhs, rhs, FloatOp.MOD, line))
    res = Register(float_rprimitive)
    self.add(Assign(res, mod))
    (tricky, adjust, copysign, done) = (BasicBlock(), BasicBlock(), BasicBlock(), BasicBlock())
    is_zero = self.add(FloatComparisonOp(res, Float(0.0), FloatComparisonOp.EQ, line))
    self.add(Branch(is_zero, copysign, tricky, Branch.BOOL))
    self.activate_block(tricky)
    same_signs = self.is_same_float_signs(lhs, rhs, line)
    self.add(Branch(same_signs, done, adjust, Branch.BOOL))
    self.activate_block(adjust)
    # Signs differ: shift the raw result into the divisor's range.
    adj = self.float_op(res, rhs, '+', line)
    self.add(Assign(res, adj))
    self.add(Goto(done))
    self.activate_block(copysign)
    # Zero result: give it the divisor's sign (signed zero).
    adj = self.call_c(copysign_op, [Float(0.0), rhs], line)
    self.add(Assign(res, adj))
    self.add(Goto(done))
    self.activate_block(done)
    return res
def compare_floats(self, lhs: Value, rhs: Value, op: int, line: int) -> Value:
    """Emit a float comparison of the given FloatComparisonOp kind."""
    return self.add(FloatComparisonOp(lhs, rhs, op, line))
def fixed_width_int_op(self, type: RPrimitive, lhs: Value, rhs: Value, op: int, line: int) -> Value:
    """Emit a fixed-width int op.

    Division and modulo are special-cased: inlined when the divisor is a
    literal other than 0/-1, routed through per-width C helpers for signed
    types, and reduced to a zero-checked machine op for unsigned uint8.
    """
    lhs = self.coerce(lhs, type, line)
    rhs = self.coerce(rhs, type, line)
    if (op == IntOp.DIV):
        # Literal divisors other than 0 and -1 can't trap, so inline.
        if (isinstance(rhs, Integer) and (rhs.value not in ((- 1), 0))):
            if (not type.is_signed):
                return self.int_op(type, lhs, rhs, IntOp.DIV, line)
            else:
                return self.inline_fixed_width_divide(type, lhs, rhs, line)
        if is_int64_rprimitive(type):
            prim = int64_divide_op
        elif is_int32_rprimitive(type):
            prim = int32_divide_op
        elif is_int16_rprimitive(type):
            prim = int16_divide_op
        elif is_uint8_rprimitive(type):
            # Unsigned: machine division after an explicit zero check.
            self.check_for_zero_division(rhs, type, line)
            return self.int_op(type, lhs, rhs, op, line)
        else:
            assert False, type
        return self.call_c(prim, [lhs, rhs], line)
    if (op == IntOp.MOD):
        if (isinstance(rhs, Integer) and (rhs.value not in ((- 1), 0))):
            if (not type.is_signed):
                return self.int_op(type, lhs, rhs, IntOp.MOD, line)
            else:
                return self.inline_fixed_width_mod(type, lhs, rhs, line)
        if is_int64_rprimitive(type):
            prim = int64_mod_op
        elif is_int32_rprimitive(type):
            prim = int32_mod_op
        elif is_int16_rprimitive(type):
            prim = int16_mod_op
        elif is_uint8_rprimitive(type):
            self.check_for_zero_division(rhs, type, line)
            return self.int_op(type, lhs, rhs, op, line)
        else:
            assert False, type
        return self.call_c(prim, [lhs, rhs], line)
    # All other ops map directly to a machine op.
    return self.int_op(type, lhs, rhs, op, line)
def check_for_zero_division(self, rhs: Value, type: RType, line: int) -> None:
    """Emit a runtime guard that raises ZeroDivisionError when rhs == 0."""
    raise_block, continue_block = BasicBlock(), BasicBlock()
    is_zero = self.binary_op(rhs, Integer(0, type), '==', line)
    self.add(Branch(is_zero, raise_block, continue_block, Branch.BOOL))
    self.activate_block(raise_block)
    self.add(RaiseStandardError(RaiseStandardError.ZERO_DIVISION_ERROR, 'integer division or modulo by zero', line))
    self.add(Unreachable())
    self.activate_block(continue_block)
def inline_fixed_width_divide(self, type: RType, lhs: Value, rhs: Value, line: int) -> Value:
    """Inline signed floor division: adjust the truncating machine division
    down by one when the operand signs differ and a remainder exists."""
    res = Register(type)
    div = self.int_op(type, lhs, rhs, IntOp.DIV, line)
    self.add(Assign(res, div))
    same_signs = self.is_same_native_int_signs(type, lhs, rhs, line)
    (tricky, adjust, done) = (BasicBlock(), BasicBlock(), BasicBlock())
    self.add(Branch(same_signs, done, tricky, Branch.BOOL))
    self.activate_block(tricky)
    # res * rhs == lhs iff the division was exact (no remainder).
    mul = self.int_op(type, res, rhs, IntOp.MUL, line)
    mul_eq = self.add(ComparisonOp(mul, lhs, ComparisonOp.EQ, line))
    self.add(Branch(mul_eq, done, adjust, Branch.BOOL))
    self.activate_block(adjust)
    adj = self.int_op(type, res, Integer(1, type), IntOp.SUB, line)
    self.add(Assign(res, adj))
    self.add(Goto(done))
    self.activate_block(done)
    return res
def inline_fixed_width_mod(self, type: RType, lhs: Value, rhs: Value, line: int) -> Value:
    """Inline signed modulo with Python semantics: a nonzero machine
    remainder whose operands differ in sign is shifted by the divisor."""
    res = Register(type)
    mod = self.int_op(type, lhs, rhs, IntOp.MOD, line)
    self.add(Assign(res, mod))
    same_signs = self.is_same_native_int_signs(type, lhs, rhs, line)
    (tricky, adjust, done) = (BasicBlock(), BasicBlock(), BasicBlock())
    self.add(Branch(same_signs, done, tricky, Branch.BOOL))
    self.activate_block(tricky)
    # A zero remainder needs no sign fixup.
    is_zero = self.add(ComparisonOp(res, Integer(0, type), ComparisonOp.EQ, line))
    self.add(Branch(is_zero, done, adjust, Branch.BOOL))
    self.activate_block(adjust)
    adj = self.int_op(type, res, rhs, IntOp.ADD, line)
    self.add(Assign(res, adj))
    self.add(Goto(done))
    self.activate_block(done)
    return res
def is_same_native_int_signs(self, type: RType, a: Value, b: Value, line: int) -> Value:
    """Return a bit that is true when a and b have the same sign."""
    a_is_negative = self.add(ComparisonOp(a, Integer(0, type), ComparisonOp.SLT, line))
    b_is_negative = self.add(ComparisonOp(b, Integer(0, type), ComparisonOp.SLT, line))
    return self.add(ComparisonOp(a_is_negative, b_is_negative, ComparisonOp.EQ, line))
def is_same_float_signs(self, a: Value, b: Value, line: int) -> Value:
    """Return a bit that is true when floats a and b have the same sign."""
    a_is_negative = self.add(FloatComparisonOp(a, Float(0.0), FloatComparisonOp.LT, line))
    b_is_negative = self.add(FloatComparisonOp(b, Float(0.0), FloatComparisonOp.LT, line))
    return self.add(ComparisonOp(a_is_negative, b_is_negative, ComparisonOp.EQ, line))
def comparison_op(self, lhs: Value, rhs: Value, op: int, line: int) -> Value:
    """Emit a low-level comparison of the given ComparisonOp kind."""
    return self.add(ComparisonOp(lhs, rhs, op, line))
def builtin_len(self, val: Value, line: int, use_pyssize_t: bool=False) -> Value:
    """Emit len(val).

    Known container types read their size field directly; native class
    instances call __len__ (with a >= 0 check); anything else uses a
    generic C len.  With use_pyssize_t the raw native size is returned
    instead of a tagged short int.
    """
    typ = val.type
    size_value = None
    if (is_list_rprimitive(typ) or is_tuple_rprimitive(typ) or is_bytes_rprimitive(typ)):
        elem_address = self.add(GetElementPtr(val, PyVarObject, 'ob_size'))
        size_value = self.add(LoadMem(c_pyssize_t_rprimitive, elem_address))
        # Keep val alive across the raw field read.
        self.add(KeepAlive([val]))
    elif is_set_rprimitive(typ):
        elem_address = self.add(GetElementPtr(val, PySetObject, 'used'))
        size_value = self.add(LoadMem(c_pyssize_t_rprimitive, elem_address))
        self.add(KeepAlive([val]))
    elif is_dict_rprimitive(typ):
        size_value = self.call_c(dict_ssize_t_size_op, [val], line)
    elif is_str_rprimitive(typ):
        size_value = self.call_c(str_ssize_t_size_op, [val], line)
    if (size_value is not None):
        if use_pyssize_t:
            return size_value
        offset = Integer(1, c_pyssize_t_rprimitive, line)
        # Shift left once to turn the native size into a tagged short int.
        return self.int_op(short_int_rprimitive, size_value, offset, IntOp.LEFT_SHIFT, line)
    if isinstance(typ, RInstance):
        assert (not use_pyssize_t)
        length = self.gen_method_call(val, '__len__', [], int_rprimitive, line)
        length = self.coerce(length, int_rprimitive, line)
        # Python requires __len__ to return a non-negative value.
        (ok, fail) = (BasicBlock(), BasicBlock())
        self.compare_tagged_condition(length, Integer(0), '>=', ok, fail, line)
        self.activate_block(fail)
        self.add(RaiseStandardError(RaiseStandardError.VALUE_ERROR, '__len__() should return >= 0', line))
        self.add(Unreachable())
        self.activate_block(ok)
        return length
    if use_pyssize_t:
        return self.call_c(generic_ssize_t_len_op, [val], line)
    else:
        return self.call_c(generic_len_op, [val], line)
def new_tuple(self, items: list[Value], line: int) -> Value:
    """Emit creation of a new tuple containing the given items."""
    n_items: Value = Integer(len(items), c_pyssize_t_rprimitive)
    return self.call_c(new_tuple_op, [n_items, *items], line)
def new_tuple_with_length(self, length: Value, line: int) -> Value:
    """Emit a call that creates a new tuple with the given length."""
    return self.call_c(new_tuple_with_length_op, [length], line)
def int_to_float(self, n: Value, line: int) -> Value:
    """Convert an int value to a float via the C helper."""
    return self.call_c(int_to_float_op, [n], line)
def decompose_union_helper(self, obj: Value, rtype: RUnion, result_type: RType, process_item: Callable[([Value], Value)], line: int) -> Value:
    """Apply process_item to a union-typed value, dispatching on runtime
    type so each RInstance alternative gets a specialized path and all
    remaining alternatives share one generic (boxed) path."""
    fast_items = []
    rest_items = []
    for item in rtype.items:
        if isinstance(item, RInstance):
            fast_items.append(item)
        else:
            rest_items.append(item)
    exit_block = BasicBlock()
    result = Register(result_type)
    for (i, item) in enumerate(fast_items):
        # The last alternative needs no isinstance check.
        more_types = ((i < (len(fast_items) - 1)) or rest_items)
        if more_types:
            op = self.isinstance_native(obj, item.class_ir, line)
            (true_block, false_block) = (BasicBlock(), BasicBlock())
            self.add_bool_branch(op, true_block, false_block)
            self.activate_block(true_block)
        coerced = self.coerce(obj, item, line)
        temp = process_item(coerced)
        temp2 = self.coerce(temp, result_type, line)
        self.add(Assign(result, temp2))
        self.goto(exit_block)
        if more_types:
            self.activate_block(false_block)
    if rest_items:
        # Generic fallback for the non-RInstance members of the union.
        coerced = self.coerce(obj, object_rprimitive, line, force=True)
        temp = process_item(coerced)
        temp2 = self.coerce(temp, result_type, line)
        self.add(Assign(result, temp2))
        self.goto(exit_block)
    self.activate_block(exit_block)
    return result
def translate_special_method_call(self, base_reg: Value, name: str, args: list[Value], result_type: (RType | None), line: int, can_borrow: bool=False) -> (Value | None):
    """Try to lower a method call through a registered C primitive.

    Returns None when no primitive matches so the caller can fall back to
    a generic call.
    """
    candidates = method_call_ops.get(name, [])
    return self.matching_call_c(candidates, [base_reg, *args], line, result_type, can_borrow=can_borrow)
def translate_eq_cmp(self, lreg: Value, rreg: Value, expr_op: str, line: int) -> (Value | None):
    """Specialize '==' / '!=' between two values of the same native class.

    Returns None when the comparison can't be safely specialized
    (different types, or __eq__/__ne__ could vary at runtime).
    """
    ltype = lreg.type
    rtype = rreg.type
    if (not (isinstance(ltype, RInstance) and (ltype == rtype))):
        return None
    class_ir = ltype.class_ir
    # Non-final dunders, Python-level inheritance, or class augmentation
    # mean the comparison must go through the generic machinery.
    cmp_varies_at_runtime = ((not class_ir.is_method_final('__eq__')) or (not class_ir.is_method_final('__ne__')) or class_ir.inherits_python or class_ir.is_augmented)
    if cmp_varies_at_runtime:
        return None
    if (not class_ir.has_method('__eq__')):
        # Default object equality is identity.
        identity_ref_op = ('is' if (expr_op == '==') else 'is not')
        return self.translate_is_op(lreg, rreg, identity_ref_op, line)
    return self.gen_method_call(lreg, op_methods[expr_op], [rreg], ltype, line)
def translate_is_op(self, lreg: Value, rreg: Value, expr_op: str, line: int) -> Value:
    """Lower an identity test ('is' / 'is not') as a pointer comparison."""
    if expr_op == 'is':
        kind = ComparisonOp.EQ
    else:
        kind = ComparisonOp.NEQ
    boxed_lhs = self.coerce(lreg, object_rprimitive, line)
    boxed_rhs = self.coerce(rreg, object_rprimitive, line)
    return self.add(ComparisonOp(boxed_lhs, boxed_rhs, kind, line))
def _create_dict(self, keys: list[Value], values: list[Value], line: int) -> Value:
    """Emit creation of a dict from parallel key/value lists."""
    if not keys:
        return self.call_c(dict_new_op, [], line)
    # The builder op takes the entry count followed by interleaved
    # key/value arguments.
    size_value: Value = Integer(len(keys), c_pyssize_t_rprimitive)
    call_args: list[Value] = [size_value]
    for key, value in zip(keys, values):
        call_args.append(key)
        call_args.append(value)
    return self.call_c(dict_build_op, call_args, line)
def error(self, msg: str, line: int) -> None:
    """Report an error in the current module at the given line."""
    self.errors.error(msg, self.module_path, line)
def calculate_SNR(pxx_pred, f_pred, currHR, signal):
    """Signal-to-noise ratio (dB) of a predicted power spectrum relative to
    a reference rate.

    Signal power is summed over +/-0.1 Hz windows around the reference
    rate and its first harmonic; the rest of the physiological band
    (0.75-4 Hz for 'pulse', 0.08-0.5 Hz otherwise) counts as noise.
    """
    rate_hz = currHR / 60  # per-minute rate -> Hz
    freqs = f_pred
    power = pxx_pred
    fundamental = (freqs >= rate_hz - 0.1) & (freqs <= rate_hz + 0.1)
    harmonic = (freqs >= 2 * rate_hz - 0.1) & (freqs <= 2 * rate_hz + 0.1)
    sPower = np.sum(np.take(power, np.where(fundamental | harmonic)))
    if signal == 'pulse':
        band = (freqs >= 0.75) & (freqs <= 4)
    else:
        band = (freqs >= 0.08) & (freqs <= 0.5)
    allPower = np.sum(np.take(power, np.where(band)))
    return mag2db(sPower / (allPower - sPower))
class HFProxy(Proxy):
    """A torch.fx Proxy specialized for tracing HF models.

    Carries the root module's device and parameter dtype, plus an optional
    concrete ``cache`` value; when the cache is set, comparisons and
    ``len`` are answered concretely instead of being traced.
    """

    def __init__(self, node: Node, tracer: Optional[Tracer]=None):
        super().__init__(node, tracer=tracer)
        if (hasattr(self, 'tracer') and (self.tracer is not None)):
            # Mirror the traced root module's device and parameter dtype.
            self.device = self.tracer.root.device
            self.dtype = next(self.tracer.root.parameters()).dtype
        self.cache = None

    @property
    def shape(self):
        # Property restored (the bare `@` of the decorator was lost in the
        # source): `.shape` mirrors `Tensor.shape`, an attribute, not a call.
        return self.size()

    def __setitem__(self, key, value):
        # Item assignment is a no-op during tracing.
        pass

    def __contains__(self, key):
        # Membership tests are answered negatively during tracing.
        return False

    def __eq__(self, other):
        if self.cache is not None:
            # A concrete cached value: compare for real.
            return self.cache == other
        if isinstance(other, HFProxy):
            # Two abstract proxies are considered interchangeable.
            return True
        return super().__eq__(other)

    def __ne__(self, other):
        return not (self == other)

    def __len__(self):
        if self.cache is not None:
            if isinstance(self.cache, int):
                return self.cache
            if isinstance(self.cache, (torch.Size, list, tuple)):
                return len(self.cache)
        # Bug fix: `super().__len__` is already bound to self; the original
        # passed `self` again, which would raise a TypeError.
        return super().__len__()

    def __torch_function__(self, orig_method, types, args=None, kwargs=None):
        proxy = super().__torch_function__(orig_method, types, args=args, kwargs=kwargs)
        # Propagate the concrete cache onto derived proxies.
        proxy.cache = self.cache
        return proxy
class AbbeMaterial(Material):
    """Optical material defined by a refractive index ``n`` and Abbe
    number ``v``, with dispersion modeled linearly between
    ``lambda_short`` and ``lambda_long`` around ``lambda_ref``."""

    def __init__(self, n=1.0, v=np.inf, lambda_ref=lambda_d, lambda_long=lambda_C, lambda_short=lambda_F, **kwargs):
        super().__init__(**kwargs)
        self.n = n  # refractive index at lambda_ref
        self.v = v  # Abbe number (np.inf makes the dispersion term vanish)
        self.lambda_ref = lambda_ref
        self.lambda_short = lambda_short
        self.lambda_long = lambda_long

    @classmethod
    def from_string(cls, txt, name=None):
        """Parse ``"n"`` or ``"n/v"`` into an AbbeMaterial.

        Raises ValueError for any other number of '/'-separated fields.
        (Decorator restored: the first parameter is the class, not an
        instance.)
        """
        txt = str(txt)
        val = [float(_) for _ in txt.split('/')]
        if len(val) == 1:
            (n,) = val
            v = np.inf
        elif len(val) == 2:
            # Bug fix: this was a plain `if`, so the single-field case fell
            # through to the `else` below and always raised ValueError.
            (n, v) = val
        else:
            raise ValueError
        if name is None:
            name = '-'
        return cls(name=name, n=n, v=v)

    # NOTE(review): the original had a stray `_cache(maxsize=1024)` expression
    # here — almost certainly a caching decorator whose `@` was lost. The bare
    # expression would raise NameError at class-definition time, so it is
    # dropped; restore the project's cache decorator if this method is hot.
    def refractive_index(self, wavelength):
        """Return the linearly-dispersed refractive index at ``wavelength``."""
        return (self.n + ((((wavelength - self.lambda_ref) / (self.lambda_long - self.lambda_short)) * (1 - self.n)) / self.v))

    def dict(self):
        """Serialize to a dict, omitting wavelengths left at their defaults."""
        dat = super().dict()
        dat['n'] = self.n
        dat['v'] = self.v
        if self.lambda_ref != lambda_d:
            dat['lambda_ref'] = self.lambda_ref
        if self.lambda_short != lambda_F:
            dat['lambda_short'] = self.lambda_short
        if self.lambda_long != lambda_C:
            dat['lambda_long'] = self.lambda_long
        return dat
class AverageLearner(BaseLearner):
def __init__(self, function: Callable[([int], Real)], atol: (float | None)=None, rtol: (float | None)=None, min_npoints: int=2) -> None:
if ((atol is None) and (rtol is None)):
raise Exception('At least one of `atol` and `rtol` should be set.')
if (atol is None):
atol = np.inf
if (rtol is None):
rtol = np.inf
self.data = {}
self.pending_points = set()
self.function = function
self.atol = atol
self.rtol = rtol
self.npoints = 0
self.min_npoints = max(min_npoints, 2)
self.sum_f: Real = 0.0
self.sum_f_sq: Real = 0.0
def new(self) -> AverageLearner:
return AverageLearner(self.function, self.atol, self.rtol, self.min_npoints)
def n_requested(self) -> int:
return (self.npoints + len(self.pending_points))
def to_numpy(self):
return np.array(sorted(self.data.items()))
def to_dataframe(self, with_default_function_args: bool=True, function_prefix: str='function.', seed_name: str='seed', y_name: str='y') -> pandas.DataFrame:
if (not with_pandas):
raise ImportError('pandas is not installed.')
df = pandas.DataFrame(sorted(self.data.items()), columns=[seed_name, y_name])
df.attrs['inputs'] = [seed_name]
df.attrs['output'] = y_name
if with_default_function_args:
assign_defaults(self.function, df, function_prefix)
return df
def load_dataframe(self, df: pandas.DataFrame, with_default_function_args: bool=True, function_prefix: str='function.', seed_name: str='seed', y_name: str='y'):
self.tell_many(df[seed_name].values, df[y_name].values)
if with_default_function_args:
self.function = partial_function_from_dataframe(self.function, df, function_prefix)
def ask(self, n: int, tell_pending: bool=True) -> tuple[(list[int], list[Float])]:
points = list(range(self.n_requested, (self.n_requested + n)))
if any((((p in self.data) or (p in self.pending_points)) for p in points)):
points = list(((set(range((self.n_requested + n))) - set(self.data)) - set(self.pending_points)))[:n]
loss_improvements = ([(self._loss_improvement(n) / n)] * n)
if tell_pending:
for p in points:
self.tell_pending(p)
return (points, loss_improvements)
def tell(self, n: Int, value: Real) -> None:
if (n in self.data):
return
self.data[n] = value
self.pending_points.discard(n)
self.sum_f += value
self.sum_f_sq += (value ** 2)
self.npoints += 1
def tell_pending(self, n: int) -> None:
self.pending_points.add(n)
def mean(self) -> Float:
return (self.sum_f / self.npoints)
def std(self) -> Float:
n = self.npoints
if (n < self.min_npoints):
return np.inf
numerator = (self.sum_f_sq - (n * (self.mean ** 2)))
if (numerator < 0):
return 0
return sqrt((numerator / (n - 1)))
def loss(self, real: bool=True, *, n=None) -> Float:
    """Scaled standard error of the mean, the learner's loss.

    The stray bare token ``_latest`` (a garbled decorator remnant that
    would raise ``NameError`` at class-creation time) and the redundant
    ``else: n = n`` branch were removed.
    """
    if n is None:
        n = self.npoints if real else self.n_requested
    if n < self.min_npoints:
        return np.inf
    standard_error = self.std / sqrt(n)
    # Absolute and relative tolerances; the relative loss is normalized
    # by the magnitude of the mean when it is nonzero.
    aloss = standard_error / self.atol
    rloss = standard_error / self.rtol
    mean = self.mean
    if mean != 0:
        rloss /= abs(mean)
    return max(aloss, rloss)
def _loss_improvement(self, n: int) -> Float:
    """Expected loss decrease from evaluating *n* additional points."""
    current = self.loss()
    if not np.isfinite(current):
        return np.inf
    return current - self.loss(n=self.npoints + n)
def remove_unfinished(self):
    """Forget every pending (requested but unevaluated) point."""
    self.pending_points = set()
def plot(self):
    """Return a holoviews histogram of the observed values."""
    hv = ensure_holoviews()
    values = [v for v in self.data.values() if v is not None]
    if not values:
        # Nothing observed yet: return an empty histogram.
        return hv.Histogram([[], []])
    num_bins = int(max(5, sqrt(self.npoints)))
    points = hv.Points(values)
    return hv.operation.histogram(points, num_bins=num_bins, dimension='y')
def _get_data(self) -> tuple[(dict[(int, Real)], int, Real, Real)]:
    """Internal state as ``(data, npoints, sum_f, sum_f_sq)``."""
    return (self.data, self.npoints, self.sum_f, self.sum_f_sq)
def _set_data(self, data: tuple[(dict[(int, Real)], int, Real, Real)]) -> None:
    """Restore internal state from a ``_get_data`` tuple."""
    (self.data, self.npoints, self.sum_f, self.sum_f_sq) = data
def __getstate__(self):
    """Pickle support: cloudpickle the function alongside settings and data."""
    pickled_function = cloudpickle.dumps(self.function)
    return (pickled_function, self.atol, self.rtol, self.min_npoints, self._get_data())
def __setstate__(self, state):
    """Pickle support: rebuild the learner, then restore its data."""
    (function, atol, rtol, min_npoints, data) = state
    self.__init__(cloudpickle.loads(function), atol, rtol, min_npoints)
    self._set_data(data)
def test_driven_control_default_values():
    """Omitted DrivenControl arrays default to zeros; invalid inputs raise."""
    rabi_rates = np.array([np.pi, np.pi, 0])
    azimuthal_angles = np.array([np.pi / 2, 0, -np.pi])
    detunings = np.array([0, 0, 0])
    durations = np.array([1, 2, 3])
    name = 'driven_control'
    zeros = np.zeros(3)

    # rabi_rates omitted -> defaults to zeros
    control = DrivenControl(rabi_rates=None, azimuthal_angles=azimuthal_angles, detunings=detunings, durations=durations, name=name)
    assert np.allclose(control.rabi_rates, zeros)
    assert np.allclose(control.durations, durations)
    assert np.allclose(control.detunings, detunings)
    assert np.allclose(control.azimuthal_angles, azimuthal_angles)

    # azimuthal_angles omitted -> defaults to zeros
    control = DrivenControl(rabi_rates=rabi_rates, azimuthal_angles=None, detunings=detunings, durations=durations, name=name)
    assert np.allclose(control.rabi_rates, rabi_rates)
    assert np.allclose(control.durations, durations)
    assert np.allclose(control.detunings, detunings)
    assert np.allclose(control.azimuthal_angles, zeros)

    # detunings omitted -> defaults to zeros
    control = DrivenControl(rabi_rates=rabi_rates, azimuthal_angles=azimuthal_angles, detunings=None, durations=durations, name=name)
    assert np.allclose(control.rabi_rates, rabi_rates)
    assert np.allclose(control.durations, durations)
    assert np.allclose(control.detunings, zeros)
    assert np.allclose(control.azimuthal_angles, azimuthal_angles)

    # everything defaulted except durations
    control = DrivenControl(durations=np.array([1]))
    assert np.allclose(control.rabi_rates, np.array([0.0]))
    assert np.allclose(control.durations, np.array([1.0]))
    assert np.allclose(control.detunings, np.array([0.0]))
    assert np.allclose(control.azimuthal_angles, np.array([0.0]))

    # negative rabi rate is rejected
    with pytest.raises(ArgumentsValueError):
        _ = DrivenControl(durations=np.array([1]), rabi_rates=np.array([-1]))
    # non-positive duration is rejected
    with pytest.raises(ArgumentsValueError):
        _ = DrivenControl(durations=np.array([0]))
    # mismatched segment counts are rejected
    with pytest.raises(ArgumentsValueError):
        _ = DrivenControl(durations=np.array([1]), rabi_rates=np.array([1, 2]), azimuthal_angles=np.array([1, 2, 3]))
class Module():
    """Descriptor for one module entry in generated API documentation."""

    def __init__(self, name, title=None, *, automodule_options=None, append=None):
        self.append = append
        self.name = name
        # Derive a title from the dotted path when none is given (skips the
        # top-level package): 'pkg.some_mod.core' -> 'Some_Mod Core'.
        self.title = title or ' '.join(map(str.title, self.name.split('.')[1:]))
        self.automodule_options = automodule_options or []

    def __repr__(self):
        return f'<{self.title} ({self.name})>'

    def asdict(self):
        """Dict representation used when rendering the apidoc templates."""
        return {'append': self.append, 'automodule': True, 'automodule_options': self.automodule_options, 'name': self.name, 'title': self.title}

    def get_path(self):
        """Filesystem path of the generated .rst file for this module.

        NOTE(review): relies on the enclosing module's ``__path__`` and
        ``apidoc_root`` globals — confirm ``__path__`` is a string here.
        """
        parts = self.name.split('.')
        return os.path.join(__path__, apidoc_root, *parts) + '.rst'
def random_date(begin: datetime.datetime, end: datetime.datetime):
    """Return a random naive datetime between *begin* and *end*.

    Resolution is whole seconds; both bounds are inclusive.
    """
    epoch = datetime.datetime(1970, 1, 1)
    lo = int((begin - epoch).total_seconds())
    hi = int((end - epoch).total_seconds())
    chosen = random.randint(lo, hi)
    return datetime.datetime.fromtimestamp(chosen)
def test_aws_session_class_endpoint():
    """AWSSession exposes a custom endpoint in its GDAL credential options."""
    pytest.importorskip('boto3')
    # Signed and unsigned sessions both carry the endpoint through.
    for extra in ({}, {'aws_unsigned': True}):
        session = AWSSession(endpoint_url='example.com', **extra)
        assert session.get_credential_options()['AWS_S3_ENDPOINT'] == 'example.com'
def _multiprocessing_managers_transform():
    """Return an astroid AST that stands in for ``multiprocessing.managers``.

    Static analyzers cannot introspect the real proxy-based manager types,
    so this parses a plain-Python mock of ``SyncManager`` (queues, locks,
    ``Value``/``Array``/``Namespace``, pools, …) for inference purposes.
    The embedded source string must stay byte-identical to what the brain
    plugin expects to parse.
    """
    return parse("\n import array\n import threading\n import multiprocessing.pool as pool\n import queue\n\n class Namespace(object):\n pass\n\n class Value(object):\n def __init__(self, typecode, value, lock=True):\n self._typecode = typecode\n self._value = value\n def get(self):\n return self._value\n def set(self, value):\n self._value = value\n def __repr__(self):\n return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)\n value = property(get, set)\n\n def Array(typecode, sequence, lock=True):\n return array.array(typecode, sequence)\n\n class SyncManager(object):\n Queue = JoinableQueue = queue.Queue\n Event = threading.Event\n RLock = threading.RLock\n Lock = threading.Lock\n BoundedSemaphore = threading.BoundedSemaphore\n Condition = threading.Condition\n Barrier = threading.Barrier\n Pool = pool.Pool\n list = list\n dict = dict\n Value = Value\n Array = Array\n Namespace = Namespace\n __enter__ = lambda self: self\n __exit__ = lambda *args: args\n\n def start(self, initializer=None, initargs=None):\n pass\n def shutdown(self):\n pass\n ")
class _StreamCloser(_Closer):
    """Closer for stream-like targets: flushes buffered data through a
    ``write`` callable and optionally closes the target on exit."""

    def __init__(self, write, close_on_exit):
        # ``write``: callable taking a chunk and returning the number of
        # items written, or None meaning "everything was written".
        self.write = write
        self.close_on_exit = close_on_exit

    def close(self, parent_close):
        super().close(parent_close)
        if self.close_on_exit:
            # Only close the target if it actually has a close() method.
            closer = getattr(self.write, 'close', None)
            if closer:
                # NOTE(review): ``self.fp`` is not assigned in this class —
                # presumably attached by the _Closer machinery; confirm.
                closer(self.fp.safer_failed)

    def _write_on_success(self, v):
        # Keep writing until the buffer is fully consumed.  A None return
        # from write() makes ``v`` falsy and terminates the loop (i.e. the
        # whole chunk was accepted in one call).
        while True:
            written = self.write(v)
            v = ((written is not None) and v[written:])
            if (not v):
                break
def test_multi_addr():
    """Symbolically execute a single 'mov [rax], rbx' and solve for rax."""
    pipe = r2pipe.open('-', flags=['-2'])
    pipe.cmd('wa mov [rax], rbx')
    esilsolver = ESILSolver(pipe, debug=True, trace=False)
    state = esilsolver.init_state()
    # Make rax symbolic and constrain it to the open interval (7, 16).
    state.set_symbolic_register('rax')
    rax = state.registers['rax']
    state.solver.add(rax > 7)
    state.solver.add(rax < 16)
    esilsolver.run(target=3)
    print(state.solver.check())
    model = state.solver.model()
    print(model.eval(rax))
class TestPolyFillArc(EndianTest):
    """Round-trip pack/unpack test for the X11 PolyFillArc request."""

    def setUp(self):
        # The 'drawable' and 'gc' values were missing from the source (a
        # syntax error).  They are decoded from req_bin_0 below, which is a
        # little-endian X11 request: opcode 0x47, pad, length=6 words,
        # drawable (0x29C15C82), gc (0x3382DB1B), then one 12-byte arc.
        self.req_args_0 = {'arcs': [{'x': (- 3276), 'y': (- 22928), 'width': 33490, 'height': 20525, 'angle1': (- 10916), 'angle2': (- 19386)}], 'drawable': 700537986, 'gc': 864213787}
        self.req_bin_0 = b'G\x00\x06\x00\x82\\\xc1)\x1b\xdb\x8234\xf3p\xa6\xd2\x82-P\\\xd5F\xb4'

    def testPackRequest0(self):
        """Packing the args must reproduce the reference bytes."""
        bin = request.PolyFillArc._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        """Parsing the reference bytes must reproduce the args exactly."""
        (args, remain) = request.PolyFillArc._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
class BaseLastTransitionLog(BaseTransitionLog):
    """Abstract transition log keeping only the latest transition per object."""

    class Meta():
        verbose_name = _('XWorkflow last transition log')
        verbose_name_plural = _('XWorkflow last transition logs')
        abstract = True

    @classmethod
    def _update_or_create(cls, unique_fields, **kwargs):
        """Upsert the single log row identified by *unique_fields*.

        Restored ``@classmethod``: the method takes ``cls`` and is invoked
        as ``cls._update_or_create(...)`` from ``log_transition``.
        """
        (last_transition, created) = cls.objects.get_or_create(defaults=kwargs, **unique_fields)
        if (not created):
            # Row already existed: refresh its fields and bump the timestamp.
            for (field, value) in kwargs.items():
                setattr(last_transition, field, value)
            last_transition.timestamp = timezone.now()
            last_transition.save()
        return last_transition

    @classmethod
    def log_transition(cls, transition, from_state, to_state, modified_object, **kwargs):
        """Create or update the last-transition record for *modified_object*."""
        kwargs.update({'transition': transition, 'from_state': from_state, 'to_state': to_state})
        non_defaults = {cls.MODIFIED_OBJECT_FIELD: modified_object}
        return cls._update_or_create(non_defaults, **kwargs)
def get_hiformer_b_configs():
    """Return the ml_collections config for the HiFormer-B model.

    Downloads the pretrained Swin-Tiny checkpoint on first use.
    """
    cfg = ml_collections.ConfigDict()
    cfg.swin_pyramid_fm = [96, 192, 384]
    cfg.image_size = 224
    cfg.patch_size = 4
    cfg.num_classes = 9
    swin_weights = './weights/swin_tiny_patch4_window7_224.pth'
    if (not os.path.isfile(swin_weights)):
        print('Downloading Swin-transformer model ...')
        # Make sure the target directory exists before downloading.
        os.makedirs('./weights', exist_ok=True)
        # The download URL was missing from the source; this is the official
        # Swin-Transformer v1.0.0 release asset for this checkpoint.
        wget.download('https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth', swin_weights)
    cfg.swin_pretrained_path = swin_weights
    cfg.cnn_backbone = 'resnet50'
    cfg.cnn_pyramid_fm = [256, 512, 1024]
    cfg.resnet_pretrained = True
    cfg.depth = [[1, 2, 0]]
    cfg.num_heads = (6, 12)
    cfg.mlp_ratio = (2.0, 2.0, 1.0)
    cfg.drop_rate = 0.0
    cfg.attn_drop_rate = 0.0
    cfg.drop_path_rate = 0.0
    cfg.qkv_bias = True
    cfg.qk_scale = None
    cfg.cross_pos_embed = True
    return cfg
class RandomMaskingGenerator():
    """Generate a random binary mask over a 1-D sequence of patches.

    Exactly ``mask_ratio * input_size`` (floored) patches are masked (1),
    the rest are kept (0); positions are shuffled uniformly at random.
    """

    def __init__(self, input_size, mask_ratio):
        # Total number of patches and how many of them to mask.
        self.num_patches = int(input_size)
        self.num_mask = int((mask_ratio * self.num_patches))

    def __repr__(self):
        # Fixed typo in the message: 'Maks' -> 'Mask'.
        repr_str = 'Mask: total patches {}, mask patches {}'.format(self.num_patches, self.num_mask)
        return repr_str

    def __call__(self):
        """Return a shuffled float array with exactly ``num_mask`` ones."""
        mask = np.hstack([np.zeros((self.num_patches - self.num_mask)), np.ones(self.num_mask)])
        np.random.shuffle(mask)
        return mask
def seek_backward(_request: WSGIRequest) -> None:
    """Seek the player backwards and shift the current song's start time
    accordingly (never past the current moment)."""
    player.seek_backward(SEEK_DISTANCE)
    try:
        song = models.CurrentSong.objects.get()
    except models.CurrentSong.DoesNotExist:
        # No song is playing; nothing to adjust.
        return
    now = timezone.now()
    shifted = song.created + datetime.timedelta(seconds=SEEK_DISTANCE)
    song.created = min(shifted, now)
    song.save()
def create_data_files(input_path: str, output_path: str, download: bool):
    """Materialize the PatchCamelyon train/valid splits as image folders."""
    input_path = os.path.expanduser(input_path)
    output_path = os.path.expanduser(output_path)
    if download:
        os.makedirs(input_path, exist_ok=True)
    # Train split (optionally downloads the raw archives first).
    train_folder = os.path.join(output_path, 'train')
    train_split = PatchCamelyon(input_path=input_path, output_path=train_folder, split='train', download=download)
    to_disk_folder_split(train_split, train_folder, num_workers=8)
    # Validation split (raw data already present after the train download).
    val_folder = os.path.join(output_path, 'val')
    val_split = PatchCamelyon(input_path=input_path, output_path=val_folder, split='valid', download=False)
    to_disk_folder_split(val_split, val_folder, num_workers=8)
def main(args):
    """Extract adversarial-detection characteristics (KD/BU/LID/K-means)
    for a trained model and save them as ``[characteristics | labels]``
    .npy arrays.

    Uses ``args.dataset``, ``args.attack``, ``args.characteristic``,
    ``args.batch_size`` and ``args.k_nearest``; requires a trained model
    and pre-crafted adversarial samples on disk.
    """
    # --- argument and file validation ---
    assert (args.dataset in ['mnist', 'cifar', 'svhn']), "Dataset parameter must be either 'mnist', 'cifar' or 'svhn'"
    assert (args.attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2', 'all']), "Attack parameter must be either 'fgsm', 'bim-a', 'bim-b', 'jsma' or 'cw-l2'"
    assert (args.characteristic in ['kd', 'bu', 'lid', 'km', 'all']), "Characteristic(s) to use 'kd', 'bu', 'lid', 'km', 'all'"
    model_file = os.path.join(PATH_DATA, ('model_%s.h5' % args.dataset))
    assert os.path.isfile(model_file), 'model file not found... must first train model using train_model.py.'
    adv_file = os.path.join(PATH_DATA, ('Adv_%s_%s.npy' % (args.dataset, args.attack)))
    assert os.path.isfile(adv_file), 'adversarial sample file not found... must first craft adversarial samples using craft_adv_samples.py'
    # --- load model, clean data, and adversarial samples ---
    print('Loading the data and model...')
    model = load_model(model_file)
    (X_train, Y_train, X_test, Y_test) = get_data(args.dataset)
    print('Loading noisy and adversarial samples...')
    if (args.attack == 'all'):
        raise NotImplementedError("'All' types detector not yet implemented.")
    else:
        X_test_adv = np.load(adv_file)
        print('X_test_adv: ', X_test_adv.shape)
    # Craft noisy counterparts of the test set, or reload a cached copy.
    noisy_file = os.path.join(PATH_DATA, ('Noisy_%s_%s.npy' % (args.dataset, args.attack)))
    if os.path.isfile(noisy_file):
        X_test_noisy = np.load(noisy_file)
    else:
        print(('Crafting %s noisy samples. ' % args.dataset))
        X_test_noisy = get_noisy_samples(X_test, X_test_adv, args.dataset, args.attack)
        np.save(noisy_file, X_test_noisy)
    # Report accuracy and mean L2 perturbation per test-set variant.
    for (s_type, dataset) in zip(['normal', 'noisy', 'adversarial'], [X_test, X_test_noisy, X_test_adv]):
        (_, acc) = model.evaluate(dataset, Y_test, batch_size=args.batch_size, verbose=0)
        print(('Model accuracy on the %s test set: %0.2f%%' % (s_type, (100 * acc))))
        if (not (s_type == 'normal')):
            l2_diff = np.linalg.norm((dataset.reshape((len(X_test), (- 1))) - X_test.reshape((len(X_test), (- 1)))), axis=1).mean()
            print(('Average L-2 perturbation size of the %s test set: %0.2f' % (s_type, l2_diff)))
    # Keep only samples the model classifies correctly on the clean set.
    preds_test = model.predict_classes(X_test, verbose=0, batch_size=args.batch_size)
    inds_correct = np.where((preds_test == Y_test.argmax(axis=1)))[0]
    print(('Number of correctly predict images: %s' % len(inds_correct)))
    X_test = X_test[inds_correct]
    X_test_noisy = X_test_noisy[inds_correct]
    X_test_adv = X_test_adv[inds_correct]
    print('X_test: ', X_test.shape)
    print('X_test_noisy: ', X_test_noisy.shape)
    print('X_test_adv: ', X_test_adv.shape)
    # --- characteristic extraction; each branch saves characteristics
    # concatenated with their labels along axis 1 ---
    if (args.characteristic == 'kd'):
        # Kernel density estimates.
        (characteristics, labels) = get_kd(model, X_train, Y_train, X_test, X_test_noisy, X_test_adv)
        print('KD: [characteristic shape: ', characteristics.shape, ', label shape: ', labels.shape)
        bandwidth = BANDWIDTHS[args.dataset]
        file_name = os.path.join(PATH_DATA, ('kd_%s_%s_%.4f.npy' % (args.dataset, args.attack, bandwidth)))
        data = np.concatenate((characteristics, labels), axis=1)
        np.save(file_name, data)
    elif (args.characteristic == 'bu'):
        # Bayesian uncertainty.
        (characteristics, labels) = get_bu(model, X_test, X_test_noisy, X_test_adv)
        print('BU: [characteristic shape: ', characteristics.shape, ', label shape: ', labels.shape)
        file_name = os.path.join(PATH_DATA, ('bu_%s_%s.npy' % (args.dataset, args.attack)))
        data = np.concatenate((characteristics, labels), axis=1)
        np.save(file_name, data)
    elif (args.characteristic == 'lid'):
        # Local intrinsic dimensionality (saved to the grid-search folder).
        (characteristics, labels) = get_lid(model, X_test, X_test_noisy, X_test_adv, args.k_nearest, args.batch_size, args.dataset)
        print('LID: [characteristic shape: ', characteristics.shape, ', label shape: ', labels.shape)
        file_name = os.path.join('../data_grid_search/lid_large_batch/', ('lid_%s_%s_%s.npy' % (args.dataset, args.attack, args.k_nearest)))
        data = np.concatenate((characteristics, labels), axis=1)
        np.save(file_name, data)
    elif (args.characteristic == 'km'):
        # K-means distance features.
        (characteristics, labels) = get_kmeans(model, X_test, X_test_noisy, X_test_adv, args.k_nearest, args.batch_size, args.dataset)
        print('K-Mean: [characteristic shape: ', characteristics.shape, ', label shape: ', labels.shape)
        file_name = os.path.join(PATH_DATA, ('km_pca_%s_%s.npy' % (args.dataset, args.attack)))
        data = np.concatenate((characteristics, labels), axis=1)
        np.save(file_name, data)
    elif (args.characteristic == 'all'):
        # Extract and save every characteristic in turn.
        (characteristics, labels) = get_kd(model, X_train, Y_train, X_test, X_test_noisy, X_test_adv)
        file_name = os.path.join(PATH_DATA, ('kd_%s_%s.npy' % (args.dataset, args.attack)))
        data = np.concatenate((characteristics, labels), axis=1)
        np.save(file_name, data)
        (characteristics, labels) = get_bu(model, X_test, X_test_noisy, X_test_adv)
        file_name = os.path.join(PATH_DATA, ('bu_%s_%s.npy' % (args.dataset, args.attack)))
        data = np.concatenate((characteristics, labels), axis=1)
        np.save(file_name, data)
        (characteristics, labels) = get_lid(model, X_test, X_test_noisy, X_test_adv, args.k_nearest, args.batch_size, args.dataset)
        file_name = os.path.join(PATH_DATA, ('lid_%s_%s.npy' % (args.dataset, args.attack)))
        data = np.concatenate((characteristics, labels), axis=1)
        np.save(file_name, data)
class RandomUsernameTest(BaseActionTest):
    """Login-flow test using a GitHub-style user payload with no username."""

    # NOTE(review): the URL values were stripped from the source, leaving a
    # syntax error ("'avatar_url': ' 'gravatar_id': ...").  The URLs below
    # are placeholders in the shape of the GitHub API example payload —
    # confirm against the original fixture.
    user_data_body = json.dumps({'id': 1, 'avatar_url': 'https://github.com/images/error/octocat_happy.gif', 'gravatar_id': 'somehexcode', 'url': 'https://api.github.com/users/monalisa', 'name': 'monalisa foobar', 'company': 'GitHub', 'blog': 'https://github.com/blog', 'location': 'San Francisco', 'email': '', 'hireable': False, 'bio': 'There once was...', 'public_repos': 2, 'public_gists': 1, 'followers': 20, 'following': 0, 'html_url': 'https://github.com/monalisa', 'created_at': '2008-01-14T04:33:35Z', 'type': 'User', 'total_private_repos': 100, 'owned_private_repos': 100, 'private_gists': 81, 'disk_usage': 10000, 'collaborators': 8, 'plan': {'name': 'Medium', 'space': 400, 'collaborators': 10, 'private_repos': 20}})

    def test_random_username(self):
        """The login flow completes even though the payload has no login."""
        self.do_login(after_complete_checks=False)
class RayExecutor():
    """Ray actor helper: environment-variable management plus arbitrary
    callable execution on the hosting node."""

    def set_env_var(self, key: str, value: str):
        """Set one environment variable; ``None`` values are ignored."""
        if value is None:
            return
        os.environ[key] = str(value)

    def set_env_vars(self, keys: List[str], values: List[str]):
        """Set several environment variables pairwise."""
        assert len(keys) == len(values)
        for key, value in zip(keys, values):
            self.set_env_var(key, value)

    def get_node_ip(self):
        """IP address of the Ray node this worker runs on."""
        return ray.util.get_node_ip_address()

    def get_node_and_gpu_ids(self):
        """Tuple of (hex node id, visible GPU ids) from the runtime context."""
        ctx = ray.get_runtime_context()
        return (ctx.node_id.hex(), ray.get_gpu_ids())

    def execute(self, fn: Callable, *args, **kwargs):
        """Run *fn* with the given arguments and return its result."""
        return fn(*args, **kwargs)
class PushTImageEnv(PushTEnv):
    """PushT environment variant whose observations are rendered RGB images
    plus the agent's 2-D position (instead of low-dimensional state)."""

    metadata = {'render.modes': ['rgb_array'], 'video.frames_per_second': 10}

    def __init__(self, legacy=False, block_cog=None, damping=None, render_size=96):
        super().__init__(legacy=legacy, block_cog=block_cog, damping=damping, render_size=render_size, render_action=False)
        ws = self.window_size
        # image: CHW float32 in [0, 1]; agent_pos: coordinates in [0, ws].
        self.observation_space = spaces.Dict({'image': spaces.Box(low=0, high=1, shape=(3, render_size, render_size), dtype=np.float32), 'agent_pos': spaces.Box(low=0, high=ws, shape=(2,), dtype=np.float32)})
        self.render_cache = None

    def _get_obs(self):
        """Render the scene and build the {'image', 'agent_pos'} observation.

        Side effect: stores a render frame (with the latest action drawn as
        a red cross marker) in ``self.render_cache`` for ``render()``.
        """
        img = super()._render_frame(mode='rgb_array')
        agent_pos = np.array(self.agent.position)
        # HWC uint8 -> CHW float32 scaled to [0, 1].
        img_obs = np.moveaxis((img.astype(np.float32) / 255), (- 1), 0)
        obs = {'image': img_obs, 'agent_pos': agent_pos}
        if (self.latest_action is not None):
            action = np.array(self.latest_action)
            # Map action coordinates into image pixels.  NOTE(review): the
            # constants assume a 512-unit action space and a 96-px render —
            # confirm they track window_size/render_size for other sizes.
            coord = ((action / 512) * 96).astype(np.int32)
            marker_size = int(((8 / 96) * self.render_size))
            thickness = int(((1 / 96) * self.render_size))
            cv2.drawMarker(img, coord, color=(255, 0, 0), markerType=cv2.MARKER_CROSS, markerSize=marker_size, thickness=thickness)
        self.render_cache = img
        return obs

    def render(self, mode):
        """Return the cached render frame, rendering once if necessary."""
        assert (mode == 'rgb_array')
        if (self.render_cache is None):
            self._get_obs()
        return self.render_cache
def update_maxmind_dbs(outdir):
    """Download the MaxMind GeoIP country and city tarballs and extract the
    first .mmdb file from each into *outdir*."""
    print('Updating the GeoIP databases from MaxMind...')
    if not MAXMIND_LICENSE_KEY:
        raise RuntimeError('No envvar MAXMIND_LICENSE_KEY. Cannot download the databases without this. Create a MaxMind account.')
    for url in (MAXMIND_COUNTRY_DATABASE, MAXMIND_CITY_DATABASE):
        resp = requests.get(url)
        if not resp.ok:
            raise RuntimeError(f'Failed to update GeoIP database: {url}. Status_code={resp.status_code}')
        with tarfile.open(mode='r:gz', fileobj=io.BytesIO(resp.content)) as tar:
            # Scan for the first .mmdb member; the for/else raises when the
            # archive contains none.
            for member in tar.getmembers():
                if not member.name.endswith('.mmdb'):
                    continue
                filename = member.name[member.name.rfind('/') + 1:]
                outpath = os.path.join(outdir, filename)
                print(f'Writing database to {outpath}...')
                buf = tar.extractfile(member)
                with open(outpath, 'wb') as fd:
                    fd.write(buf.read())
                break
            else:
                raise RuntimeError('No .mmdb file found in the download')
class DirectSDBWriter():
    """Sample-database (SDB) writer that streams samples straight to disk.

    On-disk layout: MAGIC | meta length + meta JSON | [reserved: sample
    section length, sample count] | sample entries | sample count | offset
    table | index length.  Per-sample offsets are kept in memory and the
    reserved header fields are back-filled on ``close()``.
    """

    def __init__(self, sdb_filename, buffering=BUFFER_SIZE, audio_type=AUDIO_TYPE_OPUS, bitrate=None, id_prefix=None, labeled=True):
        self.sdb_filename = sdb_filename
        # Sample ids have the form '<prefix>:<index>'; default prefix is the filename.
        self.id_prefix = (sdb_filename if (id_prefix is None) else id_prefix)
        self.labeled = labeled
        if (audio_type not in SERIALIZABLE_AUDIO_TYPES):
            raise ValueError('Audio type "{}" not supported'.format(audio_type))
        self.audio_type = audio_type
        self.bitrate = bitrate
        self.sdb_file = open(sdb_filename, 'wb', buffering=buffering)
        self.offsets = []
        self.num_samples = 0
        self.sdb_file.write(MAGIC)
        # Schema: one speech column, plus a transcript column when labeled.
        schema_entries = [{CONTENT_KEY: CONTENT_TYPE_SPEECH, MIME_TYPE_KEY: audio_type}]
        if self.labeled:
            schema_entries.append({CONTENT_KEY: CONTENT_TYPE_TRANSCRIPT, MIME_TYPE_KEY: MIME_TYPE_TEXT})
        meta_data = {SCHEMA_KEY: schema_entries}
        meta_data = json.dumps(meta_data).encode()
        self.write_big_int(len(meta_data))
        self.sdb_file.write(meta_data)
        # Remember where the sample section starts, then skip two big-ints
        # that close() back-fills (section length and sample count).
        self.offset_samples = self.sdb_file.tell()
        self.sdb_file.seek((2 * BIGINT_SIZE), 1)

    def write_int(self, n):
        # Fixed-size big-endian integer.
        return self.sdb_file.write(n.to_bytes(INT_SIZE, BIG_ENDIAN))

    def write_big_int(self, n):
        # Fixed-size big-endian "big" integer (offsets/lengths).
        return self.sdb_file.write(n.to_bytes(BIGINT_SIZE, BIG_ENDIAN))

    def __enter__(self):
        return self

    def add(self, sample):
        """Append one sample (audio + optional transcript); return its id."""
        def to_bytes(n):
            return n.to_bytes(INT_SIZE, BIG_ENDIAN)
        # Transcode the audio to the database's serialization format.
        sample.change_audio_type(self.audio_type, bitrate=self.bitrate)
        opus = sample.audio.getbuffer()
        opus_len = to_bytes(len(opus))
        # Entry = total length, audio length + bytes, then (if labeled)
        # transcript length + bytes.
        if self.labeled:
            transcript = sample.transcript.encode()
            transcript_len = to_bytes(len(transcript))
            entry_len = to_bytes((((len(opus_len) + len(opus)) + len(transcript_len)) + len(transcript)))
            buffer = b''.join([entry_len, opus_len, opus, transcript_len, transcript])
        else:
            entry_len = to_bytes((len(opus_len) + len(opus)))
            buffer = b''.join([entry_len, opus_len, opus])
        self.offsets.append(self.sdb_file.tell())
        self.sdb_file.write(buffer)
        sample.sample_id = '{}:{}'.format(self.id_prefix, self.num_samples)
        self.num_samples += 1
        return sample.sample_id

    def close(self):
        """Back-fill the header, append the offset index, and close the file.

        Safe to call more than once (no-op after the first call).
        """
        if (self.sdb_file is None):
            return
        offset_index = self.sdb_file.tell()
        # Back-fill sample-section length and count at the reserved slots.
        self.sdb_file.seek(self.offset_samples)
        self.write_big_int(((offset_index - self.offset_samples) - BIGINT_SIZE))
        self.write_big_int(self.num_samples)
        # Index section: sample count, per-sample offsets, then back-fill
        # the index's own length at its first reserved slot.
        self.sdb_file.seek((offset_index + BIGINT_SIZE))
        self.write_big_int(self.num_samples)
        for offset in self.offsets:
            self.write_big_int(offset)
        offset_end = self.sdb_file.tell()
        self.sdb_file.seek(offset_index)
        self.write_big_int(((offset_end - offset_index) - BIGINT_SIZE))
        self.sdb_file.close()
        self.sdb_file = None

    def __len__(self):
        # Number of samples written so far.
        return len(self.offsets)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
class LineSegmentROI(ROI):
    """ROI that draws a straight line segment between two movable handles."""

    def __init__(self, positions=(None, None), pos=None, handles=(None, None), **args):
        if (pos is None):
            pos = [0, 0]
        ROI.__init__(self, pos, [1, 1], **args)
        if (len(positions) > 2):
            raise Exception('LineSegmentROI must be defined by exactly 2 positions. For more points, use PolyLineROI.')
        for (i, p) in enumerate(positions):
            self.addFreeHandle(p, item=handles[i])

    @property
    def endpoints(self):
        """The two handle items.

        Restored as a ``@property``: ``paint``, ``shape`` and
        ``getArrayRegion`` index ``self.endpoints`` without calling it.
        """
        return [h['item'] for h in self.handles]

    def listPoints(self):
        """Positions of both handles in local coordinates."""
        return [p['item'].pos() for p in self.handles]

    def getState(self):
        state = ROI.getState(self)
        state['points'] = [Point(h.pos()) for h in self.getHandles()]
        return state

    def saveState(self):
        state = ROI.saveState(self)
        state['points'] = [tuple(h.pos()) for h in self.getHandles()]
        return state

    def setState(self, state):
        ROI.setState(self, state)
        # Handle positions are stored relative to the ROI position.
        p1 = [(state['points'][0][0] + state['pos'][0]), (state['points'][0][1] + state['pos'][1])]
        p2 = [(state['points'][1][0] + state['pos'][0]), (state['points'][1][1] + state['pos'][1])]
        self.movePoint(self.getHandles()[0], p1, finish=False)
        self.movePoint(self.getHandles()[1], p2)

    def paint(self, p, *args):
        p.setRenderHint(QtGui.QPainter.RenderHint.Antialiasing)
        p.setPen(self.currentPen)
        h1 = self.endpoints[0].pos()
        h2 = self.endpoints[1].pos()
        p.drawLine(h1, h2)

    def boundingRect(self):
        return self.shape().boundingRect()

    def shape(self):
        # Build a thin rectangle (4 px on each side) around the segment so
        # the ROI has a clickable area.
        p = QtGui.QPainterPath()
        h1 = self.endpoints[0].pos()
        h2 = self.endpoints[1].pos()
        dh = (h2 - h1)
        if (dh.length() == 0):
            return p
        pxv = self.pixelVectors(dh)[1]
        if (pxv is None):
            return p
        pxv *= 4
        p.moveTo((h1 + pxv))
        p.lineTo((h2 + pxv))
        p.lineTo((h2 - pxv))
        p.lineTo((h1 - pxv))
        p.lineTo((h1 + pxv))
        return p

    def getArrayRegion(self, data, img, axes=(0, 1), order=1, returnMappedCoords=False, **kwds):
        """Sample *data* along the segment (1-D slice through the image)."""
        imgPts = [self.mapToItem(img, h.pos()) for h in self.endpoints]
        d = Point((imgPts[1] - imgPts[0]))
        o = Point(imgPts[0])
        rgn = fn.affineSlice(data, shape=(int(d.length()),), vectors=[Point(d.norm())], origin=o, axes=axes, order=order, returnCoords=returnMappedCoords, **kwds)
        return rgn
@pytest.fixture(params=[{}, {'teleporters': TeleporterShuffleMode.ONE_WAY_ANYTHING, 'translator_configuration': True}])
def layout_config(request, default_echoes_configuration):
    """Parametrized echoes layout configuration fixture.

    Restored the stripped ``@pytest.fixture(params=...)`` decorator — the
    bare ``(params=[...])`` expression was a syntax error on its own.
    """
    if ('translator_configuration' in request.param):
        translator_requirement = copy.copy(default_echoes_configuration.translator_configuration.translator_requirement)
        for gate in translator_requirement.keys():
            translator_requirement[gate] = LayoutTranslatorRequirement.RANDOM
            # NOTE(review): only the first gate is randomized because of this
            # early exit — confirm the break is intentional.
            break
        new_gate = dataclasses.replace(default_echoes_configuration.translator_configuration, translator_requirement=translator_requirement)
        request.param['translator_configuration'] = new_gate
    return dataclasses.replace(default_echoes_configuration, **request.param)
@pytest.mark.end_to_end()
def test_parametrization_in_for_loop_with_ids(tmp_path, runner):
    """Tasks decorated inside a for loop run once per iteration with explicit ids.

    Restored the stripped ``@pytest.mark.end_to_end()`` decorator.
    NOTE(review): the embedded task-module source had stripped decorators
    and lost indentation; it was reconstructed — confirm against the
    original test module.
    """
    source = '''
    import pytask

    for i in range(2):

        @pytask.mark.task(
            "deco_task", id=str(i), kwargs={"i": i, "produces": f"out_{i}.txt"}
        )
        def example(produces, i):
            produces.write_text(str(i))
    '''
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.OK)
    assert ('deco_task[0]' in result.output)
    assert ('deco_task[1]' in result.output)
def test_order_with_dependency(item_names_for):
    """The order marker controls execution order alongside dependency markers.

    NOTE(review): the embedded test source had its ``@pytest.mark.``
    decorator prefixes stripped and its indentation lost; both were
    reconstructed — confirm against the original test module.
    """
    tests_content = '''
    import pytest

    @pytest.mark.dependency(depends=["test_b"])
    @pytest.mark.order("second")
    def test_a():
        pass

    @pytest.mark.dependency()
    def test_b():
        pass
    '''
    assert (item_names_for(tests_content) == ['test_b', 'test_a'])
@pytest.mark.utils
@pytest.mark.parametrize('arguments, func_args, expected', [(['a'], [0, 0], 0), (['b'], [1, 1], 2), (['a', 'b'], [0, 1], 1), (['b', 'a'], [0, 1], 1)])
def test_without_error(arguments, func_args, expected):
    """The kwargs-mapping decorator leaves the underlying sum intact.

    NOTE(review): all three decorators here were stripped in the source.
    ``@pytest.mark.utils`` / ``@pytest.mark.parametrize`` are restored from
    the visible ``.utils`` / ``.parametrize`` residue; ``_kwargs`` is only
    the visible suffix of the decorator applied to ``simple_sum`` — confirm
    its full name.
    """
    @_kwargs(*arguments)
    def simple_sum(alpha, beta, a=0, b=0):
        return (alpha + beta)

    assert (simple_sum(*func_args) == expected)
def main(mode='folder'):
    """Smoke-test the paired DIV2K dataset pipeline: build a dataloader for
    the chosen IO *mode* and dump the first few LQ/GT batches to ./tmp."""
    opt = {'dist': False, 'phase': 'train', 'name': 'DIV2K', 'type': 'PairedImageDataset'}
    # Data source configuration per IO backend.
    if mode == 'folder':
        opt.update(dataroot_gt='datasets/DIV2K/DIV2K_train_HR_sub', dataroot_lq='datasets/DIV2K/DIV2K_train_LR_bicubic/X4_sub', filename_tmpl='{}', io_backend=dict(type='disk'))
    elif mode == 'meta_info_file':
        opt.update(dataroot_gt='datasets/DIV2K/DIV2K_train_HR_sub', dataroot_lq='datasets/DIV2K/DIV2K_train_LR_bicubic/X4_sub', meta_info_file='basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt', filename_tmpl='{}', io_backend=dict(type='disk'))
    elif mode == 'lmdb':
        opt.update(dataroot_gt='datasets/DIV2K/DIV2K_train_HR_sub.lmdb', dataroot_lq='datasets/DIV2K/DIV2K_train_LR_bicubic_X4_sub.lmdb', io_backend=dict(type='lmdb'))
    # Common training options.
    opt.update(gt_size=128, use_flip=True, use_rot=True, use_shuffle=True, num_worker_per_gpu=2, batch_size_per_gpu=16, scale=4, dataset_enlarge_ratio=1)
    os.makedirs('tmp', exist_ok=True)
    dataset = create_dataset(opt)
    loader = create_dataloader(dataset, opt, num_gpu=0, dist=opt['dist'], sampler=None)
    nrow = int(math.sqrt(opt['batch_size_per_gpu']))
    padding = 2 if opt['phase'] == 'train' else 0
    print('start...')
    for batch_idx, batch in enumerate(loader):
        if batch_idx > 5:
            break
        print(batch_idx)
        lq = batch['lq']
        gt = batch['gt']
        print(batch['lq_path'], batch['gt_path'])
        torchvision.utils.save_image(lq, f'tmp/lq_{batch_idx:03d}.png', nrow=nrow, padding=padding, normalize=False)
        torchvision.utils.save_image(gt, f'tmp/gt_{batch_idx:03d}.png', nrow=nrow, padding=padding, normalize=False)
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs with norm + ReLU and an
    identity (or downsampled) skip connection."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, gcb=None, gen_attention=None):
        super(BasicBlock, self).__init__()
        assert (dcn is None), 'Not implemented yet.'
        assert (gen_attention is None), 'Not implemented yet.'
        assert (gcb is None), 'Not implemented yet.'
        # Norm layers are registered under generated names so their type
        # (BN/GN/...) can be configured; access them via the properties below.
        (self.norm1_name, norm1) = build_norm_layer(norm_cfg, planes, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert (not with_cp)

    @property
    def norm1(self):
        """First norm module.

        Restored ``@property``: ``forward`` evaluates ``self.norm1(out)``,
        i.e. the returned module is called — as a plain method this would
        pass ``out`` to a zero-argument method and raise ``TypeError``.
        """
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """Second norm module (see ``norm1`` for why this is a property)."""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """conv -> norm -> relu -> conv -> norm, plus skip, then relu."""
        identity = x
        out = self.conv1(x)
        out = self.norm1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.norm2(out)
        if (self.downsample is not None):
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
@pytest.fixture()
def _django_db_helper(request: pytest.FixtureRequest, django_db_setup: None, django_db_blocker: DjangoDbBlocker) -> Generator[(None, None, None)]:
    """Provide database access for one test via a synthetic Django TestCase.

    Restored the stripped ``@pytest.fixture()`` decorator (the bare ``()``
    line) and the ``@classmethod`` decorators on setUpClass/tearDownClass
    (they take ``cls`` and are invoked on the class).
    """
    from django import VERSION
    if is_django_unittest(request):
        # Real Django TestCase subclasses manage the database themselves.
        (yield)
        return
    marker = request.node.get_closest_marker('django_db')
    if marker:
        (transactional, reset_sequences, databases, serialized_rollback, available_apps) = validate_django_db(marker)
    else:
        (transactional, reset_sequences, databases, serialized_rollback, available_apps) = (False, False, None, False, None)
    # Requesting certain fixtures implies stronger database semantics.
    transactional = (transactional or reset_sequences or (('transactional_db' in request.fixturenames) or ('live_server' in request.fixturenames)))
    reset_sequences = (reset_sequences or ('django_db_reset_sequences' in request.fixturenames))
    serialized_rollback = (serialized_rollback or ('django_db_serialized_rollback' in request.fixturenames))
    django_db_blocker.unblock()
    import django.db
    import django.test
    if transactional:
        test_case_class = django.test.TransactionTestCase
    else:
        test_case_class = django.test.TestCase
    _reset_sequences = reset_sequences
    _serialized_rollback = serialized_rollback
    _databases = databases
    _available_apps = available_apps

    class PytestDjangoTestCase(test_case_class):
        reset_sequences = _reset_sequences
        serialized_rollback = _serialized_rollback
        if (_databases is not None):
            databases = _databases
        if (_available_apps is not None):
            available_apps = _available_apps
        if (not transactional):

            @classmethod
            def setUpClass(cls) -> None:
                super(django.test.TestCase, cls).setUpClass()
                if (VERSION < (4, 1)):
                    # Durability checks would break nested atomics in tests.
                    django.db.transaction.Atomic._ensure_durability = False

            @classmethod
            def tearDownClass(cls) -> None:
                if (VERSION < (4, 1)):
                    django.db.transaction.Atomic._ensure_durability = True
                super(django.test.TestCase, cls).tearDownClass()

    PytestDjangoTestCase.setUpClass()
    test_case = PytestDjangoTestCase(methodName='__init__')
    test_case._pre_setup()
    (yield)
    test_case._post_teardown()
    PytestDjangoTestCase.tearDownClass()
    if (VERSION >= (4, 0)):
        PytestDjangoTestCase.doClassCleanups()
    django_db_blocker.restore()
def modality_fcn(net_spec, data, modality):
    """Build a VGG-16-style FCN branch for one input modality, suffixing
    every layer name with *modality*."""
    n = net_spec
    # (stage index, number of convs, output channels).  conv1_1 uses
    # pad=100 as in the original FCN so arbitrary input sizes work.
    stages = [(1, 2, 64), (2, 2, 128), (3, 3, 256), (4, 3, 512), (5, 3, 512)]
    bottom = n[data]
    for stage, n_convs, channels in stages:
        for conv in range(1, n_convs + 1):
            conv_name = 'conv{}_{}{}'.format(stage, conv, modality)
            relu_name = 'relu{}_{}{}'.format(stage, conv, modality)
            extra = {'pad': 100} if (stage, conv) == (1, 1) else {}
            n[conv_name], n[relu_name] = conv_relu(bottom, channels, **extra)
            bottom = n[relu_name]
        pool_name = 'pool{}{}'.format(stage, modality)
        n[pool_name] = max_pool(bottom)
        bottom = n[pool_name]
    # Fully-convolutional head: fc6/fc7 as convolutions with dropout, then
    # the 40-class score layer.
    n['fc6' + modality], n['relu6' + modality] = conv_relu(bottom, 4096, ks=7, pad=0)
    n['drop6' + modality] = L.Dropout(n['relu6' + modality], dropout_ratio=0.5, in_place=True)
    n['fc7' + modality], n['relu7' + modality] = conv_relu(n['drop6' + modality], 4096, ks=1, pad=0)
    n['drop7' + modality] = L.Dropout(n['relu7' + modality], dropout_ratio=0.5, in_place=True)
    n['score_fr' + modality] = L.Convolution(n['drop7' + modality], num_output=40, kernel_size=1, pad=0, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])
    return n
class ObjectEditForm(ObjectCreateForm):
    """Admin form for editing an existing in-game object.

    Extends the creation form with direct access to the raw lock-storage
    string.
    """

    class Meta(object):
        # expose every field of the underlying model on the edit form
        fields = '__all__'
    # Raw in-game lock definition, edited as free text by staff.
    # NOTE(review): the help_text opens an <i> tag that is never closed —
    # confirm how the admin template renders it.
    db_lock_storage = forms.CharField(label='Locks', required=False, widget=forms.Textarea(attrs={'cols': '100', 'rows': '2'}), help_text='In-game lock definition string. If not given, defaults will be used. This string should be on the form <i>type:lockfunction(args);type2:lockfunction2(args);...')
def test_equation_of_time():
    """Cross-check two analytic equation-of-time models against SPA output.

    Both approximations are compared to the SPA reference after normalizing
    by the annual range, so the tolerances are scale-free fractions.
    """
    times = pd.date_range(start='1/1/2015 0:00', end='12/31/2015 23:00', freq='H')
    spa = solarposition.spa_python(times, 37.8, (- 122.25), 100)
    reference = spa['equation_of_time']
    annual_range = reference.max() - reference.min()
    spencer = solarposition.equation_of_time_spencer71(times.dayofyear)
    pvcdrom = solarposition.equation_of_time_pvcdrom(times.dayofyear)
    # Spencer's Fourier fit is the coarser of the two, hence the tighter 0.3
    # fraction still being looser in absolute terms than it looks.
    assert np.allclose(spencer / annual_range, reference / annual_range, atol=0.3)
    assert np.allclose(pvcdrom / annual_range, reference / annual_range, atol=0.4)
# Freeze the clock so the serialized created_on timestamp is deterministic.
# NOTE(review): looks like a decorator call missing its '@' — confirm against
# the original source.
_time('14-06-15 15:44:25')
class TestListAPIView(CassandraTestCase):
    """Integration test for the 'thing' list API endpoint."""

    def test_get(self):
        # One stored record should come back as a one-element JSON list with
        # UUIDs rendered as strings and the frozen timestamp in ISO-8601.
        thing = create_thing()
        response = self.client.get(reverse('thing_listview_api'))
        self.assertEqual(response.status_code, client.OK)
        expected_response = [{'created_on': '2015-06-14T15:44:25Z', 'data_abstract': None, 'another_id': str(thing.another_id), 'id': str(thing.id)}]
        self.assertEqual(response.json(), expected_response)
class SlugModelTest(TestCase):
    """Exercise automatic and manual slug management on Petition objects."""

    def setUp(self):
        # A throwaway account; the tests fetch it back through PytitionUser.
        get_user_model().objects.create_user('julia', password='julia')

    def _create_petition(self):
        """Create one petition for julia and verify the expected row counts."""
        owner = PytitionUser.objects.get(user__username='julia')
        petition = Petition.objects.create(title='Petition1', user=owner)
        self.assertEqual(Petition.objects.count(), 1)
        self.assertEqual(SlugModel.objects.count(), 1)
        return petition

    def test_autocreateSlug(self):
        # Creating a petition must auto-create exactly one slug.
        owner = PytitionUser.objects.get(user__username='julia')
        self.assertEqual(SlugModel.objects.count(), 0)
        Petition.objects.create(title='Petition1', user=owner)
        self.assertEqual(Petition.objects.count(), 1)
        self.assertEqual(SlugModel.objects.count(), 1)

    def test_createSlug(self):
        # Adding a second, explicit slug bumps the slug count to two.
        petition = self._create_petition()
        petition.add_slug('this-is-a-cool-slug')
        self.assertEqual(SlugModel.objects.count(), 2)

    def test_SlugShouldBeUniq(self):
        # Re-adding an existing slug must raise and leave the count untouched.
        petition = self._create_petition()
        petition.add_slug('this-is-a-cool-slug')
        self.assertEqual(SlugModel.objects.count(), 2)
        self.assertRaises(ValueError, petition.add_slug, 'this-is-a-cool-slug')
        self.assertEqual(SlugModel.objects.count(), 2)

    def test_SlugDelete(self):
        # Deleting the explicit slug drops the count back to one.
        petition = self._create_petition()
        petition.add_slug('this-is-a-cool-slug')
        self.assertEqual(SlugModel.objects.count(), 2)
        petition.del_slug('this-is-a-cool-slug')
        self.assertEqual(SlugModel.objects.count(), 1)
def test_parameterset_for_fail_at_collect(pytester: Pytester) -> None:
    """With empty_parameter_set_mark=fail_at_collect, an empty parameter set
    must turn into a collection error rather than a skip/xfail.
    """
    pytester.makeini('\n [pytest]\n {}=fail_at_collect\n '.format(EMPTY_PARAMETERSET_OPTION))
    config = pytester.parseconfig()
    from _pytest.mark import pytest_configure, get_empty_parameterset_mark
    pytest_configure(config)
    # Directly requesting the mark raises CollectError pointing at the caller.
    with pytest.raises(Collector.CollectError, match="Empty parameter set in 'pytest_configure' at line \\d\\d+"):
        get_empty_parameterset_mark(config, ['a'], pytest_configure)
    # Bugfix: the decorator lost its '@pytest.mark' prefix — '.parametrize'
    # alone is invalid Python, so the generated test file could never
    # reproduce the expected "Empty parameter set in 'test' at line 3" error.
    p1 = pytester.makepyfile('\n import pytest\n\n @pytest.mark.parametrize("empty", [])\n def test():\n pass\n ')
    result = pytester.runpytest(str(p1))
    result.stdout.fnmatch_lines(['collected 0 items / 1 error', '* ERROR collecting test_parameterset_for_fail_at_collect.py *', "Empty parameter set in 'test' at line 3", '*= 1 error in *'])
    assert (result.ret == ExitCode.INTERRUPTED)
class EnclaveCreationBreakpoint():
    """Registers an lldb breakpoint on the SGX enclave-creation debug hook."""

    def __init__(self, target):
        # Fire whenever the Open Enclave runtime reports a new enclave.
        breakpoint = target.BreakpointCreateByName('oe_debug_enclave_created_hook')
        # lldb resolves the callback by its dotted name at hit time.
        breakpoint.SetScriptCallbackFunction('lldb_sgx_plugin.EnclaveCreationBreakpoint.onHit')

    # Called by lldb through the registered name, not via an instance.
    # NOTE(review): no `self` parameter — presumably meant to carry a
    # @staticmethod decorator; confirm against upstream.
    def onHit(frame, bp_loc, dict):
        # rdi holds the hook's first argument — presumably the enclave
        # structure address (SysV calling convention); confirm.
        enclave_addr = frame.FindValue('rdi', lldb.eValueTypeRegister).signed
        enable_oeenclave_debug(enclave_addr)
        # False tells lldb to auto-continue instead of stopping the target.
        return False
class ANETclassification(object):
    """Evaluator for the ActivityNet untrimmed-video classification task.

    Loads ground truth and predictions from JSON files, then computes
    per-class average precision and top-k hit metrics via ``evaluate``.
    """

    # required top-level keys in each input JSON
    GROUND_TRUTH_FIELDS = ['database', 'taxonomy', 'version']
    PREDICTION_FIELDS = ['results', 'version', 'external_data']

    def __init__(self, ground_truth_filename=None, prediction_filename=None, ground_truth_fields=GROUND_TRUTH_FIELDS, prediction_fields=PREDICTION_FIELDS, subset='validation', verbose=False, top_k=3, check_status=True):
        if (not ground_truth_filename):
            raise IOError('Please input a valid ground truth file.')
        if (not prediction_filename):
            raise IOError('Please input a valid prediction file.')
        self.subset = subset
        self.verbose = verbose
        self.gt_fields = ground_truth_fields
        self.pred_fields = prediction_fields
        self.top_k = top_k
        # filled in by evaluate()
        self.ap = None
        self.hit_at_k = None
        self.check_status = check_status
        # Videos flagged unavailable upstream are excluded everywhere.
        if self.check_status:
            self.blocked_videos = get_blocked_videos()
        else:
            self.blocked_videos = list()
        (self.ground_truth, self.activity_index) = self._import_ground_truth(ground_truth_filename)
        self.prediction = self._import_prediction(prediction_filename)
        if self.verbose:
            print('[INIT] Loaded annotations from {} subset.'.format(subset))
            nr_gt = len(self.ground_truth)
            print('\tNumber of ground truth instances: {}'.format(nr_gt))
            nr_pred = len(self.prediction)
            print('\tNumber of predictions: {}'.format(nr_pred))

    def _import_ground_truth(self, ground_truth_filename):
        """Read ground truth JSON into a (video-id, label) DataFrame.

        Also builds ``activity_index``, mapping each label string to a
        contiguous integer id in first-seen order.
        """
        with open(ground_truth_filename, 'r') as fobj:
            data = json.load(fobj)
        if (not all([(field in list(data.keys())) for field in self.gt_fields])):
            raise IOError('Please input a valid ground truth file.')
        (activity_index, cidx) = ({}, 0)
        (video_lst, label_lst) = ([], [])
        for (videoid, v) in data['database'].items():
            # keep only the requested subset and unblocked videos
            if (self.subset != v['subset']):
                continue
            if (videoid in self.blocked_videos):
                continue
            for ann in v['annotations']:
                if (ann['label'] not in activity_index):
                    activity_index[ann['label']] = cidx
                    cidx += 1
                video_lst.append(videoid)
                label_lst.append(activity_index[ann['label']])
        ground_truth = pd.DataFrame({'video-id': video_lst, 'label': label_lst})
        # a video may carry several segments of the same class; count it once
        ground_truth = ground_truth.drop_duplicates().reset_index(drop=True)
        return (ground_truth, activity_index)

    def _import_prediction(self, prediction_filename):
        """Read prediction JSON into a (video-id, label, score) DataFrame."""
        with open(prediction_filename, 'r') as fobj:
            data = json.load(fobj)
        if (not all([(field in list(data.keys())) for field in self.pred_fields])):
            raise IOError('Please input a valid prediction file.')
        (video_lst, label_lst, score_lst) = ([], [], [])
        for (videoid, v) in data['results'].items():
            if (videoid in self.blocked_videos):
                continue
            for result in v:
                # labels are mapped through the ground-truth index, so a
                # predicted label unseen in GT raises KeyError
                label = self.activity_index[result['label']]
                video_lst.append(videoid)
                label_lst.append(label)
                score_lst.append(result['score'])
        prediction = pd.DataFrame({'video-id': video_lst, 'label': label_lst, 'score': score_lst})
        return prediction

    def wrapper_compute_average_precision(self):
        """Return an array of per-class average precision values."""
        ap = np.zeros(len(list(self.activity_index.items())))
        for (activity, cidx) in self.activity_index.items():
            gt_idx = (self.ground_truth['label'] == cidx)
            pred_idx = (self.prediction['label'] == cidx)
            ap[cidx] = compute_average_precision_classification(self.ground_truth.loc[gt_idx].reset_index(drop=True), self.prediction.loc[pred_idx].reset_index(drop=True))
        return ap

    def evaluate(self):
        """Compute mAP and hit@k; results are stored on the instance."""
        ap = self.wrapper_compute_average_precision()
        hit_at_k = compute_video_hit_at_k(self.ground_truth, self.prediction, top_k=self.top_k)
        avg_hit_at_k = compute_video_hit_at_k(self.ground_truth, self.prediction, top_k=self.top_k, avg=True)
        if self.verbose:
            print('[RESULTS] Performance on ActivityNet untrimmed video classification task.')
            print('\tMean Average Precision: {}'.format(ap.mean()))
            # Bugfix: the format string was garbled ('\{}: {}'), which is an
            # invalid escape and printed a literal backslash-brace; the value
            # printed is the error rate (1 - hit@k).
            print('\tError@{}: {}'.format(self.top_k, (1.0 - hit_at_k)))
        self.ap = ap
        self.hit_at_k = hit_at_k
        self.avg_hit_at_k = avg_hit_at_k
class QuantizableInception(Inception):
    """Inception block variant whose ops are quantization-friendly."""

    def __init__(self, *args, **kwargs):
        # swap in the quantizable conv block for every branch
        super(QuantizableInception, self).__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        # FloatFunctional lets torch.quantization observe and convert the cat op
        self.cat = nn.quantized.FloatFunctional()

    def forward(self, x):
        outputs = self._forward(x)
        # concatenate branch outputs along the channel dimension (dim=1)
        return self.cat.cat(outputs, 1)
class Migration(migrations.Migration):
    """Adjust the verification-code timestamp fields on the Device model.

    Gives the expiry field a callable default and makes the sent-at field
    auto-populate on creation.
    """

    dependencies = [('devices', '0001_initial')]
    operations = [migrations.AlterField(model_name='device', name='verification_code_expires_at', field=models.DateTimeField(default=junction.devices.models.expiry_time, verbose_name='Verification Code Expires At'), preserve_default=True), migrations.AlterField(model_name='device', name='verification_code_sent_at', field=models.DateTimeField(auto_now_add=True, verbose_name='Verification Code Sent At'), preserve_default=True)]
def append_call_sample_docstring(model_class, tokenizer_class, checkpoint, output_type, config_class, mask=None):
    """Attach a code-sample docstring to ``model_class.__call__`` in place.

    The method is copied first so the shared parent implementation's
    docstring is not mutated, then wrapped by ``add_code_sample_docstrings``.
    NOTE(review): ``mask`` is accepted but never used here — presumably kept
    for signature compatibility with sibling helpers; confirm before removing.
    """
    model_class.__call__ = copy_func(model_class.__call__)
    model_class.__call__ = add_code_sample_docstrings(processor_class=tokenizer_class, checkpoint=checkpoint, output_type=output_type, config_class=config_class, model_cls=model_class.__name__)(model_class.__call__)
def nca(similarities, targets, class_weights=None, focal_gamma=None, scale=1.0, margin=0.6, exclude_pos_denominator=True, hinge_proxynca=False, memory_flags=None):
    """Compute the NCA / ProxyNCA classification loss.

    Args:
        similarities: (batch, n_classes) similarity logits.
        targets: (batch,) ground-truth class indices.
        class_weights: optional per-class weight tensor indexed by target.
        focal_gamma: unused here; kept for API compatibility.
        scale: multiplicative scale applied after the margin subtraction.
        margin: margin subtracted from the target-class similarity only.
        exclude_pos_denominator: if True, use the ProxyNCA-style loss that
            zeroes the positive term inside the denominator; otherwise fall
            back to plain cross-entropy on the adjusted similarities.
        hinge_proxynca: clamp per-sample losses at zero.
        memory_flags: unused here; kept for API compatibility.

    Returns:
        Scalar loss tensor.
    """
    # Bugfix: subtract the margin from the TARGET entry of each row only.
    # Previously the scalar `margin` was subtracted from every entry, which
    # left the constructed `margins` matrix dead and made the margin a no-op
    # for the relative ranking of classes.
    margins = torch.zeros_like(similarities)
    margins[torch.arange(margins.shape[0]), targets] = margin
    similarities = scale * (similarities - margins)
    if exclude_pos_denominator:
        # numerical stability: shift each row by its max before exponentiating
        similarities = similarities - similarities.max(1)[0].view(-1, 1)
        # zero out the positive (target) term inside the denominator
        disable_pos = torch.zeros_like(similarities)
        disable_pos[torch.arange(len(similarities)), targets] = similarities[torch.arange(len(similarities)), targets]
        numerator = similarities[torch.arange(similarities.shape[0]), targets]
        denominator = similarities - disable_pos
        losses = numerator - torch.log(torch.exp(denominator).sum(-1))
        if class_weights is not None:
            losses = class_weights[targets] * losses
        losses = -losses
        if hinge_proxynca:
            losses = torch.clamp(losses, min=0.0)
        return torch.mean(losses)
    # fallback: standard cross-entropy on the margin-adjusted similarities
    return F.cross_entropy(similarities, targets, weight=class_weights, reduction='mean')
class P3S_TD3(MARLAlgorithm, Serializable):
    """TD3-based P3S multi-actor trainer with an optional shared best actor.

    Maintains an array of TD3 actors (one per parallel environment) plus a
    shared "best" actor.  Non-best actors are pulled toward the best policy
    with a divergence penalty whose weight ``beta_t`` is adapted online from
    the measured old-policy / best-policy divergences.
    """

    def __init__(self, base_kwargs, env, arr_actor, best_actor, dict_ph, arr_initial_exploration_policy, with_best=False, initial_beta_t=1, plotter=None, specific_type=0, target_noise_scale=0.2, target_noise_clip=0.5, target_ratio=2, target_range=0.04, lr=0.003, discount=0.99, tau=0.01, policy_update_interval=2, best_update_interval=2, reparameterize=False, save_full_state=False):
        """Build all TF update ops, initialize variables and sync targets."""
        Serializable.quick_init(self, locals())
        super(P3S_TD3, self).__init__(**base_kwargs)
        self._env = env
        # assumes a symmetric action box; high[0] is used as the clip bound
        self._max_actions = int(self._env.action_space.high[0])
        self._arr_actor = arr_actor
        self._best_actor = best_actor
        self._best_actor_num = (- 1)
        self._num_iter_select_best = 1
        assert (len(self._env.envs) == len(self._arr_actor))
        self._num_actor = len(self._arr_actor)
        self._n_train_repeat = self._num_actor
        self._dict_ph = dict_ph
        self._arr_initial_exploration_policy = arr_initial_exploration_policy
        self._with_best = with_best
        self._best_flag = np.ones(self._num_actor)
        self._beta_t = initial_beta_t
        self._plotter = plotter
        # TD3 target-policy smoothing noise parameters
        self._target_noise_scale = target_noise_scale
        self._target_noise_clip = target_noise_clip
        # thresholds controlling the adaptive beta_t update
        self._target_ratio = target_ratio
        self._target_range = target_range
        self._policy_lr = lr
        self._qf_lr = lr
        self._vf_lr = lr
        self._discount = discount
        self._tau = tau
        self._policy_update_interval = policy_update_interval
        self._best_update_interval = best_update_interval
        self._save_full_state = save_full_state
        self._saver = tf.train.Saver(max_to_keep=1000)
        # NOTE(review): hard-coded machine-specific checkpoint path — should
        # be made configurable.
        self._save_dir = '/home/wisrl/wyjung/Result/log/Mujoco/ant_delay20/test_IPE_TD3_NA4_TRatio2_Trange0.03_update1_ver3_new_201906/iter6/'
        self._save_iter_num = 40000
        self._Da = self._env.action_space.flat_dim
        self._Do = self._env.observation_space.flat_dim
        if (self._best_actor is not None):
            self._init_critic_update(actor=self._best_actor)
            self._init_actor_update(actor=self._best_actor)
            self._init_target_ops(actor=self._best_actor)
        for actor in self._arr_actor:
            self._init_critic_update(actor=actor)
            self._init_actor_update(actor=actor)
            self._init_target_ops(actor=actor)
            self._init_update_old_new_ops(actor=actor)
        # initialize everything except pre-trained low-level policy variables
        self._sess.run(tf.variables_initializer([variable for variable in tf.global_variables() if ('low_level_policy' not in variable.name)]))
        self._update_old_new()
        # hard-copy current networks into their target networks
        for actor in self._arr_actor:
            source_params = actor.current_params()
            target_params = actor.target_params()
            copy_ops = [tf.assign(target, source) for (target, source) in zip(target_params, source_params)]
            self._sess.run(copy_ops)
        if (self._best_actor is not None):
            source_params = self._best_actor.current_params()
            target_params = self._best_actor.target_params()
            copy_ops = [tf.assign(target, source) for (target, source) in zip(target_params, source_params)]
            self._sess.run(copy_ops)
            # start every actor from the best actor's weights
            for actor in self._arr_actor:
                source_params = self._best_actor.trainable_params()
                target_params = actor.trainable_params()
                copy_ops = [tf.assign(target, source) for (target, source) in zip(target_params, source_params)]
                self._sess.run(copy_ops)
        print('Initialization is finished!')

    def train(self):
        """Run the training loop inherited from the base algorithm."""
        self._train(self._env, self._arr_actor, self._arr_initial_exploration_policy)

    def _init_critic_update(self, actor):
        """Build the clipped-double-Q TD-loss op for one actor's critics."""
        arr_target_qf_t = [target_qf.output_t for target_qf in actor.arr_target_qf]
        # TD3: take the minimum of the two target Q networks
        min_target_qf_t = tf.minimum(arr_target_qf_t[0], arr_target_qf_t[1])
        ys = tf.stop_gradient((self._dict_ph['rewards_ph'] + (((1 - self._dict_ph['terminals_ph']) * self._discount) * min_target_qf_t)))
        arr_td_loss_t = []
        for qf in actor.arr_qf:
            arr_td_loss_t.append(tf.reduce_mean(((ys - qf.output_t) ** 2)))
        td_loss_t = tf.add_n(arr_td_loss_t)
        qf_train_op = tf.train.AdamOptimizer(self._qf_lr).minimize(loss=td_loss_t, var_list=actor.qf_params())
        actor.qf_training_ops = qf_train_op
        print('qf params:', actor.qf_params())
        print('target qf param: ', actor.target_qf_params())

    def _init_actor_update(self, actor):
        """Build the policy loss op, optionally regularized toward the best."""
        with tf.variable_scope(actor.name, reuse=tf.AUTO_REUSE):
            qf_t = actor.arr_qf[0].get_output_for(self._dict_ph['observations_ph'], actor.policy.action_t, reuse=tf.AUTO_REUSE)
        # divergence diagnostics to the old snapshot and the best policy
        actor.oldkl = actor.policy.dist(actor.oldpolicy)
        if self._with_best:
            actor.bestkl = actor.policy.dist(self._best_actor.policy)
            # penalty only applies to actors that are NOT currently the best
            not_best_flag = tf.reduce_sum((self._dict_ph['not_best_ph'] * tf.one_hot(actor.actor_num, self._num_actor)))
            policy_kl_loss = (tf.reduce_mean((- qf_t)) + ((not_best_flag * self._dict_ph['beta_ph']) * tf.reduce_mean(actor.bestkl)))
        else:
            policy_kl_loss = tf.reduce_mean((- qf_t))
        policy_regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=((actor.name + '/') + actor.policy.name))
        print('policy regular loss', policy_regularization_losses)
        policy_regularization_loss = tf.reduce_sum(policy_regularization_losses)
        policy_loss = (policy_kl_loss + policy_regularization_loss)
        print('policy param: ', actor.policy_params())
        print('old policy param: ', actor.old_policy_params())
        print('target policy param: ', actor.target_policy_params())
        policy_train_op = tf.train.AdamOptimizer(self._policy_lr).minimize(loss=policy_loss, var_list=actor.policy_params())
        actor.policy_training_ops = policy_train_op

    def _init_target_ops(self, actor):
        """Build Polyak-averaging ops for the actor's target networks."""
        source_params = actor.current_params()
        target_params = actor.target_params()
        actor.target_ops = [tf.assign(target, (((1 - self._tau) * target) + (self._tau * source))) for (target, source) in zip(target_params, source_params)]

    def _init_update_old_new_ops(self, actor):
        """Build ops that snapshot the current policy into the old policy."""
        source_params = actor.policy_params()
        target_params = actor.old_policy_params()
        actor.copy_old_new_ops = [tf.assign(target, source) for (target, source) in zip(target_params, source_params)]

    def _init_training(self, env, arr_actor):
        """Pick an initial best actor and sync it before training starts."""
        super(P3S_TD3, self)._init_training(env, arr_actor)
        self._best_actor_num = 0
        if self._with_best:
            self._copy_best_actor()

    def _do_training(self, actor, iteration, batch):
        """One training step for one actor: critic, (periodic) policy, targets."""
        # once per epoch (triggered by actor 0): refresh old-policy snapshots
        if ((iteration > 1) and ((iteration % self._epoch_length) == 0) and (actor.actor_num == 0)):
            self._update_old_new()
            if (self._with_best and ((iteration % int((self._best_update_interval * self._epoch_length))) == 0)):
                self._select_best_actor()
                self._best_flag = np.array([int((i == self._best_actor_num)) for i in range(len(self._arr_actor))])
                self._copy_best_actor()
        feed_dict = self._get_feed_dict(iteration, batch)
        # TD3 target-policy smoothing: noisy target actions for the critic
        next_actions = self._get_next_actions(actor, feed_dict)
        feed_dict[self._dict_ph['next_actions_ph']] = next_actions
        self._sess.run(actor.qf_training_ops, feed_dict)
        # delayed policy updates (TD3)
        if ((iteration % self._policy_update_interval) == 0):
            self._sess.run(actor.policy_training_ops, feed_dict)
            self._sess.run(actor.target_ops)
            # track divergences, clipped to a sane range before averaging
            oldkl_t = self._sess.run(actor.oldkl, feed_dict)
            oldkl_t = np.clip(oldkl_t, (1 / 10000), 10000)
            self._arr_oldkl[actor.actor_num].extend([np.mean(oldkl_t[np.isfinite(oldkl_t)])])
            if self._with_best:
                bestkl_t = self._sess.run(actor.bestkl, feed_dict)
                bestkl_t = np.clip(bestkl_t, (1 / 10000), 10000)
                self._arr_bestkl[actor.actor_num].extend([np.mean(bestkl_t[np.isfinite(bestkl_t)])])
        # once per epoch (after the last actor): adapt the penalty weight
        if ((iteration > 1) and ((iteration % self._epoch_length) == 0) and self._with_best and (actor.actor_num == (self._num_actor - 1))):
            self._update_beta_t()

    def _get_next_actions(self, actor, feed_dict):
        """Return clipped, noise-smoothed target-policy actions."""
        actions = np.array(self._sess.run(actor.targetpolicy.action_t, feed_dict))
        noise = np.clip((self._target_noise_scale * np.random.randn(actions.shape[0], actions.shape[1])), (- self._target_noise_clip), self._target_noise_clip)
        return np.clip((actions + noise), (- self._max_actions), self._max_actions)

    def _get_feed_dict(self, iteration, batch):
        """Assemble the placeholder feed for one training batch."""
        feed_dict = {self._dict_ph['observations_ph']: batch['observations'], self._dict_ph['actions_ph']: batch['actions'], self._dict_ph['next_observations_ph']: batch['next_observations'], self._dict_ph['rewards_ph']: batch['rewards'], self._dict_ph['terminals_ph']: batch['terminals'], self._dict_ph['not_best_ph']: (1 - self._best_flag), self._dict_ph['beta_ph']: self._beta_t}
        if (iteration is not None):
            feed_dict[self._dict_ph['iteration_ph']] = iteration
        return feed_dict

    def _select_best_actor(self):
        """Mark the actor with the highest mean sampled return as best."""
        mean_returns = [np.mean(self.sampler._arr_return[i]) for i in range(self._num_actor)]
        best_actor_num = np.argmax(mean_returns)
        self._best_actor_num = best_actor_num

    def _copy_best_actor(self):
        """Copy the currently-best actor's policy weights into the best actor."""
        source_params = self._arr_actor[self._best_actor_num].policy_params()
        target_params = self._best_actor.policy_params()
        copy_best_ops = [tf.assign(target, source) for (target, source) in zip(target_params, source_params)]
        self._sess.run(copy_best_ops)
        print('best actor is copied by the best actor, the actor{i}'.format(i=self._best_actor_num))

    def _update_beta_t(self):
        """Double/halve beta_t so best-divergence tracks the old-divergence."""
        mean_best = []
        mean_old = []
        for i in range(self._num_actor):
            if (i == self._best_actor_num):
                continue
            mean_best.append(np.mean(self._arr_bestkl[i]))
            mean_old.append(np.mean(self._arr_oldkl[i]))
        if (np.mean(mean_best) > (max((self._target_ratio * np.mean(mean_old)), self._target_range) * 1.5)):
            if (self._beta_t < 1000):
                self._beta_t = (self._beta_t * 2)
        if (np.mean(mean_best) < (max((self._target_ratio * np.mean(mean_old)), self._target_range) / 1.5)):
            if (self._beta_t > (1 / 1000)):
                self._beta_t = (self._beta_t / 2)
        print('next beta_t : ', self._beta_t)

    def _update_old_new(self):
        """Snapshot every actor's current policy into its old policy."""
        self._sess.run([actor.copy_old_new_ops for actor in self._arr_actor])

    def save(self, iter):
        """Checkpoint the TF session; ``iter`` is embedded in the filename."""
        # Bugfix: the original format string had no placeholder, so
        # ('IPE_TD3_model.ckpt' % int(iter)) raised TypeError and saving
        # always crashed.
        save_filename = (self._save_dir + ('IPE_TD3_model_%d.ckpt' % int(iter)))
        self._saver.save(self._sess, save_filename)
        print((('' + save_filename) + ' is saved '))

    def log_diagnostics(self, actor, iteration, batch):
        """Record per-actor Q/V statistics and the shared beta/best state."""
        feed_dict = self._get_feed_dict(iteration, batch)
        # NOTE(review): min_qf_t / vf_t are not created in this class —
        # presumably attached to the actor elsewhere; confirm.
        (min_qf, vf) = self._sess.run((actor.min_qf_t, actor.vf_t), feed_dict)
        if self._with_best:
            # beta logged under two keys, matching get_snapshot's duplication
            logger.record_tabular('beta_t', self._beta_t)
            logger.record_tabular('beta', self._beta_t)
            logger.record_tabular('best_actor_num', self._best_actor.actor_num)
        logger.record_tabular('min-qf-avg/{i}'.format(i=actor.actor_num), np.mean(min_qf))
        logger.record_tabular('min-qf-std/{i}'.format(i=actor.actor_num), np.std(min_qf))
        logger.record_tabular('vf-avg/{i}'.format(i=actor.actor_num), np.mean(vf))
        logger.record_tabular('vf-std/{i}'.format(i=actor.actor_num), np.std(vf))
        actor.policy.log_diagnostics(iteration, batch)
        if self._plotter:
            self._plotter.draw()

    def get_snapshot(self, epoch):
        """Return a picklable snapshot of all actors (or the whole algo)."""
        if self._save_full_state:
            snapshot = {'epoch': epoch, 'algo': self}
        else:
            snapshot = {'epoch': epoch}
            for actor in self._arr_actor:
                snapshot[(actor.name + '/policy')] = actor.policy
                snapshot[(actor.name + '/target_policy')] = actor.targetpolicy
                for (i, qf) in enumerate(actor.arr_qf):
                    snapshot[(actor.name + '/qf{i}'.format(i=i))] = qf
                for (i, target_qf) in enumerate(actor.arr_target_qf):
                    snapshot[(actor.name + '/target_qf{i}'.format(i=i))] = target_qf
            if self._with_best:
                snapshot['best_actor_num'] = self._best_actor_num
                snapshot['beta_t'] = self._beta_t
                snapshot['beta'] = self._beta_t
        return snapshot

    def __getstate__(self):
        """Serialize network parameters, replay pools and env state."""
        d = Serializable.__getstate__(self)
        d.update({'actor-qf-params': [[qf.get_param_values() for qf in actor.arr_qf] for actor in self._arr_actor], 'actor-target-qf-params': [[target_qf.get_param_values() for target_qf in actor.arr_target_qf] for actor in self._arr_actor], 'actor-policy-params': [actor.policy.get_param_values() for actor in self._arr_actor], 'actor-target-policy-params': [actor.targetpolicy.get_param_values() for actor in self._arr_actor], 'actor-pool': [actor.pool.__getstate__() for actor in self._arr_actor], 'env': [env.__getstate__() for env in self._env.envs]})
        return d

    def __setstate__(self, d):
        """Restore network parameters, replay pools and env state."""
        Serializable.__setstate__(self, d)
        for (i, actor) in enumerate(self._arr_actor):
            for (j, qf) in enumerate(actor.arr_qf):
                qf.set_param_values(d['actor-qf-params'][i][j])
            for (j, target_qf) in enumerate(actor.arr_target_qf):
                target_qf.set_param_values(d['actor-target-qf-params'][i][j])
            actor.policy.set_param_values(d['actor-policy-params'][i])
            actor.targetpolicy.set_param_values(d['actor-target-policy-params'][i])
            actor.pool.__setstate__(d['actor-pool'][i])
            self._env.envs[i].__setstate__(d['env'][i])
class UniformPolicy(Policy, Serializable):
    """Exploration policy that ignores observations and samples actions
    uniformly from [-1, 1] in each action dimension."""

    def __init__(self, env_spec):
        Serializable.quick_init(self, locals())
        # flat action dimensionality, cached for sampling
        self._Da = env_spec.action_space.flat_dim
        super(UniformPolicy, self).__init__(env_spec)

    def get_action(self, observation):
        # The observation is deliberately ignored; the info dict is None.
        action = np.random.uniform(-1.0, 1.0, self._Da)
        return (action, None)

    def get_actions(self, observations):
        # Batch sampling is not needed for this exploration policy.
        pass

    def log_diagnostics(self, paths):
        # Nothing meaningful to log for a stateless uniform policy.
        pass

    def get_params_internal(self, **tags):
        # No trainable parameters.
        pass
class Git():
    """Thin wrapper around the git command line, executed in ``directory``."""

    def __init__(self, directory):
        self.directory = directory

    def last_commit_time(self):
        """Return the datetime of the most recent commit."""
        with TemporaryFile(mode='w+') as captured:
            with open(os.devnull, 'w') as devnull:
                Executable('git').check_call('log -r -1 --pretty="%ci"'.split(), cwd=self.directory, stdout=captured, stderr=devnull)
            captured.seek(0)
            lines = [line.replace('\n', '') for line in captured]
        # exactly one line of output is expected
        [timestamp] = lines
        return datetime.datetime.strptime(timestamp, '"%Y-%m-%d %H:%M:%S %z"')

    def config(self, key, value, apply_globally=False):
        """Set a git config key; return True when git exits successfully."""
        scope_flag = '--global' if apply_globally else '--local'
        with open(os.devnull, 'w') as devnull:
            status = Executable('git').call(['config', scope_flag, key, value], cwd=self.directory, stdout=devnull, stderr=devnull)
        return status == 0

    def is_version_controlled(self):
        """Alias for uses_git()."""
        return self.uses_git()

    def is_checked_in(self):
        """Return True when the working tree has no uncommitted changes."""
        with TemporaryFile(mode='w+') as captured:
            status = Executable('git').call('status --porcelain'.split(), cwd=self.directory, stdout=captured, stderr=captured)
            captured.seek(0)
            clean = not captured.read()
        return status == 0 and clean

    def uses_git(self):
        """Return True when the directory lives inside a git work tree."""
        with TemporaryFile(mode='w+') as captured:
            try:
                Executable('git').call('rev-parse --is-inside-work-tree'.split(), cwd=self.directory, stdout=captured, stderr=captured)
                captured.seek(0)
                return (captured.read().replace('\n', '') == 'true')
            except Exception as ex:
                logging.error(('Error trying to execute "git rev-parse --is-inside-work-tree" in %s: %s' % (self.directory, ex)))
                return False

    def tag(self, tag_string):
        """Create a git tag named ``tag_string``."""
        with open(os.devnull, 'w') as devnull:
            Executable('git').check_call(('tag %s' % tag_string).split(), cwd=self.directory, stdout=devnull, stderr=devnull)

    def get_tags(self, head_only=False):
        """List tags; with head_only=True, only tags pointing at HEAD."""
        extra_flags = ' -l --points-at HEAD' if head_only else ''
        with TemporaryFile(mode='w+') as captured:
            with open(os.devnull, 'w') as devnull:
                Executable('git').check_call(('tag' + extra_flags).split(), cwd=self.directory, stdout=captured, stderr=devnull)
            captured.seek(0)
            return [line.split()[0] for line in captured if line]

    def commit(self, message, allow_empty=False):
        """Commit all tracked changes; return True when git exits cleanly."""
        flags = ('-am "%s"' % message)
        if allow_empty:
            flags += ' --allow-empty'
        with open(os.devnull, 'w') as devnull:
            status = Executable('git').call(('commit %s' % flags).split(), cwd=self.directory, stdout=devnull, stderr=devnull)
        return status == 0
def command_server(conn):
    """Serve shell-command requests arriving on a multiprocessing connection.

    Each incoming message is a ``(cmd, cwd, verbose)`` tuple; ``cmd`` is run
    with stderr merged into stdout and the process return code is sent back
    on the same connection.  The loop exits cleanly when the peer closes its
    end of the pipe (previously this crashed with an unhandled EOFError).
    """
    while True:
        try:
            (cmd, cwd, verbose) = conn.recv()
        except EOFError:
            # peer closed the connection: shut down instead of crashing
            break
        res: subprocess.CompletedProcess = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
        if verbose:
            print((f'{cwd}$ ' + ' '.join(res.args)))
            print(res.stdout.decode(), end='')
        conn.send(res.returncode)
def parse_replay(replay_player_path, sampled_action_path, reward):
    """Convert one parsed SC2 replay into global feature states on disk.

    Reads the replay's global info, sampled frame ids, per-step actions and
    sampled observations from FLAGS.parsed_replay_path, aligns them, runs
    process_replay, and dumps the resulting states as JSON under
    GlobalFeatures.  No-op if the output file already exists.
    """
    # already processed — nothing to do
    if os.path.isfile(os.path.join(FLAGS.parsed_replay_path, 'GlobalFeatures', replay_player_path)):
        return
    with open(os.path.join(FLAGS.parsed_replay_path, 'GlobalInfos', replay_player_path)) as f:
        global_info = json.load(f)
    units_info = static_data.StaticData(Parse(global_info['data_raw'], sc_pb.ResponseData())).units
    feat = features.Features(Parse(global_info['game_info'], sc_pb.ResponseGameInfo()))
    with open(sampled_action_path) as f:
        sampled_action = json.load(f)
    # map raw frame ids to indices in the per-step action list
    sampled_action_id = [((id // FLAGS.step_mul) + 1) for id in sampled_action]
    with open(os.path.join(FLAGS.parsed_replay_path, 'Actions', replay_player_path)) as f:
        actions = json.load(f)
    # keep the first action of each sampled step, or None when the step had none
    actions = [(None if (len(actions[idx]) == 0) else Parse(actions[idx][0], sc_pb.Action())) for idx in sampled_action_id]
    observations = [obs for obs in stream.parse(os.path.join(FLAGS.parsed_replay_path, 'SampledObservations', replay_player_path), sc_pb.ResponseObservation)]
    # all four sequences must be frame-aligned before processing
    assert (len(sampled_action) == len(sampled_action_id) == len(actions) == len(observations))
    states = process_replay(sampled_action, actions, observations, feat, units_info, reward)
    with open(os.path.join(FLAGS.parsed_replay_path, 'GlobalFeatures', replay_player_path), 'w') as f:
        json.dump(states, f)
# Register this layer under the 'DCNv2' key.
# NOTE(review): looks like a decorator call missing its '@' — confirm against
# the original source.
_LAYERS.register_module('DCNv2')
class ModulatedDeformConv2dPack(ModulatedDeformConv2d):
    """Modulated deformable convolution that predicts its own offsets/masks.

    A regular conv branch (``conv_offset``) predicts, per output location,
    the sampling offsets and modulation masks consumed by the deformable
    convolution op.
    """

    # state-dict layout version; consumed by _load_from_state_dict below
    _version = 2

    def __init__(self, *args, **kwargs):
        super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs)
        # 3 channels per kernel position per deform group: two offset
        # components plus one modulation mask
        self.conv_offset = nn.Conv2d(self.in_channels, (((self.deform_groups * 3) * self.kernel_size[0]) * self.kernel_size[1]), kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, dilation=self.dilation, bias=True)
        self.init_weights()

    def init_weights(self):
        """Zero-init the offset branch so training starts as a plain conv."""
        super(ModulatedDeformConv2dPack, self).init_weights()
        if hasattr(self, 'conv_offset'):
            self.conv_offset.weight.data.zero_()
            self.conv_offset.bias.data.zero_()

    def forward(self, x):
        out = self.conv_offset(x)
        # split predictions into two offset halves and the modulation mask
        (o1, o2, mask) = torch.chunk(out, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        # squash masks into (0, 1) modulation weights
        mask = torch.sigmoid(mask)
        return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deform_groups)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Migrate version-1 checkpoints ('*_offset.*' keys) to the v2 names."""
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            # rename legacy '<name>_offset.*' keys to '<name>.conv_offset.*'
            if (((prefix + 'conv_offset.weight') not in state_dict) and ((prefix[:(- 1)] + '_offset.weight') in state_dict)):
                state_dict[(prefix + 'conv_offset.weight')] = state_dict.pop((prefix[:(- 1)] + '_offset.weight'))
            if (((prefix + 'conv_offset.bias') not in state_dict) and ((prefix[:(- 1)] + '_offset.bias') in state_dict)):
                state_dict[(prefix + 'conv_offset.bias')] = state_dict.pop((prefix[:(- 1)] + '_offset.bias'))
        if ((version is not None) and (version > 1)):
            print_log(f"ModulatedDeformConvPack {prefix.rstrip('.')} is upgraded to version 2.", logger='root')
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.