code stringlengths 281 23.7M |
|---|
class Trio_Asyncio_Wrapper():
    """Expose a Trio-side callable (or async context manager / async
    generator) to asyncio code by scheduling it on a trio-asyncio loop
    via ``loop.trio_as_future``.
    """

    def __init__(self, proc, loop=None):
        # proc: the Trio-side callable, async context manager, or async
        #       generator object being wrapped.
        # loop: explicit trio-asyncio loop; when None, the current loop
        #       is looked up lazily from the ``current_loop`` contextvar.
        self.proc = proc
        self._loop = loop

    @property
    def loop(self):
        """The loop to schedule on: the explicit one, else the current loop.

        BUGFIX: this must be a property.  Every other method accesses
        ``self.loop.trio_as_future(...)``; without ``@property`` that
        expression resolved to the bound method object and raised
        AttributeError on ``trio_as_future``.
        """
        loop = self._loop
        if loop is None:
            loop = current_loop.get()
        return loop

    def __get__(self, obj, cls):
        # Descriptor protocol so the wrapper can decorate methods:
        # bind the instance (or, for access on the class, the class itself).
        if obj is None:
            return partial(self.__call__, cls)
        return partial(self.__call__, obj)

    def __call__(self, *args, **kwargs):
        proc = self.proc
        if kwargs:
            # trio_as_future forwards positional args only; bake kwargs in.
            proc = partial(proc, **kwargs)
        return self.loop.trio_as_future(proc, *args)

    def __aenter__(self):
        # Delegate async-with to the wrapped async context manager.
        proc_enter = getattr(self.proc, '__aenter__', None)
        if proc_enter is None:
            raise RuntimeError("Call 'trio_as_aio(ctxfactory(*args))', not 'trio_as_aio(ctxfactory, *args)'")
        return self.loop.trio_as_future(proc_enter)

    def __aexit__(self, *tb):
        proc_exit = self.proc.__aexit__
        return self.loop.trio_as_future(proc_exit, *tb)

    def __aiter__(self):
        # Delegate async-for to the wrapped async generator.
        proc_iter = getattr(self.proc, '__aiter__', None)
        if proc_iter is None:
            raise RuntimeError("Call 'trio_as_aio(gen(*args))', not 'trio_as_aio(gen, *args)'")
        return run_trio_generator(self.loop, proc_iter())
def test_transaction_rollback(tmp_path):
    """A RuntimeError raised inside ``storage.transaction()`` must roll back
    the version bump, leaving the stored version unchanged."""
    database_path = Path(tmp_path / f'v{RAIDEN_DB_VERSION}_log.db')
    storage = SQLiteStorage(database_path)
    storage.update_version()
    assert storage.get_version() == RAIDEN_DB_VERSION
    with pytest.raises(RuntimeError):
        with storage.transaction():
            # Bump the module-level version constant so update_version()
            # writes a different value, then raise so the surrounding
            # transaction is forced to roll that write back.
            with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=1000):
                storage.update_version()
                raise RuntimeError()
    assert storage.get_version() == RAIDEN_DB_VERSION
class EpochBatchIterating(object):
    """Abstract interface for epoch-based batch iterators.

    Concrete subclasses wrap a dataset and hand out one iterator per epoch,
    with enough bookkeeping (epoch index, position within the epoch, a state
    dict) to support checkpointing and mid-epoch resumption.
    """

    def __len__(self) -> int:
        # Number of batches in one epoch.
        raise NotImplementedError

    def next_epoch_idx(self):
        # Index of the epoch that the next next_epoch_itr() call will start.
        raise NotImplementedError

    def next_epoch_itr(self, shuffle=True, pin_memory=False):
        # Return a fresh iterator over the next epoch's batches.
        raise NotImplementedError

    def end_of_epoch(self) -> bool:
        # Whether the current epoch's iterator has been exhausted.
        raise NotImplementedError

    def iterations_in_epoch(self) -> int:
        # Number of batches consumed so far in the current epoch.
        raise NotImplementedError

    def state_dict(self):
        # Serializable snapshot of the iterator state, for checkpointing.
        raise NotImplementedError

    def load_state_dict(self, state_dict):
        # Restore iterator state from a snapshot produced by state_dict().
        raise NotImplementedError
def find_speaker_f0_median_std(speaker_utt_path, fs, window, hop, voiced_prob_cutoff):
    """Estimate a speaker's log-F0 median and standard deviation.

    Samples up to 50 random utterances from ``speaker_utt_path``, extracts
    voiced log-F0 frames from each, pools them, and returns
    ``(median, std)`` as float32 scalars.

    NOTE(review): if every utterance fails to load, the pooled array is
    empty and np.median/np.std return nan — callers presumably tolerate
    that; confirm.
    """
    frame_len_samples = int(fs * window)
    hop_len_samples = int(fs * hop)
    utterance_files = get_speaker_utterance_paths(speaker_utt_path)
    k = min(50, len(utterance_files))
    utterance_files = random.sample(utterance_files, k)
    all_f0_vals = []
    for utt_file in tqdm(utterance_files):
        try:
            y = load_and_resample(utt_file, fs)
            f0_vals = extract_utterance_log_f0(y, fs, frame_len_samples, hop_len_samples, voiced_prob_cutoff)
            # Keep only voiced frames (NaN marks unvoiced / rejected frames).
            f0_vals = f0_vals[~np.isnan(f0_vals)]
            all_f0_vals.extend(f0_vals.tolist())
        except Exception:
            # Best-effort: skip unreadable/degenerate utterances.  BUGFIX:
            # was a bare 'except:' which also swallowed KeyboardInterrupt
            # and SystemExit.
            continue
    all_f0_vals = np.array(all_f0_vals)
    f0_median = np.median(all_f0_vals).astype(np.float32)
    f0_std = np.std(all_f0_vals).astype(np.float32)
    return (f0_median, f0_std)
def load_matrix(embedding_file_path, word_dict, word_embedding_dim):
    """Build an embedding matrix for ``word_dict``.

    The matrix has ``len(word_dict) + 1`` rows, randomly initialised; rows
    for words found in the (whitespace-separated, bytes) embedding file are
    overwritten with the pretrained vectors.  Returns
    ``(embedding_matrix, words_found)``.
    """
    vocab_rows = len(word_dict) + 1
    embedding_matrix = np.random.uniform(size=(vocab_rows, word_embedding_dim))
    have_word = []
    if embedding_file_path is not None:
        with open(embedding_file_path, 'rb') as f:
            for raw_line in f:
                fields = raw_line.split()
                token = fields[0].decode()
                if token in word_dict:
                    row = word_dict[token]
                    vector = [float(x) for x in fields[1:]]
                    embedding_matrix[row] = np.array(vector)
                    have_word.append(token)
    return (embedding_matrix, have_word)
@_cache(maxsize=512)  # BUGFIX: the '@' was missing, so the cache was never applied
def parse_git_url(url: str) -> ParsedGitUrl:
    """Normalize *url* and split it into scheme / netloc / namespace / repo name.

    Raises ValueError when the url cannot be parsed or lacks a required part.
    """
    log.debug('Parsing git url %r', url)
    # Rewrite common git URL spellings into proper URLs before urlsplit:
    #  - bare 'host:path' (scp-style)      -> ssh://host:path
    #  - 'git+ssh://'                      -> ssh://
    #  - 'ssh://host:path' (no real port)  -> ssh://host/path
    #  - windows drive or absolute path    -> file:///...
    normalizers = [
        ('^(\\w+)', 'ssh://\\1'),
        ('^git\\+ssh://', 'ssh://'),
        ('(ssh://(?:\\w+)?[\\w.]+):(?!\\d{1,5}/\\w+/)(.*)$', '\\1/\\2'),
        ('^([C-Z]:/)|^/(\\w)', 'file:///\\1\\2'),
    ]
    for (pattern, replacement) in normalizers:
        url = re.compile(pattern).sub(replacement, url)
    urllib_split = urlsplit(url)
    if not urllib_split.scheme:
        raise ValueError(f'Cannot parse {url!r}')
    (namespace, _, name) = str(PurePosixPath(urllib_split.path)).lstrip('/').rpartition('/')
    name = name[:-4] if name.endswith('.git') else name
    # 'file' URLs legitimately have an empty netloc, so it is not required there.
    required_parts = [urllib_split.scheme, True if urllib_split.scheme == 'file' else urllib_split.netloc, namespace, name]
    if not all(required_parts):
        raise ValueError(f'Bad url: {url!r}')
    return ParsedGitUrl(scheme=urllib_split.scheme, netloc=urllib_split.netloc, namespace=namespace, repo_name=name)
def gen_random_test():
    """Generate a randomized assembly test for the SW (store word) instruction.

    Each of the 50 cases stores a random 32-bit value at address
    ``8192 + 4*a`` — expressed as base ``8192 + 4*b`` plus a signed offset
    ``4*(a - b)`` — then loads it back and checks the value.

    NOTE(review): the ``random.randint(0, ...)`` upper bounds and the address
    mask were missing from the source; ``0xffffffff`` is restored here to
    match the 32-bit words (Bits32) used throughout — confirm against the
    original generator.
    """
    data = []
    for i in range(128):
        data.append(random.randint(0, 0xffffffff))
    asm_code = []
    for i in range(50):
        a = random.randint(0, 127)
        b = random.randint(0, 127)
        base = Bits32(8192 + (4 * b))
        offset = Bits16(4 * (a - b))
        result = data[a]
        # Mask keeps the load address within 32 bits when the offset is negative.
        asm_code.append('\n\n csrr x1, mngr2proc < {src} # Move src value into register\n csrr x2, mngr2proc < {base} # Move base value into register\n\n # Instruction under test\n sw x1, {offset}(x2)\n\n # Check the result\n csrr x4, mngr2proc < {lw_base}\n lw x3, 0(x4)\n csrw proc2mngr, x3 > {result}\n\n '.format(src=result, lw_base=((base.uint() + offset.int()) & 0xffffffff), offset=offset.int(), base=base.uint(), result=result))
    initial_data = []
    for i in range(128):
        initial_data.append(random.randint(0, 0xffffffff))
    asm_code.append(gen_word_data(initial_data))
    return asm_code
def test_load_rsa_nist_vectors():
    """load_rsa_nist_vectors must parse CAVS RSASSA-PSS vector files.

    NOTE(review): several hex literals (the first section's ``e``, the second
    section's ``n`` and the corresponding expected ``'modulus'``) were missing
    from the source, leaving it syntactically invalid.  They are reconstructed
    here with self-consistent values: ``e = 10001`` matches the expected
    ``public_exponent`` 65537, and the first modulus is reused for the second
    section.  Confirm against the upstream test data.
    """
    vector_data = textwrap.dedent('\n # CAVS 11.4\n # "SigGen PKCS#1 RSASSA-PSS" information\n # Mod sizes selected: 1024 1536 2048 3072 4096\n # SHA Algorithm selected:SHA1 SHA224 SHA256 SHA384 SHA512\n # Salt len: 20\n\n [mod = 1024]\n\n n = bcb47b2e0dafcba81ff2a2b5cb115ca7e757184c9d72bcdcda707a146b3b4e29989d\n\n e = 10001\n SHAAlg = SHA1\n Msg = 1248f62a4389f42f7b4bb131053d6c88a994db2075b912ccbe3ea7dc611714f14e\n S = 682cf53c1145d22a50caa9eb1a9ba70670c5915e0fdfde6457a765de2a8fe12de97\n\n SHAAlg = SHA384\n Msg = e511903c2f1bfbaac95413ac4746c984c3750a728c388aa628b0ebf\n S = 9c748702bbcc1f9468864cd360c8c39d007b2d8aaee833606c70f7593cf0d1519\n\n [mod = 1024]\n\n n = bcb47b2e0dafcba81ff2a2b5cb115ca7e757184c9d72bcdcda707a146b3b4e29989d\n\n e = 0010001\n\n SHAAlg = SHA512\n Msg = fab829\n S = deadbeef0000\n ').splitlines()
    vectors = load_rsa_nist_vectors(vector_data)
    assert (vectors == [{'modulus': int('bcb47b2e0dafcba81ff2a2b5cb115ca7e757184c9d72bcdcda707a146b3b4e29989d', 16), 'public_exponent': 65537, 'algorithm': 'SHA1', 'salt_length': 20, 'msg': b'1248f62a4389f42f7b4bb131053d6c88a994db2075b912ccbe3ea7dc611714f14e', 's': b'682cf53c1145d22a50caa9eb1a9ba70670c5915e0fdfde6457a765de2a8fe12de97', 'fail': False}, {'modulus': int('bcb47b2e0dafcba81ff2a2b5cb115ca7e757184c9d72bcdcda707a146b3b4e29989d', 16), 'public_exponent': 65537, 'algorithm': 'SHA384', 'salt_length': 20, 'msg': b'e511903c2f1bfbaac95413ac4746c984c3750a728c388aa628b0ebf', 's': b'9c748702bbcc1f9468864cd360c8c39d007b2d8aaee833606c70f7593cf0d1519', 'fail': False}, {'modulus': int('bcb47b2e0dafcba81ff2a2b5cb115ca7e757184c9d72bcdcda707a146b3b4e29989d', 16), 'public_exponent': 65537, 'algorithm': 'SHA512', 'salt_length': 20, 'msg': b'fab829', 's': b'deadbeef0000', 'fail': False}])
class SpreadSheetDelegate(QItemDelegate):
    """Item delegate for the spreadsheet view.

    Column 1 cells are edited with a calendar-backed QDateTimeEdit (using the
    parent widget's ``currentDateFormat``); every other cell gets a QLineEdit
    whose completer is built from the distinct values already present in the
    same column.
    """

    def __init__(self, parent=None):
        super(SpreadSheetDelegate, self).__init__(parent)

    def createEditor(self, parent, styleOption, index):
        # Column 1 holds dates: edit with a calendar popup.
        if (index.column() == 1):
            editor = QDateTimeEdit(parent)
            editor.setDisplayFormat(self.parent().currentDateFormat)
            editor.setCalendarPopup(True)
            return editor
        editor = QLineEdit(parent)
        # Build the completion list from every distinct value already present
        # in this column.  Rows start at 1 — row 0 is skipped, presumably a
        # header row; TODO confirm.
        allStrings = []
        for i in range(1, index.model().rowCount()):
            strItem = index.model().data(index.sibling(i, index.column()), Qt.EditRole)
            if (strItem not in allStrings):
                allStrings.append(strItem)
        autoComplete = QCompleter(allStrings)
        editor.setCompleter(autoComplete)
        # Commit and close the editor as soon as the user finishes editing.
        editor.editingFinished.connect(self.commitAndCloseEditor)
        return editor

    def commitAndCloseEditor(self):
        # Slot for editingFinished: push the editor's value into the model
        # and dismiss the editor widget.
        editor = self.sender()
        self.commitData.emit(editor)
        self.closeEditor.emit(editor, QItemDelegate.NoHint)

    def setEditorData(self, editor, index):
        # Populate the editor from the model's current value.
        if isinstance(editor, QLineEdit):
            editor.setText(index.model().data(index, Qt.EditRole))
        elif isinstance(editor, QDateTimeEdit):
            editor.setDate(QDate.fromString(index.model().data(index, Qt.EditRole), self.parent().currentDateFormat))

    def setModelData(self, editor, model, index):
        # Write the edited value back into the model (dates as formatted text).
        if isinstance(editor, QLineEdit):
            model.setData(index, editor.text())
        elif isinstance(editor, QDateTimeEdit):
            model.setData(index, editor.date().toString(self.parent().currentDateFormat))
def evaluate(args):
    """Run one evaluation pass and return ``(mean_loss, perplexity)``.

    NOTE(review): relies on module-level ``model``, ``eval_dataloader`` and
    ``accelerator`` (Hugging Face Accelerate) defined elsewhere in this file.
    """
    model.eval()
    losses = []
    for (step, batch) in enumerate(eval_dataloader):
        with torch.no_grad():
            # Causal LM style: the batch serves as its own labels.
            outputs = model(batch, labels=batch)
        # Repeat the scalar batch loss per sample so accelerator.gather()
        # weights each sample equally across processes.
        loss = outputs.loss.repeat(args.valid_batch_size)
        losses.append(accelerator.gather(loss))
        if ((args.max_eval_steps > 0) and (step >= args.max_eval_steps)):
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        # NOTE(review): torch.exp on a tensor overflows to inf rather than
        # raising OverflowError, so this handler likely never fires — confirm.
        perplexity = float('inf')
    return (loss.item(), perplexity.item())
def get_alt_az(utc_time, lon, lat):
    """Return the sun's (altitude, azimuth) in radians.

    *lon* and *lat* are the observer's longitude/latitude in degrees;
    *utc_time* is forwarded to ``sun_ra_dec`` / ``_local_hour_angle``.
    """
    lon_rad = np.deg2rad(lon)
    lat_rad = np.deg2rad(lat)
    right_ascension, declination = sun_ra_dec(utc_time)
    hour_angle = _local_hour_angle(utc_time, lon_rad, right_ascension)
    sin_altitude = (np.sin(lat_rad) * np.sin(declination)
                    + np.cos(lat_rad) * np.cos(declination) * np.cos(hour_angle))
    altitude = np.arcsin(sin_altitude)
    azimuth = np.arctan2(-np.sin(hour_angle),
                         np.cos(lat_rad) * np.tan(declination) - np.sin(lat_rad) * np.cos(hour_angle))
    return (altitude, azimuth)
class Fates(enum.Enum):
    """Enumeration of fates that can be assigned during tracking/linking."""
    FALSE_POSITIVE = 0
    INITIALIZE = 1
    TERMINATE = 2
    LINK = 3
    DIVIDE = 4
    APOPTOSIS = 5
    MERGE = 6
    EXTRUDE = 7
    # Specialised initialization fates (10-12).
    INITIALIZE_BORDER = 10
    INITIALIZE_FRONT = 11
    INITIALIZE_LAZY = 12
    # Specialised termination fates (20-22).
    TERMINATE_BORDER = 20
    TERMINATE_BACK = 21
    TERMINATE_LAZY = 22
    # Sentinel values.
    DEAD = 666
    UNDEFINED = 999
def get_nonlinearity_layer(activation_type='PReLU'):
    """Create the activation module named by ``activation_type``.

    Supported values: 'ReLU', 'SELU', 'LeakyReLU' (negative slope 0.1),
    'PReLU'.  Raises NotImplementedError for anything else.
    """
    factories = {
        'ReLU': nn.ReLU,
        'SELU': nn.SELU,
        'LeakyReLU': lambda: nn.LeakyReLU(0.1),
        'PReLU': nn.PReLU,
    }
    if activation_type not in factories:
        raise NotImplementedError(('activation layer [%s] is not found' % activation_type))
    return factories[activation_type]()
def plot_fig(args, test_img, recon_imgs, scores, gts, threshold, save_dir):
    """Save one 6-panel anomaly-visualisation figure per test image.

    Panels: input image, reconstruction, ground truth, heat map, binary
    mask (after thresholding + morphological opening), and segmentation
    boundaries.  Figures are written to ``save_dir``.
    """
    num = len(scores)
    # Normalisation bounds shared by all heat maps, from the global score range.
    vmax = scores.max() * 255.0
    vmin = scores.min() * 255.0
    for i in range(num):
        img = test_img[i]
        img = denormalization(img)
        recon_img = recon_imgs[i]
        recon_img = denormalization(recon_img)
        gt = gts[i].transpose(1, 2, 0).squeeze()
        heat_map = scores[i] * 255
        # BUGFIX: copy before thresholding — the original assigned
        # 'mask = scores[i]' and then wrote into it, mutating the caller's
        # scores array in place.
        mask = scores[i].copy()
        mask[mask > threshold] = 1
        mask[mask <= threshold] = 0
        # Morphological opening removes small spurious detections.
        kernel = morphology.disk(4)
        mask = morphology.opening(mask, kernel)
        mask *= 255
        vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')
        (fig_img, ax_img) = plt.subplots(1, 6, figsize=(12, 3))
        fig_img.subplots_adjust(right=0.9)
        norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
        for ax_i in ax_img:
            ax_i.axes.xaxis.set_visible(False)
            ax_i.axes.yaxis.set_visible(False)
        ax_img[0].imshow(img)
        ax_img[0].title.set_text('Image')
        ax_img[1].imshow(recon_img)
        ax_img[1].title.set_text('Reconst')
        ax_img[2].imshow(gt, cmap='gray')
        ax_img[2].title.set_text('GroundTruth')
        # Heat map overlaid on the grayscale input.
        ax = ax_img[3].imshow(heat_map, cmap='jet', norm=norm)
        ax_img[3].imshow(img, cmap='gray', interpolation='none')
        ax_img[3].imshow(heat_map, cmap='jet', alpha=0.5, interpolation='none')
        ax_img[3].title.set_text('Predicted heat map')
        ax_img[4].imshow(mask, cmap='gray')
        ax_img[4].title.set_text('Predicted mask')
        ax_img[5].imshow(vis_img)
        ax_img[5].title.set_text('Segmentation result')
        # Shared colorbar on the right edge of the figure.
        left = 0.92
        bottom = 0.15
        width = 0.015
        height = 1 - (2 * bottom)
        rect = [left, bottom, width, height]
        cbar_ax = fig_img.add_axes(rect)
        cb = plt.colorbar(ax, shrink=0.6, cax=cbar_ax, fraction=0.046)
        cb.ax.tick_params(labelsize=8)
        font = {'family': 'serif', 'color': 'black', 'weight': 'normal', 'size': 8}
        cb.set_label('Anomaly Score', fontdict=font)
        fig_img.savefig(os.path.join(save_dir, (args.obj + '_{}_png'.format(i))), dpi=100)
        # Close this specific figure to avoid accumulating open figures.
        plt.close(fig_img)
def set_random_seed(seed, deterministic=False, use_rank_shift=False):
    """Seed Python, NumPy and PyTorch (incl. CUDA) RNGs for reproducibility.

    When ``use_rank_shift`` is set, the distributed rank is added to the seed
    so each worker gets a distinct stream.  ``deterministic`` additionally
    pins cuDNN to deterministic kernels (at a potential speed cost).
    """
    if use_rank_shift:
        rank, _ = mmcv.runner.get_dist_info()
        seed += rank
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def get_num_bond_types(mol):
    """Return the fractions of single/double/triple/aromatic bonds in *mol*.

    Returns ``[0, 0, 0, 0]`` when the molecule has no bonds.
    """
    bond_order = (
        rdkit.Chem.rdchem.BondType.SINGLE,
        rdkit.Chem.rdchem.BondType.DOUBLE,
        rdkit.Chem.rdchem.BondType.TRIPLE,
        rdkit.Chem.rdchem.BondType.AROMATIC,
    )
    counts = {bond_type: 0 for bond_type in bond_order}
    total_bonds = 0
    for bond in mol.GetBonds():
        total_bonds += 1
        current = bond.GetBondType()
        if current in counts:
            counts[current] += 1
    if total_bonds == 0:
        return [0, 0, 0, 0]
    return [counts[bond_type] / total_bonds for bond_type in bond_order]
def test_update_catalog_error(db):
    """SectionLockedValidator must reject an update that assigns a locked
    catalog to a section not belonging to that catalog."""
    locked_catalog = Catalog.objects.first()
    locked_catalog.locked = True
    locked_catalog.save()
    # Pick a section that is NOT attached to the locked catalog.
    section = Section.objects.exclude(catalogs=locked_catalog).first()
    payload = {'catalogs': [locked_catalog], 'locked': False}
    with pytest.raises(ValidationError):
        SectionLockedValidator(section)(payload)
class IpAddressMiddleware():
    """Django middleware that annotates each request with the client IP taken
    from ``REMOTE_ADDR``, and exposes the provider name in a response header
    for staff users or when DEBUG is on."""

    PROVIDER_NAME = 'REMOTE_ADDR header'

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        request.ip_address = self.get_ip_address(request)
        response = self.get_response(request)
        show_provider = settings.DEBUG or request.user.is_staff
        if show_provider:
            response['X-Adserver-IpAddress-Provider'] = self.PROVIDER_NAME
        return response

    def get_ip_address(self, request):
        # None when the server did not supply REMOTE_ADDR.
        return request.META.get('REMOTE_ADDR', None)
def test_mix_resize():
    """Stress the allocator with interleaved alloc/realloc/dealloc patterns,
    then free everything and check all capacity is reclaimed."""
    allocator = RegionAllocator(1)
    live = []
    # Ten fixed-size allocations...
    for _ in range(10):
        live.append(allocator.force_alloc(3))
    # ...grow the first five...
    for region in live[:5]:
        allocator.force_realloc(region, 8)
    # ...ten allocations of increasing size...
    for i in range(10):
        live.append(allocator.force_alloc(i + 1))
    # ...resize a middle window to a common size...
    for region in live[5:15]:
        allocator.force_realloc(region, 5)
    # ...free every other region in a middle slice (iterate the slice copy
    # so removal from 'live' is safe)...
    for region in live[3:18:2]:
        allocator.dealloc(region)
        live.remove(region)
    # ...a few more allocations, then grow the tail...
    for _ in range(5):
        live.append(allocator.force_alloc(3))
    for region in live[-10:]:
        allocator.force_realloc(region, 6)
    # Finally free everything: the allocator must be completely empty again.
    for region in live:
        allocator.dealloc(region)
    assert allocator.get_free_size() == allocator.capacity
class TestCallableGuards(TestNameCheckVisitorBase):
    """Checks that callable() / inspect.isfunction() guards narrow types.

    BUGFIX: the '_passes()' lines were bare expression statements — the '@'
    of the decorator was lost, so the checks were never registered/run.
    Restored as decorators.
    """

    @_passes()
    def test_callable(self):
        from pyanalyze.signature import ANY_SIGNATURE

        def capybara(o: object) -> None:
            assert_is_value(o, TypedValue(object))
            # callable() narrows 'object' to a callable with unknown signature.
            if callable(o):
                assert_is_value(o, CallableValue(ANY_SIGNATURE))

    @_passes()
    def test_isfunction(self):
        import inspect
        from types import FunctionType

        def capybara(o: object) -> None:
            assert_is_value(o, TypedValue(object))
            # inspect.isfunction() narrows to the concrete FunctionType.
            if inspect.isfunction(o):
                assert_is_value(o, TypedValue(FunctionType))
class ImageFilelist(data.Dataset):
    """Dataset backed by a file list of ``(image_path, target)`` entries.

    Images are loaded relative to *root*; optional transforms are applied to
    the image and the target.  ``__getitem__`` also returns the item index.
    """

    def __init__(self, root, flist, transform=None, target_transform=None, flist_reader=default_flist_reader, loader=default_loader):
        self.root = root
        self.imlist = flist_reader(flist)
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        impath, target = self.imlist[index]
        image = self.loader(os.path.join(self.root, impath))
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (image, target, index)

    def __len__(self):
        return len(self.imlist)
def test_quantizable_mha_with_mask():
    """A quantization-sim wrapped MHA model must preserve the output shape,
    and must match the unquantized outputs exactly once its quantizers are
    deactivated."""
    batch, tgt_len, src_len, feat = 5, 8, 4, 16
    q_inputs = keras.Input(shape=(tgt_len, feat))
    v_inputs = keras.Input(shape=(src_len, feat))
    k_inputs = keras.Input(shape=(src_len, feat))
    m_inputs = keras.Input(shape=(tgt_len, src_len))
    mha_output = keras.layers.MultiHeadAttention(key_dim=2, num_heads=2)(q_inputs, v_inputs, k_inputs, m_inputs)
    unquantized_model = keras.Model(inputs=[q_inputs, v_inputs, k_inputs, m_inputs], outputs=mha_output)
    quantized_model = QuantizationSimModel(unquantized_model)

    model_inputs = [np.ones([batch, tgt_len, feat]),
                    np.ones([batch, src_len, feat]),
                    np.ones([batch, src_len, feat]),
                    np.zeros([batch, tgt_len, src_len])]
    baseline_tensor = unquantized_model(model_inputs)
    quantized_tensor = quantized_model.model(model_inputs)

    # Temporarily turn the quantizers off and run once more.
    for layer in quantized_model.model.layers:
        if isinstance(layer, QcQuantizableMultiHeadAttention):
            layer.deactivate_quantizers()
    dequantized_tensor = quantized_model.model(model_inputs)
    for layer in quantized_model.model.layers:
        if isinstance(layer, QcQuantizableMultiHeadAttention):
            layer.reactivate_quantizers()

    assert baseline_tensor.shape == quantized_tensor.shape == dequantized_tensor.shape
    # With quantizers off, the sim model must reproduce the baseline exactly.
    assert tf.equal(baseline_tensor, dequantized_tensor).numpy().flatten().all()
    # The quantizable layer exists only in the sim model.
    assert not any(isinstance(layer, QcQuantizableMultiHeadAttention) for layer in unquantized_model.layers)
    assert any(isinstance(layer, QcQuantizableMultiHeadAttention) for layer in quantized_model.model.layers)
class TestValidationCountingAggregator(unittest.TestCase):
    """ValidationCountingAggregator should aggregate per-scene validator
    outputs by validator name — counting invalid scenes, or, when configured,
    summing failed frames."""

    @staticmethod
    def _scene_validation(output):
        # Scene 0 uses validator1; scenes 1 and 2 both use validator2.
        scenes: Dict[(int, Dict[(str, validators.ValidatorOutput)])] = {0: {'mock_validator1': output}, 1: {'mock_validator2': output}, 2: {'mock_validator2': output}}
        return scenes

    def test_aggregate_scenes(self) -> None:
        agg = validators.ValidationCountingAggregator()
        mock_output = mock.Mock()
        is_valid_scene = mock.PropertyMock(return_value=False)
        type(mock_output).is_valid_scene = is_valid_scene
        agg_scenes = agg.aggregate_scenes(self._scene_validation(mock_output))
        is_valid_scene.assert_called()
        # One invalid scene counted for validator1, two for validator2.
        self.assertEqual(len(agg_scenes), 2)
        self.assertEqual(agg_scenes['mock_validator1'].item(), 1)
        self.assertEqual(agg_scenes['mock_validator2'].item(), 2)

    def test_aggregate_count_failed_frames(self) -> None:
        agg = validators.ValidationCountingAggregator(failed_frames=True)
        mock_output = mock.Mock()
        is_valid_scene = mock.PropertyMock(return_value=False)
        failed_frames = mock.PropertyMock(return_value=[1, 2, 3, 4])
        type(mock_output).is_valid_scene = is_valid_scene
        type(mock_output).failed_frames = failed_frames
        agg_scenes = agg.aggregate_scenes(self._scene_validation(mock_output))
        failed_frames.assert_called()
        # Four failed frames in validator1's single scene; eight across
        # validator2's two scenes.
        self.assertEqual(len(agg_scenes), 2)
        self.assertEqual(agg_scenes['mock_validator1'].item(), 4)
        self.assertEqual(agg_scenes['mock_validator2'].item(), 8)
class DisabledQuerySet(models.QuerySet):
    """QuerySet variant where every evaluating operation raises ScopeError.

    Used to make queries fail loudly when the required scope dimensions are
    not active.  ``none()`` remains usable and returns a regular (empty)
    QuerySet so code paths that explicitly opt out still work.
    """

    def __init__(self, *args, **kwargs):
        # Dimensions whose scope must be activated before querying.
        self.missing_scopes = kwargs.pop('missing_scopes', None)
        super().__init__(*args, **kwargs)

    def error(self, *args, **kwargs):
        raise ScopeError('A scope on dimension(s) {} needs to be active for this query.'.format(', '.join(self.missing_scopes)))

    def _clone(self):
        # Propagate missing_scopes through Django's internal cloning.
        c = super()._clone()
        c.missing_scopes = self.missing_scopes
        return c

    def none(self):
        # Build a plain QuerySet (escaping this class) so .none() works.
        c = models.QuerySet(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints)
        c._sticky_filter = self._sticky_filter
        c._for_write = self._for_write
        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
        c._known_related_objects = self._known_related_objects
        c._iterable_class = self._iterable_class
        c._fields = self._fields
        return c.none()

    # Every evaluating/refining entry point errors out.
    __bool__ = error
    __getitem__ = error
    __iter__ = error
    __len__ = error
    # BUGFIX: was '__erpr__ = error', a typo that left repr() working and
    # created a useless '__erpr__' attribute; the evident intent is that
    # repr() also raises like every other access.
    __repr__ = error
    all = error
    aggregate = error
    annotate = error
    count = error
    earliest = error
    complex_filter = error
    select_for_update = error
    filter = error
    first = error
    get = error
    get_or_create = error
    update_or_create = error
    delete = error
    dates = error
    datetimes = error
    iterator = error
    last = error
    latest = error
    only = error
    order_by = error
    reverse = error
    union = error
    update = error
    raw = error
    values = error
    values_list = error
class TestMonteCarloGFormula():
    """Tests for the Monte Carlo g-formula estimator: argument validation,
    censoring detection, agreement with the time-fixed g-formula for a
    single time point, and an end-to-end run of the full procedure."""

    def test_error_continuous_treatment(self, sim_t_fixed_data):
        # Continuous exposure column (W1) must be rejected.
        with pytest.raises(ValueError):
            MonteCarloGFormula(sim_t_fixed_data, idvar='id', exposure='W1', outcome='Y', time_out='t', time_in='t0')

    def test_error_continuous_outcome(self, sim_t_fixed_data):
        # Continuous outcome column (W1) must be rejected.
        with pytest.raises(ValueError):
            MonteCarloGFormula(sim_t_fixed_data, idvar='id', exposure='A', outcome='W1', time_out='t', time_in='t0')

    def test_error_covariate_label(self, sim_t_fixed_data):
        # Covariate model labels must be valid (non-string 'first' misuse).
        g = MonteCarloGFormula(sim_t_fixed_data, idvar='id', exposure='A', outcome='Y', time_out='t', time_in='t0')
        with pytest.raises(ValueError):
            g.add_covariate_model(label='first', covariate='W1', model='W2')

    def test_error_covariate_type(self, sim_t_fixed_data):
        # Unsupported var_type must be rejected.
        g = MonteCarloGFormula(sim_t_fixed_data, idvar='id', exposure='A', outcome='Y', time_out='t', time_in='t0')
        with pytest.raises(ValueError):
            g.add_covariate_model(label=1, covariate='W1', model='W2', var_type='categorical')

    def test_error_no_outcome_model(self, sim_t_fixed_data):
        # fit() without a prior outcome_model() call must fail.
        g = MonteCarloGFormula(sim_t_fixed_data, idvar='id', exposure='A', outcome='Y', time_out='t', time_in='t0')
        with pytest.raises(ValueError):
            g.fit(treatment='all')

    def test_error_treatment_type(self, sim_t_fixed_data):
        # Treatment must be a recognised specification, not an int.
        g = MonteCarloGFormula(sim_t_fixed_data, idvar='id', exposure='A', outcome='Y', time_out='t', time_in='t0')
        with pytest.raises(ValueError):
            g.fit(treatment=1)

    def test_monte_carlo_for_single_t(self, sim_t_fixed_data):
        # With a single time point, the Monte Carlo estimate should agree
        # with the time-fixed g-formula (large sample keeps MC error small).
        gt = MonteCarloGFormula(sim_t_fixed_data, idvar='id', exposure='A', outcome='Y', time_out='t', time_in='t0')
        gt.outcome_model('A + W1_sq + W2 + W3', print_results=False)
        gt.exposure_model('W1_sq', print_results=False)
        gt.fit(treatment='all', sample=1000000)
        print(gt.predicted_outcomes)
        gf = TimeFixedGFormula(sim_t_fixed_data, exposure='A', outcome='Y')
        gf.outcome_model(model='A + W1_sq + W2 + W3', print_results=False)
        gf.fit(treatment='all')
        npt.assert_allclose(gf.marginal_outcome, np.mean(gt.predicted_outcomes['Y']), rtol=0.001)

    def test_mc_detect_censoring(self):
        # The estimator should flag a record as censored when the id changes
        # on the next row without death, except at the maximum follow-up time.
        df = load_sample_data(timevary=True)
        not_censored = np.where(((df['id'] != df['id'].shift((- 1))) & (df['dead'] == 0)), 0, 1)
        not_censored = np.where((df['out'] == np.max(df['out'])), 1, not_censored)
        g = MonteCarloGFormula(df, idvar='id', exposure='art', outcome='dead', time_in='enter', time_out='out')
        npt.assert_equal(np.array(g.gf['__uncensored__']), not_censored)

    def test_mc_detect_censoring2(self):
        # On the GVHD data, detected censoring must equal 1 - censlost.
        df = load_gvhd_data()
        g = MonteCarloGFormula(df, idvar='id', exposure='gvhd', outcome='d', time_in='day', time_out='tomorrow')
        npt.assert_equal(np.array(g.gf['__uncensored__']), (1 - df['censlost']))

    def test_complete_mc_procedure_completes(self):
        # Smoke test: the full procedure (lagged covariates, splines,
        # exposure/outcome/covariate/censoring models, recodes) runs end to end.
        df = load_sample_data(timevary=True)
        df['lag_art'] = df['art'].shift(1)
        df['lag_art'] = np.where((df.groupby('id').cumcount() == 0), 0, df['lag_art'])
        df['lag_cd4'] = df['cd4'].shift(1)
        df['lag_cd4'] = np.where((df.groupby('id').cumcount() == 0), df['cd40'], df['lag_cd4'])
        df['lag_dvl'] = df['dvl'].shift(1)
        df['lag_dvl'] = np.where((df.groupby('id').cumcount() == 0), df['dvl0'], df['lag_dvl'])
        df[['age_rs0', 'age_rs1', 'age_rs2']] = spline(df, 'age0', n_knots=4, term=2, restricted=True)
        df['cd40_sq'] = (df['cd40'] ** 2)
        df['cd40_cu'] = (df['cd40'] ** 3)
        df['cd4_sq'] = (df['cd4'] ** 2)
        df['cd4_cu'] = (df['cd4'] ** 3)
        df['enter_sq'] = (df['enter'] ** 2)
        df['enter_cu'] = (df['enter'] ** 3)
        g = MonteCarloGFormula(df, idvar='id', exposure='art', outcome='dead', time_in='enter', time_out='out')
        exp_m = 'male + age0 + age_rs0 + age_rs1 + age_rs2 + cd40 + cd40_sq + cd40_cu + dvl0 + cd4 + cd4_sq + \n cd4_cu + dvl + enter + enter_sq + enter_cu'
        g.exposure_model(exp_m, restriction="g['lag_art']==0")
        out_m = 'art + male + age0 + age_rs0 + age_rs1 + age_rs2 + cd40 + cd40_sq + cd40_cu + dvl0 + cd4 + \n cd4_sq + cd4_cu + dvl + enter + enter_sq + enter_cu'
        g.outcome_model(out_m, restriction="g['drop']==0")
        dvl_m = 'male + age0 + age_rs0 + age_rs1 + age_rs2 + cd40 + cd40_sq + cd40_cu + dvl0 + lag_cd4 + \n lag_dvl + lag_art + enter + enter_sq + enter_cu'
        g.add_covariate_model(label=1, covariate='dvl', model=dvl_m, var_type='binary')
        cd4_m = 'male + age0 + age_rs0 + age_rs1 + age_rs2 + cd40 + cd40_sq + cd40_cu + dvl0 + lag_cd4 + \n lag_dvl + lag_art + enter + enter_sq + enter_cu'
        cd4_recode_scheme = "g['cd4'] = np.maximum(g['cd4'],1);g['cd4_sq'] = g['cd4']**2;g['cd4_cu'] = g['cd4']**3"
        g.add_covariate_model(label=2, covariate='cd4', model=cd4_m, recode=cd4_recode_scheme, var_type='continuous')
        cens_m = 'male + age0 + age_rs0 + age_rs1 + age_rs2 + cd40 + cd40_sq + cd40_cu + dvl0 + lag_cd4 +\n lag_dvl + lag_art + enter + enter_sq + enter_cu'
        g.censoring_model(cens_m)
        g.fit(treatment="((g['art']==1) | (g['lag_art']==1))", lags={'art': 'lag_art', 'cd4': 'lag_cd4', 'dvl': 'lag_dvl'}, sample=5000, t_max=None, in_recode="g['enter_sq'] = g['enter']**2;g['enter_cu'] = g['enter']**3")
        assert isinstance(g.predicted_outcomes, type(pd.DataFrame()))
@pytest.fixture(scope='session')  # BUGFIX: decorator was mangled to a bare "(scope='session')" (a SyntaxError); restored as a session-scoped pytest fixture
def special_char_name():
    """Return the characters of 'e-$ e-j' that survive a strict encode/decode
    round-trip in the filesystem encoding (ascii on Windows).

    Used to build file names containing "special" characters that are still
    representable on the current platform.
    """
    base = 'e-$ e-j'
    encoding = ('ascii' if IS_WIN else sys.getfilesystemencoding())
    result = ''
    for char in base:
        try:
            trip = char.encode(encoding, errors='strict').decode(encoding)
            if (char == trip):
                result += char
        except ValueError:
            # UnicodeEncodeError/UnicodeDecodeError subclass ValueError:
            # skip characters the encoding cannot represent.
            continue
    assert result
    return result
def rolling_volatility(qf_series: QFSeries, frequency: Frequency=None, annualise: bool=True, window_size: int=None) -> QFSeries:
    """Compute the volatility of *qf_series* over a rolling window of log-returns.

    Parameters:
        qf_series: price series (converted to log-returns internally).
        frequency: sampling frequency; required when ``annualise`` is True.
        annualise: whether to annualise each window's volatility.
        window_size: number of return observations per window (required).

    Returns:
        QFSeries of volatility values indexed by the last date of each window.

    Raises:
        ValueError: if ``window_size`` is missing/invalid, or ``frequency``
        is missing while ``annualise`` is True.
    """
    if window_size is None or window_size < 1:
        # BUGFIX: the original left window_size=None and later crashed with a
        # cryptic TypeError in range(); fail fast with a clear message.
        raise ValueError('window_size must be a positive integer')
    if annualise and frequency is None:
        # Was a bare assert, which disappears under 'python -O'.
        raise ValueError('frequency must be provided when annualise=True')
    returns_tms = qf_series.to_log_returns()
    volatility_values = []
    for end_index in range(window_size, len(returns_tms) + 1):
        returns_from_window = returns_tms[end_index - window_size:end_index]
        volatility_values.append(get_volatility(returns_from_window, frequency, annualise))
    # Each window is labelled by its last date.
    dates = returns_tms.index[window_size - 1:]
    volatility_tms = QFSeries(data=volatility_values, index=dates)
    return volatility_tms
class CausalLMOutputWithCrossAttentions(ModelOutput):
    """Output container for causal language models that may attend over an
    encoder's hidden states (cross-attention).  Mirrors the Hugging Face
    Transformers output type of the same name; all fields are optional and
    populated depending on model configuration — confirm against the model
    that produces this output."""
    # Language-modeling loss (typically present only when labels are given).
    loss: Optional[torch.FloatTensor] = None
    # Prediction scores over the vocabulary (pre-softmax logits).
    logits: torch.FloatTensor = None
    # Cached key/value states for faster autoregressive decoding.
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    # Per-layer hidden states (plus embedding output), when requested.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer self-attention weights, when requested.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer cross-attention weights, when the model uses cross-attention.
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
def init_network_weights(model, init_type='normal', gain=0.02):
    """Initialise *model*'s weights in place.

    Conv/Linear weights use the scheme named by ``init_type`` ('normal',
    'xavier', 'kaiming' or 'orthogonal'); their biases are zeroed.
    BatchNorm/InstanceNorm affine parameters are set to weight=1, bias=0.
    Raises NotImplementedError for an unknown ``init_type``.
    """
    weight_schemes = {
        'normal': lambda w: nn.init.normal_(w, 0.0, gain),
        'xavier': lambda w: nn.init.xavier_normal_(w, gain=gain),
        'kaiming': lambda w: nn.init.kaiming_normal_(w, a=0, mode='fan_in'),
        'orthogonal': lambda w: nn.init.orthogonal_(w, gain=gain),
    }

    def _init_func(m):
        classname = m.__class__.__name__
        has_weight = hasattr(m, 'weight')
        if has_weight and ('Conv' in classname or 'Linear' in classname):
            if init_type not in weight_schemes:
                raise NotImplementedError(f'initialization method {init_type} is not implemented')
            weight_schemes[init_type](m.weight.data)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm' in classname:
            nn.init.constant_(m.weight.data, 1.0)
            nn.init.constant_(m.bias.data, 0.0)
        elif 'InstanceNorm' in classname:
            # InstanceNorm is affine-optional: weight/bias may be None.
            if m.weight is not None and m.bias is not None:
                nn.init.constant_(m.weight.data, 1.0)
                nn.init.constant_(m.bias.data, 0.0)

    model.apply(_init_func)
class OP_RunSynthesis(bpy.types.Operator):
    """Blender operator: run GenMM motion synthesis on the active action.

    Exports the selected frame range as BVH text, runs patch-based synthesis,
    and loads the synthesised motion back into the scene.
    """
    bl_idname = 'genmm.run_synthesis'
    bl_label = 'Run synthesis'
    bl_description = ''
    bl_options = {'REGISTER', 'UNDO'}

    def __init__(self) -> None:
        super().__init__()

    def execute(self, context: bpy.types.Context):
        setting = context.scene.setting
        anim = context.object.animation_data.action
        (start_frame, end_frame) = map(int, anim.frame_range)
        # BUGFIX: both ternary branches originally returned the action's own
        # frame bound, so user overrides in the settings panel were silently
        # ignored.  -1 means "use the action's range"; anything else wins.
        start_frame = (start_frame if (setting.start_frame == (- 1)) else setting.start_frame)
        end_frame = (end_frame if (setting.end_frame == (- 1)) else setting.end_frame)
        bvh_str = get_bvh_data(context, frame_start=start_frame, frame_end=end_frame)
        # Split the BVH text: header, then 'Frames:'/'Frame Time:' lines,
        # then one line of floats per frame.
        (frames_str, frame_time_str) = bvh_str.split('MOTION\n')[1].split('\n')[:2]
        motion_data_str = bvh_str.split('MOTION\n')[1].split('\n')[2:(- 1)]
        motion_data = np.array([item.strip().split(' ') for item in motion_data_str], dtype=np.float32)
        motion = [BlenderMotion(motion_data, repr='repr6d', use_velo=True, keep_up_pos=True, up_axis=setting.up_axis, padding_last=False)]
        model = GenMM(device=('cuda' if torch.cuda.is_available() else 'cpu'), silent=True)
        criteria = PatchCoherentLoss(patch_size=setting.patch_size, alpha=setting.alpha, loop=setting.loop, cache=True)
        syn = model.run(motion, criteria, num_frames=str(setting.num_syn_frames), num_steps=setting.num_steps, noise_sigma=setting.noise, patch_size=setting.patch_size, coarse_ratio=f'{setting.coarse_ratio}x_nframes', pyr_factor=setting.pyr_factor)
        motion_data_str = [' '.join((str(x) for x in item)) for item in motion[0].parse(syn)]
        # Reassemble a BVH document from the original header plus the
        # synthesised frames and load it back into the scene.
        load(context, ((((bvh_str.split('MOTION\n')[0].split('\n') + ['MOTION']) + [frames_str]) + [frame_time_str]) + motion_data_str))
        return {'FINISHED'}
def patch_plots(function):
    """Decorator for plot-producing test functions: force the non-interactive
    'Agg' matplotlib backend and stub out plt.show() so the wrapped function
    runs headless.

    BUGFIX: the '@' of '@wraps(function)' was missing — 'wraps' was imported
    and '(function)' evaluated as a no-op expression, so the wrapper lost the
    wrapped function's name/docstring.  Restored.
    """
    from functools import wraps

    @wraps(function)
    def decorated(*args, **kwargs):
        with patch('matplotlib.pyplot.show', (lambda *x, **y: None)):
            import matplotlib
            matplotlib.use('Agg')
            return function(*args, **kwargs)
    return decorated
class DistributedTrainingParams(FairseqDataclass):
    """Configuration dataclass for distributed training (world size, backend,
    DDP wrapper, SlowMo/LocalSGD, pipeline model parallelism, ZeRO sharding).
    Each field's 'help' metadata doubles as its CLI description."""
    # Core process-group topology.
    distributed_world_size: int = field(default=max(1, torch.cuda.device_count()), metadata={'help': 'total number of GPUs across all nodes (default: all visible GPUs)'})
    distributed_rank: Optional[int] = field(default=0, metadata={'help': 'rank of the current worker'})
    distributed_backend: str = field(default='nccl', metadata={'help': 'distributed backend'})
    distributed_init_method: Optional[str] = field(default=None, metadata={'help': 'typically tcp://hostname:port that will be used to establish initial connetion'})
    distributed_port: int = field(default=(- 1), metadata={'help': 'port number (not required if using --distributed-init-method)'})
    device_id: int = field(default=0, metadata={'help': 'which GPU to use (usually configured automatically)'})
    local_rank: int = field(default=0, metadata={'help': 'which GPU to use (usually configured automatically)'})
    distributed_no_spawn: bool = field(default=False, metadata={'help': 'do not spawn multiple processes even if multiple GPUs are visible'})
    # DistributedDataParallel behaviour.
    ddp_backend: DDP_BACKEND_CHOICES = field(default='c10d', metadata={'help': 'DistributedDataParallel backend'})
    bucket_cap_mb: int = field(default=25, metadata={'help': 'bucket size for reduction'})
    fix_batches_to_gpus: bool = field(default=False, metadata={'help': "don't shuffle batches between GPUs; this reduces overall randomness and may affect precision but avoids the cost of re-reading the data"})
    find_unused_parameters: bool = field(default=False, metadata={'help': 'disable unused parameter detection (not applicable to no_c10d ddp-backend'})
    fast_stat_sync: bool = field(default=False, metadata={'help': '[deprecated] this is now defined per Criterion'})
    broadcast_buffers: bool = field(default=False, metadata={'help': 'Copy non-trainable parameters between GPUs, such as batchnorm population statistics'})
    distributed_wrapper: DISTRIBUTED_WRAPPER_CHOICES = field(default='DDP', metadata={'help': 'DistributedDataParallel backend'})
    # SlowMo / LocalSGD options.
    slowmo_momentum: Optional[float] = field(default=None, metadata={'help': 'SlowMo momentum term; by default use 0.0 for 16 GPUs, 0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs'})
    slowmo_algorithm: str = field(default='LocalSGD', metadata={'help': 'whether to use LocalSGD or SGP'})
    localsgd_frequency: int = field(default=3, metadata={'help': 'Local SGD allreduce frequency'})
    nprocs_per_node: int = field(default=max(1, torch.cuda.device_count()), metadata={'help': 'number of GPUs in each node. An allreduce operation across GPUs in a node is very fast. Hence, we do allreduce across GPUs in a node, and gossip across different nodes'})
    # Pipeline model parallelism.
    pipeline_model_parallel: bool = field(default=False, metadata={'help': 'if set, use pipeline model parallelism across GPUs'})
    # NOTE(review): annotated 'str' but defaulted to None — effectively
    # Optional[str]; confirm whether the annotation should be Optional.
    pipeline_balance: str = field(default=None, metadata={'help': 'partition the model into N_K pieces, where each piece contains N_i layers. The sum(args.pipeline_balance) should equal the total number of layers in the model'})
    # NOTE(review): same str-vs-None mismatch as pipeline_balance.
    pipeline_devices: str = field(default=None, metadata={'help': 'a list of device indices indicating which device to place each of the N_K partitions. The length of this list should equal the length of the --pipeline-balance argument'})
    pipeline_chunks: int = field(default=0, metadata={'help': 'microbatch count for pipeline model parallelism'})
    pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field(default='never', metadata={'help': 'checkpointing mode for pipeline model parallelism'})
    # ZeRO optimizer-state sharding.
    zero_sharding: ZERO_SHARDING_CHOICES = field(default='none', metadata={'help': 'ZeRO sharding'})
def test_substrate_presence_profile():
    """TMM absorption profile must match the stored reference, both with and
    without a GaAs substrate behind the 700 nm GaAs layer."""
    wl = np.linspace(300, 800, 3) * 1e-09
    gaas = material('GaAs')(T=300)

    # Case 1: cell with an explicit substrate.
    cell_with_substrate = SolarCell([Layer(si(700, 'nm'), material=gaas)], substrate=gaas)
    solar_cell_solver(cell_with_substrate, 'optics', user_options={'wavelength': wl, 'optics_method': 'TMM', 'no_back_reflection': False})
    z = np.linspace(0, cell_with_substrate.width, 50)
    profile_subs = cell_with_substrate[0].absorbed(z)

    # Case 2: same stack without a substrate; evaluated on the same z grid.
    cell_without_substrate = SolarCell([Layer(si(700, 'nm'), material=gaas)])
    solar_cell_solver(cell_without_substrate, 'optics', user_options={'wavelength': wl, 'optics_method': 'TMM', 'no_back_reflection': False})
    profile_nosubs = cell_without_substrate[0].absorbed(z)

    profile = np.vstack((profile_subs, profile_nosubs))
    reference_file = Path(__file__).parent / 'data' / 'substrate_presence_profile.csv'
    expected = np.loadtxt(reference_file, delimiter=',')
    assert profile.shape == expected.shape
    assert profile == approx(expected)
def test_xdg_vars_set_single(freebsd, xdg_env_single):
    """On freebsd with single-value XDG env vars, every platform path derives
    from the corresponding XDG directory."""
    expected = platform.PlatformPaths(
        config_user=Path('/ch//pypyr/config.yaml'),
        config_common=[Path('/cc/pypyr/config.yaml')],
        data_dir_user=Path('/dh/pypyr'),
        data_dir_common=[Path('/dc/pypyr')])
    actual = platform.get_platform_paths('pypyr', 'config.yaml')
    assert actual == expected
def write_results(filename: str, insts):
    """Write token-level predictions to a tab-separated file.

    For every instance, one row per token is emitted with: token index,
    word, POS tag, head index, dependency label, gold label, predicted
    label. Instances are separated by a blank line.

    Fix: the original opened the file without a context manager, leaking
    the handle if any instance was malformed; per-token attribute lookups
    and the length assertion are also hoisted out of the inner loop.
    """
    with open(filename, 'w', encoding='utf-8') as f:
        for inst in insts:
            words = inst.input.words
            tags = inst.input.pos_tags
            heads = inst.input.heads
            dep_labels = inst.input.dep_labels
            output = inst.output
            prediction = inst.prediction
            assert (len(output) == len(prediction))
            for i in range(len(inst.input)):
                f.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(i, words[i], tags[i], heads[i], dep_labels[i], output[i], prediction[i]))
            f.write('\n')
class TestWeightFi():
    """Weight fault-injection tests on a CIFAR10 model (CPU only).

    A single golden forward pass is computed once in setup_class; each test
    injects weight faults through pfi_core and compares the corrupted
    output against the golden one.
    """

    def setup_class(self):
        torch.manual_seed(0)  # deterministic weights and data ordering
        batch_size = 1
        workers = 1
        channels = 3
        img_size = 32
        use_gpu = False
        (self.model, self.dataset) = CIFAR10_set_up_custom(batch_size, workers)
        dataiter = iter(self.dataset)
        # BUG FIX: iterator.next() is Python-2 era and no longer exists on
        # modern DataLoader iterators; use the builtin next().
        (self.images, self.labels) = next(dataiter)
        self.model.eval()
        with torch.no_grad():
            self.golden_output = self.model(self.images)
        self.p = pfi_core(self.model, batch_size, input_shape=[channels, img_size, img_size], layer_types=[torch.nn.Conv2d, torch.nn.Linear], use_cuda=use_gpu)

    def test_single_weight_fi_cpu(self):
        """A large injected weight changes the output; injecting 0.0 at the
        same coordinate reproduces the golden output.

        NOTE(review): the 0.0-injection equaling the golden output implies
        pfi_core treats it as a restore (or the original weight is zero) --
        confirm against the pfi_core implementation.
        """
        layer_i = [1]
        k = [15]
        c_i = [20]
        h_i = [2]
        w_i = [3]
        inj_value_base = 10000.0
        inj_value_i = [inj_value_base]
        corrupt_model = self.p.declare_weight_fault_injection(layer_num=layer_i, k=k, dim1=c_i, dim2=h_i, dim3=w_i, value=inj_value_i)
        corrupt_model.eval()
        with torch.no_grad():
            corrupt_output = corrupt_model(self.images)
        assert (not torch.all(corrupt_output.eq(self.golden_output)))
        corrupt_model = self.p.declare_weight_fault_injection(layer_num=layer_i, k=k, dim1=c_i, dim2=h_i, dim3=w_i, value=[0.])
        corrupt_model.eval()
        with torch.no_grad():
            uncorrupted_output = corrupt_model(self.images)
        assert torch.all(uncorrupted_output.eq(self.golden_output))
        # A different large value must again perturb the output, and the
        # result must be free of NaNs (x.eq(x) is False only for NaN).
        corrupt_model = self.p.declare_weight_fault_injection(layer_num=layer_i, k=k, dim1=c_i, dim2=h_i, dim3=w_i, value=[(inj_value_base * 2)])
        corrupt_model.eval()
        with torch.no_grad():
            corrupt_output_2 = corrupt_model(self.images)
        assert (not torch.all(corrupt_output_2.eq(self.golden_output)))
        assert torch.all(corrupt_output_2.eq(corrupt_output_2))

    def test_single_weight_fi_no_error_cpu(self):
        """Injecting into a weight that does not influence this input's
        activations leaves the output unchanged."""
        layer_i = [4]
        k = [153]
        c_i = [254]
        h_i = [0]
        w_i = [0]
        inj_value_i = [10000.0]
        corrupt_model = self.p.declare_weight_fault_injection(layer_num=layer_i, k=k, dim1=c_i, dim2=h_i, dim3=w_i, value=inj_value_i)
        corrupt_model.eval()
        with torch.no_grad():
            corrupt_output = corrupt_model(self.images)
        assert torch.all(corrupt_output.eq(self.golden_output))

    def test_multi_weight_fi_cpu(self):
        """Simultaneous injection into several layers perturbs the output.

        None in dim2/dim3 targets layers without those dimensions (Linear).
        """
        layer_i = [1, 2, 5]
        k = [15, 12, 1]
        c_i = [20, 8, 1]
        h_i = [2, 1, None]
        w_i = [3, 1, None]
        inj_value_base = 10000.0
        inj_value_i = [inj_value_base, inj_value_base, inj_value_base]
        corrupt_model = self.p.declare_weight_fault_injection(layer_num=layer_i, k=k, dim1=c_i, dim2=h_i, dim3=w_i, value=inj_value_i)
        corrupt_model.eval()
        with torch.no_grad():
            corrupt_output = corrupt_model(self.images)
        assert (not torch.all(corrupt_output.eq(self.golden_output)))
class TPUDistributedDataParallel(nn.Module):
    """Minimal DDP-style wrapper for TPUs.

    Forward calls pass straight through to the wrapped module; gradients
    are averaged across the process group with a torch_xla all_reduce.
    """

    def __init__(self, module, process_group):
        super().__init__()
        self.module = module
        self.process_group = process_group
        self.world_size = distributed_utils.get_world_size(self.process_group)

    def forward(self, *inputs, **kwargs):
        # No bucketing or hooks -- plain delegation.
        return self.module(*inputs, **kwargs)

    def all_reduce_grads(self):
        gradients = []
        for param in self.parameters():
            if not param.requires_grad:
                continue
            if param.grad is None:
                # Materialize a zero grad so every rank reduces the same tensors.
                param.grad = torch.zeros_like(param)
            if param.grad.requires_grad:
                raise RuntimeError("TPUDistributedDataParallel only works with gradients that don't require grad")
            gradients.append(param.grad)
        import torch_xla.core.xla_model as xm
        # Sum then scale by 1/world_size == mean across replicas.
        xm.all_reduce('sum', gradients, scale=(1.0 / self.world_size), groups=self.process_group[1])
class Effect5132(BaseEffect):
    """Minmatar Cruiser hull bonus: falloff for medium projectile turrets."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # Boost falloff on every module requiring Medium Projectile Turret,
        # scaled by the hull attribute and the Minmatar Cruiser skill level.
        def uses_medium_projectiles(mod):
            return mod.item.requiresSkill('Medium Projectile Turret')
        bonus = ship.getModifiedItemAttr('shipBonusMC2')
        fit.modules.filteredItemBoost(uses_medium_projectiles, 'falloff', bonus, skill='Minmatar Cruiser', **kwargs)
class TestURIReferenceComparesToURIReferences():
    """Equality semantics between two URIReference instances."""

    def test_same_basic_uri(self, basic_uri):
        # A reference must compare equal to itself.
        ref = URIReference.from_string(basic_uri)
        assert ref == ref

    def test_different_basic_uris(self, basic_uri, basic_uri_with_port):
        # Adding a port makes the references unequal; == must return a
        # genuine False, not a falsy placeholder.
        ref = URIReference.from_string(basic_uri)
        other = URIReference.from_string(basic_uri_with_port)
        assert (ref == other) is False
class BirthdayParty(QObject):
    """QObject modelling a party with a host and a list of guests.

    NOTE(review): the bare parenthesized expressions below -- ``(Person)``,
    ``(QQmlListProperty)`` and ``(str)`` -- look like decorator arguments
    whose ``@Property(...)`` / ``@Slot(...)`` markers were stripped (this
    matches the Qt-for-Python QML extending-objects example). As written
    they are no-op expressions, and the second ``host`` definition simply
    shadows the first. Confirm against the original example before use.
    """
    def __init__(self, parent=None):
        super(BirthdayParty, self).__init__(parent)
        self._host = None       # Person acting as host, or None
        self._guests = []       # list of Person objects
    (Person)
    def host(self):
        # Getter for the host (presumably @Property(Person) -- see class note).
        return self._host
    def host(self, host):
        # Setter for the host (presumably @host.setter -- see class note).
        self._host = host
    (QQmlListProperty)
    def guests(self):
        # Expose the guest list to QML as a list property backed by _guests.
        return QQmlListProperty(Person, self, self._guests)
    (str)
    def invite(self, name):
        # Create a Person parented to this party and append it to the guests.
        person = Person(self)
        person.name = name
        self._guests.append(person)
def test_direct_junction_offsets_pre_suc_3_left(direct_junction_left_lane_fixture):
    """Connecting main (predecessor) and small (successor) roads through a
    direct junction records zero lane offsets on both sides and links the
    expected left lanes."""
    main_road, small_road, junction_creator = direct_junction_left_lane_fixture

    main_road.add_predecessor(xodr.ElementType.junction, junction_creator.id)
    small_road.add_successor(xodr.ElementType.junction, junction_creator.id)
    junction_creator.add_connection(main_road, small_road)

    assert main_road.pred_direct_junction == {small_road.id: 0}
    assert small_road.succ_direct_junction == {main_road.id: 0}
    first_connection = junction_creator.junction.connections[0]
    assert first_connection.links[0] == (1, 1)
class ProjectUpdateViewsView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
    """Update the set of views attached to a project."""

    model = Project
    queryset = Project.objects.all()
    form_class = ProjectUpdateViewsForm
    permission_required = 'projects.change_project_object'

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        # Restrict the form's choices to views the current user may use:
        # current site, the project's catalog, group membership, availability.
        available_views = (View.objects.filter_current_site()
                                       .filter_catalog(self.object.catalog)
                                       .filter_group(self.request.user)
                                       .filter_availability(self.request.user))
        kwargs.update({'views': available_views})
        return kwargs
def init_client(client: QdrantBase, records: List[models.Record], collection_name: str=COLLECTION_NAME, vectors_config: Optional[Union[(Dict[(str, models.VectorParams)], models.VectorParams)]]=None, sparse_vectors_config: Optional[Dict[(str, models.SparseVectorParams)]]=None) -> None:
    """Create the fixture collection on *client* and upload *records* into it.

    The upload uses wait=True so the data is fully indexed before returning,
    letting tests query immediately afterwards.
    """
    initialize_fixture_collection(
        client=client,
        collection_name=collection_name,
        vectors_config=vectors_config,
        sparse_vectors_config=sparse_vectors_config,
    )
    client.upload_records(collection_name, records, wait=True)
class StoreKeyValuePairAction(argparse.Action):
    """argparse action collecting ``KEY=VALUE`` arguments into a dict.

    Values are split on the first '=' only, so a value may itself contain
    '=' characters. Repeated uses accumulate into the same dict.

    Fix: an argument without any '=' previously crashed with a tuple-unpack
    ValueError; it now raises a proper argparse.ArgumentError so the parser
    reports a clean usage message.
    """

    def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None, choices=None, required=False, help=None, metavar=None):
        # Pairs must be raw strings; any converter would break the split below.
        if (type not in (None, str)):
            raise ValueError('type for StoreKeyValuePairAction must be str')
        super(StoreKeyValuePairAction, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, const=const, default=default, type=type, choices=choices, required=required, help=help, metavar=metavar)

    def __call__(self, parser, namespace, values, option_string=None):
        dest = getattr(namespace, self.dest)
        if (dest is None):
            # Lazily create the destination dict on first use.
            setattr(namespace, self.dest, {})
            dest = getattr(namespace, self.dest)
        for value in values:
            key, sep, val = value.partition('=')
            if not sep:
                # Malformed argument: surface a clean argparse error rather
                # than an internal ValueError traceback.
                raise argparse.ArgumentError(self, 'expected KEY=VALUE, got %r' % value)
            dest[key] = val
class Zoom(object):
    """Affine zoom transform for 2D tensors.

    value : float or (zx, zy) pair -- per-axis zoom factors; a scalar is
        applied uniformly to both axes.
    interp : interpolation mode (or one mode per input tensor).
    lazy : if True, __call__ returns only the 3x3 zoom matrix so the caller
        can compose it with other affine transforms.
    """

    def __init__(self, value, interp='bilinear', lazy=False):
        if (not isinstance(value, (tuple, list))):
            value = (value, value)
        self.value = value
        self.interp = interp
        self.lazy = lazy

    def __call__(self, *inputs):
        if (not isinstance(self.interp, (tuple, list))):
            interp = ([self.interp] * len(inputs))
        else:
            interp = self.interp
        (zx, zy) = self.value
        zoom_matrix = th.FloatTensor([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
        if self.lazy:
            # Lazy mode: hand back the matrix for composition by the caller.
            return zoom_matrix
        outputs = []
        for (idx, _input) in enumerate(inputs):
            input_tf = th_affine2d(_input, zoom_matrix, mode=interp[idx], center=True)
            outputs.append(input_tf)
        # BUG FIX: the original returned `outputs if idx > 1 else outputs[0]`,
        # which silently dropped the second tensor when exactly two inputs
        # were passed (idx == 1). Decide on the actual output count instead.
        return (outputs if (len(outputs) > 1) else outputs[0])
def init_QKV_forward_buffer():
    """Allocate the global buffer holding fused Q/K/V activations.

    Sizes are per-partition under SUMMA 2D tensor parallelism; Q, K and V
    are stored contiguously, hence the factor of 3.
    """
    global _QKV_FORWARD_BUFFER
    args = get_args()
    assert _QKV_FORWARD_BUFFER is None, '_QKV_FORWARD_BUFFER is already initialized'
    batch_per_partition = args.batch_size // args.summa_dim
    hidden_per_partition = args.hidden_size // args.summa_dim
    num_elements = 3 * batch_per_partition * args.seq_length * hidden_per_partition
    _QKV_FORWARD_BUFFER = allocate_mem_buff('QKV forward buffer', num_elements, args.params_dtype, track_usage=False)
def _search_noise_pauses(levels, tsc):
pauses = list()
possible_start = None
for i in range(2, (len(levels) - 2)):
if (((levels[i] - levels[(i - 1)]) >= tsc) and ((levels[(i - 1)] - levels[(i - 2)]) < tsc)):
possible_start = i
if (((levels[i] - levels[(i + 1)]) >= tsc) and ((levels[(i + 1)] - levels[(i + 2)]) < tsc)):
if possible_start:
pauses.append((possible_start, i))
possible_start = None
return pauses |
class SetComprehension(Expression):
    """AST node for a set comprehension, e.g. ``{x for x in y}``.

    Thin wrapper around a GeneratorExpr, which carries the actual
    iteration/condition structure.
    """
    __slots__ = ('generator',)
    __match_args__ = ('generator',)
    # The underlying generator expression being wrapped.
    generator: GeneratorExpr
    def __init__(self, generator: GeneratorExpr) -> None:
        super().__init__()
        self.generator = generator
    def accept(self, visitor: ExpressionVisitor[T]) -> T:
        # Standard visitor double-dispatch.
        return visitor.visit_set_comprehension(self)
# NOTE(review): `_module()` looks like a registry decorator whose marker was
# stripped (e.g. ``@HEADS.register_module()`` in mmseg-style code); as
# written it is a bare call -- confirm against the original source.
_module()
class GCHead(FCNHead):
    """FCN decode head with a GCNet ContextBlock between its two convs."""
    def __init__(self, ratio=(1 / 4.0), pooling_type='att', fusion_types=('channel_add',), **kwargs):
        # num_convs is fixed to 2 so the context block can sit between
        # convs[0] and convs[1] in forward().
        super(GCHead, self).__init__(num_convs=2, **kwargs)
        self.ratio = ratio
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types
        self.gc_block = ContextBlock(in_channels=self.channels, ratio=self.ratio, pooling_type=self.pooling_type, fusion_types=self.fusion_types)
    def forward(self, inputs):
        x = self._transform_inputs(inputs)
        output = self.convs[0](x)
        # Global-context attention between the two convolution stages.
        output = self.gc_block(output)
        output = self.convs[1](output)
        if self.concat_input:
            # Optionally fuse the head input back in before classification.
            output = self.conv_cat(torch.cat([x, output], dim=1))
        output = self.cls_seg(output)
        return output
class Effect4809(BaseEffect):
    """Module bonus: gravimetric jam strength for all fitted ECM modules."""

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        def is_ecm(mod):
            return mod.item.group.name == 'ECM'
        strength_bonus = module.getModifiedItemAttr('ecmStrengthBonusPercent')
        # Stacking penalties apply since multiple strength bonuses compete.
        fit.modules.filteredItemBoost(is_ecm, 'scanGravimetricStrengthBonus', strength_bonus, stackingPenalties=True, **kwargs)
class FeatureGraphNet(nn.Module):
    """Wrap a model with torchvision FX feature extraction.

    forward() returns the feature maps selected by *out_indices*, optionally
    renamed via *out_map*, as a plain list in graph order.
    """

    def __init__(self, model, out_indices, out_map=None):
        super().__init__()
        assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
        self.feature_info = _get_feature_info(model, out_indices)
        if out_map is not None:
            assert len(out_map) == len(out_indices)
        return_nodes = {}
        for idx, info in enumerate(self.feature_info):
            if idx not in out_indices:
                continue
            # Rename the node output when a mapping is supplied; otherwise
            # keep the module path as the key.
            return_nodes[info['module']] = out_map[idx] if out_map is not None else info['module']
        self.graph_module = create_feature_extractor(model, return_nodes)

    def forward(self, x):
        return list(self.graph_module(x).values())
def nice_value(x):
    """Snap *x* to a 'nice' number: +/- {0.1, 0.2, 0.5, 1.0} times a power of ten.

    Zero maps to zero; the sign of *x* is preserved. Useful for choosing
    axis tick spacings.
    """
    if x == 0.0:
        return 0.0
    sign = 1
    if x < 0.0:
        x = -x
        sign = -1
    # Normalize the magnitude into [0.1, 1.0), tracking the power of ten.
    exp = 1.0
    while x >= 1.0:
        x /= 10.0
        exp *= 10.0
    while x < 0.1:
        x *= 10.0
        exp /= 10.0
    # Snap the normalized mantissa to the nearest nice step.
    if x >= 0.75:
        return (sign * 1.0) * exp
    if x >= 0.35:
        return (sign * 0.5) * exp
    if x >= 0.15:
        return (sign * 0.2) * exp
    return (sign * 0.1) * exp
def make_tcl_script_vis_annot(subject_id, hemi, out_vis_dir, annot_file='aparc.annot'):
    """Write a tksurfer TCL script that snapshots an annotation from several
    camera angles.

    Parameters: subject id and hemisphere label (used in file names), an
    output directory (pathlib.Path), and the annotation file to load.
    Returns (script_file, vis) where vis maps each configured view name to
    its screenshot path.

    Raises IOError if the script cannot be written. Fix: the original used
    a bare ``except:`` which swallowed the underlying cause (and would even
    catch KeyboardInterrupt); now only OSError is caught and chained.
    """
    script_file = (out_vis_dir / f'vis_annot_{hemi}.tcl')
    # One screenshot path per configured camera angle.
    vis = dict()
    for view in cfg.tksurfer_surface_vis_angles:
        vis[view] = (out_vis_dir / f'{subject_id}_{hemi}_{view}.tif')
    img_format = 'tiff'
    cmds = list()
    cmds.append('labl_import_annotation {}'.format(annot_file))
    cmds.append('scale_brain 1.37')
    cmds.append('redraw')
    cmds.append('save_{} {}'.format(img_format, vis['lateral']))
    # Flip to the medial surface.
    cmds.append('rotate_brain_y 180.0')
    cmds.append('redraw')
    cmds.append('save_{} {}'.format(img_format, vis['medial']))
    # Reorient for the transverse view.
    cmds.append('rotate_brain_z -90.0')
    cmds.append('rotate_brain_y 135.0')
    cmds.append('redraw')
    cmds.append('save_{} {}'.format(img_format, vis['transverse']))
    cmds.append('exit 0')
    try:
        with open(script_file, 'w') as sf:
            sf.write('\n'.join(cmds))
    except OSError as exc:
        # Chain the original error so the real cause stays visible.
        raise IOError('Unable to write the script file to\n {}'.format(script_file)) from exc
    return (script_file, vis)
def residual_bottleneck(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None, interp_cat=False, final_relu=False, final_pool=False):
    """Build a sequential residual-bottleneck feature head.

    Stacks `num_blocks` Bottleneck blocks operating on 4*feature_dim input
    channels, with an optional InterpCat front end and optional final 3x3
    conv, ReLU, max-pool, and InstanceL2Norm stages appended in that order.

    NOTE(review): Bottleneck/InterpCat/InstanceL2Norm are project classes;
    the 4x channel expansion contract is inferred from usage here -- confirm.
    """
    if (out_dim is None):
        out_dim = feature_dim
    feat_layers = []
    if interp_cat:
        feat_layers.append(InterpCat())
    for i in range(num_blocks):
        # The last block narrows to out_dim//4 unless a final conv follows,
        # in which case every block keeps the full feature_dim width.
        planes = (feature_dim if (i < ((num_blocks - 1) + int(final_conv))) else (out_dim // 4))
        feat_layers.append(Bottleneck((4 * feature_dim), planes))
    if final_conv:
        feat_layers.append(nn.Conv2d((4 * feature_dim), out_dim, kernel_size=3, padding=1, bias=False))
    if final_relu:
        feat_layers.append(nn.ReLU(inplace=True))
    if final_pool:
        feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
    if l2norm:
        feat_layers.append(InstanceL2Norm(scale=norm_scale))
    return nn.Sequential(*feat_layers)
class RegexLexerMeta(LexerMeta):
    """Metaclass for RegexLexer: compiles the class-level ``tokens``
    definition into ``cls._tokens`` on first instantiation."""
    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regex component of a token definition into a
        compiled match function."""
        if isinstance(regex, Future):
            # Deferred regexes are resolved lazily via .get().
            regex = regex.get()
        return re.compile(regex, rflags).match
    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert ((type(token) is _TokenType) or callable(token)), ('token type must be simple type or callable, not %r' % (token,))
        return token
    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state-transition action of a token definition.

        Returns: a negative int for pops, a tuple of state names to push,
        or the literal '#push' marker.
        """
        if isinstance(new_state, str):
            if (new_state == '#pop'):
                return (- 1)
            elif (new_state in unprocessed):
                return (new_state,)
            elif (new_state == '#push'):
                return new_state
            elif (new_state[:5] == '#pop:'):
                # '#pop:n' pops n states at once.
                return (- int(new_state[5:]))
            else:
                assert False, ('unknown new state %r' % new_state)
        elif isinstance(new_state, combined):
            # Materialize the combined states into a synthetic unique state.
            tmp_state = ('_tmp_%d' % cls._tmpname)
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert (istate != new_state), ('circular state ref %r' % istate)
                itokens.extend(cls._process_state(unprocessed, processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # Push several states at once; each must exist or be a marker.
            for istate in new_state:
                assert ((istate in unprocessed) or (istate in ('#pop', '#push'))), ('unknown new state ' + istate)
            return new_state
        else:
            assert False, ('unknown new state def %r' % new_state)
    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition into a list of
        (match_function, token, new_state) tuples."""
        assert isinstance(state, str), ('wrong state name %r' % state)
        assert (state[0] != '#'), ('invalid state name %r' % state)
        if (state in processed):
            return processed[state]
        # Insert the list before recursing so cycles terminate.
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # Inline another state's rules at this position.
                assert (tdef != state), ('circular state reference %r' % state)
                tokens.extend(cls._process_state(unprocessed, processed, str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # Processed earlier in get_tokendefs(); nothing to do here.
                continue
            if isinstance(tdef, default):
                # A default rule: empty match that only transitions state.
                new_state = cls._process_new_state(tdef.state, unprocessed, processed)
                tokens.append((re.compile('').match, None, new_state))
                continue
            assert (type(tdef) is tuple), ('wrong rule def %r' % tdef)
            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError(('uncompilable regex %r in state %r of %r: %s' % (tdef[0], state, cls, err))) from err
            token = cls._process_token(tdef[1])
            if (len(tdef) == 2):
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2], unprocessed, processed)
            tokens.append((rex, token, new_state))
        return tokens
    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = (tokendefs or cls.tokens[name])
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed
    def get_tokendefs(cls):
        """Merge ``tokens`` dicts from all superclasses in MRO order,
        splicing subclass rules at each ``inherit`` marker."""
        tokens = {}
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})
            for (state, items) in toks.items():
                curitems = tokens.get(state)
                if (curitems is None):
                    # First definition wins; remember where 'inherit' sits
                    # so an ancestor's rules can be spliced in later.
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue
                inherit_ndx = inheritable.pop(state, None)
                if (inherit_ndx is None):
                    continue
                # Replace the inherit marker with the ancestor's items.
                curitems[inherit_ndx:(inherit_ndx + 1)] = items
                try:
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    # The spliced items carry their own inherit marker.
                    inheritable[state] = (inherit_ndx + new_inh_ndx)
        return tokens
    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        if ('_tokens' not in cls.__dict__):
            cls._all_tokens = {}
            cls._tmpname = 0
            if (hasattr(cls, 'token_variants') and cls.token_variants):
                # Variant lexers process their tokendefs per variant later.
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
        return type.__call__(cls, *args, **kwds)
def get_protocol_member(left: Instance, member: str, class_obj: bool) -> (ProperType | None):
    """Resolve the type of *member* on *left* for protocol compatibility checks.

    ``__call__`` on a class object is answered with the type-object type;
    ``__call__`` on a metaclass instance is deliberately unsupported (None).
    Everything else goes through the generic member lookup.
    """
    if ((member == '__call__') and class_obj):
        # Deferred import to avoid an import cycle with mypy.checkmember.
        from mypy.checkmember import type_object_type
        def named_type(fullname: str) -> Instance:
            # NOTE(review): returns the last MRO entry regardless of
            # fullname -- presumably a safe fallback; confirm the intent.
            return Instance(left.type.mro[(- 1)], [])
        return type_object_type(left.type, named_type)
    if ((member == '__call__') and left.type.is_metaclass()):
        return None
    from mypy.subtypes import find_member
    return get_proper_type(find_member(member, left, left, class_obj=class_obj))
def load_lvis_v1_json(json_file, image_root, dataset_name=None):
    """Load a LVIS-v1 style annotation json into detectron2 dataset dicts.

    Parameters: the annotation json path, the image root directory, and an
    optional dataset name under which LVIS metadata is registered.
    Returns a list of per-image records with 'file_name', sizes, negative /
    non-exhaustive category lists, and 'annotations' (XYWH_ABS boxes with
    0-based category ids and running instance ids). Crowd annotations are
    skipped.

    Fix: the final summary used a bare print(); it now goes through the
    module logger like every other message in this function.
    """
    from lvis import LVIS
    json_file = PathManager.get_local_path(json_file)
    timer = Timer()
    lvis_api = LVIS(json_file)
    if (timer.seconds() > 1):
        logger.info('Loading {} takes {:.2f} seconds.'.format(json_file, timer.seconds()))
    if (dataset_name is not None):
        meta = get_lvis_v1_instances_meta()
        MetadataCatalog.get(dataset_name).set(**meta)
    # Sort ids for deterministic ordering across runs.
    img_ids = sorted(lvis_api.imgs.keys())
    imgs = lvis_api.load_imgs(img_ids)
    anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
    ann_ids = [ann['id'] for anns_per_image in anns for ann in anns_per_image]
    assert (len(set(ann_ids)) == len(ann_ids)), "Annotation ids in '{}' are not unique".format(json_file)
    imgs_anns = list(zip(imgs, anns))
    logger.info('Loaded {} images in the LVIS v1 format from {}'.format(len(imgs_anns), json_file))
    dataset_dicts = []
    inst_count = 0
    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        if ('file_name' in img_dict):
            file_name = img_dict['file_name']
            if img_dict['file_name'].startswith('COCO'):
                # COCO-2014-style names embed the split; keep only the
                # trailing '000000000000.jpg' part (16 chars).
                file_name = file_name[(- 16):]
        else:
            # LVIS v1 stores only a coco_url; strip the host prefix.
            file_name = img_dict['coco_url'][30:]
        record['file_name'] = os.path.join(image_root, file_name)
        record['height'] = img_dict['height']
        record['width'] = img_dict['width']
        record['not_exhaustive_category_ids'] = img_dict.get('not_exhaustive_category_ids', [])
        record['neg_category_ids'] = img_dict.get('neg_category_ids', [])
        # Convert LVIS 1-based category ids to 0-based.
        record['neg_category_ids'] = [(x - 1) for x in record['neg_category_ids']]
        image_id = record['image_id'] = img_dict['id']
        objs = []
        for anno in anno_dict_list:
            assert (anno['image_id'] == image_id)
            if (anno.get('iscrowd', 0) > 0):
                continue
            obj = {'bbox': anno['bbox'], 'bbox_mode': BoxMode.XYWH_ABS}
            obj['category_id'] = (anno['category_id'] - 1)
            inst_count = (inst_count + 1)
            obj['instance_id'] = inst_count
            # Pseudo-label jsons may carry a confidence score; default 1.0.
            obj['score'] = (anno['score'] if ('score' in anno) else 1.0)
            objs.append(obj)
        record['annotations'] = objs
        dataset_dicts.append(record)
    logger.info('inst_count {} {}'.format(dataset_name, inst_count))
    return dataset_dicts
class TwoStepParameters6(TwoStepParametersCommon):
    """FinTS two-step TAN procedure parameters, segment version 6 (HITANS6).

    Each field mirrors one data element of the segment in wire order; the
    ``_d`` strings carry the German ZKA specification names.
    """
    zka_id = DataElementField(type='an', max_length=32, _d='ZKA TAN-Verfahren')
    zka_version = DataElementField(type='an', max_length=10, _d='Version ZKA TAN-Verfahren')
    name = DataElementField(type='an', max_length=30, _d='Name des Zwei-Schritt-Verfahrens')
    max_length_input = DataElementField(type='num', max_length=2, _d='Maximale Lange des Eingabewertes im Zwei-Schritt-Verfahren')
    allowed_format = CodeField(enum=AllowedFormat, length=1, _d='Erlaubtes Format im Zwei-Schritt-Verfahren')
    text_return_value = DataElementField(type='an', max_length=30, _d='Text zur Belegung des Ruckgabewertes im Zwei-Schritt-Verfahren')
    max_length_return_value = DataElementField(type='num', max_length=4, _d='Maximale Lange des Ruckgabewertes im Zwei-Schritt-Verfahren')
    multiple_tans_allowed = DataElementField(type='jn', _d='Mehrfach-TAN erlaubt')
    tan_time_dialog_association = CodeField(enum=TANTimeDialogAssociation, length=1, _d='TAN Zeit- und Dialogbezug')
    cancel_allowed = DataElementField(type='jn', _d='Auftragsstorno erlaubt')
    sms_charge_account_required = CodeField(enum=SMSChargeAccountRequired, length=1, _d='SMS-Abbuchungskonto erforderlich')
    principal_account_required = CodeField(enum=PrincipalAccountRequired, length=1, _d='Auftraggeberkonto erforderlich')
    challenge_class_required = DataElementField(type='jn', _d='Challenge-Klasse erforderlich')
    challenge_structured = DataElementField(type='jn', _d='Challenge strukturiert')
    initialization_mode = CodeField(enum=InitializationMode, _d='Initialisierungsmodus')
    description_required = CodeField(enum=DescriptionRequired, length=1, _d='Bezeichnung des TAN-Medium erforderlich')
    response_hhd_uc_required = DataElementField(type='jn', _d='Antwort HHD_UC erforderlich')
    supported_media_number = DataElementField(type='num', length=1, required=False, _d='Anzahl unterstutzter aktiver TAN-Medien')
def create_pytensor_params(dist_params, obs, size):
    """Build symbolic pytensor variables mirroring concrete values.

    Each distribution parameter and the observations become tensor variables
    of the matching type; each size entry becomes an int scalar. Every
    symbolic variable carries the concrete value as its test value.
    Returns (dist_params_at, obs_at, size_at).
    """
    def _symbolic_like(value):
        # as_tensor(...).type() yields a fresh variable of the same
        # dtype/shape class as the concrete value.
        var = pt.as_tensor(value).type()
        var.tag.test_value = value
        return var

    dist_params_at = [_symbolic_like(p) for p in dist_params]

    size_at = []
    for s in size:
        scalar = pt.iscalar()
        scalar.tag.test_value = s
        size_at.append(scalar)

    obs_at = _symbolic_like(obs)
    return (dist_params_at, obs_at, size_at)
class SplAtConv2d(Module):
    """Split-Attention Conv2d (ResNeSt-style), with project-specific changes.

    NOTE(review): this deviates from stock ResNeSt -- after the radix split
    the second branch is re-convolved (conv2/bn2) on the sum of both
    branches, and self.dropout is defined but never used in forward();
    confirm these are intentional.
    """
    def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2, reduction_factor=4, rectify=False, rectify_avg=False, norm_layer=None, dropblock_prob=0.0, **kwargs):
        super(SplAtConv2d, self).__init__()
        padding = _pair(padding)
        # Rectified conv is only meaningful when padding is non-zero.
        self.rectify = (rectify and ((padding[0] > 0) or (padding[1] > 0)))
        self.rectify_avg = rectify_avg
        # Bottleneck width of the attention MLP, floored at 32 channels.
        inter_channels = max(((in_channels * radix) // reduction_factor), 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        self.dropblock_prob = dropblock_prob
        if self.rectify:
            from rfconv import RFConv2d
            self.conv = RFConv2d(in_channels, (channels * radix), kernel_size, stride, padding, dilation, groups=(groups * radix), bias=bias, average_mode=rectify_avg, **kwargs)
        else:
            self.conv = Conv2d(in_channels, (channels * radix), kernel_size, stride, padding, dilation, groups=(groups * radix), bias=bias, **kwargs)
        self.use_bn = (norm_layer is not None)
        if self.use_bn:
            self.bn0 = norm_layer((channels * radix))
            self.bn2 = norm_layer(channels)
        self.relu = ReLU(inplace=True)
        # fc1/fc2 form the squeeze-excite style attention over radix splits.
        self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = norm_layer(inter_channels)
        self.fc2 = Conv2d(inter_channels, (channels * radix), 1, groups=self.cardinality)
        if (dropblock_prob > 0.0):
            self.dropblock = DropBlock2D(dropblock_prob, 3)
        self.rsoftmax = rSoftMax(radix, groups)
        self.conv2 = Conv2d(channels, channels, kernel_size, stride, padding, dilation, groups=(groups * radix), bias=bias, **kwargs)
        # NOTE(review): unused in forward() -- dead module or future hook?
        self.dropout = nn.Dropout(0.2)
    def forward(self, x):
        x = self.conv(x)
        x = self.bn0(x)
        x = self.relu(x)
        (batch, rchannel) = x.shape[:2]
        # Split into radix groups along channels (radix is assumed 2 here,
        # since exactly two outputs are unpacked).
        (x1, x2) = torch.split(x, (rchannel // self.radix), dim=1)
        # Second branch is the re-convolved sum of both branches.
        x2 = (x2 + x1)
        x2 = self.conv2(x2)
        x2 = self.bn2(x2)
        x2 = self.relu(x2)
        splited = (x1, x2)
        # Global context pooled over both branches drives the attention.
        gap = sum(splited)
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)
        if self.use_bn:
            gap = self.bn1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, (- 1), 1, 1)
        attens = torch.split(atten, (rchannel // self.radix), dim=1)
        # Attention-weighted sum of the branches.
        out = sum([(att * split) for (att, split) in zip(attens, splited)])
        return out.contiguous()
class TestTupleEqual(TestCase):
    """Assertion-formatting fixture cases.

    NOTE(review): names such as ``klm``, ``aaa``, ``bbb``, ``You`` are not
    defined in this chunk -- these tests appear to exercise a test runner's
    assertion rewriting/output rather than real behavior; confirm the
    globals are injected elsewhere before treating failures as bugs.
    """
    def test_simple(self):
        assert (100 == klm)
        assert (456 == (aaa and bbb))
        assert (789 == (ccc or ddd))
        assert (123 == (True if You else False))
    def test_simple_msg(self):
        assert (klm == 100), 'This is wrong!'
    def test_simple_msg2(self):
        assert (klm == 100), 'This is wrong!'
    def test_line_wrapping(self):
        # Deliberately failing tuple comparison to exercise diff rendering.
        assert (('a', 'b') == ('b',)), 'This is wrong!'
        assert (100 == klm)
def inception_v1(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, prediction_fn=slim.softmax, spatial_squeeze=True, reuse=None, scope='InceptionV1'):
    """GoogLeNet / Inception-V1 classifier on top of inception_v1_base.

    Args:
        inputs: input image batch. NOTE(review): the fixed 7x7 average pool
            below assumes the base net's output spatial size is 7x7 (i.e.
            224x224 inputs) -- confirm for other resolutions.
        num_classes: number of output classes.
        is_training: toggles batch-norm and dropout training behavior.
        dropout_keep_prob: keep probability for the pre-logits dropout.
        prediction_fn: function mapping logits to predictions.
        spatial_squeeze: if True, squeeze the 1x1 spatial dims off logits.
        reuse: variable-scope reuse flag.
        scope: name of the variable scope.

    Returns:
        (logits, end_points): logits tensor and dict of named activations.
    """
    with tf.variable_scope(scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            (net, end_points) = inception_v1_base(inputs, scope=scope)
            with tf.variable_scope('Logits'):
                net = slim.avg_pool2d(net, [7, 7], stride=1, scope='AvgPool_0a_7x7')
                net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
                # 1x1 conv acts as the final fully-connected classifier.
                logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_0c_1x1')
                if spatial_squeeze:
                    logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return (logits, end_points)
class BackgroundDataset(Dataset):
    """Dataset of background/texture images collected recursively from disk.

    Images are loaded as RGB, resized, optionally randomly flipped, and
    returned as tensors.
    """

    def __init__(self, data_path_list, img_size=(128, 64), normalize=True, random=True):
        self.data_path_list = data_path_list
        self.img_size = img_size
        self.normalize = normalize
        self.to_tensor = ToTensor(normalize=self.normalize)
        self.data = []
        self.generate_index()
        self.random = random
        self.random_crop = RandomCrop(output_size=self.img_size)
        self.random_flip = RandomFlip(flip_prob=0.5)
        self.resize = Resize_pose(output_size=img_size)

    def generate_index(self):
        """Recursively collect all *.jpg paths under every configured root."""
        print('generating background index')
        for base_dir in self.data_path_list:
            for root, _dirs, files in os.walk(base_dir):
                for filename in tqdm.tqdm(files):
                    if filename.endswith('.jpg'):
                        self.data.append(os.path.join(root, filename))
        print('finish generating background index, found texture image: {}'.format(len(self.data)))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # cv2 loads BGR; convert to RGB before the transform pipeline.
        image = cv2.imread(self.data[index])
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = self.resize(image)
        if self.random:
            image = self.random_flip(image)
        return self.to_tensor(image)
class Migration(migrations.Migration):
    """Drop legacy typeclass proxy models and tighten Attribute/Tag fields
    (indexes, verbose names, help texts) after the player-table cleanup."""
    dependencies = [('typeclasses', '0010_delete_old_player_tables')]
    operations = [migrations.DeleteModel(name='DefaultAccount'), migrations.DeleteModel(name='DefaultCharacter'), migrations.DeleteModel(name='DefaultExit'), migrations.DeleteModel(name='DefaultGuest'), migrations.DeleteModel(name='DefaultObject'), migrations.DeleteModel(name='DefaultRoom'), migrations.DeleteModel(name='DefaultScript'), migrations.DeleteModel(name='DoNothing'), migrations.DeleteModel(name='ScriptBase'), migrations.DeleteModel(name='Store'), migrations.AlterField(model_name='attribute', name='db_attrtype', field=models.CharField(blank=True, db_index=True, help_text='Subclass of Attribute (None or nick)', max_length=16, null=True, verbose_name='attrtype')), migrations.AlterField(model_name='attribute', name='db_category', field=models.CharField(blank=True, db_index=True, help_text='Optional categorization of attribute.', max_length=128, null=True, verbose_name='category')), migrations.AlterField(model_name='attribute', name='db_date_created', field=models.DateTimeField(auto_now_add=True, verbose_name='date_created')), migrations.AlterField(model_name='attribute', name='db_key', field=models.CharField(db_index=True, max_length=255, verbose_name='key')), migrations.AlterField(model_name='attribute', name='db_lock_storage', field=models.TextField(blank=True, help_text='Lockstrings for this object are stored here.', verbose_name='locks')), migrations.AlterField(model_name='attribute', name='db_model', field=models.CharField(blank=True, db_index=True, help_text="Which model of object this attribute is attached to (A natural key like 'objects.objectdb'). 
You should not change this value unless you know what you are doing.", max_length=32, null=True, verbose_name='model')), migrations.AlterField(model_name='attribute', name='db_strvalue', field=models.TextField(blank=True, help_text='String-specific storage for quick look-up', null=True, verbose_name='strvalue')), migrations.AlterField(model_name='attribute', name='db_value', field=evennia.utils.picklefield.PickledObjectField(help_text='The data returned when the attribute is accessed. Must be written as a Python literal if editing through the admin interface. Attribute values which are not Python literals cannot be edited through the admin interface.', null=True, verbose_name='value')), migrations.AlterField(model_name='tag', name='db_category', field=models.CharField(db_index=True, help_text='tag category', max_length=64, null=True, verbose_name='category')), migrations.AlterField(model_name='tag', name='db_data', field=models.TextField(blank=True, help_text='optional data field with extra information. This is not searched for.', null=True, verbose_name='data')), migrations.AlterField(model_name='tag', name='db_key', field=models.CharField(db_index=True, help_text='tag identifier', max_length=255, null=True, verbose_name='key')), migrations.AlterField(model_name='tag', name='db_model', field=models.CharField(db_index=True, help_text='database model to Tag', max_length=32, null=True, verbose_name='model')), migrations.AlterField(model_name='tag', name='db_tagtype', field=models.CharField(db_index=True, help_text='overall type of Tag', max_length=16, null=True, verbose_name='tagtype'))]
def generate_and_save(env_name, traj_len, cache_size=100000, qpos_only=False, qpos_qvel=False, delta=True, whiten=True, pixels=False, source_img_width=64):
    """Materialize a GymData dataset and pickle it to the canonical path.

    Every cached index is touched once to force the lazy cache to populate,
    then the whole dataset object is saved with torch.save. Returns the
    dataset.
    """
    dataset = GymData(env_name, traj_len, cache_size, qpos_only, qpos_qvel, delta, whiten, pixels, source_img_width)
    print('Generating dataset to save.')
    for idx in range(dataset.cache_size):
        # Indexing populates the cache entry as a side effect.
        _ = dataset[idx]
        if idx % 10000 == 0:
            print('{}/{}'.format(idx, dataset.cache_size))
    out_path = data_path(env_name, traj_len, cache_size, qpos_only, qpos_qvel, delta, whiten, pixels, source_img_width)
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    torch.save(dataset, out_path)
    return dataset
class LinePrecisionReporter(AbstractReporter):
    """Reporter that writes a per-module table of line-precision counts
    (precise / imprecise / any / empty / unanalyzed) to lineprecision.txt."""
    def __init__(self, reports: Reports, output_dir: str) -> None:
        super().__init__(reports, output_dir)
        # Accumulated per-file statistics, flushed in on_finish().
        self.files: list[FileInfo] = []
    def on_file(self, tree: MypyFile, modules: dict[(str, MypyFile)], type_map: dict[(Expression, Type)], options: Options) -> None:
        """Collect per-line precision counts for one analyzed file."""
        try:
            path = os.path.relpath(tree.path)
        except ValueError:
            # Path on a different drive (Windows) -- cannot relativize; skip.
            return
        if should_skip_path(path):
            return
        visitor = stats.StatisticsVisitor(inferred=True, filename=tree.fullname, modules=modules, typemap=type_map, all_nodes=True)
        tree.accept(visitor)
        file_info = FileInfo(path, tree._fullname)
        for (lineno, _) in iterate_python_lines(path):
            # Lines the visitor did not classify count as empty.
            status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
            file_info.counts[status] += 1
        self.files.append(file_info)
    def on_finish(self) -> None:
        """Write the aligned summary table, one row per module."""
        if (not self.files):
            # Nothing to report.
            return
        output_files = sorted(self.files, key=(lambda x: x.module))
        report_file = os.path.join(self.output_dir, 'lineprecision.txt')
        # First column is as wide as the longest module name (min 4).
        width = max(4, max((len(info.module) for info in output_files)))
        titles = ('Lines', 'Precise', 'Imprecise', 'Any', 'Empty', 'Unanalyzed')
        widths = ((width,) + tuple((len(t) for t in titles)))
        fmt = ('{:%d}  {:%d}  {:%d}  {:%d}  {:%d}  {:%d}  {:%d}\n' % widths)
        with open(report_file, 'w') as f:
            f.write(fmt.format('Name', *titles))
            f.write((('-' * (width + 51)) + '\n'))
            for file_info in output_files:
                counts = file_info.counts
                f.write(fmt.format(file_info.module.ljust(width), file_info.total(), counts[stats.TYPE_PRECISE], counts[stats.TYPE_IMPRECISE], counts[stats.TYPE_ANY], counts[stats.TYPE_EMPTY], counts[stats.TYPE_UNANALYZED]))
class PersianSecondDirective(PersianNumberDirective):
    """Directive handling the seconds component of a Persian time value."""

    def format(self, d):
        # Delegate numeric rendering of the seconds value to the base class.
        return super(PersianSecondDirective, self).format(d.second)

    def post_parser(self, ctx, formatter):
        super(PersianSecondDirective, self).post_parser(ctx, formatter)
        parsed = ctx[self.name] if self.name in ctx else None
        if parsed:
            ctx['second'] = parsed
def parse_unit_patterns(data, tree):
    """Extract unit patterns, compound-unit patterns, and display names from a
    CLDR XML tree into *data*, keeping only nominative-case patterns."""
    unit_patterns = data.setdefault('unit_patterns', {})
    compound_patterns = data.setdefault('compound_unit_patterns', {})
    unit_display_names = data.setdefault('unit_display_names', {})
    for length_elem in tree.findall('.//units/unitLength'):
        length_type = length_elem.attrib['type']
        for unit_elem in length_elem.findall('unit'):
            utype = unit_elem.attrib['type']
            patterns_for_length = unit_patterns.setdefault(utype, {}).setdefault(length_type, {})
            for pat in unit_elem.findall('unitPattern'):
                # Only the nominative case is collected.
                if pat.attrib.get('case', 'nominative') != 'nominative':
                    continue
                patterns_for_length[pat.attrib['count']] = _text(pat)
            per_pat = unit_elem.find('perUnitPattern')
            if per_pat is not None:
                patterns_for_length['per'] = _text(per_pat)
            name_elem = unit_elem.find('displayName')
            if name_elem is not None:
                unit_display_names.setdefault(utype, {})[length_type] = _text(name_elem)
        for unit_elem in length_elem.findall('compoundUnit'):
            utype = unit_elem.attrib['type']
            info = {}
            variations = {}
            for child in unit_elem:
                if child.attrib.get('case', 'nominative') != 'nominative':
                    continue
                if child.tag == 'unitPrefixPattern':
                    info['prefix'] = _text(child)
                elif child.tag == 'compoundUnitPattern':
                    # The count-less pattern is keyed on None.
                    variations[None] = _text(child)
                elif child.tag == 'compoundUnitPattern1':
                    variations[child.attrib.get('count')] = _text(child)
            if variations:
                distinct = set(variations.values())
                if len(distinct) == 1:
                    # All variations agree; store the single pattern.
                    info['compound'] = distinct.pop()
                else:
                    info['compound_variations'] = variations
            compound_patterns.setdefault(utype, {})[length_type] = info
def main(options, arguments):
    """Match clustering medoids against KYM phashes on the selected GPU.

    options: parsed CLI options with device, distance, phashes, clustering,
        and output attributes.
    arguments: positional CLI arguments (unused).
    """
    devices = ['/gpu:0', '/gpu:1']
    # Fixed: compare to None with `is`, not `==` (PEP 8).
    device = devices[0] if options.device is None else devices[int(options.device)]
    distance = 8 if options.distance is None else int(options.distance)
    global DISTANCE_THRESHOLD
    DISTANCE_THRESHOLD = distance
    phases_path = options.phashes
    clusters_file = options.clustering
    kym_phashes_file = 'kym_phashes_classes.txt'
    outfile = options.output
    clusters = process_clusters_file(clusters_file)
    src_hashes_dic = read_phashes_manifest(phases_path)
    src_hashes = precompute_vectors(src_hashes_dic, phases_path)
    src_values = list(src_hashes_dic.values())
    # Map each cluster medoid back to its precomputed hash vector.
    cluster_hashes = []
    for cid in clusters:
        medoid = clusters[cid]
        index = src_values.index(medoid)
        cluster_hashes.append(src_hashes[index])
    # Fixed typo in the log message: "backnone" -> "backbone".
    print('[i] computed cluster backbone with #hashes', len(cluster_hashes))
    (kym_phashes_by_meme_dic, kym_images_dic, kym_images_dic_reverse, kym_meme_name) = process_kym_files(kym_phashes_file)
    kym_phashes = precompute_vectors(kym_images_dic_reverse, kym_phashes_file)
    hashes_diff = {}
    blacklist = []
    print('[i] seek_queue_many init')
    with tf.device(device):
        seek_queue_many(list(clusters.keys()), list(kym_images_dic_reverse.keys()), cluster_hashes, kym_phashes, outfile, blacklist, hashes_diff)
    print('[i] seek_queue_many end', outfile)
class BaseResampler():
    """Base class for resamplers that map data between two geometries."""

    def __init__(self, source_geo_def: Union[(SwathDefinition, AreaDefinition)], target_geo_def: Union[(CoordinateDefinition, AreaDefinition)]):
        self.source_geo_def = source_geo_def
        self.target_geo_def = target_geo_def

    def get_hash(self, source_geo_def=None, target_geo_def=None, **kwargs):
        """Return a hex digest identifying this source/target/kwargs combination."""
        src = self.source_geo_def if source_geo_def is None else source_geo_def
        dst = self.target_geo_def if target_geo_def is None else target_geo_def
        hasher = src.update_hash()
        dst.update_hash(hasher)
        hash_dict(kwargs, hasher)
        return hasher.hexdigest()

    def precompute(self, **kwargs):
        """Hook for subclasses to precompute resampling data; returns a cache id."""
        return None

    def compute(self, data, **kwargs):
        """Perform the actual resampling; must be implemented by subclasses."""
        raise NotImplementedError

    def resample(self, data, cache_dir=None, mask_area=None, **kwargs):
        """Resample *data* onto the target geometry, optionally masking invalid pixels."""
        if self._geometries_are_the_same():
            # Identical geometries: nothing to do.
            return data
        if mask_area is None and isinstance(self.source_geo_def, SwathDefinition):
            # Swath sources default to masking since they commonly contain gaps.
            mask_area = True
        if mask_area:
            if isinstance(self.source_geo_def, SwathDefinition):
                geo_dims = self.source_geo_def.lons.dims
            else:
                geo_dims = ('y', 'x')
            non_geo_dims = [dim for dim in data.dims if dim not in geo_dims]
            if np.issubdtype(data.dtype, np.integer):
                fill = data.attrs.get('_FillValue', np.iinfo(data.dtype.type).max)
                invalid = (data == fill)
            else:
                invalid = data.isnull()
            # Collapse non-geographic dims: a pixel is masked only if invalid everywhere.
            kwargs['mask'] = invalid.all(dim=non_geo_dims)
        cache_id = self.precompute(cache_dir=cache_dir, **kwargs)
        return self.compute(data, cache_id=cache_id, **kwargs)

    def _geometries_are_the_same(self):
        """Return True when source and target geometries are provably identical."""
        src, dst = self.source_geo_def, self.target_geo_def
        if src is dst:
            return True
        if type(src) is not type(dst):
            return False
        if isinstance(src, AreaDefinition):
            return src == dst
        (src_lons, src_lats) = src.get_lonlats()
        (dst_lons, dst_lats) = dst.get_lonlats()
        if src_lons is dst_lons and src_lats is dst_lats:
            return True
        arrays = (src_lons, src_lats, dst_lons, dst_lats)
        if not all(isinstance(arr, da.Array) for arr in arrays):
            # Can't cheaply compare non-dask coordinate arrays.
            return False
        # Dask arrays with equal names are backed by the same task graph.
        return src_lons.name == dst_lons.name and src_lats.name == dst_lats.name

    def _create_cache_filename(self, cache_dir=None, prefix='', fmt='.zarr', **kwargs):
        """Build a cache filename from the resampling hash."""
        directory = cache_dir or '.'
        return os.path.join(directory, prefix + self.get_hash(**kwargs) + fmt)
class RedisOrchestrator(Orchestrator):
    """Orchestrator backed by Redis pub/sub and keyspace-expiration events.

    Key changes are broadcast on a single pub/sub channel.  Because Redis
    removes an expired key before notifying listeners, every key written with
    an expiration gets a companion `<key>/expiring` marker carrying the real
    TTL (the key itself lives ONE_DAY longer); when the marker expires, the
    still-readable value is copied to `<key>/expired` so watchers can be
    notified with the original value.
    """

    def __init__(self, host='127.0.0.1', port=6379, password=None, db=0, cert_and_key=None, ca_cert=None, ssl=False, skip_keyspace_event_setup=False, canceller_only=False, **kwargs):
        self.is_canceller_only = canceller_only
        (cert, key) = (tuple(cert_and_key) if (cert_and_key is not None) else (None, None))
        self._client = redis.StrictRedis(host=host, port=port, password=password, db=db, ssl_certfile=cert, ssl_keyfile=key, ssl_ca_certs=ca_cert, ssl=ssl, socket_connect_timeout=1, health_check_interval=2)
        self._shutting_down = False
        self._watched_keys = {}
        self._pubsub_key = slash_join(kwargs.get('orchestrator_prefix', ''), REDIS_DEFAULT_PUBSUB_KEY).lstrip('/')
        if not self.is_canceller_only:
            logger.debug('creating pubsub with key %s', self._pubsub_key)
            self._pubsub = self._client.pubsub()
            self._pubsub.subscribe(**{self._pubsub_key: self._published_key_handler})
            self._pubsub_thread = self._pubsub.run_in_thread(daemon=True, sleep_time=5)
            if not skip_keyspace_event_setup:
                # Keyspace expiration notifications must be enabled server-side.
                self._client.config_set(REDIS_KEYSPACE_EVENT_CONFIG_KEY, REDIS_KEYSPACE_EXPIRED_EVENT_CONFIG_VALUE)
            self._pubsub_expiring = self._client.pubsub()
            self._pubsub_expiring.psubscribe(**{(REDIS_EXPIRED_KEYSPACE_PATTERN % (db, '*')): self._expiring_key_handler})
            self._pubsub_expiring_thread = self._pubsub_expiring.run_in_thread(daemon=True, sleep_time=5)

    def _expiring_key_handler(self, message):
        """Pub/sub callback for keyspace 'expired' events.

        Preserves the expired key's value under `<key>/expired` and notifies
        matching watchers with an EXPIRE KeyChange.
        """
        # Fixed: pre-bind everything used by the except blocks and the
        # post-try notification so an early failure cannot raise
        # NameError/UnboundLocalError.
        key = None
        expired_value = None
        message_tup = None
        try:
            message_tup = (message.get('type'), message.get('pattern').decode('utf-8'), message.get('channel').decode('utf-8'), message.get('data').decode('utf-8'))
            if self._is_expired_keyspace_event(message_tup):
                key = self._key_from_expiration(message_tup)
                expired_value = self._client.get(key)
                if expired_value:
                    # Preserve the value under the expired suffix, then drop
                    # the original key.
                    self._client.set(slash_join(key, REDIS_EXPIRED_SUFFIX), expired_value)
                    self._client.delete(key)
        except redis.ConnectionError:
            _sleep_orchestrator()
        except redis.RedisError as re:
            logger.exception('Redis exception watching redis expirations: %s - %s', key, re)
        except Exception as e:
            logger.exception('Unknown exception watching redis expirations: %s - %s', key, e)
        if (message_tup is not None) and self._is_expired_keyspace_event(message_tup) and (expired_value is not None):
            for (watched_key, callback) in self._watched_keys.items():
                if key.startswith(watched_key):
                    callback(KeyChange(KeyEvent.EXPIRE, key, expired_value))

    def _published_key_handler(self, message):
        """Pub/sub callback for key-change events published via _publish."""
        # Fixed: pre-bind so the post-try check cannot hit an unbound local
        # when decoding fails.
        redis_event = None
        event_key = event_value = None
        try:
            (redis_event, event_key, event_value) = (message.get('type'), message.get('channel').decode('utf-8'), message.get('data').decode('utf-8'))
        except redis.ConnectionError:
            _sleep_orchestrator()
        except redis.RedisError as re:
            # Fixed: previously logged an undefined `key`, raising NameError.
            logger.exception('Redis exception handling published key event: %s - %s', message, re)
        except Exception as e:
            logger.exception('Unknown exception handling published key event: %s - %s', message, e)
        if (redis_event == REDIS_EVENT_KIND_MESSAGE):
            keychange = self._publish_to_keychange(event_value)
            for (watched_key, callback) in self._watched_keys.items():
                if keychange.key.startswith(watched_key):
                    callback(keychange)

    def on_key_change(self, key, callback, restarter=None):
        """Invoke *callback* with a KeyChange whenever a key under *key* changes."""
        assert (not self.is_canceller_only)
        logger.debug('watching key: %s', key)
        self._watched_keys[key] = callback

    # Fixed: these three helpers take no `self`, so they must be staticmethods;
    # calling them through `self.` previously raised TypeError.
    @staticmethod
    def _is_expired_keyspace_event(event_result):
        """Return True when *event_result* is a keyspace 'expired' pmessage."""
        if (event_result is None):
            return False
        (redis_event, _pattern, matched_key, expired) = event_result
        return ((redis_event == REDIS_EVENT_KIND_PMESSAGE) and (expired == 'expired') and (REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key) is not None))

    @staticmethod
    def _key_from_expiration(event_result):
        """Extract the expired key name from a keyspace event tuple."""
        (_redis_event, _pattern, matched_key, _expired) = event_result
        return REDIS_EXPIRED_KEYSPACE_REGEX.match(matched_key).groups()[1]

    @staticmethod
    def _publish_to_keychange(event_value):
        """Deserialize a published JSON payload into a KeyChange."""
        e = json.loads(event_value)
        return KeyChange(KeyEvent(e['event']), e['key'], e['value'])

    def get_prefixed_keys(self, prefix):
        """Return {key: value} for every non-expiring key starting with *prefix*.

        Internal `/expiring` and `/expired` bookkeeping keys are skipped.
        Raises OrchestratorConnectionError/OrchestratorError on Redis errors.
        """
        assert (not self.is_canceller_only)
        keys = self._client.keys((prefix + '*'))
        results = {}
        for key in keys:
            key_str = key.decode('utf-8')
            if key_str.endswith(REDIS_EXPIRING_SUFFIX) or key_str.endswith(REDIS_EXPIRED_SUFFIX):
                continue
            ttl = self._client.ttl(key)
            if (ttl == REDIS_NONEXPIRING_KEY):
                try:
                    value = self._client.get(key)
                    if (value is None):
                        raise KeyError(key)
                except redis.ConnectionError as rce:
                    raise OrchestratorConnectionError(rce)
                except redis.RedisError as re:
                    raise OrchestratorError(re)
                results[key_str] = value.decode('utf-8')
        return results

    def _key_is_expired(self, key):
        """Return True when *key* has an `<key>/expired` marker."""
        expired_key = slash_join(key, REDIS_EXPIRED_SUFFIX)
        # Fixed: previously queried `key` itself (already gone by the time this
        # is called), so expirations were never detected.
        return self._client.get(expired_key) is not None

    def get_key(self, key):
        """Return the decoded value of *key*; raise KeyError when missing.

        A missing key with an `/expired` marker has the marker cleaned up
        before the KeyError is raised.
        """
        assert (not self.is_canceller_only)
        try:
            value = self._client.get(key)
            if (value is None):
                if self._key_is_expired(key):
                    self._client.delete(slash_join(key, REDIS_EXPIRED_SUFFIX))
                raise KeyError(key)
        except redis.ConnectionError as rce:
            raise OrchestratorConnectionError(rce)
        except redis.RedisError as re:
            raise OrchestratorError(re)
        return value.decode('utf-8')

    def set_key(self, key, value, overwrite=False, expiration=None):
        """Set *key* to *value*, optionally with an expiration in seconds.

        Raises KeyError when the key exists and overwrite is False.  The real
        key is kept ONE_DAY longer than requested so its value is still
        readable when the `/expiring` marker (carrying the true TTL) fires the
        expiration event.
        """
        try:
            already_exists = self._client.exists(key)
            if (already_exists and (not overwrite)):
                raise KeyError(key)
            self._client.set(key, value, xx=overwrite)
            if (expiration is not None):
                self._client.expire(key, (expiration + ONE_DAY))
                overwrite_expiring_key = self._client.exists(slash_join(key, REDIS_EXPIRING_SUFFIX))
                self._client.set(slash_join(key, REDIS_EXPIRING_SUFFIX), '', xx=overwrite_expiring_key, ex=expiration)
                self._client.delete(slash_join(key, REDIS_EXPIRED_SUFFIX))
            key_event = (KeyEvent.SET if already_exists else KeyEvent.CREATE)
            self._publish(event=key_event, key=key, value=value)
        except redis.ConnectionError as rce:
            raise OrchestratorConnectionError(rce)
        except redis.RedisError as re:
            raise OrchestratorError(re)

    def _publish(self, **kwargs):
        """Broadcast a key-change event as JSON on the shared pub/sub channel."""
        # KeyEvent is serialized as its integer value.
        kwargs['event'] = int(kwargs['event'])
        event_json = json.dumps(kwargs)
        logger.debug('publishing event: %s', event_json)
        self._client.publish(self._pubsub_key, event_json)

    def delete_key(self, key):
        """Delete *key* and its bookkeeping keys; raise KeyError when missing."""
        assert (not self.is_canceller_only)
        try:
            value = self._client.get(key)
            if (value is None):
                raise KeyError(key)
            self._client.delete(key)
            self._client.delete(slash_join(key, REDIS_EXPIRING_SUFFIX))
            self._client.delete(slash_join(key, REDIS_EXPIRED_SUFFIX))
            # `value` is guaranteed non-None here (checked above), so the
            # redundant guard has been removed.
            self._publish(event=KeyEvent.DELETE, key=key, value=value.decode('utf-8'))
        except redis.ConnectionError as rce:
            raise OrchestratorConnectionError(rce)
        except redis.RedisError as re:
            raise OrchestratorError(re)

    def lock(self, key, expiration=DEFAULT_LOCK_EXPIRATION):
        """Attempt to acquire a lock by creating *key*; return True on success."""
        assert (not self.is_canceller_only)
        try:
            self.set_key(key, '', overwrite=False, expiration=expiration)
        except KeyError:
            return False
        return True

    def shutdown(self):
        """Stop the pub/sub worker threads (no-op beyond the flag when canceller-only)."""
        logger.debug('Shutting down redis client.')
        self._shutting_down = True
        if self.is_canceller_only:
            return
        self._pubsub_thread.stop()
        self._pubsub_expiring_thread.stop()
class PerfTimer():
    """Accumulating wall-clock timer, usable as a context manager.

    When *perf_stats* is None the timer is a complete no-op.  When the stats
    object reports CUDA-event support, start/stop also record paired CUDA
    events for later GPU timing.
    """

    def __init__(self, timer_name: str, perf_stats: Optional['PerfStats']):
        # No stats sink means every operation short-circuits.
        self.skip: bool = False
        if perf_stats is None:
            self.skip = True
            return
        self.name: str = timer_name
        self.elapsed: float = 0.0
        self._last_interval: float = 0.0
        self._perf_stats: PerfStats = perf_stats
        self._is_running: bool = False
        if perf_stats.use_cuda_events():
            self._cuda_event_intervals: List[Tuple[(CudaEvent, CudaEvent)]] = []

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exception, traceback):
        self.stop()
        # Only record the interval if the body completed without error.
        if exc_type is None:
            self.record()
        return False

    def start(self):
        """Begin timing; ignored when skipping or already running."""
        if self.skip or self._is_running:
            return
        self._is_running = True
        self._last_interval = 0.0
        self._start_time: float = perf_counter()
        if self._perf_stats.use_cuda_events():
            self._start_event = torch.cuda.Event(enable_timing=True)
            self._start_event.record()

    def stop(self):
        """End timing and accumulate the elapsed interval."""
        if self.skip or not self._is_running:
            return
        interval = perf_counter() - self._start_time
        self._last_interval = interval
        self.elapsed += interval
        if self._perf_stats.use_cuda_events():
            stop_event = torch.cuda.Event(enable_timing=True)
            stop_event.record()
            self._cuda_event_intervals.append((self._start_event, stop_event))
        self._is_running = False

    def record(self):
        """Push the accumulated timing into the stats object."""
        if self.skip:
            return
        assert not self._is_running
        self._perf_stats.update_with_timer(self)
class RTLIRGetter():
    """Convert PyMTL DSL objects into RTLIR type instances.

    When *cache* is True, conversion results are memoized on a frozen
    (hashable) form of the object.
    """

    # PyMTL constructs that count as interface primitives.
    ifc_primitive_types = (dsl.InPort, dsl.OutPort, dsl.Interface)

    def __init__(self, cache=True):
        if cache:
            self._rtlir_cache = {}
            self.get_rtlir = self._get_rtlir_cached
        else:
            self.get_rtlir = self._get_rtlir_uncached
        # Dispatch tables: first matching isinstance wins, so order matters.
        self._RTLIR_ifc_handlers = [(list, self._handle_Array), (dsl.InPort, self._handle_InPort), (dsl.OutPort, self._handle_OutPort), (dsl.Interface, self._handle_Interface)]
        self._RTLIR_handlers = [(list, self._handle_Array), (dsl.InPort, self._handle_InPort), (dsl.OutPort, self._handle_OutPort), (dsl.Wire, self._handle_Wire), ((int, Bits), self._handle_Const), (dsl.Interface, self._handle_Interface), (dsl.Component, self._handle_Component)]

    def get_component_ifc_rtlir(self, obj):
        """Return the RTLIR Component type of *obj* containing only its interfaces.

        Raises RTLIRConversionError when *obj* is not a PyMTL component.
        """
        def _is_interface(id_, obj):
            # True for interface primitives and arbitrarily nested, non-empty
            # lists of them.  (Removed unused local `_type = type(obj)`.)
            if isinstance(obj, self.ifc_primitive_types):
                return True
            if not isinstance(obj, list):
                return False
            if len(obj) == 0:
                return False
            obj = obj[0]
            while isinstance(obj, list):
                if len(obj) == 0:
                    return False
                obj = obj[0]
            return isinstance(obj, self.ifc_primitive_types)
        try:
            assert isinstance(obj, dsl.Component), 'the given object is not a PyMTL component!'
            properties = {}
            collected_objs = collect_objs(obj, object)
            for (_id, _obj) in collected_objs:
                if _is_interface(_id, _obj):
                    for (Type, handler) in self._RTLIR_ifc_handlers:
                        if isinstance(_obj, Type):
                            _obj_type = handler(_id, _obj)
                            properties[_id] = _obj_type
                            if isinstance(_obj_type, Array):
                                _add_packed_instances(_id, _obj_type, properties)
                            break
            return Component(obj, properties)
        except AssertionError as e:
            msg = ('' if (e.args[0] is None) else e.args[0])
            raise RTLIRConversionError(obj, msg)

    def _handle_Component(self, c_id, obj):
        """Convert a child component and all of its RTLIR-convertible members.

        NOTE: this method was defined twice with identical bodies; the
        duplicate has been removed.
        """
        properties = {}
        collected_objs = collect_objs(obj, object)
        for (_id, _obj) in collected_objs:
            if is_rtlir_convertible(_obj):
                _obj_type = self.get_rtlir(_obj)
                if (_obj_type is not None):
                    properties[_id] = _obj_type
                    if isinstance(_obj_type, Array):
                        _add_packed_instances(_id, _obj_type, properties)
        return Component(obj, properties)

    def _get_rtlir_uncached(self, _obj):
        """Convert *_obj* to its RTLIR representation without memoization."""
        # NOTE(review): the frozen result is unused here; the call is kept in
        # case _freeze validates/normalizes _obj — confirm and drop if pure.
        _freeze(_obj)
        try:
            for (Type, handler) in self._RTLIR_handlers:
                if isinstance(_obj, Type):
                    return handler('<NA>', _obj)
            if is_bitstruct_inst(_obj):
                return self._handle_Const('<NA>', _obj)
            assert False, f'unrecognized object {_obj}!'
        except AssertionError as e:
            msg = ('' if (e.args[0] is None) else e.args[0])
            raise RTLIRConversionError(_obj, msg)

    def _get_rtlir_cached(self, _obj):
        """Convert *_obj* to RTLIR, memoizing on its frozen (hashable) form."""
        obj = _freeze(_obj)
        if (obj in self._rtlir_cache):
            return self._rtlir_cache[obj]
        try:
            # NOTE(review): this path passes the module-level `NA` sentinel
            # while the uncached path passes the literal '<NA>' — presumably
            # equivalent placeholders; confirm and unify.
            for (Type, handler) in self._RTLIR_handlers:
                if isinstance(_obj, Type):
                    ret = self._rtlir_cache[obj] = handler(NA, _obj)
                    return ret
            if is_bitstruct_inst(_obj):
                ret = self._rtlir_cache[obj] = self._handle_Const(NA, _obj)
                return ret
            assert False, f'unrecognized object {_obj}!'
        except AssertionError as e:
            msg = ('' if (e.args[0] is None) else e.args[0])
            raise RTLIRConversionError(_obj, msg)

    def _handle_Array(self, _id, _obj):
        """Convert a (possibly nested) homogeneous list into an RTLIR Array."""
        if (not _obj):
            return None
        obj = _obj
        ref_type = self.get_rtlir(obj[0])
        # All elements must share the first element's RTLIR type.
        for x in obj[1:]:
            assert (self.get_rtlir(x) == ref_type), f'all elements of array {obj} must have the same type {repr(ref_type)}!'
        dim_sizes = []
        while isinstance(obj, list):
            if (not obj):
                return None
            dim_sizes.append(len(obj))
            obj = obj[0]
        if isinstance(obj, (int, Bits)):
            # Constant arrays carry the original value object.
            return Array(dim_sizes, self.get_rtlir(obj), _obj)
        return Array(dim_sizes, self.get_rtlir(obj))

    def _handle_InPort(self, p_id, obj):
        """Convert an input port."""
        return Port('input', get_rtlir_dtype(obj))

    def _handle_OutPort(self, p_id, obj):
        """Convert an output port."""
        return Port('output', get_rtlir_dtype(obj))

    def _handle_Wire(self, w_id, obj):
        """Convert a wire."""
        return Wire(get_rtlir_dtype(obj))

    def _handle_Const(self, c_id, obj):
        """Convert a constant value."""
        return Const(get_rtlir_dtype(obj), obj)

    def _handle_Interface(self, i_id, obj):
        """Convert an interface view and its convertible members."""
        properties = {}
        collected_objs = collect_objs(obj, object)
        for (_id, _obj) in collected_objs:
            if _is_rtlir_ifc_convertible(_obj):
                _obj_type = self.get_rtlir(_obj)
                if (_obj_type is not None):
                    properties[_id] = _obj_type
                    if isinstance(_obj_type, Array):
                        _add_packed_instances(_id, _obj_type, properties)
        return InterfaceView(obj.__class__.__name__, properties, obj)
class ExecAccuracyEvaluationResult(evaluate.EvaluationResult):
    """Evaluation result pairing prompts with per-prompt score lists."""

    def __init__(self, prompts, scores):
        self.prompts = prompts
        self.scores = scores

    def _agg_scores(self, method):
        """Aggregate each prompt's score list using the named statistic."""
        if method == 'iqm':
            # Interquartile mean approximated as the mean of the 25th/75th percentiles.
            return [np.mean(np.percentile(lps, [25, 75])) for lps in self.scores]
        reducers = {'mean': np.mean, 'median': np.median, 'std': np.std, 'max': np.max, 'min': np.min}
        if method not in reducers:
            raise ValueError('Invalid method: {}'.format(method))
        reduce_fn = reducers[method]
        return [reduce_fn(s) for s in self.scores]

    def sorted(self, method='default'):
        """Return (prompts, scores) ordered from best to worst aggregate score."""
        agg = 'mean' if method == 'default' else method
        scores = self._agg_scores(agg)
        ranked = sorted(zip(scores, self.prompts))
        prompts_desc = [p for (_, p) in ranked][::-1]
        scores_desc = sorted(scores)[::-1]
        return (prompts_desc, scores_desc)

    def in_place(self, method='default'):
        """Return (prompts, scores) in their original order."""
        agg = 'mean' if method == 'default' else method
        return (self.prompts, self._agg_scores(agg))
class GaussianDiffusion():
    def __init__(self, *, betas, model_mean_type, model_var_type, loss_type, rescale_timesteps=False):
        """Precompute all diffusion-schedule constants from the beta schedule.

        betas: 1-D array of per-timestep noise variances, each in (0, 1].
        model_mean_type: what the network predicts (previous x, x_0, or epsilon).
        model_var_type: how the output variance is obtained (fixed or learned).
        loss_type: which training loss to use.
        rescale_timesteps: if True, timesteps passed to the model are rescaled
            by 1000/num_timesteps (see _scale_timesteps).
        """
        self.model_mean_type = model_mean_type
        self.model_var_type = model_var_type
        self.loss_type = loss_type
        self.rescale_timesteps = rescale_timesteps
        # float64 keeps the cumulative products accurate over many steps.
        betas = np.array(betas, dtype=np.float64)
        self.betas = betas
        assert (len(betas.shape) == 1), 'betas must be 1-D'
        assert ((betas > 0).all() and (betas <= 1).all())
        self.num_timesteps = int(betas.shape[0])
        alphas = (1.0 - betas)
        self.alphas_cumprod = np.cumprod(alphas, axis=0)
        # Shifted cumulative products: prev is padded with 1.0 in front,
        # next is padded with 0.0 at the back.
        self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:(- 1)])
        self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
        assert (self.alphas_cumprod_prev.shape == (self.num_timesteps,))
        # Precomputed terms for the forward process q(x_t | x_0).
        self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = np.sqrt((1.0 - self.alphas_cumprod))
        self.log_one_minus_alphas_cumprod = np.log((1.0 - self.alphas_cumprod))
        self.sqrt_recip_alphas_cumprod = np.sqrt((1.0 / self.alphas_cumprod))
        self.sqrt_recipm1_alphas_cumprod = np.sqrt(((1.0 / self.alphas_cumprod) - 1))
        # Precomputed terms for the posterior q(x_{t-1} | x_t, x_0).
        self.posterior_variance = ((betas * (1.0 - self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        # Log-variance clipped at t=0 because the posterior variance there is 0.
        self.posterior_log_variance_clipped = np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:]))
        self.posterior_mean_coef1 = ((betas * np.sqrt(self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
        self.posterior_mean_coef2 = (((1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas)) / (1.0 - self.alphas_cumprod))
        # Scratch buffers; presumably populated elsewhere for visualization — confirm.
        self.imgs_diff_stages = []
        self.grad_map = []
    def q_mean_variance(self, x_start, t):
        """Return (mean, variance, log_variance) of q(x_t | x_0) for each x_start."""
        mean = (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = _extract_into_tensor((1.0 - self.alphas_cumprod), t, x_start.shape)
        log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return (mean, variance, log_variance)
    def q_sample(self, x_start, t, noise=None):
        """Diffuse x_start for t steps: sample from q(x_t | x_0).

        noise: optional standard normal noise of the same shape as x_start;
        drawn internally when omitted.
        """
        if (noise is None):
            noise = th.randn_like(x_start)
        assert (noise.shape == x_start.shape)
        return ((_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + (_extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise))
    def q_posterior_mean_variance(self, x_start, x_t, t):
        """Return mean/variance/clipped log-variance of q(x_{t-1} | x_t, x_0)."""
        assert (x_start.shape == x_t.shape)
        # Posterior mean is a fixed linear combination of x_0 and x_t.
        posterior_mean = ((_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start) + (_extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t))
        posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = _extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        assert (posterior_mean.shape[0] == posterior_variance.shape[0] == posterior_log_variance_clipped.shape[0] == x_start.shape[0])
        return (posterior_mean, posterior_variance, posterior_log_variance_clipped)
    def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
        """Apply the model to compute p(x_{t-1} | x_t) and the predicted x_0.

        Returns a dict with 'mean', 'variance', 'log_variance', and
        'pred_xstart', all shaped like *x*.
        """
        if (model_kwargs is None):
            model_kwargs = {}
        (B, C) = x.shape[:2]
        assert (t.shape == (B,))
        model_output = model(x, self._scale_timesteps(t), **model_kwargs)
        if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
            # Model emits 2*C channels: the mean-related output plus variance values.
            assert (model_output.shape == (B, (C * 2), *x.shape[2:]))
            (model_output, model_var_values) = th.split(model_output, C, dim=1)
            if (self.model_var_type == ModelVarType.LEARNED):
                model_log_variance = model_var_values
                model_variance = th.exp(model_log_variance)
            else:
                # LEARNED_RANGE: interpolate in log space between the clipped
                # posterior variance (min) and beta (max); the model output is
                # mapped from [-1, 1] to an interpolation fraction.
                min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
                max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
                frac = ((model_var_values + 1) / 2)
                model_log_variance = ((frac * max_log) + ((1 - frac) * min_log))
                model_variance = th.exp(model_log_variance)
        else:
            # Fixed variances: either betas (large) or the posterior variance (small).
            (model_variance, model_log_variance) = {ModelVarType.FIXED_LARGE: (np.append(self.posterior_variance[1], self.betas[1:]), np.log(np.append(self.posterior_variance[1], self.betas[1:]))), ModelVarType.FIXED_SMALL: (self.posterior_variance, self.posterior_log_variance_clipped)}[self.model_var_type]
            model_variance = _extract_into_tensor(model_variance, t, x.shape)
            model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)

        def process_xstart(x):
            # Optionally transform, then clamp the predicted x_0 to [-1, 1].
            if (denoised_fn is not None):
                x = denoised_fn(x)
            if clip_denoised:
                return x.clamp((- 1), 1)
            return x
        if (self.model_mean_type == ModelMeanType.PREVIOUS_X):
            # Model predicts x_{t-1} directly; derive x_0 from it.
            pred_xstart = process_xstart(self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output))
            model_mean = model_output
        elif (self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]):
            if (self.model_mean_type == ModelMeanType.START_X):
                pred_xstart = process_xstart(model_output)
            else:
                # Model predicts the noise epsilon; invert to get x_0.
                pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))
            (model_mean, _, _) = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
        else:
            raise NotImplementedError(self.model_mean_type)
        assert (model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape)
        return {'mean': model_mean, 'variance': model_variance, 'log_variance': model_log_variance, 'pred_xstart': pred_xstart}
    def _predict_xstart_from_eps(self, x_t, t, eps):
        """Invert the forward process: recover x_0 from x_t and predicted noise."""
        assert (x_t.shape == eps.shape)
        return ((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - (_extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps))
    def _predict_xstart_from_xprev(self, x_t, t, xprev):
        """Recover x_0 from x_t and a predicted x_{t-1} by inverting the posterior mean."""
        assert (x_t.shape == xprev.shape)
        # Solves xprev = coef1 * x_0 + coef2 * x_t for x_0.
        return ((_extract_into_tensor((1.0 / self.posterior_mean_coef1), t, x_t.shape) * xprev) - (_extract_into_tensor((self.posterior_mean_coef2 / self.posterior_mean_coef1), t, x_t.shape) * x_t))
    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        """Recover the implied noise epsilon from x_t and a predicted x_0."""
        return (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) - pred_xstart) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape))
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return (t.float() * (1000.0 / self.num_timesteps))
return t
    def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """Shift the predicted mean by the guidance gradient scaled by the variance."""
        gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
        new_mean = (p_mean_var['mean'].float() + (p_mean_var['variance'] * gradient.float()))
        return new_mean
    def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
        """Apply guidance in epsilon space and recompute pred_xstart and the mean."""
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        eps = self._predict_eps_from_xstart(x, t, p_mean_var['pred_xstart'])
        # Guidance is subtracted from the implied noise, scaled by sqrt(1 - alpha_bar).
        eps = (eps - ((1 - alpha_bar).sqrt() * cond_fn(x, self._scale_timesteps(t), **model_kwargs)))
        out = p_mean_var.copy()
        out['pred_xstart'] = self._predict_xstart_from_eps(x, t, eps)
        (out['mean'], _, _) = self.q_posterior_mean_variance(x_start=out['pred_xstart'], x_t=x, t=t)
        return out
    def p_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None):
        """Sample x_{t-1} from the model at timestep t (ancestral sampling step).

        Returns a dict with 'sample' (x_{t-1}) and 'pred_xstart'.
        """
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        noise = th.randn_like(x)
        # Zero out the noise term on the final step (t == 0).
        nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
        if (cond_fn is not None):
            out['mean'] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
        sample = (out['mean'] + ((nonzero_mask * th.exp((0.5 * out['log_variance']))) * noise))
        return {'sample': sample, 'pred_xstart': out['pred_xstart']}
def p_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
final = None
for sample in self.p_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress):
final = sample
return final['sample']
def p_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False):
if (device is None):
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if (noise is not None):
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::(- 1)]
print(indices)
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor(([i] * shape[0]), device=device)
with th.no_grad():
out = self.p_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs)
(yield out)
img = out['sample']
def ddim_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, eta=0.0):
out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
if (cond_fn is not None):
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
eps = self._predict_eps_from_xstart(x, t, out['pred_xstart'])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = ((eta * th.sqrt(((1 - alpha_bar_prev) / (1 - alpha_bar)))) * th.sqrt((1 - (alpha_bar / alpha_bar_prev))))
noise = th.randn_like(x)
mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_prev)) + (th.sqrt(((1 - alpha_bar_prev) - (sigma ** 2))) * eps))
nonzero_mask = (t != 0).float().view((- 1), *([1] * (len(x.shape) - 1)))
sample = mean_pred
return {'sample': sample, 'pred_xstart': out['pred_xstart']}
    def ddim_reverse_sample(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, eta=0.0):
        """Sample x_{t+1} from x_t using the deterministic DDIM reverse ODE."""
        assert (eta == 0.0), 'Reverse ODE only for deterministic path'
        out = self.p_mean_variance(model, x, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, model_kwargs=model_kwargs)
        # Implied noise from the model's x_0 prediction.
        eps = (((_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x) - out['pred_xstart']) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape))
        alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
        mean_pred = ((out['pred_xstart'] * th.sqrt(alpha_bar_next)) + (th.sqrt((1 - alpha_bar_next)) * eps))
        return {'sample': mean_pred, 'pred_xstart': out['pred_xstart']}
def ddim_sample_loop(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
final = None
for sample in self.ddim_sample_loop_progressive(model, shape, noise=noise, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, device=device, progress=progress, eta=eta):
final = sample
return final['sample']
def ddim_sample_loop_progressive(self, model, shape, noise=None, clip_denoised=True, denoised_fn=None, cond_fn=None, model_kwargs=None, device=None, progress=False, eta=0.0):
if (device is None):
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if (noise is not None):
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::(- 1)]
if progress:
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor(([i] * shape[0]), device=device)
with th.no_grad():
out = self.ddim_sample(model, img, t, clip_denoised=clip_denoised, denoised_fn=denoised_fn, cond_fn=cond_fn, model_kwargs=model_kwargs, eta=eta)
(yield out)
img = out['sample']
    def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None):
        """Compute a variational-bound term in bits-per-dimension.

        Returns a dict with 'output' (KL for t > 0, decoder NLL at t == 0,
        both divided by log 2) and 'pred_xstart'.
        """
        (true_mean, _, true_log_variance_clipped) = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)
        out = self.p_mean_variance(model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
        kl = normal_kl(true_mean, true_log_variance_clipped, out['mean'], out['log_variance'])
        # Convert nats to bits.
        kl = (mean_flat(kl) / np.log(2.0))
        decoder_nll = (- discretized_gaussian_log_likelihood(x_start, means=out['mean'], log_scales=(0.5 * out['log_variance'])))
        assert (decoder_nll.shape == x_start.shape)
        decoder_nll = (mean_flat(decoder_nll) / np.log(2.0))
        # At t == 0 use the decoder NLL, otherwise the KL term.
        output = th.where((t == 0), decoder_nll, kl)
        return {'output': output, 'pred_xstart': out['pred_xstart']}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
    """Compute training loss terms for a batch of inputs and timesteps.

    :param model: the diffusion model being trained.
    :param x_start: tensor of clean inputs.
    :param t: batch of timestep indices.
    :param model_kwargs: extra keyword args forwarded to the model.
    :param noise: optional pre-sampled Gaussian noise for q_sample.
    :return: dict with 'loss' (and possibly 'mse' / 'vb') per-example tensors.
    """
    if (model_kwargs is None):
        model_kwargs = {}
    if (noise is None):
        noise = th.randn_like(x_start)
    # Diffuse the clean input forward to timestep t.
    x_t = self.q_sample(x_start, t, noise=noise)
    terms = {}
    if ((self.loss_type == LossType.KL) or (self.loss_type == LossType.RESCALED_KL)):
        # Pure variational-bound training.
        terms['loss'] = self._vb_terms_bpd(model=model, x_start=x_start, x_t=x_t, t=t, clip_denoised=False, model_kwargs=model_kwargs)['output']
        if (self.loss_type == LossType.RESCALED_KL):
            terms['loss'] *= self.num_timesteps
    elif ((self.loss_type == LossType.MSE) or (self.loss_type == LossType.RESCALED_MSE)):
        model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
        if (self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]):
            (B, C) = x_t.shape[:2]
            assert (model_output.shape == (B, (C * 2), *x_t.shape[2:]))
            (model_output, model_var_values) = th.split(model_output, C, dim=1)
            # Train the variance via the VB term but detach the mean
            # prediction so the VB loss does not influence it.
            frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
            terms['vb'] = self._vb_terms_bpd(model=(lambda *args, r=frozen_out: r), x_start=x_start, x_t=x_t, t=t, clip_denoised=False)['output']
            if (self.loss_type == LossType.RESCALED_MSE):
                # Keep the vb term roughly on the same scale as in the KL setting.
                terms['vb'] *= (self.num_timesteps / 1000.0)
        # Regression target depends on what the model is parameterized to predict.
        target = {ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)[0], ModelMeanType.START_X: x_start, ModelMeanType.EPSILON: noise}[self.model_mean_type]
        assert (model_output.shape == target.shape == x_start.shape)
        terms['mse'] = mean_flat(((target - model_output) ** 2))
        if ('vb' in terms):
            terms['loss'] = (terms['mse'] + terms['vb'])
        else:
            terms['loss'] = terms['mse']
    else:
        raise NotImplementedError(self.loss_type)
    return terms
def _prior_bpd(self, x_start):
    """Compute the prior KL term of the bound, in bits-per-dim.

    Measures how far q(x_T | x_0) is from the standard-normal prior;
    this term does not depend on the model parameters.
    """
    n = x_start.shape[0]
    t_last = th.tensor([self.num_timesteps - 1] * n, device=x_start.device)
    (qt_mean, _, qt_log_variance) = self.q_mean_variance(x_start, t_last)
    prior_kl = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
    # Convert nats to bits.
    return mean_flat(prior_kl) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
    """Evaluate the full variational bound for a batch, timestep by timestep.

    Returns a dict of per-example tensors: 'total_bpd' ([N]), 'prior_bpd'
    ([N]), and per-timestep 'vb', 'xstart_mse', 'mse' ([N x T], ordered
    from t = T-1 down to t = 0 along dim 1 -- see the reversed loop below).
    """
    device = x_start.device
    batch_size = x_start.shape[0]
    vb = []
    xstart_mse = []
    mse = []
    # Walk timesteps in reverse (T-1 ... 0), accumulating one column per step.
    for t in list(range(self.num_timesteps))[::(- 1)]:
        t_batch = th.tensor(([t] * batch_size), device=device)
        noise = th.randn_like(x_start)
        x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
        with th.no_grad():
            out = self._vb_terms_bpd(model, x_start=x_start, x_t=x_t, t=t_batch, clip_denoised=clip_denoised, model_kwargs=model_kwargs)
        vb.append(out['output'])
        xstart_mse.append(mean_flat(((out['pred_xstart'] - x_start) ** 2)))
        # Recover the implied epsilon prediction and score it against the true noise.
        eps = self._predict_eps_from_xstart(x_t, t_batch, out['pred_xstart'])
        mse.append(mean_flat(((eps - noise) ** 2)))
    vb = th.stack(vb, dim=1)
    xstart_mse = th.stack(xstart_mse, dim=1)
    mse = th.stack(mse, dim=1)
    prior_bpd = self._prior_bpd(x_start)
    total_bpd = (vb.sum(dim=1) + prior_bpd)
    return {'total_bpd': total_bpd, 'prior_bpd': prior_bpd, 'vb': vb, 'xstart_mse': xstart_mse, 'mse': mse}
class RepeatListForever(Repeat):
    """Repeat mode that loops the whole playlist endlessly."""
    name = 'repeat_all'
    display_name = _('Repeat all')
    accelerated_name = _('Repeat _all')

    def next(self, playlist, iter):
        # Delegate to the wrapped order first; only wrap around when it
        # reports the end of the list.
        candidate = self.wrapped.next(playlist, iter)
        if not candidate:
            self.wrapped.reset(playlist)
            print_d('Restarting songlist')
            candidate = playlist.get_iter_first()
        return candidate
class SyncPerformerTests(TestCase):
    """Tests for ``sync_performer`` / ``sync_perform``.

    NOTE(review): the bare ``_performer`` expression statements in the
    original were garbled decorator lines; restored as ``@sync_performer``
    (the name this class already uses directly in test_promote_metadata).
    """

    def test_success(self):
        """The performer's return value becomes the effect's result."""
        @sync_performer
        def succeed(dispatcher, intent):
            return intent
        dispatcher = (lambda _: succeed)
        result = sync_perform(dispatcher, Effect('foo'))
        self.assertEqual(result, 'foo')

    def test_failure(self):
        """An exception raised by the performer reaches the error handler."""
        @sync_performer
        def fail(dispatcher, intent):
            raise intent
        dispatcher = (lambda _: fail)
        self.assertThat(sync_perform(dispatcher, Effect(ValueError('oh dear')).on(error=(lambda e: e))), MatchesException(ValueError('oh dear')))

    def test_instance_method_performer(self):
        """A bound method can serve as a performer."""
        eff = Effect('meaningless')

        class PerformerContainer(object):
            @sync_performer
            def performer(self, dispatcher, intent):
                return (self, dispatcher, intent)
        container = PerformerContainer()
        dispatcher = (lambda i: container.performer)
        result = sync_perform(dispatcher, eff)
        self.assertEqual(result, (container, dispatcher, 'meaningless'))

    def test_promote_metadata(self):
        """sync_performer copies __name__, __doc__ and extra attributes."""
        def original(dispatcher, intent):
            """Original!"""
        original.attr = 1
        wrapped = sync_performer(original)
        self.assertEqual(wrapped.__name__, 'original')
        self.assertEqual(wrapped.attr, 1)
        self.assertEqual(wrapped.__doc__, 'Original!')

    def test_ignore_lack_of_metadata(self):
        """Wrapping an object without function metadata must not crash."""
        def original(something, dispatcher, intent):
            pass
        new_func = partial(original, 'something')
        original.attr = 1
        wrapped = sync_performer(new_func)
        self.assertEqual(wrapped.__name__, 'sync_wrapper')

    def test_kwargs(self):
        """Keyword arguments bound via partial reach the performer."""
        @sync_performer
        def p(dispatcher, intent, extra):
            return extra
        dispatcher = (lambda _: partial(p, extra='extra val'))
        result = sync_perform(dispatcher, Effect('foo'))
        self.assertEqual(result, 'extra val')
# NOTE(review): the original decorator line was garbled ('_REGISTRY.register()'
# as a bare statement); restored as a decorator application so the builder is
# actually registered -- confirm the registry name upstream.
@_REGISTRY.register()
def build_mit_backbone(cfg, input_shape):
    """Build the MiT (Mix Transformer) backbone named by cfg.MODEL.MIT_BACKBONE.NAME.

    :param cfg: config node; only MODEL.MIT_BACKBONE.NAME is read here.
    :param input_shape: unused; kept for registry-builder signature compatibility.
    :raises ValueError: if the configured name is not one of b0..b5
        (previously an unknown name silently returned None).
    """
    builders = {'b0': mit_b0, 'b1': mit_b1, 'b2': mit_b2, 'b3': mit_b3, 'b4': mit_b4, 'b5': mit_b5}
    name = cfg.MODEL.MIT_BACKBONE.NAME
    if name not in builders:
        raise ValueError('Unknown MIT backbone name: {!r}'.format(name))
    return builders[name]()
def test_no_linecache():
    """Generated (un)structure hooks should not touch linecache when
    ``_cattrs_use_linecache=False`` is requested."""
    # NOTE(review): ``A(1)`` / ``B(1)`` are called with positional args even
    # though no __init__ is visible -- an attrs ``@define`` decorator appears
    # to have been lost from these class definitions; confirm upstream.
    class A():
        a: int
    c = Converter()
    before = len(linecache.cache)
    c.structure(c.unstructure(A(1)), A)
    after = len(linecache.cache)
    # Default hook generation adds two linecache entries
    # (one for the structure hook, one for the unstructure hook).
    assert (after == (before + 2))
    class B():
        a: int
    before = len(linecache.cache)
    c.register_structure_hook(B, make_dict_structure_fn(B, c, _cattrs_use_linecache=False))
    c.register_unstructure_hook(B, make_dict_unstructure_fn(B, c, _cattrs_use_linecache=False))
    c.structure(c.unstructure(B(1)), B)
    # With linecache disabled, the cache size must not change.
    assert (len(linecache.cache) == before)
def test_delete_cur_item_no_func():
    """Deleting from a category that has no delete_func must raise
    CommandError and emit no row-removal signals."""
    removal_spy = mock.Mock(spec=[])
    model = completionmodel.CompletionModel()
    cat = listcategory.ListCategory('', [('foo', 'bar')], delete_func=None)
    model.rowsAboutToBeRemoved.connect(removal_spy)
    model.rowsRemoved.connect(removal_spy)
    model.add_category(cat)
    parent = model.index(0, 0)
    with pytest.raises(cmdutils.CommandError):
        model.delete_cur_item(model.index(0, 0, parent))
    removal_spy.assert_not_called()
class dense121(torch.nn.Module):
    """Feature extractor wrapping the globally loaded ``light_estimation`` net.

    Splits the DenseNet backbone into four sequential slices and exposes
    their intermediate activations together with the colour and direction
    prediction heads.
    """

    def __init__(self, requires_grad=False):
        super(dense121, self).__init__()
        features = light_estimation.module.dense
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        self.slice6 = torch.nn.Sequential()
        self.pool = light_estimation.module.pool
        color_head = [light_estimation.module.color1, light_estimation.module.relu1, light_estimation.module.color2, light_estimation.module.relu2, light_estimation.module.color3]
        dir_head = [light_estimation.module.dir1, light_estimation.module.relu3, light_estimation.module.dir2, light_estimation.module.relu4, light_estimation.module.dir3]
        # Partition the backbone into four consecutive slices
        # (indices [0,6), [6,8), [8,10), [10,12)).
        backbone_splits = ((self.slice1, range(6)), (self.slice2, range(6, 8)), (self.slice3, range(8, 10)), (self.slice4, range(10, 12)))
        for seq, idxs in backbone_splits:
            for idx in idxs:
                seq.add_module(str(idx), features[idx])
        for idx, layer in enumerate(color_head):
            self.slice5.add_module(str(idx), layer)
        for idx, layer in enumerate(dir_head):
            self.slice6.add_module(str(idx), layer)
        if not requires_grad:
            # Used as a fixed feature extractor by default: freeze everything.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        feat1 = self.slice1(X)
        feat2 = self.slice2(feat1)
        feat3 = self.slice3(feat2)
        feat4 = self.slice4(feat3)
        pooled = self.pool(feat4).squeeze()
        color_out = self.slice5(pooled)
        dir_out = self.slice6(pooled)
        dense_outputs = namedtuple('denseOutputs', ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'color', 'dir'])
        return dense_outputs(feat1, feat2, feat3, feat4, color_out, dir_out)
# NOTE(review): the original decorator line was garbled ('.skipif(...)' is a
# SyntaxError); restored as a pytest skip marker.
@pytest.mark.skipif(utils.is_windows, reason="current CPython/win can't recover from SIGSEGV")
def test_debug_crash_segfault():
    """debug_crash(typ='segfault') should raise and actually deliver SIGSEGV."""
    caught = False

    def _handler(num, frame):
        # Record whether the trapped signal really was SIGSEGV.
        nonlocal caught
        caught = (num == signal.SIGSEGV)

    with _trapped_segv(_handler):
        with pytest.raises(Exception, match='Segfault failed'):
            misccommands.debug_crash(typ='segfault')
        # Give the asynchronous signal handler a moment to run.
        time.sleep(0.001)
    assert caught
def get_arch(filename: Union[(str, Path)]) -> List[ArchitectureType]:
    """Detect which CPU architectures a binary targets.

    On Windows the PE header is parsed directly; on Linux/macOS the
    output of ``file(1)`` is inspected.  macOS binaries may be fat, so
    several architectures can be returned there.

    :raises OSError: on any platform other than Windows/Linux/macOS.
    """
    platform_name = sys.platform
    if platform_name.startswith('win'):
        # Windows: read the PE machine type instead of shelling out.
        machine = get_shared_library_arch(filename)
        pe_map = {PEMachineType.I386: [ArchitectureType.I386], PEMachineType.AMD64: [ArchitectureType.X86_64], PEMachineType.AARCH64: [ArchitectureType.AARCH64]}
        return pe_map.get(machine, [])
    if platform_name not in ('linux', 'darwin'):
        raise OSError('Unsupported platform: %s' % platform_name)
    output = subprocess.run(['file', filename], capture_output=True).stdout.decode('ascii')
    if platform_name.startswith('linux'):
        # Linux executables have exactly one architecture: first match wins.
        for marker, arch in (('80386', ArchitectureType.I386), ('x86-64', ArchitectureType.X86_64), ('aarch64', ArchitectureType.AARCH64)):
            if marker in output:
                return [arch]
        return []
    # macOS: collect every architecture slice present in the (possibly fat) binary.
    found: List[ArchitectureType] = []
    for marker, arch in (('executable i386', ArchitectureType.I386), ('executable x86_64', ArchitectureType.X86_64), ('executable arm64', ArchitectureType.AARCH64)):
        if marker in output:
            found.append(arch)
    return found
def plot(*args, **kargs):
    """Create a new :class:`PlotWidget`, plot the given data and show it.

    Keyword arguments understood by the widget constructor (title, axis
    labels, background, ...) are routed there; all remaining keywords are
    forwarded to :meth:`PlotWidget.plot`.
    """
    mkQApp()
    widget_keys = ['title', 'labels', 'name', 'left', 'right', 'top', 'bottom', 'background']
    widgetArgs = {}
    plotArgs = {}
    for key, value in kargs.items():
        if key in widget_keys:
            widgetArgs[key] = value
        else:
            plotArgs[key] = value
    title = widgetArgs.pop('title', 'PlotWidget')
    widget = PlotWidget(**widgetArgs)
    widget.setWindowTitle(title)
    if args or plotArgs:
        widget.plot(*args, **plotArgs)
    # Keep a module-level reference so the widget survives garbage collection.
    plots.append(widget)
    widget.show()
    return widget
def main(data_dir, client, bc, config):
    """Run TPCx-BB query 17 (promotional vs. total store sales) via bc.sql.

    Reads the tables, resolves the date surrogate-key range for the
    configured year/month, then computes the promotional-sales percentage.
    Returns the (lazy) result of the final ``bc.sql`` call.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config['dask_profile'])
    # Resolve min/max d_date_sk first so the main query can filter on
    # plain integer keys instead of re-joining date_dim.
    query_date = f'''
    select min(d_date_sk) as min_d_date_sk,
    max(d_date_sk) as max_d_date_sk
    from date_dim
    where d_year = {q17_year}
    and d_moy = {q17_month}
    '''
    dates_result = bc.sql(query_date).compute()
    min_date_sk_val = dates_result['min_d_date_sk'][0]
    max_date_sk_val = dates_result['max_d_date_sk'][0]
    query = f'''
    SELECT sum(promotional) as promotional,
    sum(total) as total,
    CASE WHEN sum(total) > 0.0 THEN (100.0 * sum(promotional)) / sum(total)
    ELSE 0.0 END as promo_percent
    FROM
    (
    SELECT p_channel_email,
    p_channel_dmail,
    p_channel_tv,
    SUM( CAST(ss_ext_sales_price AS DOUBLE) ) total,
    CASE WHEN (p_channel_dmail = 'Y' OR p_channel_email = 'Y' OR p_channel_tv = 'Y')
    THEN SUM(CAST(ss_ext_sales_price AS DOUBLE)) ELSE 0 END as promotional
    FROM store_sales ss
    INNER JOIN promotion p ON ss.ss_promo_sk = p.p_promo_sk
    inner join item i on ss.ss_item_sk = i.i_item_sk
    inner join store s on ss.ss_store_sk = s.s_store_sk
    inner join customer c on c.c_customer_sk = ss.ss_customer_sk
    inner join customer_address ca
    on c.c_current_addr_sk = ca.ca_address_sk
    WHERE i.i_category IN ({q17_i_category_IN})
    AND s.s_gmt_offset = {q17_gmt_offset}
    AND ca.ca_gmt_offset = {q17_gmt_offset}
    AND ss.ss_sold_date_sk >= {min_date_sk_val}
    AND ss.ss_sold_date_sk <= {max_date_sk_val}
    GROUP BY p_channel_email, p_channel_dmail, p_channel_tv
    ) sum_promotional
    -- we don't need a 'ON' join condition. result is just two numbers.
    '''
    result = bc.sql(query)
    return result
def InceptionV3Body(net, from_layer, output_pred=False):
    """Append the Inception-v3 body to *net*, starting from *from_layer*.

    Builds the stem, the 35x35 / 17x17 / 8x8 inception stages and, when
    *output_pred* is set, the final pooling + classifier layers.
    Fix: replaced Python-2-only ``xrange`` with ``range`` (the file uses
    Python-3 features elsewhere; ``range`` also works on Python 2).

    :param net: caffe NetSpec being built (mutated in place).
    :param from_layer: name of the layer to attach the stem to.
    :param output_pred: also add pool_3 / softmax prediction layers.
    :return: the mutated *net*.
    """
    # --- Stem: conv / conv_1 / conv_2 / pool / conv_3 / conv_4 / pool_1 ---
    use_scale = False
    out_layer = 'conv'
    ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, num_output=32, kernel_size=3, pad=0, stride=2, use_scale=use_scale)
    from_layer = out_layer
    out_layer = 'conv_1'
    ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, num_output=32, kernel_size=3, pad=0, stride=1, use_scale=use_scale)
    from_layer = out_layer
    out_layer = 'conv_2'
    ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, num_output=64, kernel_size=3, pad=1, stride=1, use_scale=use_scale)
    from_layer = out_layer
    out_layer = 'pool'
    net[out_layer] = L.Pooling(net[from_layer], pool=P.Pooling.MAX, kernel_size=3, stride=2, pad=0)
    from_layer = out_layer
    out_layer = 'conv_3'
    ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, num_output=80, kernel_size=1, pad=0, stride=1, use_scale=use_scale)
    from_layer = out_layer
    out_layer = 'conv_4'
    ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, num_output=192, kernel_size=3, pad=0, stride=1, use_scale=use_scale)
    from_layer = out_layer
    out_layer = 'pool_1'
    net[out_layer] = L.Pooling(net[from_layer], pool=P.Pooling.MAX, kernel_size=3, stride=2, pad=0)
    from_layer = out_layer
    # --- 35x35 stage: mixed, mixed_1, mixed_2 ---
    for inception_id in range(0, 3):
        if (inception_id == 0):
            out_layer = 'mixed'
            tower_2_conv_num_output = 32
        else:
            out_layer = 'mixed_{}'.format(inception_id)
            tower_2_conv_num_output = 64
        towers = []
        tower_name = '{}'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=64, kernel_size=1, pad=0, stride=1)])
        towers.append(tower)
        tower_name = '{}/tower'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=48, kernel_size=1, pad=0, stride=1), dict(name='conv_1', num_output=64, kernel_size=5, pad=2, stride=1)])
        towers.append(tower)
        tower_name = '{}/tower_1'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=64, kernel_size=1, pad=0, stride=1), dict(name='conv_1', num_output=96, kernel_size=3, pad=1, stride=1), dict(name='conv_2', num_output=96, kernel_size=3, pad=1, stride=1)])
        towers.append(tower)
        tower_name = '{}/tower_2'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='pool', pool=P.Pooling.AVE, kernel_size=3, pad=1, stride=1), dict(name='conv', num_output=tower_2_conv_num_output, kernel_size=1, pad=0, stride=1)])
        towers.append(tower)
        out_layer = '{}/join'.format(out_layer)
        net[out_layer] = L.Concat(*towers, axis=1)
        from_layer = out_layer
    # --- Grid reduction: mixed_3 ---
    out_layer = 'mixed_3'
    towers = []
    tower_name = '{}'.format(out_layer)
    tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=384, kernel_size=3, pad=0, stride=2)])
    towers.append(tower)
    tower_name = '{}/tower'.format(out_layer)
    tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=64, kernel_size=1, pad=0, stride=1), dict(name='conv_1', num_output=96, kernel_size=3, pad=1, stride=1), dict(name='conv_2', num_output=96, kernel_size=3, pad=0, stride=2)])
    towers.append(tower)
    tower_name = '{}'.format(out_layer)
    tower = InceptionTower(net, from_layer, tower_name, [dict(name='pool', pool=P.Pooling.MAX, kernel_size=3, pad=0, stride=2)])
    towers.append(tower)
    out_layer = '{}/join'.format(out_layer)
    net[out_layer] = L.Concat(*towers, axis=1)
    from_layer = out_layer
    # --- 17x17 stage: mixed_4 .. mixed_7 (factorized 7x7 convolutions) ---
    for inception_id in range(4, 8):
        if (inception_id == 4):
            num_output = 128
        elif ((inception_id == 5) or (inception_id == 6)):
            num_output = 160
        elif (inception_id == 7):
            num_output = 192
        out_layer = 'mixed_{}'.format(inception_id)
        towers = []
        tower_name = '{}'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1)])
        towers.append(tower)
        tower_name = '{}/tower'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=num_output, kernel_size=1, pad=0, stride=1), dict(name='conv_1', num_output=num_output, kernel_size=[1, 7], pad=[0, 3], stride=[1, 1]), dict(name='conv_2', num_output=192, kernel_size=[7, 1], pad=[3, 0], stride=[1, 1])])
        towers.append(tower)
        tower_name = '{}/tower_1'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=num_output, kernel_size=1, pad=0, stride=1), dict(name='conv_1', num_output=num_output, kernel_size=[7, 1], pad=[3, 0], stride=[1, 1]), dict(name='conv_2', num_output=num_output, kernel_size=[1, 7], pad=[0, 3], stride=[1, 1]), dict(name='conv_3', num_output=num_output, kernel_size=[7, 1], pad=[3, 0], stride=[1, 1]), dict(name='conv_4', num_output=192, kernel_size=[1, 7], pad=[0, 3], stride=[1, 1])])
        towers.append(tower)
        tower_name = '{}/tower_2'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='pool', pool=P.Pooling.AVE, kernel_size=3, pad=1, stride=1), dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1)])
        towers.append(tower)
        out_layer = '{}/join'.format(out_layer)
        net[out_layer] = L.Concat(*towers, axis=1)
        from_layer = out_layer
    # --- Grid reduction: mixed_8 ---
    out_layer = 'mixed_8'
    towers = []
    tower_name = '{}/tower'.format(out_layer)
    tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1), dict(name='conv_1', num_output=320, kernel_size=3, pad=0, stride=2)])
    towers.append(tower)
    tower_name = '{}/tower_1'.format(out_layer)
    tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1), dict(name='conv_1', num_output=192, kernel_size=[1, 7], pad=[0, 3], stride=[1, 1]), dict(name='conv_2', num_output=192, kernel_size=[7, 1], pad=[3, 0], stride=[1, 1]), dict(name='conv_3', num_output=192, kernel_size=3, pad=0, stride=2)])
    towers.append(tower)
    tower_name = '{}'.format(out_layer)
    tower = InceptionTower(net, from_layer, tower_name, [dict(name='pool', pool=P.Pooling.MAX, kernel_size=3, pad=0, stride=2)])
    towers.append(tower)
    out_layer = '{}/join'.format(out_layer)
    net[out_layer] = L.Concat(*towers, axis=1)
    from_layer = out_layer
    # --- 8x8 stage: mixed_9, mixed_10 (expanded-filter-bank towers) ---
    for inception_id in range(9, 11):
        num_output = 384
        num_output2 = 448
        if (inception_id == 9):
            pool = P.Pooling.AVE
        else:
            pool = P.Pooling.MAX
        out_layer = 'mixed_{}'.format(inception_id)
        towers = []
        tower_name = '{}'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=320, kernel_size=1, pad=0, stride=1)])
        towers.append(tower)
        tower_name = '{}/tower'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=num_output, kernel_size=1, pad=0, stride=1)])
        subtowers = []
        subtower_name = '{}/mixed'.format(tower_name)
        subtower = InceptionTower(net, '{}/conv'.format(tower_name), subtower_name, [dict(name='conv', num_output=num_output, kernel_size=[1, 3], pad=[0, 1], stride=[1, 1])])
        subtowers.append(subtower)
        subtower = InceptionTower(net, '{}/conv'.format(tower_name), subtower_name, [dict(name='conv_1', num_output=num_output, kernel_size=[3, 1], pad=[1, 0], stride=[1, 1])])
        subtowers.append(subtower)
        net[subtower_name] = L.Concat(*subtowers, axis=1)
        towers.append(net[subtower_name])
        tower_name = '{}/tower_1'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='conv', num_output=num_output2, kernel_size=1, pad=0, stride=1), dict(name='conv_1', num_output=num_output, kernel_size=3, pad=1, stride=1)])
        subtowers = []
        subtower_name = '{}/mixed'.format(tower_name)
        subtower = InceptionTower(net, '{}/conv_1'.format(tower_name), subtower_name, [dict(name='conv', num_output=num_output, kernel_size=[1, 3], pad=[0, 1], stride=[1, 1])])
        subtowers.append(subtower)
        subtower = InceptionTower(net, '{}/conv_1'.format(tower_name), subtower_name, [dict(name='conv_1', num_output=num_output, kernel_size=[3, 1], pad=[1, 0], stride=[1, 1])])
        subtowers.append(subtower)
        net[subtower_name] = L.Concat(*subtowers, axis=1)
        towers.append(net[subtower_name])
        tower_name = '{}/tower_2'.format(out_layer)
        tower = InceptionTower(net, from_layer, tower_name, [dict(name='pool', pool=pool, kernel_size=3, pad=1, stride=1), dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1)])
        towers.append(tower)
        out_layer = '{}/join'.format(out_layer)
        net[out_layer] = L.Concat(*towers, axis=1)
        from_layer = out_layer
    if output_pred:
        # Classification head: global average pool + 1008-way classifier.
        net.pool_3 = L.Pooling(net[from_layer], pool=P.Pooling.AVE, kernel_size=8, pad=0, stride=1)
        net.softmax = L.InnerProduct(net.pool_3, num_output=1008)
        net.softmax_prob = L.Softmax(net.softmax)
    return net
def EnsureNonBlockingDataPipe(validated_datapipe):
    """Adapt an IterDataPipe so it exposes the non-blocking protocol.

    Pipes that already implement ``NonBlocking`` are returned unchanged;
    otherwise ``nonblocking_next`` / ``reset_iterator`` methods (and the
    ``_as_iterator`` slot they rely on) are monkey-patched onto the instance.
    """
    if not isinstance(validated_datapipe, IterDataPipe):
        raise Exception('Not Iterable DataPipe ' + str(validated_datapipe.__class__))
    if isinstance(validated_datapipe, NonBlocking):
        return validated_datapipe
    if not hasattr(validated_datapipe, '_as_iterator'):
        validated_datapipe._as_iterator = None
    if not hasattr(validated_datapipe, 'nonblocking_next'):
        def nonblocking_next(self):
            # Create the underlying iterator lazily on first use.
            if self._as_iterator is None:
                self._as_iterator = iter(self)
            return next(self._as_iterator)
        validated_datapipe.nonblocking_next = types.MethodType(nonblocking_next, validated_datapipe)
    if not hasattr(validated_datapipe, 'reset_iterator'):
        def reset_iterator(self):
            # Dropping the iterator forces re-creation on the next call.
            self._as_iterator = None
        validated_datapipe.reset_iterator = types.MethodType(reset_iterator, validated_datapipe)
    return validated_datapipe
# NOTE(review): the original decorator line was garbled (bare call statement);
# restored as a decorator application -- confirm the exact decorator name upstream.
@_kernel_api(params={'handle': UINT})
def hook__sflt_unregister(ql, address, params):
    """Kernel hook for sflt_unregister.

    Emits EV_SFLT_UNREGISTERED for the first event matching the handle,
    then deregisters every matching event. Always returns 0 (success).
    """
    handle = str(params['handle']).encode()
    events = ql.os.ev_manager.get_events_by_name(b'', keyword=handle)
    # Only the first matching event is emitted (original behaviour preserved).
    for event in events:
        ql.os.ev_manager.emit(event.name, MacOSEventType.EV_SFLT_UNREGISTERED, [params['handle']])
        break
    ql.os.ev_manager.deregister(b'', keyword=handle)
    ql.log.debug('A sflt event has been deregistered: %s' % handle)
    return 0
def test_run_all(mock_pipe):
    """run() forwards every argument to Pipeline and returns its context."""
    ctx = run(pipeline_name='arb pipe', args_in='arb context input', parse_args=True, dict_in={'a': 'b'}, groups=['g'], success_group='sg', failure_group='fg', loader='arb loader', py_dir='arb/dir')
    assert type(ctx) is Context
    assert ctx == Context({'a': 'b'})
    assert not ctx.is_in_pipeline_scope
    mock_pipe.assert_called_once_with(name='arb pipe', context_args='arb context input', parse_input=True, groups=['g'], success_group='sg', failure_group='fg', loader='arb loader', py_dir='arb/dir')
    mock_pipe.return_value.run.assert_called_once_with(ctx)
def configure_stabilization_augs(img, init_image_pil, params, loss_augs):
    """Build the stabilization losses enabled in *params* and append them.

    A weight of '' or '0' disables the corresponding loss.  Returns the
    extended loss list, the image handles and the newly built losses.
    """
    weight_names = ['direct_stabilization_weight', 'depth_stabilization_weight', 'edge_stabilization_weight']
    built = []
    for weight_name in weight_names:
        if params[weight_name] not in ['', '0']:
            built.append(build_loss(weight_name, params[weight_name], 'stabilization', img, init_image_pil))
    loss_augs.extend(built)
    return (loss_augs, img, init_image_pil, built)
class StopOrder(ExecutionStyle):
    """Execution style for an order triggered once the market reaches the
    given stop price; no limit price applies."""

    def __init__(self, stop_price, asset=None, exchange=None):
        # Validate eagerly so a bad price fails at construction time.
        check_stoplimit_prices(stop_price, 'stop')
        self.stop_price = stop_price
        self._exchange = exchange
        self.asset = asset

    def get_limit_price(self, _is_buy):
        """Stop orders carry no limit price."""
        return None

    def get_stop_price(self, is_buy):
        """Return the stop price rounded asymmetrically per order side."""
        tick = 0.01 if self.asset is None else self.asset.tick_size
        return asymmetric_round_price(self.stop_price, not is_buy, tick_size=tick)
def tw_mock():
    """Return a minimal TerminalWriter stand-in that records output lines."""

    class TWMock():
        # Sentinel flagging entries produced via write() (vs. line()/sep()).
        WRITE = object()

        def __init__(self):
            self.lines = []
            self.is_writing = False

        def sep(self, sep, line=None):
            self.lines.append((sep, line))

        def write(self, msg, **kw):
            self.lines.append((TWMock.WRITE, msg))

        def _write_source(self, lines, indents=()):
            prefixes = indents if indents else [''] * len(lines)
            for prefix, text in zip(prefixes, lines):
                self.line(prefix + text)

        def line(self, line, **kw):
            self.lines.append(line)

        def markup(self, text, **kw):
            # Markup is a no-op in the mock; return the text unchanged.
            return text

        def get_write_msg(self, idx):
            flag, msg = self.lines[idx]
            assert flag == TWMock.WRITE
            return msg

        fullwidth = 80

    return TWMock()
# NOTE(review): the original decorator line was garbled ("(scope='module')" is
# a SyntaxError); restored as a module-scoped pytest fixture.
@pytest.fixture(scope='module')
def inline_query_result_contact():
    """Module-scoped fixture: a fully populated InlineQueryResultContact."""
    return InlineQueryResultContact(TestInlineQueryResultContactBase.id_, TestInlineQueryResultContactBase.phone_number, TestInlineQueryResultContactBase.first_name, last_name=TestInlineQueryResultContactBase.last_name, thumbnail_url=TestInlineQueryResultContactBase.thumbnail_url, thumbnail_width=TestInlineQueryResultContactBase.thumbnail_width, thumbnail_height=TestInlineQueryResultContactBase.thumbnail_height, input_message_content=TestInlineQueryResultContactBase.input_message_content, reply_markup=TestInlineQueryResultContactBase.reply_markup)
# NOTE(review): the original decorator text was garbled ('_arg_scope' as a bare
# statement); restored to tf.contrib.framework's @add_arg_scope -- confirm upstream.
@add_arg_scope
def batch_norm(inputs, decay=0.999, center=True, scale=False, epsilon=0.001, activation_fn=None, param_initializers=None, param_regularizers=None, updates_collections=ops.GraphKeys.UPDATE_OPS, is_training=True, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, batch_weights=None, fused=None, data_format=DATA_FORMAT_NHWC, zero_debias_moving_mean=False, scope=None, renorm=False, renorm_clipping=None, renorm_decay=0.99, adjustment=None):
    """Add a batch-normalization layer.

    Dispatches between three implementations: the fused kernel
    (_fused_batch_norm), the core BatchNormalization layer, and a manual
    moments-based path (needed for batch_weights / zero-debias /
    custom updates_collections).

    Returns the normalized (and optionally activated) tensor, collected
    into *outputs_collections*.
    """
    if (fused is None):
        fused = True
    inputs = ops.convert_to_tensor(inputs)
    rank = inputs.get_shape().ndims
    # The fused kernel only supports rank-2/4 inputs without batch
    # weights, renorm or adjustment.
    possible_to_fuse = ((batch_weights is None) and (not renorm) and (rank in [2, 4]) and (adjustment is None))
    if (fused and possible_to_fuse and (zero_debias_moving_mean or (rank == 2) or (updates_collections is not ops.GraphKeys.UPDATE_OPS))):
        return _fused_batch_norm(inputs, decay=decay, center=center, scale=scale, epsilon=epsilon, activation_fn=activation_fn, param_initializers=param_initializers, param_regularizers=param_regularizers, updates_collections=updates_collections, is_training=is_training, reuse=reuse, variables_collections=variables_collections, outputs_collections=outputs_collections, trainable=trainable, data_format=data_format, zero_debias_moving_mean=zero_debias_moving_mean, scope=scope)
    if (data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC)):
        raise ValueError('data_format has to be either NCHW or NHWC.')
    layer_variable_getter = _build_variable_getter()
    with variable_scope.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:
        inputs = ops.convert_to_tensor(inputs)
        # Fast path: delegate to the core BatchNormalization layer when no
        # special handling (batch weights, custom update collections,
        # zero-debias) is requested.
        if ((batch_weights is None) and (updates_collections is ops.GraphKeys.UPDATE_OPS) and (not zero_debias_moving_mean)):
            axis = (1 if (data_format == DATA_FORMAT_NCHW) else (- 1))
            if (not param_initializers):
                param_initializers = {}
            beta_initializer = param_initializers.get('beta', init_ops.zeros_initializer())
            gamma_initializer = param_initializers.get('gamma', init_ops.ones_initializer())
            moving_mean_initializer = param_initializers.get('moving_mean', init_ops.zeros_initializer())
            moving_variance_initializer = param_initializers.get('moving_variance', init_ops.ones_initializer())
            if (not param_regularizers):
                param_regularizers = {}
            beta_regularizer = param_regularizers.get('beta')
            gamma_regularizer = param_regularizers.get('gamma')
            layer = normalization_layers.BatchNormalization(axis=axis, momentum=decay, epsilon=epsilon, center=center, scale=scale, beta_initializer=beta_initializer, gamma_initializer=gamma_initializer, moving_mean_initializer=moving_mean_initializer, moving_variance_initializer=moving_variance_initializer, beta_regularizer=beta_regularizer, gamma_regularizer=gamma_regularizer, trainable=trainable, renorm=renorm, renorm_clipping=renorm_clipping, renorm_momentum=renorm_decay, adjustment=adjustment, name=sc.name, _scope=sc, _reuse=reuse, fused=fused)
            outputs = layer.apply(inputs, training=is_training)
            # Mirror the layer's variables into the contrib collections.
            _add_variable_to_collections(layer.moving_mean, variables_collections, 'moving_mean')
            _add_variable_to_collections(layer.moving_variance, variables_collections, 'moving_variance')
            if (layer.beta is not None):
                _add_variable_to_collections(layer.beta, variables_collections, 'beta')
            if (layer.gamma is not None):
                _add_variable_to_collections(layer.gamma, variables_collections, 'gamma')
            if (activation_fn is not None):
                outputs = activation_fn(outputs)
            return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
        # Manual path below: renorm is not implemented here.
        if renorm:
            raise ValueError('renorm is not supported with batch_weights, updates_collections or zero_debias_moving_mean')
        inputs_shape = inputs.get_shape()
        inputs_rank = inputs_shape.ndims
        if (inputs_rank is None):
            raise ValueError(('Inputs %s has undefined rank.' % inputs.name))
        dtype = inputs.dtype.base_dtype
        if (batch_weights is not None):
            batch_weights = ops.convert_to_tensor(batch_weights)
            inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
            # Reshape to [batch, 1, 1, ...] so the weights broadcast over features.
            nshape = ([(- 1)] + [1 for _ in range((inputs_rank - 1))])
            batch_weights = array_ops.reshape(batch_weights, nshape)
        if (data_format == DATA_FORMAT_NCHW):
            moments_axes = ([0] + list(range(2, inputs_rank)))
            params_shape = inputs_shape[1:2]
            params_shape_broadcast = list(([1, inputs_shape.dims[1].value] + [1 for _ in range(2, inputs_rank)]))
        else:
            moments_axes = list(range((inputs_rank - 1)))
            params_shape = inputs_shape[(- 1):]
            params_shape_broadcast = None
        if (not params_shape.is_fully_defined()):
            raise ValueError(('Inputs %s has undefined channels dimension %s.' % (inputs.name, params_shape)))
        (beta, gamma) = (None, None)
        if (not param_initializers):
            param_initializers = {}
        if center:
            beta_collections = utils.get_variable_collections(variables_collections, 'beta')
            beta_initializer = param_initializers.get('beta', init_ops.zeros_initializer())
            beta = variables.model_variable('beta', shape=params_shape, dtype=dtype, initializer=beta_initializer, collections=beta_collections, trainable=trainable)
        if scale:
            gamma_collections = utils.get_variable_collections(variables_collections, 'gamma')
            gamma_initializer = param_initializers.get('gamma', init_ops.ones_initializer())
            gamma = variables.model_variable('gamma', shape=params_shape, dtype=dtype, initializer=gamma_initializer, collections=gamma_collections, trainable=trainable)
        # Moving averages must not be partitioned: disable any partitioner.
        with variable_scope.variable_scope(variable_scope.get_variable_scope()) as local_scope:
            local_scope.set_partitioner(None)
            moving_mean_collections = utils.get_variable_collections(variables_collections, 'moving_mean')
            moving_mean_initializer = param_initializers.get('moving_mean', init_ops.zeros_initializer())
            moving_mean = variables.model_variable('moving_mean', shape=params_shape, dtype=dtype, initializer=moving_mean_initializer, trainable=False, collections=moving_mean_collections)
            moving_variance_collections = utils.get_variable_collections(variables_collections, 'moving_variance')
            moving_variance_initializer = param_initializers.get('moving_variance', init_ops.ones_initializer())
            moving_variance = variables.model_variable('moving_variance', shape=params_shape, dtype=dtype, initializer=moving_variance_initializer, trainable=False, collections=moving_variance_collections)
        is_training_value = utils.constant_value(is_training)
        # Compute batch moments only if we might be training.
        need_moments = ((is_training_value is None) or is_training_value)
        if need_moments:
            if (batch_weights is None):
                if (data_format == DATA_FORMAT_NCHW):
                    (mean, variance) = nn.moments(inputs, moments_axes, keep_dims=True)
                    mean = array_ops.reshape(mean, [(- 1)])
                    variance = array_ops.reshape(variance, [(- 1)])
                else:
                    (mean, variance) = nn.moments(inputs, moments_axes)
            elif (data_format == DATA_FORMAT_NCHW):
                (mean, variance) = nn.weighted_moments(inputs, moments_axes, batch_weights, keepdims=True)
                mean = array_ops.reshape(mean, [(- 1)])
                variance = array_ops.reshape(variance, [(- 1)])
            else:
                (mean, variance) = nn.weighted_moments(inputs, moments_axes, batch_weights)
            moving_vars_fn = (lambda : (moving_mean, moving_variance))
            if (updates_collections is None):
                # Force the moving-average updates to run in-place as
                # dependencies of the returned mean/variance.
                def _force_updates():
                    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
                    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay, zero_debias=False)
                    with ops.control_dependencies([update_moving_mean, update_moving_variance]):
                        return (array_ops.identity(mean), array_ops.identity(variance))
                (mean, variance) = utils.smart_cond(is_training, _force_updates, moving_vars_fn)
            else:
                # Defer the updates: collect the ops so the caller can run them.
                def _delay_updates():
                    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
                    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay, zero_debias=False)
                    return (update_moving_mean, update_moving_variance)
                (update_mean, update_variance) = utils.smart_cond(is_training, _delay_updates, moving_vars_fn)
                ops.add_to_collections(updates_collections, update_mean)
                ops.add_to_collections(updates_collections, update_variance)
                vars_fn = (lambda : (mean, variance))
                (mean, variance) = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
        else:
            # Pure inference: use the stored moving averages.
            (mean, variance) = (moving_mean, moving_variance)
        if (data_format == DATA_FORMAT_NCHW):
            mean = array_ops.reshape(mean, params_shape_broadcast)
            variance = array_ops.reshape(variance, params_shape_broadcast)
            if (beta is not None):
                beta = array_ops.reshape(beta, params_shape_broadcast)
            if (gamma is not None):
                gamma = array_ops.reshape(gamma, params_shape_broadcast)
        outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)
        outputs.set_shape(inputs_shape)
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
def get_example_spectral_response(wavelength=None):
    """Return an example photovoltaic spectral response (SR) curve.

    Parameters
    ----------
    wavelength : array-like, optional
        Wavelengths [nm] at which to evaluate the response. When omitted,
        a 5 nm grid spanning 280-1200 nm is used.

    Returns
    -------
    pandas.Series
        Unitless spectral response (0-1), named 'spectral_response' and
        indexed by wavelength [nm] (index named 'wavelength').
    """
    # Reference (wavelength [nm], response) pairs for a typical c-Si cell.
    sr_wvl = [290, 350, 400, 500, 650, 800, 900, 950, 1000, 1050, 1100, 1150, 1190]
    sr_val = [0.0, 0.27, 0.37, 0.52, 0.71, 0.88, 0.97, 1.0, 0.93, 0.58, 0.21, 0.05, 0.0]
    if wavelength is None:
        step = 5.0
        wavelength = np.arange(280, 1200 + step, step)
    # Cubic spline through the reference points; zero outside their range.
    interpolator = interp1d(sr_wvl, sr_val, kind='cubic',
                            bounds_error=False, fill_value=0.0,
                            copy=False, assume_sorted=True)
    sr = pd.Series(data=interpolator(wavelength), index=wavelength)
    sr.index.name = 'wavelength'
    sr.name = 'spectral_response'
    return sr
class ResNet3dPathway(ResNet3d):
    """One pathway of a SlowFast-style 3D ResNet backbone.

    Extends ``ResNet3d`` with optional lateral connections: extra
    ``ConvModule`` layers (``conv1_lateral`` and ``layer{i}_lateral``)
    whose outputs are intended to be concatenated with this pathway's
    features, so a companion (faster, channel-reduced) pathway can be
    fused in after the stem and after every stage except the last.

    Args:
        lateral (bool): Whether to build the lateral-connection convs.
        speed_ratio (int): Temporal stride of the lateral convs;
            presumably the frame-rate ratio between the fast and slow
            pathways — TODO confirm against the recognizer that wires
            the pathways together.
        channel_ratio (int): Channel reduction ratio of the companion
            pathway relative to this one (lateral conv channel counts
            are divided by it).
        fusion_kernel (int): Temporal kernel size of the lateral convs.
    """

    def __init__(self, *args, lateral=False, speed_ratio=8, channel_ratio=8, fusion_kernel=5, **kwargs):
        # Set lateral attributes *before* the base constructor runs: the
        # base class is expected to call make_res_layer(), which reads
        # self.lateral / self.channel_ratio.
        self.lateral = lateral
        self.speed_ratio = speed_ratio
        self.channel_ratio = channel_ratio
        self.fusion_kernel = fusion_kernel
        super().__init__(*args, **kwargs)
        # Re-walk the stages to build lateral convs; inplanes is
        # recomputed from scratch, mirroring the base class bookkeeping.
        self.inplanes = self.base_channels
        if self.lateral:
            # Fuses companion-pathway features right after the stem conv;
            # downsamples time by speed_ratio, no norm or activation.
            self.conv1_lateral = ConvModule((self.inplanes // self.channel_ratio), ((self.inplanes * 2) // self.channel_ratio), kernel_size=(fusion_kernel, 1, 1), stride=(self.speed_ratio, 1, 1), padding=(((fusion_kernel - 1) // 2), 0, 0), bias=False, conv_cfg=self.conv_cfg, norm_cfg=None, act_cfg=None)
        self.lateral_connections = []
        for i in range(len(self.stage_blocks)):
            planes = (self.base_channels * (2 ** i))
            self.inplanes = (planes * self.block.expansion)
            # No lateral conv after the last stage: nothing downstream
            # is left to fuse into.
            if (lateral and (i != (self.num_stages - 1))):
                lateral_name = f'layer{(i + 1)}_lateral'
                setattr(self, lateral_name, ConvModule((self.inplanes // self.channel_ratio), ((self.inplanes * 2) // self.channel_ratio), kernel_size=(fusion_kernel, 1, 1), stride=(self.speed_ratio, 1, 1), padding=(((fusion_kernel - 1) // 2), 0, 0), bias=False, conv_cfg=self.conv_cfg, norm_cfg=None, act_cfg=None))
                self.lateral_connections.append(lateral_name)

    def make_res_layer(self, block, inplanes, planes, blocks, spatial_stride=1, temporal_stride=1, dilation=1, style='pytorch', inflate=1, inflate_style='3x1x1', non_local=0, non_local_cfg=dict(), conv_cfg=None, norm_cfg=None, act_cfg=None, with_cp=False):
        """Build one residual stage, widening the first block's input by
        the lateral channels when lateral connections are enabled.

        Returns:
            nn.Sequential: the stage, ``blocks`` residual blocks long.
        """
        # Broadcast scalar per-stage flags into one flag per block.
        inflate = (inflate if (not isinstance(inflate, int)) else ((inflate,) * blocks))
        non_local = (non_local if (not isinstance(non_local, int)) else ((non_local,) * blocks))
        assert ((len(inflate) == blocks) and (len(non_local) == blocks))
        # Extra input channels contributed by the concatenated lateral
        # features (zero when laterals are disabled).
        if self.lateral:
            lateral_inplanes = ((inplanes * 2) // self.channel_ratio)
        else:
            lateral_inplanes = 0
        # Downsample the identity branch whenever the first block changes
        # spatial resolution or channel count.
        if ((spatial_stride != 1) or ((inplanes + lateral_inplanes) != (planes * block.expansion))):
            downsample = ConvModule((inplanes + lateral_inplanes), (planes * block.expansion), kernel_size=1, stride=(temporal_stride, spatial_stride, spatial_stride), bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
        else:
            downsample = None
        layers = []
        # First block absorbs the lateral channels and applies the stride.
        layers.append(block((inplanes + lateral_inplanes), planes, spatial_stride, temporal_stride, dilation, downsample, style=style, inflate=(inflate[0] == 1), inflate_style=inflate_style, non_local=(non_local[0] == 1), non_local_cfg=non_local_cfg, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, with_cp=with_cp))
        inplanes = (planes * block.expansion)
        # Remaining blocks keep resolution (stride 1) and channel count.
        for i in range(1, blocks):
            layers.append(block(inplanes, planes, 1, 1, dilation, style=style, inflate=(inflate[i] == 1), inflate_style=inflate_style, non_local=(non_local[i] == 1), non_local_cfg=non_local_cfg, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, with_cp=with_cp))
        return nn.Sequential(*layers)

    def inflate_weights(self, logger):
        """Inflate 2D checkpoint weights (``self.pretrained``) into this
        3D network.

        Lateral modules are skipped (no 2D counterpart); every other
        ConvModule gets its conv and bn parameters inflated in place.
        Checkpoint params that end up unused are logged.
        """
        state_dict_r2d = _load_checkpoint(self.pretrained)
        if ('state_dict' in state_dict_r2d):
            state_dict_r2d = state_dict_r2d['state_dict']
        inflated_param_names = []
        for (name, module) in self.named_modules():
            # Lateral convs are 3D-only additions: nothing to inflate.
            if ('lateral' in name):
                continue
            if isinstance(module, ConvModule):
                if ('downsample' in name):
                    # In torchvision-style checkpoints the downsample path
                    # is a Sequential: index 0 is conv, index 1 is bn.
                    original_conv_name = (name + '.0')
                    original_bn_name = (name + '.1')
                else:
                    original_conv_name = name
                    original_bn_name = name.replace('conv', 'bn')
                if ((original_conv_name + '.weight') not in state_dict_r2d):
                    logger.warning(f'Module not exist in the state_dict_r2d: {original_conv_name}')
                else:
                    self._inflate_conv_params(module.conv, state_dict_r2d, original_conv_name, inflated_param_names)
                if ((original_bn_name + '.weight') not in state_dict_r2d):
                    logger.warning(f'Module not exist in the state_dict_r2d: {original_bn_name}')
                else:
                    self._inflate_bn_params(module.bn, state_dict_r2d, original_bn_name, inflated_param_names)
        remaining_names = (set(state_dict_r2d.keys()) - set(inflated_param_names))
        if remaining_names:
            logger.info(f'These parameters in the 2d checkpoint are not loaded: {remaining_names}')

    def _inflate_conv_params(self, conv3d, state_dict_2d, module_name_2d, inflated_param_names):
        """Copy a 2D conv's weight (and bias) into ``conv3d``, repeating
        the weight along the temporal axis and dividing by the temporal
        kernel size so the initial response matches the 2D network.

        If the 3D conv expects more input channels than the 2D weight
        provides (lateral features concatenated upstream), the extra
        input channels are zero-padded.
        """
        weight_2d_name = (module_name_2d + '.weight')
        conv2d_weight = state_dict_2d[weight_2d_name]
        old_shape = conv2d_weight.shape
        new_shape = conv3d.weight.data.shape
        kernel_t = new_shape[2]
        if (new_shape[1] != old_shape[1]):
            # Zero-pad the input-channel dim to cover lateral channels.
            new_channels = (new_shape[1] - old_shape[1])
            pad_shape = old_shape
            pad_shape = ((pad_shape[:1] + (new_channels,)) + pad_shape[2:])
            conv2d_weight = torch.cat((conv2d_weight, torch.zeros(pad_shape).type_as(conv2d_weight).to(conv2d_weight.device)), dim=1)
        # Repeat across time and rescale so summing over kernel_t frames
        # reproduces the original 2D activation magnitude.
        new_weight = (conv2d_weight.data.unsqueeze(2).expand_as(conv3d.weight) / kernel_t)
        conv3d.weight.data.copy_(new_weight)
        inflated_param_names.append(weight_2d_name)
        if (getattr(conv3d, 'bias') is not None):
            bias_2d_name = (module_name_2d + '.bias')
            conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
            inflated_param_names.append(bias_2d_name)

    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` stages (plus
        their lateral convs): switch to eval mode, disable gradients."""
        if (self.frozen_stages >= 0):
            self.conv1.eval()
            for param in self.conv1.parameters():
                param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False
            # Freeze the matching lateral conv; the last stage has none.
            if ((i != len(self.res_layers)) and self.lateral):
                lateral_name = self.lateral_connections[(i - 1)]
                conv_lateral = getattr(self, lateral_name)
                conv_lateral.eval()
                for param in conv_lateral.parameters():
                    param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Initialize weights via the base class, then Kaiming-init the
        lateral convs, which have no pretrained counterpart.

        Args:
            pretrained (str | None): If given, overrides
                ``self.pretrained`` before delegating to the base
                implementation.
        """
        if pretrained:
            self.pretrained = pretrained
        super().init_weights()
        for module_name in self.lateral_connections:
            layer = getattr(self, module_name)
            for m in layer.modules():
                if isinstance(m, (nn.Conv3d, nn.Conv2d)):
                    kaiming_init(m)
class SCMIFileHandler(BaseFileHandler):
    """Reader for SCMI (Sectorized Cloud and Moisture Imagery) NetCDF files.

    Opens the file with xarray at construction time and exposes dataset
    variables, cached coordinates, and projection/area information.

    NOTE(review): ``sensor_names``, ``start_time`` and ``end_time`` are
    plain methods here, yet ``end_time`` returns ``self.start_time``
    without calling it — upstream these look like ``@property``s whose
    decorators were stripped during extraction; confirm before relying
    on them.
    """

    def __init__(self, filename, filename_info, filetype_info):
        super(SCMIFileHandler, self).__init__(filename, filename_info, filetype_info)
        # mask_and_scale=False: fill/scale handling is done manually in
        # __getitem__ so _FillValue/scale_factor attrs stay accessible.
        self.nc = xr.open_dataset(self.filename, decode_cf=True, mask_and_scale=False, chunks={'x': LOAD_CHUNK_SIZE, 'y': LOAD_CHUNK_SIZE})
        self.platform_name = self.nc.attrs['satellite_id']
        self.sensor = self._get_sensor()
        self.nlines = self.nc.dims['y']
        self.ncols = self.nc.dims['x']
        # Cache of already-loaded coordinate variables, keyed by name.
        self.coords = {}

    def _get_sensor(self):
        """Guess the sensor from the satellite id: Himawari-8/9 carry
        AHI; everything else is treated as ABI."""
        is_h8 = ('H8' in self.platform_name)
        is_h9 = ('H9' in self.platform_name)
        is_ahi = (is_h8 or is_h9)
        return ('ahi' if is_ahi else 'abi')

    def sensor_names(self):
        """Return the detected sensor as a single-element list."""
        return [self.sensor]

    def __getitem__(self, item):
        """Load variable ``item`` with fill masking and scale/offset applied.

        Coordinate variables are cached in ``self.coords`` and re-attached
        so repeated accesses do not redo the scaling work.
        """
        data = self.nc[item]
        attrs = data.attrs
        factor = data.attrs.get('scale_factor')
        offset = data.attrs.get('add_offset')
        fill = data.attrs.get('_FillValue')
        if (fill is not None):
            data = data.where((data != fill))
        if (factor is not None):
            # NOTE(review): assumes add_offset is present whenever
            # scale_factor is; a file with only scale_factor would raise
            # on the `+ offset` (offset would be None) — confirm.
            data = ((data * float(factor)) + offset)
        # where()/arithmetic drop attrs; restore the originals.
        data.attrs = attrs
        new_coords = {}
        # Drop the scalar 'time' coordinate; it is not needed on imagery.
        if ('time' in data.coords):
            data = data.drop_vars('time')
        if (item in data.coords):
            self.coords[item] = data
        for coord_name in data.coords.keys():
            if (coord_name not in self.coords):
                self.coords[coord_name] = self[coord_name]
            new_coords[coord_name] = self.coords[coord_name]
        data.coords.update(new_coords)
        return data

    def get_shape(self, key, info):
        """Return the (lines, columns) shape of the data grid."""
        return (self.nlines, self.ncols)

    def get_dataset(self, key, info):
        """Load the dataset for ``key``, attach metadata, and return it.

        Resolution order for the variable name: an explicit ``file_key``
        from ``info`` or the filetype config, then the conventional
        'Sectorized_CMI' variable, then 'data'.
        """
        logger.debug('Reading in get_dataset %s.', key['name'])
        var_name = info.get('file_key', self.filetype_info.get('file_key'))
        if var_name:
            data = self[var_name]
        elif ('Sectorized_CMI' in self.nc):
            data = self['Sectorized_CMI']
        elif ('data' in self.nc):
            data = self['data']
        # NOTE(review): if none of the branches above matched, `data` is
        # unbound and the next line raises NameError — presumably every
        # SCMI file carries one of these variables; confirm.
        data = data.chunk({'x': CHUNK_SIZE, 'y': CHUNK_SIZE})
        factor = data.attrs.pop('scale_factor', 1)
        offset = data.attrs.pop('add_offset', 0)
        units = data.attrs.get('units', 1)
        # Convert unitless reflectance factors to percent.
        if ((units in ['1', '*1']) and (key['calibration'] == 'reflectance')):
            data *= 100
            factor *= 100
            data.attrs['units'] = '%'
        data.attrs.update({'platform_name': self.platform_name, 'sensor': data.attrs.get('sensor', self.sensor)})
        if ('satellite_longitude' in self.nc.attrs):
            data.attrs['orbital_parameters'] = {'projection_longitude': self.nc.attrs['satellite_longitude'], 'projection_latitude': self.nc.attrs['satellite_latitude'], 'projection_altitude': self.nc.attrs['satellite_altitude']}
        scene_id = self.nc.attrs.get('scene_id')
        if (scene_id is not None):
            data.attrs['scene_id'] = scene_id
        data.attrs.update(key.to_dict())
        data.attrs.pop('_FillValue', None)
        # Rescale valid_min/valid_max into the same (already scaled)
        # physical units as the data.
        if ('valid_min' in data.attrs):
            vmin = data.attrs.pop('valid_min')
            vmax = data.attrs.pop('valid_max')
            vmin = ((vmin * factor) + offset)
            vmax = ((vmax * factor) + offset)
            data.attrs['valid_min'] = vmin
            data.attrs['valid_max'] = vmax
        return data

    def _get_cf_grid_mapping_var(self):
        """Return the CF grid-mapping variable describing the projection.

        Raises:
            KeyError: if no known grid-mapping variable is present.
        """
        gmaps = ['fixedgrid_projection', 'goes_imager_projection', 'lambert_projection', 'polar_projection', 'mercator_projection']
        # A grid mapping parsed from the filename takes precedence.
        if ('grid_mapping' in self.filename_info):
            gmaps = ([self.filename_info.get('grid_mapping')] + gmaps)
        for grid_mapping in gmaps:
            if (grid_mapping in self.nc):
                return self.nc[grid_mapping]
        raise KeyError("Can't find grid mapping variable in SCMI file")

    def _get_proj4_name(self, projection):
        """Map a CF grid_mapping_name to its PROJ.4 projection name,
        falling back to the CF name itself when unknown."""
        gmap_name = projection.attrs['grid_mapping_name']
        proj = {'geostationary': 'geos', 'lambert_conformal_conic': 'lcc', 'polar_stereographic': 'stere', 'mercator': 'merc'}.get(gmap_name, gmap_name)
        return proj

    def _get_proj_specific_params(self, projection):
        """Build a PROJ.4 parameter dict from the CF projection variable.

        Raises:
            ValueError: for projections other than geos/lcc/stere/merc.
        """
        proj = self._get_proj4_name(projection)
        proj_dict = {'proj': proj, 'a': float(projection.attrs['semi_major_axis']), 'b': float(projection.attrs['semi_minor_axis']), 'units': 'm'}
        if (proj == 'geos'):
            proj_dict['h'] = float(projection.attrs['perspective_point_height'])
            proj_dict['sweep'] = projection.attrs.get('sweep_angle_axis', 'y')
            proj_dict['lon_0'] = float(projection.attrs['longitude_of_projection_origin'])
            proj_dict['lat_0'] = float(projection.attrs.get('latitude_of_projection_origin', 0.0))
        elif (proj == 'lcc'):
            proj_dict['lat_0'] = float(projection.attrs['standard_parallel'])
            proj_dict['lon_0'] = float(projection.attrs['longitude_of_central_meridian'])
            proj_dict['lat_1'] = float(projection.attrs['latitude_of_projection_origin'])
        elif (proj == 'stere'):
            proj_dict['lat_ts'] = float(projection.attrs['standard_parallel'])
            proj_dict['lon_0'] = float(projection.attrs['straight_vertical_longitude_from_pole'])
            proj_dict['lat_0'] = float(projection.attrs['latitude_of_projection_origin'])
        elif (proj == 'merc'):
            proj_dict['lat_ts'] = float(projection.attrs['standard_parallel'])
            proj_dict['lat_0'] = proj_dict['lat_ts']
            proj_dict['lon_0'] = float(projection.attrs['longitude_of_projection_origin'])
        else:
            raise ValueError("Can't handle projection '{}'".format(proj))
        return proj_dict

    def _calc_extents(self, proj_dict):
        """Compute the area extent (x_ll, y_ll, x_ur, y_ur) in projection
        units (meters).

        Angular x/y coordinates (radians or microradians, typical of
        geostationary grids) are multiplied by the satellite height
        ``h``; pixel-center coordinates are then padded by half a pixel
        on every side to get outer-edge extents.
        """
        h = float(proj_dict.get('h', 1.0))
        x = self['x']
        y = self['y']
        x_units = x.attrs.get('units', 'rad')
        if (x_units == 'meters'):
            h_factor = 1.0
            factor = 1.0
        elif (x_units == 'microradian'):
            h_factor = h
            factor = 1000000.0
        else:
            # Assume plain radians.
            h_factor = h
            factor = 1.0
        x_l = ((h_factor * x[0]) / factor)
        x_r = ((h_factor * x[(- 1)]) / factor)
        # y decreases downward in the file: last row is the lower edge.
        y_l = ((h_factor * y[(- 1)]) / factor)
        y_u = ((h_factor * y[0]) / factor)
        x_half = (((x_r - x_l) / (self.ncols - 1)) / 2.0)
        y_half = (((y_u - y_l) / (self.nlines - 1)) / 2.0)
        return ((x_l - x_half), (y_l - y_half), (x_r + x_half), (y_u + y_half))

    def get_area_def(self, key):
        """Build a pyresample AreaDefinition from the file's projection
        variable and coordinate extents."""
        projection = self._get_cf_grid_mapping_var()
        proj_dict = self._get_proj_specific_params(projection)
        area_extent = self._calc_extents(proj_dict)
        area_name = '{}_{}'.format(self.sensor, proj_dict['proj'])
        return geometry.AreaDefinition(area_name, 'SCMI file area', area_name, proj_dict, self.ncols, self.nlines, np.asarray(area_extent))

    def start_time(self):
        """Observation start time parsed from the 'start_date_time'
        global attribute (format: %Y%j%H%M%S).
        NOTE(review): likely a stripped ``@property`` — see class note."""
        return datetime.strptime(self.nc.attrs['start_date_time'], '%Y%j%H%M%S')

    def end_time(self):
        """Observation end time (the file carries no separate end time).
        NOTE(review): as written this returns the *bound method*
        ``self.start_time`` uncalled — consistent with both having been
        ``@property``s upstream; confirm."""
        return self.start_time

    def __del__(self):
        # Best-effort close of the underlying dataset; ignore OS-level
        # errors during interpreter shutdown.
        try:
            self.nc.close()
        except OSError:
            pass
class MathSATOptions(SolverOptions):
    """Options manager for the MathSAT solver backend.

    Translates generic pySMT solver options into MathSAT configuration
    settings via ``mathsat.msat_set_option``.
    """

    def __init__(self, **base_options):
        SolverOptions.__init__(self, **base_options)

    @staticmethod
    def _set_option(msat_config, name, value):
        """Set one MathSAT option on *msat_config*, raising on failure.

        Must be a staticmethod: callers invoke it as
        ``self._set_option(cfg, name, value)``. Without the decorator,
        ``self`` would be bound as ``msat_config`` and the call would
        pass four arguments to a three-parameter function (TypeError).

        Raises:
            PysmtValueError: if MathSAT rejects the option/value pair.
        """
        check = mathsat.msat_set_option(msat_config, name, value)
        if (check != 0):
            raise PysmtValueError(("Error setting the option '%s=%s'" % (name, value)))

    def __call__(self, solver):
        """Apply the configured options to *solver*'s MathSAT config."""
        if self.generate_models:
            self._set_option(solver.msat_config, 'model_generation', 'true')
        if (self.unsat_cores_mode is not None):
            self._set_option(solver.msat_config, 'unsat_core_generation', '1')
        if (self.random_seed is not None):
            self._set_option(solver.msat_config, 'random_seed', str(self.random_seed))
        # Pass through any user-provided solver-specific options verbatim.
        for (k, v) in self.solver_options.items():
            self._set_option(solver.msat_config, str(k), str(v))
        # Enable API call tracing only when a trace filename was given.
        if ('debug.api_call_trace_filename' in self.solver_options):
            self._set_option(solver.msat_config, 'debug.api_call_trace', '1')
        # Force bit-vector division-by-zero to mode 0 (SMT-LIB semantics).
        self._set_option(solver.msat_config, 'theory.bv.div_by_zero_mode', '0')
_module()
class CosineAnnealingLrUpdaterHook(LrUpdaterHook):
    """LR hook that anneals the learning rate along a cosine curve.

    Exactly one of ``min_lr`` / ``min_lr_ratio`` must be provided: the
    floor (target) LR is either the absolute value ``min_lr`` or
    ``base_lr * min_lr_ratio``.
    """

    def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs):
        # XOR: exactly one way of specifying the floor LR is allowed.
        assert ((min_lr is None) ^ (min_lr_ratio is None))
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        """Return the cosine-annealed LR for the current progress point."""
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        target_lr = (
            (base_lr * self.min_lr_ratio)
            if self.min_lr_ratio is not None
            else self.min_lr
        )
        return annealing_cos(base_lr, target_lr, (progress / max_progress))
def test_change_rv_size_default_update():
    """change_dist_size should replicate a conventional default_update on
    the resized RV's rng, warn about (and drop) an unrecognized one, and
    leave an absent default_update absent."""
    rng = pytensor.shared(np.random.default_rng(0))
    x = normal(rng=rng)

    # Conventional pattern: rng.default_update points at the RV's rng
    # output. This is recognized and replicated on the new rng.
    rng.default_update = x.owner.outputs[0]
    resized = change_dist_size(x, new_size=(2,))
    resized_rng = resized.owner.inputs[0]
    assert (rng.default_update is x.owner.outputs[0])
    assert (resized_rng.default_update is resized.owner.outputs[0])

    # An arbitrary default_update cannot be carried over: a warning is
    # emitted and the new rng gets none.
    other_rng = pytensor.shared(np.random.default_rng(1))
    rng.default_update = other_rng
    with pytest.warns(UserWarning, match='could not be replicated in resized variable'):
        resized = change_dist_size(x, new_size=(2,))
    resized_rng = resized.owner.inputs[0]
    assert (rng.default_update is other_rng)
    assert (resized_rng.default_update is None)

    # No default_update on the original rng: none on the new one either.
    rng.default_update = None
    resized = change_dist_size(x, new_size=(2,))
    resized_rng = resized.owner.inputs[0]
    assert (resized_rng.default_update is None)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.